summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLibravatar Daenney <daenney@users.noreply.github.com>2023-09-07 13:20:37 +0200
committerLibravatar GitHub <noreply@github.com>2023-09-07 13:20:37 +0200
commit14ef09809942800db57de87fe3963770a56b585b (patch)
tree89e95f21145bc7ad5745f77be1d998faa9c09695
parent[bugfix] fix checks for deref the same status descendants / ascendants (#2181) (diff)
downloadgotosocial-14ef09809942800db57de87fe3963770a56b585b.tar.xz
[feature] Support OTLP HTTP, drop Jaeger (#2184)
* [feature] Add http trace exporter, drop Jaeger. Jaeger supports ingesting traces using the OpenTelemetry gRPC or HTTP methods, and the Jaeger project has deprecated the old jaeger transport. * Add support for submitting traces over HTTP * Drop support for the old Jaeger protocol * Upgrade the trace libraries to v1.17 Fixes: #2176 Fixes: #2179
-rw-r--r--docs/configuration/observability.md17
-rw-r--r--example/config.yaml16
-rw-r--r--go.mod24
-rw-r--r--go.sum82
-rw-r--r--internal/config/config.go6
-rw-r--r--internal/config/helpers.gen.go2
-rw-r--r--internal/tracing/tracing.go12
-rwxr-xr-xtest/envparsing.sh3
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go4
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go30
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel10
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go72
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go46
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go17
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go9
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go26
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go38
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go9
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go162
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go2
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go69
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel6
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go3
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go33
-rw-r--r--vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/.golangci.yml84
-rw-r--r--vendor/go.opentelemetry.io/otel/CHANGELOG.md102
-rw-r--r--vendor/go.opentelemetry.io/otel/CODEOWNERS2
-rw-r--r--vendor/go.opentelemetry.io/otel/CONTRIBUTING.md90
-rw-r--r--vendor/go.opentelemetry.io/otel/Makefile60
-rw-r--r--vendor/go.opentelemetry.io/otel/README.md37
-rw-r--r--vendor/go.opentelemetry.io/otel/RELEASING.md31
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/filter.go60
-rw-r--r--vendor/go.opentelemetry.io/otel/attribute/set.go7
-rw-r--r--vendor/go.opentelemetry.io/otel/baggage/baggage.go14
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md50
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go213
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go44
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go6
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go27
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go412
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go6
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go22
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go3022
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go6
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go39
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go2067
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE306
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE5
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go180
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go555
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go99
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go109
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go865
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go378
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go24
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go447
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go121
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go116
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go223
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go110
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go351
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go809
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go257
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go74
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go222
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go591
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go69
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go80
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go31
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go109
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go235
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go164
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go52
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go80
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go177
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go104
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go25
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go94
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go71
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go136
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go35
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go137
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go34
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go1373
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go332
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go238
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go102
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go83
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go34
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go112
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go258
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go70
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go131
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go39
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go69
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go137
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go360
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go204
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go339
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go34
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go61
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go4
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go7
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go)5
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go35
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go)7
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go)31
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go)5
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go)5
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go)5
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go)5
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go4
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE (renamed from vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE)0
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go341
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go (renamed from vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go)18
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/header.go)16
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go202
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go35
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go153
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go328
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go51
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go37
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go67
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go156
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go116
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/gen.go29
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/handler.go7
-rw-r--r--vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go7
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/instrument.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/metric/meter.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/internal/gen.go29
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/container.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/env.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/os.go4
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/process.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/resource/resource.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go68
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/span.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/version.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go1877
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go20
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go199
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go (renamed from vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go)8
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go2310
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go20
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go2495
-rw-r--r--vendor/go.opentelemetry.io/otel/version.go2
-rw-r--r--vendor/go.opentelemetry.io/otel/versions.yaml6
-rw-r--r--vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go2
-rw-r--r--vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go26
-rw-r--r--vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go2
-rw-r--r--vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go9
-rw-r--r--vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go2
-rw-r--r--vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go26
-rw-r--r--vendor/google.golang.org/genproto/googleapis/api/LICENSE (renamed from vendor/google.golang.org/genproto/LICENSE)0
-rw-r--r--vendor/google.golang.org/genproto/googleapis/rpc/LICENSE (renamed from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE)1
-rw-r--r--vendor/google.golang.org/grpc/attributes/attributes.go41
-rw-r--r--vendor/google.golang.org/grpc/balancer/balancer.go2
-rw-r--r--vendor/google.golang.org/grpc/balancer_conn_wrappers.go486
-rw-r--r--vendor/google.golang.org/grpc/call.go5
-rw-r--r--vendor/google.golang.org/grpc/clientconn.go694
-rw-r--r--vendor/google.golang.org/grpc/dialoptions.go44
-rw-r--r--vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go308
-rw-r--r--vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go223
-rw-r--r--vendor/google.golang.org/grpc/idle.go287
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/binarylog.go3
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/method_logger.go9
-rw-r--r--vendor/google.golang.org/grpc/internal/buffer/unbounded.go26
-rw-r--r--vendor/google.golang.org/grpc/internal/envconfig/envconfig.go7
-rw-r--r--vendor/google.golang.org/grpc/internal/envconfig/observability.go6
-rw-r--r--vendor/google.golang.org/grpc/internal/envconfig/xds.go19
-rw-r--r--vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go21
-rw-r--r--vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go64
-rw-r--r--vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go136
-rw-r--r--vendor/google.golang.org/grpc/internal/internal.go24
-rw-r--r--vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go74
-rw-r--r--vendor/google.golang.org/grpc/internal/serviceconfig/duration.go130
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/handler_server.go2
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http2_client.go2
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http2_server.go12
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/transport.go2
-rw-r--r--vendor/google.golang.org/grpc/picker_wrapper.go38
-rw-r--r--vendor/google.golang.org/grpc/pickfirst.go52
-rw-r--r--vendor/google.golang.org/grpc/resolver/resolver.go24
-rw-r--r--vendor/google.golang.org/grpc/resolver_conn_wrapper.go229
-rw-r--r--vendor/google.golang.org/grpc/rpc_util.go27
-rw-r--r--vendor/google.golang.org/grpc/server.go33
-rw-r--r--vendor/google.golang.org/grpc/service_config.go75
-rw-r--r--vendor/google.golang.org/grpc/shared_buffer_pool.go154
-rw-r--r--vendor/google.golang.org/grpc/status/status.go29
-rw-r--r--vendor/google.golang.org/grpc/stream.go33
-rw-r--r--vendor/google.golang.org/grpc/version.go2
-rw-r--r--vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go810
-rw-r--r--vendor/modules.txt53
199 files changed, 12971 insertions, 18580 deletions
diff --git a/docs/configuration/observability.md b/docs/configuration/observability.md
index 6c812a8fc..c29074d1a 100644
--- a/docs/configuration/observability.md
+++ b/docs/configuration/observability.md
@@ -18,21 +18,20 @@ request-id-header: "X-Request-Id"
# Default: false
tracing-enabled: false
-# String. Set the transport protocol for the tracing system. Can either be "grpc" for
-# OTLP gRPC or "jaeger" for jaeger based ingesters.
-# Options: ["grpc", "jaeger"]
+# String. Set the transport protocol for the tracing system. Can either be "grpc"
+# for OTLP gRPC, or "http" for OTLP HTTP.
+# Options: ["grpc", "http"]
# Default: "grpc"
tracing-transport: "grpc"
-# String. Endpoint of the trace ingester. When using the gRPC based transport, the
-# endpoint is usually a single address/port combination. For the jaeger transport it
-# should be a fully qualified URL.
-# OTLP gRPC or "jaeger" for jaeger based ingesters
-# Examples: ["localhost:4317", "http://localhost:14268/api/traces"]
+# String. Endpoint of the trace ingester. When using the gRPC or HTTP based
+# transports, provide the endpoint as a single address/port combination without a
+# protocol scheme.
+# Examples: ["localhost:4317"]
# Default: ""
tracing-endpoint: ""
-# Bool. Disable HTTPS for the gRPC transport protocol.
+# Bool. Disable TLS for the gRPC and HTTP transport protocols.
# Default: false
tracing-insecure-transport: false
```
diff --git a/example/config.yaml b/example/config.yaml
index c27a8f64b..7db577978 100644
--- a/example/config.yaml
+++ b/example/config.yaml
@@ -724,21 +724,19 @@ request-id-header: "X-Request-Id"
# Default: false
tracing-enabled: false
-# String. Set the transport protocol for the tracing system. Can either be "grpc" for
-# OTLP gRPC or "jaeger" for jaeger based ingesters.
-# Options: ["grpc", "jaeger"]
+# String. Set the transport protocol for the tracing system. Can either be "grpc"
+# for OTLP gRPC, or "http" for OTLP HTTP.
+# Options: ["grpc", "http"]
# Default: "grpc"
tracing-transport: "grpc"
-# String. Endpoint of the trace ingester. When using the gRPC based transport, the
-# endpoint is usually a single address/port combination. For the jaeger transport it
-# should be a fully qualified URL.
-# OTLP gRPC or "jaeger" for jaeger based ingesters
-# Examples: ["localhost:4317", "http://localhost:14268/api/traces"]
+# String. Endpoint of the trace ingester. When using the gRPC or HTTP based transports,
+# provide the endpoint as a single address/port combination without a protocol scheme.
+# Examples: ["localhost:4317"]
# Default: ""
tracing-endpoint: ""
-# Bool. Disable HTTPS for the gRPC transport protocol.
+# Bool. Disable TLS for the gRPC and HTTP transport protocols.
# Default: false
tracing-insecure-transport: false
diff --git a/go.mod b/go.mod
index 0d2e5376b..38b42d0a7 100644
--- a/go.mod
+++ b/go.mod
@@ -53,11 +53,11 @@ require (
github.com/uptrace/bun/extra/bunotel v1.1.14
github.com/wagslane/go-password-validator v0.3.0
github.com/yuin/goldmark v1.5.6
- go.opentelemetry.io/otel v1.16.0
- go.opentelemetry.io/otel/exporters/jaeger v1.16.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0
- go.opentelemetry.io/otel/sdk v1.16.0
- go.opentelemetry.io/otel/trace v1.16.0
+ go.opentelemetry.io/otel v1.17.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.17.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.17.0
+ go.opentelemetry.io/otel/sdk v1.17.0
+ go.opentelemetry.io/otel/trace v1.17.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/crypto v0.12.0
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
@@ -118,7 +118,7 @@ require (
github.com/gorilla/css v1.0.0 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/gorilla/sessions v1.2.1 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
@@ -159,17 +159,17 @@ require (
github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.1 // indirect
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
- go.opentelemetry.io/otel/metric v1.16.0 // indirect
- go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.17.0 // indirect
+ go.opentelemetry.io/otel/metric v1.17.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
- google.golang.org/grpc v1.55.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
+ google.golang.org/grpc v1.57.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 2a9681801..d90a47c6e 100644
--- a/go.sum
+++ b/go.sum
@@ -91,14 +91,12 @@ github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW5
github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
github.com/KimMachineGun/automemlimit v0.3.0 h1:khgwM5ESVN85cE6Bq2ozMAAWDfrOEwQ51D/YlmThE04=
github.com/KimMachineGun/automemlimit v0.3.0/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/abema/go-mp4 v0.13.0 h1:gjEZLt7g0ePpYA5sUDrI2r8X+WuI8o+USkgG5wMgmkI=
github.com/abema/go-mp4 v0.13.0/go.mod h1:vPl9t5ZK7K0x68jh12/+ECWBCXoWuIDtNgPtU2f04ws=
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
@@ -109,8 +107,6 @@ github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZX
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
@@ -123,11 +119,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE=
@@ -175,8 +166,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
@@ -191,7 +180,6 @@ github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
@@ -251,7 +239,6 @@ github.com/golang/geo v0.0.0-20200319012246-673a6f80352d/go.mod h1:QZ0nwyI2jOfgR
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -281,7 +268,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -296,7 +282,6 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -338,9 +323,8 @@ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -494,7 +478,6 @@ github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTep
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
@@ -518,7 +501,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
@@ -535,7 +517,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -647,25 +628,22 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
-go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
-go.opentelemetry.io/otel/exporters/jaeger v1.16.0 h1:YhxxmXZ011C0aDZKoNw+juVWAmEfv/0W2XBOv9aHTaA=
-go.opentelemetry.io/otel/exporters/jaeger v1.16.0/go.mod h1:grYbBo/5afWlPpdPZYhyn78Bk04hnvxn2+hvxQhKIQM=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
-go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
-go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
-go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
-go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
-go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
-go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM=
+go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.17.0 h1:U5GYackKpVKlPrd/5gKMlrTlP2dCESAAFU682VCpieY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.17.0/go.mod h1:aFsJfCEnLzEu9vRRAcUiB/cpRTbVsNdF3OHSPpdjxZQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.17.0 h1:iGeIsSYwpYSvh5UGzWrJfTDJvPjrXtxl3GUppj6IXQU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.17.0/go.mod h1:1j3H3G1SBYpZFti6OI4P0uRQCW20MXkG5v4UWXppLLE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.17.0 h1:kvWMtSUNVylLVrOE4WLUmBtgziYoCIYUNSpTYtMzVJI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.17.0/go.mod h1:SExUrRYIXhDgEKG4tkiQovd2HTaELiHUsuK08s5Nqx4=
+go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc=
+go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o=
+go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE=
+go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ=
+go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ=
+go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
@@ -771,7 +749,6 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
@@ -787,7 +764,6 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -848,10 +824,8 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -875,7 +849,6 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
@@ -993,7 +966,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
@@ -1007,9 +979,11 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1023,15 +997,11 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
-google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1044,7 +1014,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
@@ -1064,7 +1033,6 @@ gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/internal/config/config.go b/internal/config/config.go
index 5da226237..16ef32a8b 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -131,9 +131,9 @@ type Configuration struct {
OIDCAdminGroups []string `name:"oidc-admin-groups" usage:"Membership of one of the listed groups makes someone a GtS admin"`
TracingEnabled bool `name:"tracing-enabled" usage:"Enable OTLP Tracing"`
- TracingTransport string `name:"tracing-transport" usage:"grpc or jaeger"`
- TracingEndpoint string `name:"tracing-endpoint" usage:"Endpoint of your trace collector. Eg., 'localhost:4317' for gRPC, 'http://localhost:14268/api/traces' for jaeger"`
- TracingInsecureTransport bool `name:"tracing-insecure" usage:"Disable HTTPS for the gRPC transport protocol"`
+ TracingTransport string `name:"tracing-transport" usage:"grpc or http"`
+ TracingEndpoint string `name:"tracing-endpoint" usage:"Endpoint of your trace collector. Eg., 'localhost:4317' for gRPC, 'localhost:4318' for http"`
+ TracingInsecureTransport bool `name:"tracing-insecure-transport" usage:"Disable TLS for the gRPC or HTTP transport protocol"`
SMTPHost string `name:"smtp-host" usage:"Host of the smtp server. Eg., 'smtp.eu.mailgun.org'"`
SMTPPort int `name:"smtp-port" usage:"Port of the smtp server. Eg., 587"`
diff --git a/internal/config/helpers.gen.go b/internal/config/helpers.gen.go
index a515da200..f232d37a3 100644
--- a/internal/config/helpers.gen.go
+++ b/internal/config/helpers.gen.go
@@ -1991,7 +1991,7 @@ func (st *ConfigState) SetTracingInsecureTransport(v bool) {
}
// TracingInsecureTransportFlag returns the flag name for the 'TracingInsecureTransport' field
-func TracingInsecureTransportFlag() string { return "tracing-insecure" }
+func TracingInsecureTransportFlag() string { return "tracing-insecure-transport" }
// GetTracingInsecureTransport safely fetches the value for global configuration 'TracingInsecureTransport' field
func GetTracingInsecureTransport() bool { return global.GetTracingInsecureTransport() }
diff --git a/internal/tracing/tracing.go b/internal/tracing/tracing.go
index 16e5a5eb5..a0b25f487 100644
--- a/internal/tracing/tracing.go
+++ b/internal/tracing/tracing.go
@@ -29,8 +29,8 @@ import (
"github.com/uptrace/bun/extra/bunotel"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/exporters/jaeger"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
@@ -69,8 +69,14 @@ func Initialize() error {
return fmt.Errorf("building tracing exporter: %w", err)
}
tpo = trace.WithBatcher(exp)
- case "jaeger":
- exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(config.GetTracingEndpoint())))
+ case "http":
+ opts := []otlptracehttp.Option{
+ otlptracehttp.WithEndpoint(config.GetTracingEndpoint()),
+ }
+ if insecure {
+ opts = append(opts, otlptracehttp.WithInsecure())
+ }
+ exp, err := otlptracehttp.New(context.Background(), opts...)
if err != nil {
return fmt.Errorf("building tracing exporter: %w", err)
}
diff --git a/test/envparsing.sh b/test/envparsing.sh
index 63f1bc5e3..68e250db0 100755
--- a/test/envparsing.sh
+++ b/test/envparsing.sh
@@ -146,7 +146,7 @@ EXPECT=$(cat << "EOF"
"tls-certificate-key": "",
"tracing-enabled": false,
"tracing-endpoint": "localhost:4317",
- "tracing-insecure": false,
+ "tracing-insecure-transport": true,
"tracing-transport": "grpc",
"trusted-proxies": [
"127.0.0.1/32",
@@ -242,6 +242,7 @@ GTS_SYSLOG_ENABLED=true \
GTS_SYSLOG_PROTOCOL='udp' \
GTS_SYSLOG_ADDRESS='127.0.0.1:6969' \
GTS_TRACING_ENDPOINT='localhost:4317' \
+GTS_TRACING_INSECURE_TRANSPORT=true \
GTS_ADVANCED_COOKIES_SAMESITE='strict' \
GTS_ADVANCED_RATE_LIMIT_EXCEPTIONS="192.0.2.0/24,127.0.0.1/32" \
GTS_ADVANCED_RATE_LIMIT_REQUESTS=6969 \
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
index 138f7c12f..c056bd305 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
@@ -1,10 +1,10 @@
+//go:build gofuzz
// +build gofuzz
package httprule
func Fuzz(data []byte) int {
- _, err := Parse(string(data))
- if err != nil {
+ if _, err := Parse(string(data)); err != nil {
return 0
}
return 0
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
index 5edd784e6..65ffcf5cf 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
@@ -1,6 +1,7 @@
package httprule
import (
+ "errors"
"fmt"
"strings"
)
@@ -164,9 +165,9 @@ func (p *parser) segment() (segment, error) {
v, err := p.variable()
if err != nil {
- return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
+ return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
}
- return v, err
+ return v, nil
}
func (p *parser) literal() (segment, error) {
@@ -191,7 +192,7 @@ func (p *parser) variable() (segment, error) {
if _, err := p.accept("="); err == nil {
segs, err = p.segments()
if err != nil {
- return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
+ return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
}
} else {
segs = []segment{wildcard{}}
@@ -213,12 +214,12 @@ func (p *parser) fieldPath() (string, error) {
}
components := []string{c}
for {
- if _, err = p.accept("."); err != nil {
+ if _, err := p.accept("."); err != nil {
return strings.Join(components, "."), nil
}
c, err := p.accept(typeIdent)
if err != nil {
- return "", fmt.Errorf("invalid field path component: %v", err)
+ return "", fmt.Errorf("invalid field path component: %w", err)
}
components = append(components, c)
}
@@ -237,10 +238,8 @@ const (
typeEOF = termType("$")
)
-const (
- // eof is the terminal symbol which always appears at the end of token sequence.
- eof = "\u0000"
-)
+// eof is the terminal symbol which always appears at the end of token sequence.
+const eof = "\u0000"
// accept tries to accept a token in "p".
// This function consumes a token and returns it if it matches to the specified "term".
@@ -275,11 +274,12 @@ func (p *parser) accept(term termType) (string, error) {
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
//
// https://www.ietf.org/rfc/rfc3986.txt, P.49
-// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
-// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
-// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
-// / "*" / "+" / "," / ";" / "="
-// pct-encoded = "%" HEXDIG HEXDIG
+//
+// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+// / "*" / "+" / "," / ";" / "="
+// pct-encoded = "%" HEXDIG HEXDIG
func expectPChars(t string) error {
const (
init = iota
@@ -333,7 +333,7 @@ func expectPChars(t string) error {
// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
func expectIdent(ident string) error {
if ident == "" {
- return fmt.Errorf("empty identifier")
+ return errors.New("empty identifier")
}
for pos, r := range ident {
switch {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
index 95f867a52..a8789f170 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -27,9 +27,9 @@ go_library(
"//internal/httprule",
"//utilities",
"@go_googleapis//google/api:httpbody_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog",
+ "@org_golang_google_grpc//health/grpc_health_v1",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
@@ -37,6 +37,8 @@ go_library(
"@org_golang_google_protobuf//reflect/protoreflect",
"@org_golang_google_protobuf//reflect/protoregistry",
"@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
+ "@org_golang_google_protobuf//types/known/structpb",
"@org_golang_google_protobuf//types/known/timestamppb",
"@org_golang_google_protobuf//types/known/wrapperspb",
],
@@ -56,8 +58,10 @@ go_test(
"marshal_jsonpb_test.go",
"marshal_proto_test.go",
"marshaler_registry_test.go",
+ "mux_internal_test.go",
"mux_test.go",
"pattern_test.go",
+ "query_fuzz_test.go",
"query_test.go",
],
embed = [":runtime"],
@@ -69,8 +73,9 @@ go_test(
"@go_googleapis//google/api:httpbody_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
"@go_googleapis//google/rpc:status_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+ "@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//health/grpc_health_v1",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
@@ -78,6 +83,7 @@ go_test(
"@org_golang_google_protobuf//testing/protocmp",
"@org_golang_google_protobuf//types/known/durationpb",
"@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
"@org_golang_google_protobuf//types/known/structpb",
"@org_golang_google_protobuf//types/known/timestamppb",
"@org_golang_google_protobuf//types/known/wrapperspb",
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
index fb57b9366..31553e784 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -13,6 +13,7 @@ import (
"time"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@@ -35,11 +36,15 @@ const metadataHeaderBinarySuffix = "-Bin"
const xForwardedFor = "X-Forwarded-For"
const xForwardedHost = "X-Forwarded-Host"
-var (
- // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
- // header isn't present. If the value is 0 the sent `context` will not have a timeout.
- DefaultContextTimeout = 0 * time.Second
-)
+// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+var DefaultContextTimeout = 0 * time.Second
+
+// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
+// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
+var malformedHTTPHeaders = map[string]struct{}{
+ "connection": {},
+}
type (
rpcMethodKey struct{}
@@ -95,12 +100,43 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque
return metadata.NewIncomingContext(ctx, md), nil
}
+func isValidGRPCMetadataKey(key string) bool {
+ // Must be a valid gRPC "Header-Name" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means 0-9 a-z _ - .
+ // Only lowercase letters are valid in the wire protocol, but the client library will normalize
+ // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable.
+ bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ validLowercaseLetter := ch >= 'a' && ch <= 'z'
+ validUppercaseLetter := ch >= 'A' && ch <= 'Z'
+ validDigit := ch >= '0' && ch <= '9'
+ validOther := ch == '.' || ch == '-' || ch == '_'
+ if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther {
+ return false
+ }
+ }
+ return true
+}
+
+func isValidGRPCMetadataTextValue(textValue string) bool {
+ // Must be a valid gRPC "ASCII-Value" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive.
+ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ if ch < 0x20 || ch > 0x7E {
+ return false
+ }
+ }
+ return true
+}
+
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
ctx = withRPCMethod(ctx, rpcMethodName)
for _, o := range options {
ctx = o(ctx)
}
- var pairs []string
timeout := DefaultContextTimeout
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
var err error
@@ -109,7 +145,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
}
}
-
+ var pairs []string
for key, vals := range req.Header {
key = textproto.CanonicalMIMEHeaderKey(key)
for _, val := range vals {
@@ -118,6 +154,10 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
pairs = append(pairs, "authorization", val)
}
if h, ok := mux.incomingHeaderMatcher(key); ok {
+ if !isValidGRPCMetadataKey(h) {
+ grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
+ continue
+ }
// Handles "-bin" metadata in grpc, since grpc will do another base64
// encode before sending to server, we need to decode it first.
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
@@ -127,6 +167,9 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
}
val = string(b)
+ } else if !isValidGRPCMetadataTextValue(val) {
+ grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
+ continue
}
pairs = append(pairs, h, val)
}
@@ -172,11 +215,17 @@ type serverMetadataKey struct{}
// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+ if ctx == nil {
+ ctx = context.Background()
+ }
return context.WithValue(ctx, serverMetadataKey{}, md)
}
// ServerMetadataFromContext returns the ServerMetadata in ctx
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+ if ctx == nil {
+ return md, false
+ }
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
return
}
@@ -269,8 +318,8 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
case 'n':
return time.Nanosecond, true
default:
+ return
}
- return
}
// isPermanentHTTPHeader checks whether hdr belongs to the list of
@@ -308,6 +357,13 @@ func isPermanentHTTPHeader(hdr string) bool {
return false
}
+// isMalformedHTTPHeader checks whether header belongs to the list of
+// "malformed headers" and would be rejected by the gRPC server.
+func isMalformedHTTPHeader(header string) bool {
+ _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
+ return isMalformed
+}
+
// RPCMethod returns the method string for the server context. The returned
// string is in the format of "/package.service/method".
func RPCMethod(ctx context.Context) (string, bool) {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
index e6bc4e6ce..d7b15fcfb 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -37,7 +37,7 @@ func BoolSlice(val, sep string) ([]bool, error) {
for i, v := range s {
value, err := Bool(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -57,7 +57,7 @@ func Float64Slice(val, sep string) ([]float64, error) {
for i, v := range s {
value, err := Float64(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -81,7 +81,7 @@ func Float32Slice(val, sep string) ([]float32, error) {
for i, v := range s {
value, err := Float32(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -101,7 +101,7 @@ func Int64Slice(val, sep string) ([]int64, error) {
for i, v := range s {
value, err := Int64(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -125,7 +125,7 @@ func Int32Slice(val, sep string) ([]int32, error) {
for i, v := range s {
value, err := Int32(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -145,7 +145,7 @@ func Uint64Slice(val, sep string) ([]uint64, error) {
for i, v := range s {
value, err := Uint64(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -169,7 +169,7 @@ func Uint32Slice(val, sep string) ([]uint32, error) {
for i, v := range s {
value, err := Uint32(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -197,7 +197,7 @@ func BytesSlice(val, sep string) ([][]byte, error) {
for i, v := range s {
value, err := Bytes(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -209,8 +209,7 @@ func Timestamp(val string) (*timestamppb.Timestamp, error) {
var r timestamppb.Timestamp
val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
- err := unmarshaler.Unmarshal([]byte(val), &r)
- if err != nil {
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
return nil, err
}
return &r, nil
@@ -221,8 +220,7 @@ func Duration(val string) (*durationpb.Duration, error) {
var r durationpb.Duration
val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
- err := unmarshaler.Unmarshal([]byte(val), &r)
- if err != nil {
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
return nil, err
}
return &r, nil
@@ -257,66 +255,64 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
for i, v := range s {
value, err := Enum(v, enumValMap)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
return values, nil
}
-/*
- Support fot google.protobuf.wrappers on top of primitive types
-*/
+// Support for google.protobuf.wrappers on top of primitive types
// StringValue well-known type support as wrapper around string type
func StringValue(val string) (*wrapperspb.StringValue, error) {
- return &wrapperspb.StringValue{Value: val}, nil
+ return wrapperspb.String(val), nil
}
// FloatValue well-known type support as wrapper around float32 type
func FloatValue(val string) (*wrapperspb.FloatValue, error) {
parsedVal, err := Float32(val)
- return &wrapperspb.FloatValue{Value: parsedVal}, err
+ return wrapperspb.Float(parsedVal), err
}
// DoubleValue well-known type support as wrapper around float64 type
func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
parsedVal, err := Float64(val)
- return &wrapperspb.DoubleValue{Value: parsedVal}, err
+ return wrapperspb.Double(parsedVal), err
}
// BoolValue well-known type support as wrapper around bool type
func BoolValue(val string) (*wrapperspb.BoolValue, error) {
parsedVal, err := Bool(val)
- return &wrapperspb.BoolValue{Value: parsedVal}, err
+ return wrapperspb.Bool(parsedVal), err
}
// Int32Value well-known type support as wrapper around int32 type
func Int32Value(val string) (*wrapperspb.Int32Value, error) {
parsedVal, err := Int32(val)
- return &wrapperspb.Int32Value{Value: parsedVal}, err
+ return wrapperspb.Int32(parsedVal), err
}
// UInt32Value well-known type support as wrapper around uint32 type
func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
parsedVal, err := Uint32(val)
- return &wrapperspb.UInt32Value{Value: parsedVal}, err
+ return wrapperspb.UInt32(parsedVal), err
}
// Int64Value well-known type support as wrapper around int64 type
func Int64Value(val string) (*wrapperspb.Int64Value, error) {
parsedVal, err := Int64(val)
- return &wrapperspb.Int64Value{Value: parsedVal}, err
+ return wrapperspb.Int64(parsedVal), err
}
// UInt64Value well-known type support as wrapper around uint64 type
func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
parsedVal, err := Uint64(val)
- return &wrapperspb.UInt64Value{Value: parsedVal}, err
+ return wrapperspb.UInt64(parsedVal), err
}
// BytesValue well-known type support as wrapper around bytes[] type
func BytesValue(val string) (*wrapperspb.BytesValue, error) {
parsedVal, err := Bytes(val)
- return &wrapperspb.BytesValue{Value: parsedVal}, err
+ return wrapperspb.Bytes(parsedVal), err
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index d9e0013c4..d2bcbb7d2 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -38,7 +38,7 @@ func HTTPStatusFromCode(code codes.Code) int {
case codes.OK:
return http.StatusOK
case codes.Canceled:
- return http.StatusRequestTimeout
+ return 499
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
@@ -70,10 +70,10 @@ func HTTPStatusFromCode(code codes.Code) int {
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
+ default:
+ grpclog.Infof("Unknown gRPC error code: %v", code)
+ return http.StatusInternalServerError
}
-
- grpclog.Infof("Unknown gRPC error code: %v", code)
- return http.StatusInternalServerError
}
// HTTPError uses the mux-configured error handler.
@@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
// DefaultRoutingErrorHandler is our default handler for routing errors.
// By default http error codes mapped on the following error codes:
-// NotFound -> grpc.NotFound
-// StatusBadRequest -> grpc.InvalidArgument
-// MethodNotAllowed -> grpc.Unimplemented
-// Other -> grpc.Internal, method is not expecting to be called for anything else
+//
+// NotFound -> grpc.NotFound
+// StatusBadRequest -> grpc.InvalidArgument
+// MethodNotAllowed -> grpc.Unimplemented
+// Other -> grpc.Internal, method is not expecting to be called for anything else
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
sterr := status.Error(codes.Internal, "Unexpected routing error")
switch httpStatus {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index 0138ed2f7..a03dd166b 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -2,13 +2,14 @@ package runtime
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"sort"
- "google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
)
func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
@@ -44,7 +45,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
// if the item is an object, then enqueue all of its children
for k, v := range m {
if item.msg == nil {
- return nil, fmt.Errorf("JSON structure did not match request type")
+ return nil, errors.New("JSON structure did not match request type")
}
fd := getFieldByName(item.msg.Descriptor().Fields(), k)
@@ -53,7 +54,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
}
if isDynamicProtoMessage(fd.Message()) {
- for _, p := range buildPathsBlindly(k, v) {
+ for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
newPath := p
if item.path != "" {
newPath = item.path + "." + newPath
@@ -63,7 +64,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
continue
}
- if isProtobufAnyMessage(fd.Message()) {
+ if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
_, hasTypeField := v.(map[string]interface{})["@type"]
if hasTypeField {
queue = append(queue, fieldMaskPathItem{path: k})
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
index d1e21df48..945f3a5eb 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -52,11 +52,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
return
}
if err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
@@ -82,15 +82,15 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
if err != nil {
grpclog.Infof("Failed to marshal response chunk: %v", err)
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
- if _, err = w.Write(buf); err != nil {
+ if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to send response chunk: %v", err)
return
}
wroteHeader = true
- if _, err = w.Write(delimiter); err != nil {
+ if _, err := w.Write(delimiter); err != nil {
grpclog.Infof("Failed to send delimiter chunk: %v", err)
return
}
@@ -200,20 +200,24 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
return nil
}
-func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
st := mux.streamErrorHandler(ctx, err)
msg := errorChunk(st)
if !wroteHeader {
w.Header().Set("Content-Type", marshaler.ContentType(msg))
w.WriteHeader(HTTPStatusFromCode(st.Code()))
}
- buf, merr := marshaler.Marshal(msg)
- if merr != nil {
- grpclog.Infof("Failed to marshal an error: %v", merr)
+ buf, err := marshaler.Marshal(msg)
+ if err != nil {
+ grpclog.Infof("Failed to marshal an error: %v", err)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Infof("Failed to notify error to client: %v", err)
return
}
- if _, werr := w.Write(buf); werr != nil {
- grpclog.Infof("Failed to notify error to client: %v", werr)
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Infof("Failed to send delimiter chunk: %v", err)
return
}
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
index 7387c8e39..51b8247da 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -92,23 +92,20 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
if rv.Type().Elem().Implements(protoMessageType) {
var buf bytes.Buffer
- err := buf.WriteByte('[')
- if err != nil {
+ if err := buf.WriteByte('['); err != nil {
return nil, err
}
for i := 0; i < rv.Len(); i++ {
if i != 0 {
- err = buf.WriteByte(',')
- if err != nil {
+ if err := buf.WriteByte(','); err != nil {
return nil, err
}
}
- if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+ if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
return nil, err
}
}
- err = buf.WriteByte(']')
- if err != nil {
+ if err := buf.WriteByte(']'); err != nil {
return nil, err
}
@@ -117,17 +114,16 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
if rv.Type().Elem().Implements(typeProtoEnum) {
var buf bytes.Buffer
- err := buf.WriteByte('[')
- if err != nil {
+ if err := buf.WriteByte('['); err != nil {
return nil, err
}
for i := 0; i < rv.Len(); i++ {
if i != 0 {
- err = buf.WriteByte(',')
- if err != nil {
+ if err := buf.WriteByte(','); err != nil {
return nil, err
}
}
+ var err error
if j.UseEnumNumbers {
_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
} else {
@@ -137,8 +133,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
return nil, err
}
}
- err = buf.WriteByte(']')
- if err != nil {
+ if err := buf.WriteByte(']'); err != nil {
return nil, err
}
@@ -219,8 +214,7 @@ func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v int
// Decode into bytes for marshalling
var b json.RawMessage
- err := d.Decode(&b)
- if err != nil {
+ if err := d.Decode(&b); err != nil {
return err
}
@@ -239,8 +233,7 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
if rv.Type().ConvertibleTo(typeProtoMessage) {
// Decode into bytes for marshalling
var b json.RawMessage
- err := d.Decode(&b)
- if err != nil {
+ if err := d.Decode(&b); err != nil {
return err
}
@@ -280,6 +273,17 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
return nil
}
if rv.Kind() == reflect.Slice {
+ if rv.Type().Elem().Kind() == reflect.Uint8 {
+ var sl []byte
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.SetBytes(sl)
+ }
+ return nil
+ }
+
var sl []json.RawMessage
if err := d.Decode(&sl); err != nil {
return err
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
index 007f8f1a2..398c780dc 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -1,10 +1,8 @@
package runtime
import (
- "io"
-
"errors"
- "io/ioutil"
+ "io"
"google.golang.org/protobuf/proto"
)
@@ -38,7 +36,7 @@ func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
// NewDecoder returns a Decoder which reads proto stream from "reader".
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
return DecoderFunc(func(value interface{}) error {
- buffer, err := ioutil.ReadAll(reader)
+ buffer, err := io.ReadAll(reader)
if err != nil {
return err
}
@@ -53,8 +51,7 @@ func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
if err != nil {
return err
}
- _, err = writer.Write(buffer)
- if err != nil {
+ if _, err := writer.Write(buffer); err != nil {
return err
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
index 46a4aabaf..f451cb441 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -6,10 +6,13 @@ import (
"fmt"
"net/http"
"net/textproto"
+ "regexp"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
@@ -23,15 +26,15 @@ const (
// path string before doing any routing.
UnescapingModeLegacy UnescapingMode = iota
- // EscapingTypeExceptReserved unescapes all path parameters except RFC 6570
+ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
// reserved characters.
UnescapingModeAllExceptReserved
- // EscapingTypeExceptSlash unescapes URL path parameters except path
- // seperators, which will be left as "%2F".
+ // UnescapingModeAllExceptSlash unescapes URL path parameters except path
+ // separators, which will be left as "%2F".
UnescapingModeAllExceptSlash
- // URL path parameters will be fully decoded.
+ // UnescapingModeAllCharacters unescapes all URL path parameters.
UnescapingModeAllCharacters
// UnescapingModeDefault is the default escaping type.
@@ -40,6 +43,8 @@ const (
UnescapingModeDefault = UnescapingModeLegacy
)
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
@@ -75,7 +80,7 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http.
}
}
-// WithEscapingType sets the escaping type. See the definitions of UnescapingMode
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
// for more information.
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
return func(serveMux *ServeMux) {
@@ -96,13 +101,14 @@ func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMux
type HeaderMatcherFunc func(string) (string, bool)
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
+// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
+// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+// Other headers are not added to the gRPC metadata.
func DefaultHeaderMatcher(key string) (string, bool) {
- key = textproto.CanonicalMIMEHeaderKey(key)
- if isPermanentHTTPHeader(key) {
+ switch key = textproto.CanonicalMIMEHeaderKey(key); {
+ case isPermanentHTTPHeader(key):
return MetadataPrefix + key, true
- } else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+ case strings.HasPrefix(key, MetadataHeaderPrefix):
return key[len(MetadataHeaderPrefix):], true
}
return "", false
@@ -113,11 +119,30 @@ func DefaultHeaderMatcher(key string) (string, bool) {
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ for _, header := range fn.matchedMalformedHeaders() {
+ grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
+ }
+
return func(mux *ServeMux) {
mux.incomingHeaderMatcher = fn
}
}
+// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+ if fn == nil {
+ return nil
+ }
+ headers := make([]string, 0)
+ for header := range malformedHTTPHeaders {
+ out, accept := fn(header)
+ if accept && isMalformedHTTPHeader(out) {
+ headers = append(headers, out)
+ }
+ }
+ return headers
+}
+
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
@@ -179,6 +204,56 @@ func WithDisablePathLengthFallback() ServeMuxOption {
}
}
+// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
+// When called the handler will forward the request to the upstream grpc service health check (defined in the
+// gRPC Health Checking Protocol).
+//
+// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
+// to setup the protocol in the grpc server.
+//
+// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
+func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
+ return func(s *ServeMux) {
+ // error can be ignored since pattern is definitely valid
+ _ = s.HandlePath(
+ http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
+ ) {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+
+ resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
+ Service: r.URL.Query().Get("service"),
+ })
+ if err != nil {
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+
+ if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
+ switch resp.GetStatus() {
+ case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
+ err = status.Error(codes.Unavailable, resp.String())
+ case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
+ err = status.Error(codes.NotFound, resp.String())
+ }
+
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ _ = outboundMarshaler.NewEncoder(w).Encode(resp)
+ })
+ }
+}
+
+// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
+//
+// See WithHealthEndpointAt for the general implementation.
+func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
+ return WithHealthEndpointAt(healthCheckClient, "/healthz")
+}
+
// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
serveMux := &ServeMux{
@@ -229,7 +304,7 @@ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) er
return nil
}
-// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
+// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -245,8 +320,6 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
path = r.URL.RawPath
}
- components := strings.Split(path[1:], "/")
-
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
@@ -257,8 +330,18 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- // Verb out here is to memoize for the fallback case below
- var verb string
+ var pathComponents []string
+ // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F"
+ // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the
+ // path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default
+ // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved
+ if s.unescapingMode == UnescapingModeAllCharacters {
+ pathComponents = encodedPathSplitter.Split(path[1:], -1)
+ } else {
+ pathComponents = strings.Split(path[1:], "/")
+ }
+
+ lastPathComponent := pathComponents[len(pathComponents)-1]
for _, h := range s.handlers[r.Method] {
// If the pattern has a verb, explicitly look for a suffix in the last
@@ -269,23 +352,28 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// parser because we know what verb we're looking for, however, there
// are still some cases that the parser itself cannot disambiguate. See
// the comment there if interested.
+
+ var verb string
patVerb := h.pat.Verb()
- l := len(components)
- lastComponent := components[l-1]
- var idx int = -1
- if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
- idx = len(lastComponent) - len(patVerb) - 1
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
}
if idx == 0 {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
return
}
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
if idx > 0 {
- components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
}
- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
@@ -301,14 +389,33 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- // lookup other methods to handle fallback from GET to POST and
- // to determine if it is NotImplemented or NotFound.
+ // if no handler has found for the request, lookup for other methods
+ // to handle POST -> GET fallback if the request is subject to path
+ // length fallback.
+ // Note we are not eagerly checking the request here as we want to return the
+ // right HTTP status code, and we need to process the fallback candidates in
+ // order to do that.
for m, handlers := range s.handlers {
if m == r.Method {
continue
}
for _, h := range handlers {
- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
@@ -320,8 +427,11 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
continue
}
+
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
- if s.isPathLengthFallback(r) {
+ // Also, only consider POST -> GET fallbacks, and avoid falling back to
+ // potentially dangerous operations like DELETE.
+ if s.isPathLengthFallback(r) && m == http.MethodGet {
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
index df7cb8142..8f90d15a5 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -15,8 +15,6 @@ var (
ErrNotMatch = errors.New("not match to the path pattern")
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
ErrInvalidPattern = errors.New("invalid pattern")
- // ErrMalformedSequence indicates that an escape sequence was malformed.
- ErrMalformedSequence = errors.New("malformed escape sequence")
)
type MalformedSequenceError string
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
index fb0c84ef0..d01933c4f 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -1,7 +1,6 @@
package runtime
import (
- "encoding/base64"
"errors"
"fmt"
"net/url"
@@ -11,19 +10,21 @@ import (
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
- "google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/known/durationpb"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+ "google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
)
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
-var currentQueryParser QueryParameterParser = &defaultQueryParser{}
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
@@ -36,14 +37,17 @@ func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utili
return currentQueryParser.Parse(msg, values, filter)
}
-type defaultQueryParser struct{}
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameters parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
-func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
- match := valuesKeyRegexp.FindStringSubmatch(key)
- if len(match) == 3 {
+ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
@@ -175,10 +179,10 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
return protoreflect.ValueOfBool(v), nil
case protoreflect.EnumKind:
enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
- switch {
- case errors.Is(err, protoregistry.NotFound):
- return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
- case err != nil:
+ if err != nil {
+ if errors.Is(err, protoregistry.NotFound) {
+ return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
+ }
return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
}
// Look for enum by name
@@ -189,8 +193,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
// Look for enum by number
- v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
- if v == nil {
+ if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
}
@@ -234,7 +237,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
case protoreflect.StringKind:
return protoreflect.ValueOfString(value), nil
case protoreflect.BytesKind:
- v, err := base64.URLEncoding.DecodeString(value)
+ v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}
@@ -250,18 +253,12 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
var msg proto.Message
switch msgDescriptor.FullName() {
case "google.protobuf.Timestamp":
- if value == "null" {
- break
- }
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return protoreflect.Value{}, err
}
msg = timestamppb.New(t)
case "google.protobuf.Duration":
- if value == "null" {
- break
- }
d, err := time.ParseDuration(value)
if err != nil {
return protoreflect.Value{}, err
@@ -272,55 +269,67 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.DoubleValue{Value: v}
+ msg = wrapperspb.Double(v)
case "google.protobuf.FloatValue":
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.FloatValue{Value: float32(v)}
+ msg = wrapperspb.Float(float32(v))
case "google.protobuf.Int64Value":
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.Int64Value{Value: v}
+ msg = wrapperspb.Int64(v)
case "google.protobuf.Int32Value":
v, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.Int32Value{Value: int32(v)}
+ msg = wrapperspb.Int32(int32(v))
case "google.protobuf.UInt64Value":
v, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.UInt64Value{Value: v}
+ msg = wrapperspb.UInt64(v)
case "google.protobuf.UInt32Value":
v, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.UInt32Value{Value: uint32(v)}
+ msg = wrapperspb.UInt32(uint32(v))
case "google.protobuf.BoolValue":
v, err := strconv.ParseBool(value)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.BoolValue{Value: v}
+ msg = wrapperspb.Bool(v)
case "google.protobuf.StringValue":
- msg = &wrapperspb.StringValue{Value: value}
+ msg = wrapperspb.String(value)
case "google.protobuf.BytesValue":
- v, err := base64.URLEncoding.DecodeString(value)
+ v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.BytesValue{Value: v}
+ msg = wrapperspb.Bytes(v)
case "google.protobuf.FieldMask":
fm := &field_mask.FieldMask{}
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
msg = fm
+ case "google.protobuf.Value":
+ var v structpb.Value
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ case "google.protobuf.Struct":
+ var v structpb.Struct
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
default:
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
index 5d8d12bc4..b89409465 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
@@ -8,6 +8,7 @@ go_library(
"doc.go",
"pattern.go",
"readerfactory.go",
+ "string_array_flag.go",
"trie.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
@@ -16,7 +17,10 @@ go_library(
go_test(
name = "utilities_test",
size = "small",
- srcs = ["trie_test.go"],
+ srcs = [
+ "string_array_flag_test.go",
+ "trie_test.go",
+ ],
deps = [":utilities"],
)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
index 6dd385466..01d26edae 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -3,13 +3,12 @@ package utilities
import (
"bytes"
"io"
- "io/ioutil"
)
// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
// at the start of the stream
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
- b, err := ioutil.ReadAll(r)
+ b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 000000000..d224ab776
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+ "flag"
+ "strings"
+)
+
+// flagInterface is an cut down interface to `flag`
+type flagInterface interface {
+ Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+ value := &StringArrayFlags{}
+ f.Var(value, name, usage)
+ return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var`
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`
+func (i *StringArrayFlags) String() string {
+ return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`
+func (i *StringArrayFlags) Set(value string) error {
+ *i = append(*i, value)
+ return nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
index af3b703d5..dd99b0ed2 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -40,7 +40,7 @@ func NewDoubleArray(seqs [][]string) *DoubleArray {
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
var result [][]int
for _, seq := range seqs {
- var encoded []int
+ encoded := make([]int, 0, len(seq))
for _, token := range seq {
if _, ok := da.Encoding[token]; !ok {
da.Encoding[token] = len(da.Encoding)
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index dbb6670b3..61782fbf0 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -61,28 +61,68 @@ issues:
linters-settings:
depguard:
- # Check the list against standard lib.
- # Default: false
- include-go-root: true
- # A list of packages for the list type specified.
- # Default: []
- packages:
- - "crypto/md5"
- - "crypto/sha1"
- - "crypto/**/pkix"
- ignore-file-rules:
- - "**/*_test.go"
- additional-guards:
- # Do not allow testing packages in non-test files.
- - list-type: denylist
- include-go-root: true
- packages:
- - testing
- - github.com/stretchr/testify
- ignore-file-rules:
- - "**/*_test.go"
- - "**/*test/*.go"
- - "**/internal/matchers/*.go"
+ rules:
+ non-tests:
+ files:
+ - "!$test"
+ - "!**/*test/*.go"
+ - "!**/internal/matchers/*.go"
+ deny:
+ - pkg: "testing"
+ - pkg: "github.com/stretchr/testify"
+ - pkg: "crypto/md5"
+ - pkg: "crypto/sha1"
+ - pkg: "crypto/**/pkix"
+ otlp-internal:
+ files:
+ - "!**/exporters/otlp/internal/**/*.go"
+ # TODO: remove the following when otlpmetric/internal is removed.
+ - "!**/exporters/otlp/otlpmetric/internal/oconf/envconfig.go"
+ - "!**/exporters/otlp/otlpmetric/internal/oconf/options.go"
+ - "!**/exporters/otlp/otlpmetric/internal/oconf/options_test.go"
+ - "!**/exporters/otlp/otlpmetric/internal/otest/client_test.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
+ desc: Do not use cross-module internal packages.
+ otlptrace-internal:
+ files:
+ - "!**/exporters/otlp/otlptrace/*.go"
+ - "!**/exporters/otlp/otlptrace/internal/**.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
+ desc: Do not use cross-module internal packages.
+ otlpmetric-internal:
+ files:
+ - "!**/exporters/otlp/otlpmetric/internal/*.go"
+ - "!**/exporters/otlp/otlpmetric/internal/**/*.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
+ desc: Do not use cross-module internal packages.
+ otel-internal:
+ files:
+ - "**/sdk/*.go"
+ - "**/sdk/**/*.go"
+ - "**/exporters/*.go"
+ - "**/exporters/**/*.go"
+ - "**/schema/*.go"
+ - "**/schema/**/*.go"
+ - "**/metric/*.go"
+ - "**/metric/**/*.go"
+ - "**/bridge/*.go"
+ - "**/bridge/**/*.go"
+ - "**/example/*.go"
+ - "**/example/**/*.go"
+ - "**/trace/*.go"
+ - "**/trace/**/*.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/internal$"
+ desc: Do not use cross-module internal packages.
+ - pkg: "go.opentelemetry.io/otel/internal/attribute"
+ desc: Do not use cross-module internal packages.
+ - pkg: "go.opentelemetry.io/otel/internal/internaltest"
+ desc: Do not use cross-module internal packages.
+ - pkg: "go.opentelemetry.io/otel/internal/matchers"
+ desc: Do not use cross-module internal packages.
godot:
exclude:
# Exclude links.
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index d9f145f86..7aa5c8051 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,99 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+ A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+ The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus` (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
+ Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
+ Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+ OpenTelemetry dropped support for Jaeger exporter in July 2023.
+ Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+ or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+ Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
## [1.16.0/0.39.0] 2023-05-18
This release contains the first stable release of the OpenTelemetry Go [metric API].
@@ -20,10 +113,14 @@ See our [versioning policy](VERSIONING.md) for more information about these stab
The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
+- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
### Changed
- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
### Removed
@@ -188,6 +285,8 @@ This release drops the compatibility guarantee of [Go 1.18].
- Handle empty environment variable as it they were not set. (#3764)
- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
### Deprecated
@@ -2492,7 +2591,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.16.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.17.0...HEAD
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index f6f6a313b..623740007 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -14,4 +14,4 @@
* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
-CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod
+CODEOWNERS @MrAlias @MadVikingGod @pellared \ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index b2df5de34..a00dbca7b 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -179,23 +179,23 @@ For a deeper discussion, see
## Documentation
-Each non-example Go Module should have its own `README.md` containing:
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
-- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
-- Brief description.
-- Installation instructions (and requirements if applicable).
-- Hyperlink to an example. Depending on the component the example can be:
- - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
- - A sample Go application with its own `README.md`, like [here](example/zipkin).
-- Additional documentation sections such us:
- - Configuration,
- - Contributing,
- - References.
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
-[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
+You can install and run a "local Go Doc site" in the following way:
-Moreover, it should be possible to navigate to any `README.md` from the
-root `README.md`.
+ ```sh
+ go install golang.org/x/pkgsite/cmd/pkgsite@latest
+ pkgsite
+ ```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
## Style Guide
@@ -475,8 +475,33 @@ documentation are allowed to be extended with additional methods.
> Warning: methods may be added to this interface in minor releases.
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
Otherwise, stable interfaces MUST NOT be modified.
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems happened with libraries that upgraded to v2 when an application did not,
+and would not produce any telemetry.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
If new functionality is needed for an interface that cannot be changed it MUST
be added by including an additional interface. That added interface can be a
simple interface for the specific functionality that you want to add or it can
@@ -531,6 +556,37 @@ functionality should be added, each one will need their own super-set
interfaces and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions.
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules where a user can upgrade the parent without the child
+and if the internal package API has changed it will fail to upgrade[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+ - This package manages global state for all of opentelemetry-go. It needs to
+ be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+ - This package provides values in a `context.Context` that need to be
+ recognized by `go.opentelemetry.io/otel/baggage` and
+ `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
## Approvers and Maintainers
### Approvers
@@ -538,14 +594,14 @@ interface that defines the specific functionality should be preferred.
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
-- [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+- [Robert Pająk](https://github.com/pellared), Splunk
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus
@@ -560,3 +616,5 @@ repo](https://github.com/open-telemetry/community/blob/main/community-membership
[Approver]: #approvers
[Maintainer]: #maintainers
+[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
+[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 26e4bed22..c996d227b 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -25,7 +25,7 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
-precommit: generate dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
+precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools
@@ -71,8 +71,14 @@ $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
+GOTMPL = $(TOOLS)/gotmpl
+$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
+
+GORELEASE = $(TOOLS)/gorelease
+$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
+
.PHONY: tools
-tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
+tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@@ -110,13 +116,24 @@ $(CODESPELL): PACKAGE=codespell
# Generate
.PHONY: generate
+generate: go-generate vanity-import-fix
-generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
-generate/%: DIR=$*
-generate/%: | $(STRINGER) $(PORTO)
+.PHONY: go-generate
+go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
+go-generate/%: DIR=$*
+go-generate/%: | $(STRINGER) $(GOTMPL)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
- && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
+ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
+
+.PHONY: vanity-import-fix
+vanity-import-fix: | $(PORTO)
+ @$(PORTO) --include-internal -w .
+
+# Generate go.work file for local development.
+.PHONY: go-work
+go-work: | $(CROSSLINK)
+ $(CROSSLINK) work --root=$(shell pwd)
# Build
@@ -203,11 +220,7 @@ lint: misspell lint-modules golangci-lint
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
- @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)"
-
-.PHONY: vanity-import-fix
-vanity-import-fix: | $(PORTO)
- @$(PORTO) --include-internal -w .
+ @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
.PHONY: misspell
misspell: | $(MISSPELL)
@@ -220,7 +233,7 @@ codespell: | $(CODESPELL)
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
- awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
+ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
@@ -230,7 +243,7 @@ license-check:
DEPENDABOT_CONFIG = .github/dependabot.yml
.PHONY: dependabot-check
dependabot-check: | $(DBOTCONF)
- @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
+ @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
.PHONY: dependabot-generate
dependabot-generate: | $(DBOTCONF)
@@ -249,14 +262,23 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
- [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
- [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
+ [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
+ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+.PHONY: gorelease
+gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
+gorelease/%: DIR=$*
+gorelease/%:| $(GORELEASE)
+ @echo "gorelease in $(DIR):" \
+ && cd $(DIR) \
+ && $(GORELEASE) \
+ || echo ""
+
.PHONY: prerelease
prerelease: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index e138a8a07..4e5531f30 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -11,22 +11,25 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status
-| Signal | Status | Project |
-| ------- | ---------- | ------- |
-| Traces | Stable | N/A |
-| Metrics | Beta | N/A |
-| Logs | Frozen [1] | N/A |
+| Signal | Status | Project |
+|---------|------------|-----------------------|
+| Traces | Stable | N/A |
+| Metrics | Mixed [1] | [Go: Metric SDK (GA)] |
+| Logs | Frozen [2] | N/A |
-- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
+[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34
+
+- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta.
+- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK.
No Logs Pull Requests are currently being accepted.
-Progress and status specific to this repository is tracked in our local
+Progress and status specific to this repository is tracked in our
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
and
[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
Project versioning information and stability guarantees can be found in the
-[versioning documentation](./VERSIONING.md).
+[versioning documentation](VERSIONING.md).
### Compatibility
@@ -49,15 +52,20 @@ stop ensuring compatibility with these versions in the following manner:
Currently, this project supports the following environments.
| OS | Go Version | Architecture |
-| ------- | ---------- | ------------ |
+|---------|------------|--------------|
+| Ubuntu | 1.21 | amd64 |
| Ubuntu | 1.20 | amd64 |
| Ubuntu | 1.19 | amd64 |
+| Ubuntu | 1.21 | 386 |
| Ubuntu | 1.20 | 386 |
| Ubuntu | 1.19 | 386 |
+| MacOS | 1.21 | amd64 |
| MacOS | 1.20 | amd64 |
| MacOS | 1.19 | amd64 |
+| Windows | 1.21 | amd64 |
| Windows | 1.20 | amd64 |
| Windows | 1.19 | amd64 |
+| Windows | 1.21 | 386 |
| Windows | 1.20 | 386 |
| Windows | 1.19 | 386 |
@@ -97,12 +105,11 @@ export pipeline to send that telemetry to an observability platform.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
| Exporter | Metrics | Traces |
-| :-----------------------------------: | :-----: | :----: |
-| [Jaeger](./exporters/jaeger/) | | ✓ |
-| [OTLP](./exporters/otlp/) | ✓ | ✓ |
-| [Prometheus](./exporters/prometheus/) | ✓ | |
-| [stdout](./exporters/stdout/) | ✓ | ✓ |
-| [Zipkin](./exporters/zipkin/) | | ✓ |
+|---------------------------------------|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | ✓ |
## Contributing
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 5e6daf6c4..82ce3ee46 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -2,27 +2,30 @@
## Semantic Convention Generation
-New versions of the [OpenTelemetry Specification] mean new versions of the `semconv` package need to be generated.
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
-1. Checkout a local copy of the [OpenTelemetry Specification] to the desired release tag.
+1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
3. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
-export TAG="v1.13.0" # Change to the release version you are generating.
-export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
docker pull otel/semconvgen:latest
-make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
```
This should create a new sub-package of [`semconv`](./semconv).
Ensure things look correct before submitting a pull request to include the addition.
-**Note**, the generation code was changed to generate versions >= 1.13.
-To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)).
+## Breaking changes validation
+
+You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
## Pre-Release
@@ -120,7 +123,17 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation
-Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
-[OpenTelemetry Specification]: https://github.com/open-telemetry/opentelemetry-specification
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
+[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
new file mode 100644
index 000000000..638c213d5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Filter supports removing certain attributes from attribute sets. When
+// the filter returns true, the attribute will be kept in the filtered
+// attribute set. When the filter returns false, the attribute is excluded
+// from the filtered attribute set, and the attribute instead appears in
+// the removed list of excluded attributes.
+type Filter func(KeyValue) bool
+
+// NewAllowKeysFilter returns a Filter that only allows attributes with one of
+// the provided keys.
+//
+// If keys is empty a deny-all filter is returned.
+func NewAllowKeysFilter(keys ...Key) Filter {
+ if len(keys) <= 0 {
+ return func(kv KeyValue) bool { return false }
+ }
+
+ allowed := make(map[Key]struct{})
+ for _, k := range keys {
+ allowed[k] = struct{}{}
+ }
+ return func(kv KeyValue) bool {
+ _, ok := allowed[kv.Key]
+ return ok
+ }
+}
+
+// NewDenyKeysFilter returns a Filter that only allows attributes
+// that do not have one of the provided keys.
+//
+// If keys is empty an allow-all filter is returned.
+func NewDenyKeysFilter(keys ...Key) Filter {
+ if len(keys) <= 0 {
+ return func(kv KeyValue) bool { return true }
+ }
+
+ forbid := make(map[Key]struct{})
+ for _, k := range keys {
+ forbid[k] = struct{}{}
+ }
+ return func(kv KeyValue) bool {
+ _, ok := forbid[kv.Key]
+ return !ok
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index b976367e4..9f9303d4f 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -39,13 +39,6 @@ type (
iface interface{}
}
- // Filter supports removing certain attributes from attribute sets. When
- // the filter returns true, the attribute will be kept in the filtered
- // attribute set. When the filter returns false, the attribute is excluded
- // from the filtered attribute set, and the attribute instead appears in
- // the removed list of excluded attributes.
- Filter func(KeyValue) bool
-
// Sortable implements sort.Interface, used for sorting KeyValue. This is
// an exported type to support a memory optimization. A pointer to one of
// these is needed for the call to sort.Stable(), which the caller may
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 46e523a80..9e6b3b7b5 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -61,11 +61,6 @@ type Property struct {
// hasValue indicates if a zero-value value means the property does not
// have a value or if it was the zero-value.
hasValue bool
-
- // hasData indicates whether the created property contains data or not.
- // Properties that do not contain data are invalid with no other check
- // required.
- hasData bool
}
// NewKeyProperty returns a new Property for key.
@@ -76,7 +71,7 @@ func NewKeyProperty(key string) (Property, error) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
- p := Property{key: key, hasData: true}
+ p := Property{key: key}
return p, nil
}
@@ -95,7 +90,6 @@ func NewKeyValueProperty(key, value string) (Property, error) {
key: key,
value: value,
hasValue: true,
- hasData: true,
}
return p, nil
}
@@ -117,7 +111,7 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
- p := Property{hasData: true}
+ var p Property
if match[1] != "" {
p.key = match[1]
} else {
@@ -136,10 +130,6 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
- if !p.hasData {
- return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
- }
-
if !keyRe.MatchString(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md b/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md
deleted file mode 100644
index 19060ba4f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# OpenTelemetry-Go Jaeger Exporter
-
-[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger)
-
-[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md) implementation.
-
-## Installation
-
-```
-go get -u go.opentelemetry.io/otel/exporters/jaeger
-```
-
-## Example
-
-See [../../example/jaeger](../../example/jaeger).
-
-## Configuration
-
-The exporter can be used to send spans to:
-
-- Jaeger agent using `jaeger.thrift` over compact thrift protocol via
- [`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option.
-- Jaeger collector using `jaeger.thrift` over HTTP via
- [`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option.
-
-### Environment Variables
-
-The following environment variables can be used
-(instead of options objects) to override the default configuration.
-
-| Environment variable | Option | Default value |
-| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- |
-| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost` |
-| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831` |
-| `OTEL_EXPORTER_JAEGER_ENDPOINT` | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint) | `http://localhost:14268/api/traces` |
-| `OTEL_EXPORTER_JAEGER_USER` | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername) | |
-| `OTEL_EXPORTER_JAEGER_PASSWORD` | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword) | |
-
-Configuration using options have precedence over the environment variables.
-
-## Contributing
-
-This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path.
-When re-generating Thrift code in the future, please adapt import paths as necessary.
-
-## References
-
-- [Jaeger](https://www.jaegertracing.io/)
-- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md)
-- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/sdk-environment-variables.md#jaeger-exporter)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go
deleted file mode 100644
index a050020bb..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
- "context"
- "fmt"
- "io"
- "net"
- "strings"
- "time"
-
- "github.com/go-logr/logr"
-
- genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent"
- gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-const (
- // udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent.
- udpPacketMaxLength = 65000
- // emitBatchOverhead is the additional overhead bytes used for enveloping the datagram,
- // synced with jaeger-agent https://github.com/jaegertracing/jaeger-client-go/blob/master/transport_udp.go#L37
- emitBatchOverhead = 70
-)
-
-// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface.
-type agentClientUDP struct {
- genAgent.Agent
- io.Closer
-
- connUDP udpConn
- client *genAgent.AgentClient
- maxPacketSize int // max size of datagram in bytes
- thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
- thriftProtocol thrift.TProtocol
-}
-
-type udpConn interface {
- Write([]byte) (int, error)
- SetWriteBuffer(int) error
- Close() error
-}
-
-type agentClientUDPParams struct {
- Host string
- Port string
- MaxPacketSize int
- Logger logr.Logger
- AttemptReconnecting bool
- AttemptReconnectInterval time.Duration
-}
-
-// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
-func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) {
- hostPort := net.JoinHostPort(params.Host, params.Port)
- // validate hostport
- if _, _, err := net.SplitHostPort(hostPort); err != nil {
- return nil, err
- }
-
- if params.MaxPacketSize <= 0 || params.MaxPacketSize > udpPacketMaxLength {
- params.MaxPacketSize = udpPacketMaxLength
- }
-
- if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 {
- params.AttemptReconnectInterval = time.Second * 30
- }
-
- thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
- protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{})
- thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
- client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory)
-
- var connUDP udpConn
- var err error
-
- if params.AttemptReconnecting {
- // host is hostname, setup resolver loop in case host record changes during operation
- connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
- if err != nil {
- return nil, err
- }
- } else {
- destAddr, err := net.ResolveUDPAddr("udp", hostPort)
- if err != nil {
- return nil, err
- }
-
- connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
- if err != nil {
- return nil, err
- }
- }
-
- if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
- return nil, err
- }
-
- return &agentClientUDP{
- connUDP: connUDP,
- client: client,
- maxPacketSize: params.MaxPacketSize,
- thriftBuffer: thriftBuffer,
- thriftProtocol: thriftProtocol,
- }, nil
-}
-
-// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent.
-func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error {
- var errs []error
- processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process)
- if err != nil {
- // drop the batch if serialization of process fails.
- return err
- }
-
- maxPacketSize := a.maxPacketSize
- if maxPacketSize > udpPacketMaxLength-emitBatchOverhead {
- maxPacketSize = udpPacketMaxLength - emitBatchOverhead
- }
- totalSize := processSize
- var spans []*gen.Span
- for _, span := range batch.Spans {
- spanSize, err := a.calcSizeOfSerializedThrift(ctx, span)
- if err != nil {
- errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span))
- continue
- }
- if spanSize+processSize >= maxPacketSize {
- // drop the span that exceeds the limit.
- errs = append(errs, fmt.Errorf("span too large to send: %v", span))
- continue
- }
- if totalSize+spanSize >= maxPacketSize {
- if err := a.flush(ctx, &gen.Batch{
- Process: batch.Process,
- Spans: spans,
- }); err != nil {
- errs = append(errs, err)
- }
- spans = spans[:0]
- totalSize = processSize
- }
- totalSize += spanSize
- spans = append(spans, span)
- }
-
- if len(spans) > 0 {
- if err := a.flush(ctx, &gen.Batch{
- Process: batch.Process,
- Spans: spans,
- }); err != nil {
- errs = append(errs, err)
- }
- }
-
- if len(errs) == 1 {
- return errs[0]
- } else if len(errs) > 1 {
- joined := a.makeJoinedErrorString(errs)
- return fmt.Errorf("multiple errors during transform: %s", joined)
- }
- return nil
-}
-
-// makeJoinedErrorString join all the errors to one error message.
-func (a *agentClientUDP) makeJoinedErrorString(errs []error) string {
- var errMsgs []string
- for _, err := range errs {
- errMsgs = append(errMsgs, err.Error())
- }
- return strings.Join(errMsgs, ", ")
-}
-
-// flush will send the batch of spans to the agent.
-func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error {
- a.thriftBuffer.Reset()
- if err := a.client.EmitBatch(ctx, batch); err != nil {
- return err
- }
- if a.thriftBuffer.Len() > a.maxPacketSize {
- return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
- a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
- }
- _, err := a.connUDP.Write(a.thriftBuffer.Bytes())
- return err
-}
-
-// calcSizeOfSerializedThrift calculate the serialized thrift packet size.
-func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) {
- a.thriftBuffer.Reset()
- err := thriftStruct.Write(ctx, a.thriftProtocol)
- return a.thriftBuffer.Len(), err
-}
-
-// Close implements Close() of io.Closer and closes the underlying UDP connection.
-func (a *agentClientUDP) Close() error {
- return a.connUDP.Close()
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go
deleted file mode 100644
index 460fb5e13..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
- "os"
-)
-
-// Environment variable names.
-const (
- // Hostname for the Jaeger agent, part of address where exporter sends spans
- // i.e. "localhost".
- envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST"
- // Port for the Jaeger agent, part of address where exporter sends spans
- // i.e. 6831.
- envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT"
- // The HTTP endpoint for sending spans directly to a collector,
- // i.e. http://jaeger-collector:14268/api/traces.
- envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT"
- // Username to send as part of "Basic" authentication to the collector endpoint.
- envUser = "OTEL_EXPORTER_JAEGER_USER"
- // Password to send as part of "Basic" authentication to the collector endpoint.
- envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD"
-)
-
-// envOr returns an env variable's value if it is exists or the default if not.
-func envOr(key, defaultValue string) string {
- if v := os.Getenv(key); v != "" {
- return v
- }
- return defaultValue
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go
deleted file mode 100644
index 54cd3b086..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go
deleted file mode 100644
index 3b96e3222..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-import (
- "bytes"
- "context"
- "fmt"
- "time"
-
- "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
- "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-
-func init() {
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go
deleted file mode 100644
index c7c8e9ca3..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-import (
- "bytes"
- "context"
- "fmt"
- "time"
-
- "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
- "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-
-type Agent interface {
- // Parameters:
- // - Spans
- EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
- // Parameters:
- // - Batch
- EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
-}
-
-type AgentClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
- return &AgentClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
- return &AgentClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewAgentClient(c thrift.TClient) *AgentClient {
- return &AgentClient{
- c: c,
- }
-}
-
-func (p *AgentClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// Parameters:
-// - Spans
-func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
- var _args0 AgentEmitZipkinBatchArgs
- _args0.Spans = spans
- p.SetLastResponseMeta_(thrift.ResponseMeta{})
- if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
- return err
- }
- return nil
-}
-
-// Parameters:
-// - Batch
-func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
- var _args1 AgentEmitBatchArgs
- _args1.Batch = batch
- p.SetLastResponseMeta_(thrift.ResponseMeta{})
- if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
- return err
- }
- return nil
-}
-
-type AgentProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Agent
-}
-
-func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewAgentProcessor(handler Agent) *AgentProcessor {
-
- self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
- self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
- return self2
-}
-
-func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil {
- return false, thrift.WrapTException(err2)
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x3.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x3
-
-}
-
-type agentProcessorEmitZipkinBatch struct {
- handler Agent
-}
-
-func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := AgentEmitZipkinBatchArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- _ = tickerCancel
-
- if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
- tickerCancel()
- return true, thrift.WrapTException(err2)
- }
- tickerCancel()
- return true, nil
-}
-
-type agentProcessorEmitBatch struct {
- handler Agent
-}
-
-func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := AgentEmitBatchArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- _ = tickerCancel
-
- if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
- tickerCancel()
- return true, thrift.WrapTException(err2)
- }
- tickerCancel()
- return true, nil
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Spans
-type AgentEmitZipkinBatchArgs struct {
- Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
-}
-
-func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
- return &AgentEmitZipkinBatchArgs{}
-}
-
-func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
- return p.Spans
-}
-func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*zipkincore.Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i++ {
- _elem4 := &zipkincore.Span{}
- if err := _elem4.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
- }
- p.Spans = append(p.Spans, _elem4)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
- }
- return err
-}
-
-func (p *AgentEmitZipkinBatchArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Batch
-type AgentEmitBatchArgs struct {
- Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
-}
-
-func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
- return &AgentEmitBatchArgs{}
-}
-
-var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
-
-func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
- if !p.IsSetBatch() {
- return AgentEmitBatchArgs_Batch_DEFAULT
- }
- return p.Batch
-}
-func (p *AgentEmitBatchArgs) IsSetBatch() bool {
- return p.Batch != nil
-}
-
-func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- p.Batch = &jaeger.Batch{}
- if err := p.Batch.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
- }
- if err := p.Batch.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
- }
- return err
-}
-
-func (p *AgentEmitBatchArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go
deleted file mode 100644
index fe45a9f9a..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go
deleted file mode 100644
index 10162857f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-import (
- "bytes"
- "context"
- "fmt"
- "time"
-
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-func init() {
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go
deleted file mode 100644
index b1fe26c57..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go
+++ /dev/null
@@ -1,3022 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-import (
- "bytes"
- "context"
- "database/sql/driver"
- "errors"
- "fmt"
- "time"
-
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type TagType int64
-
-const (
- TagType_STRING TagType = 0
- TagType_DOUBLE TagType = 1
- TagType_BOOL TagType = 2
- TagType_LONG TagType = 3
- TagType_BINARY TagType = 4
-)
-
-func (p TagType) String() string {
- switch p {
- case TagType_STRING:
- return "STRING"
- case TagType_DOUBLE:
- return "DOUBLE"
- case TagType_BOOL:
- return "BOOL"
- case TagType_LONG:
- return "LONG"
- case TagType_BINARY:
- return "BINARY"
- }
- return "<UNSET>"
-}
-
-func TagTypeFromString(s string) (TagType, error) {
- switch s {
- case "STRING":
- return TagType_STRING, nil
- case "DOUBLE":
- return TagType_DOUBLE, nil
- case "BOOL":
- return TagType_BOOL, nil
- case "LONG":
- return TagType_LONG, nil
- case "BINARY":
- return TagType_BINARY, nil
- }
- return TagType(0), fmt.Errorf("not a valid TagType string")
-}
-
-func TagTypePtr(v TagType) *TagType { return &v }
-
-func (p TagType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *TagType) UnmarshalText(text []byte) error {
- q, err := TagTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-func (p *TagType) Scan(value interface{}) error {
- v, ok := value.(int64)
- if !ok {
- return errors.New("Scan value is not int64")
- }
- *p = TagType(v)
- return nil
-}
-
-func (p *TagType) Value() (driver.Value, error) {
- if p == nil {
- return nil, nil
- }
- return int64(*p), nil
-}
-
-type SpanRefType int64
-
-const (
- SpanRefType_CHILD_OF SpanRefType = 0
- SpanRefType_FOLLOWS_FROM SpanRefType = 1
-)
-
-func (p SpanRefType) String() string {
- switch p {
- case SpanRefType_CHILD_OF:
- return "CHILD_OF"
- case SpanRefType_FOLLOWS_FROM:
- return "FOLLOWS_FROM"
- }
- return "<UNSET>"
-}
-
-func SpanRefTypeFromString(s string) (SpanRefType, error) {
- switch s {
- case "CHILD_OF":
- return SpanRefType_CHILD_OF, nil
- case "FOLLOWS_FROM":
- return SpanRefType_FOLLOWS_FROM, nil
- }
- return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
-}
-
-func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
-
-func (p SpanRefType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *SpanRefType) UnmarshalText(text []byte) error {
- q, err := SpanRefTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-func (p *SpanRefType) Scan(value interface{}) error {
- v, ok := value.(int64)
- if !ok {
- return errors.New("Scan value is not int64")
- }
- *p = SpanRefType(v)
- return nil
-}
-
-func (p *SpanRefType) Value() (driver.Value, error) {
- if p == nil {
- return nil, nil
- }
- return int64(*p), nil
-}
-
-// Attributes:
-// - Key
-// - VType
-// - VStr
-// - VDouble
-// - VBool
-// - VLong
-// - VBinary
-type Tag struct {
- Key string `thrift:"key,1,required" db:"key" json:"key"`
- VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"`
- VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
- VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
- VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
- VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
- VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
-}
-
-func NewTag() *Tag {
- return &Tag{}
-}
-
-func (p *Tag) GetKey() string {
- return p.Key
-}
-
-func (p *Tag) GetVType() TagType {
- return p.VType
-}
-
-var Tag_VStr_DEFAULT string
-
-func (p *Tag) GetVStr() string {
- if !p.IsSetVStr() {
- return Tag_VStr_DEFAULT
- }
- return *p.VStr
-}
-
-var Tag_VDouble_DEFAULT float64
-
-func (p *Tag) GetVDouble() float64 {
- if !p.IsSetVDouble() {
- return Tag_VDouble_DEFAULT
- }
- return *p.VDouble
-}
-
-var Tag_VBool_DEFAULT bool
-
-func (p *Tag) GetVBool() bool {
- if !p.IsSetVBool() {
- return Tag_VBool_DEFAULT
- }
- return *p.VBool
-}
-
-var Tag_VLong_DEFAULT int64
-
-func (p *Tag) GetVLong() int64 {
- if !p.IsSetVLong() {
- return Tag_VLong_DEFAULT
- }
- return *p.VLong
-}
-
-var Tag_VBinary_DEFAULT []byte
-
-func (p *Tag) GetVBinary() []byte {
- return p.VBinary
-}
-func (p *Tag) IsSetVStr() bool {
- return p.VStr != nil
-}
-
-func (p *Tag) IsSetVDouble() bool {
- return p.VDouble != nil
-}
-
-func (p *Tag) IsSetVBool() bool {
- return p.VBool != nil
-}
-
-func (p *Tag) IsSetVLong() bool {
- return p.VLong != nil
-}
-
-func (p *Tag) IsSetVBinary() bool {
- return p.VBinary != nil
-}
-
-func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetKey bool = false
- var issetVType bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetKey = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetVType = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.DOUBLE {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 5:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField5(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 6:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField6(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 7:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField7(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetKey {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
- }
- if !issetVType {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
- }
- return nil
-}
-
-func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Key = v
- }
- return nil
-}
-
-func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- temp := TagType(v)
- p.VType = temp
- }
- return nil
-}
-
-func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.VStr = &v
- }
- return nil
-}
-
-func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.VDouble = &v
- }
- return nil
-}
-
-func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(ctx); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
- } else {
- p.VBool = &v
- }
- return nil
-}
-
-func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 6: ", err)
- } else {
- p.VLong = &v
- }
- return nil
-}
-
-func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(ctx); err != nil {
- return thrift.PrependError("error reading field 7: ", err)
- } else {
- p.VBinary = v
- }
- return nil
-}
-
-func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField4(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField5(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField6(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField7(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
- }
- return err
-}
-
-func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
- }
- if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
- }
- return err
-}
-
-func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVStr() {
- if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVDouble() {
- if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
- }
- if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVBool() {
- if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
- }
- if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVLong() {
- if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVBinary() {
- if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
- }
- if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
- }
- }
- return err
-}
-
-func (p *Tag) Equals(other *Tag) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Key != other.Key {
- return false
- }
- if p.VType != other.VType {
- return false
- }
- if p.VStr != other.VStr {
- if p.VStr == nil || other.VStr == nil {
- return false
- }
- if (*p.VStr) != (*other.VStr) {
- return false
- }
- }
- if p.VDouble != other.VDouble {
- if p.VDouble == nil || other.VDouble == nil {
- return false
- }
- if (*p.VDouble) != (*other.VDouble) {
- return false
- }
- }
- if p.VBool != other.VBool {
- if p.VBool == nil || other.VBool == nil {
- return false
- }
- if (*p.VBool) != (*other.VBool) {
- return false
- }
- }
- if p.VLong != other.VLong {
- if p.VLong == nil || other.VLong == nil {
- return false
- }
- if (*p.VLong) != (*other.VLong) {
- return false
- }
- }
- if bytes.Compare(p.VBinary, other.VBinary) != 0 {
- return false
- }
- return true
-}
-
-func (p *Tag) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Tag(%+v)", *p)
-}
-
-// Attributes:
-// - Timestamp
-// - Fields
-type Log struct {
- Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
- Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
-}
-
-func NewLog() *Log {
- return &Log{}
-}
-
-func (p *Log) GetTimestamp() int64 {
- return p.Timestamp
-}
-
-func (p *Log) GetFields() []*Tag {
- return p.Fields
-}
-func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetTimestamp bool = false
- var issetFields bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetTimestamp = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetFields = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetTimestamp {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
- }
- if !issetFields {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
- }
- return nil
-}
-
-func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Timestamp = v
- }
- return nil
-}
-
-func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Fields = tSlice
- for i := 0; i < size; i++ {
- _elem0 := &Tag{}
- if err := _elem0.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.Fields = append(p.Fields, _elem0)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
- }
- return err
-}
-
-func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Fields {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
- }
- return err
-}
-
-func (p *Log) Equals(other *Log) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Timestamp != other.Timestamp {
- return false
- }
- if len(p.Fields) != len(other.Fields) {
- return false
- }
- for i, _tgt := range p.Fields {
- _src1 := other.Fields[i]
- if !_tgt.Equals(_src1) {
- return false
- }
- }
- return true
-}
-
-func (p *Log) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Log(%+v)", *p)
-}
-
-// Attributes:
-// - RefType
-// - TraceIdLow
-// - TraceIdHigh
-// - SpanId
-type SpanRef struct {
- RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
- TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
- TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
- SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
-}
-
-func NewSpanRef() *SpanRef {
- return &SpanRef{}
-}
-
-func (p *SpanRef) GetRefType() SpanRefType {
- return p.RefType
-}
-
-func (p *SpanRef) GetTraceIdLow() int64 {
- return p.TraceIdLow
-}
-
-func (p *SpanRef) GetTraceIdHigh() int64 {
- return p.TraceIdHigh
-}
-
-func (p *SpanRef) GetSpanId() int64 {
- return p.SpanId
-}
-func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetRefType bool = false
- var issetTraceIdLow bool = false
- var issetTraceIdHigh bool = false
- var issetSpanId bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetRefType = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdLow = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdHigh = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- issetSpanId = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetRefType {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
- }
- if !issetTraceIdLow {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
- }
- if !issetTraceIdHigh {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
- }
- if !issetSpanId {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
- }
- return nil
-}
-
-func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- temp := SpanRefType(v)
- p.RefType = temp
- }
- return nil
-}
-
-func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.TraceIdLow = v
- }
- return nil
-}
-
-func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.TraceIdHigh = v
- }
- return nil
-}
-
-func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.SpanId = v
- }
- return nil
-}
-
-func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField4(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
- }
- if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
- }
- return err
-}
-
-func (p *SpanRef) Equals(other *SpanRef) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.RefType != other.RefType {
- return false
- }
- if p.TraceIdLow != other.TraceIdLow {
- return false
- }
- if p.TraceIdHigh != other.TraceIdHigh {
- return false
- }
- if p.SpanId != other.SpanId {
- return false
- }
- return true
-}
-
-func (p *SpanRef) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("SpanRef(%+v)", *p)
-}
-
-// Attributes:
-// - TraceIdLow
-// - TraceIdHigh
-// - SpanId
-// - ParentSpanId
-// - OperationName
-// - References
-// - Flags
-// - StartTime
-// - Duration
-// - Tags
-// - Logs
-type Span struct {
- TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
- TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
- SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
- ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
- OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
- References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
- Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"`
- StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
- Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"`
- Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
- Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
-}
-
-func NewSpan() *Span {
- return &Span{}
-}
-
-func (p *Span) GetTraceIdLow() int64 {
- return p.TraceIdLow
-}
-
-func (p *Span) GetTraceIdHigh() int64 {
- return p.TraceIdHigh
-}
-
-func (p *Span) GetSpanId() int64 {
- return p.SpanId
-}
-
-func (p *Span) GetParentSpanId() int64 {
- return p.ParentSpanId
-}
-
-func (p *Span) GetOperationName() string {
- return p.OperationName
-}
-
-var Span_References_DEFAULT []*SpanRef
-
-func (p *Span) GetReferences() []*SpanRef {
- return p.References
-}
-
-func (p *Span) GetFlags() int32 {
- return p.Flags
-}
-
-func (p *Span) GetStartTime() int64 {
- return p.StartTime
-}
-
-func (p *Span) GetDuration() int64 {
- return p.Duration
-}
-
-var Span_Tags_DEFAULT []*Tag
-
-func (p *Span) GetTags() []*Tag {
- return p.Tags
-}
-
-var Span_Logs_DEFAULT []*Log
-
-func (p *Span) GetLogs() []*Log {
- return p.Logs
-}
-func (p *Span) IsSetReferences() bool {
- return p.References != nil
-}
-
-func (p *Span) IsSetTags() bool {
- return p.Tags != nil
-}
-
-func (p *Span) IsSetLogs() bool {
- return p.Logs != nil
-}
-
-func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetTraceIdLow bool = false
- var issetTraceIdHigh bool = false
- var issetSpanId bool = false
- var issetParentSpanId bool = false
- var issetOperationName bool = false
- var issetFlags bool = false
- var issetStartTime bool = false
- var issetDuration bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdLow = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdHigh = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- issetSpanId = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- issetParentSpanId = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 5:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField5(ctx, iprot); err != nil {
- return err
- }
- issetOperationName = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 6:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField6(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 7:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField7(ctx, iprot); err != nil {
- return err
- }
- issetFlags = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 8:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField8(ctx, iprot); err != nil {
- return err
- }
- issetStartTime = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 9:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField9(ctx, iprot); err != nil {
- return err
- }
- issetDuration = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 10:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField10(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 11:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField11(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetTraceIdLow {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
- }
- if !issetTraceIdHigh {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
- }
- if !issetSpanId {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
- }
- if !issetParentSpanId {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
- }
- if !issetOperationName {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
- }
- if !issetFlags {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
- }
- if !issetStartTime {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
- }
- if !issetDuration {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
- }
- return nil
-}
-
-func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.TraceIdLow = v
- }
- return nil
-}
-
-func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.TraceIdHigh = v
- }
- return nil
-}
-
-func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.SpanId = v
- }
- return nil
-}
-
-func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.ParentSpanId = v
- }
- return nil
-}
-
-func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
- } else {
- p.OperationName = v
- }
- return nil
-}
-
-func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*SpanRef, 0, size)
- p.References = tSlice
- for i := 0; i < size; i++ {
- _elem2 := &SpanRef{}
- if err := _elem2.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
- }
- p.References = append(p.References, _elem2)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 7: ", err)
- } else {
- p.Flags = v
- }
- return nil
-}
-
-func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 8: ", err)
- } else {
- p.StartTime = v
- }
- return nil
-}
-
-func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 9: ", err)
- } else {
- p.Duration = v
- }
- return nil
-}
-
-func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Tags = tSlice
- for i := 0; i < size; i++ {
- _elem3 := &Tag{}
- if err := _elem3.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
- }
- p.Tags = append(p.Tags, _elem3)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Log, 0, size)
- p.Logs = tSlice
- for i := 0; i < size; i++ {
- _elem4 := &Log{}
- if err := _elem4.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
- }
- p.Logs = append(p.Logs, _elem4)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField4(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField5(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField6(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField7(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField8(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField9(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField10(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField11(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetReferences() {
- if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.References {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
- }
- if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTags() {
- if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Tags {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetLogs() {
- if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Logs {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) Equals(other *Span) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.TraceIdLow != other.TraceIdLow {
- return false
- }
- if p.TraceIdHigh != other.TraceIdHigh {
- return false
- }
- if p.SpanId != other.SpanId {
- return false
- }
- if p.ParentSpanId != other.ParentSpanId {
- return false
- }
- if p.OperationName != other.OperationName {
- return false
- }
- if len(p.References) != len(other.References) {
- return false
- }
- for i, _tgt := range p.References {
- _src5 := other.References[i]
- if !_tgt.Equals(_src5) {
- return false
- }
- }
- if p.Flags != other.Flags {
- return false
- }
- if p.StartTime != other.StartTime {
- return false
- }
- if p.Duration != other.Duration {
- return false
- }
- if len(p.Tags) != len(other.Tags) {
- return false
- }
- for i, _tgt := range p.Tags {
- _src6 := other.Tags[i]
- if !_tgt.Equals(_src6) {
- return false
- }
- }
- if len(p.Logs) != len(other.Logs) {
- return false
- }
- for i, _tgt := range p.Logs {
- _src7 := other.Logs[i]
- if !_tgt.Equals(_src7) {
- return false
- }
- }
- return true
-}
-
-func (p *Span) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-// - ServiceName
-// - Tags
-type Process struct {
- ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
- Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
-}
-
-func NewProcess() *Process {
- return &Process{}
-}
-
-func (p *Process) GetServiceName() string {
- return p.ServiceName
-}
-
-var Process_Tags_DEFAULT []*Tag
-
-func (p *Process) GetTags() []*Tag {
- return p.Tags
-}
-func (p *Process) IsSetTags() bool {
- return p.Tags != nil
-}
-
-func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetServiceName bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetServiceName = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetServiceName {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
- }
- return nil
-}
-
-func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.ServiceName = v
- }
- return nil
-}
-
-func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Tags = tSlice
- for i := 0; i < size; i++ {
- _elem8 := &Tag{}
- if err := _elem8.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
- }
- p.Tags = append(p.Tags, _elem8)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
- }
- return err
-}
-
-func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTags() {
- if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Tags {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
- }
- }
- return err
-}
-
-func (p *Process) Equals(other *Process) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.ServiceName != other.ServiceName {
- return false
- }
- if len(p.Tags) != len(other.Tags) {
- return false
- }
- for i, _tgt := range p.Tags {
- _src9 := other.Tags[i]
- if !_tgt.Equals(_src9) {
- return false
- }
- }
- return true
-}
-
-func (p *Process) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Process(%+v)", *p)
-}
-
-// Attributes:
-// - FullQueueDroppedSpans
-// - TooLargeDroppedSpans
-// - FailedToEmitSpans
-type ClientStats struct {
- FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
- TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
- FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
-}
-
-func NewClientStats() *ClientStats {
- return &ClientStats{}
-}
-
-func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
- return p.FullQueueDroppedSpans
-}
-
-func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
- return p.TooLargeDroppedSpans
-}
-
-func (p *ClientStats) GetFailedToEmitSpans() int64 {
- return p.FailedToEmitSpans
-}
-func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetFullQueueDroppedSpans bool = false
- var issetTooLargeDroppedSpans bool = false
- var issetFailedToEmitSpans bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetFullQueueDroppedSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetTooLargeDroppedSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- issetFailedToEmitSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetFullQueueDroppedSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
- }
- if !issetTooLargeDroppedSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
- }
- if !issetFailedToEmitSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
- }
- return nil
-}
-
-func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.FullQueueDroppedSpans = v
- }
- return nil
-}
-
-func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.TooLargeDroppedSpans = v
- }
- return nil
-}
-
-func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.FailedToEmitSpans = v
- }
- return nil
-}
-
-func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err)
- }
- return err
-}
-
-func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err)
- }
- return err
-}
-
-func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err)
- }
- return err
-}
-
-func (p *ClientStats) Equals(other *ClientStats) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans {
- return false
- }
- if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans {
- return false
- }
- if p.FailedToEmitSpans != other.FailedToEmitSpans {
- return false
- }
- return true
-}
-
-func (p *ClientStats) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("ClientStats(%+v)", *p)
-}
-
-// Attributes:
-// - Process
-// - Spans
-// - SeqNo
-// - Stats
-type Batch struct {
- Process *Process `thrift:"process,1,required" db:"process" json:"process"`
- Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"`
- SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
- Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
-}
-
-func NewBatch() *Batch {
- return &Batch{}
-}
-
-var Batch_Process_DEFAULT *Process
-
-func (p *Batch) GetProcess() *Process {
- if !p.IsSetProcess() {
- return Batch_Process_DEFAULT
- }
- return p.Process
-}
-
-func (p *Batch) GetSpans() []*Span {
- return p.Spans
-}
-
-var Batch_SeqNo_DEFAULT int64
-
-func (p *Batch) GetSeqNo() int64 {
- if !p.IsSetSeqNo() {
- return Batch_SeqNo_DEFAULT
- }
- return *p.SeqNo
-}
-
-var Batch_Stats_DEFAULT *ClientStats
-
-func (p *Batch) GetStats() *ClientStats {
- if !p.IsSetStats() {
- return Batch_Stats_DEFAULT
- }
- return p.Stats
-}
-func (p *Batch) IsSetProcess() bool {
- return p.Process != nil
-}
-
-func (p *Batch) IsSetSeqNo() bool {
- return p.SeqNo != nil
-}
-
-func (p *Batch) IsSetStats() bool {
- return p.Stats != nil
-}
-
-func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetProcess bool = false
- var issetSpans bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetProcess = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetProcess {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
- }
- if !issetSpans {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
- }
- return nil
-}
-
-func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- p.Process = &Process{}
- if err := p.Process.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
- }
- return nil
-}
-
-func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i++ {
- _elem10 := &Span{}
- if err := _elem10.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
- }
- p.Spans = append(p.Spans, _elem10)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.SeqNo = &v
- }
- return nil
-}
-
-func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- p.Stats = &ClientStats{}
- if err := p.Stats.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
- }
- return nil
-}
-
-func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField4(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
- }
- if err := p.Process.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
- }
- return err
-}
-
-func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
- }
- return err
-}
-
-func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSeqNo() {
- if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err)
- }
- }
- return err
-}
-
-func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetStats() {
- if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err)
- }
- if err := p.Stats.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err)
- }
- }
- return err
-}
-
-func (p *Batch) Equals(other *Batch) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if !p.Process.Equals(other.Process) {
- return false
- }
- if len(p.Spans) != len(other.Spans) {
- return false
- }
- for i, _tgt := range p.Spans {
- _src11 := other.Spans[i]
- if !_tgt.Equals(_src11) {
- return false
- }
- }
- if p.SeqNo != other.SeqNo {
- if p.SeqNo == nil || other.SeqNo == nil {
- return false
- }
- if (*p.SeqNo) != (*other.SeqNo) {
- return false
- }
- }
- if !p.Stats.Equals(other.Stats) {
- return false
- }
- return true
-}
-
-func (p *Batch) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Batch(%+v)", *p)
-}
-
-// Attributes:
-// - Ok
-type BatchSubmitResponse struct {
- Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
-}
-
-func NewBatchSubmitResponse() *BatchSubmitResponse {
- return &BatchSubmitResponse{}
-}
-
-func (p *BatchSubmitResponse) GetOk() bool {
- return p.Ok
-}
-func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetOk bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetOk = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOk {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
- }
- return nil
-}
-
-func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Ok = v
- }
- return nil
-}
-
-func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
- }
- if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
- }
- return err
-}
-
-func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Ok != other.Ok {
- return false
- }
- return true
-}
-
-func (p *BatchSubmitResponse) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
-}
-
-type Collector interface {
- // Parameters:
- // - Batches
- SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
-}
-
-type CollectorClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
- return &CollectorClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
- return &CollectorClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewCollectorClient(c thrift.TClient) *CollectorClient {
- return &CollectorClient{
- c: c,
- }
-}
-
-func (p *CollectorClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// Parameters:
-// - Batches
-func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
- var _args12 CollectorSubmitBatchesArgs
- _args12.Batches = batches
- var _result14 CollectorSubmitBatchesResult
- var _meta13 thrift.ResponseMeta
- _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
- p.SetLastResponseMeta_(_meta13)
- if _err != nil {
- return
- }
- return _result14.GetSuccess(), nil
-}
-
-type CollectorProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Collector
-}
-
-func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewCollectorProcessor(handler Collector) *CollectorProcessor {
-
- self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler}
- return self15
-}
-
-func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil {
- return false, thrift.WrapTException(err2)
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x16.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x16
-
-}
-
-type collectorProcessorSubmitBatches struct {
- handler Collector
-}
-
-func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := CollectorSubmitBatchesArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
- oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- // Start a goroutine to do server side connectivity check.
- if thrift.ServerConnectivityCheckInterval > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
- var tickerCtx context.Context
- tickerCtx, tickerCancel = context.WithCancel(context.Background())
- defer tickerCancel()
- go func(ctx context.Context, cancel context.CancelFunc) {
- ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- if !iprot.Transport().IsOpen() {
- cancel()
- return
- }
- }
- }
- }(tickerCtx, cancel)
- }
-
- result := CollectorSubmitBatchesResult{}
- var retval []*BatchSubmitResponse
- if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
- tickerCancel()
- if err2 == thrift.ErrAbandonRequest {
- return false, thrift.WrapTException(err2)
- }
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error())
- oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return true, thrift.WrapTException(err2)
- } else {
- result.Success = retval
- }
- tickerCancel()
- if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Batches
-type CollectorSubmitBatchesArgs struct {
- Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
-}
-
-func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
- return &CollectorSubmitBatchesArgs{}
-}
-
-func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
- return p.Batches
-}
-func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Batch, 0, size)
- p.Batches = tSlice
- for i := 0; i < size; i++ {
- _elem17 := &Batch{}
- if err := _elem17.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
- }
- p.Batches = append(p.Batches, _elem17)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Batches {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err)
- }
- return err
-}
-
-func (p *CollectorSubmitBatchesArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type CollectorSubmitBatchesResult struct {
- Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
- return &CollectorSubmitBatchesResult{}
-}
-
-var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
-
-func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
- return p.Success
-}
-func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 0:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField0(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BatchSubmitResponse, 0, size)
- p.Success = tSlice
- for i := 0; i < size; i++ {
- _elem18 := &BatchSubmitResponse{}
- if err := _elem18.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
- }
- p.Success = append(p.Success, _elem18)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField0(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Success {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *CollectorSubmitBatchesResult) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
deleted file mode 100644
index ebf43018f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
deleted file mode 100644
index 043ecba96..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import (
- "bytes"
- "context"
- "fmt"
- "time"
-
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-const CLIENT_SEND = "cs"
-const CLIENT_RECV = "cr"
-const SERVER_SEND = "ss"
-const SERVER_RECV = "sr"
-const MESSAGE_SEND = "ms"
-const MESSAGE_RECV = "mr"
-const WIRE_SEND = "ws"
-const WIRE_RECV = "wr"
-const CLIENT_SEND_FRAGMENT = "csf"
-const CLIENT_RECV_FRAGMENT = "crf"
-const SERVER_SEND_FRAGMENT = "ssf"
-const SERVER_RECV_FRAGMENT = "srf"
-const LOCAL_COMPONENT = "lc"
-const CLIENT_ADDR = "ca"
-const SERVER_ADDR = "sa"
-const MESSAGE_ADDR = "ma"
-
-func init() {
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
deleted file mode 100644
index 7f46810e0..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
+++ /dev/null
@@ -1,2067 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import (
- "bytes"
- "context"
- "database/sql/driver"
- "errors"
- "fmt"
- "time"
-
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type AnnotationType int64
-
-const (
- AnnotationType_BOOL AnnotationType = 0
- AnnotationType_BYTES AnnotationType = 1
- AnnotationType_I16 AnnotationType = 2
- AnnotationType_I32 AnnotationType = 3
- AnnotationType_I64 AnnotationType = 4
- AnnotationType_DOUBLE AnnotationType = 5
- AnnotationType_STRING AnnotationType = 6
-)
-
-func (p AnnotationType) String() string {
- switch p {
- case AnnotationType_BOOL:
- return "BOOL"
- case AnnotationType_BYTES:
- return "BYTES"
- case AnnotationType_I16:
- return "I16"
- case AnnotationType_I32:
- return "I32"
- case AnnotationType_I64:
- return "I64"
- case AnnotationType_DOUBLE:
- return "DOUBLE"
- case AnnotationType_STRING:
- return "STRING"
- }
- return "<UNSET>"
-}
-
-func AnnotationTypeFromString(s string) (AnnotationType, error) {
- switch s {
- case "BOOL":
- return AnnotationType_BOOL, nil
- case "BYTES":
- return AnnotationType_BYTES, nil
- case "I16":
- return AnnotationType_I16, nil
- case "I32":
- return AnnotationType_I32, nil
- case "I64":
- return AnnotationType_I64, nil
- case "DOUBLE":
- return AnnotationType_DOUBLE, nil
- case "STRING":
- return AnnotationType_STRING, nil
- }
- return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
-}
-
-func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
-
-func (p AnnotationType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *AnnotationType) UnmarshalText(text []byte) error {
- q, err := AnnotationTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-func (p *AnnotationType) Scan(value interface{}) error {
- v, ok := value.(int64)
- if !ok {
- return errors.New("Scan value is not int64")
- }
- *p = AnnotationType(v)
- return nil
-}
-
-func (p *AnnotationType) Value() (driver.Value, error) {
- if p == nil {
- return nil, nil
- }
- return int64(*p), nil
-}
-
-// Indicates the network context of a service recording an annotation with two
-// exceptions.
-//
-// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
-// the endpoint indicates the source or destination of an RPC. This exception
-// allows zipkin to display network context of uninstrumented services, or
-// clients such as web browsers.
-//
-// Attributes:
-// - Ipv4: IPv4 host address packed into 4 bytes.
-//
-// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
-// - Port: IPv4 port
-//
-// Note: this is to be treated as an unsigned integer, so watch for negatives.
-//
-// Conventionally, when the port isn't known, port = 0.
-// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
-//
-// Conventionally, when the service name isn't known, service_name = "unknown".
-// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
-type Endpoint struct {
- Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
- Port int16 `thrift:"port,2" db:"port" json:"port"`
- ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
- Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
-}
-
-func NewEndpoint() *Endpoint {
- return &Endpoint{}
-}
-
-func (p *Endpoint) GetIpv4() int32 {
- return p.Ipv4
-}
-
-func (p *Endpoint) GetPort() int16 {
- return p.Port
-}
-
-func (p *Endpoint) GetServiceName() string {
- return p.ServiceName
-}
-
-var Endpoint_Ipv6_DEFAULT []byte
-
-func (p *Endpoint) GetIpv6() []byte {
- return p.Ipv6
-}
-func (p *Endpoint) IsSetIpv6() bool {
- return p.Ipv6 != nil
-}
-
-func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I16 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Ipv4 = v
- }
- return nil
-}
-
-func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI16(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Port = v
- }
- return nil
-}
-
-func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.ServiceName = v
- }
- return nil
-}
-
-func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.Ipv6 = v
- }
- return nil
-}
-
-func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField4(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
- }
- if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
- }
- if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetIpv6() {
- if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
- }
- if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
- }
- }
- return err
-}
-
-func (p *Endpoint) Equals(other *Endpoint) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Ipv4 != other.Ipv4 {
- return false
- }
- if p.Port != other.Port {
- return false
- }
- if p.ServiceName != other.ServiceName {
- return false
- }
- if bytes.Compare(p.Ipv6, other.Ipv6) != 0 {
- return false
- }
- return true
-}
-
-func (p *Endpoint) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Endpoint(%+v)", *p)
-}
-
-// An annotation is similar to a log statement. It includes a host field which
-// allows these events to be attributed properly, and also aggregatable.
-//
-// Attributes:
-// - Timestamp: Microseconds from epoch.
-//
-// This value should use the most precise value possible. For example,
-// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
-// - Value
-// - Host: Always the host that recorded the event. By specifying the host you allow
-// rollup of all events (such as client requests to a service) by IP address.
-type Annotation struct {
- Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
- Value string `thrift:"value,2" db:"value" json:"value"`
- Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
-}
-
-func NewAnnotation() *Annotation {
- return &Annotation{}
-}
-
-func (p *Annotation) GetTimestamp() int64 {
- return p.Timestamp
-}
-
-func (p *Annotation) GetValue() string {
- return p.Value
-}
-
-var Annotation_Host_DEFAULT *Endpoint
-
-func (p *Annotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return Annotation_Host_DEFAULT
- }
- return p.Host
-}
-func (p *Annotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Timestamp = v
- }
- return nil
-}
-
-func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Value = v
- }
- return nil
-}
-
-func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
- }
- return err
-}
-
-func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
- }
- return err
-}
-
-func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
- }
- if err := p.Host.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
- }
- }
- return err
-}
-
-func (p *Annotation) Equals(other *Annotation) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Timestamp != other.Timestamp {
- return false
- }
- if p.Value != other.Value {
- return false
- }
- if !p.Host.Equals(other.Host) {
- return false
- }
- return true
-}
-
-func (p *Annotation) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Annotation(%+v)", *p)
-}
-
-// Binary annotations are tags applied to a Span to give it context. For
-// example, a binary annotation of "http.uri" could the path to a resource in a
-// RPC call.
-//
-// Binary annotations of type STRING are always queryable, though more a
-// historical implementation detail than a structural concern.
-//
-// Binary annotations can repeat, and vary on the host. Similar to Annotation,
-// the host indicates who logged the event. This allows you to tell the
-// difference between the client and server side of the same key. For example,
-// the key "http.uri" might be different on the client and server side due to
-// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field,
-// you can see the different points of view, which often help in debugging.
-//
-// Attributes:
-// - Key
-// - Value
-// - AnnotationType
-// - Host: The host that recorded tag, which allows you to differentiate between
-// multiple tags with the same key. There are two exceptions to this.
-//
-// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
-// destination of an RPC. This exception allows zipkin to display network
-// context of uninstrumented services, or clients such as web browsers.
-type BinaryAnnotation struct {
- Key string `thrift:"key,1" db:"key" json:"key"`
- Value []byte `thrift:"value,2" db:"value" json:"value"`
- AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
- Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"`
-}
-
-func NewBinaryAnnotation() *BinaryAnnotation {
- return &BinaryAnnotation{}
-}
-
-func (p *BinaryAnnotation) GetKey() string {
- return p.Key
-}
-
-func (p *BinaryAnnotation) GetValue() []byte {
- return p.Value
-}
-
-func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
- return p.AnnotationType
-}
-
-var BinaryAnnotation_Host_DEFAULT *Endpoint
-
-func (p *BinaryAnnotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return BinaryAnnotation_Host_DEFAULT
- }
- return p.Host
-}
-func (p *BinaryAnnotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Key = v
- }
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Value = v
- }
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- temp := AnnotationType(v)
- p.AnnotationType = temp
- }
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField2(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField4(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
- }
- if err := oprot.WriteBinary(ctx, p.Value); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
- }
- if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
- }
- if err := p.Host.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
- }
- }
- return err
-}
-
-func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Key != other.Key {
- return false
- }
- if bytes.Compare(p.Value, other.Value) != 0 {
- return false
- }
- if p.AnnotationType != other.AnnotationType {
- return false
- }
- if !p.Host.Equals(other.Host) {
- return false
- }
- return true
-}
-
-func (p *BinaryAnnotation) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
-}
-
-// A trace is a series of spans (often RPC calls) which form a latency tree.
-//
-// The root span is where trace_id = id and parent_id = Nil. The root span is
-// usually the longest interval in the trace, starting with a SERVER_RECV
-// annotation and ending with a SERVER_SEND.
-//
-// Attributes:
-// - TraceID
-// - Name: Span name in lowercase, rpc method for example
-//
-// Conventionally, when the span name isn't known, name = "unknown".
-// - ID
-// - ParentID
-// - Annotations
-// - BinaryAnnotations
-// - Debug
-// - Timestamp: Microseconds from epoch of the creation of this span.
-//
-// This value should be set directly by instrumentation, using the most
-// precise value possible. For example, gettimeofday or syncing nanoTime
-// against a tick of currentTimeMillis.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this via Annotation.timestamp.
-// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
-//
-// This field is optional for compatibility with old data: first-party span
-// stores are expected to support this at time of introduction.
-// - Duration: Measurement of duration in microseconds, used to support queries.
-//
-// This value should be set directly, where possible. Doing so encourages
-// precise measurement decoupled from problems of clocks, such as skew or NTP
-// updates causing time to move backwards.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this by subtracting Annotation.timestamp.
-// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
-//
-// If this field is persisted as unset, zipkin will continue to work, except
-// duration query support will be implementation-specific. Similarly, setting
-// this field non-atomically is implementation-specific.
-//
-// This field is i64 vs i32 to support spans longer than 35 minutes.
-// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this
-// means the trace uses 128 bit traceIds instead of 64 bit.
-type Span struct {
- TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
- // unused field # 2
- Name string `thrift:"name,3" db:"name" json:"name"`
- ID int64 `thrift:"id,4" db:"id" json:"id"`
- ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
- Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
- // unused field # 7
- BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
- Debug bool `thrift:"debug,9" db:"debug" json:"debug"`
- Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
- Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
- TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
-}
-
-func NewSpan() *Span {
- return &Span{}
-}
-
-func (p *Span) GetTraceID() int64 {
- return p.TraceID
-}
-
-func (p *Span) GetName() string {
- return p.Name
-}
-
-func (p *Span) GetID() int64 {
- return p.ID
-}
-
-var Span_ParentID_DEFAULT int64
-
-func (p *Span) GetParentID() int64 {
- if !p.IsSetParentID() {
- return Span_ParentID_DEFAULT
- }
- return *p.ParentID
-}
-
-func (p *Span) GetAnnotations() []*Annotation {
- return p.Annotations
-}
-
-func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
- return p.BinaryAnnotations
-}
-
-var Span_Debug_DEFAULT bool = false
-
-func (p *Span) GetDebug() bool {
- return p.Debug
-}
-
-var Span_Timestamp_DEFAULT int64
-
-func (p *Span) GetTimestamp() int64 {
- if !p.IsSetTimestamp() {
- return Span_Timestamp_DEFAULT
- }
- return *p.Timestamp
-}
-
-var Span_Duration_DEFAULT int64
-
-func (p *Span) GetDuration() int64 {
- if !p.IsSetDuration() {
- return Span_Duration_DEFAULT
- }
- return *p.Duration
-}
-
-var Span_TraceIDHigh_DEFAULT int64
-
-func (p *Span) GetTraceIDHigh() int64 {
- if !p.IsSetTraceIDHigh() {
- return Span_TraceIDHigh_DEFAULT
- }
- return *p.TraceIDHigh
-}
-func (p *Span) IsSetParentID() bool {
- return p.ParentID != nil
-}
-
-func (p *Span) IsSetDebug() bool {
- return p.Debug != Span_Debug_DEFAULT
-}
-
-func (p *Span) IsSetTimestamp() bool {
- return p.Timestamp != nil
-}
-
-func (p *Span) IsSetDuration() bool {
- return p.Duration != nil
-}
-
-func (p *Span) IsSetTraceIDHigh() bool {
- return p.TraceIDHigh != nil
-}
-
-func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 5:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField5(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 6:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField6(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 8:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField8(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 9:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField9(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 10:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField10(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 11:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField11(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 12:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField12(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.TraceID = v
- }
- return nil
-}
-
-func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.Name = v
- }
- return nil
-}
-
-func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.ID = v
- }
- return nil
-}
-
-func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
- } else {
- p.ParentID = &v
- }
- return nil
-}
-
-func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Annotation, 0, size)
- p.Annotations = tSlice
- for i := 0; i < size; i++ {
- _elem0 := &Annotation{}
- if err := _elem0.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.Annotations = append(p.Annotations, _elem0)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BinaryAnnotation, 0, size)
- p.BinaryAnnotations = tSlice
- for i := 0; i < size; i++ {
- _elem1 := &BinaryAnnotation{}
- if err := _elem1.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
- }
- p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(ctx); err != nil {
- return thrift.PrependError("error reading field 9: ", err)
- } else {
- p.Debug = v
- }
- return nil
-}
-
-func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 10: ", err)
- } else {
- p.Timestamp = &v
- }
- return nil
-}
-
-func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 11: ", err)
- } else {
- p.Duration = &v
- }
- return nil
-}
-
-func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 12: ", err)
- } else {
- p.TraceIDHigh = &v
- }
- return nil
-}
-
-func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField3(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField4(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField5(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField6(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField8(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField9(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField10(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField11(ctx, oprot); err != nil {
- return err
- }
- if err := p.writeField12(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
- }
- if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetParentID() {
- if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Annotations {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.BinaryAnnotations {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetDebug() {
- if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
- }
- if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTimestamp() {
- if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetDuration() {
- if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTraceIDHigh() {
- if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
- }
- if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) Equals(other *Span) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.TraceID != other.TraceID {
- return false
- }
- if p.Name != other.Name {
- return false
- }
- if p.ID != other.ID {
- return false
- }
- if p.ParentID != other.ParentID {
- if p.ParentID == nil || other.ParentID == nil {
- return false
- }
- if (*p.ParentID) != (*other.ParentID) {
- return false
- }
- }
- if len(p.Annotations) != len(other.Annotations) {
- return false
- }
- for i, _tgt := range p.Annotations {
- _src2 := other.Annotations[i]
- if !_tgt.Equals(_src2) {
- return false
- }
- }
- if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) {
- return false
- }
- for i, _tgt := range p.BinaryAnnotations {
- _src3 := other.BinaryAnnotations[i]
- if !_tgt.Equals(_src3) {
- return false
- }
- }
- if p.Debug != other.Debug {
- return false
- }
- if p.Timestamp != other.Timestamp {
- if p.Timestamp == nil || other.Timestamp == nil {
- return false
- }
- if (*p.Timestamp) != (*other.Timestamp) {
- return false
- }
- }
- if p.Duration != other.Duration {
- if p.Duration == nil || other.Duration == nil {
- return false
- }
- if (*p.Duration) != (*other.Duration) {
- return false
- }
- }
- if p.TraceIDHigh != other.TraceIDHigh {
- if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
- return false
- }
- if (*p.TraceIDHigh) != (*other.TraceIDHigh) {
- return false
- }
- }
- return true
-}
-
-func (p *Span) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-// - Ok
-type Response struct {
- Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
-}
-
-func NewResponse() *Response {
- return &Response{}
-}
-
-func (p *Response) GetOk() bool {
- return p.Ok
-}
-func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetOk bool = false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetOk = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOk {
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
- }
- return nil
-}
-
-func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Ok = v
- }
- return nil
-}
-
-func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
- }
- if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
- }
- return err
-}
-
-func (p *Response) Equals(other *Response) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Ok != other.Ok {
- return false
- }
- return true
-}
-
-func (p *Response) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("Response(%+v)", *p)
-}
-
-type ZipkinCollector interface {
- // Parameters:
- // - Spans
- SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
-}
-
-type ZipkinCollectorClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{
- c: c,
- }
-}
-
-func (p *ZipkinCollectorClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// Parameters:
-// - Spans
-func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
- var _args4 ZipkinCollectorSubmitZipkinBatchArgs
- _args4.Spans = spans
- var _result6 ZipkinCollectorSubmitZipkinBatchResult
- var _meta5 thrift.ResponseMeta
- _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
- p.SetLastResponseMeta_(_meta5)
- if _err != nil {
- return
- }
- return _result6.GetSuccess(), nil
-}
-
-type ZipkinCollectorProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler ZipkinCollector
-}
-
-func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
-
- self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
- return self7
-}
-
-func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil {
- return false, thrift.WrapTException(err2)
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x8.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x8
-
-}
-
-type zipkinCollectorProcessorSubmitZipkinBatch struct {
- handler ZipkinCollector
-}
-
-func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := ZipkinCollectorSubmitZipkinBatchArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
- oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- // Start a goroutine to do server side connectivity check.
- if thrift.ServerConnectivityCheckInterval > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
- var tickerCtx context.Context
- tickerCtx, tickerCancel = context.WithCancel(context.Background())
- defer tickerCancel()
- go func(ctx context.Context, cancel context.CancelFunc) {
- ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- if !iprot.Transport().IsOpen() {
- cancel()
- return
- }
- }
- }
- }(tickerCtx, cancel)
- }
-
- result := ZipkinCollectorSubmitZipkinBatchResult{}
- var retval []*Response
- if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
- tickerCancel()
- if err2 == thrift.ErrAbandonRequest {
- return false, thrift.WrapTException(err2)
- }
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
- oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return true, thrift.WrapTException(err2)
- } else {
- result.Success = retval
- }
- tickerCancel()
- if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Spans
-type ZipkinCollectorSubmitZipkinBatchArgs struct {
- Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
- return &ZipkinCollectorSubmitZipkinBatchArgs{}
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
- return p.Spans
-}
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i++ {
- _elem9 := &Span{}
- if err := _elem9.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
- }
- p.Spans = append(p.Spans, _elem9)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
- }
- return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type ZipkinCollectorSubmitZipkinBatchResult struct {
- Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
- return &ZipkinCollectorSubmitZipkinBatchResult{}
-}
-
-var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
- return p.Success
-}
-func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 0:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField0(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Response, 0, size)
- p.Success = tSlice
- for i := 0; i < size; i++ {
- _elem10 := &Response{}
- if err := _elem10.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
- }
- p.Success = append(p.Success, _elem10)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if p != nil {
- if err := p.writeField0(ctx, oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Success {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
- if p == nil {
- return "<nil>"
- }
- return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE
deleted file mode 100644
index 2bc6fbbf6..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE
+++ /dev/null
@@ -1,306 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
---------------------------------------------------
-SOFTWARE DISTRIBUTED WITH THRIFT:
-
-The Apache Thrift software includes a number of subcomponents with
-separate copyright notices and license terms. Your use of the source
-code for the these subcomponents is subject to the terms and
-conditions of the following licenses.
-
---------------------------------------------------
-Portions of the following files are licensed under the MIT License:
-
- lib/erl/src/Makefile.am
-
-Please see doc/otp-base-license.txt for the full terms of this license.
-
---------------------------------------------------
-For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
-
-# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
-#
-# Copying and distribution of this file, with or without
-# modification, are permitted in any medium without royalty provided
-# the copyright notice and this notice are preserved.
-
---------------------------------------------------
-For the lib/nodejs/lib/thrift/json_parse.js:
-
-/*
- json_parse.js
- 2015-05-02
- Public Domain.
- NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
-*/
-(By Douglas Crockford <douglas@crockford.com>)
-
---------------------------------------------------
-For lib/cpp/src/thrift/windows/SocketPair.cpp
-
-/* socketpair.c
- * Copyright 2007 by Nathan C. Myers <ncm@cantrip.org>; some rights reserved.
- * This code is Free Software. It may be copied freely, in original or
- * modified form, subject only to the restrictions that (1) the author is
- * relieved from all responsibilities for any use for any purpose, and (2)
- * this copyright notice must be retained, unchanged, in its entirety. If
- * for any reason the author might be held responsible for any consequences
- * of copying or use, license is withheld.
- */
-
-
---------------------------------------------------
-For lib/py/compat/win32/stdint.h
-
-// ISO C9x compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006-2008 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. The name of the author may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-
---------------------------------------------------
-Codegen template in t_html_generator.h
-
-* Bootstrap v2.0.3
-*
-* Copyright 2012 Twitter, Inc
-* Licensed under the Apache License v2.0
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Designed and built with all the love in the world @twitter by @mdo and @fat.
-
----------------------------------------------------
-For t_cl_generator.cc
-
- * Copyright (c) 2008- Patrick Collison <patrick@collison.ie>
- * Copyright (c) 2006- Facebook
-
----------------------------------------------------
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE
deleted file mode 100644
index 37824e7fb..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Apache Thrift
-Copyright (C) 2006 - 2019, The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go
deleted file mode 100644
index 32d5b0147..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
-)
-
-const (
- UNKNOWN_APPLICATION_EXCEPTION = 0
- UNKNOWN_METHOD = 1
- INVALID_MESSAGE_TYPE_EXCEPTION = 2
- WRONG_METHOD_NAME = 3
- BAD_SEQUENCE_ID = 4
- MISSING_RESULT = 5
- INTERNAL_ERROR = 6
- PROTOCOL_ERROR = 7
- INVALID_TRANSFORM = 8
- INVALID_PROTOCOL = 9
- UNSUPPORTED_CLIENT_TYPE = 10
-)
-
-var defaultApplicationExceptionMessage = map[int32]string{
- UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
- UNKNOWN_METHOD: "unknown method",
- INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
- WRONG_METHOD_NAME: "wrong method name",
- BAD_SEQUENCE_ID: "bad sequence ID",
- MISSING_RESULT: "missing result",
- INTERNAL_ERROR: "unknown internal error",
- PROTOCOL_ERROR: "unknown protocol error",
- INVALID_TRANSFORM: "Invalid transform",
- INVALID_PROTOCOL: "Invalid protocol",
- UNSUPPORTED_CLIENT_TYPE: "Unsupported client type",
-}
-
-// Application level Thrift exception
-type TApplicationException interface {
- TException
- TypeId() int32
- Read(ctx context.Context, iprot TProtocol) error
- Write(ctx context.Context, oprot TProtocol) error
-}
-
-type tApplicationException struct {
- message string
- type_ int32
-}
-
-var _ TApplicationException = (*tApplicationException)(nil)
-
-func (tApplicationException) TExceptionType() TExceptionType {
- return TExceptionTypeApplication
-}
-
-func (e tApplicationException) Error() string {
- if e.message != "" {
- return e.message
- }
- return defaultApplicationExceptionMessage[e.type_]
-}
-
-func NewTApplicationException(type_ int32, message string) TApplicationException {
- return &tApplicationException{message, type_}
-}
-
-func (p *tApplicationException) TypeId() int32 {
- return p.type_
-}
-
-func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
- // TODO: this should really be generated by the compiler
- _, err := iprot.ReadStructBegin(ctx)
- if err != nil {
- return err
- }
-
- message := ""
- type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
-
- for {
- _, ttype, id, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return err
- }
- if ttype == STOP {
- break
- }
- switch id {
- case 1:
- if ttype == STRING {
- if message, err = iprot.ReadString(ctx); err != nil {
- return err
- }
- } else {
- if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
- return err
- }
- }
- case 2:
- if ttype == I32 {
- if type_, err = iprot.ReadI32(ctx); err != nil {
- return err
- }
- } else {
- if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
- return err
- }
- }
- default:
- if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
- return err
- }
- }
- if err = iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return err
- }
-
- p.message = message
- p.type_ = type_
-
- return nil
-}
-
-func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
- err = oprot.WriteStructBegin(ctx, "TApplicationException")
- if len(p.Error()) > 0 {
- err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
- if err != nil {
- return
- }
- err = oprot.WriteString(ctx, p.Error())
- if err != nil {
- return
- }
- err = oprot.WriteFieldEnd(ctx)
- if err != nil {
- return
- }
- }
- err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
- if err != nil {
- return
- }
- err = oprot.WriteI32(ctx, p.type_)
- if err != nil {
- return
- }
- err = oprot.WriteFieldEnd(ctx)
- if err != nil {
- return
- }
- err = oprot.WriteFieldStop(ctx)
- if err != nil {
- return
- }
- err = oprot.WriteStructEnd(ctx)
- return
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go
deleted file mode 100644
index 45c880d32..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math"
-)
-
-type TBinaryProtocol struct {
- trans TRichTransport
- origTransport TTransport
- cfg *TConfiguration
- buffer [64]byte
-}
-
-type TBinaryProtocolFactory struct {
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTBinaryProtocolConf instead.
-func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
- return NewTBinaryProtocolConf(t, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// Deprecated: Use NewTBinaryProtocolConf instead.
-func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
- return NewTBinaryProtocolConf(t, &TConfiguration{
- TBinaryStrictRead: &strictRead,
- TBinaryStrictWrite: &strictWrite,
-
- noPropagation: true,
- })
-}
-
-func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
- PropagateTConfiguration(t, conf)
- p := &TBinaryProtocol{
- origTransport: t,
- cfg: conf,
- }
- if et, ok := t.(TRichTransport); ok {
- p.trans = et
- } else {
- p.trans = NewTRichTransport(t)
- }
- return p
-}
-
-// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
-func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
- return NewTBinaryProtocolFactoryConf(&TConfiguration{
- noPropagation: true,
- })
-}
-
-// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
-func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
- return NewTBinaryProtocolFactoryConf(&TConfiguration{
- TBinaryStrictRead: &strictRead,
- TBinaryStrictWrite: &strictWrite,
-
- noPropagation: true,
- })
-}
-
-func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
- return &TBinaryProtocolFactory{
- cfg: conf,
- }
-}
-
-func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
- return NewTBinaryProtocolConf(t, p.cfg)
-}
-
-func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
- p.cfg = conf
-}
-
-/**
- * Writing Methods
- */
-
-func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
- if p.cfg.GetTBinaryStrictWrite() {
- version := uint32(VERSION_1) | uint32(typeId)
- e := p.WriteI32(ctx, int32(version))
- if e != nil {
- return e
- }
- e = p.WriteString(ctx, name)
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, seqId)
- return e
- } else {
- e := p.WriteString(ctx, name)
- if e != nil {
- return e
- }
- e = p.WriteByte(ctx, int8(typeId))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, seqId)
- return e
- }
- return nil
-}
-
-func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- e := p.WriteByte(ctx, int8(typeId))
- if e != nil {
- return e
- }
- e = p.WriteI16(ctx, id)
- return e
-}
-
-func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
- e := p.WriteByte(ctx, STOP)
- return e
-}
-
-func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- e := p.WriteByte(ctx, int8(keyType))
- if e != nil {
- return e
- }
- e = p.WriteByte(ctx, int8(valueType))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, int32(size))
- return e
-}
-
-func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- e := p.WriteByte(ctx, int8(elemType))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, int32(size))
- return e
-}
-
-func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- e := p.WriteByte(ctx, int8(elemType))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, int32(size))
- return e
-}
-
-func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
- if value {
- return p.WriteByte(ctx, 1)
- }
- return p.WriteByte(ctx, 0)
-}
-
-func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
- e := p.trans.WriteByte(byte(value))
- return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
- v := p.buffer[0:2]
- binary.BigEndian.PutUint16(v, uint16(value))
- _, e := p.trans.Write(v)
- return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
- v := p.buffer[0:4]
- binary.BigEndian.PutUint32(v, uint32(value))
- _, e := p.trans.Write(v)
- return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
- v := p.buffer[0:8]
- binary.BigEndian.PutUint64(v, uint64(value))
- _, err := p.trans.Write(v)
- return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
- return p.WriteI64(ctx, int64(math.Float64bits(value)))
-}
-
-func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
- e := p.WriteI32(ctx, int32(len(value)))
- if e != nil {
- return e
- }
- _, err := p.trans.WriteString(value)
- return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
- e := p.WriteI32(ctx, int32(len(value)))
- if e != nil {
- return e
- }
- _, err := p.trans.Write(value)
- return NewTProtocolException(err)
-}
-
-/**
- * Reading methods
- */
-
-func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
- size, e := p.ReadI32(ctx)
- if e != nil {
- return "", typeId, 0, NewTProtocolException(e)
- }
- if size < 0 {
- typeId = TMessageType(size & 0x0ff)
- version := int64(int64(size) & VERSION_MASK)
- if version != VERSION_1 {
- return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
- }
- name, e = p.ReadString(ctx)
- if e != nil {
- return name, typeId, seqId, NewTProtocolException(e)
- }
- seqId, e = p.ReadI32(ctx)
- if e != nil {
- return name, typeId, seqId, NewTProtocolException(e)
- }
- return name, typeId, seqId, nil
- }
- if p.cfg.GetTBinaryStrictRead() {
- return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
- }
- name, e2 := p.readStringBody(size)
- if e2 != nil {
- return name, typeId, seqId, e2
- }
- b, e3 := p.ReadByte(ctx)
- if e3 != nil {
- return name, typeId, seqId, e3
- }
- typeId = TMessageType(b)
- seqId, e4 := p.ReadI32(ctx)
- if e4 != nil {
- return name, typeId, seqId, e4
- }
- return name, typeId, seqId, nil
-}
-
-func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- return
-}
-
-func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
- t, err := p.ReadByte(ctx)
- typeId = TType(t)
- if err != nil {
- return name, typeId, seqId, err
- }
- if t != STOP {
- seqId, err = p.ReadI16(ctx)
- }
- return name, typeId, seqId, err
-}
-
-func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
- return nil
-}
-
-var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
-
-func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
- k, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- kType = TType(k)
- v, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- vType = TType(v)
- size32, e := p.ReadI32(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
- return kType, vType, size, nil
-}
-
-func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
- b, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- elemType = TType(b)
- size32, e := p.ReadI32(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
-
- return
-}
-
-func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
- b, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- elemType = TType(b)
- size32, e := p.ReadI32(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
- return elemType, size, nil
-}
-
-func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
- b, e := p.ReadByte(ctx)
- v := true
- if b != 1 {
- v = false
- }
- return v, e
-}
-
-func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
- v, err := p.trans.ReadByte()
- return int8(v), err
-}
-
-func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
- buf := p.buffer[0:2]
- err = p.readAll(ctx, buf)
- value = int16(binary.BigEndian.Uint16(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
- buf := p.buffer[0:4]
- err = p.readAll(ctx, buf)
- value = int32(binary.BigEndian.Uint32(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
- buf := p.buffer[0:8]
- err = p.readAll(ctx, buf)
- value = int64(binary.BigEndian.Uint64(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
- buf := p.buffer[0:8]
- err = p.readAll(ctx, buf)
- value = math.Float64frombits(binary.BigEndian.Uint64(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
- size, e := p.ReadI32(ctx)
- if e != nil {
- return "", e
- }
- err = checkSizeForProtocol(size, p.cfg)
- if err != nil {
- return
- }
- if size < 0 {
- err = invalidDataLength
- return
- }
- if size == 0 {
- return "", nil
- }
- if size < int32(len(p.buffer)) {
- // Avoid allocation on small reads
- buf := p.buffer[:size]
- read, e := io.ReadFull(p.trans, buf)
- return string(buf[:read]), NewTProtocolException(e)
- }
-
- return p.readStringBody(size)
-}
-
-func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
- size, e := p.ReadI32(ctx)
- if e != nil {
- return nil, e
- }
- if err := checkSizeForProtocol(size, p.cfg); err != nil {
- return nil, err
- }
-
- buf, err := safeReadBytes(size, p.trans)
- return buf, NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
- return NewTProtocolException(p.trans.Flush(ctx))
-}
-
-func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TBinaryProtocol) Transport() TTransport {
- return p.origTransport
-}
-
-func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
- var read int
- _, deadlineSet := ctx.Deadline()
- for {
- read, err = io.ReadFull(p.trans, buf)
- if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
- // This is I/O timeout without anything read,
- // and we still have time left, keep retrying.
- continue
- }
- // For anything else, don't retry
- break
- }
- return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
- buf, err := safeReadBytes(size, p.trans)
- return string(buf), NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.trans, conf)
- PropagateTConfiguration(p.origTransport, conf)
- p.cfg = conf
-}
-
-var (
- _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
- _ TConfigurationSetter = (*TBinaryProtocol)(nil)
-)
-
-// This function is shared between TBinaryProtocol and TCompactProtocol.
-//
-// It tries to read size bytes from trans, in a way that prevents large
-// allocations when size is insanely large (mostly caused by malformed message).
-func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
- if size < 0 {
- return nil, nil
- }
-
- buf := new(bytes.Buffer)
- _, err := io.CopyN(buf, trans, int64(size))
- return buf.Bytes(), err
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go
deleted file mode 100644
index aa551b4ab..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bufio"
- "context"
-)
-
-type TBufferedTransportFactory struct {
- size int
-}
-
-type TBufferedTransport struct {
- bufio.ReadWriter
- tp TTransport
-}
-
-func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- return NewTBufferedTransport(trans, p.size), nil
-}
-
-func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
- return &TBufferedTransportFactory{size: bufferSize}
-}
-
-func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
- return &TBufferedTransport{
- ReadWriter: bufio.ReadWriter{
- Reader: bufio.NewReaderSize(trans, bufferSize),
- Writer: bufio.NewWriterSize(trans, bufferSize),
- },
- tp: trans,
- }
-}
-
-func (p *TBufferedTransport) IsOpen() bool {
- return p.tp.IsOpen()
-}
-
-func (p *TBufferedTransport) Open() (err error) {
- return p.tp.Open()
-}
-
-func (p *TBufferedTransport) Close() (err error) {
- return p.tp.Close()
-}
-
-func (p *TBufferedTransport) Read(b []byte) (int, error) {
- n, err := p.ReadWriter.Read(b)
- if err != nil {
- p.ReadWriter.Reader.Reset(p.tp)
- }
- return n, err
-}
-
-func (p *TBufferedTransport) Write(b []byte) (int, error) {
- n, err := p.ReadWriter.Write(b)
- if err != nil {
- p.ReadWriter.Writer.Reset(p.tp)
- }
- return n, err
-}
-
-func (p *TBufferedTransport) Flush(ctx context.Context) error {
- if err := p.ReadWriter.Flush(); err != nil {
- p.ReadWriter.Writer.Reset(p.tp)
- return err
- }
- return p.tp.Flush(ctx)
-}
-
-func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
- return p.tp.RemainingBytes()
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.tp, conf)
-}
-
-var _ TConfigurationSetter = (*TBufferedTransport)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go
deleted file mode 100644
index ea2c01fda..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package thrift
-
-import (
- "context"
- "fmt"
-)
-
-// ResponseMeta represents the metadata attached to the response.
-type ResponseMeta struct {
- // The headers in the response, if any.
- // If the underlying transport/protocol is not THeader, this will always be nil.
- Headers THeaderMap
-}
-
-type TClient interface {
- Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
-}
-
-type TStandardClient struct {
- seqId int32
- iprot, oprot TProtocol
-}
-
-// TStandardClient implements TClient, and uses the standard message format for Thrift.
-// It is not safe for concurrent use.
-func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
- return &TStandardClient{
- iprot: inputProtocol,
- oprot: outputProtocol,
- }
-}
-
-func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
- // Set headers from context object on THeaderProtocol
- if headerProt, ok := oprot.(*THeaderProtocol); ok {
- headerProt.ClearWriteHeaders()
- for _, key := range GetWriteHeaderList(ctx) {
- if value, ok := GetHeader(ctx, key); ok {
- headerProt.SetWriteHeader(key, value)
- }
- }
- }
-
- if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
- return err
- }
- if err := args.Write(ctx, oprot); err != nil {
- return err
- }
- if err := oprot.WriteMessageEnd(ctx); err != nil {
- return err
- }
- return oprot.Flush(ctx)
-}
-
-func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
- rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
- if err != nil {
- return err
- }
-
- if method != rMethod {
- return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
- } else if seqId != rSeqId {
- return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
- } else if rTypeId == EXCEPTION {
- var exception tApplicationException
- if err := exception.Read(ctx, iprot); err != nil {
- return err
- }
-
- if err := iprot.ReadMessageEnd(ctx); err != nil {
- return err
- }
-
- return &exception
- } else if rTypeId != REPLY {
- return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
- }
-
- if err := result.Read(ctx, iprot); err != nil {
- return err
- }
-
- return iprot.ReadMessageEnd(ctx)
-}
-
-func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
- p.seqId++
- seqId := p.seqId
-
- if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
- return ResponseMeta{}, err
- }
-
- // method is oneway
- if result == nil {
- return ResponseMeta{}, nil
- }
-
- err := p.Recv(ctx, p.iprot, seqId, method, result)
- var headers THeaderMap
- if hp, ok := p.iprot.(*THeaderProtocol); ok {
- headers = hp.transport.readHeaders
- }
- return ResponseMeta{
- Headers: headers,
- }, err
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go
deleted file mode 100644
index a49225dab..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go
+++ /dev/null
@@ -1,865 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math"
-)
-
-const (
- COMPACT_PROTOCOL_ID = 0x082
- COMPACT_VERSION = 1
- COMPACT_VERSION_MASK = 0x1f
- COMPACT_TYPE_MASK = 0x0E0
- COMPACT_TYPE_BITS = 0x07
- COMPACT_TYPE_SHIFT_AMOUNT = 5
-)
-
-type tCompactType byte
-
-const (
- COMPACT_BOOLEAN_TRUE = 0x01
- COMPACT_BOOLEAN_FALSE = 0x02
- COMPACT_BYTE = 0x03
- COMPACT_I16 = 0x04
- COMPACT_I32 = 0x05
- COMPACT_I64 = 0x06
- COMPACT_DOUBLE = 0x07
- COMPACT_BINARY = 0x08
- COMPACT_LIST = 0x09
- COMPACT_SET = 0x0A
- COMPACT_MAP = 0x0B
- COMPACT_STRUCT = 0x0C
-)
-
-var (
- ttypeToCompactType map[TType]tCompactType
-)
-
-func init() {
- ttypeToCompactType = map[TType]tCompactType{
- STOP: STOP,
- BOOL: COMPACT_BOOLEAN_TRUE,
- BYTE: COMPACT_BYTE,
- I16: COMPACT_I16,
- I32: COMPACT_I32,
- I64: COMPACT_I64,
- DOUBLE: COMPACT_DOUBLE,
- STRING: COMPACT_BINARY,
- LIST: COMPACT_LIST,
- SET: COMPACT_SET,
- MAP: COMPACT_MAP,
- STRUCT: COMPACT_STRUCT,
- }
-}
-
-type TCompactProtocolFactory struct {
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTCompactProtocolFactoryConf instead.
-func NewTCompactProtocolFactory() *TCompactProtocolFactory {
- return NewTCompactProtocolFactoryConf(&TConfiguration{
- noPropagation: true,
- })
-}
-
-func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
- return &TCompactProtocolFactory{
- cfg: conf,
- }
-}
-
-func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return NewTCompactProtocolConf(trans, p.cfg)
-}
-
-func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
- p.cfg = conf
-}
-
-type TCompactProtocol struct {
- trans TRichTransport
- origTransport TTransport
-
- cfg *TConfiguration
-
- // Used to keep track of the last field for the current and previous structs,
- // so we can do the delta stuff.
- lastField []int
- lastFieldId int
-
- // If we encounter a boolean field begin, save the TField here so it can
- // have the value incorporated.
- booleanFieldName string
- booleanFieldId int16
- booleanFieldPending bool
-
- // If we read a field header, and it's a boolean field, save the boolean
- // value here so that readBool can use it.
- boolValue bool
- boolValueIsNotNull bool
- buffer [64]byte
-}
-
-// Deprecated: Use NewTCompactProtocolConf instead.
-func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
- return NewTCompactProtocolConf(trans, &TConfiguration{
- noPropagation: true,
- })
-}
-
-func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
- PropagateTConfiguration(trans, conf)
- p := &TCompactProtocol{
- origTransport: trans,
- cfg: conf,
- }
- if et, ok := trans.(TRichTransport); ok {
- p.trans = et
- } else {
- p.trans = NewTRichTransport(trans)
- }
-
- return p
-}
-
-//
-// Public Writing methods.
-//
-
-// Write a message header to the wire. Compact Protocol messages contain the
-// protocol version so we can migrate forwards in the future if need be.
-func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
- err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
- if err != nil {
- return NewTProtocolException(err)
- }
- err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
- if err != nil {
- return NewTProtocolException(err)
- }
- _, err = p.writeVarint32(seqid)
- if err != nil {
- return NewTProtocolException(err)
- }
- e := p.WriteString(ctx, name)
- return e
-
-}
-
-func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
-
-// Write a struct begin. This doesn't actually put anything on the wire. We
-// use it as an opportunity to put special placeholder markers on the field
-// stack so we can get the field id deltas correct.
-func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
- p.lastField = append(p.lastField, p.lastFieldId)
- p.lastFieldId = 0
- return nil
-}
-
-// Write a struct end. This doesn't actually put anything on the wire. We use
-// this as an opportunity to pop the last field from the current struct off
-// of the field stack.
-func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
- if len(p.lastField) <= 0 {
- return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
- }
- p.lastFieldId = p.lastField[len(p.lastField)-1]
- p.lastField = p.lastField[:len(p.lastField)-1]
- return nil
-}
-
-func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- if typeId == BOOL {
- // we want to possibly include the value, so we'll wait.
- p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
- return nil
- }
- _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
- return NewTProtocolException(err)
-}
-
-// The workhorse of writeFieldBegin. It has the option of doing a
-// 'type override' of the type header. This is used specifically in the
-// boolean field case.
-func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
- // short lastField = lastField_.pop();
-
- // if there's a type override, use that.
- var typeToWrite byte
- if typeOverride == 0xFF {
- typeToWrite = byte(p.getCompactType(typeId))
- } else {
- typeToWrite = typeOverride
- }
- // check if we can use delta encoding for the field id
- fieldId := int(id)
- written := 0
- if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
- // write them together
- err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
- if err != nil {
- return 0, err
- }
- } else {
- // write them separate
- err := p.writeByteDirect(typeToWrite)
- if err != nil {
- return 0, err
- }
- err = p.WriteI16(ctx, id)
- written = 1 + 2
- if err != nil {
- return 0, err
- }
- }
-
- p.lastFieldId = fieldId
- return written, nil
-}
-
-func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
-
-func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
- err := p.writeByteDirect(STOP)
- return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- if size == 0 {
- err := p.writeByteDirect(0)
- return NewTProtocolException(err)
- }
- _, err := p.writeVarint32(int32(size))
- if err != nil {
- return NewTProtocolException(err)
- }
- err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
- return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
-
-// Write a list header.
-func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- _, err := p.writeCollectionBegin(elemType, size)
- return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
-
-// Write a set header.
-func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- _, err := p.writeCollectionBegin(elemType, size)
- return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
-
-func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
- v := byte(COMPACT_BOOLEAN_FALSE)
- if value {
- v = byte(COMPACT_BOOLEAN_TRUE)
- }
- if p.booleanFieldPending {
- // we haven't written the field header yet
- _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
- p.booleanFieldPending = false
- return NewTProtocolException(err)
- }
- // we're not part of a field, so just write the value.
- err := p.writeByteDirect(v)
- return NewTProtocolException(err)
-}
-
-// Write a byte. Nothing to see here!
-func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
- err := p.writeByteDirect(byte(value))
- return NewTProtocolException(err)
-}
-
-// Write an I16 as a zigzag varint.
-func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
- _, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
- return NewTProtocolException(err)
-}
-
-// Write an i32 as a zigzag varint.
-func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
- _, err := p.writeVarint32(p.int32ToZigzag(value))
- return NewTProtocolException(err)
-}
-
-// Write an i64 as a zigzag varint.
-func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
- _, err := p.writeVarint64(p.int64ToZigzag(value))
- return NewTProtocolException(err)
-}
-
-// Write a double to the wire as 8 bytes.
-func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
- buf := p.buffer[0:8]
- binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
- _, err := p.trans.Write(buf)
- return NewTProtocolException(err)
-}
-
-// Write a string to the wire with a varint size preceding.
-func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
- _, e := p.writeVarint32(int32(len(value)))
- if e != nil {
- return NewTProtocolException(e)
- }
- if len(value) > 0 {
- }
- _, e = p.trans.WriteString(value)
- return e
-}
-
-// Write a byte array, using a varint for the size.
-func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
- _, e := p.writeVarint32(int32(len(bin)))
- if e != nil {
- return NewTProtocolException(e)
- }
- if len(bin) > 0 {
- _, e = p.trans.Write(bin)
- return NewTProtocolException(e)
- }
- return nil
-}
-
-//
-// Reading methods.
-//
-
-// Read a message header.
-func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
- var protocolId byte
-
- _, deadlineSet := ctx.Deadline()
- for {
- protocolId, err = p.readByteDirect()
- if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
- // keep retrying I/O timeout errors since we still have
- // time left
- continue
- }
- // For anything else, don't retry
- break
- }
- if err != nil {
- return
- }
-
- if protocolId != COMPACT_PROTOCOL_ID {
- e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
- return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
- }
-
- versionAndType, err := p.readByteDirect()
- if err != nil {
- return
- }
-
- version := versionAndType & COMPACT_VERSION_MASK
- typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
- if version != COMPACT_VERSION {
- e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
- err = NewTProtocolExceptionWithType(BAD_VERSION, e)
- return
- }
- seqId, e := p.readVarint32()
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- name, err = p.ReadString(ctx)
- return
-}
-
-func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
-
-// Read a struct begin. There's nothing on the wire for this, but it is our
-// opportunity to push a new struct begin marker onto the field stack.
-func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- p.lastField = append(p.lastField, p.lastFieldId)
- p.lastFieldId = 0
- return
-}
-
-// Doesn't actually consume any wire data, just removes the last field for
-// this struct from the field stack.
-func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
- // consume the last field we read off the wire.
- if len(p.lastField) <= 0 {
- return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
- }
- p.lastFieldId = p.lastField[len(p.lastField)-1]
- p.lastField = p.lastField[:len(p.lastField)-1]
- return nil
-}
-
-// Read a field header off the wire.
-func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
- t, err := p.readByteDirect()
- if err != nil {
- return
- }
-
- // if it's a stop, then we can return immediately, as the struct is over.
- if (t & 0x0f) == STOP {
- return "", STOP, 0, nil
- }
-
- // mask off the 4 MSB of the type header. it could contain a field id delta.
- modifier := int16((t & 0xf0) >> 4)
- if modifier == 0 {
- // not a delta. look ahead for the zigzag varint field id.
- id, err = p.ReadI16(ctx)
- if err != nil {
- return
- }
- } else {
- // has a delta. add the delta to the last read field id.
- id = int16(p.lastFieldId) + modifier
- }
- typeId, e := p.getTType(tCompactType(t & 0x0f))
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
-
- // if this happens to be a boolean field, the value is encoded in the type
- if p.isBoolType(t) {
- // save the boolean value in a special instance variable.
- p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
- p.boolValueIsNotNull = true
- }
-
- // push the new field onto the field stack so we can keep the deltas going.
- p.lastFieldId = int(id)
- return
-}
-
-func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
-
-// Read a map header off the wire. If the size is zero, skip reading the key
-// and value type. This means that 0-length maps will yield TMaps without the
-// "correct" types.
-func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
- size32, e := p.readVarint32()
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
-
- keyAndValueType := byte(STOP)
- if size != 0 {
- keyAndValueType, err = p.readByteDirect()
- if err != nil {
- return
- }
- }
- keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
- valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
- return
-}
-
-func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
-
-// Read a list header off the wire. If the list size is 0-14, the size will
-// be packed into the element type header. If it's a longer list, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
- size_and_type, err := p.readByteDirect()
- if err != nil {
- return
- }
- size = int((size_and_type >> 4) & 0x0f)
- if size == 15 {
- size2, e := p.readVarint32()
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size2 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size2)
- }
- elemType, e := p.getTType(tCompactType(size_and_type))
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- return
-}
-
-func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
-
-// Read a set header off the wire. If the set size is 0-14, the size will
-// be packed into the element type header. If it's a longer set, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
- return p.ReadListBegin(ctx)
-}
-
-func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
-
-// Read a boolean off the wire. If this is a boolean field, the value should
-// already have been read during readFieldBegin, so we'll just consume the
-// pre-stored value. Otherwise, read a byte.
-func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
- if p.boolValueIsNotNull {
- p.boolValueIsNotNull = false
- return p.boolValue, nil
- }
- v, err := p.readByteDirect()
- return v == COMPACT_BOOLEAN_TRUE, err
-}
-
-// Read a single byte off the wire. Nothing interesting here.
-func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
- v, err := p.readByteDirect()
- if err != nil {
- return 0, NewTProtocolException(err)
- }
- return int8(v), err
-}
-
-// Read an i16 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
- v, err := p.ReadI32(ctx)
- return int16(v), err
-}
-
-// Read an i32 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
- v, e := p.readVarint32()
- if e != nil {
- return 0, NewTProtocolException(e)
- }
- value = p.zigzagToInt32(v)
- return value, nil
-}
-
-// Read an i64 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
- v, e := p.readVarint64()
- if e != nil {
- return 0, NewTProtocolException(e)
- }
- value = p.zigzagToInt64(v)
- return value, nil
-}
-
-// No magic here - just read a double off the wire.
-func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
- longBits := p.buffer[0:8]
- _, e := io.ReadFull(p.trans, longBits)
- if e != nil {
- return 0.0, NewTProtocolException(e)
- }
- return math.Float64frombits(p.bytesToUint64(longBits)), nil
-}
-
-// Reads a []byte (via readBinary), and then UTF-8 decodes it.
-func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
- length, e := p.readVarint32()
- if e != nil {
- return "", NewTProtocolException(e)
- }
- err = checkSizeForProtocol(length, p.cfg)
- if err != nil {
- return
- }
- if length == 0 {
- return "", nil
- }
- if length < int32(len(p.buffer)) {
- // Avoid allocation on small reads
- buf := p.buffer[:length]
- read, e := io.ReadFull(p.trans, buf)
- return string(buf[:read]), NewTProtocolException(e)
- }
-
- buf, e := safeReadBytes(length, p.trans)
- return string(buf), NewTProtocolException(e)
-}
-
-// Read a []byte from the wire.
-func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
- length, e := p.readVarint32()
- if e != nil {
- return nil, NewTProtocolException(e)
- }
- err = checkSizeForProtocol(length, p.cfg)
- if err != nil {
- return
- }
- if length == 0 {
- return []byte{}, nil
- }
-
- buf, e := safeReadBytes(length, p.trans)
- return buf, NewTProtocolException(e)
-}
-
-func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
- return NewTProtocolException(p.trans.Flush(ctx))
-}
-
-func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TCompactProtocol) Transport() TTransport {
- return p.origTransport
-}
-
-//
-// Internal writing methods
-//
-
-// Abstract method for writing the start of lists and sets. List and sets on
-// the wire differ only by the type indicator.
-func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
- if size <= 14 {
- return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
- }
- err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
- if err != nil {
- return 0, err
- }
- m, err := p.writeVarint32(int32(size))
- return 1 + m, err
-}
-
-// Write an i32 as a varint. Results in 1-5 bytes on the wire.
-// TODO(pomack): make a permanent buffer like writeVarint64?
-func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
- i32buf := p.buffer[0:5]
- idx := 0
- for {
- if (n & ^0x7F) == 0 {
- i32buf[idx] = byte(n)
- idx++
- // p.writeByteDirect(byte(n));
- break
- // return;
- } else {
- i32buf[idx] = byte((n & 0x7F) | 0x80)
- idx++
- // p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
- u := uint32(n)
- n = int32(u >> 7)
- }
- }
- return p.trans.Write(i32buf[0:idx])
-}
-
-// Write an i64 as a varint. Results in 1-10 bytes on the wire.
-func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
- varint64out := p.buffer[0:10]
- idx := 0
- for {
- if (n & ^0x7F) == 0 {
- varint64out[idx] = byte(n)
- idx++
- break
- } else {
- varint64out[idx] = byte((n & 0x7F) | 0x80)
- idx++
- u := uint64(n)
- n = int64(u >> 7)
- }
- }
- return p.trans.Write(varint64out[0:idx])
-}
-
-// Convert l into a zigzag long. This allows negative numbers to be
-// represented compactly as a varint.
-func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
- return (l << 1) ^ (l >> 63)
-}
-
-// Convert l into a zigzag long. This allows negative numbers to be
-// represented compactly as a varint.
-func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
- return (n << 1) ^ (n >> 31)
-}
-
-func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
- binary.LittleEndian.PutUint64(buf, n)
-}
-
-func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
- binary.LittleEndian.PutUint64(buf, uint64(n))
-}
-
-// Writes a byte without any possibility of all that field header nonsense.
-// Used internally by other writing methods that know they need to write a byte.
-func (p *TCompactProtocol) writeByteDirect(b byte) error {
- return p.trans.WriteByte(b)
-}
-
-// Writes a byte without any possibility of all that field header nonsense.
-func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
- return 1, p.writeByteDirect(byte(n))
-}
-
-//
-// Internal reading methods
-//
-
-// Read an i32 from the wire as a varint. The MSB of each byte is set
-// if there is another byte to follow. This can read up to 5 bytes.
-func (p *TCompactProtocol) readVarint32() (int32, error) {
- // if the wire contains the right stuff, this will just truncate the i64 we
- // read and get us the right sign.
- v, err := p.readVarint64()
- return int32(v), err
-}
-
-// Read an i64 from the wire as a proper varint. The MSB of each byte is set
-// if there is another byte to follow. This can read up to 10 bytes.
-func (p *TCompactProtocol) readVarint64() (int64, error) {
- shift := uint(0)
- result := int64(0)
- for {
- b, err := p.readByteDirect()
- if err != nil {
- return 0, err
- }
- result |= int64(b&0x7f) << shift
- if (b & 0x80) != 0x80 {
- break
- }
- shift += 7
- }
- return result, nil
-}
-
-// Read a byte, unlike ReadByte that reads Thrift-byte that is i8.
-func (p *TCompactProtocol) readByteDirect() (byte, error) {
- return p.trans.ReadByte()
-}
-
-//
-// encoding helpers
-//
-
-// Convert from zigzag int to int.
-func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
- u := uint32(n)
- return int32(u>>1) ^ -(n & 1)
-}
-
-// Convert from zigzag long to long.
-func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
- u := uint64(n)
- return int64(u>>1) ^ -(n & 1)
-}
-
-// Note that it's important that the mask bytes are long literals,
-// otherwise they'll default to ints, and when you shift an int left 56 bits,
-// you just get a messed up int.
-func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
- return int64(binary.LittleEndian.Uint64(b))
-}
-
-// Note that it's important that the mask bytes are long literals,
-// otherwise they'll default to ints, and when you shift an int left 56 bits,
-// you just get a messed up int.
-func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
- return binary.LittleEndian.Uint64(b)
-}
-
-//
-// type testing and converting
-//
-
-func (p *TCompactProtocol) isBoolType(b byte) bool {
- return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
-}
-
-// Given a tCompactType constant, convert it to its corresponding
-// TType value.
-func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
- switch byte(t) & 0x0f {
- case STOP:
- return STOP, nil
- case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
- return BOOL, nil
- case COMPACT_BYTE:
- return BYTE, nil
- case COMPACT_I16:
- return I16, nil
- case COMPACT_I32:
- return I32, nil
- case COMPACT_I64:
- return I64, nil
- case COMPACT_DOUBLE:
- return DOUBLE, nil
- case COMPACT_BINARY:
- return STRING, nil
- case COMPACT_LIST:
- return LIST, nil
- case COMPACT_SET:
- return SET, nil
- case COMPACT_MAP:
- return MAP, nil
- case COMPACT_STRUCT:
- return STRUCT, nil
- }
- return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
-}
-
-// Given a TType value, find the appropriate TCompactProtocol.Types constant.
-func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
- return ttypeToCompactType[t]
-}
-
-func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.trans, conf)
- PropagateTConfiguration(p.origTransport, conf)
- p.cfg = conf
-}
-
-var (
- _ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
- _ TConfigurationSetter = (*TCompactProtocol)(nil)
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go
deleted file mode 100644
index 454d9f377..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "crypto/tls"
- "fmt"
- "time"
-)
-
-// Default TConfiguration values.
-const (
- DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
- DEFAULT_MAX_FRAME_SIZE = 16384000
-
- DEFAULT_TBINARY_STRICT_READ = false
- DEFAULT_TBINARY_STRICT_WRITE = true
-
- DEFAULT_CONNECT_TIMEOUT = 0
- DEFAULT_SOCKET_TIMEOUT = 0
-)
-
-// TConfiguration defines some configurations shared between TTransport,
-// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
-//
-// When constructing TConfiguration, you only need to specify the non-default
-// fields. All zero values have sane default values.
-//
-// Not all configurations defined are applicable to all implementations.
-// Implementations are free to ignore the configurations not applicable to them.
-//
-// All functions attached to this type are nil-safe.
-//
-// See [1] for spec.
-//
-// NOTE: When using TConfiguration, fill in all the configurations you want to
-// set across the stack, not only the ones you want to set in the immediate
-// TTransport/TProtocol.
-//
-// For example, say you want to migrate this old code into using TConfiguration:
-//
-// sccket := thrift.NewTSocketTimeout("host:port", time.Second)
-// transFactory := thrift.NewTFramedTransportFactoryMaxLength(
-// thrift.NewTTransportFactory(),
-// 1024 * 1024 * 256,
-// )
-// protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
-//
-// This is the wrong way to do it because in the end the TConfiguration used by
-// socket and transFactory will be overwritten by the one used by protoFactory
-// because of TConfiguration propagation:
-//
-// // bad example, DO NOT USE
-// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
-// ConnectTimeout: time.Second,
-// SocketTimeout: time.Second,
-// })
-// transFactory := thrift.NewTFramedTransportFactoryConf(
-// thrift.NewTTransportFactory(),
-// &thrift.TConfiguration{
-// MaxFrameSize: 1024 * 1024 * 256,
-// },
-// )
-// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
-// TBinaryStrictRead: thrift.BoolPtr(true),
-// TBinaryStrictWrite: thrift.BoolPtr(true),
-// })
-//
-// This is the correct way to do it:
-//
-// conf := &thrift.TConfiguration{
-// ConnectTimeout: time.Second,
-// SocketTimeout: time.Second,
-//
-// MaxFrameSize: 1024 * 1024 * 256,
-//
-// TBinaryStrictRead: thrift.BoolPtr(true),
-// TBinaryStrictWrite: thrift.BoolPtr(true),
-// }
-// sccket := thrift.NewTSocketConf("host:port", conf)
-// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
-// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
-//
-// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
-type TConfiguration struct {
- // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
- MaxMessageSize int32
-
- // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
- //
- // Also if MaxMessageSize < MaxFrameSize,
- // MaxMessageSize will be used instead.
- MaxFrameSize int32
-
- // Connect and socket timeouts to be used by TSocket and TSSLSocket.
- //
- // 0 means no timeout.
- //
- // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
- // used.
- ConnectTimeout time.Duration
- SocketTimeout time.Duration
-
- // TLS config to be used by TSSLSocket.
- TLSConfig *tls.Config
-
- // Strict read/write configurations for TBinaryProtocol.
- //
- // BoolPtr helper function is available to use literal values.
- TBinaryStrictRead *bool
- TBinaryStrictWrite *bool
-
- // The wrapped protocol id to be used in THeader transport/protocol.
- //
- // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
- // are provided to help filling this value.
- THeaderProtocolID *THeaderProtocolID
-
- // Used internally by deprecated constructors, to avoid overriding
- // underlying TTransport/TProtocol's cfg by accidental propagations.
- //
- // For external users this is always false.
- noPropagation bool
-}
-
-// GetMaxMessageSize returns the max message size an implementation should
-// follow.
-//
-// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
-func (tc *TConfiguration) GetMaxMessageSize() int32 {
- if tc == nil || tc.MaxMessageSize <= 0 {
- return DEFAULT_MAX_MESSAGE_SIZE
- }
- return tc.MaxMessageSize
-}
-
-// GetMaxFrameSize returns the max frame size an implementation should follow.
-//
-// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
-//
-// If the configured max message size is smaller than the configured max frame
-// size, the smaller one will be returned instead.
-func (tc *TConfiguration) GetMaxFrameSize() int32 {
- if tc == nil {
- return DEFAULT_MAX_FRAME_SIZE
- }
- maxFrameSize := tc.MaxFrameSize
- if maxFrameSize <= 0 {
- maxFrameSize = DEFAULT_MAX_FRAME_SIZE
- }
- if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
- return maxMessageSize
- }
- return maxFrameSize
-}
-
-// GetConnectTimeout returns the connect timeout should be used by TSocket and
-// TSSLSocket.
-//
-// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
-func (tc *TConfiguration) GetConnectTimeout() time.Duration {
- if tc == nil || tc.ConnectTimeout < 0 {
- return DEFAULT_CONNECT_TIMEOUT
- }
- return tc.ConnectTimeout
-}
-
-// GetSocketTimeout returns the socket timeout should be used by TSocket and
-// TSSLSocket.
-//
-// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
-func (tc *TConfiguration) GetSocketTimeout() time.Duration {
- if tc == nil || tc.SocketTimeout < 0 {
- return DEFAULT_SOCKET_TIMEOUT
- }
- return tc.SocketTimeout
-}
-
-// GetTLSConfig returns the tls config should be used by TSSLSocket.
-//
-// It's nil-safe. If tc is nil, nil will be returned instead.
-func (tc *TConfiguration) GetTLSConfig() *tls.Config {
- if tc == nil {
- return nil
- }
- return tc.TLSConfig
-}
-
-// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
-// should follow.
-//
-// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
-// tc.TBinaryStrictRead is nil.
-func (tc *TConfiguration) GetTBinaryStrictRead() bool {
- if tc == nil || tc.TBinaryStrictRead == nil {
- return DEFAULT_TBINARY_STRICT_READ
- }
- return *tc.TBinaryStrictRead
-}
-
-// GetTBinaryStrictWrite returns the strict read configuration TBinaryProtocol
-// should follow.
-//
-// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
-// tc.TBinaryStrictWrite is nil.
-func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
- if tc == nil || tc.TBinaryStrictWrite == nil {
- return DEFAULT_TBINARY_STRICT_WRITE
- }
- return *tc.TBinaryStrictWrite
-}
-
-// GetTHeaderProtocolID returns the THeaderProtocolID should be used by
-// THeaderProtocol clients (for servers, they always use the same one as the
-// client instead).
-//
-// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
-// THeaderProtocolDefault will be returned instead.
-// THeaderProtocolDefault will also be returned if configured value is invalid.
-func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
- if tc == nil || tc.THeaderProtocolID == nil {
- return THeaderProtocolDefault
- }
- protoID := *tc.THeaderProtocolID
- if err := protoID.Validate(); err != nil {
- return THeaderProtocolDefault
- }
- return protoID
-}
-
-// THeaderProtocolIDPtr validates and returns the pointer to id.
-//
-// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
-// and the validation error will be returned.
-func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
- err := id.Validate()
- if err != nil {
- id = THeaderProtocolDefault
- }
- return &id, err
-}
-
-// THeaderProtocolIDPtrMust validates and returns the pointer to id.
-//
-// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
-// instead of returning them.
-func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
- ptr, err := THeaderProtocolIDPtr(id)
- if err != nil {
- panic(err)
- }
- return ptr
-}
-
-// TConfigurationSetter is an optional interface TProtocol, TTransport,
-// TProtocolFactory, TTransportFactory, and other implementations can implement.
-//
-// It's intended to be called during intializations.
-// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
-// middle of a message is undefined:
-// It may or may not change the behavior of the current processing message,
-// and it may even cause the current message to fail.
-//
-// Note for implementations: SetTConfiguration might be called multiple times
-// with the same value in quick successions due to the implementation of the
-// propagation. Implementations should make SetTConfiguration as simple as
-// possible (usually just overwrite the stored configuration and propagate it to
-// the wrapped TTransports/TProtocols).
-type TConfigurationSetter interface {
- SetTConfiguration(*TConfiguration)
-}
-
-// PropagateTConfiguration propagates cfg to impl if impl implements
-// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
-//
-// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
-// with everything being default value, use &TConfiguration{} explicitly instead.
-func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
- if cfg == nil || cfg.noPropagation {
- return
- }
-
- if setter, ok := impl.(TConfigurationSetter); ok {
- setter.SetTConfiguration(cfg)
- }
-}
-
-func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
- if size < 0 {
- return NewTProtocolExceptionWithType(
- NEGATIVE_SIZE,
- fmt.Errorf("negative size: %d", size),
- )
- }
- if size > cfg.GetMaxMessageSize() {
- return NewTProtocolExceptionWithType(
- SIZE_LIMIT,
- fmt.Errorf("size exceeded max allowed: %d", size),
- )
- }
- return nil
-}
-
-type tTransportFactoryConf struct {
- delegate TTransportFactory
- cfg *TConfiguration
-}
-
-func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
- trans, err := f.delegate.GetTransport(orig)
- if err == nil {
- PropagateTConfiguration(orig, f.cfg)
- PropagateTConfiguration(trans, f.cfg)
- }
- return trans, err
-}
-
-func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(f.delegate, f.cfg)
- f.cfg = cfg
-}
-
-// TTransportFactoryConf wraps a TTransportFactory to propagate
-// TConfiguration on the factory's GetTransport calls.
-func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
- return &tTransportFactoryConf{
- delegate: delegate,
- cfg: conf,
- }
-}
-
-type tProtocolFactoryConf struct {
- delegate TProtocolFactory
- cfg *TConfiguration
-}
-
-func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
- proto := f.delegate.GetProtocol(trans)
- PropagateTConfiguration(trans, f.cfg)
- PropagateTConfiguration(proto, f.cfg)
- return proto
-}
-
-func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(f.delegate, f.cfg)
- f.cfg = cfg
-}
-
-// TProtocolFactoryConf wraps a TProtocolFactory to propagate
-// TConfiguration on the factory's GetProtocol calls.
-func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
- return &tProtocolFactoryConf{
- delegate: delegate,
- cfg: conf,
- }
-}
-
-var (
- _ TConfigurationSetter = (*tTransportFactoryConf)(nil)
- _ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go
deleted file mode 100644
index d15c1bcf8..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-var defaultCtx = context.Background()
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go
deleted file mode 100644
index fdf9bfec1..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "fmt"
-)
-
-type TDebugProtocol struct {
- // Required. The actual TProtocol to do the read/write.
- Delegate TProtocol
-
- // Optional. The logger and prefix to log all the args/return values
- // from Delegate TProtocol calls.
- //
- // If Logger is nil, StdLogger using stdlib log package with os.Stderr
- // will be used. If disable logging is desired, set Logger to NopLogger
- // explicitly instead of leaving it as nil/unset.
- Logger Logger
- LogPrefix string
-
- // Optional. An TProtocol to duplicate everything read/written from Delegate.
- //
- // A typical use case of this is to use TSimpleJSONProtocol wrapping
- // TMemoryBuffer in a middleware to json logging requests/responses.
- //
- // This feature is not available from TDebugProtocolFactory. In order to
- // use it you have to construct TDebugProtocol directly, or set DuplicateTo
- // field after getting a TDebugProtocol from the factory.
- DuplicateTo TProtocol
-}
-
-type TDebugProtocolFactory struct {
- Underlying TProtocolFactory
- LogPrefix string
- Logger Logger
-}
-
-// NewTDebugProtocolFactory creates a TDebugProtocolFactory.
-//
-// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct
-// itself instead. This version will use the default logger from standard
-// library.
-func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
- return &TDebugProtocolFactory{
- Underlying: underlying,
- LogPrefix: logPrefix,
- Logger: StdLogger(nil),
- }
-}
-
-// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory.
-func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory {
- return &TDebugProtocolFactory{
- Underlying: underlying,
- LogPrefix: logPrefix,
- Logger: logger,
- }
-}
-
-func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return &TDebugProtocol{
- Delegate: t.Underlying.GetProtocol(trans),
- LogPrefix: t.LogPrefix,
- Logger: fallbackLogger(t.Logger),
- }
-}
-
-func (tdp *TDebugProtocol) logf(format string, v ...interface{}) {
- fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...))
-}
-
-func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
- err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid)
- tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error {
- err := tdp.Delegate.WriteMessageEnd(ctx)
- tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMessageEnd(ctx)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error {
- err := tdp.Delegate.WriteStructBegin(ctx, name)
- tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteStructBegin(ctx, name)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error {
- err := tdp.Delegate.WriteStructEnd(ctx)
- tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteStructEnd(ctx)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id)
- tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error {
- err := tdp.Delegate.WriteFieldEnd(ctx)
- tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteFieldEnd(ctx)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error {
- err := tdp.Delegate.WriteFieldStop(ctx)
- tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteFieldStop(ctx)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size)
- tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error {
- err := tdp.Delegate.WriteMapEnd(ctx)
- tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMapEnd(ctx)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- err := tdp.Delegate.WriteListBegin(ctx, elemType, size)
- tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error {
- err := tdp.Delegate.WriteListEnd(ctx)
- tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteListEnd(ctx)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- err := tdp.Delegate.WriteSetBegin(ctx, elemType, size)
- tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error {
- err := tdp.Delegate.WriteSetEnd(ctx)
- tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteSetEnd(ctx)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error {
- err := tdp.Delegate.WriteBool(ctx, value)
- tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteBool(ctx, value)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error {
- err := tdp.Delegate.WriteByte(ctx, value)
- tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteByte(ctx, value)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error {
- err := tdp.Delegate.WriteI16(ctx, value)
- tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteI16(ctx, value)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error {
- err := tdp.Delegate.WriteI32(ctx, value)
- tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteI32(ctx, value)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error {
- err := tdp.Delegate.WriteI64(ctx, value)
- tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteI64(ctx, value)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error {
- err := tdp.Delegate.WriteDouble(ctx, value)
- tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteDouble(ctx, value)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error {
- err := tdp.Delegate.WriteString(ctx, value)
- tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteString(ctx, value)
- }
- return err
-}
-func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error {
- err := tdp.Delegate.WriteBinary(ctx, value)
- tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteBinary(ctx, value)
- }
- return err
-}
-
-func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
- name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx)
- tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) {
- err = tdp.Delegate.ReadMessageEnd(ctx)
- tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMessageEnd(ctx)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- name, err = tdp.Delegate.ReadStructBegin(ctx)
- tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteStructBegin(ctx, name)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) {
- err = tdp.Delegate.ReadStructEnd(ctx)
- tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteStructEnd(ctx)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
- name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx)
- tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) {
- err = tdp.Delegate.ReadFieldEnd(ctx)
- tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteFieldEnd(ctx)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
- keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx)
- tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) {
- err = tdp.Delegate.ReadMapEnd(ctx)
- tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteMapEnd(ctx)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
- elemType, size, err = tdp.Delegate.ReadListBegin(ctx)
- tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) {
- err = tdp.Delegate.ReadListEnd(ctx)
- tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteListEnd(ctx)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
- elemType, size, err = tdp.Delegate.ReadSetBegin(ctx)
- tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) {
- err = tdp.Delegate.ReadSetEnd(ctx)
- tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteSetEnd(ctx)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) {
- value, err = tdp.Delegate.ReadBool(ctx)
- tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteBool(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) {
- value, err = tdp.Delegate.ReadByte(ctx)
- tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteByte(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) {
- value, err = tdp.Delegate.ReadI16(ctx)
- tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteI16(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) {
- value, err = tdp.Delegate.ReadI32(ctx)
- tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteI32(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) {
- value, err = tdp.Delegate.ReadI64(ctx)
- tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteI64(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
- value, err = tdp.Delegate.ReadDouble(ctx)
- tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteDouble(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) {
- value, err = tdp.Delegate.ReadString(ctx)
- tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteString(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
- value, err = tdp.Delegate.ReadBinary(ctx)
- tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.WriteBinary(ctx, value)
- }
- return
-}
-func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- err = tdp.Delegate.Skip(ctx, fieldType)
- tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.Skip(ctx, fieldType)
- }
- return
-}
-func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
- err = tdp.Delegate.Flush(ctx)
- tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
- if tdp.DuplicateTo != nil {
- tdp.DuplicateTo.Flush(ctx)
- }
- return
-}
-
-func (tdp *TDebugProtocol) Transport() TTransport {
- return tdp.Delegate.Transport()
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(tdp.Delegate, conf)
- PropagateTConfiguration(tdp.DuplicateTo, conf)
-}
-
-var _ TConfigurationSetter = (*TDebugProtocol)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go
deleted file mode 100644
index cefc7ecda..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "sync"
-)
-
-type TDeserializer struct {
- Transport *TMemoryBuffer
- Protocol TProtocol
-}
-
-func NewTDeserializer() *TDeserializer {
- transport := NewTMemoryBufferLen(1024)
- protocol := NewTBinaryProtocolTransport(transport)
-
- return &TDeserializer{
- Transport: transport,
- Protocol: protocol,
- }
-}
-
-func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) {
- t.Transport.Reset()
-
- err = nil
- if _, err = t.Transport.Write([]byte(s)); err != nil {
- return
- }
- if err = msg.Read(ctx, t.Protocol); err != nil {
- return
- }
- return
-}
-
-func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) {
- t.Transport.Reset()
-
- err = nil
- if _, err = t.Transport.Write(b); err != nil {
- return
- }
- if err = msg.Read(ctx, t.Protocol); err != nil {
- return
- }
- return
-}
-
-// TDeserializerPool is the thread-safe version of TDeserializer,
-// it uses resource pool of TDeserializer under the hood.
-//
-// It must be initialized with either NewTDeserializerPool or
-// NewTDeserializerPoolSizeFactory.
-type TDeserializerPool struct {
- pool sync.Pool
-}
-
-// NewTDeserializerPool creates a new TDeserializerPool.
-//
-// NewTDeserializer can be used as the arg here.
-func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool {
- return &TDeserializerPool{
- pool: sync.Pool{
- New: func() interface{} {
- return f()
- },
- },
- }
-}
-
-// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with
-// the given size and protocol factory.
-//
-// Note that the size is not the limit. The TMemoryBuffer underneath can grow
-// larger than that. It just dictates the initial size.
-func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool {
- return &TDeserializerPool{
- pool: sync.Pool{
- New: func() interface{} {
- transport := NewTMemoryBufferLen(size)
- protocol := factory.GetProtocol(transport)
-
- return &TDeserializer{
- Transport: transport,
- Protocol: protocol,
- }
- },
- },
- }
-}
-
-func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error {
- d := t.pool.Get().(*TDeserializer)
- defer t.pool.Put(d)
- return d.ReadString(ctx, msg, s)
-}
-
-func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error {
- d := t.pool.Get().(*TDeserializer)
- defer t.pool.Put(d)
- return d.Read(ctx, msg, b)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go
deleted file mode 100644
index 630b938f0..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
-)
-
-// Generic Thrift exception
-type TException interface {
- error
-
- TExceptionType() TExceptionType
-}
-
-// Prepends additional information to an error without losing the Thrift exception interface
-func PrependError(prepend string, err error) error {
- msg := prepend + err.Error()
-
- var te TException
- if errors.As(err, &te) {
- switch te.TExceptionType() {
- case TExceptionTypeTransport:
- if t, ok := err.(TTransportException); ok {
- return prependTTransportException(prepend, t)
- }
- case TExceptionTypeProtocol:
- if t, ok := err.(TProtocolException); ok {
- return prependTProtocolException(prepend, t)
- }
- case TExceptionTypeApplication:
- var t TApplicationException
- if errors.As(err, &t) {
- return NewTApplicationException(t.TypeId(), msg)
- }
- }
-
- return wrappedTException{
- err: err,
- msg: msg,
- tExceptionType: te.TExceptionType(),
- }
- }
-
- return errors.New(msg)
-}
-
-// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
-type TExceptionType byte
-
-// TExceptionType values
-const (
- TExceptionTypeUnknown TExceptionType = iota
- TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler
- TExceptionTypeApplication // TApplicationExceptions
- TExceptionTypeProtocol // TProtocolExceptions
- TExceptionTypeTransport // TTransportExceptions
-)
-
-// WrapTException wraps an error into TException.
-//
-// If err is nil or already TException, it's returned as-is.
-// Otherwise it will be wrapped into TException with TExceptionType() returning
-// TExceptionTypeUnknown, and Unwrap() returning the original error.
-func WrapTException(err error) TException {
- if err == nil {
- return nil
- }
-
- if te, ok := err.(TException); ok {
- return te
- }
-
- return wrappedTException{
- err: err,
- msg: err.Error(),
- tExceptionType: TExceptionTypeUnknown,
- }
-}
-
-type wrappedTException struct {
- err error
- msg string
- tExceptionType TExceptionType
-}
-
-func (w wrappedTException) Error() string {
- return w.msg
-}
-
-func (w wrappedTException) TExceptionType() TExceptionType {
- return w.tExceptionType
-}
-
-func (w wrappedTException) Unwrap() error {
- return w.err
-}
-
-var _ TException = wrappedTException{}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go
deleted file mode 100644
index f683e7f54..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/binary"
- "fmt"
- "io"
-)
-
-// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead.
-const DEFAULT_MAX_LENGTH = 16384000
-
-type TFramedTransport struct {
- transport TTransport
-
- cfg *TConfiguration
-
- writeBuf bytes.Buffer
-
- reader *bufio.Reader
- readBuf bytes.Buffer
-
- buffer [4]byte
-}
-
-type tFramedTransportFactory struct {
- factory TTransportFactory
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTFramedTransportFactoryConf instead.
-func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
- return NewTFramedTransportFactoryConf(factory, &TConfiguration{
- MaxFrameSize: DEFAULT_MAX_LENGTH,
-
- noPropagation: true,
- })
-}
-
-// Deprecated: Use NewTFramedTransportFactoryConf instead.
-func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
- return NewTFramedTransportFactoryConf(factory, &TConfiguration{
- MaxFrameSize: int32(maxLength),
-
- noPropagation: true,
- })
-}
-
-func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
- PropagateTConfiguration(factory, conf)
- return &tFramedTransportFactory{
- factory: factory,
- cfg: conf,
- }
-}
-
-func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
- PropagateTConfiguration(base, p.cfg)
- tt, err := p.factory.GetTransport(base)
- if err != nil {
- return nil, err
- }
- return NewTFramedTransportConf(tt, p.cfg), nil
-}
-
-func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(p.factory, cfg)
- p.cfg = cfg
-}
-
-// Deprecated: Use NewTFramedTransportConf instead.
-func NewTFramedTransport(transport TTransport) *TFramedTransport {
- return NewTFramedTransportConf(transport, &TConfiguration{
- MaxFrameSize: DEFAULT_MAX_LENGTH,
-
- noPropagation: true,
- })
-}
-
-// Deprecated: Use NewTFramedTransportConf instead.
-func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
- return NewTFramedTransportConf(transport, &TConfiguration{
- MaxFrameSize: int32(maxLength),
-
- noPropagation: true,
- })
-}
-
-func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport {
- PropagateTConfiguration(transport, conf)
- return &TFramedTransport{
- transport: transport,
- reader: bufio.NewReader(transport),
- cfg: conf,
- }
-}
-
-func (p *TFramedTransport) Open() error {
- return p.transport.Open()
-}
-
-func (p *TFramedTransport) IsOpen() bool {
- return p.transport.IsOpen()
-}
-
-func (p *TFramedTransport) Close() error {
- return p.transport.Close()
-}
-
-func (p *TFramedTransport) Read(buf []byte) (read int, err error) {
- read, err = p.readBuf.Read(buf)
- if err != io.EOF {
- return
- }
-
- // For bytes.Buffer.Read, EOF would only happen when read is zero,
- // but still, do a sanity check,
- // in case that behavior is changed in a future version of go stdlib.
- // When that happens, just return nil error,
- // and let the caller call Read again to read the next frame.
- if read > 0 {
- return read, nil
- }
-
- // Reaching here means that the last Read finished the last frame,
- // so we need to read the next frame into readBuf now.
- if err = p.readFrame(); err != nil {
- return read, err
- }
- newRead, err := p.Read(buf[read:])
- return read + newRead, err
-}
-
-func (p *TFramedTransport) ReadByte() (c byte, err error) {
- buf := p.buffer[:1]
- _, err = p.Read(buf)
- if err != nil {
- return
- }
- c = buf[0]
- return
-}
-
-func (p *TFramedTransport) Write(buf []byte) (int, error) {
- n, err := p.writeBuf.Write(buf)
- return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *TFramedTransport) WriteByte(c byte) error {
- return p.writeBuf.WriteByte(c)
-}
-
-func (p *TFramedTransport) WriteString(s string) (n int, err error) {
- return p.writeBuf.WriteString(s)
-}
-
-func (p *TFramedTransport) Flush(ctx context.Context) error {
- size := p.writeBuf.Len()
- buf := p.buffer[:4]
- binary.BigEndian.PutUint32(buf, uint32(size))
- _, err := p.transport.Write(buf)
- if err != nil {
- p.writeBuf.Reset()
- return NewTTransportExceptionFromError(err)
- }
- if size > 0 {
- if _, err := io.Copy(p.transport, &p.writeBuf); err != nil {
- p.writeBuf.Reset()
- return NewTTransportExceptionFromError(err)
- }
- }
- err = p.transport.Flush(ctx)
- return NewTTransportExceptionFromError(err)
-}
-
-func (p *TFramedTransport) readFrame() error {
- buf := p.buffer[:4]
- if _, err := io.ReadFull(p.reader, buf); err != nil {
- return err
- }
- size := binary.BigEndian.Uint32(buf)
- if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) {
- return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
- }
- _, err := io.CopyN(&p.readBuf, p.reader, int64(size))
- return NewTTransportExceptionFromError(err)
-}
-
-func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
- return uint64(p.readBuf.Len())
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(p.transport, cfg)
- p.cfg = cfg
-}
-
-var (
- _ TConfigurationSetter = (*tFramedTransportFactory)(nil)
- _ TConfigurationSetter = (*TFramedTransport)(nil)
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go
deleted file mode 100644
index ac9bd4882..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
-)
-
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
-type (
- headerKey string
- headerKeyList int
-)
-
-// Values for headerKeyList.
-const (
- headerKeyListRead headerKeyList = iota
- headerKeyListWrite
-)
-
-// SetHeader sets a header in the context.
-func SetHeader(ctx context.Context, key, value string) context.Context {
- return context.WithValue(
- ctx,
- headerKey(key),
- value,
- )
-}
-
-// UnsetHeader unsets a previously set header in the context.
-func UnsetHeader(ctx context.Context, key string) context.Context {
- return context.WithValue(
- ctx,
- headerKey(key),
- nil,
- )
-}
-
-// GetHeader returns a value of the given header from the context.
-func GetHeader(ctx context.Context, key string) (value string, ok bool) {
- if v := ctx.Value(headerKey(key)); v != nil {
- value, ok = v.(string)
- }
- return
-}
-
-// SetReadHeaderList sets the key list of read THeaders in the context.
-func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
- return context.WithValue(
- ctx,
- headerKeyListRead,
- keys,
- )
-}
-
-// GetReadHeaderList returns the key list of read THeaders from the context.
-func GetReadHeaderList(ctx context.Context) []string {
- if v := ctx.Value(headerKeyListRead); v != nil {
- if value, ok := v.([]string); ok {
- return value
- }
- }
- return nil
-}
-
-// SetWriteHeaderList sets the key list of THeaders to write in the context.
-func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
- return context.WithValue(
- ctx,
- headerKeyListWrite,
- keys,
- )
-}
-
-// GetWriteHeaderList returns the key list of THeaders to write from the context.
-func GetWriteHeaderList(ctx context.Context) []string {
- if v := ctx.Value(headerKeyListWrite); v != nil {
- if value, ok := v.([]string); ok {
- return value
- }
- }
- return nil
-}
-
-// AddReadTHeaderToContext adds the whole THeader headers into context.
-func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
- keys := make([]string, 0, len(headers))
- for key, value := range headers {
- ctx = SetHeader(ctx, key, value)
- keys = append(keys, key)
- }
- return SetReadHeaderList(ctx, keys)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go
deleted file mode 100644
index 878041f8d..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "errors"
-)
-
-// THeaderProtocol is a thrift protocol that implements THeader:
-// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
-//
-// It supports either binary or compact protocol as the wrapped protocol.
-//
-// Most of the THeader handlings are happening inside THeaderTransport.
-type THeaderProtocol struct {
- transport *THeaderTransport
-
- // Will be initialized on first read/write.
- protocol TProtocol
-
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTHeaderProtocolConf instead.
-func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
- return newTHeaderProtocolConf(trans, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
-// transport with given TConfiguration.
-//
-// The passed in transport will be wrapped with THeaderTransport.
-//
-// Note that THeaderTransport handles frame and zlib by itself,
-// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket),
-// instead of rich transports like TZlibTransport or TFramedTransport.
-func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
- return newTHeaderProtocolConf(trans, conf)
-}
-
-func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
- t := NewTHeaderTransportConf(trans, cfg)
- p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
- PropagateTConfiguration(p, cfg)
- return &THeaderProtocol{
- transport: t,
- protocol: p,
- cfg: cfg,
- }
-}
-
-type tHeaderProtocolFactory struct {
- cfg *TConfiguration
-}
-
-func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return newTHeaderProtocolConf(trans, f.cfg)
-}
-
-func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
- f.cfg = cfg
-}
-
-// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
-func NewTHeaderProtocolFactory() TProtocolFactory {
- return NewTHeaderProtocolFactoryConf(&TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
-// TConfiguration.
-func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
- return tHeaderProtocolFactory{
- cfg: conf,
- }
-}
-
-// Transport returns the underlying transport.
-//
-// It's guaranteed to be of type *THeaderTransport.
-func (p *THeaderProtocol) Transport() TTransport {
- return p.transport
-}
-
-// GetReadHeaders returns the THeaderMap read from transport.
-func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
- return p.transport.GetReadHeaders()
-}
-
-// SetWriteHeader sets a header for write.
-func (p *THeaderProtocol) SetWriteHeader(key, value string) {
- p.transport.SetWriteHeader(key, value)
-}
-
-// ClearWriteHeaders clears all write headers previously set.
-func (p *THeaderProtocol) ClearWriteHeaders() {
- p.transport.ClearWriteHeaders()
-}
-
-// AddTransform add a transform for writing.
-func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
- return p.transport.AddTransform(transform)
-}
-
-func (p *THeaderProtocol) Flush(ctx context.Context) error {
- return p.transport.Flush(ctx)
-}
-
-func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
- newProto, err := p.transport.Protocol().GetProtocol(p.transport)
- if err != nil {
- return err
- }
- PropagateTConfiguration(newProto, p.cfg)
- p.protocol = newProto
- p.transport.SequenceID = seqID
- return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
-}
-
-func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
- if err := p.protocol.WriteMessageEnd(ctx); err != nil {
- return err
- }
- return p.transport.Flush(ctx)
-}
-
-func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
- return p.protocol.WriteStructBegin(ctx, name)
-}
-
-func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
- return p.protocol.WriteStructEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
- return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
-}
-
-func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
- return p.protocol.WriteFieldEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
- return p.protocol.WriteFieldStop(ctx)
-}
-
-func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
-}
-
-func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
- return p.protocol.WriteMapEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- return p.protocol.WriteListBegin(ctx, elemType, size)
-}
-
-func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
- return p.protocol.WriteListEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- return p.protocol.WriteSetBegin(ctx, elemType, size)
-}
-
-func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
- return p.protocol.WriteSetEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
- return p.protocol.WriteBool(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
- return p.protocol.WriteByte(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
- return p.protocol.WriteI16(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
- return p.protocol.WriteI32(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
- return p.protocol.WriteI64(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
- return p.protocol.WriteDouble(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
- return p.protocol.WriteString(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
- return p.protocol.WriteBinary(ctx, value)
-}
-
-// ReadFrame calls underlying THeaderTransport's ReadFrame function.
-func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
- return p.transport.ReadFrame(ctx)
-}
-
-func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
- if err = p.transport.ReadFrame(ctx); err != nil {
- return
- }
-
- var newProto TProtocol
- newProto, err = p.transport.Protocol().GetProtocol(p.transport)
- if err != nil {
- var tAppExc TApplicationException
- if !errors.As(err, &tAppExc) {
- return
- }
- if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
- return
- }
- if e := tAppExc.Write(ctx, p.protocol); e != nil {
- return
- }
- if e := p.protocol.WriteMessageEnd(ctx); e != nil {
- return
- }
- if e := p.transport.Flush(ctx); e != nil {
- return
- }
- return
- }
- PropagateTConfiguration(newProto, p.cfg)
- p.protocol = newProto
-
- return p.protocol.ReadMessageBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
- return p.protocol.ReadMessageEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- return p.protocol.ReadStructBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
- return p.protocol.ReadStructEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
- return p.protocol.ReadFieldBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
- return p.protocol.ReadFieldEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
- return p.protocol.ReadMapBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
- return p.protocol.ReadMapEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
- return p.protocol.ReadListBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
- return p.protocol.ReadListEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
- return p.protocol.ReadSetBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
- return p.protocol.ReadSetEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
- return p.protocol.ReadBool(ctx)
-}
-
-func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
- return p.protocol.ReadByte(ctx)
-}
-
-func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
- return p.protocol.ReadI16(ctx)
-}
-
-func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
- return p.protocol.ReadI32(ctx)
-}
-
-func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
- return p.protocol.ReadI64(ctx)
-}
-
-func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
- return p.protocol.ReadDouble(ctx)
-}
-
-func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
- return p.protocol.ReadString(ctx)
-}
-
-func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
- return p.protocol.ReadBinary(ctx)
-}
-
-func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
- return p.protocol.Skip(ctx, fieldType)
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(p.transport, cfg)
- PropagateTConfiguration(p.protocol, cfg)
- p.cfg = cfg
-}
-
-var (
- _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
- _ TConfigurationSetter = (*THeaderProtocol)(nil)
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go
deleted file mode 100644
index 6a99535a4..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go
+++ /dev/null
@@ -1,809 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bufio"
- "bytes"
- "compress/zlib"
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
-)
-
-// Size in bytes for 32-bit ints.
-const size32 = 4
-
-type headerMeta struct {
- MagicFlags uint32
- SequenceID int32
- HeaderLength uint16
-}
-
-const headerMetaSize = 10
-
-type clientType int
-
-const (
- clientUnknown clientType = iota
- clientHeaders
- clientFramedBinary
- clientUnframedBinary
- clientFramedCompact
- clientUnframedCompact
-)
-
-// Constants defined in THeader format:
-// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
-const (
- THeaderHeaderMagic uint32 = 0x0fff0000
- THeaderHeaderMask uint32 = 0xffff0000
- THeaderFlagsMask uint32 = 0x0000ffff
- THeaderMaxFrameSize uint32 = 0x3fffffff
-)
-
-// THeaderMap is the type of the header map in THeader transport.
-type THeaderMap map[string]string
-
-// THeaderProtocolID is the wrapped protocol id used in THeader.
-type THeaderProtocolID int32
-
-// Supported THeaderProtocolID values.
-const (
- THeaderProtocolBinary THeaderProtocolID = 0x00
- THeaderProtocolCompact THeaderProtocolID = 0x02
- THeaderProtocolDefault = THeaderProtocolBinary
-)
-
-// Declared globally to avoid repetitive allocations, not really used.
-var globalMemoryBuffer = NewTMemoryBuffer()
-
-// Validate checks whether the THeaderProtocolID is a valid/supported one.
-func (id THeaderProtocolID) Validate() error {
- _, err := id.GetProtocol(globalMemoryBuffer)
- return err
-}
-
-// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
-func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
- switch id {
- default:
- return nil, NewTApplicationException(
- INVALID_PROTOCOL,
- fmt.Sprintf("THeader protocol id %d not supported", id),
- )
- case THeaderProtocolBinary:
- return NewTBinaryProtocolTransport(trans), nil
- case THeaderProtocolCompact:
- return NewTCompactProtocol(trans), nil
- }
-}
-
-// THeaderTransformID defines the numeric id of the transform used.
-type THeaderTransformID int32
-
-// THeaderTransformID values.
-//
-// Values not defined here are not currently supported, namely HMAC and Snappy.
-const (
- TransformNone THeaderTransformID = iota // 0, no special handling
- TransformZlib // 1, zlib
-)
-
-var supportedTransformIDs = map[THeaderTransformID]bool{
- TransformNone: true,
- TransformZlib: true,
-}
-
-// TransformReader is an io.ReadCloser that handles transforms reading.
-type TransformReader struct {
- io.Reader
-
- closers []io.Closer
-}
-
-var _ io.ReadCloser = (*TransformReader)(nil)
-
-// NewTransformReaderWithCapacity initializes a TransformReader with expected
-// closers capacity.
-//
-// If you don't know the closers capacity beforehand, just use
-//
-// &TransformReader{Reader: baseReader}
-//
-// instead would be sufficient.
-func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
- return &TransformReader{
- Reader: baseReader,
- closers: make([]io.Closer, 0, capacity),
- }
-}
-
-// Close calls the underlying closers in appropriate order,
-// stops at and returns the first error encountered.
-func (tr *TransformReader) Close() error {
- // Call closers in reversed order
- for i := len(tr.closers) - 1; i >= 0; i-- {
- if err := tr.closers[i].Close(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// AddTransform adds a transform.
-func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
- switch id {
- default:
- return NewTApplicationException(
- INVALID_TRANSFORM,
- fmt.Sprintf("THeaderTransformID %d not supported", id),
- )
- case TransformNone:
- // no-op
- case TransformZlib:
- readCloser, err := zlib.NewReader(tr.Reader)
- if err != nil {
- return err
- }
- tr.Reader = readCloser
- tr.closers = append(tr.closers, readCloser)
- }
- return nil
-}
-
-// TransformWriter is an io.WriteCloser that handles transforms writing.
-type TransformWriter struct {
- io.Writer
-
- closers []io.Closer
-}
-
-var _ io.WriteCloser = (*TransformWriter)(nil)
-
-// NewTransformWriter creates a new TransformWriter with base writer and transforms.
-func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
- writer := &TransformWriter{
- Writer: baseWriter,
- closers: make([]io.Closer, 0, len(transforms)),
- }
- for _, id := range transforms {
- if err := writer.AddTransform(id); err != nil {
- return nil, err
- }
- }
- return writer, nil
-}
-
-// Close calls the underlying closers in appropriate order,
-// stops at and returns the first error encountered.
-func (tw *TransformWriter) Close() error {
- // Call closers in reversed order
- for i := len(tw.closers) - 1; i >= 0; i-- {
- if err := tw.closers[i].Close(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// AddTransform adds a transform.
-func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
- switch id {
- default:
- return NewTApplicationException(
- INVALID_TRANSFORM,
- fmt.Sprintf("THeaderTransformID %d not supported", id),
- )
- case TransformNone:
- // no-op
- case TransformZlib:
- writeCloser := zlib.NewWriter(tw.Writer)
- tw.Writer = writeCloser
- tw.closers = append(tw.closers, writeCloser)
- }
- return nil
-}
-
-// THeaderInfoType is the type id of the info headers.
-type THeaderInfoType int32
-
-// Supported THeaderInfoType values.
-const (
- _ THeaderInfoType = iota // Skip 0
- InfoKeyValue // 1
- // Rest of the info types are not supported.
-)
-
-// THeaderTransport is a Transport mode that implements THeader.
-//
-// Note that THeaderTransport handles frame and zlib by itself,
-// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket),
-// instead of rich transports like TZlibTransport or TFramedTransport.
-type THeaderTransport struct {
- SequenceID int32
- Flags uint32
-
- transport TTransport
-
- // THeaderMap for read and write
- readHeaders THeaderMap
- writeHeaders THeaderMap
-
- // Reading related variables.
- reader *bufio.Reader
- // When frame is detected, we read the frame fully into frameBuffer.
- frameBuffer bytes.Buffer
- // When it's non-nil, Read should read from frameReader instead of
- // reader, and EOF error indicates end of frame instead of end of all
- // transport.
- frameReader io.ReadCloser
-
- // Writing related variables
- writeBuffer bytes.Buffer
- writeTransforms []THeaderTransformID
-
- clientType clientType
- protocolID THeaderProtocolID
- cfg *TConfiguration
-
- // buffer is used in the following scenarios to avoid repetitive
- // allocations, while 4 is big enough for all those scenarios:
- //
- // * header padding (max size 4)
- // * write the frame size (size 4)
- buffer [4]byte
-}
-
-var _ TTransport = (*THeaderTransport)(nil)
-
-// Deprecated: Use NewTHeaderTransportConf instead.
-func NewTHeaderTransport(trans TTransport) *THeaderTransport {
- return NewTHeaderTransportConf(trans, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderTransportConf creates THeaderTransport from the
-// underlying transport, with given TConfiguration attached.
-//
-// If trans is already a *THeaderTransport, it will be returned as is,
-// but with TConfiguration overridden by the value passed in.
-//
-// The protocol ID in TConfiguration is only useful for client transports.
-// For servers,
-// the protocol ID will be overridden again to the one set by the client,
-// to ensure that servers always speak the same dialect as the client.
-func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
- if ht, ok := trans.(*THeaderTransport); ok {
- ht.SetTConfiguration(conf)
- return ht
- }
- PropagateTConfiguration(trans, conf)
- return &THeaderTransport{
- transport: trans,
- reader: bufio.NewReader(trans),
- writeHeaders: make(THeaderMap),
- protocolID: conf.GetTHeaderProtocolID(),
- cfg: conf,
- }
-}
-
-// Open calls the underlying transport's Open function.
-func (t *THeaderTransport) Open() error {
- return t.transport.Open()
-}
-
-// IsOpen calls the underlying transport's IsOpen function.
-func (t *THeaderTransport) IsOpen() bool {
- return t.transport.IsOpen()
-}
-
-// ReadFrame tries to read the frame header, guess the client type, and handle
-// unframed clients.
-func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
- if !t.needReadFrame() {
- // No need to read frame, skipping.
- return nil
- }
-
- // Peek and handle the first 32 bits.
- // They could either be the length field of a framed message,
- // or the first bytes of an unframed message.
- var buf []byte
- var err error
- // This is also usually the first read from a connection,
- // so handle retries around socket timeouts.
- _, deadlineSet := ctx.Deadline()
- for {
- buf, err = t.reader.Peek(size32)
- if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
- // This is I/O timeout and we still have time,
- // continue trying
- continue
- }
- // For anything else, do not retry
- break
- }
- if err != nil {
- return err
- }
-
- frameSize := binary.BigEndian.Uint32(buf)
- if frameSize&VERSION_MASK == VERSION_1 {
- t.clientType = clientUnframedBinary
- return nil
- }
- if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
- t.clientType = clientUnframedCompact
- return nil
- }
-
- // At this point it should be a framed message,
- // sanity check on frameSize then discard the peeked part.
- if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
- return NewTProtocolExceptionWithType(
- SIZE_LIMIT,
- errors.New("frame too large"),
- )
- }
- t.reader.Discard(size32)
-
- // Read the frame fully into frameBuffer.
- _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
- if err != nil {
- return err
- }
- t.frameReader = io.NopCloser(&t.frameBuffer)
-
- // Peek and handle the next 32 bits.
- buf = t.frameBuffer.Bytes()[:size32]
- version := binary.BigEndian.Uint32(buf)
- if version&THeaderHeaderMask == THeaderHeaderMagic {
- t.clientType = clientHeaders
- return t.parseHeaders(ctx, frameSize)
- }
- if version&VERSION_MASK == VERSION_1 {
- t.clientType = clientFramedBinary
- return nil
- }
- if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
- t.clientType = clientFramedCompact
- return nil
- }
- if err := t.endOfFrame(); err != nil {
- return err
- }
- return NewTProtocolExceptionWithType(
- NOT_IMPLEMENTED,
- errors.New("unsupported client transport type"),
- )
-}
-
-// endOfFrame does end of frame handling.
-//
-// It closes frameReader, and also resets frame related states.
-func (t *THeaderTransport) endOfFrame() error {
- defer func() {
- t.frameBuffer.Reset()
- t.frameReader = nil
- }()
- return t.frameReader.Close()
-}
-
-func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
- if t.clientType != clientHeaders {
- return nil
- }
-
- var err error
- var meta headerMeta
- if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
- return err
- }
- frameSize -= headerMetaSize
- t.Flags = meta.MagicFlags & THeaderFlagsMask
- t.SequenceID = meta.SequenceID
- headerLength := int64(meta.HeaderLength) * 4
- if int64(frameSize) < headerLength {
- return NewTProtocolExceptionWithType(
- SIZE_LIMIT,
- errors.New("header size is larger than the whole frame"),
- )
- }
- headerBuf := NewTMemoryBuffer()
- _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
- if err != nil {
- return err
- }
- hp := NewTCompactProtocol(headerBuf)
- hp.SetTConfiguration(t.cfg)
-
- // At this point the header is already read into headerBuf,
- // and t.frameBuffer starts from the actual payload.
- protoID, err := hp.readVarint32()
- if err != nil {
- return err
- }
- t.protocolID = THeaderProtocolID(protoID)
-
- var transformCount int32
- transformCount, err = hp.readVarint32()
- if err != nil {
- return err
- }
- if transformCount > 0 {
- reader := NewTransformReaderWithCapacity(
- &t.frameBuffer,
- int(transformCount),
- )
- t.frameReader = reader
- transformIDs := make([]THeaderTransformID, transformCount)
- for i := 0; i < int(transformCount); i++ {
- id, err := hp.readVarint32()
- if err != nil {
- return err
- }
- transformIDs[i] = THeaderTransformID(id)
- }
- // The transform IDs on the wire was added based on the order of
- // writing, so on the reading side we need to reverse the order.
- for i := transformCount - 1; i >= 0; i-- {
- id := transformIDs[i]
- if err := reader.AddTransform(id); err != nil {
- return err
- }
- }
- }
-
- // The info part does not use the transforms yet, so it's
- // important to continue using headerBuf.
- headers := make(THeaderMap)
- for {
- infoType, err := hp.readVarint32()
- if errors.Is(err, io.EOF) {
- break
- }
- if err != nil {
- return err
- }
- if THeaderInfoType(infoType) == InfoKeyValue {
- count, err := hp.readVarint32()
- if err != nil {
- return err
- }
- for i := 0; i < int(count); i++ {
- key, err := hp.ReadString(ctx)
- if err != nil {
- return err
- }
- value, err := hp.ReadString(ctx)
- if err != nil {
- return err
- }
- headers[key] = value
- }
- } else {
- // Skip reading info section on the first
- // unsupported info type.
- break
- }
- }
- t.readHeaders = headers
-
- return nil
-}
-
-func (t *THeaderTransport) needReadFrame() bool {
- if t.clientType == clientUnknown {
- // This is a new connection that's never read before.
- return true
- }
- if t.isFramed() && t.frameReader == nil {
- // We just finished the last frame.
- return true
- }
- return false
-}
-
-func (t *THeaderTransport) Read(p []byte) (read int, err error) {
- // Here using context.Background instead of a context passed in is safe.
- // First is that there's no way to pass context into this function.
- // Then, 99% of the case when calling this Read frame is already read
- // into frameReader. ReadFrame here is more of preventing bugs that
- // didn't call ReadFrame before calling Read.
- err = t.ReadFrame(context.Background())
- if err != nil {
- return
- }
- if t.frameReader != nil {
- read, err = t.frameReader.Read(p)
- if err == nil && t.frameBuffer.Len() <= 0 {
- // the last Read finished the frame, do endOfFrame
- // handling here.
- err = t.endOfFrame()
- } else if err == io.EOF {
- err = t.endOfFrame()
- if err != nil {
- return
- }
- if read == 0 {
- // Try to read the next frame when we hit EOF
- // (end of frame) immediately.
- // When we got here, it means the last read
- // finished the previous frame, but didn't
- // do endOfFrame handling yet.
- // We have to read the next frame here,
- // as otherwise we would return 0 and nil,
- // which is a case not handled well by most
- // protocol implementations.
- return t.Read(p)
- }
- }
- return
- }
- return t.reader.Read(p)
-}
-
-// Write writes data to the write buffer.
-//
-// You need to call Flush to actually write them to the transport.
-func (t *THeaderTransport) Write(p []byte) (int, error) {
- return t.writeBuffer.Write(p)
-}
-
-// Flush writes the appropriate header and the write buffer to the underlying transport.
-func (t *THeaderTransport) Flush(ctx context.Context) error {
- if t.writeBuffer.Len() == 0 {
- return nil
- }
-
- defer t.writeBuffer.Reset()
-
- switch t.clientType {
- default:
- fallthrough
- case clientUnknown:
- t.clientType = clientHeaders
- fallthrough
- case clientHeaders:
- headers := NewTMemoryBuffer()
- hp := NewTCompactProtocol(headers)
- hp.SetTConfiguration(t.cfg)
- if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- for _, transform := range t.writeTransforms {
- if _, err := hp.writeVarint32(int32(transform)); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
- if len(t.writeHeaders) > 0 {
- if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- for key, value := range t.writeHeaders {
- if err := hp.WriteString(ctx, key); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if err := hp.WriteString(ctx, value); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
- }
- padding := 4 - headers.Len()%4
- if padding < 4 {
- buf := t.buffer[:padding]
- for i := range buf {
- buf[i] = 0
- }
- if _, err := headers.Write(buf); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
-
- var payload bytes.Buffer
- meta := headerMeta{
- MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
- SequenceID: t.SequenceID,
- HeaderLength: uint16(headers.Len() / 4),
- }
- if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := io.Copy(&payload, headers); err != nil {
- return NewTTransportExceptionFromError(err)
- }
-
- writer, err := NewTransformWriter(&payload, t.writeTransforms)
- if err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if err := writer.Close(); err != nil {
- return NewTTransportExceptionFromError(err)
- }
-
- // First write frame length
- buf := t.buffer[:size32]
- binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
- if _, err := t.transport.Write(buf); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- // Then write the payload
- if _, err := io.Copy(t.transport, &payload); err != nil {
- return NewTTransportExceptionFromError(err)
- }
-
- case clientFramedBinary, clientFramedCompact:
- buf := t.buffer[:size32]
- binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
- if _, err := t.transport.Write(buf); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- fallthrough
- case clientUnframedBinary, clientUnframedCompact:
- if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
-
- select {
- default:
- case <-ctx.Done():
- return NewTTransportExceptionFromError(ctx.Err())
- }
-
- return t.transport.Flush(ctx)
-}
-
-// Close closes the transport, along with its underlying transport.
-func (t *THeaderTransport) Close() error {
- if err := t.Flush(context.Background()); err != nil {
- return err
- }
- return t.transport.Close()
-}
-
-// RemainingBytes calls underlying transport's RemainingBytes.
-//
-// Even in framed cases, because of all the possible compression transforms
-// involved, the remaining frame size is likely to be different from the actual
-// remaining readable bytes, so we don't bother to keep tracking the remaining
-// frame size by ourselves and just use the underlying transport's
-// RemainingBytes directly.
-func (t *THeaderTransport) RemainingBytes() uint64 {
- return t.transport.RemainingBytes()
-}
-
-// GetReadHeaders returns the THeaderMap read from transport.
-func (t *THeaderTransport) GetReadHeaders() THeaderMap {
- return t.readHeaders
-}
-
-// SetWriteHeader sets a header for write.
-func (t *THeaderTransport) SetWriteHeader(key, value string) {
- t.writeHeaders[key] = value
-}
-
-// ClearWriteHeaders clears all write headers previously set.
-func (t *THeaderTransport) ClearWriteHeaders() {
- t.writeHeaders = make(THeaderMap)
-}
-
-// AddTransform add a transform for writing.
-func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
- if !supportedTransformIDs[transform] {
- return NewTProtocolExceptionWithType(
- NOT_IMPLEMENTED,
- fmt.Errorf("THeaderTransformID %d not supported", transform),
- )
- }
- t.writeTransforms = append(t.writeTransforms, transform)
- return nil
-}
-
-// Protocol returns the wrapped protocol id used in this THeaderTransport.
-func (t *THeaderTransport) Protocol() THeaderProtocolID {
- switch t.clientType {
- default:
- return t.protocolID
- case clientFramedBinary, clientUnframedBinary:
- return THeaderProtocolBinary
- case clientFramedCompact, clientUnframedCompact:
- return THeaderProtocolCompact
- }
-}
-
-func (t *THeaderTransport) isFramed() bool {
- switch t.clientType {
- default:
- return false
- case clientHeaders, clientFramedBinary, clientFramedCompact:
- return true
- }
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(t.transport, cfg)
- t.cfg = cfg
-}
-
-// THeaderTransportFactory is a TTransportFactory implementation to create
-// THeaderTransport.
-//
-// It also implements TConfigurationSetter.
-type THeaderTransportFactory struct {
- // The underlying factory, could be nil.
- Factory TTransportFactory
-
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTHeaderTransportFactoryConf instead.
-func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
- return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
-// the given *TConfiguration.
-func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
- return &THeaderTransportFactory{
- Factory: factory,
-
- cfg: conf,
- }
-}
-
-// GetTransport implements TTransportFactory.
-func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- if f.Factory != nil {
- t, err := f.Factory.GetTransport(trans)
- if err != nil {
- return nil, err
- }
- return NewTHeaderTransportConf(t, f.cfg), nil
- }
- return NewTHeaderTransportConf(trans, f.cfg), nil
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(f.Factory, f.cfg)
- f.cfg = cfg
-}
-
-var (
- _ TConfigurationSetter = (*THeaderTransportFactory)(nil)
- _ TConfigurationSetter = (*THeaderTransport)(nil)
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go
deleted file mode 100644
index 9a2cc98cc..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bytes"
- "context"
- "errors"
- "io"
- "net/http"
- "net/url"
- "strconv"
-)
-
-// Default to using the shared http client. Library users are
-// free to change this global client or specify one through
-// THttpClientOptions.
-var DefaultHttpClient *http.Client = http.DefaultClient
-
-type THttpClient struct {
- client *http.Client
- response *http.Response
- url *url.URL
- requestBuffer *bytes.Buffer
- header http.Header
- nsecConnectTimeout int64
- nsecReadTimeout int64
-}
-
-type THttpClientTransportFactory struct {
- options THttpClientOptions
- url string
-}
-
-func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- if trans != nil {
- t, ok := trans.(*THttpClient)
- if ok && t.url != nil {
- return NewTHttpClientWithOptions(t.url.String(), p.options)
- }
- }
- return NewTHttpClientWithOptions(p.url, p.options)
-}
-
-type THttpClientOptions struct {
- // If nil, DefaultHttpClient is used
- Client *http.Client
-}
-
-func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
- return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
-}
-
-func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
- return &THttpClientTransportFactory{url: url, options: options}
-}
-
-func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
- parsedURL, err := url.Parse(urlstr)
- if err != nil {
- return nil, err
- }
- buf := make([]byte, 0, 1024)
- client := options.Client
- if client == nil {
- client = DefaultHttpClient
- }
- httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
- return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
-}
-
-func NewTHttpClient(urlstr string) (TTransport, error) {
- return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
-}
-
-// Set the HTTP Header for this specific Thrift Transport
-// It is important that you first assert the TTransport as a THttpClient type
-// like so:
-//
-// httpTrans := trans.(THttpClient)
-// httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
-func (p *THttpClient) SetHeader(key string, value string) {
- p.header.Add(key, value)
-}
-
-// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
-// It is important that you first assert the TTransport as a THttpClient type
-// like so:
-//
-// httpTrans := trans.(THttpClient)
-// hdrValue := httpTrans.GetHeader("User-Agent")
-func (p *THttpClient) GetHeader(key string) string {
- return p.header.Get(key)
-}
-
-// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
-// It is important that you first assert the TTransport as a THttpClient type
-// like so:
-//
-// httpTrans := trans.(THttpClient)
-// httpTrans.DelHeader("User-Agent")
-func (p *THttpClient) DelHeader(key string) {
- p.header.Del(key)
-}
-
-func (p *THttpClient) Open() error {
- // do nothing
- return nil
-}
-
-func (p *THttpClient) IsOpen() bool {
- return p.response != nil || p.requestBuffer != nil
-}
-
-func (p *THttpClient) closeResponse() error {
- var err error
- if p.response != nil && p.response.Body != nil {
- // The docs specify that if keepalive is enabled and the response body is not
- // read to completion the connection will never be returned to the pool and
- // reused. Errors are being ignored here because if the connection is invalid
- // and this fails for some reason, the Close() method will do any remaining
- // cleanup.
- io.Copy(io.Discard, p.response.Body)
-
- err = p.response.Body.Close()
- }
-
- p.response = nil
- return err
-}
-
-func (p *THttpClient) Close() error {
- if p.requestBuffer != nil {
- p.requestBuffer.Reset()
- p.requestBuffer = nil
- }
- return p.closeResponse()
-}
-
-func (p *THttpClient) Read(buf []byte) (int, error) {
- if p.response == nil {
- return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
- }
- n, err := p.response.Body.Read(buf)
- if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
- return n, nil
- }
- return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *THttpClient) ReadByte() (c byte, err error) {
- if p.response == nil {
- return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
- }
- return readByte(p.response.Body)
-}
-
-func (p *THttpClient) Write(buf []byte) (int, error) {
- if p.requestBuffer == nil {
- return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
- }
- return p.requestBuffer.Write(buf)
-}
-
-func (p *THttpClient) WriteByte(c byte) error {
- if p.requestBuffer == nil {
- return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
- }
- return p.requestBuffer.WriteByte(c)
-}
-
-func (p *THttpClient) WriteString(s string) (n int, err error) {
- if p.requestBuffer == nil {
- return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
- }
- return p.requestBuffer.WriteString(s)
-}
-
-func (p *THttpClient) Flush(ctx context.Context) error {
- // Close any previous response body to avoid leaking connections.
- p.closeResponse()
-
- // Give up the ownership of the current request buffer to http request,
- // and create a new buffer for the next request.
- buf := p.requestBuffer
- p.requestBuffer = new(bytes.Buffer)
- req, err := http.NewRequest("POST", p.url.String(), buf)
- if err != nil {
- return NewTTransportExceptionFromError(err)
- }
- req.Header = p.header
- if ctx != nil {
- req = req.WithContext(ctx)
- }
- response, err := p.client.Do(req)
- if err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if response.StatusCode != http.StatusOK {
- // Close the response to avoid leaking file descriptors. closeResponse does
- // more than just call Close(), so temporarily assign it and reuse the logic.
- p.response = response
- p.closeResponse()
-
- // TODO(pomack) log bad response
- return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
- }
- p.response = response
- return nil
-}
-
-func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
- len := p.response.ContentLength
- if len >= 0 {
- return uint64(len)
- }
-
- const maxSize = ^uint64(0)
- return maxSize // the truth is, we just don't know unless framed is used
-}
-
-// Deprecated: Use NewTHttpClientTransportFactory instead.
-func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
- return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
-}
-
-// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
-func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
- return NewTHttpClientTransportFactoryWithOptions(url, options)
-}
-
-// Deprecated: Use NewTHttpClientWithOptions instead.
-func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
- return NewTHttpClientWithOptions(urlstr, options)
-}
-
-// Deprecated: Use NewTHttpClient instead.
-func NewTHttpPostClient(urlstr string) (TTransport, error) {
- return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go
deleted file mode 100644
index bc6922762..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "compress/gzip"
- "io"
- "net/http"
- "strings"
- "sync"
-)
-
-// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function
-func NewThriftHandlerFunc(processor TProcessor,
- inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
-
- return gz(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Add("Content-Type", "application/x-thrift")
-
- transport := NewStreamTransport(r.Body, w)
- processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
- })
-}
-
-// gz transparently compresses the HTTP response if the client supports it.
-func gz(handler http.HandlerFunc) http.HandlerFunc {
- sp := &sync.Pool{
- New: func() interface{} {
- return gzip.NewWriter(nil)
- },
- }
-
- return func(w http.ResponseWriter, r *http.Request) {
- if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
- handler(w, r)
- return
- }
- w.Header().Set("Content-Encoding", "gzip")
- gz := sp.Get().(*gzip.Writer)
- gz.Reset(w)
- defer func() {
- _ = gz.Close()
- sp.Put(gz)
- }()
- gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
- handler(gzw, r)
- }
-}
-
-type gzipResponseWriter struct {
- io.Writer
- http.ResponseWriter
-}
-
-func (w gzipResponseWriter) Write(b []byte) (int, error) {
- return w.Writer.Write(b)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go
deleted file mode 100644
index 1c477990f..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bufio"
- "context"
- "io"
-)
-
-// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
-type StreamTransport struct {
- io.Reader
- io.Writer
- isReadWriter bool
- closed bool
-}
-
-type StreamTransportFactory struct {
- Reader io.Reader
- Writer io.Writer
- isReadWriter bool
-}
-
-func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- if trans != nil {
- t, ok := trans.(*StreamTransport)
- if ok {
- if t.isReadWriter {
- return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
- }
- if t.Reader != nil && t.Writer != nil {
- return NewStreamTransport(t.Reader, t.Writer), nil
- }
- if t.Reader != nil && t.Writer == nil {
- return NewStreamTransportR(t.Reader), nil
- }
- if t.Reader == nil && t.Writer != nil {
- return NewStreamTransportW(t.Writer), nil
- }
- return &StreamTransport{}, nil
- }
- }
- if p.isReadWriter {
- return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
- }
- if p.Reader != nil && p.Writer != nil {
- return NewStreamTransport(p.Reader, p.Writer), nil
- }
- if p.Reader != nil && p.Writer == nil {
- return NewStreamTransportR(p.Reader), nil
- }
- if p.Reader == nil && p.Writer != nil {
- return NewStreamTransportW(p.Writer), nil
- }
- return &StreamTransport{}, nil
-}
-
-func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
- return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
-}
-
-func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
- return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
-}
-
-func NewStreamTransportR(r io.Reader) *StreamTransport {
- return &StreamTransport{Reader: bufio.NewReader(r)}
-}
-
-func NewStreamTransportW(w io.Writer) *StreamTransport {
- return &StreamTransport{Writer: bufio.NewWriter(w)}
-}
-
-func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
- bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
- return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
-}
-
-func (p *StreamTransport) IsOpen() bool {
- return !p.closed
-}
-
-// implicitly opened on creation, can't be reopened once closed
-func (p *StreamTransport) Open() error {
- if !p.closed {
- return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
- } else {
- return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
- }
-}
-
-// Closes both the input and output streams.
-func (p *StreamTransport) Close() error {
- if p.closed {
- return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
- }
- p.closed = true
- closedReader := false
- if p.Reader != nil {
- c, ok := p.Reader.(io.Closer)
- if ok {
- e := c.Close()
- closedReader = true
- if e != nil {
- return e
- }
- }
- p.Reader = nil
- }
- if p.Writer != nil && (!closedReader || !p.isReadWriter) {
- c, ok := p.Writer.(io.Closer)
- if ok {
- e := c.Close()
- if e != nil {
- return e
- }
- }
- p.Writer = nil
- }
- return nil
-}
-
-// Flushes the underlying output stream if not null.
-func (p *StreamTransport) Flush(ctx context.Context) error {
- if p.Writer == nil {
- return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
- }
- f, ok := p.Writer.(Flusher)
- if ok {
- err := f.Flush()
- if err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
- return nil
-}
-
-func (p *StreamTransport) Read(c []byte) (n int, err error) {
- n, err = p.Reader.Read(c)
- if err != nil {
- err = NewTTransportExceptionFromError(err)
- }
- return
-}
-
-func (p *StreamTransport) ReadByte() (c byte, err error) {
- f, ok := p.Reader.(io.ByteReader)
- if ok {
- c, err = f.ReadByte()
- } else {
- c, err = readByte(p.Reader)
- }
- if err != nil {
- err = NewTTransportExceptionFromError(err)
- }
- return
-}
-
-func (p *StreamTransport) Write(c []byte) (n int, err error) {
- n, err = p.Writer.Write(c)
- if err != nil {
- err = NewTTransportExceptionFromError(err)
- }
- return
-}
-
-func (p *StreamTransport) WriteByte(c byte) (err error) {
- f, ok := p.Writer.(io.ByteWriter)
- if ok {
- err = f.WriteByte(c)
- } else {
- err = writeByte(p.Writer, c)
- }
- if err != nil {
- err = NewTTransportExceptionFromError(err)
- }
- return
-}
-
-func (p *StreamTransport) WriteString(s string) (n int, err error) {
- f, ok := p.Writer.(stringWriter)
- if ok {
- n, err = f.WriteString(s)
- } else {
- n, err = p.Writer.Write([]byte(s))
- }
- if err != nil {
- err = NewTTransportExceptionFromError(err)
- }
- return
-}
-
-func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
- const maxSize = ^uint64(0)
- return maxSize // the truth is, we just don't know unless framed is used
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.Reader, conf)
- PropagateTConfiguration(p.Writer, conf)
-}
-
-var _ TConfigurationSetter = (*StreamTransport)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go
deleted file mode 100644
index 8e59d16cf..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "encoding/base64"
- "fmt"
-)
-
-const (
- THRIFT_JSON_PROTOCOL_VERSION = 1
-)
-
-// for references to _ParseContext see tsimplejson_protocol.go
-
-// JSON protocol implementation for thrift.
-// Utilizes Simple JSON protocol
-//
-type TJSONProtocol struct {
- *TSimpleJSONProtocol
-}
-
-// Constructor
-func NewTJSONProtocol(t TTransport) *TJSONProtocol {
- v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
- v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
- v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
- return v
-}
-
-// Factory
-type TJSONProtocolFactory struct{}
-
-func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return NewTJSONProtocol(trans)
-}
-
-func NewTJSONProtocolFactory() *TJSONProtocolFactory {
- return &TJSONProtocolFactory{}
-}
-
-func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
- p.resetContextStack() // THRIFT-3735
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil {
- return e
- }
- if e := p.WriteString(ctx, name); e != nil {
- return e
- }
- if e := p.WriteByte(ctx, int8(typeId)); e != nil {
- return e
- }
- if e := p.WriteI32(ctx, seqId); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
- if e := p.OutputObjectBegin(); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error {
- return p.OutputObjectEnd()
-}
-
-func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- if e := p.WriteI16(ctx, id); e != nil {
- return e
- }
- if e := p.OutputObjectBegin(); e != nil {
- return e
- }
- s, e1 := p.TypeIdToString(typeId)
- if e1 != nil {
- return e1
- }
- if e := p.WriteString(ctx, s); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error {
- return p.OutputObjectEnd()
-}
-
-func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
-
-func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- s, e1 := p.TypeIdToString(keyType)
- if e1 != nil {
- return e1
- }
- if e := p.WriteString(ctx, s); e != nil {
- return e
- }
- s, e1 = p.TypeIdToString(valueType)
- if e1 != nil {
- return e1
- }
- if e := p.WriteString(ctx, s); e != nil {
- return e
- }
- if e := p.WriteI64(ctx, int64(size)); e != nil {
- return e
- }
- return p.OutputObjectBegin()
-}
-
-func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error {
- if e := p.OutputObjectEnd(); e != nil {
- return e
- }
- return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error {
- if b {
- return p.WriteI32(ctx, 1)
- }
- return p.WriteI32(ctx, 0)
-}
-
-func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error {
- return p.WriteI32(ctx, int32(b))
-}
-
-func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error {
- return p.WriteI32(ctx, int32(v))
-}
-
-func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error {
- return p.OutputI64(int64(v))
-}
-
-func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error {
- return p.OutputI64(int64(v))
-}
-
-func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
- return p.OutputF64(v)
-}
-
-func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error {
- return p.OutputString(v)
-}
-
-func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
- // JSON library only takes in a string,
- // not an arbitrary byte array, to ensure bytes are transmitted
- // efficiently we must convert this into a valid JSON string
- // therefore we use base64 encoding to avoid excessive escaping/quoting
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
- return NewTProtocolException(e)
- }
- writer := base64.NewEncoder(base64.StdEncoding, p.writer)
- if _, e := writer.Write(v); e != nil {
- p.writer.Reset(p.trans) // THRIFT-3735
- return NewTProtocolException(e)
- }
- if e := writer.Close(); e != nil {
- return NewTProtocolException(e)
- }
- if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
- return NewTProtocolException(e)
- }
- return p.OutputPostValue()
-}
-
-// Reading methods.
-func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
- p.resetContextStack() // THRIFT-3735
- if isNull, err := p.ParseListBegin(); isNull || err != nil {
- return name, typeId, seqId, err
- }
- version, err := p.ReadI32(ctx)
- if err != nil {
- return name, typeId, seqId, err
- }
- if version != THRIFT_JSON_PROTOCOL_VERSION {
- e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
- return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
-
- }
- if name, err = p.ReadString(ctx); err != nil {
- return name, typeId, seqId, err
- }
- bTypeId, err := p.ReadByte(ctx)
- typeId = TMessageType(bTypeId)
- if err != nil {
- return name, typeId, seqId, err
- }
- if seqId, err = p.ReadI32(ctx); err != nil {
- return name, typeId, seqId, err
- }
- return name, typeId, seqId, nil
-}
-
-func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error {
- err := p.ParseListEnd()
- return err
-}
-
-func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- _, err = p.ParseObjectStart()
- return "", err
-}
-
-func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error {
- return p.ParseObjectEnd()
-}
-
-func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
- b, _ := p.reader.Peek(1)
- if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
- return "", STOP, -1, nil
- }
- fieldId, err := p.ReadI16(ctx)
- if err != nil {
- return "", STOP, fieldId, err
- }
- if _, err = p.ParseObjectStart(); err != nil {
- return "", STOP, fieldId, err
- }
- sType, err := p.ReadString(ctx)
- if err != nil {
- return "", STOP, fieldId, err
- }
- fType, err := p.StringToTypeId(sType)
- return "", fType, fieldId, err
-}
-
-func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error {
- return p.ParseObjectEnd()
-}
-
-func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
- if isNull, e := p.ParseListBegin(); isNull || e != nil {
- return VOID, VOID, 0, e
- }
-
- // read keyType
- sKeyType, e := p.ReadString(ctx)
- if e != nil {
- return keyType, valueType, size, e
- }
- keyType, e = p.StringToTypeId(sKeyType)
- if e != nil {
- return keyType, valueType, size, e
- }
-
- // read valueType
- sValueType, e := p.ReadString(ctx)
- if e != nil {
- return keyType, valueType, size, e
- }
- valueType, e = p.StringToTypeId(sValueType)
- if e != nil {
- return keyType, valueType, size, e
- }
-
- // read size
- iSize, e := p.ReadI64(ctx)
- if e != nil {
- return keyType, valueType, size, e
- }
- size = int(iSize)
-
- _, e = p.ParseObjectStart()
- return keyType, valueType, size, e
-}
-
-func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error {
- e := p.ParseObjectEnd()
- if e != nil {
- return e
- }
- return p.ParseListEnd()
-}
-
-func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
- return p.ParseElemListBegin()
-}
-
-func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
- return p.ParseElemListBegin()
-}
-
-func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
- value, err := p.ReadI32(ctx)
- return (value != 0), err
-}
-
-func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
- v, err := p.ReadI64(ctx)
- return int8(v), err
-}
-
-func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
- v, err := p.ReadI64(ctx)
- return int16(v), err
-}
-
-func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
- v, err := p.ReadI64(ctx)
- return int32(v), err
-}
-
-func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
- v, _, err := p.ParseI64()
- return v, err
-}
-
-func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
- v, _, err := p.ParseF64()
- return v, err
-}
-
-func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) {
- var v string
- if err := p.ParsePreValue(); err != nil {
- return v, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 && f[0] == JSON_QUOTE {
- p.reader.ReadByte()
- value, err := p.ParseStringBody()
- v = value
- if err != nil {
- return v, err
- }
- } else if len(f) > 0 && f[0] == JSON_NULL[0] {
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return v, NewTProtocolException(err)
- }
- if string(b) != string(JSON_NULL) {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- } else {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return v, p.ParsePostValue()
-}
-
-func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
- var v []byte
- if err := p.ParsePreValue(); err != nil {
- return nil, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 && f[0] == JSON_QUOTE {
- p.reader.ReadByte()
- value, err := p.ParseBase64EncodedBody()
- v = value
- if err != nil {
- return v, err
- }
- } else if len(f) > 0 && f[0] == JSON_NULL[0] {
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return v, NewTProtocolException(err)
- }
- if string(b) != string(JSON_NULL) {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- } else {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
-
- return v, p.ParsePostValue()
-}
-
-func (p *TJSONProtocol) Flush(ctx context.Context) (err error) {
- err = p.writer.Flush()
- if err == nil {
- err = p.trans.Flush(ctx)
- }
- return NewTProtocolException(err)
-}
-
-func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TJSONProtocol) Transport() TTransport {
- return p.trans
-}
-
-func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- s, e1 := p.TypeIdToString(elemType)
- if e1 != nil {
- return e1
- }
- if e := p.OutputString(s); e != nil {
- return e
- }
- if e := p.OutputI64(int64(size)); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
- if isNull, e := p.ParseListBegin(); isNull || e != nil {
- return VOID, 0, e
- }
- // We don't really use the ctx in ReadString implementation,
- // so this is safe for now.
- // We might want to add context to ParseElemListBegin if we start to use
- // ctx in ReadString implementation in the future.
- sElemType, err := p.ReadString(context.Background())
- if err != nil {
- return VOID, size, err
- }
- elemType, err = p.StringToTypeId(sElemType)
- if err != nil {
- return elemType, size, err
- }
- nSize, _, err2 := p.ParseI64()
- size = int(nSize)
- return elemType, size, err2
-}
-
-func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
- if isNull, e := p.ParseListBegin(); isNull || e != nil {
- return VOID, 0, e
- }
- // We don't really use the ctx in ReadString implementation,
- // so this is safe for now.
- // We might want to add context to ParseElemListBegin if we start to use
- // ctx in ReadString implementation in the future.
- sElemType, err := p.ReadString(context.Background())
- if err != nil {
- return VOID, size, err
- }
- elemType, err = p.StringToTypeId(sElemType)
- if err != nil {
- return elemType, size, err
- }
- nSize, _, err2 := p.ParseI64()
- size = int(nSize)
- return elemType, size, err2
-}
-
-func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- s, e1 := p.TypeIdToString(elemType)
- if e1 != nil {
- return e1
- }
- if e := p.OutputString(s); e != nil {
- return e
- }
- if e := p.OutputI64(int64(size)); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
- switch byte(fieldType) {
- case BOOL:
- return "tf", nil
- case BYTE:
- return "i8", nil
- case I16:
- return "i16", nil
- case I32:
- return "i32", nil
- case I64:
- return "i64", nil
- case DOUBLE:
- return "dbl", nil
- case STRING:
- return "str", nil
- case STRUCT:
- return "rec", nil
- case MAP:
- return "map", nil
- case SET:
- return "set", nil
- case LIST:
- return "lst", nil
- }
-
- e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
- return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
-}
-
-func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
- switch fieldType {
- case "tf":
- return TType(BOOL), nil
- case "i8":
- return TType(BYTE), nil
- case "i16":
- return TType(I16), nil
- case "i32":
- return TType(I32), nil
- case "i64":
- return TType(I64), nil
- case "dbl":
- return TType(DOUBLE), nil
- case "str":
- return TType(STRING), nil
- case "rec":
- return TType(STRUCT), nil
- case "map":
- return TType(MAP), nil
- case "set":
- return TType(SET), nil
- case "lst":
- return TType(LIST), nil
- }
-
- e := fmt.Errorf("Unknown type identifier: %s", fieldType)
- return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
-}
-
-var _ TConfigurationSetter = (*TJSONProtocol)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go
deleted file mode 100644
index c42aac998..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "log"
- "os"
- "testing"
-)
-
-// Logger is a simple wrapper of a logging function.
-//
-// In reality the users might actually use different logging libraries, and they
-// are not always compatible with each other.
-//
-// Logger is meant to be a simple common ground that it's easy to wrap whatever
-// logging library they use into.
-//
-// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
-// discussion behind it.
-type Logger func(msg string)
-
-// NopLogger is a Logger implementation that does nothing.
-func NopLogger(msg string) {}
-
-// StdLogger wraps stdlib log package into a Logger.
-//
-// If logger passed in is nil, it will fallback to use stderr and default flags.
-func StdLogger(logger *log.Logger) Logger {
- if logger == nil {
- logger = log.New(os.Stderr, "", log.LstdFlags)
- }
- return func(msg string) {
- logger.Print(msg)
- }
-}
-
-// TestLogger is a Logger implementation can be used in test codes.
-//
-// It fails the test when being called.
-func TestLogger(tb testing.TB) Logger {
- return func(msg string) {
- tb.Errorf("logger called with msg: %q", msg)
- }
-}
-
-func fallbackLogger(logger Logger) Logger {
- if logger == nil {
- return StdLogger(nil)
- }
- return logger
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go
deleted file mode 100644
index 5936d2730..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bytes"
- "context"
-)
-
-// Memory buffer-based implementation of the TTransport interface.
-type TMemoryBuffer struct {
- *bytes.Buffer
- size int
-}
-
-type TMemoryBufferTransportFactory struct {
- size int
-}
-
-func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- if trans != nil {
- t, ok := trans.(*TMemoryBuffer)
- if ok && t.size > 0 {
- return NewTMemoryBufferLen(t.size), nil
- }
- }
- return NewTMemoryBufferLen(p.size), nil
-}
-
-func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
- return &TMemoryBufferTransportFactory{size: size}
-}
-
-func NewTMemoryBuffer() *TMemoryBuffer {
- return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
-}
-
-func NewTMemoryBufferLen(size int) *TMemoryBuffer {
- buf := make([]byte, 0, size)
- return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
-}
-
-func (p *TMemoryBuffer) IsOpen() bool {
- return true
-}
-
-func (p *TMemoryBuffer) Open() error {
- return nil
-}
-
-func (p *TMemoryBuffer) Close() error {
- p.Buffer.Reset()
- return nil
-}
-
-// Flushing a memory buffer is a no-op
-func (p *TMemoryBuffer) Flush(ctx context.Context) error {
- return nil
-}
-
-func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
- return uint64(p.Buffer.Len())
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go
deleted file mode 100644
index 25ab2e98a..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Message type constants in the Thrift protocol.
-type TMessageType int32
-
-const (
- INVALID_TMESSAGE_TYPE TMessageType = 0
- CALL TMessageType = 1
- REPLY TMessageType = 2
- EXCEPTION TMessageType = 3
- ONEWAY TMessageType = 4
-)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go
deleted file mode 100644
index 8a788df02..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the
-// TProcessorFunctions for that TProcessor.
-//
-// Middlewares are passed in the name of the function as set in the processor
-// map of the TProcessor.
-type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction
-
-// WrapProcessor takes an existing TProcessor and wraps each of its inner
-// TProcessorFunctions with the middlewares passed in and returns it.
-//
-// Middlewares will be called in the order that they are defined:
-//
-// 1. Middlewares[0]
-// 2. Middlewares[1]
-// ...
-// N. Middlewares[n]
-func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor {
- for name, processorFunc := range processor.ProcessorMap() {
- wrapped := processorFunc
- // Add middlewares in reverse so the first in the list is the outermost.
- for i := len(middlewares) - 1; i >= 0; i-- {
- wrapped = middlewares[i](name, wrapped)
- }
- processor.AddToProcessorMap(name, wrapped)
- }
- return processor
-}
-
-// WrappedTProcessorFunction is a convenience struct that implements the
-// TProcessorFunction interface that can be used when implementing custom
-// Middleware.
-type WrappedTProcessorFunction struct {
- // Wrapped is called by WrappedTProcessorFunction.Process and should be a
- // "wrapped" call to a base TProcessorFunc.Process call.
- Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
-}
-
-// Process implements the TProcessorFunction interface using p.Wrapped.
-func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) {
- return p.Wrapped(ctx, seqID, in, out)
-}
-
-// verify that WrappedTProcessorFunction implements TProcessorFunction
-var (
- _ TProcessorFunction = WrappedTProcessorFunction{}
- _ TProcessorFunction = (*WrappedTProcessorFunction)(nil)
-)
-
-// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls
-// with custom middleware.
-type ClientMiddleware func(TClient) TClient
-
-// WrappedTClient is a convenience struct that implements the TClient interface
-// using inner Wrapped function.
-//
-// This is provided to aid in developing ClientMiddleware.
-type WrappedTClient struct {
- Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
-}
-
-// Call implements the TClient interface by calling and returning c.Wrapped.
-func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
- return c.Wrapped(ctx, method, args, result)
-}
-
-// verify that WrappedTClient implements TClient
-var (
- _ TClient = WrappedTClient{}
- _ TClient = (*WrappedTClient)(nil)
-)
-
-// WrapClient wraps the given TClient in the given middlewares.
-//
-// Middlewares will be called in the order that they are defined:
-//
-// 1. Middlewares[0]
-// 2. Middlewares[1]
-// ...
-// N. Middlewares[n]
-func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient {
- // Add middlewares in reverse so the first in the list is the outermost.
- for i := len(middlewares) - 1; i >= 0; i-- {
- client = middlewares[i](client)
- }
- return client
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go
deleted file mode 100644
index d542b23a9..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "fmt"
- "strings"
-)
-
-/*
-TMultiplexedProtocol is a protocol-independent concrete decorator
-that allows a Thrift client to communicate with a multiplexing Thrift server,
-by prepending the service name to the function name during function calls.
-
-NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request
-from a multiplexing client.
-
-This example uses a single socket transport to invoke two services:
-
-socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
-transport := thrift.NewTFramedTransport(socket)
-protocol := thrift.NewTBinaryProtocolTransport(transport)
-
-mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
-service := Calculator.NewCalculatorClient(mp)
-
-mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
-service2 := WeatherReport.NewWeatherReportClient(mp2)
-
-err := transport.Open()
-if err != nil {
- t.Fatal("Unable to open client socket", err)
-}
-
-fmt.Println(service.Add(2,2))
-fmt.Println(service2.GetTemperature())
-*/
-
-type TMultiplexedProtocol struct {
- TProtocol
- serviceName string
-}
-
-const MULTIPLEXED_SEPARATOR = ":"
-
-func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
- return &TMultiplexedProtocol{
- TProtocol: protocol,
- serviceName: serviceName,
- }
-}
-
-func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
- if typeId == CALL || typeId == ONEWAY {
- return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
- } else {
- return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid)
- }
-}
-
-/*
-TMultiplexedProcessor is a TProcessor allowing
-a single TServer to provide multiple services.
-
-To do so, you instantiate the processor and then register additional
-processors with it, as shown in the following example:
-
-var processor = thrift.NewTMultiplexedProcessor()
-
-firstProcessor :=
-processor.RegisterProcessor("FirstService", firstProcessor)
-
-processor.registerProcessor(
- "Calculator",
- Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
-)
-
-processor.registerProcessor(
- "WeatherReport",
- WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
-)
-
-serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
-if err != nil {
- t.Fatal("Unable to create server socket", err)
-}
-server := thrift.NewTSimpleServer2(processor, serverTransport)
-server.Serve();
-*/
-
-type TMultiplexedProcessor struct {
- serviceProcessorMap map[string]TProcessor
- DefaultProcessor TProcessor
-}
-
-func NewTMultiplexedProcessor() *TMultiplexedProcessor {
- return &TMultiplexedProcessor{
- serviceProcessorMap: make(map[string]TProcessor),
- }
-}
-
-// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}"
-// to TProcessorFunction for any registered processors. If there is also a
-// DefaultProcessor, the keys for the methods on that processor will simply be
-// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and
-// other registered processors, then the keys will be a mix of both formats.
-//
-// The implementation differs with other TProcessors in that the map returned is
-// a new map, while most TProcessors just return their internal mapping directly.
-// This means that edits to the map returned by this implementation of ProcessorMap
-// will not affect the underlying mapping within the TMultiplexedProcessor.
-func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction {
- processorFuncMap := make(map[string]TProcessorFunction)
- for name, processor := range t.serviceProcessorMap {
- for method, processorFunc := range processor.ProcessorMap() {
- processorFuncName := name + MULTIPLEXED_SEPARATOR + method
- processorFuncMap[processorFuncName] = processorFunc
- }
- }
- if t.DefaultProcessor != nil {
- for method, processorFunc := range t.DefaultProcessor.ProcessorMap() {
- processorFuncMap[method] = processorFunc
- }
- }
- return processorFuncMap
-}
-
-// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on
-// the format of "name".
-//
-// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}",
-// then it sets the given TProcessorFunction on the inner TProcessor with the
-// ProcessorName component using the FunctionName component.
-//
-// If "name" is just in the format "{FunctionName}", that is to say there is no
-// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor
-// configured, then it will set the given TProcessorFunction on the DefaultProcessor
-// using the given name.
-//
-// If there is not a TProcessor available for the given name, then this function
-// does nothing. This can happen when there is no TProcessor registered for
-// the given ProcessorName or if all that is given is the FunctionName and there
-// is no DefaultProcessor set.
-func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) {
- processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR)
- if !found {
- if t.DefaultProcessor != nil {
- t.DefaultProcessor.AddToProcessorMap(processorName, processorFunc)
- }
- return
- }
- if processor, ok := t.serviceProcessorMap[processorName]; ok {
- processor.AddToProcessorMap(funcName, processorFunc)
- }
-
-}
-
-// verify that TMultiplexedProcessor implements TProcessor
-var _ TProcessor = (*TMultiplexedProcessor)(nil)
-
-func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
- t.DefaultProcessor = processor
-}
-
-func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
- if t.serviceProcessorMap == nil {
- t.serviceProcessorMap = make(map[string]TProcessor)
- }
- t.serviceProcessorMap[name] = processor
-}
-
-func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
- name, typeId, seqid, err := in.ReadMessageBegin(ctx)
- if err != nil {
- return false, NewTProtocolException(err)
- }
- if typeId != CALL && typeId != ONEWAY {
- return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId))
- }
- // extract the service name
- processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR)
- if !found {
- if t.DefaultProcessor != nil {
- smb := NewStoredMessageProtocol(in, name, typeId, seqid)
- return t.DefaultProcessor.Process(ctx, smb, out)
- }
- return false, NewTProtocolException(fmt.Errorf(
- "Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?",
- name,
- ))
- }
- actualProcessor, ok := t.serviceProcessorMap[processorName]
- if !ok {
- return false, NewTProtocolException(fmt.Errorf(
- "Service name not found: %s. Did you forget to call registerProcessor()?",
- processorName,
- ))
- }
- smb := NewStoredMessageProtocol(in, funcName, typeId, seqid)
- return actualProcessor.Process(ctx, smb, out)
-}
-
-// Protocol that use stored message for ReadMessageBegin
-type storedMessageProtocol struct {
- TProtocol
- name string
- typeId TMessageType
- seqid int32
-}
-
-func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
- return &storedMessageProtocol{protocol, name, typeId, seqid}
-}
-
-func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
- return s.name, s.typeId, s.seqid, nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go
deleted file mode 100644
index e4512d204..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "math"
- "strconv"
-)
-
-type Numeric interface {
- Int64() int64
- Int32() int32
- Int16() int16
- Byte() byte
- Int() int
- Float64() float64
- Float32() float32
- String() string
- isNull() bool
-}
-
-type numeric struct {
- iValue int64
- dValue float64
- sValue string
- isNil bool
-}
-
-var (
- INFINITY Numeric
- NEGATIVE_INFINITY Numeric
- NAN Numeric
- ZERO Numeric
- NUMERIC_NULL Numeric
-)
-
-func NewNumericFromDouble(dValue float64) Numeric {
- if math.IsInf(dValue, 1) {
- return INFINITY
- }
- if math.IsInf(dValue, -1) {
- return NEGATIVE_INFINITY
- }
- if math.IsNaN(dValue) {
- return NAN
- }
- iValue := int64(dValue)
- sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
- isNil := false
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromI64(iValue int64) Numeric {
- dValue := float64(iValue)
- sValue := strconv.FormatInt(iValue, 10)
- isNil := false
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromI32(iValue int32) Numeric {
- dValue := float64(iValue)
- sValue := strconv.FormatInt(int64(iValue), 10)
- isNil := false
- return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromString(sValue string) Numeric {
- if sValue == INFINITY.String() {
- return INFINITY
- }
- if sValue == NEGATIVE_INFINITY.String() {
- return NEGATIVE_INFINITY
- }
- if sValue == NAN.String() {
- return NAN
- }
- iValue, _ := strconv.ParseInt(sValue, 10, 64)
- dValue, _ := strconv.ParseFloat(sValue, 64)
- isNil := len(sValue) == 0
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
- if isNull {
- return NewNullNumeric()
- }
- if sValue == JSON_INFINITY {
- return INFINITY
- }
- if sValue == JSON_NEGATIVE_INFINITY {
- return NEGATIVE_INFINITY
- }
- if sValue == JSON_NAN {
- return NAN
- }
- iValue, _ := strconv.ParseInt(sValue, 10, 64)
- dValue, _ := strconv.ParseFloat(sValue, 64)
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
-}
-
-func NewNullNumeric() Numeric {
- return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
-}
-
-func (p *numeric) Int64() int64 {
- return p.iValue
-}
-
-func (p *numeric) Int32() int32 {
- return int32(p.iValue)
-}
-
-func (p *numeric) Int16() int16 {
- return int16(p.iValue)
-}
-
-func (p *numeric) Byte() byte {
- return byte(p.iValue)
-}
-
-func (p *numeric) Int() int {
- return int(p.iValue)
-}
-
-func (p *numeric) Float64() float64 {
- return p.dValue
-}
-
-func (p *numeric) Float32() float32 {
- return float32(p.dValue)
-}
-
-func (p *numeric) String() string {
- return p.sValue
-}
-
-func (p *numeric) isNull() bool {
- return p.isNil
-}
-
-func init() {
- INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
- NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
- NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
- ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
- NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go
deleted file mode 100644
index fb564ea81..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-///////////////////////////////////////////////////////////////////////////////
-// This file is home to helpers that convert from various base types to
-// respective pointer types. This is necessary because Go does not permit
-// references to constants, nor can a pointer type to base type be allocated
-// and initialized in a single expression.
-//
-// E.g., this is not allowed:
-//
-// var ip *int = &5
-//
-// But this *is* allowed:
-//
-// func IntPtr(i int) *int { return &i }
-// var ip *int = IntPtr(5)
-//
-// Since pointers to base types are commonplace as [optional] fields in
-// exported thrift structs, we factor such helpers here.
-///////////////////////////////////////////////////////////////////////////////
-
-func Float32Ptr(v float32) *float32 { return &v }
-func Float64Ptr(v float64) *float64 { return &v }
-func IntPtr(v int) *int { return &v }
-func Int8Ptr(v int8) *int8 { return &v }
-func Int16Ptr(v int16) *int16 { return &v }
-func Int32Ptr(v int32) *int32 { return &v }
-func Int64Ptr(v int64) *int64 { return &v }
-func StringPtr(v string) *string { return &v }
-func Uint32Ptr(v uint32) *uint32 { return &v }
-func Uint64Ptr(v uint64) *uint64 { return &v }
-func BoolPtr(v bool) *bool { return &v }
-func ByteSlicePtr(v []byte) *[]byte { return &v }
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go
deleted file mode 100644
index 245a3ccfc..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-// A processor is a generic object which operates upon an input stream and
-// writes to some output stream.
-type TProcessor interface {
- Process(ctx context.Context, in, out TProtocol) (bool, TException)
-
- // ProcessorMap returns a map of thrift method names to TProcessorFunctions.
- ProcessorMap() map[string]TProcessorFunction
-
- // AddToProcessorMap adds the given TProcessorFunction to the internal
- // processor map at the given key.
- //
- // If one is already set at the given key, it will be replaced with the new
- // TProcessorFunction.
- AddToProcessorMap(string, TProcessorFunction)
-}
-
-type TProcessorFunction interface {
- Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
-}
-
-// The default processor factory just returns a singleton
-// instance.
-type TProcessorFactory interface {
- GetProcessor(trans TTransport) TProcessor
-}
-
-type tProcessorFactory struct {
- processor TProcessor
-}
-
-func NewTProcessorFactory(p TProcessor) TProcessorFactory {
- return &tProcessorFactory{processor: p}
-}
-
-func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
- return p.processor
-}
-
-/**
- * The default processor factory just returns a singleton
- * instance.
- */
-type TProcessorFunctionFactory interface {
- GetProcessorFunction(trans TTransport) TProcessorFunction
-}
-
-type tProcessorFunctionFactory struct {
- processor TProcessorFunction
-}
-
-func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
- return &tProcessorFunctionFactory{processor: p}
-}
-
-func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
- return p.processor
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go
deleted file mode 100644
index 0a69bd416..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "errors"
- "fmt"
-)
-
-const (
- VERSION_MASK = 0xffff0000
- VERSION_1 = 0x80010000
-)
-
-type TProtocol interface {
- WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
- WriteMessageEnd(ctx context.Context) error
- WriteStructBegin(ctx context.Context, name string) error
- WriteStructEnd(ctx context.Context) error
- WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
- WriteFieldEnd(ctx context.Context) error
- WriteFieldStop(ctx context.Context) error
- WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
- WriteMapEnd(ctx context.Context) error
- WriteListBegin(ctx context.Context, elemType TType, size int) error
- WriteListEnd(ctx context.Context) error
- WriteSetBegin(ctx context.Context, elemType TType, size int) error
- WriteSetEnd(ctx context.Context) error
- WriteBool(ctx context.Context, value bool) error
- WriteByte(ctx context.Context, value int8) error
- WriteI16(ctx context.Context, value int16) error
- WriteI32(ctx context.Context, value int32) error
- WriteI64(ctx context.Context, value int64) error
- WriteDouble(ctx context.Context, value float64) error
- WriteString(ctx context.Context, value string) error
- WriteBinary(ctx context.Context, value []byte) error
-
- ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
- ReadMessageEnd(ctx context.Context) error
- ReadStructBegin(ctx context.Context) (name string, err error)
- ReadStructEnd(ctx context.Context) error
- ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
- ReadFieldEnd(ctx context.Context) error
- ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
- ReadMapEnd(ctx context.Context) error
- ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
- ReadListEnd(ctx context.Context) error
- ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
- ReadSetEnd(ctx context.Context) error
- ReadBool(ctx context.Context) (value bool, err error)
- ReadByte(ctx context.Context) (value int8, err error)
- ReadI16(ctx context.Context) (value int16, err error)
- ReadI32(ctx context.Context) (value int32, err error)
- ReadI64(ctx context.Context) (value int64, err error)
- ReadDouble(ctx context.Context) (value float64, err error)
- ReadString(ctx context.Context) (value string, err error)
- ReadBinary(ctx context.Context) (value []byte, err error)
-
- Skip(ctx context.Context, fieldType TType) (err error)
- Flush(ctx context.Context) (err error)
-
- Transport() TTransport
-}
-
-// The maximum recursive depth the skip() function will traverse
-const DEFAULT_RECURSION_DEPTH = 64
-
-// Skips over the next data element from the provided input TProtocol object.
-func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
- return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
-}
-
-// Skips over the next data element from the provided input TProtocol object.
-func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
-
- if maxDepth <= 0 {
- return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
- }
-
- switch fieldType {
- case BOOL:
- _, err = self.ReadBool(ctx)
- return
- case BYTE:
- _, err = self.ReadByte(ctx)
- return
- case I16:
- _, err = self.ReadI16(ctx)
- return
- case I32:
- _, err = self.ReadI32(ctx)
- return
- case I64:
- _, err = self.ReadI64(ctx)
- return
- case DOUBLE:
- _, err = self.ReadDouble(ctx)
- return
- case STRING:
- _, err = self.ReadString(ctx)
- return
- case STRUCT:
- if _, err = self.ReadStructBegin(ctx); err != nil {
- return err
- }
- for {
- _, typeId, _, _ := self.ReadFieldBegin(ctx)
- if typeId == STOP {
- break
- }
- err := Skip(ctx, self, typeId, maxDepth-1)
- if err != nil {
- return err
- }
- self.ReadFieldEnd(ctx)
- }
- return self.ReadStructEnd(ctx)
- case MAP:
- keyType, valueType, size, err := self.ReadMapBegin(ctx)
- if err != nil {
- return err
- }
- for i := 0; i < size; i++ {
- err := Skip(ctx, self, keyType, maxDepth-1)
- if err != nil {
- return err
- }
- self.Skip(ctx, valueType)
- }
- return self.ReadMapEnd(ctx)
- case SET:
- elemType, size, err := self.ReadSetBegin(ctx)
- if err != nil {
- return err
- }
- for i := 0; i < size; i++ {
- err := Skip(ctx, self, elemType, maxDepth-1)
- if err != nil {
- return err
- }
- }
- return self.ReadSetEnd(ctx)
- case LIST:
- elemType, size, err := self.ReadListBegin(ctx)
- if err != nil {
- return err
- }
- for i := 0; i < size; i++ {
- err := Skip(ctx, self, elemType, maxDepth-1)
- if err != nil {
- return err
- }
- }
- return self.ReadListEnd(ctx)
- default:
- return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType)))
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go
deleted file mode 100644
index 9dcf4bfd9..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "encoding/base64"
- "errors"
-)
-
-// Thrift Protocol exception
-type TProtocolException interface {
- TException
- TypeId() int
-}
-
-const (
- UNKNOWN_PROTOCOL_EXCEPTION = 0
- INVALID_DATA = 1
- NEGATIVE_SIZE = 2
- SIZE_LIMIT = 3
- BAD_VERSION = 4
- NOT_IMPLEMENTED = 5
- DEPTH_LIMIT = 6
-)
-
-type tProtocolException struct {
- typeId int
- err error
- msg string
-}
-
-var _ TProtocolException = (*tProtocolException)(nil)
-
-func (tProtocolException) TExceptionType() TExceptionType {
- return TExceptionTypeProtocol
-}
-
-func (p *tProtocolException) TypeId() int {
- return p.typeId
-}
-
-func (p *tProtocolException) String() string {
- return p.msg
-}
-
-func (p *tProtocolException) Error() string {
- return p.msg
-}
-
-func (p *tProtocolException) Unwrap() error {
- return p.err
-}
-
-func NewTProtocolException(err error) TProtocolException {
- if err == nil {
- return nil
- }
-
- if e, ok := err.(TProtocolException); ok {
- return e
- }
-
- if errors.As(err, new(base64.CorruptInputError)) {
- return NewTProtocolExceptionWithType(INVALID_DATA, err)
- }
-
- return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
-}
-
-func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
- if err == nil {
- return nil
- }
- return &tProtocolException{
- typeId: errType,
- err: err,
- msg: err.Error(),
- }
-}
-
-func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
- return &tProtocolException{
- typeId: err.TypeId(),
- err: err,
- msg: prepend + err.Error(),
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go
deleted file mode 100644
index c40f796d8..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Factory interface for constructing protocol instances.
-type TProtocolFactory interface {
- GetProtocol(trans TTransport) TProtocol
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go
deleted file mode 100644
index d884c6ac6..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
-)
-
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
-type responseHelperKey struct{}
-
-// TResponseHelper defines a object with a set of helper functions that can be
-// retrieved from the context object passed into server handler functions.
-//
-// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
-// from the context object.
-//
-// The zero value of TResponseHelper is valid with all helper functions being
-// no-op.
-type TResponseHelper struct {
- // THeader related functions
- *THeaderResponseHelper
-}
-
-// THeaderResponseHelper defines THeader related TResponseHelper functions.
-//
-// The zero value of *THeaderResponseHelper is valid with all helper functions
-// being no-op.
-type THeaderResponseHelper struct {
- proto *THeaderProtocol
-}
-
-// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
-// underlying TProtocol.
-func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
- if hp, ok := proto.(*THeaderProtocol); ok {
- return &THeaderResponseHelper{
- proto: hp,
- }
- }
- return nil
-}
-
-// SetHeader sets a response header.
-//
-// It's no-op if the underlying protocol/transport does not support THeader.
-func (h *THeaderResponseHelper) SetHeader(key, value string) {
- if h != nil && h.proto != nil {
- h.proto.SetWriteHeader(key, value)
- }
-}
-
-// ClearHeaders clears all the response headers previously set.
-//
-// It's no-op if the underlying protocol/transport does not support THeader.
-func (h *THeaderResponseHelper) ClearHeaders() {
- if h != nil && h.proto != nil {
- h.proto.ClearWriteHeaders()
- }
-}
-
-// GetResponseHelper retrieves the TResponseHelper implementation injected into
-// the context object.
-//
-// If no helper was found in the context object, a nop helper with ok == false
-// will be returned.
-func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
- if v := ctx.Value(responseHelperKey{}); v != nil {
- helper, ok = v.(TResponseHelper)
- }
- return
-}
-
-// SetResponseHelper injects TResponseHelper into the context object.
-func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
- return context.WithValue(ctx, responseHelperKey{}, helper)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go
deleted file mode 100644
index 83fdf29f5..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
- "io"
-)
-
-type RichTransport struct {
- TTransport
-}
-
-// Wraps Transport to provide TRichTransport interface
-func NewTRichTransport(trans TTransport) *RichTransport {
- return &RichTransport{trans}
-}
-
-func (r *RichTransport) ReadByte() (c byte, err error) {
- return readByte(r.TTransport)
-}
-
-func (r *RichTransport) WriteByte(c byte) error {
- return writeByte(r.TTransport, c)
-}
-
-func (r *RichTransport) WriteString(s string) (n int, err error) {
- return r.Write([]byte(s))
-}
-
-func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
- return r.TTransport.RemainingBytes()
-}
-
-func readByte(r io.Reader) (c byte, err error) {
- v := [1]byte{0}
- n, err := r.Read(v[0:1])
- if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
- return v[0], nil
- }
- if n > 0 && err != nil {
- return v[0], err
- }
- if err != nil {
- return 0, err
- }
- return v[0], nil
-}
-
-func writeByte(w io.Writer, c byte) error {
- v := [1]byte{c}
- _, err := w.Write(v[0:1])
- return err
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go
deleted file mode 100644
index c44979094..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "sync"
-)
-
-type TSerializer struct {
- Transport *TMemoryBuffer
- Protocol TProtocol
-}
-
-type TStruct interface {
- Write(ctx context.Context, p TProtocol) error
- Read(ctx context.Context, p TProtocol) error
-}
-
-func NewTSerializer() *TSerializer {
- transport := NewTMemoryBufferLen(1024)
- protocol := NewTBinaryProtocolTransport(transport)
-
- return &TSerializer{
- Transport: transport,
- Protocol: protocol,
- }
-}
-
-func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
- t.Transport.Reset()
-
- if err = msg.Write(ctx, t.Protocol); err != nil {
- return
- }
-
- if err = t.Protocol.Flush(ctx); err != nil {
- return
- }
- if err = t.Transport.Flush(ctx); err != nil {
- return
- }
-
- return t.Transport.String(), nil
-}
-
-func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
- t.Transport.Reset()
-
- if err = msg.Write(ctx, t.Protocol); err != nil {
- return
- }
-
- if err = t.Protocol.Flush(ctx); err != nil {
- return
- }
-
- if err = t.Transport.Flush(ctx); err != nil {
- return
- }
-
- b = append(b, t.Transport.Bytes()...)
- return
-}
-
-// TSerializerPool is the thread-safe version of TSerializer, it uses resource
-// pool of TSerializer under the hood.
-//
-// It must be initialized with either NewTSerializerPool or
-// NewTSerializerPoolSizeFactory.
-type TSerializerPool struct {
- pool sync.Pool
-}
-
-// NewTSerializerPool creates a new TSerializerPool.
-//
-// NewTSerializer can be used as the arg here.
-func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
- return &TSerializerPool{
- pool: sync.Pool{
- New: func() interface{} {
- return f()
- },
- },
- }
-}
-
-// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
-// size and protocol factory.
-//
-// Note that the size is not the limit. The TMemoryBuffer underneath can grow
-// larger than that. It just dictates the initial size.
-func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
- return &TSerializerPool{
- pool: sync.Pool{
- New: func() interface{} {
- transport := NewTMemoryBufferLen(size)
- protocol := factory.GetProtocol(transport)
-
- return &TSerializer{
- Transport: transport,
- Protocol: protocol,
- }
- },
- },
- }
-}
-
-func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
- s := t.pool.Get().(*TSerializer)
- defer t.pool.Put(s)
- return s.WriteString(ctx, msg)
-}
-
-func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
- s := t.pool.Get().(*TSerializer)
- defer t.pool.Put(s)
- return s.Write(ctx, msg)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go
deleted file mode 100644
index f813fa353..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-type TServer interface {
- ProcessorFactory() TProcessorFactory
- ServerTransport() TServerTransport
- InputTransportFactory() TTransportFactory
- OutputTransportFactory() TTransportFactory
- InputProtocolFactory() TProtocolFactory
- OutputProtocolFactory() TProtocolFactory
-
- // Starts the server
- Serve() error
- // Stops the server. This is optional on a per-implementation basis. Not
- // all servers are required to be cleanly stoppable.
- Stop() error
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go
deleted file mode 100644
index 7dd24ae36..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "net"
- "sync"
- "time"
-)
-
-type TServerSocket struct {
- listener net.Listener
- addr net.Addr
- clientTimeout time.Duration
-
- // Protects the interrupted value to make it thread safe.
- mu sync.RWMutex
- interrupted bool
-}
-
-func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
- return NewTServerSocketTimeout(listenAddr, 0)
-}
-
-func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
- addr, err := net.ResolveTCPAddr("tcp", listenAddr)
- if err != nil {
- return nil, err
- }
- return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
-}
-
-// Creates a TServerSocket from a net.Addr
-func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket {
- return &TServerSocket{addr: addr, clientTimeout: clientTimeout}
-}
-
-func (p *TServerSocket) Listen() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.IsListening() {
- return nil
- }
- l, err := net.Listen(p.addr.Network(), p.addr.String())
- if err != nil {
- return err
- }
- p.listener = l
- return nil
-}
-
-func (p *TServerSocket) Accept() (TTransport, error) {
- p.mu.RLock()
- interrupted := p.interrupted
- p.mu.RUnlock()
-
- if interrupted {
- return nil, errTransportInterrupted
- }
-
- p.mu.Lock()
- listener := p.listener
- p.mu.Unlock()
- if listener == nil {
- return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
- }
-
- conn, err := listener.Accept()
- if err != nil {
- return nil, NewTTransportExceptionFromError(err)
- }
- return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
-}
-
-// Checks whether the socket is listening.
-func (p *TServerSocket) IsListening() bool {
- return p.listener != nil
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TServerSocket) Open() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.IsListening() {
- return NewTTransportException(ALREADY_OPEN, "Server socket already open")
- }
- if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil {
- return err
- } else {
- p.listener = l
- }
- return nil
-}
-
-func (p *TServerSocket) Addr() net.Addr {
- if p.listener != nil {
- return p.listener.Addr()
- }
- return p.addr
-}
-
-func (p *TServerSocket) Close() error {
- var err error
- p.mu.Lock()
- if p.IsListening() {
- err = p.listener.Close()
- p.listener = nil
- }
- p.mu.Unlock()
- return err
-}
-
-func (p *TServerSocket) Interrupt() error {
- p.mu.Lock()
- p.interrupted = true
- p.mu.Unlock()
- p.Close()
-
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go
deleted file mode 100644
index 51c40b64a..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Server transport. Object which provides client transports.
-type TServerTransport interface {
- Listen() error
- Accept() (TTransport, error)
- Close() error
-
- // Optional method implementation. This signals to the server transport
- // that it should break out of any accept() or listen() that it is currently
- // blocked on. This method, if implemented, MUST be thread safe, as it may
- // be called from a different thread context than the other TServerTransport
- // methods.
- Interrupt() error
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go
deleted file mode 100644
index d1a815453..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go
+++ /dev/null
@@ -1,1373 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math"
- "strconv"
-)
-
-type _ParseContext int
-
-const (
- _CONTEXT_INVALID _ParseContext = iota
- _CONTEXT_IN_TOPLEVEL // 1
- _CONTEXT_IN_LIST_FIRST // 2
- _CONTEXT_IN_LIST // 3
- _CONTEXT_IN_OBJECT_FIRST // 4
- _CONTEXT_IN_OBJECT_NEXT_KEY // 5
- _CONTEXT_IN_OBJECT_NEXT_VALUE // 6
-)
-
-func (p _ParseContext) String() string {
- switch p {
- case _CONTEXT_IN_TOPLEVEL:
- return "TOPLEVEL"
- case _CONTEXT_IN_LIST_FIRST:
- return "LIST-FIRST"
- case _CONTEXT_IN_LIST:
- return "LIST"
- case _CONTEXT_IN_OBJECT_FIRST:
- return "OBJECT-FIRST"
- case _CONTEXT_IN_OBJECT_NEXT_KEY:
- return "OBJECT-NEXT-KEY"
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- return "OBJECT-NEXT-VALUE"
- }
- return "UNKNOWN-PARSE-CONTEXT"
-}
-
-type jsonContextStack []_ParseContext
-
-func (s *jsonContextStack) push(v _ParseContext) {
- *s = append(*s, v)
-}
-
-func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
- l := len(s)
- if l <= 0 {
- return
- }
- return s[l-1], true
-}
-
-func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
- l := len(*s)
- if l <= 0 {
- return
- }
- v = (*s)[l-1]
- *s = (*s)[0 : l-1]
- return v, true
-}
-
-var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
-
-// Simple JSON protocol implementation for thrift.
-//
-// This protocol produces/consumes a simple output format
-// suitable for parsing by scripting languages. It should not be
-// confused with the full-featured TJSONProtocol.
-//
-type TSimpleJSONProtocol struct {
- trans TTransport
-
- parseContextStack jsonContextStack
- dumpContext jsonContextStack
-
- writer *bufio.Writer
- reader *bufio.Reader
-}
-
-// Constructor
-func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
- v := &TSimpleJSONProtocol{trans: t,
- writer: bufio.NewWriter(t),
- reader: bufio.NewReader(t),
- }
- v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
- v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
- return v
-}
-
-// Factory
-type TSimpleJSONProtocolFactory struct{}
-
-func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return NewTSimpleJSONProtocol(trans)
-}
-
-func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
- return &TSimpleJSONProtocolFactory{}
-}
-
-var (
- JSON_COMMA []byte
- JSON_COLON []byte
- JSON_LBRACE []byte
- JSON_RBRACE []byte
- JSON_LBRACKET []byte
- JSON_RBRACKET []byte
- JSON_QUOTE byte
- JSON_QUOTE_BYTES []byte
- JSON_NULL []byte
- JSON_TRUE []byte
- JSON_FALSE []byte
- JSON_INFINITY string
- JSON_NEGATIVE_INFINITY string
- JSON_NAN string
- JSON_INFINITY_BYTES []byte
- JSON_NEGATIVE_INFINITY_BYTES []byte
- JSON_NAN_BYTES []byte
- json_nonbase_map_elem_bytes []byte
-)
-
-func init() {
- JSON_COMMA = []byte{','}
- JSON_COLON = []byte{':'}
- JSON_LBRACE = []byte{'{'}
- JSON_RBRACE = []byte{'}'}
- JSON_LBRACKET = []byte{'['}
- JSON_RBRACKET = []byte{']'}
- JSON_QUOTE = '"'
- JSON_QUOTE_BYTES = []byte{'"'}
- JSON_NULL = []byte{'n', 'u', 'l', 'l'}
- JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
- JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
- JSON_INFINITY = "Infinity"
- JSON_NEGATIVE_INFINITY = "-Infinity"
- JSON_NAN = "NaN"
- JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
- JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
- JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
- json_nonbase_map_elem_bytes = []byte{']', ',', '['}
-}
-
-func jsonQuote(s string) string {
- b, _ := json.Marshal(s)
- s1 := string(b)
- return s1
-}
-
-func jsonUnquote(s string) (string, bool) {
- s1 := new(string)
- err := json.Unmarshal([]byte(s), s1)
- return *s1, err == nil
-}
-
-func mismatch(expected, actual string) error {
- return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
-}
-
-func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
- p.resetContextStack() // THRIFT-3735
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- if e := p.WriteString(ctx, name); e != nil {
- return e
- }
- if e := p.WriteByte(ctx, int8(typeId)); e != nil {
- return e
- }
- if e := p.WriteI32(ctx, seqId); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
- if e := p.OutputObjectBegin(); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
- return p.OutputObjectEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- if e := p.WriteString(ctx, name); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
-
-func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- if e := p.WriteByte(ctx, int8(keyType)); e != nil {
- return e
- }
- if e := p.WriteByte(ctx, int8(valueType)); e != nil {
- return e
- }
- return p.WriteI32(ctx, int32(size))
-}
-
-func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
- return p.OutputBool(b)
-}
-
-func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
- return p.WriteI32(ctx, int32(b))
-}
-
-func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
- return p.WriteI32(ctx, int32(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
- return p.OutputI64(int64(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
- return p.OutputI64(int64(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
- return p.OutputF64(v)
-}
-
-func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
- return p.OutputString(v)
-}
-
-func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
- // JSON library only takes in a string,
- // not an arbitrary byte array, to ensure bytes are transmitted
- // efficiently we must convert this into a valid JSON string
- // therefore we use base64 encoding to avoid excessive escaping/quoting
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
- return NewTProtocolException(e)
- }
- writer := base64.NewEncoder(base64.StdEncoding, p.writer)
- if _, e := writer.Write(v); e != nil {
- p.writer.Reset(p.trans) // THRIFT-3735
- return NewTProtocolException(e)
- }
- if e := writer.Close(); e != nil {
- return NewTProtocolException(e)
- }
- if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
- return NewTProtocolException(e)
- }
- return p.OutputPostValue()
-}
-
-// Reading methods.
-func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
- p.resetContextStack() // THRIFT-3735
- if isNull, err := p.ParseListBegin(); isNull || err != nil {
- return name, typeId, seqId, err
- }
- if name, err = p.ReadString(ctx); err != nil {
- return name, typeId, seqId, err
- }
- bTypeId, err := p.ReadByte(ctx)
- typeId = TMessageType(bTypeId)
- if err != nil {
- return name, typeId, seqId, err
- }
- if seqId, err = p.ReadI32(ctx); err != nil {
- return name, typeId, seqId, err
- }
- return name, typeId, seqId, nil
-}
-
-func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- _, err = p.ParseObjectStart()
- return "", err
-}
-
-func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
- return p.ParseObjectEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
- if err := p.ParsePreValue(); err != nil {
- return "", STOP, 0, err
- }
- b, _ := p.reader.Peek(1)
- if len(b) > 0 {
- switch b[0] {
- case JSON_RBRACE[0]:
- return "", STOP, 0, nil
- case JSON_QUOTE:
- p.reader.ReadByte()
- name, err := p.ParseStringBody()
- // simplejson is not meant to be read back into thrift
- // - see http://wiki.apache.org/thrift/ThriftUsageJava
- // - use JSON instead
- if err != nil {
- return name, STOP, 0, err
- }
- return name, STOP, -1, p.ParsePostValue()
- }
- e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
- return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return "", STOP, 0, NewTProtocolException(io.EOF)
-}
-
-func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
- if isNull, e := p.ParseListBegin(); isNull || e != nil {
- return VOID, VOID, 0, e
- }
-
- // read keyType
- bKeyType, e := p.ReadByte(ctx)
- keyType = TType(bKeyType)
- if e != nil {
- return keyType, valueType, size, e
- }
-
- // read valueType
- bValueType, e := p.ReadByte(ctx)
- valueType = TType(bValueType)
- if e != nil {
- return keyType, valueType, size, e
- }
-
- // read size
- iSize, err := p.ReadI64(ctx)
- size = int(iSize)
- return keyType, valueType, size, err
-}
-
-func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
- return p.ParseElemListBegin()
-}
-
-func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
- return p.ParseElemListBegin()
-}
-
-func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
- var value bool
-
- if err := p.ParsePreValue(); err != nil {
- return value, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 {
- switch f[0] {
- case JSON_TRUE[0]:
- b := make([]byte, len(JSON_TRUE))
- _, err := p.reader.Read(b)
- if err != nil {
- return false, NewTProtocolException(err)
- }
- if string(b) == string(JSON_TRUE) {
- value = true
- } else {
- e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- break
- case JSON_FALSE[0]:
- b := make([]byte, len(JSON_FALSE))
- _, err := p.reader.Read(b)
- if err != nil {
- return false, NewTProtocolException(err)
- }
- if string(b) == string(JSON_FALSE) {
- value = false
- } else {
- e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- break
- case JSON_NULL[0]:
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return false, NewTProtocolException(err)
- }
- if string(b) == string(JSON_NULL) {
- value = false
- } else {
- e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- default:
- e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- return value, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
- v, err := p.ReadI64(ctx)
- return int8(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
- v, err := p.ReadI64(ctx)
- return int16(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
- v, err := p.ReadI64(ctx)
- return int32(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
- v, _, err := p.ParseI64()
- return v, err
-}
-
-func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
- v, _, err := p.ParseF64()
- return v, err
-}
-
-func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
- var v string
- if err := p.ParsePreValue(); err != nil {
- return v, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 && f[0] == JSON_QUOTE {
- p.reader.ReadByte()
- value, err := p.ParseStringBody()
- v = value
- if err != nil {
- return v, err
- }
- } else if len(f) > 0 && f[0] == JSON_NULL[0] {
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return v, NewTProtocolException(err)
- }
- if string(b) != string(JSON_NULL) {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- } else {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return v, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
- var v []byte
- if err := p.ParsePreValue(); err != nil {
- return nil, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 && f[0] == JSON_QUOTE {
- p.reader.ReadByte()
- value, err := p.ParseBase64EncodedBody()
- v = value
- if err != nil {
- return v, err
- }
- } else if len(f) > 0 && f[0] == JSON_NULL[0] {
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return v, NewTProtocolException(err)
- }
- if string(b) != string(JSON_NULL) {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- } else {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
-
- return v, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
- return NewTProtocolException(p.writer.Flush())
-}
-
-func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TSimpleJSONProtocol) Transport() TTransport {
- return p.trans
-}
-
-func (p *TSimpleJSONProtocol) OutputPreValue() error {
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- if _, e := p.write(JSON_COMMA); e != nil {
- return NewTProtocolException(e)
- }
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- if _, e := p.write(JSON_COLON); e != nil {
- return NewTProtocolException(e)
- }
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputPostValue() error {
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_LIST_FIRST:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_LIST)
- case _CONTEXT_IN_OBJECT_FIRST:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
- case _CONTEXT_IN_OBJECT_NEXT_KEY:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- var v string
- if value {
- v = string(JSON_TRUE)
- } else {
- v = string(JSON_FALSE)
- }
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- v = jsonQuote(v)
- }
- if e := p.OutputStringData(v); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputNull() error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_NULL); e != nil {
- return NewTProtocolException(e)
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- var v string
- if math.IsNaN(value) {
- v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
- } else if math.IsInf(value, 1) {
- v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
- } else if math.IsInf(value, -1) {
- v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
- } else {
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- v = strconv.FormatFloat(value, 'g', -1, 64)
- switch cxt {
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
- }
- }
- if e := p.OutputStringData(v); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- v := strconv.FormatInt(value, 10)
- switch cxt {
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- v = jsonQuote(v)
- }
- if e := p.OutputStringData(v); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputString(s string) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if e := p.OutputStringData(jsonQuote(s)); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
- _, e := p.write([]byte(s))
- return NewTProtocolException(e)
-}
-
-func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_LBRACE); e != nil {
- return NewTProtocolException(e)
- }
- p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
- if _, e := p.write(JSON_RBRACE); e != nil {
- return NewTProtocolException(e)
- }
- _, ok := p.dumpContext.pop()
- if !ok {
- return errEmptyJSONContextStack
- }
- if e := p.OutputPostValue(); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputListBegin() error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_LBRACKET); e != nil {
- return NewTProtocolException(e)
- }
- p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputListEnd() error {
- if _, e := p.write(JSON_RBRACKET); e != nil {
- return NewTProtocolException(e)
- }
- _, ok := p.dumpContext.pop()
- if !ok {
- return errEmptyJSONContextStack
- }
- if e := p.OutputPostValue(); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- if e := p.OutputI64(int64(elemType)); e != nil {
- return e
- }
- if e := p.OutputI64(int64(size)); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) ParsePreValue() error {
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- cxt, ok := p.parseContextStack.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- b, _ := p.reader.Peek(1)
- switch cxt {
- case _CONTEXT_IN_LIST:
- if len(b) > 0 {
- switch b[0] {
- case JSON_RBRACKET[0]:
- return nil
- case JSON_COMMA[0]:
- p.reader.ReadByte()
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- return nil
- default:
- e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- case _CONTEXT_IN_OBJECT_NEXT_KEY:
- if len(b) > 0 {
- switch b[0] {
- case JSON_RBRACE[0]:
- return nil
- case JSON_COMMA[0]:
- p.reader.ReadByte()
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- return nil
- default:
- e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- if len(b) > 0 {
- switch b[0] {
- case JSON_COLON[0]:
- p.reader.ReadByte()
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- return nil
- default:
- e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) ParsePostValue() error {
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- cxt, ok := p.parseContextStack.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_LIST_FIRST:
- p.parseContextStack.pop()
- p.parseContextStack.push(_CONTEXT_IN_LIST)
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- p.parseContextStack.pop()
- p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- p.parseContextStack.pop()
- p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
- for {
- b, _ := p.reader.Peek(1)
- if len(b) < 1 {
- return nil
- }
- switch b[0] {
- case ' ', '\r', '\n', '\t':
- p.reader.ReadByte()
- continue
- default:
- break
- }
- break
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
- line, err := p.reader.ReadString(JSON_QUOTE)
- if err != nil {
- return "", NewTProtocolException(err)
- }
- l := len(line)
- // count number of escapes to see if we need to keep going
- i := 1
- for ; i < l; i++ {
- if line[l-i-1] != '\\' {
- break
- }
- }
- if i&0x01 == 1 {
- v, ok := jsonUnquote(string(JSON_QUOTE) + line)
- if !ok {
- return "", NewTProtocolException(err)
- }
- return v, nil
- }
- s, err := p.ParseQuotedStringBody()
- if err != nil {
- return "", NewTProtocolException(err)
- }
- str := string(JSON_QUOTE) + line + s
- v, ok := jsonUnquote(str)
- if !ok {
- e := fmt.Errorf("Unable to parse as JSON string %s", str)
- return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return v, nil
-}
-
-func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
- line, err := p.reader.ReadString(JSON_QUOTE)
- if err != nil {
- return "", NewTProtocolException(err)
- }
- l := len(line)
- // count number of escapes to see if we need to keep going
- i := 1
- for ; i < l; i++ {
- if line[l-i-1] != '\\' {
- break
- }
- }
- if i&0x01 == 1 {
- return line, nil
- }
- s, err := p.ParseQuotedStringBody()
- if err != nil {
- return "", NewTProtocolException(err)
- }
- v := line + s
- return v, nil
-}
-
-func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
- line, err := p.reader.ReadBytes(JSON_QUOTE)
- if err != nil {
- return line, NewTProtocolException(err)
- }
- line2 := line[0 : len(line)-1]
- l := len(line2)
- if (l % 4) != 0 {
- pad := 4 - (l % 4)
- fill := [...]byte{'=', '=', '='}
- line2 = append(line2, fill[:pad]...)
- l = len(line2)
- }
- output := make([]byte, base64.StdEncoding.DecodedLen(l))
- n, err := base64.StdEncoding.Decode(output, line2)
- return output[0:n], NewTProtocolException(err)
-}
-
-func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
- if err := p.ParsePreValue(); err != nil {
- return 0, false, err
- }
- var value int64
- var isnull bool
- if p.safePeekContains(JSON_NULL) {
- p.reader.Read(make([]byte, len(JSON_NULL)))
- isnull = true
- } else {
- num, err := p.readNumeric()
- isnull = (num == nil)
- if !isnull {
- value = num.Int64()
- }
- if err != nil {
- return value, isnull, err
- }
- }
- return value, isnull, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
- if err := p.ParsePreValue(); err != nil {
- return 0, false, err
- }
- var value float64
- var isnull bool
- if p.safePeekContains(JSON_NULL) {
- p.reader.Read(make([]byte, len(JSON_NULL)))
- isnull = true
- } else {
- num, err := p.readNumeric()
- isnull = (num == nil)
- if !isnull {
- value = num.Float64()
- }
- if err != nil {
- return value, isnull, err
- }
- }
- return value, isnull, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
- if err := p.ParsePreValue(); err != nil {
- return false, err
- }
- var b []byte
- b, err := p.reader.Peek(1)
- if err != nil {
- return false, err
- }
- if len(b) > 0 && b[0] == JSON_LBRACE[0] {
- p.reader.ReadByte()
- p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
- return false, nil
- } else if p.safePeekContains(JSON_NULL) {
- return true, nil
- }
- e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
- return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
-}
-
-func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
- if isNull, err := p.readIfNull(); isNull || err != nil {
- return err
- }
- cxt, _ := p.parseContextStack.peek()
- if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
- e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- line, err := p.reader.ReadString(JSON_RBRACE[0])
- if err != nil {
- return NewTProtocolException(err)
- }
- for _, char := range line {
- switch char {
- default:
- e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- case ' ', '\n', '\r', '\t', '}':
- break
- }
- }
- p.parseContextStack.pop()
- return p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
- if e := p.ParsePreValue(); e != nil {
- return false, e
- }
- var b []byte
- b, err = p.reader.Peek(1)
- if err != nil {
- return false, err
- }
- if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
- p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
- p.reader.ReadByte()
- isNull = false
- } else if p.safePeekContains(JSON_NULL) {
- isNull = true
- } else {
- err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
- }
- return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
-}
-
-func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
- if isNull, e := p.ParseListBegin(); isNull || e != nil {
- return VOID, 0, e
- }
- bElemType, _, err := p.ParseI64()
- elemType = TType(bElemType)
- if err != nil {
- return elemType, size, err
- }
- nSize, _, err2 := p.ParseI64()
- size = int(nSize)
- return elemType, size, err2
-}
-
-func (p *TSimpleJSONProtocol) ParseListEnd() error {
- if isNull, err := p.readIfNull(); isNull || err != nil {
- return err
- }
- cxt, _ := p.parseContextStack.peek()
- if cxt != _CONTEXT_IN_LIST {
- e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- line, err := p.reader.ReadString(JSON_RBRACKET[0])
- if err != nil {
- return NewTProtocolException(err)
- }
- for _, char := range line {
- switch char {
- default:
- e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
- break
- }
- }
- p.parseContextStack.pop()
- if cxt, ok := p.parseContextStack.peek(); !ok {
- return errEmptyJSONContextStack
- } else if cxt == _CONTEXT_IN_TOPLEVEL {
- return nil
- }
- return p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
- e := p.readNonSignificantWhitespace()
- if e != nil {
- return nil, VOID, NewTProtocolException(e)
- }
- b, e := p.reader.Peek(1)
- if len(b) > 0 {
- c := b[0]
- switch c {
- case JSON_NULL[0]:
- buf := make([]byte, len(JSON_NULL))
- _, e := p.reader.Read(buf)
- if e != nil {
- return nil, VOID, NewTProtocolException(e)
- }
- if string(JSON_NULL) != string(buf) {
- e = mismatch(string(JSON_NULL), string(buf))
- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return nil, VOID, nil
- case JSON_QUOTE:
- p.reader.ReadByte()
- v, e := p.ParseStringBody()
- if e != nil {
- return v, UTF8, NewTProtocolException(e)
- }
- if v == JSON_INFINITY {
- return INFINITY, DOUBLE, nil
- } else if v == JSON_NEGATIVE_INFINITY {
- return NEGATIVE_INFINITY, DOUBLE, nil
- } else if v == JSON_NAN {
- return NAN, DOUBLE, nil
- }
- return v, UTF8, nil
- case JSON_TRUE[0]:
- buf := make([]byte, len(JSON_TRUE))
- _, e := p.reader.Read(buf)
- if e != nil {
- return true, BOOL, NewTProtocolException(e)
- }
- if string(JSON_TRUE) != string(buf) {
- e := mismatch(string(JSON_TRUE), string(buf))
- return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return true, BOOL, nil
- case JSON_FALSE[0]:
- buf := make([]byte, len(JSON_FALSE))
- _, e := p.reader.Read(buf)
- if e != nil {
- return false, BOOL, NewTProtocolException(e)
- }
- if string(JSON_FALSE) != string(buf) {
- e := mismatch(string(JSON_FALSE), string(buf))
- return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return false, BOOL, nil
- case JSON_LBRACKET[0]:
- _, e := p.reader.ReadByte()
- return make([]interface{}, 0), LIST, NewTProtocolException(e)
- case JSON_LBRACE[0]:
- _, e := p.reader.ReadByte()
- return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
- // assume numeric
- v, e := p.readNumeric()
- return v, DOUBLE, e
- default:
- e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- e = fmt.Errorf("Cannot read a single element while parsing JSON.")
- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
-
-}
-
-func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
- cont := true
- for cont {
- b, _ := p.reader.Peek(1)
- if len(b) < 1 {
- return false, nil
- }
- switch b[0] {
- default:
- return false, nil
- case JSON_NULL[0]:
- cont = false
- break
- case ' ', '\n', '\r', '\t':
- p.reader.ReadByte()
- break
- }
- }
- if p.safePeekContains(JSON_NULL) {
- p.reader.Read(make([]byte, len(JSON_NULL)))
- return true, nil
- }
- return false, nil
-}
-
-func (p *TSimpleJSONProtocol) readQuoteIfNext() {
- b, _ := p.reader.Peek(1)
- if len(b) > 0 && b[0] == JSON_QUOTE {
- p.reader.ReadByte()
- }
-}
-
-func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
- isNull, err := p.readIfNull()
- if isNull || err != nil {
- return NUMERIC_NULL, err
- }
- hasDecimalPoint := false
- nextCanBeSign := true
- hasE := false
- MAX_LEN := 40
- buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
- continueFor := true
- inQuotes := false
- for continueFor {
- c, err := p.reader.ReadByte()
- if err != nil {
- if err == io.EOF {
- break
- }
- return NUMERIC_NULL, NewTProtocolException(err)
- }
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- buf.WriteByte(c)
- nextCanBeSign = false
- case '.':
- if hasDecimalPoint {
- e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if hasE {
- e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- buf.WriteByte(c)
- hasDecimalPoint, nextCanBeSign = true, false
- case 'e', 'E':
- if hasE {
- e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- buf.WriteByte(c)
- hasE, nextCanBeSign = true, true
- case '-', '+':
- if !nextCanBeSign {
- e := fmt.Errorf("Negative sign within number")
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- buf.WriteByte(c)
- nextCanBeSign = false
- case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
- p.reader.UnreadByte()
- continueFor = false
- case JSON_NAN[0]:
- if buf.Len() == 0 {
- buffer := make([]byte, len(JSON_NAN))
- buffer[0] = c
- _, e := p.reader.Read(buffer[1:])
- if e != nil {
- return NUMERIC_NULL, NewTProtocolException(e)
- }
- if JSON_NAN != string(buffer) {
- e := mismatch(JSON_NAN, string(buffer))
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if inQuotes {
- p.readQuoteIfNext()
- }
- return NAN, nil
- } else {
- e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- case JSON_INFINITY[0]:
- if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
- buffer := make([]byte, len(JSON_INFINITY))
- buffer[0] = c
- _, e := p.reader.Read(buffer[1:])
- if e != nil {
- return NUMERIC_NULL, NewTProtocolException(e)
- }
- if JSON_INFINITY != string(buffer) {
- e := mismatch(JSON_INFINITY, string(buffer))
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if inQuotes {
- p.readQuoteIfNext()
- }
- return INFINITY, nil
- } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
- buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
- buffer[0] = JSON_NEGATIVE_INFINITY[0]
- buffer[1] = c
- _, e := p.reader.Read(buffer[2:])
- if e != nil {
- return NUMERIC_NULL, NewTProtocolException(e)
- }
- if JSON_NEGATIVE_INFINITY != string(buffer) {
- e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if inQuotes {
- p.readQuoteIfNext()
- }
- return NEGATIVE_INFINITY, nil
- } else {
- e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- case JSON_QUOTE:
- if !inQuotes {
- inQuotes = true
- } else {
- break
- }
- default:
- e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- if buf.Len() == 0 {
- e := fmt.Errorf("Unable to parse number from empty string ''")
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return NewNumericFromJSONString(buf.String(), false), nil
-}
-
-// Safely peeks into the buffer, reading only what is necessary
-func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
- for i := 0; i < len(b); i++ {
- a, _ := p.reader.Peek(i + 1)
- if len(a) < (i+1) || a[i] != b[i] {
- return false
- }
- }
- return true
-}
-
-// Reset the context stack to its initial state.
-func (p *TSimpleJSONProtocol) resetContextStack() {
- p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
- p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
-}
-
-func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
- n, err := p.writer.Write(b)
- if err != nil {
- p.writer.Reset(p.trans) // THRIFT-3735
- }
- return n, err
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.trans, conf)
-}
-
-var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go
deleted file mode 100644
index 563cbfc69..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
- "fmt"
- "io"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// ErrAbandonRequest is a special error server handler implementations can
-// return to indicate that the request has been abandoned.
-//
-// TSimpleServer will check for this error, and close the client connection
-// instead of writing the response/error back to the client.
-//
-// It shall only be used when the server handler implementation know that the
-// client already abandoned the request (by checking that the passed in context
-// is already canceled, for example).
-var ErrAbandonRequest = errors.New("request abandoned")
-
-// ServerConnectivityCheckInterval defines the ticker interval used by
-// connectivity check in thrift compiled TProcessorFunc implementations.
-//
-// It's defined as a variable instead of constant, so that thrift server
-// implementations can change its value to control the behavior.
-//
-// If it's changed to <=0, the feature will be disabled.
-var ServerConnectivityCheckInterval = time.Millisecond * 5
-
-/*
- * This is not a typical TSimpleServer as it is not blocked after accept a socket.
- * It is more like a TThreadedServer that can handle different connections in different goroutines.
- * This will work if golang user implements a conn-pool like thing in client side.
- */
-type TSimpleServer struct {
- closed int32
- wg sync.WaitGroup
- mu sync.Mutex
-
- processorFactory TProcessorFactory
- serverTransport TServerTransport
- inputTransportFactory TTransportFactory
- outputTransportFactory TTransportFactory
- inputProtocolFactory TProtocolFactory
- outputProtocolFactory TProtocolFactory
-
- // Headers to auto forward in THeaderProtocol
- forwardHeaders []string
-
- logger Logger
-}
-
-func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
- return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
-}
-
-func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
- return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
- serverTransport,
- transportFactory,
- protocolFactory,
- )
-}
-
-func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
- return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
- serverTransport,
- inputTransportFactory,
- outputTransportFactory,
- inputProtocolFactory,
- outputProtocolFactory,
- )
-}
-
-func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
- return NewTSimpleServerFactory6(processorFactory,
- serverTransport,
- NewTTransportFactory(),
- NewTTransportFactory(),
- NewTBinaryProtocolFactoryDefault(),
- NewTBinaryProtocolFactoryDefault(),
- )
-}
-
-func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
- return NewTSimpleServerFactory6(processorFactory,
- serverTransport,
- transportFactory,
- transportFactory,
- protocolFactory,
- protocolFactory,
- )
-}
-
-func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
- return &TSimpleServer{
- processorFactory: processorFactory,
- serverTransport: serverTransport,
- inputTransportFactory: inputTransportFactory,
- outputTransportFactory: outputTransportFactory,
- inputProtocolFactory: inputProtocolFactory,
- outputProtocolFactory: outputProtocolFactory,
- }
-}
-
-func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
- return p.processorFactory
-}
-
-func (p *TSimpleServer) ServerTransport() TServerTransport {
- return p.serverTransport
-}
-
-func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
- return p.inputTransportFactory
-}
-
-func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
- return p.outputTransportFactory
-}
-
-func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
- return p.inputProtocolFactory
-}
-
-func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
- return p.outputProtocolFactory
-}
-
-func (p *TSimpleServer) Listen() error {
- return p.serverTransport.Listen()
-}
-
-// SetForwardHeaders sets the list of header keys that will be auto forwarded
-// while using THeaderProtocol.
-//
-// "forward" means that when the server is also a client to other upstream
-// thrift servers, the context object user gets in the processor functions will
-// have both read and write headers set, with write headers being forwarded.
-// Users can always override the write headers by calling SetWriteHeaderList
-// before calling thrift client functions.
-func (p *TSimpleServer) SetForwardHeaders(headers []string) {
- size := len(headers)
- if size == 0 {
- p.forwardHeaders = nil
- return
- }
-
- keys := make([]string, size)
- copy(keys, headers)
- p.forwardHeaders = keys
-}
-
-// SetLogger sets the logger used by this TSimpleServer.
-//
-// If no logger was set before Serve is called, a default logger using standard
-// log library will be used.
-func (p *TSimpleServer) SetLogger(logger Logger) {
- p.logger = logger
-}
-
-func (p *TSimpleServer) innerAccept() (int32, error) {
- client, err := p.serverTransport.Accept()
- p.mu.Lock()
- defer p.mu.Unlock()
- closed := atomic.LoadInt32(&p.closed)
- if closed != 0 {
- return closed, nil
- }
- if err != nil {
- return 0, err
- }
- if client != nil {
- p.wg.Add(1)
- go func() {
- defer p.wg.Done()
- if err := p.processRequests(client); err != nil {
- p.logger(fmt.Sprintf("error processing request: %v", err))
- }
- }()
- }
- return 0, nil
-}
-
-func (p *TSimpleServer) AcceptLoop() error {
- for {
- closed, err := p.innerAccept()
- if err != nil {
- return err
- }
- if closed != 0 {
- return nil
- }
- }
-}
-
-func (p *TSimpleServer) Serve() error {
- p.logger = fallbackLogger(p.logger)
-
- err := p.Listen()
- if err != nil {
- return err
- }
- p.AcceptLoop()
- return nil
-}
-
-func (p *TSimpleServer) Stop() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if atomic.LoadInt32(&p.closed) != 0 {
- return nil
- }
- atomic.StoreInt32(&p.closed, 1)
- p.serverTransport.Interrupt()
- p.wg.Wait()
- return nil
-}
-
-// If err is actually EOF, return nil, otherwise return err as-is.
-func treatEOFErrorsAsNil(err error) error {
- if err == nil {
- return nil
- }
- if errors.Is(err, io.EOF) {
- return nil
- }
- var te TTransportException
- if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
- return nil
- }
- return err
-}
-
-func (p *TSimpleServer) processRequests(client TTransport) (err error) {
- defer func() {
- err = treatEOFErrorsAsNil(err)
- }()
-
- processor := p.processorFactory.GetProcessor(client)
- inputTransport, err := p.inputTransportFactory.GetTransport(client)
- if err != nil {
- return err
- }
- inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
- var outputTransport TTransport
- var outputProtocol TProtocol
-
- // for THeaderProtocol, we must use the same protocol instance for
- // input and output so that the response is in the same dialect that
- // the server detected the request was in.
- headerProtocol, ok := inputProtocol.(*THeaderProtocol)
- if ok {
- outputProtocol = inputProtocol
- } else {
- oTrans, err := p.outputTransportFactory.GetTransport(client)
- if err != nil {
- return err
- }
- outputTransport = oTrans
- outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
- }
-
- if inputTransport != nil {
- defer inputTransport.Close()
- }
- if outputTransport != nil {
- defer outputTransport.Close()
- }
- for {
- if atomic.LoadInt32(&p.closed) != 0 {
- return nil
- }
-
- ctx := SetResponseHelper(
- defaultCtx,
- TResponseHelper{
- THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
- },
- )
- if headerProtocol != nil {
- // We need to call ReadFrame here, otherwise we won't
- // get any headers on the AddReadTHeaderToContext call.
- //
- // ReadFrame is safe to be called multiple times so it
- // won't break when it's called again later when we
- // actually start to read the message.
- if err := headerProtocol.ReadFrame(ctx); err != nil {
- return err
- }
- ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
- ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
- }
-
- ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
- if errors.Is(err, ErrAbandonRequest) {
- return client.Close()
- }
- if errors.As(err, new(TTransportException)) && err != nil {
- return err
- }
- var tae TApplicationException
- if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
- continue
- }
- if !ok {
- break
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go
deleted file mode 100644
index e911bf166..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "net"
- "time"
-)
-
-type TSocket struct {
- conn *socketConn
- addr net.Addr
- cfg *TConfiguration
-
- connectTimeout time.Duration
- socketTimeout time.Duration
-}
-
-// Deprecated: Use NewTSocketConf instead.
-func NewTSocket(hostPort string) (*TSocket, error) {
- return NewTSocketConf(hostPort, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port.
-//
-// Example:
-//
-// trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{
-// ConnectTimeout: time.Second, // Use 0 for no timeout
-// SocketTimeout: time.Second, // Use 0 for no timeout
-// })
-func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) {
- addr, err := net.ResolveTCPAddr("tcp", hostPort)
- if err != nil {
- return nil, err
- }
- return NewTSocketFromAddrConf(addr, conf), nil
-}
-
-// Deprecated: Use NewTSocketConf instead.
-func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) {
- return NewTSocketConf(hostPort, &TConfiguration{
- ConnectTimeout: connTimeout,
- SocketTimeout: soTimeout,
-
- noPropagation: true,
- })
-}
-
-// NewTSocketFromAddrConf creates a TSocket from a net.Addr
-func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket {
- return &TSocket{
- addr: addr,
- cfg: conf,
- }
-}
-
-// Deprecated: Use NewTSocketFromAddrConf instead.
-func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket {
- return NewTSocketFromAddrConf(addr, &TConfiguration{
- ConnectTimeout: connTimeout,
- SocketTimeout: soTimeout,
-
- noPropagation: true,
- })
-}
-
-// NewTSocketFromConnConf creates a TSocket from an existing net.Conn.
-func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket {
- return &TSocket{
- conn: wrapSocketConn(conn),
- addr: conn.RemoteAddr(),
- cfg: conf,
- }
-}
-
-// Deprecated: Use NewTSocketFromConnConf instead.
-func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket {
- return NewTSocketFromConnConf(conn, &TConfiguration{
- SocketTimeout: socketTimeout,
-
- noPropagation: true,
- })
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-//
-// It can be used to set connect and socket timeouts.
-func (p *TSocket) SetTConfiguration(conf *TConfiguration) {
- p.cfg = conf
-}
-
-// Sets the connect timeout
-func (p *TSocket) SetConnTimeout(timeout time.Duration) error {
- if p.cfg == nil {
- p.cfg = &TConfiguration{
- noPropagation: true,
- }
- }
- p.cfg.ConnectTimeout = timeout
- return nil
-}
-
-// Sets the socket timeout
-func (p *TSocket) SetSocketTimeout(timeout time.Duration) error {
- if p.cfg == nil {
- p.cfg = &TConfiguration{
- noPropagation: true,
- }
- }
- p.cfg.SocketTimeout = timeout
- return nil
-}
-
-func (p *TSocket) pushDeadline(read, write bool) {
- var t time.Time
- if timeout := p.cfg.GetSocketTimeout(); timeout > 0 {
- t = time.Now().Add(time.Duration(timeout))
- }
- if read && write {
- p.conn.SetDeadline(t)
- } else if read {
- p.conn.SetReadDeadline(t)
- } else if write {
- p.conn.SetWriteDeadline(t)
- }
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TSocket) Open() error {
- if p.conn.isValid() {
- return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
- }
- if p.addr == nil {
- return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
- }
- if len(p.addr.Network()) == 0 {
- return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
- }
- if len(p.addr.String()) == 0 {
- return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
- }
- var err error
- if p.conn, err = createSocketConnFromReturn(net.DialTimeout(
- p.addr.Network(),
- p.addr.String(),
- p.cfg.GetConnectTimeout(),
- )); err != nil {
- return NewTTransportException(NOT_OPEN, err.Error())
- }
- return nil
-}
-
-// Retrieve the underlying net.Conn
-func (p *TSocket) Conn() net.Conn {
- return p.conn
-}
-
-// Returns true if the connection is open
-func (p *TSocket) IsOpen() bool {
- return p.conn.IsOpen()
-}
-
-// Closes the socket.
-func (p *TSocket) Close() error {
- // Close the socket
- if p.conn != nil {
- err := p.conn.Close()
- if err != nil {
- return err
- }
- p.conn = nil
- }
- return nil
-}
-
-//Returns the remote address of the socket.
-func (p *TSocket) Addr() net.Addr {
- return p.addr
-}
-
-func (p *TSocket) Read(buf []byte) (int, error) {
- if !p.conn.isValid() {
- return 0, NewTTransportException(NOT_OPEN, "Connection not open")
- }
- p.pushDeadline(true, false)
- // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between
- // p.pushDeadline and p.conn.Read could cause the deadline set inside
- // p.pushDeadline being reset, thus need to be avoided.
- n, err := p.conn.Read(buf)
- return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *TSocket) Write(buf []byte) (int, error) {
- if !p.conn.isValid() {
- return 0, NewTTransportException(NOT_OPEN, "Connection not open")
- }
- p.pushDeadline(false, true)
- return p.conn.Write(buf)
-}
-
-func (p *TSocket) Flush(ctx context.Context) error {
- return nil
-}
-
-func (p *TSocket) Interrupt() error {
- if !p.conn.isValid() {
- return nil
- }
- return p.conn.Close()
-}
-
-func (p *TSocket) RemainingBytes() (num_bytes uint64) {
- const maxSize = ^uint64(0)
- return maxSize // the truth is, we just don't know unless framed is used
-}
-
-var _ TConfigurationSetter = (*TSocket)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go
deleted file mode 100644
index c1cc30c6c..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "net"
-)
-
-// socketConn is a wrapped net.Conn that tries to do connectivity check.
-type socketConn struct {
- net.Conn
-
- buffer [1]byte
-}
-
-var _ net.Conn = (*socketConn)(nil)
-
-// createSocketConnFromReturn is a language sugar to help create socketConn from
-// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc.
-func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) {
- if err != nil {
- return nil, err
- }
- return &socketConn{
- Conn: conn,
- }, nil
-}
-
-// wrapSocketConn wraps an existing net.Conn into *socketConn.
-func wrapSocketConn(conn net.Conn) *socketConn {
- // In case conn is already wrapped,
- // return it as-is and avoid double wrapping.
- if sc, ok := conn.(*socketConn); ok {
- return sc
- }
-
- return &socketConn{
- Conn: conn,
- }
-}
-
-// isValid checks whether there's a valid connection.
-//
-// It's nil safe, and returns false if sc itself is nil, or if the underlying
-// connection is nil.
-//
-// It's the same as the previous implementation of TSocket.IsOpen and
-// TSSLSocket.IsOpen before we added connectivity check.
-func (sc *socketConn) isValid() bool {
- return sc != nil && sc.Conn != nil
-}
-
-// IsOpen checks whether the connection is open.
-//
-// It's nil safe, and returns false if sc itself is nil, or if the underlying
-// connection is nil.
-//
-// Otherwise, it tries to do a connectivity check and returns the result.
-//
-// It also has the side effect of resetting the previously set read deadline on
-// the socket. As a result, it shouldn't be called between setting read deadline
-// and doing actual read.
-func (sc *socketConn) IsOpen() bool {
- if !sc.isValid() {
- return false
- }
- return sc.checkConn() == nil
-}
-
-// Read implements io.Reader.
-//
-// On Windows, it behaves the same as the underlying net.Conn.Read.
-//
-// On non-Windows, it treats len(p) == 0 as a connectivity check instead of
-// readability check, which means instead of blocking until there's something to
-// read (readability check), or always return (0, nil) (the default behavior of
-// go's stdlib implementation on non-Windows), it never blocks, and will return
-// an error if the connection is lost.
-func (sc *socketConn) Read(p []byte) (n int, err error) {
- if len(p) == 0 {
- return 0, sc.read0()
- }
-
- return sc.Conn.Read(p)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go
deleted file mode 100644
index f5fab3ab6..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// +build !windows
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
- "io"
- "syscall"
- "time"
-)
-
-// We rely on this variable to be the zero time,
-// but define it as global variable to avoid repetitive allocations.
-// Please DO NOT mutate this variable in any way.
-var zeroTime time.Time
-
-func (sc *socketConn) read0() error {
- return sc.checkConn()
-}
-
-func (sc *socketConn) checkConn() error {
- syscallConn, ok := sc.Conn.(syscall.Conn)
- if !ok {
- // No way to check, return nil
- return nil
- }
-
- // The reading about to be done here is non-blocking so we don't really
- // need a read deadline. We just need to clear the previously set read
- // deadline, if any.
- sc.Conn.SetReadDeadline(zeroTime)
-
- rc, err := syscallConn.SyscallConn()
- if err != nil {
- return err
- }
-
- var n int
-
- if readErr := rc.Read(func(fd uintptr) bool {
- n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT)
- return true
- }); readErr != nil {
- return readErr
- }
-
- if n > 0 {
- // We got something, which means we are good
- return nil
- }
-
- if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) {
- // This means the connection is still open but we don't have
- // anything to read right now.
- return nil
- }
-
- if err != nil {
- return err
- }
-
- // At this point, it means the other side already closed the connection.
- return io.EOF
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go
deleted file mode 100644
index 679838c3b..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build windows
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-func (sc *socketConn) read0() error {
- // On windows, we fallback to the default behavior of reading 0 bytes.
- var p []byte
- _, err := sc.Conn.Read(p)
- return err
-}
-
-func (sc *socketConn) checkConn() error {
- // On windows, we always return nil for this check.
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go
deleted file mode 100644
index 907afca32..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "crypto/tls"
- "net"
- "time"
-)
-
-type TSSLServerSocket struct {
- listener net.Listener
- addr net.Addr
- clientTimeout time.Duration
- interrupted bool
- cfg *tls.Config
-}
-
-func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
- return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
-}
-
-func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
- if cfg.MinVersion == 0 {
- cfg.MinVersion = tls.VersionTLS10
- }
- addr, err := net.ResolveTCPAddr("tcp", listenAddr)
- if err != nil {
- return nil, err
- }
- return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
-}
-
-func (p *TSSLServerSocket) Listen() error {
- if p.IsListening() {
- return nil
- }
- l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
- if err != nil {
- return err
- }
- p.listener = l
- return nil
-}
-
-func (p *TSSLServerSocket) Accept() (TTransport, error) {
- if p.interrupted {
- return nil, errTransportInterrupted
- }
- if p.listener == nil {
- return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
- }
- conn, err := p.listener.Accept()
- if err != nil {
- return nil, NewTTransportExceptionFromError(err)
- }
- return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
-}
-
-// Checks whether the socket is listening.
-func (p *TSSLServerSocket) IsListening() bool {
- return p.listener != nil
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TSSLServerSocket) Open() error {
- if p.IsListening() {
- return NewTTransportException(ALREADY_OPEN, "Server socket already open")
- }
- if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
- return err
- } else {
- p.listener = l
- }
- return nil
-}
-
-func (p *TSSLServerSocket) Addr() net.Addr {
- return p.addr
-}
-
-func (p *TSSLServerSocket) Close() error {
- defer func() {
- p.listener = nil
- }()
- if p.IsListening() {
- return p.listener.Close()
- }
- return nil
-}
-
-func (p *TSSLServerSocket) Interrupt() error {
- p.interrupted = true
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go
deleted file mode 100644
index 6359a74ce..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "crypto/tls"
- "net"
- "time"
-)
-
-type TSSLSocket struct {
- conn *socketConn
- // hostPort contains host:port (e.g. "asdf.com:12345"). The field is
- // only valid if addr is nil.
- hostPort string
- // addr is nil when hostPort is not "", and is only used when the
- // TSSLSocket is constructed from a net.Addr.
- addr net.Addr
-
- cfg *TConfiguration
-}
-
-// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port.
-//
-// Example:
-//
-// trans, err := thrift.NewTSSLSocketConf("localhost:9090", nil, &TConfiguration{
-// ConnectTimeout: time.Second, // Use 0 for no timeout
-// SocketTimeout: time.Second, // Use 0 for no timeout
-// })
-func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) {
- if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 {
- cfg.MinVersion = tls.VersionTLS10
- }
- return &TSSLSocket{
- hostPort: hostPort,
- cfg: conf,
- }, nil
-}
-
-// Deprecated: Use NewTSSLSocketConf instead.
-func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
- return NewTSSLSocketConf(hostPort, &TConfiguration{
- TLSConfig: cfg,
-
- noPropagation: true,
- })
-}
-
-// Deprecated: Use NewTSSLSocketConf instead.
-func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) {
- return NewTSSLSocketConf(hostPort, &TConfiguration{
- ConnectTimeout: connectTimeout,
- SocketTimeout: socketTimeout,
- TLSConfig: cfg,
-
- noPropagation: true,
- })
-}
-
-// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr.
-func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket {
- return &TSSLSocket{
- addr: addr,
- cfg: conf,
- }
-}
-
-// Deprecated: Use NewTSSLSocketFromAddrConf instead.
-func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket {
- return NewTSSLSocketFromAddrConf(addr, &TConfiguration{
- ConnectTimeout: connectTimeout,
- SocketTimeout: socketTimeout,
- TLSConfig: cfg,
-
- noPropagation: true,
- })
-}
-
-// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn.
-func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket {
- return &TSSLSocket{
- conn: wrapSocketConn(conn),
- addr: conn.RemoteAddr(),
- cfg: conf,
- }
-}
-
-// Deprecated: Use NewTSSLSocketFromConnConf instead.
-func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket {
- return NewTSSLSocketFromConnConf(conn, &TConfiguration{
- SocketTimeout: socketTimeout,
- TLSConfig: cfg,
-
- noPropagation: true,
- })
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-//
-// It can be used to change connect and socket timeouts.
-func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) {
- p.cfg = conf
-}
-
-// Sets the connect timeout
-func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error {
- if p.cfg == nil {
- p.cfg = &TConfiguration{}
- }
- p.cfg.ConnectTimeout = timeout
- return nil
-}
-
-// Sets the socket timeout
-func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error {
- if p.cfg == nil {
- p.cfg = &TConfiguration{}
- }
- p.cfg.SocketTimeout = timeout
- return nil
-}
-
-func (p *TSSLSocket) pushDeadline(read, write bool) {
- var t time.Time
- if timeout := p.cfg.GetSocketTimeout(); timeout > 0 {
- t = time.Now().Add(time.Duration(timeout))
- }
- if read && write {
- p.conn.SetDeadline(t)
- } else if read {
- p.conn.SetReadDeadline(t)
- } else if write {
- p.conn.SetWriteDeadline(t)
- }
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TSSLSocket) Open() error {
- var err error
- // If we have a hostname, we need to pass the hostname to tls.Dial for
- // certificate hostname checks.
- if p.hostPort != "" {
- if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer(
- &net.Dialer{
- Timeout: p.cfg.GetConnectTimeout(),
- },
- "tcp",
- p.hostPort,
- p.cfg.GetTLSConfig(),
- )); err != nil {
- return NewTTransportException(NOT_OPEN, err.Error())
- }
- } else {
- if p.conn.isValid() {
- return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
- }
- if p.addr == nil {
- return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
- }
- if len(p.addr.Network()) == 0 {
- return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
- }
- if len(p.addr.String()) == 0 {
- return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
- }
- if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer(
- &net.Dialer{
- Timeout: p.cfg.GetConnectTimeout(),
- },
- p.addr.Network(),
- p.addr.String(),
- p.cfg.GetTLSConfig(),
- )); err != nil {
- return NewTTransportException(NOT_OPEN, err.Error())
- }
- }
- return nil
-}
-
-// Retrieve the underlying net.Conn
-func (p *TSSLSocket) Conn() net.Conn {
- return p.conn
-}
-
-// Returns true if the connection is open
-func (p *TSSLSocket) IsOpen() bool {
- return p.conn.IsOpen()
-}
-
-// Closes the socket.
-func (p *TSSLSocket) Close() error {
- // Close the socket
- if p.conn != nil {
- err := p.conn.Close()
- if err != nil {
- return err
- }
- p.conn = nil
- }
- return nil
-}
-
-func (p *TSSLSocket) Read(buf []byte) (int, error) {
- if !p.conn.isValid() {
- return 0, NewTTransportException(NOT_OPEN, "Connection not open")
- }
- p.pushDeadline(true, false)
- // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between
- // p.pushDeadline and p.conn.Read could cause the deadline set inside
- // p.pushDeadline being reset, thus need to be avoided.
- n, err := p.conn.Read(buf)
- return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *TSSLSocket) Write(buf []byte) (int, error) {
- if !p.conn.isValid() {
- return 0, NewTTransportException(NOT_OPEN, "Connection not open")
- }
- p.pushDeadline(false, true)
- return p.conn.Write(buf)
-}
-
-func (p *TSSLSocket) Flush(ctx context.Context) error {
- return nil
-}
-
-func (p *TSSLSocket) Interrupt() error {
- if !p.conn.isValid() {
- return nil
- }
- return p.conn.Close()
-}
-
-func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
- const maxSize = ^uint64(0)
- return maxSize // the truth is, we just don't know unless framed is used
-}
-
-var _ TConfigurationSetter = (*TSSLSocket)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go
deleted file mode 100644
index d68d0b317..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "errors"
- "io"
-)
-
-var errTransportInterrupted = errors.New("Transport Interrupted")
-
-type Flusher interface {
- Flush() (err error)
-}
-
-type ContextFlusher interface {
- Flush(ctx context.Context) (err error)
-}
-
-type ReadSizeProvider interface {
- RemainingBytes() (num_bytes uint64)
-}
-
-// Encapsulates the I/O layer
-type TTransport interface {
- io.ReadWriteCloser
- ContextFlusher
- ReadSizeProvider
-
- // Opens the transport for communication
- Open() error
-
- // Returns true if the transport is open
- IsOpen() bool
-}
-
-type stringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-// This is "enhanced" transport with extra capabilities. You need to use one of these
-// to construct protocol.
-// Notably, TSocket does not implement this interface, and it is always a mistake to use
-// TSocket directly in protocol.
-type TRichTransport interface {
- io.ReadWriter
- io.ByteReader
- io.ByteWriter
- stringWriter
- ContextFlusher
- ReadSizeProvider
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go
deleted file mode 100644
index 0a3f07646..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
- "io"
-)
-
-type timeoutable interface {
- Timeout() bool
-}
-
-// Thrift Transport exception
-type TTransportException interface {
- TException
- TypeId() int
- Err() error
-}
-
-const (
- UNKNOWN_TRANSPORT_EXCEPTION = 0
- NOT_OPEN = 1
- ALREADY_OPEN = 2
- TIMED_OUT = 3
- END_OF_FILE = 4
-)
-
-type tTransportException struct {
- typeId int
- err error
- msg string
-}
-
-var _ TTransportException = (*tTransportException)(nil)
-
-func (tTransportException) TExceptionType() TExceptionType {
- return TExceptionTypeTransport
-}
-
-func (p *tTransportException) TypeId() int {
- return p.typeId
-}
-
-func (p *tTransportException) Error() string {
- return p.msg
-}
-
-func (p *tTransportException) Err() error {
- return p.err
-}
-
-func (p *tTransportException) Unwrap() error {
- return p.err
-}
-
-func (p *tTransportException) Timeout() bool {
- return p.typeId == TIMED_OUT
-}
-
-func NewTTransportException(t int, e string) TTransportException {
- return &tTransportException{
- typeId: t,
- err: errors.New(e),
- msg: e,
- }
-}
-
-func NewTTransportExceptionFromError(e error) TTransportException {
- if e == nil {
- return nil
- }
-
- if t, ok := e.(TTransportException); ok {
- return t
- }
-
- te := &tTransportException{
- typeId: UNKNOWN_TRANSPORT_EXCEPTION,
- err: e,
- msg: e.Error(),
- }
-
- if isTimeoutError(e) {
- te.typeId = TIMED_OUT
- return te
- }
-
- if errors.Is(e, io.EOF) {
- te.typeId = END_OF_FILE
- return te
- }
-
- return te
-}
-
-func prependTTransportException(prepend string, e TTransportException) TTransportException {
- return &tTransportException{
- typeId: e.TypeId(),
- err: e,
- msg: prepend + e.Error(),
- }
-}
-
-// isTimeoutError returns true when err is an error caused by timeout.
-//
-// Note that this also includes TTransportException wrapped timeout errors.
-func isTimeoutError(err error) bool {
- var t timeoutable
- if errors.As(err, &t) {
- return t.Timeout()
- }
- return false
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go
deleted file mode 100644
index c80580794..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Factory class used to create wrapped instance of Transports.
-// This is used primarily in servers, which get Transports from
-// a ServerTransport and then may want to mutate them (i.e. create
-// a BufferedTransport from the underlying base transport)
-type TTransportFactory interface {
- GetTransport(trans TTransport) (TTransport, error)
-}
-
-type tTransportFactory struct{}
-
-// Return a wrapped instance of the base Transport.
-func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- return trans, nil
-}
-
-func NewTTransportFactory() TTransportFactory {
- return &tTransportFactory{}
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go
deleted file mode 100644
index b24f1b05c..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Type constants in the Thrift protocol
-type TType byte
-
-const (
- STOP = 0
- VOID = 1
- BOOL = 2
- BYTE = 3
- I08 = 3
- DOUBLE = 4
- I16 = 6
- I32 = 8
- I64 = 10
- STRING = 11
- UTF7 = 11
- STRUCT = 12
- MAP = 13
- SET = 14
- LIST = 15
- UTF8 = 16
- UTF16 = 17
- //BINARY = 18 wrong and unused
-)
-
-var typeNames = map[int]string{
- STOP: "STOP",
- VOID: "VOID",
- BOOL: "BOOL",
- BYTE: "BYTE",
- DOUBLE: "DOUBLE",
- I16: "I16",
- I32: "I32",
- I64: "I64",
- STRING: "STRING",
- STRUCT: "STRUCT",
- MAP: "MAP",
- SET: "SET",
- LIST: "LIST",
- UTF8: "UTF8",
- UTF16: "UTF16",
-}
-
-func (p TType) String() string {
- if s, ok := typeNames[int(p)]; ok {
- return s
- }
- return "Unknown"
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go
deleted file mode 100644
index 259943a62..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied. See the License for the
-* specific language governing permissions and limitations
-* under the License.
- */
-
-package thrift
-
-import (
- "compress/zlib"
- "context"
- "io"
-)
-
-// TZlibTransportFactory is a factory for TZlibTransport instances
-type TZlibTransportFactory struct {
- level int
- factory TTransportFactory
-}
-
-// TZlibTransport is a TTransport implementation that makes use of zlib compression.
-type TZlibTransport struct {
- reader io.ReadCloser
- transport TTransport
- writer *zlib.Writer
-}
-
-// GetTransport constructs a new instance of NewTZlibTransport
-func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- if p.factory != nil {
- // wrap other factory
- var err error
- trans, err = p.factory.GetTransport(trans)
- if err != nil {
- return nil, err
- }
- }
- return NewTZlibTransport(trans, p.level)
-}
-
-// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory
-func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
- return &TZlibTransportFactory{level: level, factory: nil}
-}
-
-// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory
-// as a wrapper over existing transport factory
-func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory {
- return &TZlibTransportFactory{level: level, factory: factory}
-}
-
-// NewTZlibTransport constructs a new instance of TZlibTransport
-func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
- w, err := zlib.NewWriterLevel(trans, level)
- if err != nil {
- return nil, err
- }
-
- return &TZlibTransport{
- writer: w,
- transport: trans,
- }, nil
-}
-
-// Close closes the reader and writer (flushing any unwritten data) and closes
-// the underlying transport.
-func (z *TZlibTransport) Close() error {
- if z.reader != nil {
- if err := z.reader.Close(); err != nil {
- return err
- }
- }
- if err := z.writer.Close(); err != nil {
- return err
- }
- return z.transport.Close()
-}
-
-// Flush flushes the writer and its underlying transport.
-func (z *TZlibTransport) Flush(ctx context.Context) error {
- if err := z.writer.Flush(); err != nil {
- return err
- }
- return z.transport.Flush(ctx)
-}
-
-// IsOpen returns true if the transport is open
-func (z *TZlibTransport) IsOpen() bool {
- return z.transport.IsOpen()
-}
-
-// Open opens the transport for communication
-func (z *TZlibTransport) Open() error {
- return z.transport.Open()
-}
-
-func (z *TZlibTransport) Read(p []byte) (int, error) {
- if z.reader == nil {
- r, err := zlib.NewReader(z.transport)
- if err != nil {
- return 0, NewTTransportExceptionFromError(err)
- }
- z.reader = r
- }
-
- return z.reader.Read(p)
-}
-
-// RemainingBytes returns the size in bytes of the data that is still to be
-// read.
-func (z *TZlibTransport) RemainingBytes() uint64 {
- return z.transport.RemainingBytes()
-}
-
-func (z *TZlibTransport) Write(p []byte) (int, error) {
- return z.writer.Write(p)
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(z.transport, conf)
-}
-
-var _ TConfigurationSetter = (*TZlibTransport)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go
deleted file mode 100644
index ddbd681d0..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
- "context"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "sync"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
- "go.opentelemetry.io/otel/sdk/resource"
- sdktrace "go.opentelemetry.io/otel/sdk/trace"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-const (
- keyInstrumentationLibraryName = "otel.library.name"
- keyInstrumentationLibraryVersion = "otel.library.version"
- keyError = "error"
- keySpanKind = "span.kind"
- keyStatusCode = "otel.status_code"
- keyStatusMessage = "otel.status_description"
- keyDroppedAttributeCount = "otel.event.dropped_attributes_count"
- keyEventName = "event"
-)
-
-// New returns an OTel Exporter implementation that exports the collected
-// spans to Jaeger.
-func New(endpointOption EndpointOption) (*Exporter, error) {
- uploader, err := endpointOption.newBatchUploader()
- if err != nil {
- return nil, err
- }
-
- // Fetch default service.name from default resource for backup
- var defaultServiceName string
- defaultResource := resource.Default()
- if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists {
- defaultServiceName = value.AsString()
- }
- if defaultServiceName == "" {
- return nil, fmt.Errorf("failed to get service name from default resource")
- }
-
- stopCh := make(chan struct{})
- e := &Exporter{
- uploader: uploader,
- stopCh: stopCh,
- defaultServiceName: defaultServiceName,
- }
- return e, nil
-}
-
-// Exporter exports OpenTelemetry spans to a Jaeger agent or collector.
-type Exporter struct {
- uploader batchUploader
- stopOnce sync.Once
- stopCh chan struct{}
- defaultServiceName string
-}
-
-var _ sdktrace.SpanExporter = (*Exporter)(nil)
-
-// ExportSpans transforms and exports OpenTelemetry spans to Jaeger.
-func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
- // Return fast if context is already canceled or Exporter shutdown.
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-e.stopCh:
- return nil
- default:
- }
-
- // Cancel export if Exporter is shutdown.
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
- go func(ctx context.Context, cancel context.CancelFunc) {
- select {
- case <-ctx.Done():
- case <-e.stopCh:
- cancel()
- }
- }(ctx, cancel)
-
- for _, batch := range jaegerBatchList(spans, e.defaultServiceName) {
- if err := e.uploader.upload(ctx, batch); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Shutdown stops the Exporter. This will close all connections and release
-// all resources held by the Exporter.
-func (e *Exporter) Shutdown(ctx context.Context) error {
- // Stop any active and subsequent exports.
- e.stopOnce.Do(func() { close(e.stopCh) })
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- return e.uploader.shutdown(ctx)
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
-func (e *Exporter) MarshalLog() interface{} {
- return struct {
- Type string
- }{
- Type: "jaeger",
- }
-}
-
-func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span {
- attr := ss.Attributes()
- tags := make([]*gen.Tag, 0, len(attr))
- for _, kv := range attr {
- tag := keyValueToTag(kv)
- if tag != nil {
- tags = append(tags, tag)
- }
- }
-
- if is := ss.InstrumentationScope(); is.Name != "" {
- tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name))
- if is.Version != "" {
- tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version))
- }
- }
-
- if ss.SpanKind() != trace.SpanKindInternal {
- tags = append(tags,
- getStringTag(keySpanKind, ss.SpanKind().String()),
- )
- }
-
- if ss.Status().Code != codes.Unset {
- switch ss.Status().Code {
- case codes.Ok:
- tags = append(tags, getStringTag(keyStatusCode, "OK"))
- case codes.Error:
- tags = append(tags, getBoolTag(keyError, true))
- tags = append(tags, getStringTag(keyStatusCode, "ERROR"))
- }
- if ss.Status().Description != "" {
- tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description))
- }
- }
-
- var logs []*gen.Log
- for _, a := range ss.Events() {
- nTags := len(a.Attributes)
- if a.Name != "" {
- nTags++
- }
- if a.DroppedAttributeCount != 0 {
- nTags++
- }
- fields := make([]*gen.Tag, 0, nTags)
- if a.Name != "" {
- // If an event contains an attribute with the same key, it needs
- // to be given precedence and overwrite this.
- fields = append(fields, getStringTag(keyEventName, a.Name))
- }
- for _, kv := range a.Attributes {
- tag := keyValueToTag(kv)
- if tag != nil {
- fields = append(fields, tag)
- }
- }
- if a.DroppedAttributeCount != 0 {
- fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount)))
- }
- logs = append(logs, &gen.Log{
- Timestamp: a.Time.UnixNano() / 1000,
- Fields: fields,
- })
- }
-
- var refs []*gen.SpanRef
- for _, link := range ss.Links() {
- tid := link.SpanContext.TraceID()
- sid := link.SpanContext.SpanID()
- refs = append(refs, &gen.SpanRef{
- TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])),
- TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])),
- SpanId: int64(binary.BigEndian.Uint64(sid[:])),
- RefType: gen.SpanRefType_FOLLOWS_FROM,
- })
- }
-
- tid := ss.SpanContext().TraceID()
- sid := ss.SpanContext().SpanID()
- psid := ss.Parent().SpanID()
- return &gen.Span{
- TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])),
- TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])),
- SpanId: int64(binary.BigEndian.Uint64(sid[:])),
- ParentSpanId: int64(binary.BigEndian.Uint64(psid[:])),
- OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv"
- Flags: int32(ss.SpanContext().TraceFlags()),
- StartTime: ss.StartTime().UnixNano() / 1000,
- Duration: ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000,
- Tags: tags,
- Logs: logs,
- References: refs,
- }
-}
-
-func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag {
- var tag *gen.Tag
- switch keyValue.Value.Type() {
- case attribute.STRING:
- s := keyValue.Value.AsString()
- tag = &gen.Tag{
- Key: string(keyValue.Key),
- VStr: &s,
- VType: gen.TagType_STRING,
- }
- case attribute.BOOL:
- b := keyValue.Value.AsBool()
- tag = &gen.Tag{
- Key: string(keyValue.Key),
- VBool: &b,
- VType: gen.TagType_BOOL,
- }
- case attribute.INT64:
- i := keyValue.Value.AsInt64()
- tag = &gen.Tag{
- Key: string(keyValue.Key),
- VLong: &i,
- VType: gen.TagType_LONG,
- }
- case attribute.FLOAT64:
- f := keyValue.Value.AsFloat64()
- tag = &gen.Tag{
- Key: string(keyValue.Key),
- VDouble: &f,
- VType: gen.TagType_DOUBLE,
- }
- case attribute.BOOLSLICE,
- attribute.INT64SLICE,
- attribute.FLOAT64SLICE,
- attribute.STRINGSLICE:
- data, _ := json.Marshal(keyValue.Value.AsInterface())
- a := (string)(data)
- tag = &gen.Tag{
- Key: string(keyValue.Key),
- VStr: &a,
- VType: gen.TagType_STRING,
- }
- }
- return tag
-}
-
-func getInt64Tag(k string, i int64) *gen.Tag {
- return &gen.Tag{
- Key: k,
- VLong: &i,
- VType: gen.TagType_LONG,
- }
-}
-
-func getStringTag(k, s string) *gen.Tag {
- return &gen.Tag{
- Key: k,
- VStr: &s,
- VType: gen.TagType_STRING,
- }
-}
-
-func getBoolTag(k string, b bool) *gen.Tag {
- return &gen.Tag{
- Key: k,
- VBool: &b,
- VType: gen.TagType_BOOL,
- }
-}
-
-// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch.
-func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch {
- if len(ssl) == 0 {
- return nil
- }
-
- batchDict := make(map[attribute.Distinct]*gen.Batch)
-
- for _, ss := range ssl {
- if ss == nil {
- continue
- }
-
- resourceKey := ss.Resource().Equivalent()
- batch, bOK := batchDict[resourceKey]
- if !bOK {
- batch = &gen.Batch{
- Process: process(ss.Resource(), defaultServiceName),
- Spans: []*gen.Span{},
- }
- }
- batch.Spans = append(batch.Spans, spanToThrift(ss))
- batchDict[resourceKey] = batch
- }
-
- // Transform the categorized map into a slice
- batchList := make([]*gen.Batch, 0, len(batchDict))
- for _, batch := range batchDict {
- batchList = append(batchList, batch)
- }
- return batchList
-}
-
-// process transforms an OTel Resource into a jaeger Process.
-func process(res *resource.Resource, defaultServiceName string) *gen.Process {
- var process gen.Process
-
- var serviceName attribute.KeyValue
- if res != nil {
- for iter := res.Iter(); iter.Next(); {
- if iter.Attribute().Key == semconv.ServiceNameKey {
- serviceName = iter.Attribute()
- // Don't convert service.name into tag.
- continue
- }
- if tag := keyValueToTag(iter.Attribute()); tag != nil {
- process.Tags = append(process.Tags, tag)
- }
- }
- }
-
- // If no service.name is contained in a Span's Resource,
- // that field MUST be populated from the default Resource.
- if serviceName.Value.AsString() == "" {
- serviceName = semconv.ServiceName(defaultServiceName)
- }
- process.ServiceName = serviceName.Value.AsString()
-
- return &process
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go
deleted file mode 100644
index 88055c8a3..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
- "fmt"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/go-logr/logr"
-)
-
-// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is
-// different than the current conn then the new address is dialed and the conn is swapped.
-type reconnectingUDPConn struct {
- // `sync/atomic` expects the first word in an allocated struct to be 64-bit
- // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
- bufferBytes int64
- hostPort string
- resolveFunc resolveFunc
- dialFunc dialFunc
- logger logr.Logger
-
- connMtx sync.RWMutex
- conn *net.UDPConn
- destAddr *net.UDPAddr
- closeChan chan struct{}
-}
-
-type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
-type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
-
-// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is
-// different than the current conn then the new address is dialed and the conn is swapped.
-func newReconnectingUDPConn(hostPort string, bufferBytes int, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger logr.Logger) (*reconnectingUDPConn, error) {
- conn := &reconnectingUDPConn{
- hostPort: hostPort,
- resolveFunc: resolveFunc,
- dialFunc: dialFunc,
- logger: logger,
- closeChan: make(chan struct{}),
- bufferBytes: int64(bufferBytes),
- }
-
- if err := conn.attemptResolveAndDial(); err != nil {
- conn.logf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout)
- }
-
- go conn.reconnectLoop(resolveTimeout)
-
- return conn, nil
-}
-
-func (c *reconnectingUDPConn) logf(format string, args ...interface{}) {
- if c.logger != emptyLogger {
- c.logger.Info(format, args...)
- }
-}
-
-func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
- ticker := time.NewTicker(resolveTimeout)
- defer ticker.Stop()
-
- for {
- select {
- case <-c.closeChan:
- return
- case <-ticker.C:
- if err := c.attemptResolveAndDial(); err != nil {
- c.logf("%s", err.Error())
- }
- }
- }
-}
-
-func (c *reconnectingUDPConn) attemptResolveAndDial() error {
- newAddr, err := c.resolveFunc("udp", c.hostPort)
- if err != nil {
- return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
- }
-
- c.connMtx.RLock()
- curAddr := c.destAddr
- c.connMtx.RUnlock()
-
- // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn
- if curAddr != nil && newAddr.String() == curAddr.String() {
- return nil
- }
-
- if err := c.attemptDialNewAddr(newAddr); err != nil {
- return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
- }
-
- return nil
-}
-
-func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
- connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
- if err != nil {
- return err
- }
-
- if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
- if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
- return err
- }
- }
-
- c.connMtx.Lock()
- c.destAddr = newAddr
- // store prev to close later
- prevConn := c.conn
- c.conn = connUDP
- c.connMtx.Unlock()
-
- if prevConn != nil {
- return prevConn.Close()
- }
-
- return nil
-}
-
-// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning.
-func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
- var bytesWritten int
- var err error
-
- c.connMtx.RLock()
- conn := c.conn
- c.connMtx.RUnlock()
-
- if conn == nil {
- // if connection is not initialized indicate this with err in order to hook into retry logic
- err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
- } else {
- bytesWritten, err = conn.Write(b)
- }
-
- if err == nil {
- return bytesWritten, nil
- }
-
- // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again
- if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
- c.connMtx.RLock()
- conn := c.conn
- c.connMtx.RUnlock()
-
- return conn.Write(b)
- }
-
- // return original error if reconn fails
- return bytesWritten, err
-}
-
-// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation.
-func (c *reconnectingUDPConn) Close() error {
- close(c.closeChan)
-
- // acquire rw lock before closing conn to ensure calls to Write drain
- c.connMtx.Lock()
- defer c.connMtx.Unlock()
-
- if c.conn != nil {
- return c.conn.Close()
- }
-
- return nil
-}
-
-// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. if no conn is currently held
-// and SetWriteBuffer is called store bufferBytes to be set for new conns.
-func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
- var err error
-
- c.connMtx.RLock()
- conn := c.conn
- c.connMtx.RUnlock()
-
- if conn != nil {
- err = c.conn.SetWriteBuffer(bytes)
- }
-
- if err == nil {
- atomic.StoreInt64(&c.bufferBytes, int64(bytes))
- }
-
- return err
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go
deleted file mode 100644
index f65e3a678..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "log"
- "net/http"
- "time"
-
- "github.com/go-logr/logr"
- "github.com/go-logr/stdr"
-
- gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
- "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// batchUploader send a batch of spans to Jaeger.
-type batchUploader interface {
- upload(context.Context, *gen.Batch) error
- shutdown(context.Context) error
-}
-
-// EndpointOption configures a Jaeger endpoint.
-type EndpointOption interface {
- newBatchUploader() (batchUploader, error)
-}
-
-type endpointOptionFunc func() (batchUploader, error)
-
-func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) {
- return fn()
-}
-
-// WithAgentEndpoint configures the Jaeger exporter to send spans to a Jaeger agent
-// over compact thrift protocol. This will use the following environment variables for
-// configuration if no explicit option is provided:
-//
-// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host
-// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port
-//
-// The passed options will take precedence over any environment variables and default values
-// will be used if neither are provided.
-func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption {
- return endpointOptionFunc(func() (batchUploader, error) {
- cfg := agentEndpointConfig{
- agentClientUDPParams{
- AttemptReconnecting: true,
- Host: envOr(envAgentHost, "localhost"),
- Port: envOr(envAgentPort, "6831"),
- },
- }
- for _, opt := range options {
- cfg = opt.apply(cfg)
- }
-
- client, err := newAgentClientUDP(cfg.agentClientUDPParams)
- if err != nil {
- return nil, err
- }
-
- return &agentUploader{client: client}, nil
- })
-}
-
-// AgentEndpointOption configures a Jaeger agent endpoint.
-type AgentEndpointOption interface {
- apply(agentEndpointConfig) agentEndpointConfig
-}
-
-type agentEndpointConfig struct {
- agentClientUDPParams
-}
-
-type agentEndpointOptionFunc func(agentEndpointConfig) agentEndpointConfig
-
-func (fn agentEndpointOptionFunc) apply(cfg agentEndpointConfig) agentEndpointConfig {
- return fn(cfg)
-}
-
-// WithAgentHost sets a host to be used in the agent client endpoint.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable.
-// If this option is not passed and the env var is not set, "localhost" will be used by default.
-func WithAgentHost(host string) AgentEndpointOption {
- return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
- o.Host = host
- return o
- })
-}
-
-// WithAgentPort sets a port to be used in the agent client endpoint.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable.
-// If this option is not passed and the env var is not set, "6831" will be used by default.
-func WithAgentPort(port string) AgentEndpointOption {
- return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
- o.Port = port
- return o
- })
-}
-
-var emptyLogger = logr.Logger{}
-
-// WithLogger sets a logger to be used by agent client.
-// WithLogger and WithLogr will overwrite each other.
-func WithLogger(logger *log.Logger) AgentEndpointOption {
- return WithLogr(stdr.New(logger))
-}
-
-// WithLogr sets a logr.Logger to be used by agent client.
-// WithLogr and WithLogger will overwrite each other.
-func WithLogr(logger logr.Logger) AgentEndpointOption {
- return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
- o.Logger = logger
- return o
- })
-}
-
-// WithDisableAttemptReconnecting sets option to disable reconnecting udp client.
-func WithDisableAttemptReconnecting() AgentEndpointOption {
- return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
- o.AttemptReconnecting = false
- return o
- })
-}
-
-// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint.
-func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption {
- return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
- o.AttemptReconnectInterval = interval
- return o
- })
-}
-
-// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent.
-func WithMaxPacketSize(size int) AgentEndpointOption {
- return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
- o.MaxPacketSize = size
- return o
- })
-}
-
-// WithCollectorEndpoint defines the full URL to the Jaeger HTTP Thrift collector. This will
-// use the following environment variables for configuration if no explicit option is provided:
-//
-// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector.
-// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint.
-// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint.
-//
-// The passed options will take precedence over any environment variables.
-// If neither values are provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used.
-// If neither values are provided for the username or the password, they will not be set since there is no default.
-func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption {
- return endpointOptionFunc(func() (batchUploader, error) {
- cfg := collectorEndpointConfig{
- endpoint: envOr(envEndpoint, "http://localhost:14268/api/traces"),
- username: envOr(envUser, ""),
- password: envOr(envPassword, ""),
- httpClient: http.DefaultClient,
- }
-
- for _, opt := range options {
- cfg = opt.apply(cfg)
- }
-
- return &collectorUploader{
- endpoint: cfg.endpoint,
- username: cfg.username,
- password: cfg.password,
- httpClient: cfg.httpClient,
- }, nil
- })
-}
-
-// CollectorEndpointOption configures a Jaeger collector endpoint.
-type CollectorEndpointOption interface {
- apply(collectorEndpointConfig) collectorEndpointConfig
-}
-
-type collectorEndpointConfig struct {
- // endpoint for sending spans directly to a collector.
- endpoint string
-
- // username to be used for authentication with the collector endpoint.
- username string
-
- // password to be used for authentication with the collector endpoint.
- password string
-
- // httpClient to be used to make requests to the collector endpoint.
- httpClient *http.Client
-}
-
-type collectorEndpointOptionFunc func(collectorEndpointConfig) collectorEndpointConfig
-
-func (fn collectorEndpointOptionFunc) apply(cfg collectorEndpointConfig) collectorEndpointConfig {
- return fn(cfg)
-}
-
-// WithEndpoint is the URL for the Jaeger collector that spans are sent to.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable.
-// If this option is not passed and the environment variable is not set,
-// "http://localhost:14268/api/traces" will be used by default.
-func WithEndpoint(endpoint string) CollectorEndpointOption {
- return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
- o.endpoint = endpoint
- return o
- })
-}
-
-// WithUsername sets the username to be used in the authorization header sent for all requests to the collector.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_USER environment variable.
-// If this option is not passed and the environment variable is not set, no username will be set.
-func WithUsername(username string) CollectorEndpointOption {
- return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
- o.username = username
- return o
- })
-}
-
-// WithPassword sets the password to be used in the authorization header sent for all requests to the collector.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_PASSWORD environment variable.
-// If this option is not passed and the environment variable is not set, no password will be set.
-func WithPassword(password string) CollectorEndpointOption {
- return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
- o.password = password
- return o
- })
-}
-
-// WithHTTPClient sets the http client to be used to make request to the collector endpoint.
-func WithHTTPClient(client *http.Client) CollectorEndpointOption {
- return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
- o.httpClient = client
- return o
- })
-}
-
-// agentUploader implements batchUploader interface sending batches to
-// Jaeger through the UDP agent.
-type agentUploader struct {
- client *agentClientUDP
-}
-
-var _ batchUploader = (*agentUploader)(nil)
-
-func (a *agentUploader) shutdown(ctx context.Context) error {
- done := make(chan error, 1)
- go func() {
- done <- a.client.Close()
- }()
-
- select {
- case <-ctx.Done():
- // Prioritize not blocking the calling thread and just leak the
- // spawned goroutine to close the client.
- return ctx.Err()
- case err := <-done:
- return err
- }
-}
-
-func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error {
- return a.client.EmitBatch(ctx, batch)
-}
-
-// collectorUploader implements batchUploader interface sending batches to
-// Jaeger through the collector http endpoint.
-type collectorUploader struct {
- endpoint string
- username string
- password string
- httpClient *http.Client
-}
-
-var _ batchUploader = (*collectorUploader)(nil)
-
-func (c *collectorUploader) shutdown(ctx context.Context) error {
- // The Exporter will cancel any active exports and will prevent all
- // subsequent exports, so nothing to do here.
- return nil
-}
-
-func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error {
- body, err := serialize(batch)
- if err != nil {
- return err
- }
- req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body)
- if err != nil {
- return err
- }
- if c.username != "" && c.password != "" {
- req.SetBasicAuth(c.username, c.password)
- }
- req.Header.Set("Content-Type", "application/x-thrift")
-
- resp, err := c.httpClient.Do(req)
- if err != nil {
- return err
- }
-
- _, _ = io.Copy(io.Discard, resp.Body)
- if err = resp.Body.Close(); err != nil {
- return err
- }
-
- if resp.StatusCode < 200 || resp.StatusCode >= 300 {
- return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode)
- }
- return nil
-}
-
-func serialize(obj thrift.TStruct) (*bytes.Buffer, error) {
- buf := thrift.NewTMemoryBuffer()
- if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil {
- return nil, err
- }
- return buf.Buffer, nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go
deleted file mode 100644
index b3fd45d9d..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal contains common functionality for all OTLP exporters.
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
-
-import (
- "fmt"
- "path"
- "strings"
-)
-
-// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
-func CleanPath(urlPath string, defaultPath string) string {
- tmp := path.Clean(strings.TrimSpace(urlPath))
- if tmp == "." {
- return defaultPath
- }
- if !path.IsAbs(tmp) {
- tmp = fmt.Sprintf("/%s", tmp)
- }
- return tmp
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go
deleted file mode 100644
index 217751da5..000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
-
-// ErrorKind is used to identify the kind of export error
-// being wrapped.
-type ErrorKind int
-
-const (
- // TracesExport indicates the error comes from the OTLP trace exporter.
- TracesExport ErrorKind = iota
-)
-
-// prefix returns a prefix for the Error() string.
-func (k ErrorKind) prefix() string {
- switch k {
- case TracesExport:
- return "traces export: "
- default:
- return "unknown: "
- }
-}
-
-// wrappedExportError wraps an OTLP exporter error with the kind of
-// signal that produced it.
-type wrappedExportError struct {
- wrap error
- kind ErrorKind
-}
-
-// WrapTracesError wraps an error from the OTLP exporter for traces.
-func WrapTracesError(err error) error {
- return wrappedExportError{
- wrap: err,
- kind: TracesExport,
- }
-}
-
-var _ error = wrappedExportError{}
-
-// Error attaches a prefix corresponding to the kind of exporter.
-func (t wrappedExportError) Error() string {
- return t.kind.prefix() + t.wrap.Error()
-}
-
-// Unwrap returns the wrapped error.
-func (t wrappedExportError) Unwrap() error {
- return t.wrap
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
index b65802edb..0dbe15555 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -17,9 +17,9 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
import (
"context"
"errors"
+ "fmt"
"sync"
- "go.opentelemetry.io/otel/exporters/otlp/internal"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
@@ -48,7 +48,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan)
err := e.client.UploadTraces(ctx, protoSpans)
if err != nil {
- return internal.WrapTracesError(err)
+ return fmt.Errorf("traces export: %w", err)
}
return nil
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
index 2ab2a6e14..86fb61a0d 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -27,10 +27,10 @@ import (
"google.golang.org/grpc/status"
"go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/exporters/otlp/internal"
- "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
@@ -259,7 +259,6 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
// retryable returns if err identifies a request that can be retried and a
// duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) {
- //func retryable(err error) (bool, time.Duration) {
s := status.Convert(err)
switch s.Code() {
case codes.Canceled,
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
index 444eefbb3..becb1f0fb 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +15,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
import (
"crypto/tls"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
new file mode 100644
index 000000000..1fb290618
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
index 62c5029db..32f6dddb4 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
+
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +15,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
import (
"crypto/tls"
@@ -23,7 +26,7 @@ import (
"strings"
"time"
- "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
)
// DefaultEnvOptionsReader is the default environments reader.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index 1a6bb423b..19b8434d4 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,11 +15,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
+ "path"
+ "strings"
"time"
"google.golang.org/grpc"
@@ -25,9 +30,8 @@ import (
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/encoding/gzip"
- "go.opentelemetry.io/otel/exporters/otlp/internal"
- "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
- otinternal "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
)
const (
@@ -83,13 +87,28 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
- cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+ cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
return cfg
}
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
+
// NewGRPCConfig returns a new Config with all settings applied from opts and
// any unset setting using the default gRPC config values.
func NewGRPCConfig(opts ...GRPCOption) Config {
+ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
cfg := Config{
Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
@@ -98,7 +117,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
Timeout: DefaultTimeout,
},
RetryConfig: retry.DefaultConfig,
- DialOptions: []grpc.DialOption{grpc.WithUserAgent(otinternal.GetUserAgentHeader())},
+ DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
}
cfg = ApplyGRPCEnvConfigs(cfg)
for _, opt := range opts {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
index c2d6c0361..d9dcdc96e 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +15,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
const (
// DefaultCollectorGRPCPort is the default gRPC port of the collector.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
index 7287cf6cf..19b6d4b21 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +15,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
import (
"crypto/tls"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
index 9ab89b375..076905e54 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +15,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
import "fmt"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 7e1b0055a..3ce7d6632 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -1,3 +1,6 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +18,7 @@
// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
// explicit throttle responses received.
-package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
import (
"context"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
index 3d09ce590..78ce9ad8f 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -22,8 +22,8 @@ import (
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
)
// Option applies an option to the gRPC driver.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE
index 261eeb9e9..261eeb9e9 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
new file mode 100644
index 000000000..3a3cfec0c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
@@ -0,0 +1,341 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+ coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+const contentTypeProto = "application/x-protobuf"
+
+var gzPool = sync.Pool{
+ New: func() interface{} {
+ w := gzip.NewWriter(io.Discard)
+ return w
+ },
+}
+
+// Keep it in sync with golang's DefaultTransport from net/http! We
+// have our own copy to avoid handling a situation where the
+// DefaultTransport is overwritten with some different implementation
+// of http.RoundTripper or it's modified by other package.
+var ourTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+}
+
+type client struct {
+ name string
+ cfg otlpconfig.SignalConfig
+ generalCfg otlpconfig.Config
+ requestFunc retry.RequestFunc
+ client *http.Client
+ stopCh chan struct{}
+ stopOnce sync.Once
+}
+
+var _ otlptrace.Client = (*client)(nil)
+
+// NewClient creates a new HTTP trace client.
+func NewClient(opts ...Option) otlptrace.Client {
+ cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)
+
+ httpClient := &http.Client{
+ Transport: ourTransport,
+ Timeout: cfg.Traces.Timeout,
+ }
+ if cfg.Traces.TLSCfg != nil {
+ transport := ourTransport.Clone()
+ transport.TLSClientConfig = cfg.Traces.TLSCfg
+ httpClient.Transport = transport
+ }
+
+ stopCh := make(chan struct{})
+ return &client{
+ name: "traces",
+ cfg: cfg.Traces,
+ generalCfg: cfg,
+ requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
+ stopCh: stopCh,
+ client: httpClient,
+ }
+}
+
+// Start does nothing in a HTTP client.
+func (d *client) Start(ctx context.Context) error {
+ // nothing to do
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ return nil
+}
+
+// Stop shuts down the client and interrupt any in-flight request.
+func (d *client) Stop(ctx context.Context) error {
+ d.stopOnce.Do(func() {
+ close(d.stopCh)
+ })
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ return nil
+}
+
+// UploadTraces sends a batch of spans to the collector.
+func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+ pbRequest := &coltracepb.ExportTraceServiceRequest{
+ ResourceSpans: protoSpans,
+ }
+ rawRequest, err := proto.Marshal(pbRequest)
+ if err != nil {
+ return err
+ }
+
+ ctx, cancel := d.contextWithStop(ctx)
+ defer cancel()
+
+ request, err := d.newRequest(rawRequest)
+ if err != nil {
+ return err
+ }
+
+ return d.requestFunc(ctx, func(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ request.reset(ctx)
+ resp, err := d.client.Do(request.Request)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil && resp.Body != nil {
+ defer func() {
+ if err := resp.Body.Close(); err != nil {
+ otel.Handle(err)
+ }
+ }()
+ }
+
+ switch sc := resp.StatusCode; {
+ case sc >= 200 && sc <= 299:
+ // Success, do not retry.
+ // Read the partial success message, if any.
+ var respData bytes.Buffer
+ if _, err := io.Copy(&respData, resp.Body); err != nil {
+ return err
+ }
+
+ if respData.Len() != 0 {
+ var respProto coltracepb.ExportTraceServiceResponse
+ if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
+ return err
+ }
+
+ if respProto.PartialSuccess != nil {
+ msg := respProto.PartialSuccess.GetErrorMessage()
+ n := respProto.PartialSuccess.GetRejectedSpans()
+ if n != 0 || msg != "" {
+ err := internal.TracePartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ }
+ return nil
+
+ case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
+ // Retry-able failures. Drain the body to reuse the connection.
+ if _, err := io.Copy(io.Discard, resp.Body); err != nil {
+ otel.Handle(err)
+ }
+ return newResponseError(resp.Header)
+ default:
+ return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status)
+ }
+ })
+}
+
+func (d *client) newRequest(body []byte) (request, error) {
+ u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath}
+ r, err := http.NewRequest(http.MethodPost, u.String(), nil)
+ if err != nil {
+ return request{Request: r}, err
+ }
+
+ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+ r.Header.Set("User-Agent", userAgent)
+
+ for k, v := range d.cfg.Headers {
+ r.Header.Set(k, v)
+ }
+ r.Header.Set("Content-Type", contentTypeProto)
+
+ req := request{Request: r}
+ switch Compression(d.cfg.Compression) {
+ case NoCompression:
+ r.ContentLength = (int64)(len(body))
+ req.bodyReader = bodyReader(body)
+ case GzipCompression:
+ // Ensure the content length is not used.
+ r.ContentLength = -1
+ r.Header.Set("Content-Encoding", "gzip")
+
+ gz := gzPool.Get().(*gzip.Writer)
+ defer gzPool.Put(gz)
+
+ var b bytes.Buffer
+ gz.Reset(&b)
+
+ if _, err := gz.Write(body); err != nil {
+ return req, err
+ }
+ // Close needs to be called to ensure the body is fully written.
+ if err := gz.Close(); err != nil {
+ return req, err
+ }
+
+ req.bodyReader = bodyReader(b.Bytes())
+ }
+
+ return req, nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (d *client) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Endpoint string
+ Insecure bool
+ }{
+ Type: "otlphttphttp",
+ Endpoint: d.cfg.Endpoint,
+ Insecure: d.cfg.Insecure,
+ }
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+ return func() io.ReadCloser {
+ return io.NopCloser(bytes.NewReader(buf))
+ }
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+ *http.Request
+
+ // bodyReader allows the same body to be used for multiple requests.
+ bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+ r.Body = r.bodyReader()
+ r.Request = r.Request.WithContext(ctx)
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+ throttle int64
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers.
+func newResponseError(header http.Header) error {
+ var rErr retryableError
+ if s, ok := header["Retry-After"]; ok {
+ if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
+ rErr.throttle = t
+ }
+ }
+ return rErr
+}
+
+func (e retryableError) Error() string {
+ return "retry-able request failure"
+}
+
+// evaluate returns if err is retry-able. If it is and it includes an explicit
+// throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+ if err == nil {
+ return false, 0
+ }
+
+ rErr, ok := err.(retryableError)
+ if !ok {
+ return false, 0
+ }
+
+ return true, time.Duration(rErr.throttle)
+}
+
+func (d *client) getScheme() string {
+ if d.cfg.Insecure {
+ return "http"
+ }
+ return "https"
+}
+
+func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
+ // Unify the parent context Done signal with the client's stop
+ // channel.
+ ctx, cancel := context.WithCancel(ctx)
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ select {
+ case <-ctx.Done():
+ // Nothing to do, either cancelled or deadline
+ // happened.
+ case <-d.stopCh:
+ cancel()
+ }
+ }(ctx, cancel)
+ return ctx, cancel
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
index d10ad6653..e7f066b43 100644
--- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
@@ -1,10 +1,10 @@
-// Copyright 2020 Google LLC
+// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,12 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package field_mask aliases all exported identifiers in
-// package "google.golang.org/protobuf/types/known/fieldmaskpb".
-package field_mask
-
-import "google.golang.org/protobuf/types/known/fieldmaskpb"
-
-type FieldMask = fieldmaskpb.FieldMask
-
-var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto
+/*
+Package otlptracehttp a client that sends traces to the collector using HTTP
+with binary protobuf payloads.
+*/
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/header.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
index 36d3ca8e1..23b864204 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/header.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go
@@ -12,14 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
import (
+ "context"
+
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
)
-// GetUserAgentHeader returns an OTLP header value form "OTel OTLP Exporter Go/{{ .Version }}"
-// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md#user-agent
-func GetUserAgentHeader() string {
- return "OTel OTLP Exporter Go/" + otlptrace.Version()
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+ return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+ return otlptrace.NewUnstarted(NewClient(opts...))
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..5e9e8185d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
@@ -0,0 +1,202 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '="), "parse headers", "input", header)
+ continue
+ }
+ name, err := url.QueryUnescape(n)
+ if err != nil {
+ global.Error(err, "escape header key", "key", n)
+ continue
+ }
+ trimmedName := strings.TrimSpace(name)
+ value, err := url.QueryUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
new file mode 100644
index 000000000..01347d8c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry\"}" --out=otlpconfig/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
new file mode 100644
index 000000000..45f137a78
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
@@ -0,0 +1,153 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
+)
+
+// DefaultEnvOptionsReader is the default environments reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if an URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Traces.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ )
+
+ return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
new file mode 100644
index 000000000..9a595c36a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
@@ -0,0 +1,328 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+)
+
+const (
+ // DefaultTracesPath is a default URL path for endpoint that
+ // receives spans.
+ DefaultTracesPath string = "/v1/traces"
+ // DefaultTimeout is a default max waiting time for the backend to process
+ // each span batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Traces SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+ return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+ // Priroritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Traces.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+ } else if cfg.Traces.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Traces.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Traces.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if len(cfg.DialOptions) != 0 {
+ cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logics
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = endpoint
+ return cfg
+ })
+}
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Traces.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Timeout = duration
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
new file mode 100644
index 000000000..862567485
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,51 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+ // MarshalProto tells the driver to send using the protobuf binary format.
+ MarshalProto Marshaler = iota
+ // MarshalJSON tells the driver to send using json format.
+ MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
new file mode 100644
index 000000000..c342f7d68
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
@@ -0,0 +1,37 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+)
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
new file mode 100644
index 000000000..f051ad5d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
@@ -0,0 +1,67 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
new file mode 100644
index 000000000..44974ff49
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
@@ -0,0 +1,156 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to not retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %s", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
new file mode 100644
index 000000000..e3ed6494c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+
+import (
+ "crypto/tls"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression otlpconfig.Compression
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression = Compression(otlpconfig.NoCompression)
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression = Compression(otlpconfig.GzipCompression)
+)
+
+// Option applies an option to the HTTP client.
+type Option interface {
+ applyHTTPOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption {
+ converted := make([]otlpconfig.HTTPOption, len(opts))
+ for i, o := range opts {
+ converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying batches in case of export
+// failure using an exponential backoff.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ otlpconfig.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
+ return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint allows one to set the address of the collector
+// endpoint that the driver will use to send spans. If
+// unset, it will instead try to use
+// the default endpoint (localhost:4318). Note that the endpoint
+// must not contain any URL path.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithCompression tells the driver to compress the sent data.
+func WithCompression(compression Compression) Option {
+ return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
+}
+
+// WithURLPath allows one to override the default URL path used
+// for sending traces. If unset, default ("/v1/traces") will be used.
+func WithURLPath(urlPath string) Option {
+ return wrappedOption{otlpconfig.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig can be used to set up a custom TLS
+// configuration for the client used to send payloads to the
+// collector. Use it if you want to use a custom certificate.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+ return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure tells the driver to connect to the collector using the
+// HTTP scheme, instead of HTTPS.
+func WithInsecure() Option {
+ return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithHeaders allows one to tell the driver to send additional HTTP
+// headers with the payloads. Specifying headers like Content-Length,
+// Content-Encoding and Content-Type may result in a broken driver.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTimeout tells the driver the max waiting time for the backend to process
+// each spans batch. If unset, the default will be 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry configures the retry policy for transient errors that may occurs
+// when exporting traces. An exponential back-off algorithm is used to ensure
+// endpoints are not overwhelmed with retries. If unset, the default retry
+// policy will retry after 5 seconds and increase exponentially after each
+// error for a total of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+ return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
index db70dc531..1780b7165 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -16,5 +16,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
- return "1.16.0"
+ return "1.17.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go
new file mode 100644
index 000000000..f532f07e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/gen.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
index 3dcd1caae..5e9b83047 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -18,7 +18,6 @@ import (
"log"
"os"
"sync/atomic"
- "unsafe"
)
var (
@@ -42,7 +41,7 @@ type ErrorHandler interface {
}
type ErrDelegator struct {
- delegate unsafe.Pointer
+ delegate atomic.Pointer[ErrorHandler]
}
func (d *ErrDelegator) Handle(err error) {
@@ -50,12 +49,12 @@ func (d *ErrDelegator) Handle(err error) {
}
func (d *ErrDelegator) getDelegate() ErrorHandler {
- return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
+ return *d.delegate.Load()
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
- atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
+ d.delegate.Store(&eh)
}
func defaultErrorHandler() *ErrDelegator {
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index 5951fd06d..c6f305a2b 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -18,7 +18,6 @@ import (
"log"
"os"
"sync/atomic"
- "unsafe"
"github.com/go-logr/logr"
"github.com/go-logr/stdr"
@@ -28,7 +27,7 @@ import (
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
-var globalLogger unsafe.Pointer
+var globalLogger atomic.Pointer[logr.Logger]
func init() {
SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
@@ -40,11 +39,11 @@ func init() {
// To see Info messages use a logger with `l.V(4).Enabled() == true`
// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
func SetLogger(l logr.Logger) {
- atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
+ globalLogger.Store(&l)
}
func getLogger() logr.Logger {
- return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
+ return *globalLogger.Load()
}
// Info prints messages about the general state of the API or SDK.
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
index 0033c1e12..cdca00058 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -167,6 +167,8 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob
}
// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
func WithUnit(u string) InstrumentOption { return unitOpt(u) }
// AddOption applies options to an addition measurement. See
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 8e1917c32..2520bc74a 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -157,6 +157,8 @@ type Meter interface {
//
// If no instruments are passed, f should not be registered nor called
// during collection.
+ //
+ // The function f needs to be concurrent safe.
RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
new file mode 100644
index 000000000..bd84f624b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/sdk/internal"
+
+//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index 72320ca51..c63a0dd1f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -22,7 +22,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
type (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 318dcf82f..3d5362282 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -22,7 +22,7 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
type containerIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index f09a78190..a847c5062 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index b8e934d4f..fb1ebf2ca 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -19,7 +19,7 @@ import (
"errors"
"strings"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
type hostIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
index f92c6dad0..721e3ca6e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -21,7 +21,7 @@ import "os"
func readFile(filename string) (string, error) {
b, err := os.ReadFile(filename)
if err != nil {
- return "", nil
+ return "", err
}
return string(b), nil
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 815fe5c20..84e1c5856 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -19,7 +19,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
type osDescriptionProvider func() (string, error)
@@ -75,6 +75,7 @@ func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
// the elements in this map are the intersection between
// available GOOS values and defined semconv OS types
osTypeAttributeMap := map[string]attribute.KeyValue{
+ "aix": semconv.OSTypeAIX,
"darwin": semconv.OSTypeDarwin,
"dragonfly": semconv.OSTypeDragonflyBSD,
"freebsd": semconv.OSTypeFreeBSD,
@@ -83,6 +84,7 @@ func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
"openbsd": semconv.OSTypeOpenBSD,
"solaris": semconv.OSTypeSolaris,
"windows": semconv.OSTypeWindows,
+ "zos": semconv.OSTypeZOS,
}
var osTypeAttribute attribute.KeyValue
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index bdd0e7fe6..e67ff29e2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -22,7 +22,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
type pidProvider func() int
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
index 139dc7e8f..176ff1066 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -36,7 +36,6 @@ type Resource struct {
}
var (
- emptyResource Resource
defaultResource *Resource
defaultResourceOnce sync.Once
)
@@ -70,7 +69,7 @@ func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource
// of the attrs is known use NewWithAttributes instead.
func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
if len(attrs) == 0 {
- return &emptyResource
+ return &Resource{}
}
// Ensure attributes comply with the specification:
@@ -81,7 +80,7 @@ func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
// If attrs only contains invalid entries do not allocate a new resource.
if s.Len() == 0 {
- return &emptyResource
+ return &Resource{}
}
return &Resource{attrs: s} //nolint
@@ -195,7 +194,7 @@ func Merge(a, b *Resource) (*Resource, error) {
// Empty returns an instance of Resource with no attributes. It is
// equivalent to a `nil` Resource.
func Empty() *Resource {
- return &emptyResource
+ return &Resource{}
}
// Default returns an instance of Resource with a default
@@ -214,7 +213,7 @@ func Default() *Resource {
}
// If Detect did not return a valid resource, fall back to emptyResource.
if defaultResource == nil {
- defaultResource = &emptyResource
+ defaultResource = &Resource{}
}
})
return defaultResource
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
index 43d5b0423..c9c7effbf 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -16,7 +16,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
- "runtime"
"sync"
"sync/atomic"
"time"
@@ -84,6 +83,7 @@ type batchSpanProcessor struct {
stopWait sync.WaitGroup
stopOnce sync.Once
stopCh chan struct{}
+ stopped atomic.Bool
}
var _ SpanProcessor = (*batchSpanProcessor)(nil)
@@ -137,6 +137,11 @@ func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan)
// OnEnd method enqueues a ReadOnlySpan for later processing.
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+ // Do not enqueue spans after Shutdown.
+ if bsp.stopped.Load() {
+ return
+ }
+
// Do not enqueue spans if we are just going to drop them.
if bsp.e == nil {
return
@@ -149,6 +154,7 @@ func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
var err error
bsp.stopOnce.Do(func() {
+ bsp.stopped.Store(true)
wait := make(chan struct{})
go func() {
close(bsp.stopCh)
@@ -181,11 +187,24 @@ func (f forceFlushSpan) SpanContext() trace.SpanContext {
// ForceFlush exports all ended spans that have not yet been exported.
func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+ // Interrupt if context is already canceled.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // Do nothing after Shutdown.
+ if bsp.stopped.Load() {
+ return nil
+ }
+
var err error
if bsp.e != nil {
flushCh := make(chan struct{})
if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
select {
+ case <-bsp.stopCh:
+ // The batchSpanProcessor is Shutdown.
+ return nil
case <-flushCh:
// Processed any items in queue prior to ForceFlush being called
case <-ctx.Done():
@@ -326,11 +345,9 @@ func (bsp *batchSpanProcessor) drainQueue() {
for {
select {
case sd := <-bsp.queue:
- if sd == nil {
- if err := bsp.exportSpans(ctx); err != nil {
- otel.Handle(err)
- }
- return
+ if _, ok := sd.(forceFlushSpan); ok {
+ // Ignore flush requests as they are not valid spans.
+ continue
}
bsp.batchMutex.Lock()
@@ -344,7 +361,11 @@ func (bsp *batchSpanProcessor) drainQueue() {
}
}
default:
- close(bsp.queue)
+ // There are no more enqueued spans. Make final export.
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ return
}
}
}
@@ -358,34 +379,11 @@ func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
}
}
-func recoverSendOnClosedChan() {
- x := recover()
- switch err := x.(type) {
- case nil:
- return
- case runtime.Error:
- if err.Error() == "send on closed channel" {
- return
- }
- }
- panic(x)
-}
-
func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
if !sd.SpanContext().IsSampled() {
return false
}
- // This ensures the bsp.queue<- below does not panic as the
- // processor shuts down.
- defer recoverSendOnClosedChan()
-
- select {
- case <-bsp.stopCh:
- return false
- default:
- }
-
select {
case bsp.queue <- sd:
return true
@@ -399,16 +397,6 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan)
return false
}
- // This ensures the bsp.queue<- below does not panic as the
- // processor shuts down.
- defer recoverSendOnClosedChan()
-
- select {
- case <-bsp.stopCh:
- return false
- default:
- }
-
select {
case bsp.queue <- sd:
return true
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 4fcca26e0..37cdd4a69 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/internal"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"go.opentelemetry.io/otel/trace"
)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index dbef90b0d..a99bdd38e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.16.0"
+ return "1.17.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
new file mode 100644
index 000000000..e6cf89510
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
@@ -0,0 +1,1877 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - unix domain
+ // socket name, IPv4 or IPv6 address.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/tmp/my.sock', '10.1.2.80'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent client address behind
+ // any intermediaries (e.g. proxies) if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent client port behind any
+ // intermediaries (e.g. proxies) if it's available.
+ ClientPortKey = attribute.Key("client.port")
+
+ // ClientSocketAddressKey is the attribute Key conforming to the
+ // "client.socket.address" semantic conventions. It represents the
+ // immediate client peer address - unix domain socket name, IPv4 or IPv6
+ // address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If different than `client.address`.)
+ // Stability: stable
+ // Examples: '/tmp/my.sock', '127.0.0.1'
+ ClientSocketAddressKey = attribute.Key("client.socket.address")
+
+ // ClientSocketPortKey is the attribute Key conforming to the
+ // "client.socket.port" semantic conventions. It represents the immediate
+ // client peer port number
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If different than `client.port`.)
+ // Stability: stable
+ // Examples: 35555
+ ClientSocketPortKey = attribute.Key("client.socket.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// unix domain socket name, IPv4 or IPv6 address.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
+// ClientSocketAddress returns an attribute KeyValue conforming to the
+// "client.socket.address" semantic conventions. It represents the immediate
+// client peer address - unix domain socket name, IPv4 or IPv6 address.
+func ClientSocketAddress(val string) attribute.KeyValue {
+ return ClientSocketAddressKey.String(val)
+}
+
+// ClientSocketPort returns an attribute KeyValue conforming to the
+// "client.socket.port" semantic conventions. It represents the immediate
+// client peer port number
+func ClientSocketPort(val int) attribute.KeyValue {
+ return ClientSocketPortKey.Int(val)
+}
+
+// Describes deprecated HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the deprecated, use
+ // `http.request.method` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the deprecated,
+ // use `http.response.status_code` instead.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the deprecated, use `url.scheme`
+ // instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the deprecated, use `url.full` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the deprecated, use `url.path` and
+ // `url.query` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/search?q=OpenTelemetry#SemConv'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // deprecated, use `http.request.body.size` instead.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // deprecated, use `http.response.body.size` instead.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the deprecated, use
+// `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the deprecated, use
+// `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the deprecated, use `url.scheme`
+// instead.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the deprecated, use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the deprecated, use `url.path` and
+// `url.query` instead.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the
+// deprecated, use `http.request.body.size` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the
+// deprecated, use `http.response.body.size` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the deprecated,
+ // use `server.socket.domain` on client spans.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the deprecated,
+ // use `server.socket.address` on client spans and `client.socket.address`
+ // on server spans.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '192.168.0.1'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the deprecated,
+ // use `server.socket.port` on client spans and `client.socket.port` on
+ // server spans.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 65531
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the deprecated, use `server.address`
+ // on client spans and `client.address` on server spans.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the deprecated, use `server.port` on
+ // client spans and `client.port` on server spans.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the deprecated, use
+ // `server.address`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the deprecated, use `server.port`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the deprecated,
+ // use `server.socket.address`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the deprecated,
+ // use `server.socket.port`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the deprecated, use
+ // `network.transport`.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the deprecated,
+ // use `network.protocol.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the
+ // deprecated, use `network.protocol.version`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '3.1.1'
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the deprecated,
+ // use `network.transport` and `network.type`.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the deprecated, use
+// `server.socket.domain` on client spans.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use
+// `server.socket.address` on client spans and `client.socket.address` on
+// server spans.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the deprecated, use
+// `server.socket.port` on client spans and `client.socket.port` on server
+// spans.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the deprecated, use
+// `server.address` on client spans and `client.address` on server spans.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the deprecated, use
+// `server.port` on client spans and `client.port` on server spans.
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the deprecated, use
+// `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the deprecated, use
+// `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the deprecated, use
+// `server.socket.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the deprecated, use
+// `server.socket.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the deprecated, use
+// `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the deprecated,
+// use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+ // DestinationDomainKey is the attribute Key conforming to the
+ // "destination.domain" semantic conventions. It represents the domain name
+ // of the destination system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'foo.example.com'
+ // Note: This value may be a host name, a fully qualified domain name, or
+ // another host naming format.
+ DestinationDomainKey = attribute.Key("destination.domain")
+
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the peer
+ // address, for example IP address or UNIX socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.5.3.2'
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the peer port
+ // number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationDomain returns an attribute KeyValue conforming to the
+// "destination.domain" semantic conventions. It represents the domain name of
+// the destination system.
+func DestinationDomain(val string) attribute.KeyValue {
+ return DestinationDomainKey.String(val)
+}
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the peer address,
+// for example IP address or UNIX socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the peer port number
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Describes HTTP attributes.
+const (
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the hTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER` and, except if reporting
+ // a metric, MUST
+ // set the exact method received in the request line as value of the
+ // `http.request.method_original` attribute.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known method, it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive, SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTP Server attributes
+const (
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain identifies the business
+ // context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means, that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+ // TypeKey is the attribute Key conforming to the "type" semantic
+ // conventions. It represents the type of memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ TypeKey = attribute.Key("type")
+
+ // PoolKey is the attribute Key conforming to the "pool" semantic
+ // conventions. It represents the name of the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ PoolKey = attribute.Key("pool")
+)
+
+var (
+ // Heap memory
+ TypeHeap = TypeKey.String("heap")
+ // Non-heap memory
+ TypeNonHeap = TypeKey.String("non_heap")
+)
+
+// Pool returns an attribute KeyValue conforming to the "pool" semantic
+// conventions. It represents the name of the memory pool.
+func Pool(val string) attribute.KeyValue {
+ return PoolKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the logical server hostname, matches
+ // server FQDN if available, and IP or socket address if FQDN is not known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the logical server port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ ServerPortKey = attribute.Key("server.port")
+
+ // ServerSocketDomainKey is the attribute Key conforming to the
+ // "server.socket.domain" semantic conventions. It represents the domain
+ // name of an immediate peer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If different than `server.address`.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ // Note: Typically observed from the client side, and represents a proxy or
+ // other intermediary domain name.
+ ServerSocketDomainKey = attribute.Key("server.socket.domain")
+
+ // ServerSocketAddressKey is the attribute Key conforming to the
+ // "server.socket.address" semantic conventions. It represents the physical
+ // server IP address or Unix socket address. If set from the client, should
+ // simply use the socket's peer address, and not attempt to find any actual
+ // server IP (i.e., if set from client, this may represent some proxy
+ // server instead of the logical server).
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If different than `server.address`.)
+ // Stability: stable
+ // Examples: '10.5.3.2'
+ ServerSocketAddressKey = attribute.Key("server.socket.address")
+
+ // ServerSocketPortKey is the attribute Key conforming to the
+ // "server.socket.port" semantic conventions. It represents the physical
+ // server port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If different than `server.port`.)
+ // Stability: stable
+ // Examples: 16456
+ ServerSocketPortKey = attribute.Key("server.socket.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the logical server
+// hostname, matches server FQDN if available, and IP or socket address if FQDN
+// is not known.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the logical server port number
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
+// ServerSocketDomain returns an attribute KeyValue conforming to the
+// "server.socket.domain" semantic conventions. It represents the domain name
+// of an immediate peer.
+func ServerSocketDomain(val string) attribute.KeyValue {
+ return ServerSocketDomainKey.String(val)
+}
+
+// ServerSocketAddress returns an attribute KeyValue conforming to the
+// "server.socket.address" semantic conventions. It represents the physical
+// server IP address or Unix socket address. If set from the client, should
+// simply use the socket's peer address, and not attempt to find any actual
+// server IP (i.e., if set from client, this may represent some proxy server
+// instead of the logical server).
+func ServerSocketAddress(val string) attribute.KeyValue {
+ return ServerSocketAddressKey.String(val)
+}
+
+// ServerSocketPort returns an attribute KeyValue conforming to the
+// "server.socket.port" semantic conventions. It represents the physical server
+// port.
+func ServerSocketPort(val int) attribute.KeyValue {
+ return ServerSocketPortKey.Int(val)
+}
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+ // SourceDomainKey is the attribute Key conforming to the "source.domain"
+ // semantic conventions. It represents the domain name of the source
+ // system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'foo.example.com'
+ // Note: This value may be a host name, a fully qualified domain name, or
+ // another host naming format.
+ SourceDomainKey = attribute.Key("source.domain")
+
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address, for example IP
+ // address or Unix socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.5.3.2'
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceDomain returns an attribute KeyValue conforming to the
+// "source.domain" semantic conventions. It represents the domain name of the
+// source system.
+func SourceDomain(val string) attribute.KeyValue {
+ return SourceDomainKey.String(val)
+}
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address, for
+// example IP address or Unix socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // Transport Layer](https://osi-model.com/transport-layer/) or
+ // [Inter-process Communication
+ // method](https://en.wikipedia.org/wiki/Inter-process_communication). The
+ // value SHOULD be normalized to lowercase.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI Network
+ // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The
+ // value SHOULD be normalized to lowercase.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ NetworkTypeKey = attribute.Key("network.type")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // Application Layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // version of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `network.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client used has a version of `0.27.2`, but sends HTTP version
+ // `1.1`, this attribute should be set to `1.1`.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe. See note below
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent. The value SHOULD be normalized to lowercase.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's different
+ // than `http.request.method`.)
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+)
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker does not have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, it's string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the it is essential for FIFO message. Messages that belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Attributes describing URL.
+const (
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it should be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case username and
+ // password should be redacted and attribute's value should be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed) and SHOULD NOT be validated or modified except for
+ // sanitizing purposes.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: When missing, the value is assumed to be `/`
+ URLPathKey = attribute.Key("url.path")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in query string SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+)
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
new file mode 100644
index 000000000..7cf424855
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.21.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
new file mode 100644
index 000000000..30ae34fe4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` maybe be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents the whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the mUST be calculated as two
+ // different counters starting from `1` one for sent messages and one for
+ // received message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the mUST be calculated as two different
+// counters starting from `1` one for sent messages and one for received
+// message.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
index 0d7ba8676..93d3c1760 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
@@ -12,5 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger.
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
new file mode 100644
index 000000000..b6d8935cf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
@@ -0,0 +1,2310 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each write to their own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// Resource used by Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// Resources used by Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+)
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// Heroku dyno metadata
+const (
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // OCI defines a digest of manifest.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches or disk array.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S does not have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, must be unique within a Pod. Container
+ // runtime usually uses different globally unique name (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, like e.g. reported by `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
new file mode 100644
index 000000000..66ffd5989
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
new file mode 100644
index 000000000..b5a91450d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
@@ -0,0 +1,2495 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // contains a value describing the type of event related to the originating
+ // occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// contains a value describing the type of event related to the originating
+// occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named
+// instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents
+// whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Call-level attributes for Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // cosmosDB Operation Type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+ // default))
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// (Request Units) consumed for that operation.
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// (Request Units) consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow to report this unit of code and therefore to provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+	// `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client_id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If a client id is available)
+ // Stability: stable
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client_id")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// Tech-specific attributes for Connect RPC.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If response is not successful
+ // and if error code available.)
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index c2217a28d..3bce1b1e4 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.16.0"
+ return "1.17.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 9dc47532b..94f1c919e 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -14,7 +14,7 @@
module-sets:
stable-v1:
- version: v1.16.0
+ version: v1.17.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
@@ -36,7 +36,7 @@ module-sets:
- go.opentelemetry.io/otel/sdk
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.39.0
+ version: v0.40.0
modules:
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
@@ -50,7 +50,7 @@ module-sets:
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/example/view
experimental-schema:
- version: v0.0.4
+ version: v0.0.5
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
index fc285c089..c1af04e84 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.17.3
+// protoc v3.21.6
// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
package v1
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
index d142c2a44..bb1bd261e 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
@@ -77,20 +77,22 @@ func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace"))
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams)
+ resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -139,19 +141,21 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace"))
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
- resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
+ resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -159,7 +163,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
}
var (
- pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
+ pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, ""))
)
var (
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
index c21f2cb47..dd1b73f1e 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.1.0
-// - protoc v3.17.3
+// - protoc v3.21.6
// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
package v1
diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
index 8502e607b..852209b09 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.17.3
+// protoc v3.21.6
// source: opentelemetry/proto/common/v1/common.proto
package v1
@@ -361,8 +361,11 @@ type InstrumentationScope struct {
unknownFields protoimpl.UnknownFields
// An empty instrumentation scope name means the name is unknown.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ // Additional attributes that describe the scope. [Optional].
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
index bcc1060e3..b7545b03b 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.17.3
+// protoc v3.21.6
// source: opentelemetry/proto/resource/v1/resource.proto
package v1
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
index 499a43d77..51a499816 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.17.3
+// protoc v3.21.6
// source: opentelemetry/proto/trace/v1/trace.proto
package v1
@@ -117,8 +117,8 @@ type Status_StatusCode int32
const (
// The default status.
Status_STATUS_CODE_UNSET Status_StatusCode = 0
- // The Span has been validated by an Application developers or Operator to have
- // completed successfully.
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
Status_STATUS_CODE_OK Status_StatusCode = 1
// The Span contains an error.
Status_STATUS_CODE_ERROR Status_StatusCode = 2
@@ -374,20 +374,16 @@ type Span struct {
unknownFields protoimpl.UnknownFields
// A unique identifier for a trace. All spans from the same trace share
- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes
- // is considered invalid.
- //
- // This field is semantically required. Receiver should generate new
- // random trace_id if empty or invalid trace_id was received.
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
//
// This field is required.
TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
// A unique identifier for a span within a trace, assigned when the span
- // is created. The ID is an 8-byte array. An ID with all zeroes is considered
- // invalid.
- //
- // This field is semantically required. Receiver should generate new
- // random span_id if empty or invalid span_id was received.
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
//
// This field is required.
SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
@@ -433,8 +429,8 @@ type Span struct {
//
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
// "/http/server_latency": 300
- // "abc.com/myattribute": true
- // "abc.com/score": 10.239
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
//
// The OpenTelemetry API specification further restricts the allowed value types:
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
index d64569567..d64569567 100644
--- a/vendor/google.golang.org/genproto/LICENSE
+++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
index 261eeb9e9..d64569567 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
@@ -1,3 +1,4 @@
+
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
index 02f5dc531..49712aca3 100644
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -25,6 +25,11 @@
// later release.
package attributes
+import (
+ "fmt"
+ "strings"
+)
+
// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys. Values should not be modified after they are added to an
@@ -99,3 +104,39 @@ func (a *Attributes) Equal(o *Attributes) bool {
}
return true
}
+
+// String prints the attribute map. If any key or values throughout the map
+// implement fmt.Stringer, it calls that method and appends.
+func (a *Attributes) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ first := true
+ for k, v := range a.m {
+ if !first {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+ first = false
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
+func str(x interface{}) string {
+ if v, ok := x.(fmt.Stringer); ok {
+ return v.String()
+ } else if v, ok := x.(string); ok {
+ return v
+ }
+ return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// Is it impossible to unmarshal attributes from a JSON representation and this
+// method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+ return []byte(a.String()), nil
+}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 09d61dd1b..8f00523c0 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -286,7 +286,7 @@ type PickResult struct {
//
// LB policies with child policies are responsible for propagating metadata
// injected by their children to the ClientConn, as part of Pick().
- Metatada metadata.MD
+ Metadata metadata.MD
}
// TransientFailureError returns e. It exists for backward compatibility and
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index 0359956d3..04b9ad411 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -25,14 +25,20 @@ import (
"sync"
"google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
- "google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
- "google.golang.org/grpc/status"
+)
+
+type ccbMode int
+
+const (
+ ccbModeActive = iota
+ ccbModeIdle
+ ccbModeClosed
+ ccbModeExitingIdle
)
// ccBalancerWrapper sits between the ClientConn and the Balancer.
@@ -49,192 +55,101 @@ import (
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
- cc *ClientConn
-
- // Since these fields are accessed only from handleXxx() methods which are
- // synchronized by the watcher goroutine, we do not need a mutex to protect
- // these fields.
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc *ClientConn
+ opts balancer.BuildOptions
+
+ // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
+ // mutually exclusive manner as they are scheduled in the serializer. Fields
+ // accessed *only* in these serializer callbacks, can therefore be accessed
+ // without a mutex.
balancer *gracefulswitch.Balancer
curBalancerName string
- updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
- resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
- closed *grpcsync.Event // Indicates if close has been called.
- done *grpcsync.Event // Indicates if close has completed its work.
+ // mu guards access to the below fields. Access to the serializer and its
+ // cancel function needs to be mutex protected because they are overwritten
+ // when the wrapper exits idle mode.
+ mu sync.Mutex
+ serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
+ serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
+ mode ccbMode // Tracks the current mode of the wrapper.
}
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
// is not created until the switchTo() method is invoked.
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
+ ctx, cancel := context.WithCancel(context.Background())
ccb := &ccBalancerWrapper{
- cc: cc,
- updateCh: buffer.NewUnbounded(),
- resultCh: buffer.NewUnbounded(),
- closed: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
+ cc: cc,
+ opts: bopts,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
}
- go ccb.watcher()
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
return ccb
}
-// The following xxxUpdate structs wrap the arguments received as part of the
-// corresponding update. The watcher goroutine uses the 'type' of the update to
-// invoke the appropriate handler routine to handle the update.
-
-type ccStateUpdate struct {
- ccs *balancer.ClientConnState
-}
-
-type scStateUpdate struct {
- sc balancer.SubConn
- state connectivity.State
- err error
-}
-
-type exitIdleUpdate struct{}
-
-type resolverErrorUpdate struct {
- err error
-}
-
-type switchToUpdate struct {
- name string
-}
-
-type subConnUpdate struct {
- acbw *acBalancerWrapper
-}
-
-// watcher is a long-running goroutine which reads updates from a channel and
-// invokes corresponding methods on the underlying balancer. It ensures that
-// these methods are invoked in a synchronous fashion. It also ensures that
-// these methods are invoked in the order in which the updates were received.
-func (ccb *ccBalancerWrapper) watcher() {
- for {
- select {
- case u := <-ccb.updateCh.Get():
- ccb.updateCh.Load()
- if ccb.closed.HasFired() {
- break
- }
- switch update := u.(type) {
- case *ccStateUpdate:
- ccb.handleClientConnStateChange(update.ccs)
- case *scStateUpdate:
- ccb.handleSubConnStateChange(update)
- case *exitIdleUpdate:
- ccb.handleExitIdle()
- case *resolverErrorUpdate:
- ccb.handleResolverError(update.err)
- case *switchToUpdate:
- ccb.handleSwitchTo(update.name)
- case *subConnUpdate:
- ccb.handleRemoveSubConn(update.acbw)
- default:
- logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
- }
- case <-ccb.closed.Done():
- }
-
- if ccb.closed.HasFired() {
- ccb.handleClose()
- return
- }
- }
-}
-
// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer.
-//
-// Unlike other methods invoked by grpc to push updates to the underlying
-// balancer, this method cannot simply push the update onto the update channel
-// and return. It needs to return the error returned by the underlying balancer
-// back to grpc which propagates that to the resolver.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
- ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
-
- var res interface{}
- select {
- case res = <-ccb.resultCh.Get():
- ccb.resultCh.Load()
- case <-ccb.closed.Done():
- // Return early if the balancer wrapper is closed while we are waiting for
- // the underlying balancer to process a ClientConnState update.
- return nil
- }
- // If the returned error is nil, attempting to type assert to error leads to
- // panic. So, this needs to handled separately.
- if res == nil {
- return nil
- }
- return res.(error)
-}
-
-// handleClientConnStateChange handles a ClientConnState update from the update
-// channel and invokes the appropriate method on the underlying balancer.
-//
-// If the addresses specified in the update contain addresses of type "grpclb"
-// and the selected LB policy is not "grpclb", these addresses will be filtered
-// out and ccs will be modified with the updated address list.
-func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
- if ccb.curBalancerName != grpclbName {
- // Filter any grpclb addresses since we don't have the grpclb balancer.
- var addrs []resolver.Address
- for _, addr := range ccs.ResolverState.Addresses {
- if addr.Type == resolver.GRPCLB {
- continue
+ ccb.mu.Lock()
+ errCh := make(chan error, 1)
+ // Here and everywhere else where Schedule() is called, it is done with the
+ // lock held. But the lock guards only the scheduling part. The actual
+ // callback is called asynchronously without the lock being held.
+ ok := ccb.serializer.Schedule(func(_ context.Context) {
+ // If the addresses specified in the update contain addresses of type
+ // "grpclb" and the selected LB policy is not "grpclb", these addresses
+ // will be filtered out and ccs will be modified with the updated
+ // address list.
+ if ccb.curBalancerName != grpclbName {
+ var addrs []resolver.Address
+ for _, addr := range ccs.ResolverState.Addresses {
+ if addr.Type == resolver.GRPCLB {
+ continue
+ }
+ addrs = append(addrs, addr)
}
- addrs = append(addrs, addr)
+ ccs.ResolverState.Addresses = addrs
}
- ccs.ResolverState.Addresses = addrs
+ errCh <- ccb.balancer.UpdateClientConnState(*ccs)
+ })
+ if !ok {
+ // If we are unable to schedule a function with the serializer, it
+ // indicates that it has been closed. A serializer is only closed when
+ // the wrapper is closed or is in idle.
+ ccb.mu.Unlock()
+ return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
}
- ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
+ ccb.mu.Unlock()
+
+ // We get here only if the above call to Schedule succeeds, in which case it
+ // is guaranteed that the scheduled function will run. Therefore it is safe
+ // to block on this channel.
+ err := <-errCh
+ if logger.V(2) && err != nil {
+ logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+ }
+ return err
}
// updateSubConnState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
- // When updating addresses for a SubConn, if the address in use is not in
- // the new addresses, the old ac will be tearDown() and a new ac will be
- // created. tearDown() generates a state change with Shutdown state, we
- // don't want the balancer to receive this state change. So before
- // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
- // this function will be called with (nil, Shutdown). We don't need to call
- // balancer method in this case.
- if sc == nil {
- return
- }
- ccb.updateCh.Put(&scStateUpdate{
- sc: sc,
- state: s,
- err: err,
+ ccb.mu.Lock()
+ ccb.serializer.Schedule(func(_ context.Context) {
+ ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
})
-}
-
-// handleSubConnStateChange handles a SubConnState update from the update
-// channel and invokes the appropriate method on the underlying balancer.
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
- ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
-}
-
-func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.updateCh.Put(&exitIdleUpdate{})
-}
-
-func (ccb *ccBalancerWrapper) handleExitIdle() {
- if ccb.cc.GetState() != connectivity.Idle {
- return
- }
- ccb.balancer.ExitIdle()
+ ccb.mu.Unlock()
}
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.updateCh.Put(&resolverErrorUpdate{err: err})
-}
-
-func (ccb *ccBalancerWrapper) handleResolverError(err error) {
- ccb.balancer.ResolverError(err)
+ ccb.mu.Lock()
+ ccb.serializer.Schedule(func(_ context.Context) {
+ ccb.balancer.ResolverError(err)
+ })
+ ccb.mu.Unlock()
}
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
@@ -248,24 +163,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) {
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
// the graceful balancer switching process if the name does not change.
func (ccb *ccBalancerWrapper) switchTo(name string) {
- ccb.updateCh.Put(&switchToUpdate{name: name})
+ ccb.mu.Lock()
+ ccb.serializer.Schedule(func(_ context.Context) {
+ // TODO: Other languages use case-sensitive balancer registries. We should
+ // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+ if strings.EqualFold(ccb.curBalancerName, name) {
+ return
+ }
+ ccb.buildLoadBalancingPolicy(name)
+ })
+ ccb.mu.Unlock()
}
-// handleSwitchTo handles a balancer switch update from the update channel. It
-// calls the SwitchTo() method on the gracefulswitch.Balancer with a
-// balancer.Builder corresponding to name. If no balancer.Builder is registered
-// for the given name, it uses the default LB policy which is "pick_first".
-func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
- // TODO: Other languages use case-insensitive balancer registries. We should
- // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
- if strings.EqualFold(ccb.curBalancerName, name) {
- return
- }
-
- // TODO: Ensure that name is a registered LB policy when we get here.
- // We currently only validate the `loadBalancingConfig` field. We need to do
- // the same for the `loadBalancingPolicy` field and reject the service config
- // if the specified policy is not registered.
+// buildLoadBalancingPolicy performs the following:
+// - retrieve a balancer builder for the given name. Use the default LB
+// policy, pick_first, if no LB policy with name is found in the registry.
+// - instruct the gracefulswitch balancer to switch to the above builder. This
+// will actually build the new balancer.
+// - update the `curBalancerName` field
+//
+// Must be called from a serializer callback.
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
builder := balancer.Get(name)
if builder == nil {
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
@@ -281,26 +199,114 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
ccb.curBalancerName = builder.Name()
}
-// handleRemoveSucConn handles a request from the underlying balancer to remove
-// a subConn.
-//
-// See comments in RemoveSubConn() for more details.
-func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+func (ccb *ccBalancerWrapper) close() {
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
+ ccb.closeBalancer(ccbModeClosed)
}
-func (ccb *ccBalancerWrapper) close() {
- ccb.closed.Fire()
- <-ccb.done.Done()
+// enterIdleMode is invoked by grpc when the channel enters idle mode upon
+// expiry of idle_timeout. This call blocks until the balancer is closed.
+func (ccb *ccBalancerWrapper) enterIdleMode() {
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
+ ccb.closeBalancer(ccbModeIdle)
+}
+
+// closeBalancer is invoked when the channel is being closed or when it enters
+// idle mode upon expiry of idle_timeout.
+func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
+ ccb.mu.Lock()
+ if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
+ ccb.mu.Unlock()
+ return
+ }
+
+ ccb.mode = m
+ done := ccb.serializer.Done
+ b := ccb.balancer
+ ok := ccb.serializer.Schedule(func(_ context.Context) {
+ // Close the serializer to ensure that no more calls from gRPC are sent
+ // to the balancer.
+ ccb.serializerCancel()
+ // Empty the current balancer name because we don't have a balancer
+ // anymore and also so that we act on the next call to switchTo by
+ // creating a new balancer specified by the new resolver.
+ ccb.curBalancerName = ""
+ })
+ if !ok {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+
+ // Give enqueued callbacks a chance to finish.
+ <-done
+ // Spawn a goroutine to close the balancer (since it may block trying to
+ // cleanup all allocated resources) and return early.
+ go b.Close()
}
-func (ccb *ccBalancerWrapper) handleClose() {
- ccb.balancer.Close()
- ccb.done.Fire()
+// exitIdleMode is invoked by grpc when the channel exits idle mode either
+// because of an RPC or because of an invocation of the Connect() API. This
+// recreates the balancer that was closed previously when entering idle mode.
+//
+// If the channel is not in idle mode, we know for a fact that we are here as a
+// result of the user calling the Connect() method on the ClientConn. In this
+// case, we can simply forward the call to the underlying balancer, instructing
+// it to reconnect to the backends.
+func (ccb *ccBalancerWrapper) exitIdleMode() {
+ ccb.mu.Lock()
+ if ccb.mode == ccbModeClosed {
+ // Request to exit idle is a no-op when wrapper is already closed.
+ ccb.mu.Unlock()
+ return
+ }
+
+ if ccb.mode == ccbModeIdle {
+ // Recreate the serializer which was closed when we entered idle.
+ ctx, cancel := context.WithCancel(context.Background())
+ ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
+ ccb.serializerCancel = cancel
+ }
+
+ // The ClientConn guarantees that mutual exclusion between close() and
+ // exitIdleMode(), and since we just created a new serializer, we can be
+ // sure that the below function will be scheduled.
+ done := make(chan struct{})
+ ccb.serializer.Schedule(func(_ context.Context) {
+ defer close(done)
+
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+
+ if ccb.mode != ccbModeIdle {
+ ccb.balancer.ExitIdle()
+ return
+ }
+
+ // Gracefulswitch balancer does not support a switchTo operation after
+ // being closed. Hence we need to create a new one here.
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+ ccb.mode = ccbModeActive
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
+
+ })
+ ccb.mu.Unlock()
+
+ <-done
+}
+
+func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+ return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- if len(addrs) <= 0 {
+ if ccb.isIdleOrClosed() {
+ return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
+ }
+
+ if len(addrs) == 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
ac, err := ccb.cc.newAddrConn(addrs, opts)
@@ -309,31 +315,35 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
return nil, err
}
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
- acbw.ac.mu.Lock()
ac.acbw = acbw
- acbw.ac.mu.Unlock()
return acbw, nil
}
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
- // was required to handle the RemoveSubConn() method asynchronously by pushing
- // the update onto the update channel. This was done to avoid a deadlock as
- // switchBalancer() was holding cc.mu when calling Close() on the old
- // balancer, which would in turn call RemoveSubConn().
- //
- // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
- // asynchronously is probably not required anymore since the switchTo() method
- // handles the balancer switch by pushing the update onto the channel.
- // TODO(easwars): Handle this inline.
+ if ccb.isIdleOrClosed() {
+ // It it safe to ignore this call when the balancer is closed or in idle
+ // because the ClientConn takes care of closing the connections.
+ //
+ // Not returning early from here when the balancer is closed or in idle
+ // leads to a deadlock though, because of the following sequence of
+ // calls when holding cc.mu:
+ // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
+ // ccb.RemoveAddrConn --> cc.removeAddrConn
+ return
+ }
+
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
- ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
+ ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+ if ccb.isIdleOrClosed() {
+ return
+ }
+
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
@@ -342,6 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+ if ccb.isIdleOrClosed() {
+ return
+ }
+
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
@@ -352,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
}
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+ if ccb.isIdleOrClosed() {
+ return
+ }
+
ccb.cc.resolveNow(o)
}
@@ -362,71 +380,31 @@ func (ccb *ccBalancerWrapper) Target() string {
// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
+ ac *addrConn // read-only
+
mu sync.Mutex
- ac *addrConn
producers map[balancer.ProducerBuilder]*refCountedProducer
}
-func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- if len(addrs) <= 0 {
- acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
- return
- }
- if !acbw.ac.tryUpdateAddrs(addrs) {
- cc := acbw.ac.cc
- opts := acbw.ac.scopts
- acbw.ac.mu.Lock()
- // Set old ac.acbw to nil so the Shutdown state update will be ignored
- // by balancer.
- //
- // TODO(bar) the state transition could be wrong when tearDown() old ac
- // and creating new ac, fix the transition.
- acbw.ac.acbw = nil
- acbw.ac.mu.Unlock()
- acState := acbw.ac.getState()
- acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
-
- if acState == connectivity.Shutdown {
- return
- }
+func (acbw *acBalancerWrapper) String() string {
+ return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
+}
- newAC, err := cc.newAddrConn(addrs, opts)
- if err != nil {
- channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
- return
- }
- acbw.ac = newAC
- newAC.mu.Lock()
- newAC.acbw = acbw
- newAC.mu.Unlock()
- if acState != connectivity.Idle {
- go newAC.connect()
- }
- }
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+ acbw.ac.updateAddrs(addrs)
}
func (acbw *acBalancerWrapper) Connect() {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
go acbw.ac.connect()
}
-func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- return acbw.ac
-}
-
-var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")
-
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
-// ready, returns errSubConnNotReady.
+// ready, blocks until it is or ctx expires. Returns an error when the context
+// expires or the addrConn is shut down.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- transport := acbw.ac.getReadyTransport()
- if transport == nil {
- return nil, errSubConnNotReady
+ transport, err := acbw.ac.getTransport(ctx)
+ if err != nil {
+ return nil, err
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 9e20e4d38..e6a1dc5d7 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -27,6 +27,11 @@ import (
//
// All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+ if err := cc.idlenessMgr.onCallBegin(); err != nil {
+ return err
+ }
+ defer cc.idlenessMgr.onCallEnd()
+
// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 3a7614242..bfd7555a8 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -24,7 +24,6 @@ import (
"fmt"
"math"
"net/url"
- "reflect"
"strings"
"sync"
"sync/atomic"
@@ -38,6 +37,7 @@ import (
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/pretty"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
@@ -69,6 +69,9 @@ var (
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
+	// errConnIdling indicates that the connection is being closed as the channel
+ // is moving to an idle mode due to inactivity.
+ errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@@ -134,17 +137,29 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
- target: target,
- csMgr: &connectivityStateManager{},
- conns: make(map[*addrConn]struct{}),
- dopts: defaultDialOptions(),
- blockingpicker: newPickerWrapper(),
- czData: new(channelzData),
- firstResolveEvent: grpcsync.NewEvent(),
- }
+ target: target,
+ csMgr: &connectivityStateManager{},
+ conns: make(map[*addrConn]struct{}),
+ dopts: defaultDialOptions(),
+ czData: new(channelzData),
+ }
+
+ // We start the channel off in idle mode, but kick it out of idle at the end
+ // of this method, instead of waiting for the first RPC. Other gRPC
+ // implementations do wait for the first RPC to kick the channel out of
+ // idle. But doing so would be a major behavior change for our users who are
+ // used to seeing the channel active after Dial.
+ //
+ // Taking this approach of kicking it out of idle at the end of this method
+ // allows us to share the code between channel creation and exiting idle
+ // mode. This will also make it easy for us to switch to starting the
+ // channel off in idle, if at all we ever get to do that.
+ cc.idlenessState = ccIdlenessStateIdle
+
cc.retryThrottler.Store((*retryThrottler)(nil))
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
+ cc.exitIdleCond = sync.NewCond(&cc.mu)
disableGlobalOpts := false
for _, opt := range opts {
@@ -173,40 +188,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
- pid := cc.dopts.channelzParentID
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
- ted := &channelz.TraceEventDesc{
- Desc: "Channel created",
- Severity: channelz.CtInfo,
- }
- if cc.dopts.channelzParentID != nil {
- ted.Parent = &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
- Severity: channelz.CtInfo,
- }
- }
- channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
- cc.csMgr.channelzID = cc.channelzID
+ // Register ClientConn with channelz.
+ cc.channelzRegistration(target)
- if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
- return nil, errNoTransportSecurity
- }
- if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
- return nil, errTransportCredsAndBundle
- }
- if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
- return nil, errNoTransportCredsInBundle
- }
- transportCreds := cc.dopts.copts.TransportCredentials
- if transportCreds == nil {
- transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
- }
- if transportCreds.Info().SecurityProtocol == "insecure" {
- for _, cd := range cc.dopts.copts.PerRPCCredentials {
- if cd.RequireTransportSecurity() {
- return nil, errTransportCredentialsMissing
- }
- }
+ if err := cc.validateTransportCredentials(); err != nil {
+ return nil, err
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
@@ -249,15 +235,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
// Determine the resolver to use.
- resolverBuilder, err := cc.parseTargetAndFindResolver()
- if err != nil {
+ if err := cc.parseTargetAndFindResolver(); err != nil {
return nil, err
}
- cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
- if err != nil {
+ if err = cc.determineAuthority(); err != nil {
return nil, err
}
- channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
if cc.dopts.scChan != nil {
// Blocking wait for the initial service config.
@@ -275,57 +258,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
go cc.scWatcher()
}
+ // This creates the name resolver, load balancer, blocking picker etc.
+ if err := cc.exitIdleMode(); err != nil {
+ return nil, err
+ }
+
+ // Configure idleness support with configured idle timeout or default idle
+ // timeout duration. Idleness can be explicitly disabled by the user, by
+ // setting the dial option to 0.
+ cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout)
+
+ // Return early for non-blocking dials.
+ if !cc.dopts.block {
+ return cc, nil
+ }
+
+ // A blocking dial blocks until the clientConn is ready.
+ for {
+ s := cc.GetState()
+ if s == connectivity.Idle {
+ cc.Connect()
+ }
+ if s == connectivity.Ready {
+ return cc, nil
+ } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+ if err = cc.connectionError(); err != nil {
+ terr, ok := err.(interface {
+ Temporary() bool
+ })
+ if ok && !terr.Temporary() {
+ return nil, err
+ }
+ }
+ }
+ if !cc.WaitForStateChange(ctx, s) {
+ // ctx got timeout or canceled.
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+ return nil, err
+ }
+ return nil, ctx.Err()
+ }
+ }
+}
+
+// addTraceEvent is a helper method to add a trace event on the channel. If the
+// channel is a nested one, the same event is also added on the parent channel.
+func (cc *ClientConn) addTraceEvent(msg string) {
+ ted := &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Channel %s", msg),
+ Severity: channelz.CtInfo,
+ }
+ if cc.dopts.channelzParentID != nil {
+ ted.Parent = &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg),
+ Severity: channelz.CtInfo,
+ }
+ }
+ channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
+}
+
+// exitIdleMode moves the channel out of idle mode by recreating the name
+// resolver and load balancer.
+func (cc *ClientConn) exitIdleMode() error {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return errConnClosing
+ }
+ if cc.idlenessState != ccIdlenessStateIdle {
+ cc.mu.Unlock()
+ logger.Info("ClientConn asked to exit idle mode when not in idle mode")
+ return nil
+ }
+
+ defer func() {
+ // When Close() and exitIdleMode() race against each other, one of the
+ // following two can happen:
+ // - Close() wins the race and runs first. exitIdleMode() runs after, and
+ // sees that the ClientConn is already closed and hence returns early.
+ // - exitIdleMode() wins the race and runs first and recreates the balancer
+ // and releases the lock before recreating the resolver. If Close() runs
+ // in this window, it will wait for exitIdleMode to complete.
+ //
+ // We achieve this synchronization using the below condition variable.
+ cc.mu.Lock()
+ cc.idlenessState = ccIdlenessStateActive
+ cc.exitIdleCond.Signal()
+ cc.mu.Unlock()
+ }()
+
+ cc.idlenessState = ccIdlenessStateExitingIdle
+ exitedIdle := false
+ if cc.blockingpicker == nil {
+ cc.blockingpicker = newPickerWrapper()
+ } else {
+ cc.blockingpicker.exitIdleMode()
+ exitedIdle = true
+ }
+
var credsClone credentials.TransportCredentials
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
credsClone = creds.Clone()
}
- cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- Authority: cc.authority,
- CustomUserAgent: cc.dopts.copts.UserAgent,
- ChannelzParentID: cc.channelzID,
- Target: cc.parsedTarget,
- })
+ if cc.balancerWrapper == nil {
+ cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
+ DialCreds: credsClone,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
+ Authority: cc.authority,
+ CustomUserAgent: cc.dopts.copts.UserAgent,
+ ChannelzParentID: cc.channelzID,
+ Target: cc.parsedTarget,
+ })
+ } else {
+ cc.balancerWrapper.exitIdleMode()
+ }
+ cc.firstResolveEvent = grpcsync.NewEvent()
+ cc.mu.Unlock()
- // Build the resolver.
- rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
- if err != nil {
- return nil, fmt.Errorf("failed to build resolver: %v", err)
+ // This needs to be called without cc.mu because this builds a new resolver
+ // which might update state or report error inline which needs to be handled
+ // by cc.updateResolverState() which also grabs cc.mu.
+ if err := cc.initResolverWrapper(credsClone); err != nil {
+ return err
+ }
+
+ if exitedIdle {
+ cc.addTraceEvent("exiting idle mode")
}
+ return nil
+}
+
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
+// name resolver, load balancer and any subchannels.
+func (cc *ClientConn) enterIdleMode() error {
cc.mu.Lock()
- cc.resolverWrapper = rWrapper
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return ErrClientConnClosing
+ }
+ if cc.idlenessState != ccIdlenessStateActive {
+ logger.Error("ClientConn asked to enter idle mode when not active")
+ return nil
+ }
+
+ // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+ // of setting it to nil here, we recreate the map. This also means that we
+ // don't have to do this when exiting idle mode.
+ conns := cc.conns
+ cc.conns = make(map[*addrConn]struct{})
+
+ // TODO: Currently, we close the resolver wrapper upon entering idle mode
+ // and create a new one upon exiting idle mode. This means that the
+	// `cc.resolverWrapper` field would be overwritten every time we exit idle
+ // mode. While this means that we need to hold `cc.mu` when accessing
+ // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
+ // try to do the same for the balancer and picker wrappers too.
+ cc.resolverWrapper.close()
+ cc.blockingpicker.enterIdleMode()
+ cc.balancerWrapper.enterIdleMode()
+ cc.csMgr.updateState(connectivity.Idle)
+ cc.idlenessState = ccIdlenessStateIdle
cc.mu.Unlock()
- // A blocking dial blocks until the clientConn is ready.
- if cc.dopts.block {
- for {
- cc.Connect()
- s := cc.GetState()
- if s == connectivity.Ready {
- break
- } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
- if err = cc.connectionError(); err != nil {
- terr, ok := err.(interface {
- Temporary() bool
- })
- if ok && !terr.Temporary() {
- return nil, err
- }
- }
- }
- if !cc.WaitForStateChange(ctx, s) {
- // ctx got timeout or canceled.
- if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
- return nil, err
- }
- return nil, ctx.Err()
+ go func() {
+ cc.addTraceEvent("entering idle mode")
+ for ac := range conns {
+ ac.tearDown(errConnIdling)
+ }
+ }()
+ return nil
+}
+
+// validateTransportCredentials performs a series of checks on the configured
+// transport credentials. It returns a non-nil error if any of these conditions
+// are met:
+// - no transport creds and no creds bundle is configured
+// - both transport creds and creds bundle are configured
+//   - creds bundle is configured, but it lacks transport credentials
+// - insecure transport creds configured alongside call creds that require
+// transport level security
+//
+// If none of the above conditions are met, the configured credentials are
+// deemed valid and a nil error is returned.
+func (cc *ClientConn) validateTransportCredentials() error {
+ if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+ return errNoTransportSecurity
+ }
+ if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+ return errTransportCredsAndBundle
+ }
+ if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+ return errNoTransportCredsInBundle
+ }
+ transportCreds := cc.dopts.copts.TransportCredentials
+ if transportCreds == nil {
+ transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+ }
+ if transportCreds.Info().SecurityProtocol == "insecure" {
+ for _, cd := range cc.dopts.copts.PerRPCCredentials {
+ if cd.RequireTransportSecurity() {
+ return errTransportCredentialsMissing
}
}
}
+ return nil
+}
- return cc, nil
+// channelzRegistration registers the newly created ClientConn with channelz and
+// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
+// A channelz trace event is emitted for ClientConn creation. If the newly
+// created ClientConn is a nested one, i.e a valid parent ClientConn ID is
+// specified via a dial option, the trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+ cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
+ cc.addTraceEvent("created")
+ cc.csMgr.channelzID = cc.channelzID
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -471,7 +621,9 @@ type ClientConn struct {
authority string // See determineAuthority().
dopts dialOptions // Default and user specified dial options.
channelzID *channelz.Identifier // Channelz identifier for the channel.
+ resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
+ idlenessMgr idlenessManager
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@@ -492,11 +644,31 @@ type ClientConn struct {
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+ idlenessState ccIdlenessState // Tracks idleness state of the channel.
+ exitIdleCond *sync.Cond // Signalled when channel exits idle.
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
}
+// ccIdlenessState tracks the idleness state of the channel.
+//
+// Channels start off in `active` and move to `idle` after a period of
+// inactivity. When moving back to `active` upon an incoming RPC, they
+// transition through `exiting_idle`. This state is useful for synchronization
+// with Close().
+//
+// This state tracking is mostly for self-protection. The idlenessManager is
+// expected to keep track of the state as well, and is expected not to call into
+// the ClientConn unnecessarily.
+type ccIdlenessState int8
+
+const (
+ ccIdlenessStateActive ccIdlenessState = iota
+ ccIdlenessStateIdle
+ ccIdlenessStateExitingIdle
+)
+
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
@@ -536,7 +708,10 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
- cc.balancerWrapper.exitIdle()
+ cc.exitIdleMode()
+ // If the ClientConn was not in idle mode, we need to call ExitIdle on the
+ // LB policy so that connections can be created.
+ cc.balancerWrapper.exitIdleMode()
}
func (cc *ClientConn) scWatcher() {
@@ -693,6 +868,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
cc.balancerWrapper.updateSubConnState(sc, s, err)
}
+// Makes a copy of the input addresses slice and clears out the balancer
+// attributes field. Addresses are passed during subconn creation and address
+// update operations. In both cases, we will clear the balancer attributes by
+// calling this function, and therefore we will be able to use the Equal method
+// provided by the resolver.Address type for comparison.
+func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+ out := make([]resolver.Address, len(in))
+ for i := range in {
+ out[i] = in[i]
+ out[i].BalancerAttributes = nil
+ }
+ return out
+}
+
// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
//
// Caller needs to make sure len(addrs) > 0.
@@ -700,11 +889,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
ac := &addrConn{
state: connectivity.Idle,
cc: cc,
- addrs: addrs,
+ addrs: copyAddressesWithoutBalancerAttributes(addrs),
scopts: opts,
dopts: cc.dopts,
czData: new(channelzData),
resetBackoff: make(chan struct{}),
+ stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Track ac in cc. This needs to be done before any getTransport(...) is called.
@@ -798,9 +988,6 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
- // Update connectivity state within the lock to prevent subsequent or
- // concurrent calls from resetting the transport more than once.
- ac.updateConnectivityState(connectivity.Connecting, nil)
ac.mu.Unlock()
ac.resetTransport()
@@ -819,58 +1006,63 @@ func equalAddresses(a, b []resolver.Address) bool {
return true
}
-// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
-//
-// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
-// addresses will be picked up by retry in the next iteration after backoff.
-//
-// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
-//
-// If the addresses is the same as the old list, it does nothing and returns
-// true.
-//
-// If ac is Connecting, it returns false. The caller should tear down the ac and
-// create a new one. Note that the backoff will be reset when this happens.
-//
-// If ac is Ready, it checks whether current connected address of ac is in the
-// new addrs list.
-// - If true, it updates ac.addrs and returns true. The ac will keep using
-// the existing connection.
-// - If false, it does nothing and returns false.
-func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+// updateAddrs updates ac.addrs with the new addresses list and handles active
+// connections or connection attempts.
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.mu.Lock()
- defer ac.mu.Unlock()
- channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+ channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))
+
+ addrs = copyAddressesWithoutBalancerAttributes(addrs)
+ if equalAddresses(ac.addrs, addrs) {
+ ac.mu.Unlock()
+ return
+ }
+
+ ac.addrs = addrs
+
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
- ac.addrs = addrs
- return true
+ // We were not connecting, so do nothing but update the addresses.
+ ac.mu.Unlock()
+ return
}
- if equalAddresses(ac.addrs, addrs) {
- return true
+ if ac.state == connectivity.Ready {
+ // Try to find the connected address.
+ for _, a := range addrs {
+ a.ServerName = ac.cc.getServerName(a)
+ if a.Equal(ac.curAddr) {
+ // We are connected to a valid address, so do nothing but
+ // update the addresses.
+ ac.mu.Unlock()
+ return
+ }
+ }
}
- if ac.state == connectivity.Connecting {
- return false
- }
+ // We are either connected to the wrong address or currently connecting.
+ // Stop the current iteration and restart.
- // ac.state is Ready, try to find the connected address.
- var curAddrFound bool
- for _, a := range addrs {
- a.ServerName = ac.cc.getServerName(a)
- if reflect.DeepEqual(ac.curAddr, a) {
- curAddrFound = true
- break
- }
+ ac.cancel()
+ ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
+
+ // We have to defer here because GracefulClose => Close => onClose, which
+ // requires locking ac.mu.
+ if ac.transport != nil {
+ defer ac.transport.GracefulClose()
+ ac.transport = nil
}
- channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
- if curAddrFound {
- ac.addrs = addrs
+
+ if len(addrs) == 0 {
+ ac.updateConnectivityState(connectivity.Idle, nil)
}
- return curAddrFound
+ ac.mu.Unlock()
+
+ // Since we were connecting/connected, we should start a new connection
+ // attempt.
+ go ac.resetTransport()
}
// getServerName determines the serverName to be used in the connection
@@ -1023,39 +1215,40 @@ func (cc *ClientConn) Close() error {
cc.mu.Unlock()
return ErrClientConnClosing
}
+
+ for cc.idlenessState == ccIdlenessStateExitingIdle {
+ cc.exitIdleCond.Wait()
+ }
+
conns := cc.conns
cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
+ pWrapper := cc.blockingpicker
rWrapper := cc.resolverWrapper
- cc.resolverWrapper = nil
bWrapper := cc.balancerWrapper
+ idlenessMgr := cc.idlenessMgr
cc.mu.Unlock()
// The order of closing matters here since the balancer wrapper assumes the
// picker is closed before it is closed.
- cc.blockingpicker.close()
+ if pWrapper != nil {
+ pWrapper.close()
+ }
if bWrapper != nil {
bWrapper.close()
}
if rWrapper != nil {
rWrapper.close()
}
+ if idlenessMgr != nil {
+ idlenessMgr.close()
+ }
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
}
- ted := &channelz.TraceEventDesc{
- Desc: "Channel deleted",
- Severity: channelz.CtInfo,
- }
- if cc.dopts.channelzParentID != nil {
- ted.Parent = &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
- Severity: channelz.CtInfo,
- }
- }
- channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
+ cc.addTraceEvent("deleted")
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from being
// deleted right away.
@@ -1085,7 +1278,8 @@ type addrConn struct {
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
+ state connectivity.State
+ stateChan chan struct{} // closed and recreated on every state change.
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@@ -1099,6 +1293,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
+ // When changing states, reset the state change channel.
+ close(ac.stateChan)
+ ac.stateChan = make(chan struct{})
ac.state = s
if lastErr == nil {
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
@@ -1124,7 +1321,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
func (ac *addrConn) resetTransport() {
ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
+ acCtx := ac.ctx
+ if acCtx.Err() != nil {
ac.mu.Unlock()
return
}
@@ -1152,15 +1350,14 @@ func (ac *addrConn) resetTransport() {
ac.updateConnectivityState(connectivity.Connecting, nil)
ac.mu.Unlock()
- if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+ if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
ac.cc.resolveNow(resolver.ResolveNowOptions{})
// After exhausting all addresses, the addrConn enters
// TRANSIENT_FAILURE.
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
+ if acCtx.Err() != nil {
return
}
+ ac.mu.Lock()
ac.updateConnectivityState(connectivity.TransientFailure, err)
// Backoff.
@@ -1175,13 +1372,13 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
case <-b:
timer.Stop()
- case <-ac.ctx.Done():
+ case <-acCtx.Done():
timer.Stop()
return
}
ac.mu.Lock()
- if ac.state != connectivity.Shutdown {
+ if acCtx.Err() == nil {
ac.updateConnectivityState(connectivity.Idle, err)
}
ac.mu.Unlock()
@@ -1196,14 +1393,13 @@ func (ac *addrConn) resetTransport() {
// tryAllAddrs tries to creates a connection to the addresses, and stop when at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
var firstConnErr error
for _, addr := range addrs {
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
+ if ctx.Err() != nil {
return errConnClosing
}
+ ac.mu.Lock()
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@@ -1217,7 +1413,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
- err := ac.createTransport(addr, copts, connectDeadline)
+ err := ac.createTransport(ctx, addr, copts, connectDeadline)
if err == nil {
return nil
}
@@ -1234,19 +1430,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
// createTransport creates a connection to addr. It returns an error if the
// address was not successfully connected, or updates ac appropriately with the
// new transport.
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
addr.ServerName = ac.cc.getServerName(addr)
- hctx, hcancel := context.WithCancel(ac.ctx)
+ hctx, hcancel := context.WithCancel(ctx)
onClose := func(r transport.GoAwayReason) {
ac.mu.Lock()
defer ac.mu.Unlock()
// adjust params based on GoAwayReason
ac.adjustParams(r)
- if ac.state == connectivity.Shutdown {
- // Already shut down. tearDown() already cleared the transport and
- // canceled hctx via ac.ctx, and we expected this connection to be
- // closed, so do nothing here.
+ if ctx.Err() != nil {
+ // Already shut down or connection attempt canceled. tearDown() or
+ // updateAddrs() already cleared the transport and canceled hctx
+ // via ac.ctx, and we expected this connection to be closed, so do
+ // nothing here.
return
}
hcancel()
@@ -1265,7 +1462,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
ac.updateConnectivityState(connectivity.Idle, nil)
}
- connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+ connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
defer cancel()
copts.ChannelzParentID = ac.channelzID
@@ -1282,7 +1479,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
ac.mu.Lock()
defer ac.mu.Unlock()
- if ac.state == connectivity.Shutdown {
+ if ctx.Err() != nil {
// This can happen if the subConn was removed while in `Connecting`
// state. tearDown() would have set the state to `Shutdown`, but
// would not have closed the transport since ac.transport would not
@@ -1294,6 +1491,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
// The error we pass to Close() is immaterial since there are no open
// streams at this point, so no trailers with error details will be sent
// out. We just need to pass a non-nil error.
+ //
+ // This can also happen when updateAddrs is called during a connection
+ // attempt.
go newTr.Close(transport.ErrConnClosing)
return nil
}
@@ -1401,6 +1601,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
return nil
}
+// getTransport waits until the addrconn is ready and returns the transport.
+// If the context expires first, returns an appropriate status. If the
+// addrConn is stopped first, returns an Unavailable status error.
+func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
+ for ctx.Err() == nil {
+ ac.mu.Lock()
+ t, state, sc := ac.transport, ac.state, ac.stateChan
+ ac.mu.Unlock()
+ if state == connectivity.Ready {
+ return t, nil
+ }
+ if state == connectivity.Shutdown {
+ return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
+ }
+
+ select {
+ case <-ctx.Done():
+ case <-sc:
+ }
+ }
+ return nil, status.FromContextError(ctx.Err()).Err()
+}
+
// tearDown starts to tear down the addrConn.
//
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@@ -1552,7 +1775,14 @@ func (cc *ClientConn) connectionError() error {
return cc.lastConnectionError
}
-func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
+// parseTargetAndFindResolver parses the user's dial target and stores the
+// parsed target in `cc.parsedTarget`.
+//
+// The resolver to use is determined based on the scheme in the parsed target
+// and the same is stored in `cc.resolverBuilder`.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) parseTargetAndFindResolver() error {
channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
var rb resolver.Builder
@@ -1564,7 +1794,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
- return rb, nil
+ cc.resolverBuilder = rb
+ return nil
}
}
@@ -1579,38 +1810,98 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
parsedTarget, err = parseTarget(canonicalTarget)
if err != nil {
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
- return nil, err
+ return err
}
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb == nil {
- return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
+ return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
}
cc.parsedTarget = parsedTarget
- return rb, nil
+ cc.resolverBuilder = rb
+ return nil
}
// parseTarget uses RFC 3986 semantics to parse the given target into a
-// resolver.Target struct containing scheme, authority and url. Query
-// params are stripped from the endpoint.
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
func parseTarget(target string) (resolver.Target, error) {
u, err := url.Parse(target)
if err != nil {
return resolver.Target{}, err
}
- return resolver.Target{
- Scheme: u.Scheme,
- Authority: u.Host,
- URL: *u,
- }, nil
+ return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+ const upperhex = "0123456789ABCDEF"
+
+ // Return for characters that must be escaped as per
+ // Valid chars are mentioned here:
+ // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+ shouldEscape := func(c byte) bool {
+ // Alphanum are always allowed.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ return false
+ }
+ switch c {
+ case '-', '_', '.', '~': // Unreserved characters
+ return false
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+ return false
+ case ':', '[', ']', '@': // Authority related delimeters
+ return false
+ }
+ // Everything else must be escaped.
+ return true
+ }
+
+ hexCount := 0
+ for i := 0; i < len(authority); i++ {
+ c := authority[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return authority
+ }
+
+ required := len(authority) + 2*hexCount
+ t := make([]byte, required)
+
+ j := 0
+ // This logic is a barebones version of escape in the go net/url library.
+ for i := 0; i < len(authority); i++ {
+ switch c := authority[i]; {
+ case shouldEscape(c):
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ default:
+ t[j] = authority[i]
+ j++
+ }
+ }
+ return string(t)
}
// Determine channel authority. The order of precedence is as follows:
// - user specified authority override using `WithAuthority` dial option
// - creds' notion of server name for the authentication handshake
// - endpoint from dial target of the form "scheme://[authority]/endpoint"
-func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
+//
+// Stores the determined authority in `cc.authority`.
+//
+// Returns a non-nil error if the authority returned by the transport
+// credentials do not match the authority configured through the dial option.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) determineAuthority() error {
+ dopts := cc.dopts
// Historically, we had two options for users to specify the serverName or
// authority for a channel. One was through the transport credentials
// (either in its constructor, or through the OverrideServerName() method).
@@ -1627,25 +1918,62 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err
}
authorityFromDialOption := dopts.authority
if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
- return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+ return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
}
+ endpoint := cc.parsedTarget.Endpoint()
+ target := cc.target
switch {
case authorityFromDialOption != "":
- return authorityFromDialOption, nil
+ cc.authority = authorityFromDialOption
case authorityFromCreds != "":
- return authorityFromCreds, nil
+ cc.authority = authorityFromCreds
case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
// TODO: remove when the unix resolver implements optional interface to
// return channel authority.
- return "localhost", nil
+ cc.authority = "localhost"
case strings.HasPrefix(endpoint, ":"):
- return "localhost" + endpoint, nil
+ cc.authority = "localhost" + endpoint
default:
// TODO: Define an optional interface on the resolver builder to return
// the channel authority given the user's dial target. For resolvers
// which don't implement this interface, we will use the endpoint from
// "scheme://authority/endpoint" as the default authority.
- return endpoint, nil
+ // Escape the endpoint to handle use cases where the endpoint
+ // might not be a valid authority by default.
+ // For example an endpoint which has multiple paths like
+ // 'a/b/c', which is not a valid authority by default.
+ cc.authority = encodeAuthority(endpoint)
}
+ channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
+ return nil
+}
+
+// initResolverWrapper creates a ccResolverWrapper, which builds the name
+// resolver. This method grabs the lock to assign the newly built resolver
+// wrapper to the cc.resolverWrapper field.
+func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error {
+ rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{
+ target: cc.parsedTarget,
+ builder: cc.resolverBuilder,
+ bOpts: resolver.BuildOptions{
+ DisableServiceConfig: cc.dopts.disableServiceConfig,
+ DialCreds: creds,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
+ },
+ channelzID: cc.channelzID,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to build resolver: %v", err)
+ }
+ // Resolver implementations may report state update or error inline when
+ // built (or right after), and this is handled in cc.updateResolverState.
+ // Also, an error from the resolver might lead to a re-resolution request
+ // from the balancer, which is handled in resolveNow() where
+ // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here.
+ cc.mu.Lock()
+ cc.resolverWrapper = rw
+ cc.mu.Unlock()
+ return nil
}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index cdc8263bd..23ea95237 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -77,6 +77,8 @@ type dialOptions struct {
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
+ idleTimeout time.Duration
+ recvBufferPool SharedBufferPool
}
// DialOption configures how we set up the connection.
@@ -627,6 +629,7 @@ func defaultDialOptions() dialOptions {
ReadBufferSize: defaultReadBufSize,
UseProxy: true,
},
+ recvBufferPool: nopBufferPool{},
}
}
@@ -655,3 +658,44 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
o.resolvers = append(o.resolvers, rs...)
})
}
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
+// and as a result the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// By default this feature is disabled, which can also be explicitly configured
+// by passing zero to this function.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.idleTimeout = d
+ })
+}
+
+// WithRecvBufferPool returns a DialOption that configures the ClientConn
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.recvBufferPool = bufferPool
+ })
+}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
new file mode 100644
index 000000000..142d35f75
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -0,0 +1,308 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v4.22.0
+// source: grpc/health/v1/health.proto
+
+package grpc_health_v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type HealthCheckResponse_ServingStatus int32
+
+const (
+ HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
+ HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
+ HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
+ HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method.
+)
+
+// Enum value maps for HealthCheckResponse_ServingStatus.
+var (
+ HealthCheckResponse_ServingStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "SERVING",
+ 2: "NOT_SERVING",
+ 3: "SERVICE_UNKNOWN",
+ }
+ HealthCheckResponse_ServingStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "SERVING": 1,
+ "NOT_SERVING": 2,
+ "SERVICE_UNKNOWN": 3,
+ }
+)
+
+func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus {
+ p := new(HealthCheckResponse_ServingStatus)
+ *p = x
+ return p
+}
+
+func (x HealthCheckResponse_ServingStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor()
+}
+
+func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType {
+ return &file_grpc_health_v1_health_proto_enumTypes[0]
+}
+
+func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead.
+func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type HealthCheckRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *HealthCheckRequest) Reset() {
+ *x = HealthCheckRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest) ProtoMessage() {}
+
+func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthCheckRequest) GetService() string {
+ if x != nil {
+ return x.Service
+ }
+ return ""
+}
+
+type HealthCheckResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+}
+
+func (x *HealthCheckResponse) Reset() {
+ *x = HealthCheckResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse) ProtoMessage() {}
+
+func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
+ if x != nil {
+ return x.Status
+ }
+ return HealthCheckResponse_UNKNOWN
+}
+
+var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
+
+var file_grpc_health_v1_health_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
+ 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
+ 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
+ 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
+ 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
+ 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+ 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
+ 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_grpc_health_v1_health_proto_rawDescOnce sync.Once
+ file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc
+)
+
+func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
+ file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
+ file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData)
+ })
+ return file_grpc_health_v1_health_proto_rawDescData
+}
+
+var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_grpc_health_v1_health_proto_goTypes = []interface{}{
+ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
+ (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
+ (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
+}
+var file_grpc_health_v1_health_proto_depIdxs = []int32{
+ 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
+ 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
+ 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
+ 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
+ 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
+ 3, // [3:5] is the sub-list for method output_type
+ 1, // [1:3] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_grpc_health_v1_health_proto_init() }
+func file_grpc_health_v1_health_proto_init() {
+ if File_grpc_health_v1_health_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_grpc_health_v1_health_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_grpc_health_v1_health_proto_goTypes,
+ DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs,
+ EnumInfos: file_grpc_health_v1_health_proto_enumTypes,
+ MessageInfos: file_grpc_health_v1_health_proto_msgTypes,
+ }.Build()
+ File_grpc_health_v1_health_proto = out.File
+ file_grpc_health_v1_health_proto_rawDesc = nil
+ file_grpc_health_v1_health_proto_goTypes = nil
+ file_grpc_health_v1_health_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
new file mode 100644
index 000000000..a01a1b4d5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -0,0 +1,223 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v4.22.0
+// source: grpc/health/v1/health.proto
+
+package grpc_health_v1
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
+ Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
+)
+
+// HealthClient is the client API for Health service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type HealthClient interface {
+ // If the requested service is unknown, the call will fail with status
+ // NOT_FOUND.
+ Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
+ // Performs a watch for the serving status of the requested service.
+ // The server will immediately send back a message indicating the current
+ // serving status. It will then subsequently send a new message whenever
+ // the service's serving status changes.
+ //
+ // If the requested service is unknown when the call is received, the
+ // server will send a message setting the serving status to
+ // SERVICE_UNKNOWN but will *not* terminate the call. If at some
+ // future point, the serving status of the service becomes known, the
+ // server will send a new message with the service's serving status.
+ //
+ // If the call terminates with status UNIMPLEMENTED, then clients
+ // should assume this method is not supported and should not retry the
+ // call. If the call terminates with any other status (including OK),
+ // clients should retry the call with appropriate exponential backoff.
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
+}
+
+type healthClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
+ return &healthClient{cc}
+}
+
+func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
+ out := new(HealthCheckResponse)
+ err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &healthWatchClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Health_WatchClient interface {
+ Recv() (*HealthCheckResponse, error)
+ grpc.ClientStream
+}
+
+type healthWatchClient struct {
+ grpc.ClientStream
+}
+
+func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
+ m := new(HealthCheckResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// HealthServer is the server API for Health service.
+// All implementations should embed UnimplementedHealthServer
+// for forward compatibility
+type HealthServer interface {
+ // If the requested service is unknown, the call will fail with status
+ // NOT_FOUND.
+ Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+ // Performs a watch for the serving status of the requested service.
+ // The server will immediately send back a message indicating the current
+ // serving status. It will then subsequently send a new message whenever
+ // the service's serving status changes.
+ //
+ // If the requested service is unknown when the call is received, the
+ // server will send a message setting the serving status to
+ // SERVICE_UNKNOWN but will *not* terminate the call. If at some
+ // future point, the serving status of the service becomes known, the
+ // server will send a new message with the service's serving status.
+ //
+ // If the call terminates with status UNIMPLEMENTED, then clients
+ // should assume this method is not supported and should not retry the
+ // call. If the call terminates with any other status (including OK),
+ // clients should retry the call with appropriate exponential backoff.
+ Watch(*HealthCheckRequest, Health_WatchServer) error
+}
+
+// UnimplementedHealthServer should be embedded to have forward compatible implementations.
+type UnimplementedHealthServer struct {
+}
+
+func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
+}
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
+ return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
+// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to HealthServer will
+// result in compilation errors.
+type UnsafeHealthServer interface {
+ mustEmbedUnimplementedHealthServer()
+}
+
+func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+ s.RegisterService(&Health_ServiceDesc, srv)
+}
+
+func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HealthCheckRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(HealthServer).Check(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Health_Check_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(HealthCheckRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
+}
+
+type Health_WatchServer interface {
+ Send(*HealthCheckResponse) error
+ grpc.ServerStream
+}
+
+type healthWatchServer struct {
+ grpc.ServerStream
+}
+
+func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Health_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "grpc.health.v1.Health",
+ HandlerType: (*HealthServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Check",
+ Handler: _Health_Check_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Watch",
+ Handler: _Health_Watch_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "grpc/health/v1/health.proto",
+}
diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go
new file mode 100644
index 000000000..dc3dc72f6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/idle.go
@@ -0,0 +1,287 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// For overriding in unit tests.
+var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
+ return time.AfterFunc(d, f)
+}
+
+// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter
+// and exit from idle mode.
+type idlenessEnforcer interface {
+ exitIdleMode() error
+ enterIdleMode() error
+}
+
+// idlenessManager defines the functionality required to track RPC activity on a
+// channel.
+type idlenessManager interface {
+ onCallBegin() error
+ onCallEnd()
+ close()
+}
+
+type noopIdlenessManager struct{}
+
+func (noopIdlenessManager) onCallBegin() error { return nil }
+func (noopIdlenessManager) onCallEnd() {}
+func (noopIdlenessManager) close() {}
+
+// idlenessManagerImpl implements the idlenessManager interface. It uses atomic
+// operations to synchronize access to shared state and a mutex to guarantee
+// mutual exclusion in a critical section.
+type idlenessManagerImpl struct {
+ // State accessed atomically.
+ lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
+ activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
+ activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
+ closed int32 // Boolean; True when the manager is closed.
+
+ // Can be accessed without atomics or mutex since these are set at creation
+ // time and read-only after that.
+ enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn.
+ timeout int64 // Idle timeout duration nanos stored as an int64.
+
+ // idleMu is used to guarantee mutual exclusion in two scenarios:
+ // - Opposing intentions:
+ // - a: Idle timeout has fired and handleIdleTimeout() is trying to put
+ // the channel in idle mode because the channel has been inactive.
+ // - b: At the same time an RPC is made on the channel, and onCallBegin()
+ // is trying to prevent the channel from going idle.
+ // - Competing intentions:
+ // - The channel is in idle mode and there are multiple RPCs starting at
+ // the same time, all trying to move the channel out of idle. Only one
+ // of them should succeed in doing so, while the other RPCs should
+ // piggyback on the first one and be successfully handled.
+ idleMu sync.RWMutex
+ actuallyIdle bool
+ timer *time.Timer
+}
+
+// newIdlenessManager creates a new idleness manager implementation for the
+// given idle timeout.
+func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager {
+ if idleTimeout == 0 {
+ return noopIdlenessManager{}
+ }
+
+ i := &idlenessManagerImpl{
+ enforcer: enforcer,
+ timeout: int64(idleTimeout),
+ }
+ i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout)
+ return i
+}
+
+// resetIdleTimer resets the idle timer to the given duration. This method
+// should only be called from the timer callback.
+func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
+ i.idleMu.Lock()
+ defer i.idleMu.Unlock()
+
+ if i.timer == nil {
+ // Only close sets timer to nil. We are done.
+ return
+ }
+
+ // It is safe to ignore the return value from Reset() because this method is
+ // only ever called from the timer callback, which means the timer has
+ // already fired.
+ i.timer.Reset(d)
+}
+
+// handleIdleTimeout is the timer callback that is invoked upon expiry of the
+// configured idle timeout. The channel is considered inactive if there are no
+// ongoing calls and no RPC activity since the last time the timer fired.
+func (i *idlenessManagerImpl) handleIdleTimeout() {
+ if i.isClosed() {
+ return
+ }
+
+ if atomic.LoadInt32(&i.activeCallsCount) > 0 {
+ i.resetIdleTimer(time.Duration(i.timeout))
+ return
+ }
+
+ // There has been activity on the channel since we last got here. Reset the
+ // timer and return.
+ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
+ // Set the timer to fire after a duration of idle timeout, calculated
+ // from the time the most recent RPC completed.
+ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0)
+ i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano()))
+ return
+ }
+
+ // This CAS operation is extremely likely to succeed given that there has
+ // been no activity since the last time we were here. Setting the
+ // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the
+ // channel is either in idle mode or is trying to get there.
+ if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) {
+ // This CAS operation can fail if an RPC started after we checked for
+ // activity at the top of this method, or one was ongoing from before
+		// the last time we were here. In both cases, reset the timer and return.
+ i.resetIdleTimer(time.Duration(i.timeout))
+ return
+ }
+
+ // Now that we've set the active calls count to -math.MaxInt32, it's time to
+ // actually move to idle mode.
+ if i.tryEnterIdleMode() {
+ // Successfully entered idle mode. No timer needed until we exit idle.
+ return
+ }
+
+ // Failed to enter idle mode due to a concurrent RPC that kept the channel
+ // active, or because of an error from the channel. Undo the attempt to
+ // enter idle, and reset the timer to try again later.
+ atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
+ i.resetIdleTimer(time.Duration(i.timeout))
+}
+
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
+// that, it performs a last minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
+ i.idleMu.Lock()
+ defer i.idleMu.Unlock()
+
+ if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
+ // We raced and lost to a new RPC. Very rare, but stop entering idle.
+ return false
+ }
+ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
+		// A very short RPC could have come in (and also finished) after we
+ // checked for calls count and activity in handleIdleTimeout(), but
+ // before the CAS operation. So, we need to check for activity again.
+ return false
+ }
+
+	// No new RPCs have come in since we last set the active calls count to the value
+ // -math.MaxInt32 in the timer callback. And since we have the lock, it is
+ // safe to enter idle mode now.
+ if err := i.enforcer.enterIdleMode(); err != nil {
+ logger.Errorf("Failed to enter idle mode: %v", err)
+ return false
+ }
+
+ // Successfully entered idle mode.
+ i.actuallyIdle = true
+ return true
+}
+
+// onCallBegin is invoked at the start of every RPC.
+func (i *idlenessManagerImpl) onCallBegin() error {
+ if i.isClosed() {
+ return nil
+ }
+
+ if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
+ // Channel is not idle now. Set the activity bit and allow the call.
+ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
+ return nil
+ }
+
+ // Channel is either in idle mode or is in the process of moving to idle
+ // mode. Attempt to exit idle mode to allow this RPC.
+ if err := i.exitIdleMode(); err != nil {
+ // Undo the increment to calls count, and return an error causing the
+ // RPC to fail.
+ atomic.AddInt32(&i.activeCallsCount, -1)
+ return err
+ }
+
+ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
+ return nil
+}
+
+// exitIdleMode instructs the channel to exit idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+func (i *idlenessManagerImpl) exitIdleMode() error {
+ i.idleMu.Lock()
+ defer i.idleMu.Unlock()
+
+ if !i.actuallyIdle {
+ // This can happen in two scenarios:
+ // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+ // tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+ // came in and onCallBegin() noticed that the calls count is negative.
+ // - Channel is in idle mode, and multiple new RPCs come in at the same
+ // time, all of them notice a negative calls count in onCallBegin and get
+	//   here. The first one to get the lock would get the channel to exit idle.
+ //
+ // Either way, nothing to do here.
+ return nil
+ }
+
+ if err := i.enforcer.exitIdleMode(); err != nil {
+ return fmt.Errorf("channel failed to exit idle mode: %v", err)
+ }
+
+ // Undo the idle entry process. This also respects any new RPC attempts.
+ atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
+ i.actuallyIdle = false
+
+ // Start a new timer to fire after the configured idle timeout.
+ i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
+ return nil
+}
+
+// onCallEnd is invoked at the end of every RPC.
+func (i *idlenessManagerImpl) onCallEnd() {
+ if i.isClosed() {
+ return
+ }
+
+ // Record the time at which the most recent call finished.
+ atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())
+
+ // Decrement the active calls count. This count can temporarily go negative
+ // when the timer callback is in the process of moving the channel to idle
+ // mode, but one or more RPCs come in and complete before the timer callback
+ // can get done with the process of moving to idle mode.
+ atomic.AddInt32(&i.activeCallsCount, -1)
+}
+
+func (i *idlenessManagerImpl) isClosed() bool {
+ return atomic.LoadInt32(&i.closed) == 1
+}
+
+func (i *idlenessManagerImpl) close() {
+ atomic.StoreInt32(&i.closed, 1)
+
+ i.idleMu.Lock()
+ i.timer.Stop()
+ i.timer = nil
+ i.idleMu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index af03a40d9..755fdebc1 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -32,6 +32,9 @@ var grpclogLogger = grpclog.Component("binarylog")
// Logger specifies MethodLoggers for method names with a Log call that
// takes a context.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
type Logger interface {
GetMethodLogger(methodName string) MethodLogger
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 56fcf008d..6c3f63221 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -49,6 +49,9 @@ func (g *callIDGenerator) reset() {
var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
type MethodLogger interface {
Log(context.Context, LogEntryConfig)
}
@@ -65,6 +68,9 @@ type TruncatingMethodLogger struct {
}
// NewTruncatingMethodLogger returns a new truncating method logger.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
return &TruncatingMethodLogger{
headerMaxLen: h,
@@ -145,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun
}
// LogEntryConfig represents the configuration for binary log entry.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
type LogEntryConfig interface {
toProto() *binlogpb.GrpcLogEntry
}
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 9f6a0c120..81c2f5fd7 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -35,6 +35,7 @@ import "sync"
// internal/transport/transport.go for an example of this.
type Unbounded struct {
c chan interface{}
+ closed bool
mu sync.Mutex
backlog []interface{}
}
@@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded {
// Put adds t to the unbounded buffer.
func (b *Unbounded) Put(t interface{}) {
b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return
+ }
if len(b.backlog) == 0 {
select {
case b.c <- t:
- b.mu.Unlock()
return
default:
}
}
b.backlog = append(b.backlog, t)
- b.mu.Unlock()
}
// Load sends the earliest buffered data, if any, onto the read channel
@@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) {
// value from the read channel.
func (b *Unbounded) Load() {
b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return
+ }
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
@@ -72,7 +79,6 @@ func (b *Unbounded) Load() {
default:
}
}
- b.mu.Unlock()
}
// Get returns a read channel on which values added to the buffer, via Put(),
@@ -80,6 +86,20 @@ func (b *Unbounded) Load() {
//
// Upon reading a value from this channel, users are expected to call Load() to
// send the next buffered value onto the channel if there is any.
+//
+// If the unbounded buffer is closed, the read channel returned by this method
+// is closed.
func (b *Unbounded) Get() <-chan interface{} {
return b.c
}
+
+// Close closes the unbounded buffer.
+func (b *Unbounded) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return
+ }
+ b.closed = true
+ close(b.c)
+}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 5ba9d94d4..77c2c0b89 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -36,6 +36,13 @@ var (
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
+ // PickFirstLBConfig is set if we should support configuration of the
+ // pick_first LB policy, which can be enabled by setting the environment
+ // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
+ PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
+ // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
+ // handshakes that can be performed.
+ ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
index 821dd0a7c..dd314cfb1 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
@@ -28,9 +28,15 @@ const (
var (
// ObservabilityConfig is the json configuration for the gcp/observability
// package specified directly in the envObservabilityConfig env var.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
ObservabilityConfig = os.Getenv(envObservabilityConfig)
// ObservabilityConfigFile is the json configuration for the
// gcp/observability specified in a file with the location specified in
// envObservabilityConfigFile env var.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 3b17705ba..02b4b6a1c 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -61,11 +61,10 @@ var (
// have a brand new API on the server-side and users explicitly need to use
// the new API to get security integration on the server.
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
- // XDSAggregateAndDNS indicates whether processing of aggregated cluster
- // and DNS cluster is enabled, which can be enabled by setting the
- // environment variable
- // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
- // "true".
+ // XDSAggregateAndDNS indicates whether processing of aggregated cluster and
+ // DNS cluster is enabled, which can be disabled by setting the environment
+ // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+ // to "false".
XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
@@ -82,11 +81,15 @@ var (
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
// XDSRLS indicates whether processing of Cluster Specifier plugins and
- // support for the RLS CLuster Specifier is enabled, which can be enabled by
+	// support for the RLS Cluster Specifier is enabled, which can be disabled by
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
- // "true".
- XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)
+ // "false".
+ XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+ // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
+ // can be disabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
+ XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
)
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
index 517ea7064..aa97273e7 100644
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -72,3 +72,24 @@ func Uint64() uint64 {
defer mu.Unlock()
return r.Uint64()
}
+
+// Uint32 implements rand.Uint32 on the grpcrand global source.
+func Uint32() uint32 {
+ mu.Lock()
+ defer mu.Unlock()
+ return r.Uint32()
+}
+
+// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
+func ExpFloat64() float64 {
+ mu.Lock()
+ defer mu.Unlock()
+ return r.ExpFloat64()
+}
+
+// Shuffle implements rand.Shuffle on the grpcrand global source.
+var Shuffle = func(n int, f func(int, int)) {
+ mu.Lock()
+ defer mu.Unlock()
+ r.Shuffle(n, f)
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 79993d343..37b8d4117 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -20,6 +20,7 @@ package grpcsync
import (
"context"
+ "sync"
"google.golang.org/grpc/internal/buffer"
)
@@ -31,15 +32,26 @@ import (
//
// This type is safe for concurrent access.
type CallbackSerializer struct {
+ // Done is closed once the serializer is shut down completely, i.e all
+ // scheduled callbacks are executed and the serializer has deallocated all
+ // its resources.
+ Done chan struct{}
+
callbacks *buffer.Unbounded
+ closedMu sync.Mutex
+ closed bool
}
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
// context will be passed to the scheduled callbacks. Users should cancel the
// provided context to shutdown the CallbackSerializer. It is guaranteed that no
-// callbacks will be executed once this context is canceled.
+// callbacks will be added once this context is canceled, and any pending un-run
+// callbacks will be executed before the serializer is shut down.
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
- t := &CallbackSerializer{callbacks: buffer.NewUnbounded()}
+ t := &CallbackSerializer{
+ Done: make(chan struct{}),
+ callbacks: buffer.NewUnbounded(),
+ }
go t.run(ctx)
return t
}
@@ -48,18 +60,60 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
-func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) {
+//
+// Return value indicates if the callback was successfully added to the list of
+// callbacks to be executed by the serializer. It is not possible to add
+// callbacks once the context passed to NewCallbackSerializer is cancelled.
+func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
+ t.closedMu.Lock()
+ defer t.closedMu.Unlock()
+
+ if t.closed {
+ return false
+ }
t.callbacks.Put(f)
+ return true
}
func (t *CallbackSerializer) run(ctx context.Context) {
+ var backlog []func(context.Context)
+
+ defer close(t.Done)
for ctx.Err() == nil {
select {
case <-ctx.Done():
- return
- case callback := <-t.callbacks.Get():
+ // Do nothing here. Next iteration of the for loop will not happen,
+ // since ctx.Err() would be non-nil.
+ case callback, ok := <-t.callbacks.Get():
+ if !ok {
+ return
+ }
t.callbacks.Load()
callback.(func(ctx context.Context))(ctx)
}
}
+
+ // Fetch pending callbacks if any, and execute them before returning from
+ // this method and closing t.Done.
+ t.closedMu.Lock()
+ t.closed = true
+ backlog = t.fetchPendingCallbacks()
+ t.callbacks.Close()
+ t.closedMu.Unlock()
+ for _, b := range backlog {
+ b(ctx)
+ }
+}
+
+func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
+ var backlog []func(context.Context)
+ for {
+ select {
+ case b := <-t.callbacks.Get():
+ backlog = append(backlog, b.(func(context.Context)))
+ t.callbacks.Load()
+ default:
+ return backlog
+ }
+ }
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
new file mode 100644
index 000000000..f58b5ffa6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -0,0 +1,136 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+ "context"
+ "sync"
+)
+
+// Subscriber represents an entity that is subscribed to messages published on
+// a PubSub. It wraps the callback to be invoked by the PubSub when a new
+// message is published.
+type Subscriber interface {
+ // OnMessage is invoked when a new message is published. Implementations
+ // must not block in this method.
+ OnMessage(msg interface{})
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+//
+// Once a PubSub is stopped, no more messages can be published, and
+// it is guaranteed that no more subscriber callbacks will be invoked.
+type PubSub struct {
+ cs *CallbackSerializer
+ cancel context.CancelFunc
+
+ // Access to the below fields are guarded by this mutex.
+ mu sync.Mutex
+ msg interface{}
+ subscribers map[Subscriber]bool
+ stopped bool
+}
+
+// NewPubSub returns a new PubSub instance.
+func NewPubSub() *PubSub {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &PubSub{
+ cs: NewCallbackSerializer(ctx),
+ cancel: cancel,
+ subscribers: map[Subscriber]bool{},
+ }
+}
+
+// Subscribe registers the provided Subscriber to the PubSub.
+//
+// If the PubSub contains a previously published message, the Subscriber's
+// OnMessage() callback will be invoked asynchronously with the existing
+// message to begin with, and subsequently for every newly published message.
+//
+// The caller is responsible for invoking the returned cancel function to
+// unsubscribe itself from the PubSub.
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+
+ if ps.stopped {
+ return func() {}
+ }
+
+ ps.subscribers[sub] = true
+
+ if ps.msg != nil {
+ msg := ps.msg
+ ps.cs.Schedule(func(context.Context) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+ if !ps.subscribers[sub] {
+ return
+ }
+ sub.OnMessage(msg)
+ })
+ }
+
+ return func() {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+ delete(ps.subscribers, sub)
+ }
+}
+
+// Publish publishes the provided message to the PubSub, and invokes
+// callbacks registered by subscribers asynchronously.
+func (ps *PubSub) Publish(msg interface{}) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+
+ if ps.stopped {
+ return
+ }
+
+ ps.msg = msg
+ for sub := range ps.subscribers {
+ s := sub
+ ps.cs.Schedule(func(context.Context) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+ if !ps.subscribers[s] {
+ return
+ }
+ s.OnMessage(msg)
+ })
+ }
+}
+
+// Stop shuts down the PubSub and releases any resources allocated by it.
+// It is guaranteed that no subscriber callbacks would be invoked once this
+// method returns.
+func (ps *PubSub) Stop() {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+ ps.stopped = true
+
+ ps.cancel()
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 836b6a3b3..42ff39c84 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -60,6 +60,9 @@ var (
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
// CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
CanonicalString interface{} // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An
@@ -69,20 +72,35 @@ var (
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
// ClearGlobalServerOptions clears the array of extra ServerOption. This
// method is useful in testing and benchmarking.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
ClearGlobalServerOptions func()
// AddGlobalDialOptions adds an array of DialOption that will be effective
// globally for newly created client channels. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
AddGlobalDialOptions interface{} // func(opt ...DialOption)
// DisableGlobalDialOptions returns a DialOption that prevents the
// ClientConn from applying the global DialOptions (set via
// AddGlobalDialOptions).
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
DisableGlobalDialOptions interface{} // func() grpc.DialOption
// ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
ClearGlobalDialOptions func()
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
@@ -93,9 +111,15 @@ var (
// WithBinaryLogger returns a DialOption that specifies the binary logger
// for a ClientConn.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
// BinaryLogger returns a ServerOption that can set the binary logger for a
// server.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 09a667f33..99e1e5b36 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -62,7 +62,8 @@ const (
defaultPort = "443"
defaultDNSSvrPort = "53"
golang = "GO"
- // txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+ // txtPrefix is the prefix string to be prepended to the host name for txt
+ // record lookup.
txtPrefix = "_grpc_config."
// In DNS, service config is encoded in a TXT record via the mechanism
// described in RFC-1464 using the attribute name grpc_config.
@@ -86,14 +87,14 @@ var (
minDNSResRate = 30 * time.Second
)
-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
- return func(ctx context.Context, network, address string) (net.Conn, error) {
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
var dialer net.Dialer
- return dialer.DialContext(ctx, network, authority)
+ return dialer.DialContext(ctx, network, address)
}
}
-var customAuthorityResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (netResolver, error) {
host, port, err := parseTarget(authority, defaultDNSSvrPort)
if err != nil {
return nil, err
@@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
return &net.Resolver{
PreferGo: true,
- Dial: customAuthorityDialler(authorityWithPort),
+ Dial: addressDialer(authorityWithPort),
}, nil
}
@@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder {
type dnsBuilder struct{}
-// Build creates and starts a DNS resolver that watches the name resolution of the target.
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
host, port, err := parseTarget(target.Endpoint(), defaultPort)
if err != nil {
@@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
if target.URL.Host == "" {
d.resolver = defaultResolver
} else {
- d.resolver, err = customAuthorityResolver(target.URL.Host)
+ d.resolver, err = newNetResolver(target.URL.Host)
if err != nil {
return nil, err
}
@@ -180,19 +182,22 @@ type dnsResolver struct {
ctx context.Context
cancel context.CancelFunc
cc resolver.ClientConn
- // rn channel is used by ResolveNow() to force an immediate resolution of the target.
+ // rn channel is used by ResolveNow() to force an immediate resolution of the
+ // target.
rn chan struct{}
- // wg is used to enforce Close() to return after the watcher() goroutine has finished.
- // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
- // replace the real lookup functions with mocked ones to facilitate testing.
- // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
- // will warns lookup (READ the lookup function pointers) inside watcher() goroutine
- // has data race with replaceNetFunc (WRITE the lookup function pointers).
+ // wg is used to enforce Close() to return after the watcher() goroutine has
+ // finished. Otherwise, data race will be possible. [Race Example] in
+ // dns_resolver_test we replace the real lookup functions with mocked ones to
+ // facilitate testing. If Close() doesn't wait for watcher() goroutine
+ // finishes, race detector sometimes will warns lookup (READ the lookup
+ // function pointers) inside watcher() goroutine has data race with
+ // replaceNetFunc (WRITE the lookup function pointers).
wg sync.WaitGroup
disableServiceConfig bool
}
-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
+// ResolveNow invoke an immediate resolution of the target that this
+// dnsResolver watches.
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
select {
case d.rn <- struct{}{}:
@@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() {
var timer *time.Timer
if err == nil {
- // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
- // to prevent constantly re-resolving.
+ // Success resolving, wait for the next ResolveNow. However, also wait 30
+ // seconds at the very least to prevent constantly re-resolving.
backoffIndex = 1
timer = newTimerDNSResRate(minDNSResRate)
select {
@@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() {
case <-d.rn:
}
} else {
- // Poll on an error found in DNS Resolver or an error received from ClientConn.
+ // Poll on an error found in DNS Resolver or an error received from
+ // ClientConn.
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
backoffIndex++
}
@@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
}
func handleDNSError(err error, lookupType string) error {
- if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+ dnsErr, ok := err.(*net.DNSError)
+ if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
// Timeouts and temporary errors should be communicated to gRPC to
// attempt another DNS query (with backoff). Other errors should be
// suppressed (they may represent the absence of a TXT record).
@@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
res += s
}
- // TXT record must have "grpc_config=" attribute in order to be used as service config.
+ // TXT record must have "grpc_config=" attribute in order to be used as
+ // service config.
if !strings.HasPrefix(res, txtAttribute) {
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
- // This is not an error; it is the equivalent of not having a service config.
+ // This is not an error; it is the equivalent of not having a service
+ // config.
return nil
}
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
@@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
return &state, nil
}
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+// formatIP returns ok = false if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and
+// ok = true.
func formatIP(addr string) (addrIP string, ok bool) {
ip := net.ParseIP(addr)
if ip == nil {
@@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) {
return "[" + addr + "]", true
}
-// parseTarget takes the user input target string and default port, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
+// parseTarget takes the user input target string and default port, returns
+// formatted host and port info. If target doesn't specify a port, set the port
+// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
+// in square brackets, brackets are stripped when setting the host.
// examples:
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
@@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
}
if host, port, err = net.SplitHostPort(target); err == nil {
if port == "" {
- // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
+ // If the port field is empty (target ends with colon), e.g. "[::1]:",
+ // this is an error.
return "", "", errEndsWithColon
}
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+ // Keep consistent with net.Dial(): If the host is empty, as in ":80",
+ // the local system is assumed.
host = "localhost"
}
return host, port, nil
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
new file mode 100644
index 000000000..11d82afcc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package serviceconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Duration defines JSON marshal and unmarshal methods to conform to the
+// protobuf JSON spec defined [here].
+//
+// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
+type Duration time.Duration
+
+func (d Duration) String() string {
+ return fmt.Sprint(time.Duration(d))
+}
+
+// MarshalJSON converts from d to a JSON string output.
+func (d Duration) MarshalJSON() ([]byte, error) {
+ ns := time.Duration(d).Nanoseconds()
+ sec := ns / int64(time.Second)
+ ns = ns % int64(time.Second)
+
+ var sign string
+ if sec < 0 || ns < 0 {
+ sign, sec, ns = "-", -1*sec, -1*ns
+ }
+
+ // Generated output always contains 0, 3, 6, or 9 fractional digits,
+ // depending on required precision.
+ str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
+ str = strings.TrimSuffix(str, "000")
+ str = strings.TrimSuffix(str, "000")
+ str = strings.TrimSuffix(str, ".000")
+ return []byte(fmt.Sprintf("\"%ss\"", str)), nil
+}
+
+// UnmarshalJSON unmarshals b as a duration JSON string into d.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !strings.HasSuffix(s, "s") {
+ return fmt.Errorf("malformed duration %q: missing seconds unit", s)
+ }
+ neg := false
+ if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ }
+ ss := strings.SplitN(s[:len(s)-1], ".", 3)
+ if len(ss) > 2 {
+ return fmt.Errorf("malformed duration %q: too many decimals", s)
+ }
+ // hasDigits is set if either the whole or fractional part of the number is
+ // present, since both are optional but one is required.
+ hasDigits := false
+ var sec, ns int64
+ if len(ss[0]) > 0 {
+ var err error
+ if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
+ return fmt.Errorf("malformed duration %q: %v", s, err)
+ }
+ // Maximum seconds value per the durationpb spec.
+ const maxProtoSeconds = 315_576_000_000
+ if sec > maxProtoSeconds {
+ return fmt.Errorf("out of range: %q", s)
+ }
+ hasDigits = true
+ }
+ if len(ss) == 2 && len(ss[1]) > 0 {
+ if len(ss[1]) > 9 {
+ return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
+ }
+ var err error
+ if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
+ return fmt.Errorf("malformed duration %q: %v", s, err)
+ }
+ for i := 9; i > len(ss[1]); i-- {
+ ns *= 10
+ }
+ hasDigits = true
+ }
+ if !hasDigits {
+ return fmt.Errorf("malformed duration %q: contains no numbers", s)
+ }
+
+ if neg {
+ sec *= -1
+ ns *= -1
+ }
+
+ // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
+ const maxSeconds = math.MaxInt64 / int64(time.Second)
+ const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
+ const minSeconds = math.MinInt64 / int64(time.Second)
+ const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
+
+ if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
+ *d = Duration(math.MaxInt64)
+ } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
+ *d = Duration(math.MinInt64)
+ } else {
+ *d = Duration(sec*int64(time.Second) + ns)
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index fbee581b8..98f80e3fa 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -453,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
func (ht *serverHandlerTransport) IncrMsgRecv() {}
-func (ht *serverHandlerTransport) Drain() {
+func (ht *serverHandlerTransport) Drain(debugData string) {
panic("Drain() is not implemented")
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 5216998a8..326bf0848 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -1337,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
-// It expects a lock on transport's mutext to be held by
+// It expects a lock on transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = GoAwayNoReason
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 4b406b8cb..f96064012 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -238,7 +238,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
kp.Timeout = defaultServerKeepaliveTimeout
}
if kp.Time != infinity {
- if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+ if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
}
}
@@ -1166,12 +1166,12 @@ func (t *http2Server) keepalive() {
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
- t.Drain()
+ t.Drain("max_idle")
return
}
idleTimer.Reset(val)
case <-ageTimer.C:
- t.Drain()
+ t.Drain("max_age")
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-ageTimer.C:
@@ -1318,14 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr {
return t.remoteAddr
}
-func (t *http2Server) Drain() {
+func (t *http2Server) Drain(debugData string) {
t.mu.Lock()
defer t.mu.Unlock()
if t.drainEvent != nil {
return
}
t.drainEvent = grpcsync.NewEvent()
- t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
}
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@@ -1367,7 +1367,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
// originated before the GoAway reaches the client.
// After getting the ack or timer expiration send out another GoAway this
// time with an ID of the max stream server intends to process.
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
return false, err
}
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 1b7d7fabc..aa1c89659 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -726,7 +726,7 @@ type ServerTransport interface {
RemoteAddr() net.Addr
// Drain notifies the client this ServerTransport stops accepting new RPCs.
- Drain()
+ Drain(debugData string)
// IncrMsgSent increments the number of message sent through this transport.
IncrMsgSent()
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index c525dc070..02f975951 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -36,6 +36,7 @@ import (
type pickerWrapper struct {
mu sync.Mutex
done bool
+ idle bool
blockingCh chan struct{}
picker balancer.Picker
}
@@ -47,7 +48,11 @@ func newPickerWrapper() *pickerWrapper {
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
pw.mu.Lock()
- if pw.done {
+ if pw.done || pw.idle {
+ // There is a small window where a picker update from the LB policy can
+ // race with the channel going to idle mode. If the picker is idle here,
+ // it is because the channel asked it to do so, and therefore it is sage
+ // to ignore the update from the LB policy.
pw.mu.Unlock()
return
}
@@ -63,10 +68,8 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
// - wraps the done function in the passed in result to increment the calls
// failed or calls succeeded channelz counter before invoking the actual
// done function.
-func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
- acw.mu.Lock()
- ac := acw.ac
- acw.mu.Unlock()
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
+ ac := acbw.ac
ac.incrCallsStarted()
done := result.Done
result.Done = func(b balancer.DoneInfo) {
@@ -152,14 +155,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
}
- acw, ok := pickResult.SubConn.(*acBalancerWrapper)
+ acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
if !ok {
logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
continue
}
- if t := acw.getAddrConn().getReadyTransport(); t != nil {
+ if t := acbw.ac.getReadyTransport(); t != nil {
if channelz.IsOn() {
- doneChannelzWrapper(acw, &pickResult)
+ doneChannelzWrapper(acbw, &pickResult)
return t, pickResult, nil
}
return t, pickResult, nil
@@ -187,6 +190,25 @@ func (pw *pickerWrapper) close() {
close(pw.blockingCh)
}
+func (pw *pickerWrapper) enterIdleMode() {
+ pw.mu.Lock()
+ defer pw.mu.Unlock()
+ if pw.done {
+ return
+ }
+ pw.idle = true
+}
+
+func (pw *pickerWrapper) exitIdleMode() {
+ pw.mu.Lock()
+ defer pw.mu.Unlock()
+ if pw.done {
+ return
+ }
+ pw.blockingCh = make(chan struct{})
+ pw.idle = false
+}
+
// dropError is a wrapper error that indicates the LB policy wishes to drop the
// RPC and not retry it.
type dropError struct {
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index fc91b4d26..abe266b02 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -19,11 +19,15 @@
package grpc
import (
+ "encoding/json"
"errors"
"fmt"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal/envconfig"
+ "google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/serviceconfig"
)
// PickFirstBalancerName is the name of the pick_first balancer.
@@ -43,10 +47,28 @@ func (*pickfirstBuilder) Name() string {
return PickFirstBalancerName
}
+type pfConfig struct {
+ serviceconfig.LoadBalancingConfig `json:"-"`
+
+ // If set to true, instructs the LB policy to shuffle the order of the list
+ // of addresses received from the name resolver before attempting to
+ // connect to them.
+ ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ cfg := &pfConfig{}
+ if err := json.Unmarshal(js, cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
type pickfirstBalancer struct {
state connectivity.State
cc balancer.ClientConn
subConn balancer.SubConn
+ cfg *pfConfig
}
func (b *pickfirstBalancer) ResolverError(err error) {
@@ -69,7 +91,8 @@ func (b *pickfirstBalancer) ResolverError(err error) {
}
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- if len(state.ResolverState.Addresses) == 0 {
+ addrs := state.ResolverState.Addresses
+ if len(addrs) == 0 {
// The resolver reported an empty address list. Treat it like an error by
// calling b.ResolverError.
if b.subConn != nil {
@@ -82,12 +105,23 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
return balancer.ErrBadResolverState
}
+ if state.BalancerConfig != nil {
+ cfg, ok := state.BalancerConfig.(*pfConfig)
+ if !ok {
+ return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
+ }
+ b.cfg = cfg
+ }
+
+ if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList {
+ grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ }
if b.subConn != nil {
- b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
+ b.cc.UpdateAddresses(b.subConn, addrs)
return nil
}
- subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
+ subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
if err != nil {
if logger.V(2) {
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
@@ -119,7 +153,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
}
return
}
- b.state = state.ConnectivityState
if state.ConnectivityState == connectivity.Shutdown {
b.subConn = nil
return
@@ -132,11 +165,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
})
case connectivity.Connecting:
+ if b.state == connectivity.TransientFailure {
+ // We stay in TransientFailure until we are Ready. See A62.
+ return
+ }
b.cc.UpdateState(balancer.State{
ConnectivityState: state.ConnectivityState,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
case connectivity.Idle:
+ if b.state == connectivity.TransientFailure {
+ // We stay in TransientFailure until we are Ready. Also kick the
+ // subConn out of Idle into Connecting. See A62.
+ b.subConn.Connect()
+ return
+ }
b.cc.UpdateState(balancer.State{
ConnectivityState: state.ConnectivityState,
Picker: &idlePicker{subConn: subConn},
@@ -147,6 +190,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
Picker: &picker{err: state.ConnectionError},
})
}
+ b.state = state.ConnectivityState
}
func (b *pickfirstBalancer) Close() {
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 6215e5ef2..d8db6f5d3 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -22,13 +22,13 @@ package resolver
import (
"context"
+ "fmt"
"net"
"net/url"
"strings"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/serviceconfig"
)
@@ -124,7 +124,7 @@ type Address struct {
Attributes *attributes.Attributes
// BalancerAttributes contains arbitrary data about this address intended
- // for consumption by the LB policy. These attribes do not affect SubConn
+ // for consumption by the LB policy. These attributes do not affect SubConn
// creation, connection establishment, handshaking, etc.
BalancerAttributes *attributes.Attributes
@@ -142,6 +142,10 @@ type Address struct {
// Equal returns whether a and o are identical. Metadata is compared directly,
// not with any recursive introspection.
+//
+// This method compares all fields of the address. When used to tell apart
+// addresses during subchannel creation or connection establishment, it might be
+// more appropriate for the caller to implement custom equality logic.
func (a Address) Equal(o Address) bool {
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
a.Attributes.Equal(o.Attributes) &&
@@ -151,7 +155,17 @@ func (a Address) Equal(o Address) bool {
// String returns JSON formatted string representation of the address.
func (a Address) String() string {
- return pretty.ToJSON(a)
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
+ sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
+ if a.Attributes != nil {
+ sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
+ }
+ if a.BalancerAttributes != nil {
+ sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
+ }
+ sb.WriteString("}")
+ return sb.String()
}
// BuildOptions includes additional information for the builder to create
@@ -254,10 +268,6 @@ type ClientConn interface {
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
- // Deprecated: use URL.Scheme instead.
- Scheme string
- // Deprecated: use URL.Host instead.
- Authority string
// URL contains the parsed dial target with an optional default scheme added
// to it if the original dial target contained no scheme or contained an
// unregistered scheme. Any query params specified in the original dial
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index 05a9d4e0b..b408b3688 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -19,11 +19,11 @@
package grpc
import (
+ "context"
"strings"
"sync"
"google.golang.org/grpc/balancer"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/pretty"
@@ -31,129 +31,192 @@ import (
"google.golang.org/grpc/serviceconfig"
)
+// resolverStateUpdater wraps the single method used by ccResolverWrapper to
+// report a state update from the actual resolver implementation.
+type resolverStateUpdater interface {
+ updateResolverState(s resolver.State, err error) error
+}
+
// ccResolverWrapper is a wrapper on top of cc for resolvers.
// It implements resolver.ClientConn interface.
type ccResolverWrapper struct {
- cc *ClientConn
- resolverMu sync.Mutex
- resolver resolver.Resolver
- done *grpcsync.Event
- curState resolver.State
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc resolverStateUpdater
+ channelzID *channelz.Identifier
+ ignoreServiceConfig bool
+ opts ccResolverWrapperOpts
+ serializer *grpcsync.CallbackSerializer // To serialize all incoming calls.
+ serializerCancel context.CancelFunc // To close the serializer, accessed only from close().
+
+ // All incoming (resolver --> gRPC) calls are guaranteed to execute in a
+ // mutually exclusive manner as they are scheduled on the serializer.
+ // Fields accessed *only* in these serializer callbacks, can therefore be
+ // accessed without a mutex.
+ curState resolver.State
+
+ // mu guards access to the below fields.
+ mu sync.Mutex
+ closed bool
+ resolver resolver.Resolver // Accessed only from outgoing calls.
+}
- incomingMu sync.Mutex // Synchronizes all the incoming calls.
+// ccResolverWrapperOpts wraps the arguments to be passed when creating a new
+// ccResolverWrapper.
+type ccResolverWrapperOpts struct {
+ target resolver.Target // User specified dial target to resolve.
+ builder resolver.Builder // Resolver builder to use.
+ bOpts resolver.BuildOptions // Resolver build options to use.
+ channelzID *channelz.Identifier // Channelz identifier for the channel.
}
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
// returns a ccResolverWrapper object which wraps the newly built resolver.
-func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
+func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) {
+ ctx, cancel := context.WithCancel(context.Background())
ccr := &ccResolverWrapper{
- cc: cc,
- done: grpcsync.NewEvent(),
- }
-
- var credsClone credentials.TransportCredentials
- if creds := cc.dopts.copts.TransportCredentials; creds != nil {
- credsClone = creds.Clone()
- }
- rbo := resolver.BuildOptions{
- DisableServiceConfig: cc.dopts.disableServiceConfig,
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- }
-
- var err error
- // We need to hold the lock here while we assign to the ccr.resolver field
- // to guard against a data race caused by the following code path,
- // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
- // accessing ccr.resolver which is being assigned here.
- ccr.resolverMu.Lock()
- defer ccr.resolverMu.Unlock()
- ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
+ cc: cc,
+ channelzID: opts.channelzID,
+ ignoreServiceConfig: opts.bOpts.DisableServiceConfig,
+ opts: opts,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+
+ // Cannot hold the lock at build time because the resolver can send an
+ // update or error inline and these incoming calls grab the lock to schedule
+ // a callback in the serializer.
+ r, err := opts.builder.Build(opts.target, ccr, opts.bOpts)
if err != nil {
+ cancel()
return nil, err
}
+
+ // Any error reported by the resolver at build time that leads to a
+ // re-resolution request from the balancer is dropped by grpc until we
+ // return from this function. So, we don't have to handle pending resolveNow
+ // requests here.
+ ccr.mu.Lock()
+ ccr.resolver = r
+ ccr.mu.Unlock()
+
return ccr, nil
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.resolverMu.Lock()
- if !ccr.done.HasFired() {
- ccr.resolver.ResolveNow(o)
+ ccr.mu.Lock()
+ defer ccr.mu.Unlock()
+
+ // ccr.resolver field is set only after the call to Build() returns. But in
+ // the process of building, the resolver may send an error update which when
+ // propagated to the balancer may result in a re-resolution request.
+ if ccr.closed || ccr.resolver == nil {
+ return
}
- ccr.resolverMu.Unlock()
+ ccr.resolver.ResolveNow(o)
}
func (ccr *ccResolverWrapper) close() {
- ccr.resolverMu.Lock()
- ccr.resolver.Close()
- ccr.done.Fire()
- ccr.resolverMu.Unlock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ return
+ }
+
+ channelz.Info(logger, ccr.channelzID, "Closing the name resolver")
+
+ // Close the serializer to ensure that no more calls from the resolver are
+ // handled, before actually closing the resolver.
+ ccr.serializerCancel()
+ ccr.closed = true
+ r := ccr.resolver
+ ccr.mu.Unlock()
+
+ // Give enqueued callbacks a chance to finish.
+ <-ccr.serializer.Done
+
+ // Spawn a goroutine to close the resolver (since it may block trying to
+ // cleanup all allocated resources) and return early.
+ go r.Close()
+}
+
+// serializerScheduleLocked is a convenience method to schedule a function to be
+// run on the serializer while holding ccr.mu.
+func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
+ ccr.mu.Lock()
+ ccr.serializer.Schedule(f)
+ ccr.mu.Unlock()
}
+// UpdateState is called by resolver implementations to report new state to gRPC
+// which includes addresses and service config.
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
+ errCh := make(chan error, 1)
+ ok := ccr.serializer.Schedule(func(context.Context) {
+ ccr.addChannelzTraceEvent(s)
+ ccr.curState = s
+ if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
+ errCh <- balancer.ErrBadResolverState
+ return
+ }
+ errCh <- nil
+ })
+ if !ok {
+ // The only time when Schedule() fail to add the callback to the
+ // serializer is when the serializer is closed, and this happens only
+ // when the resolver wrapper is closed.
return nil
}
- ccr.addChannelzTraceEvent(s)
- ccr.curState = s
- if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
- return balancer.ErrBadResolverState
- }
- return nil
+ return <-errCh
}
+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
func (ccr *ccResolverWrapper) ReportError(err error) {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
- return
- }
- channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
- ccr.cc.updateResolverState(resolver.State{}, err)
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+ ccr.cc.updateResolverState(resolver.State{}, err)
+ })
}
-// NewAddress is called by the resolver implementation to send addresses to gRPC.
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
- return
- }
- ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
- ccr.curState.Addresses = addrs
- ccr.cc.updateResolverState(ccr.curState, nil)
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
+ ccr.curState.Addresses = addrs
+ ccr.cc.updateResolverState(ccr.curState, nil)
+ })
}
// NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
- return
- }
- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
- if ccr.cc.dopts.disableServiceConfig {
- channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
- return
- }
- scpr := parseServiceConfig(sc)
- if scpr.Err != nil {
- channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
- return
- }
- ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
- ccr.curState.ServiceConfig = scpr
- ccr.cc.updateResolverState(ccr.curState, nil)
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc)
+ if ccr.ignoreServiceConfig {
+ channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config")
+ return
+ }
+ scpr := parseServiceConfig(sc)
+ if scpr.Err != nil {
+ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
+ return
+ }
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
+ ccr.curState.ServiceConfig = scpr
+ ccr.cc.updateResolverState(ccr.curState, nil)
+ })
}
+// ParseServiceConfig is called by resolver implementations to parse a JSON
+// representation of the service config.
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
return parseServiceConfig(scJSON)
}
+// addChannelzTraceEvent adds a channelz trace event containing the new
+// state received from resolver implementations.
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
var updates []string
var oldSC, newSC *ServiceConfig
@@ -172,5 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
- channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+ channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 2030736a3..a844d28f4 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -577,6 +577,9 @@ type parser struct {
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
+
+ // recvBufferPool is the pool of shared receive buffers.
+ recvBufferPool SharedBufferPool
}
// recvMsg reads a complete gRPC message from the stream.
@@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
- // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
- // of making it for each message:
- msg = make([]byte, int(length))
+ msg = p.recvBufferPool.Get(int(length))
if _, err := p.r.Read(msg); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
@@ -726,12 +727,12 @@ type payloadInfo struct {
}
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
- pf, d, err := p.recvMsg(maxReceiveMessageSize)
+ pf, buf, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return nil, err
}
if payInfo != nil {
- payInfo.compressedLength = len(d)
+ payInfo.compressedLength = len(buf)
}
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
@@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
- d, err = dc.Do(bytes.NewReader(d))
- size = len(d)
+ buf, err = dc.Do(bytes.NewReader(buf))
+ size = len(buf)
} else {
- d, size, err = decompress(compressor, d, maxReceiveMessageSize)
+ buf, size, err = decompress(compressor, buf, maxReceiveMessageSize)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
@@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
}
}
- return d, nil
+ return buf, nil
}
// Using compressor, decompress d, returning data and size.
@@ -792,15 +793,17 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
- d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+ buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
if err != nil {
return err
}
- if err := c.Unmarshal(d, m); err != nil {
+ if err := c.Unmarshal(buf, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
if payInfo != nil {
- payInfo.uncompressedBytes = d
+ payInfo.uncompressedBytes = buf
+ } else {
+ p.recvBufferPool.Put(&buf)
}
return nil
}
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 76d152a69..e076ec714 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -174,6 +174,7 @@ type serverOptions struct {
maxHeaderListSize *uint32
headerTableSize *uint32
numServerWorkers uint32
+ recvBufferPool SharedBufferPool
}
var defaultServerOptions = serverOptions{
@@ -182,6 +183,7 @@ var defaultServerOptions = serverOptions{
connectionTimeout: 120 * time.Second,
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
+ recvBufferPool: nopBufferPool{},
}
var globalServerOptions []ServerOption
@@ -552,6 +554,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
})
}
+// RecvBufferPool returns a ServerOption that configures the server
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: StatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.recvBufferPool = bufferPool
+ })
+}
+
// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
@@ -895,7 +918,7 @@ func (s *Server) drainServerTransports(addr string) {
s.mu.Lock()
conns := s.conns[addr]
for st := range conns {
- st.Drain()
+ st.Drain("")
}
s.mu.Unlock()
}
@@ -1046,7 +1069,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
if s.drain {
// Transport added after we drained our existing conns: drain it
// immediately.
- st.Drain()
+ st.Drain("")
}
if s.conns[addr] == nil {
@@ -1296,7 +1319,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
}
- d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+ d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
@@ -1506,7 +1529,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ctx: ctx,
t: t,
s: stream,
- p: &parser{r: stream},
+ p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
codec: s.getCodec(stream.ContentSubtype()),
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
@@ -1856,7 +1879,7 @@ func (s *Server) GracefulStop() {
if !s.drain {
for _, conns := range s.conns {
for st := range conns {
- st.Drain()
+ st.Drain("graceful_stop")
}
}
s.drain = true
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index f22acace4..0df11fc09 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -23,8 +23,6 @@ import (
"errors"
"fmt"
"reflect"
- "strconv"
- "strings"
"time"
"google.golang.org/grpc/codes"
@@ -106,8 +104,8 @@ type healthCheckConfig struct {
type jsonRetryPolicy struct {
MaxAttempts int
- InitialBackoff string
- MaxBackoff string
+ InitialBackoff internalserviceconfig.Duration
+ MaxBackoff internalserviceconfig.Duration
BackoffMultiplier float64
RetryableStatusCodes []codes.Code
}
@@ -129,50 +127,6 @@ type retryThrottlingPolicy struct {
TokenRatio float64
}
-func parseDuration(s *string) (*time.Duration, error) {
- if s == nil {
- return nil, nil
- }
- if !strings.HasSuffix(*s, "s") {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
- if len(ss) > 2 {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- // hasDigits is set if either the whole or fractional part of the number is
- // present, since both are optional but one is required.
- hasDigits := false
- var d time.Duration
- if len(ss[0]) > 0 {
- i, err := strconv.ParseInt(ss[0], 10, 32)
- if err != nil {
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
- }
- d = time.Duration(i) * time.Second
- hasDigits = true
- }
- if len(ss) == 2 && len(ss[1]) > 0 {
- if len(ss[1]) > 9 {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- f, err := strconv.ParseInt(ss[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
- }
- for i := 9; i > len(ss[1]); i-- {
- f *= 10
- }
- d += time.Duration(f)
- hasDigits = true
- }
- if !hasDigits {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
-
- return &d, nil
-}
-
type jsonName struct {
Service string
Method string
@@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) {
type jsonMC struct {
Name *[]jsonName
WaitForReady *bool
- Timeout *string
+ Timeout *internalserviceconfig.Duration
MaxRequestMessageBytes *int64
MaxResponseMessageBytes *int64
RetryPolicy *jsonRetryPolicy
@@ -252,15 +206,10 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
if m.Name == nil {
continue
}
- d, err := parseDuration(m.Timeout)
- if err != nil {
- logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
- return &serviceconfig.ParseResult{Err: err}
- }
mc := MethodConfig{
WaitForReady: m.WaitForReady,
- Timeout: d,
+ Timeout: (*time.Duration)(m.Timeout),
}
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
@@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
if jrp == nil {
return nil, nil
}
- ib, err := parseDuration(&jrp.InitialBackoff)
- if err != nil {
- return nil, err
- }
- mb, err := parseDuration(&jrp.MaxBackoff)
- if err != nil {
- return nil, err
- }
if jrp.MaxAttempts <= 1 ||
- *ib <= 0 ||
- *mb <= 0 ||
+ jrp.InitialBackoff <= 0 ||
+ jrp.MaxBackoff <= 0 ||
jrp.BackoffMultiplier <= 0 ||
len(jrp.RetryableStatusCodes) == 0 {
logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
@@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
rp := &internalserviceconfig.RetryPolicy{
MaxAttempts: jrp.MaxAttempts,
- InitialBackoff: *ib,
- MaxBackoff: *mb,
+ InitialBackoff: time.Duration(jrp.InitialBackoff),
+ MaxBackoff: time.Duration(jrp.MaxBackoff),
BackoffMultiplier: jrp.BackoffMultiplier,
RetryableStatusCodes: make(map[codes.Code]bool),
}
diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go
new file mode 100644
index 000000000..c3a5a9ac1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go
@@ -0,0 +1,154 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import "sync"
+
+// SharedBufferPool is a pool of buffers that can be shared, resulting in
+// decreased memory allocation. Currently, in gRPC-go, it is only utilized
+// for parsing incoming messages.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type SharedBufferPool interface {
+ // Get returns a buffer with specified length from the pool.
+ //
+ // The returned byte slice may not be zero-initialized.
+ Get(length int) []byte
+
+ // Put returns a buffer to the pool.
+ Put(*[]byte)
+}
+
+// NewSharedBufferPool creates a simple SharedBufferPool with buckets
+// of different sizes to optimize memory usage. This prevents the pool from
+// wasting large amounts of memory, even when handling messages of varying sizes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewSharedBufferPool() SharedBufferPool {
+ return &simpleSharedBufferPool{
+ pools: [poolArraySize]simpleSharedBufferChildPool{
+ newBytesPool(level0PoolMaxSize),
+ newBytesPool(level1PoolMaxSize),
+ newBytesPool(level2PoolMaxSize),
+ newBytesPool(level3PoolMaxSize),
+ newBytesPool(level4PoolMaxSize),
+ newBytesPool(0),
+ },
+ }
+}
+
+// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
+type simpleSharedBufferPool struct {
+ pools [poolArraySize]simpleSharedBufferChildPool
+}
+
+func (p *simpleSharedBufferPool) Get(size int) []byte {
+ return p.pools[p.poolIdx(size)].Get(size)
+}
+
+func (p *simpleSharedBufferPool) Put(bs *[]byte) {
+ p.pools[p.poolIdx(cap(*bs))].Put(bs)
+}
+
+func (p *simpleSharedBufferPool) poolIdx(size int) int {
+ switch {
+ case size <= level0PoolMaxSize:
+ return level0PoolIdx
+ case size <= level1PoolMaxSize:
+ return level1PoolIdx
+ case size <= level2PoolMaxSize:
+ return level2PoolIdx
+ case size <= level3PoolMaxSize:
+ return level3PoolIdx
+ case size <= level4PoolMaxSize:
+ return level4PoolIdx
+ default:
+ return levelMaxPoolIdx
+ }
+}
+
+const (
+ level0PoolMaxSize = 16 // 16 B
+ level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
+ level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
+ level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
+ level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
+)
+
+const (
+ level0PoolIdx = iota
+ level1PoolIdx
+ level2PoolIdx
+ level3PoolIdx
+ level4PoolIdx
+ levelMaxPoolIdx
+ poolArraySize
+)
+
+type simpleSharedBufferChildPool interface {
+ Get(size int) []byte
+ Put(interface{})
+}
+
+type bufferPool struct {
+ sync.Pool
+
+ defaultSize int
+}
+
+func (p *bufferPool) Get(size int) []byte {
+ bs := p.Pool.Get().(*[]byte)
+
+ if cap(*bs) < size {
+ p.Pool.Put(bs)
+
+ return make([]byte, size)
+ }
+
+ return (*bs)[:size]
+}
+
+func newBytesPool(size int) simpleSharedBufferChildPool {
+ return &bufferPool{
+ Pool: sync.Pool{
+ New: func() interface{} {
+ bs := make([]byte, size)
+ return &bs
+ },
+ },
+ defaultSize: size,
+ }
+}
+
+// nopBufferPool is a buffer pool that just makes a new buffer without pooling.
+type nopBufferPool struct {
+}
+
+func (nopBufferPool) Get(length int) []byte {
+ return make([]byte, length)
+}
+
+func (nopBufferPool) Put(*[]byte) {
+}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 53910fb7c..bcf2e4d81 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -77,11 +77,18 @@ func FromProto(s *spb.Status) *Status {
// FromError returns a Status representation of err.
//
// - If err was produced by this package or implements the method `GRPCStatus()
-// *Status`, or if err wraps a type satisfying this, the appropriate Status is
-// returned. For wrapped errors, the message returned contains the entire
-// err.Error() text and not just the wrapped status.
+// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
+// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
+// errors, the message returned contains the entire err.Error() text and not
+// just the wrapped status. In that case, ok is true.
//
-// - If err is nil, a Status is returned with codes.OK and no message.
+// - If err is nil, a Status is returned with codes.OK and no message, and ok
+// is true.
+//
+// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
+// returns nil (which maps to codes.OK), or if err wraps a type
+// satisfying this, a Status is returned with codes.Unknown and err's
+// Error() message, and ok is false.
//
// - Otherwise, err is an error not compatible with this package. In this
// case, a Status is returned with codes.Unknown and err's Error() message,
@@ -92,10 +99,24 @@ func FromError(err error) (s *Status, ok bool) {
}
type grpcstatus interface{ GRPCStatus() *Status }
if gs, ok := err.(grpcstatus); ok {
+ if gs.GRPCStatus() == nil {
+ // Error has status nil, which maps to codes.OK. There
+ // is no sensible behavior for this, so we turn it into
+ // an error with codes.Unknown and discard the existing
+ // status.
+ return New(codes.Unknown, err.Error()), false
+ }
return gs.GRPCStatus(), true
}
var gs grpcstatus
if errors.As(err, &gs) {
+ if gs.GRPCStatus() == nil {
+ // Error wraps an error that has status nil, which maps
+ // to codes.OK. There is no sensible behavior for this,
+ // so we turn it into an error with codes.Unknown and
+ // discard the existing status.
+ return New(codes.Unknown, err.Error()), false
+ }
p := gs.GRPCStatus().Proto()
p.Message = err.Error()
return status.FromProto(p), true
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index d1226a412..de32a7597 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -123,6 +123,9 @@ type ClientStream interface {
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines. It is also
// not safe to call CloseSend concurrently with SendMsg.
+ //
+ // It is not safe to modify the message after calling SendMsg. Tracing
+ // libraries and stats handlers may use the message lazily.
SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the stream completes successfully. On
@@ -152,6 +155,11 @@ type ClientStream interface {
// If none of the above happen, a goroutine and a context will be leaked, and grpc
// will not call the optionally-configured stats handler with a stats.End message.
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ if err := cc.idlenessMgr.onCallBegin(); err != nil {
+ return nil, err
+ }
+ defer cc.idlenessMgr.onCallEnd()
+
// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
@@ -469,7 +477,7 @@ func (a *csAttempt) newStream() error {
// It is safe to overwrite the csAttempt's context here, since all state
// maintained in it are local to the attempt. When the attempt has to be
// retried, a new instance of csAttempt will be created.
- if a.pickResult.Metatada != nil {
+ if a.pickResult.Metadata != nil {
// We currently do not have a function it the metadata package which
// merges given metadata with existing metadata in a context. Existing
// function `AppendToOutgoingContext()` takes a variadic argument of key
@@ -479,7 +487,7 @@ func (a *csAttempt) newStream() error {
// in a form passable to AppendToOutgoingContext(), or create a version
// of AppendToOutgoingContext() that accepts a metadata.MD.
md, _ := metadata.FromOutgoingContext(a.ctx)
- md = metadata.Join(md, a.pickResult.Metatada)
+ md = metadata.Join(md, a.pickResult.Metadata)
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
}
@@ -499,7 +507,7 @@ func (a *csAttempt) newStream() error {
return toRPCErr(nse.Err)
}
a.s = s
- a.p = &parser{r: s}
+ a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
return nil
}
@@ -1262,17 +1270,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.s = s
- as.p = &parser{r: s}
+ as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
- // Listen on cc and stream contexts to cleanup when the user closes the
- // ClientConn or cancels the stream context. In all other cases, an error
- // should already be injected into the recv buffer by the transport, which
- // the client will eventually receive, and then we will cancel the stream's
- // context in clientStream.finish.
+ // Listen on stream context to cleanup when the stream context is
+ // canceled. Also listen for the addrConn's context in case the
+ // addrConn is closed or reconnects to a different address. In all
+ // other cases, an error should already be injected into the recv
+ // buffer by the transport, which the client will eventually receive,
+ // and then we will cancel the stream's context in
+ // addrConnStream.finish.
go func() {
+ ac.mu.Lock()
+ acCtx := ac.ctx
+ ac.mu.Unlock()
select {
- case <-ac.ctx.Done():
+ case <-acCtx.Done():
as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
case <-ctx.Done():
as.finish(toRPCErr(ctx.Err()))
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 853ce0e30..353cfd528 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.55.0"
+const Version = "1.57.0"
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
new file mode 100644
index 000000000..d2bac8b88
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -0,0 +1,810 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+// Package structpb contains generated types for google/protobuf/struct.proto.
+//
+// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
+// used to represent arbitrary JSON. The Value message represents a JSON value,
+// the Struct message represents a JSON object, and the ListValue message
+// represents a JSON array. See https://json.org for more information.
+//
+// The Value, Struct, and ListValue types have generated MarshalJSON and
+// UnmarshalJSON methods such that they serialize JSON equivalent to what the
+// messages themselves represent. Use of these types with the
+// "google.golang.org/protobuf/encoding/protojson" package
+// ensures that they will be serialized as their JSON equivalent.
+//
+// # Conversion to and from a Go interface
+//
+// The standard Go "encoding/json" package has functionality to serialize
+// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
+// ListValue.AsSlice methods can convert the protobuf message representation into
+// a form represented by interface{}, map[string]interface{}, and []interface{}.
+// This form can be used with other packages that operate on such data structures
+// and also directly with the standard json package.
+//
+// In order to convert the interface{}, map[string]interface{}, and []interface{}
+// forms back as Value, Struct, and ListValue messages, use the NewStruct,
+// NewList, and NewValue constructor functions.
+//
+// # Example usage
+//
+// Consider the following example JSON object:
+//
+// {
+// "firstName": "John",
+// "lastName": "Smith",
+// "isAlive": true,
+// "age": 27,
+// "address": {
+// "streetAddress": "21 2nd Street",
+// "city": "New York",
+// "state": "NY",
+// "postalCode": "10021-3100"
+// },
+// "phoneNumbers": [
+// {
+// "type": "home",
+// "number": "212 555-1234"
+// },
+// {
+// "type": "office",
+// "number": "646 555-4567"
+// }
+// ],
+// "children": [],
+// "spouse": null
+// }
+//
+// To construct a Value message representing the above JSON object:
+//
+// m, err := structpb.NewValue(map[string]interface{}{
+// "firstName": "John",
+// "lastName": "Smith",
+// "isAlive": true,
+// "age": 27,
+// "address": map[string]interface{}{
+// "streetAddress": "21 2nd Street",
+// "city": "New York",
+// "state": "NY",
+// "postalCode": "10021-3100",
+// },
+// "phoneNumbers": []interface{}{
+// map[string]interface{}{
+// "type": "home",
+// "number": "212 555-1234",
+// },
+// map[string]interface{}{
+// "type": "office",
+// "number": "646 555-4567",
+// },
+// },
+// "children": []interface{}{},
+// "spouse": nil,
+// })
+// if err != nil {
+// ... // handle error
+// }
+// ... // make use of m as a *structpb.Value
+package structpb
+
+import (
+ base64 "encoding/base64"
+ protojson "google.golang.org/protobuf/encoding/protojson"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ math "math"
+ reflect "reflect"
+ sync "sync"
+ utf8 "unicode/utf8"
+)
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+ // Null value.
+ NullValue_NULL_VALUE NullValue = 0
+)
+
+// Enum value maps for NullValue.
+var (
+ NullValue_name = map[int32]string{
+ 0: "NULL_VALUE",
+ }
+ NullValue_value = map[string]int32{
+ "NULL_VALUE": 0,
+ }
+)
+
+func (x NullValue) Enum() *NullValue {
+ p := new(NullValue)
+ *p = x
+ return p
+}
+
+func (x NullValue) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (NullValue) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_struct_proto_enumTypes[0].Descriptor()
+}
+
+func (NullValue) Type() protoreflect.EnumType {
+ return &file_google_protobuf_struct_proto_enumTypes[0]
+}
+
+func (x NullValue) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use NullValue.Descriptor instead.
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Unordered map of dynamically typed values.
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+// NewStruct constructs a Struct from a general-purpose Go map.
+// The map keys must be valid UTF-8.
+// The map values are converted using NewValue.
+func NewStruct(v map[string]interface{}) (*Struct, error) {
+ x := &Struct{Fields: make(map[string]*Value, len(v))}
+ for k, v := range v {
+ if !utf8.ValidString(k) {
+ return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k)
+ }
+ var err error
+ x.Fields[k], err = NewValue(v)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return x, nil
+}
+
+// AsMap converts x to a general-purpose Go map.
+// The map values are converted by calling Value.AsInterface.
+func (x *Struct) AsMap() map[string]interface{} {
+ f := x.GetFields()
+ vs := make(map[string]interface{}, len(f))
+ for k, v := range f {
+ vs[k] = v.AsInterface()
+ }
+ return vs
+}
+
+func (x *Struct) MarshalJSON() ([]byte, error) {
+ return protojson.Marshal(x)
+}
+
+func (x *Struct) UnmarshalJSON(b []byte) error {
+ return protojson.Unmarshal(b, x)
+}
+
+func (x *Struct) Reset() {
+ *x = Struct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Struct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Struct) ProtoMessage() {}
+
+func (x *Struct) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_struct_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Struct.ProtoReflect.Descriptor instead.
+func (*Struct) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Struct) GetFields() map[string]*Value {
+ if x != nil {
+ return x.Fields
+ }
+ return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants. Absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The kind of value.
+ //
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_NumberValue
+ // *Value_StringValue
+ // *Value_BoolValue
+ // *Value_StructValue
+ // *Value_ListValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+// NewValue constructs a Value from a general-purpose Go interface.
+//
+// ╔════════════════════════╤════════════════════════════════════════════╗
+// ║ Go type │ Conversion ║
+// ╠════════════════════════╪════════════════════════════════════════════╣
+// ║ nil │ stored as NullValue ║
+// ║ bool │ stored as BoolValue ║
+// ║ int, int32, int64 │ stored as NumberValue ║
+// ║ uint, uint32, uint64 │ stored as NumberValue ║
+// ║ float32, float64 │ stored as NumberValue ║
+// ║ string │ stored as StringValue; must be valid UTF-8 ║
+// ║ []byte │ stored as StringValue; base64-encoded ║
+// ║ map[string]interface{} │ stored as StructValue ║
+// ║ []interface{} │ stored as ListValue ║
+// ╚════════════════════════╧════════════════════════════════════════════╝
+//
+// When converting an int64 or uint64 to a NumberValue, numeric precision loss
+// is possible since they are stored as a float64.
+func NewValue(v interface{}) (*Value, error) {
+ switch v := v.(type) {
+ case nil:
+ return NewNullValue(), nil
+ case bool:
+ return NewBoolValue(v), nil
+ case int:
+ return NewNumberValue(float64(v)), nil
+ case int32:
+ return NewNumberValue(float64(v)), nil
+ case int64:
+ return NewNumberValue(float64(v)), nil
+ case uint:
+ return NewNumberValue(float64(v)), nil
+ case uint32:
+ return NewNumberValue(float64(v)), nil
+ case uint64:
+ return NewNumberValue(float64(v)), nil
+ case float32:
+ return NewNumberValue(float64(v)), nil
+ case float64:
+ return NewNumberValue(float64(v)), nil
+ case string:
+ if !utf8.ValidString(v) {
+ return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
+ }
+ return NewStringValue(v), nil
+ case []byte:
+ s := base64.StdEncoding.EncodeToString(v)
+ return NewStringValue(s), nil
+ case map[string]interface{}:
+ v2, err := NewStruct(v)
+ if err != nil {
+ return nil, err
+ }
+ return NewStructValue(v2), nil
+ case []interface{}:
+ v2, err := NewList(v)
+ if err != nil {
+ return nil, err
+ }
+ return NewListValue(v2), nil
+ default:
+ return nil, protoimpl.X.NewError("invalid type: %T", v)
+ }
+}
+
+// NewNullValue constructs a new null Value.
+func NewNullValue() *Value {
+ return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}
+}
+
+// NewBoolValue constructs a new boolean Value.
+func NewBoolValue(v bool) *Value {
+ return &Value{Kind: &Value_BoolValue{BoolValue: v}}
+}
+
+// NewNumberValue constructs a new number Value.
+func NewNumberValue(v float64) *Value {
+ return &Value{Kind: &Value_NumberValue{NumberValue: v}}
+}
+
+// NewStringValue constructs a new string Value.
+func NewStringValue(v string) *Value {
+ return &Value{Kind: &Value_StringValue{StringValue: v}}
+}
+
+// NewStructValue constructs a new struct Value.
+func NewStructValue(v *Struct) *Value {
+ return &Value{Kind: &Value_StructValue{StructValue: v}}
+}
+
+// NewListValue constructs a new list Value.
+func NewListValue(v *ListValue) *Value {
+ return &Value{Kind: &Value_ListValue{ListValue: v}}
+}
+
+// AsInterface converts x to a general-purpose Go interface.
+//
+// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce
+// semantically equivalent JSON (assuming no errors occur).
+//
+// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
+// converted as strings to remain compatible with MarshalJSON.
+func (x *Value) AsInterface() interface{} {
+ switch v := x.GetKind().(type) {
+ case *Value_NumberValue:
+ if v != nil {
+ switch {
+ case math.IsNaN(v.NumberValue):
+ return "NaN"
+ case math.IsInf(v.NumberValue, +1):
+ return "Infinity"
+ case math.IsInf(v.NumberValue, -1):
+ return "-Infinity"
+ default:
+ return v.NumberValue
+ }
+ }
+ case *Value_StringValue:
+ if v != nil {
+ return v.StringValue
+ }
+ case *Value_BoolValue:
+ if v != nil {
+ return v.BoolValue
+ }
+ case *Value_StructValue:
+ if v != nil {
+ return v.StructValue.AsMap()
+ }
+ case *Value_ListValue:
+ if v != nil {
+ return v.ListValue.AsSlice()
+ }
+ }
+ return nil
+}
+
+func (x *Value) MarshalJSON() ([]byte, error) {
+ return protojson.Marshal(x)
+}
+
+func (x *Value) UnmarshalJSON(b []byte) error {
+ return protojson.Unmarshal(b, x)
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_struct_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_struct_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return NullValue_NULL_VALUE
+}
+
+func (x *Value) GetNumberValue() float64 {
+ if x, ok := x.GetKind().(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetStructValue() *Struct {
+ if x, ok := x.GetKind().(*Value_StructValue); ok {
+ return x.StructValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ // Represents a null value.
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+ // Represents a double value.
+ NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ // Represents a string value.
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+ // Represents a boolean value.
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+ // Represents a structured value.
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ // Represents a repeated `Value`.
+ ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Repeated field of dynamically typed values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+// NewList constructs a ListValue from a general-purpose Go slice.
+// The slice elements are converted using NewValue.
+func NewList(v []interface{}) (*ListValue, error) {
+ x := &ListValue{Values: make([]*Value, len(v))}
+ for i, v := range v {
+ var err error
+ x.Values[i], err = NewValue(v)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return x, nil
+}
+
+// AsSlice converts x to a general-purpose Go slice.
+// The slice elements are converted by calling Value.AsInterface.
+func (x *ListValue) AsSlice() []interface{} {
+ vals := x.GetValues()
+ vs := make([]interface{}, len(vals))
+ for i, v := range vals {
+ vs[i] = v.AsInterface()
+ }
+ return vs
+}
+
+func (x *ListValue) MarshalJSON() ([]byte, error) {
+ return protojson.Marshal(x)
+}
+
+func (x *ListValue) UnmarshalJSON(b []byte) error {
+ return protojson.Unmarshal(b, x)
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_struct_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_struct_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+var File_google_protobuf_struct_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_struct_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
+ 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
+ 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
+ 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
+ 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
+ 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
+ 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
+ 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
+ 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
+ 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
+ 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
+ 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
+ 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_struct_proto_rawDescOnce sync.Once
+ file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+)
+
+func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
+ file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+ })
+ return file_google_protobuf_struct_proto_rawDescData
+}
+
+var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_google_protobuf_struct_proto_goTypes = []interface{}{
+ (NullValue)(0), // 0: google.protobuf.NullValue
+ (*Struct)(nil), // 1: google.protobuf.Struct
+ (*Value)(nil), // 2: google.protobuf.Value
+ (*ListValue)(nil), // 3: google.protobuf.ListValue
+ nil, // 4: google.protobuf.Struct.FieldsEntry
+}
+var file_google_protobuf_struct_proto_depIdxs = []int32{
+ 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry
+ 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct
+ 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue
+ 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value
+ 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_struct_proto_init() }
+func file_google_protobuf_struct_proto_init() {
+ if File_google_protobuf_struct_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Struct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_NumberValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_StructValue)(nil),
+ (*Value_ListValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_struct_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_struct_proto_depIdxs,
+ EnumInfos: file_google_protobuf_struct_proto_enumTypes,
+ MessageInfos: file_google_protobuf_struct_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_struct_proto = out.File
+ file_google_protobuf_struct_proto_rawDesc = nil
+ file_google_protobuf_struct_proto_goTypes = nil
+ file_google_protobuf_struct_proto_depIdxs = nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7e7806a83..31de773d4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -293,8 +293,8 @@ github.com/gorilla/sessions
# github.com/gorilla/websocket v1.5.0
## explicit; go 1.12
github.com/gorilla/websocket
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0
-## explicit; go 1.14
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0
+## explicit; go 1.17
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -746,14 +746,12 @@ github.com/yuin/goldmark/renderer
github.com/yuin/goldmark/renderer/html
github.com/yuin/goldmark/text
github.com/yuin/goldmark/util
-# go.opentelemetry.io/otel v1.16.0
+# go.opentelemetry.io/otel v1.17.0
## explicit; go 1.19
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/baggage
go.opentelemetry.io/otel/codes
-go.opentelemetry.io/otel/exporters/otlp/internal
-go.opentelemetry.io/otel/exporters/otlp/internal/envconfig
go.opentelemetry.io/otel/internal
go.opentelemetry.io/otel/internal/attribute
go.opentelemetry.io/otel/internal/baggage
@@ -766,30 +764,32 @@ go.opentelemetry.io/otel/semconv/v1.12.0
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
go.opentelemetry.io/otel/semconv/v1.20.0
-# go.opentelemetry.io/otel/exporters/jaeger v1.16.0
-## explicit; go 1.19
-go.opentelemetry.io/otel/exporters/jaeger
-go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent
-go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger
-go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore
-go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift
+go.opentelemetry.io/otel/semconv/v1.21.0
# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0
## explicit; go 1.19
-go.opentelemetry.io/otel/exporters/otlp/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.17.0
## explicit; go 1.19
go.opentelemetry.io/otel/exporters/otlp/otlptrace
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.17.0
## explicit; go 1.19
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
-# go.opentelemetry.io/otel/metric v1.16.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.17.0
+## explicit; go 1.19
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
+# go.opentelemetry.io/otel/metric v1.17.0
## explicit; go 1.19
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/sdk v1.16.0
+# go.opentelemetry.io/otel/sdk v1.17.0
## explicit; go 1.19
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
@@ -797,11 +797,11 @@ go.opentelemetry.io/otel/sdk/internal
go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.16.0
+# go.opentelemetry.io/otel/trace v1.17.0
## explicit; go 1.19
go.opentelemetry.io/otel/trace
-# go.opentelemetry.io/proto/otlp v0.19.0
-## explicit; go 1.14
+# go.opentelemetry.io/proto/otlp v1.0.0
+## explicit; go 1.17
go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/resource/v1
@@ -921,13 +921,14 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
+# google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc
## explicit; go 1.19
google.golang.org/genproto/googleapis/api/httpbody
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc
+## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.55.0
+# google.golang.org/grpc v1.57.0
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -946,6 +947,7 @@ google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
+google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancer/gracefulswitch
@@ -1012,6 +1014,7 @@ google.golang.org/protobuf/types/descriptorpb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
google.golang.org/protobuf/types/known/fieldmaskpb
+google.golang.org/protobuf/types/known/structpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
# gopkg.in/ini.v1 v1.67.0