summaryrefslogtreecommitdiff
path: root/vendor/go.opentelemetry.io/otel
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/go.opentelemetry.io/otel')
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go258
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go653
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go63
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go93
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go145
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go391
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go343
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go602
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go63
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go73
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go145
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go391
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go205
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go266
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go84
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go157
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go215
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go31
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go210
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go374
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go47
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go38
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go56
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go145
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go144
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go103
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go350
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go346
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go224
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go82
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go152
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go215
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go31
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go210
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go374
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go47
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go38
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go56
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go145
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go144
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go103
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go350
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go9
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/config.go85
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/doc.go12
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go72
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go132
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/config.go132
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/doc.go12
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/encoder.go31
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go159
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go85
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go6
-rw-r--r--vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go103
-rw-r--r--vendor/go.opentelemetry.io/otel/log/DESIGN.md634
-rw-r--r--vendor/go.opentelemetry.io/otel/log/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/log/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/log/doc.go76
-rw-r--r--vendor/go.opentelemetry.io/otel/log/embedded/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/log/embedded/embedded.go36
-rw-r--r--vendor/go.opentelemetry.io/otel/log/keyvalue.go443
-rw-r--r--vendor/go.opentelemetry.io/otel/log/kind_string.go30
-rw-r--r--vendor/go.opentelemetry.io/otel/log/logger.go140
-rw-r--r--vendor/go.opentelemetry.io/otel/log/noop/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/log/noop/noop.go50
-rw-r--r--vendor/go.opentelemetry.io/otel/log/provider.go37
-rw-r--r--vendor/go.opentelemetry.io/otel/log/record.go144
-rw-r--r--vendor/go.opentelemetry.io/otel/log/severity.go64
-rw-r--r--vendor/go.opentelemetry.io/otel/log/severity_string.go47
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md176
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/LICENSE201
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/batch.go477
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/doc.go36
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/exporter.go321
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go62
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/logger.go110
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/processor.go56
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/provider.go256
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/record.go518
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/ring.go82
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/setting.go119
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/log/simple.go82
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go74
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go94
-rw-r--r--vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go166
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go394
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go313
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md3
-rw-r--r--vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go143
108 files changed, 15202 insertions, 853 deletions
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md
new file mode 100644
index 000000000..0b4603c8e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Log gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go
new file mode 100644
index 000000000..05abd92ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry"
+ collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
+ logpb "go.opentelemetry.io/proto/otlp/logs/v1"
+)
+
+// The methods of this type are not expected to be called concurrently.
+type client struct {
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // ourConn keeps track of where conn was created: true if created here in
+ // NewClient, or false if passed with an option. This is important on
+ // Shutdown as conn should only be closed if we created it. Otherwise,
+ // it is up to the processes that passed conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ lsc collogpb.LogsServiceClient
+}
+
+// Used for testing.
+var newGRPCClientFn = grpc.NewClient
+
+// newClient creates a new gRPC log client.
+func newClient(cfg config) (*client, error) {
+ c := &client{
+ exportTimeout: cfg.timeout.Value,
+ requestFunc: cfg.retryCfg.Value.RequestFunc(retryable),
+ conn: cfg.gRPCConn.Value,
+ }
+
+ if len(cfg.headers.Value) > 0 {
+ c.metadata = metadata.New(cfg.headers.Value)
+ }
+
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the client was
+ // created, create one using the configuration they did provide.
+ dialOpts := newGRPCDialOptions(cfg)
+
+ conn, err := newGRPCClientFn(cfg.endpoint.Value, dialOpts...)
+ if err != nil {
+ return nil, err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ c.lsc = collogpb.NewLogsServiceClient(c.conn)
+
+ return c, nil
+}
+
+func newGRPCDialOptions(cfg config) []grpc.DialOption {
+ userAgent := "OTel Go OTLP over gRPC logs exporter/" + Version()
+ dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
+ dialOpts = append(dialOpts, cfg.dialOptions.Value...)
+
+ // Convert other grpc configs to the dial options.
+ // Service config
+ if cfg.serviceConfig.Value != "" {
+ dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(cfg.serviceConfig.Value))
+ }
+ // Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.gRPCCredentials.Value != nil {
+ dialOpts = append(dialOpts, grpc.WithTransportCredentials(cfg.gRPCCredentials.Value))
+ } else if cfg.insecure.Value {
+ dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ dialOpts = append(dialOpts, grpc.WithTransportCredentials(
+ credentials.NewTLS(nil),
+ ))
+ }
+ // Compression
+ if cfg.compression.Value == GzipCompression {
+ dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ // Reconnection period
+ if cfg.reconnectionPeriod.Value != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.reconnectionPeriod.Value,
+ }
+ dialOpts = append(dialOpts, grpc.WithConnectParams(p))
+ }
+
+ return dialOpts
+}
+
+// UploadLogs sends proto logs to connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+//
+// The otlplog.Exporter synchronizes access to client methods, and
+// ensures this is not called after the Exporter is shutdown. Only thing
+// to do here is send data.
+func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error {
+ select {
+ case <-ctx.Done():
+ // Do not upload if the context is already expired.
+ return ctx.Err()
+ default:
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(ctx context.Context) error {
+ resp, err := c.lsc.Export(ctx, &collogpb.ExportLogsServiceRequest{
+ ResourceLogs: rl,
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedLogRecords()
+ if n != 0 || msg != "" {
+ err := fmt.Errorf("OTLP partial success: %s (%d log records rejected)", msg, n)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
+
+// Shutdown shuts down the client, freeing all resources.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// The otlplog.Exporter synchronizes access to client methods and
+// ensures this is called only once. The only thing that needs to be done
+// here is to release any computational resources the client holds.
+func (c *client) Shutdown(ctx context.Context) error {
+ c.metadata = nil
+ c.requestFunc = nil
+ c.lsc = nil
+
+ // Release the connection if we created it.
+ err := ctx.Err()
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ c.conn = nil
+ return err
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function based on the clients configured export timeout.
+//
+// It is the callers responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ md := c.metadata
+ if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
+ md = metadata.Join(md, outMD)
+ }
+
+ ctx = metadata.NewOutgoingContext(ctx, md)
+ }
+
+ return ctx, cancel
+}
+
+type noopClient struct{}
+
+func newNoopClient() *noopClient {
+ return &noopClient{}
+}
+
+func (c *noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil }
+
+func (c *noopClient) Shutdown(context.Context) error { return nil }
+
+// retryable returns if err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ // Additionally, handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay returns if the status is RetryInfo
+// and the duration to wait for if an explicit throttle time is included.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return true, t.RetryDelay.AsDuration()
+ }
+ }
+ return false, 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go
new file mode 100644
index 000000000..cd33a1682
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go
@@ -0,0 +1,653 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Default values.
+var (
+ defaultEndpoint = "localhost:4317"
+ defaultTimeout = 10 * time.Second
+ defaultRetryCfg = retry.DefaultConfig
+)
+
+// Environment variable keys.
+var (
+ envEndpoint = []string{
+ "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT",
+ "OTEL_EXPORTER_OTLP_ENDPOINT",
+ }
+ envInsecure = []string{
+ "OTEL_EXPORTER_OTLP_LOGS_INSECURE",
+ "OTEL_EXPORTER_OTLP_INSECURE",
+ }
+
+ envHeaders = []string{
+ "OTEL_EXPORTER_OTLP_LOGS_HEADERS",
+ "OTEL_EXPORTER_OTLP_HEADERS",
+ }
+
+ envCompression = []string{
+ "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION",
+ "OTEL_EXPORTER_OTLP_COMPRESSION",
+ }
+
+ envTimeout = []string{
+ "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT",
+ "OTEL_EXPORTER_OTLP_TIMEOUT",
+ }
+
+ envTLSCert = []string{
+ "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE",
+ "OTEL_EXPORTER_OTLP_CERTIFICATE",
+ }
+ envTLSClient = []struct {
+ Certificate string
+ Key string
+ }{
+ {
+ "OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE",
+ "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY",
+ },
+ {
+ "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE",
+ "OTEL_EXPORTER_OTLP_CLIENT_KEY",
+ },
+ }
+)
+
+type fnOpt func(config) config
+
+func (f fnOpt) applyOption(c config) config { return f(c) }
+
+// Option applies an option to the Exporter.
+type Option interface {
+ applyOption(config) config
+}
+
+type config struct {
+ endpoint setting[string]
+ insecure setting[bool]
+ tlsCfg setting[*tls.Config]
+ headers setting[map[string]string]
+ compression setting[Compression]
+ timeout setting[time.Duration]
+ retryCfg setting[retry.Config]
+
+ // gRPC configurations
+ gRPCCredentials setting[credentials.TransportCredentials]
+ serviceConfig setting[string]
+ reconnectionPeriod setting[time.Duration]
+ dialOptions setting[[]grpc.DialOption]
+ gRPCConn setting[*grpc.ClientConn]
+}
+
+func newConfig(options []Option) config {
+ var c config
+ for _, opt := range options {
+ c = opt.applyOption(c)
+ }
+
+ // Apply environment value and default value
+ c.endpoint = c.endpoint.Resolve(
+ getEnv[string](envEndpoint, convEndpoint),
+ fallback[string](defaultEndpoint),
+ )
+ c.insecure = c.insecure.Resolve(
+ loadInsecureFromEnvEndpoint(envEndpoint),
+ getEnv[bool](envInsecure, convInsecure),
+ )
+ c.tlsCfg = c.tlsCfg.Resolve(
+ loadEnvTLS[*tls.Config](),
+ )
+ c.headers = c.headers.Resolve(
+ getEnv[map[string]string](envHeaders, convHeaders),
+ )
+ c.compression = c.compression.Resolve(
+ getEnv[Compression](envCompression, convCompression),
+ )
+ c.timeout = c.timeout.Resolve(
+ getEnv[time.Duration](envTimeout, convDuration),
+ fallback[time.Duration](defaultTimeout),
+ )
+ c.retryCfg = c.retryCfg.Resolve(
+ fallback[retry.Config](defaultRetryCfg),
+ )
+
+ return c
+}
+
+// RetryConfig defines configuration for retrying the export of log data
+// that failed.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+// WithInsecure disables client transport security for the Exporter's gRPC
+// connection, just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return fnOpt(func(c config) config {
+ c.insecure = newSetting(true)
+ return c
+ })
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return fnOpt(func(c config) config {
+ c.endpoint = newSetting(endpoint)
+ return c
+ })
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(rawURL string) Option {
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ global.Error(err, "otlplog: parse endpoint url", "url", rawURL)
+ return fnOpt(func(c config) config { return c })
+ }
+ return fnOpt(func(c config) config {
+ c.endpoint = newSetting(u.Host)
+ c.insecure = insecureFromScheme(c.insecure, u.Scheme)
+ return c
+ })
+}
+
+// WithReconnectionPeriod set the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return fnOpt(func(c config) config {
+ c.reconnectionPeriod = newSetting(rp)
+ return c
+ })
+}
+
+// Compression describes the compression used for exported payloads.
+type Compression int
+
+const (
+ // NoCompression represents that no compression should be used.
+ NoCompression Compression = iota
+ // GzipCompression represents that gzip compression should be used.
+ GzipCompression
+)
+
+// WithCompressor sets the compressor the gRPC client uses.
+// Supported compressor values: "gzip".
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compression strategy will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+ return fnOpt(func(c config) config {
+ c.compression = newSetting(compressorToCompression(compressor))
+ return c
+ })
+}
+
+// WithHeaders will send the provided headers with each gRPC requests.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_LOGS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_LOGS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+ return fnOpt(func(c config) config {
+ c.headers = newSetting(headers)
+ return c
+ })
+}
+
+// WithTLSCredentials sets the gRPC connection to use creds.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no TLS credentials will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(credential credentials.TransportCredentials) Option {
+ return fnOpt(func(c config) config {
+ c.gRPCCredentials = newSetting(credential)
+ return c
+ })
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return fnOpt(func(c config) config {
+ c.serviceConfig = newSetting(serviceConfig)
+ return c
+ })
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when establishing a
+// gRPC connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return fnOpt(func(c config) config {
+ c.dialOptions = newSetting(opts)
+ return c
+ })
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The Exporter
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return fnOpt(func(c config) config {
+ c.gRPCConn = newSetting(conn)
+ return c
+ })
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the log
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_LOGS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+ return fnOpt(func(c config) config {
+ c.timeout = newSetting(duration)
+ return c
+ })
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+ return fnOpt(func(c config) config {
+ c.retryCfg = newSetting(retry.Config(rc))
+ return c
+ })
+}
+
+// convCompression returns the parsed compression encoded in s. NoCompression
+// and an error are returned if s is unknown.
+func convCompression(s string) (Compression, error) {
+ switch s {
+ case "gzip":
+ return GzipCompression, nil
+ case "none", "":
+ return NoCompression, nil
+ }
+ return NoCompression, fmt.Errorf("unknown compression: %s", s)
+}
+
+// convEndpoint converts s from a URL string to an endpoint if s is a valid
+// URL. Otherwise, "" and an error are returned.
+func convEndpoint(s string) (string, error) {
+ u, err := url.Parse(s)
+ if err != nil {
+ return "", err
+ }
+ return u.Host, nil
+}
+
+// convInsecure converts s from string to bool without case sensitivity.
+// If s is not valid returns error.
+func convInsecure(s string) (bool, error) {
+ s = strings.ToLower(s)
+ if s != "true" && s != "false" {
+ return false, fmt.Errorf("can't convert %q to bool", s)
+ }
+
+ return s == "true", nil
+}
+
+// loadInsecureFromEnvEndpoint returns a resolver that fetches the
+// insecure setting from envEndpoint if it is possible.
+func loadInsecureFromEnvEndpoint(envEndpoint []string) resolver[bool] {
+ return func(s setting[bool]) setting[bool] {
+ if s.Set {
+ // Passed, valid, options have precedence.
+ return s
+ }
+
+ for _, key := range envEndpoint {
+ if vStr := os.Getenv(key); vStr != "" {
+ u, err := url.Parse(vStr)
+ if err != nil {
+ otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err))
+ continue
+ }
+
+ return insecureFromScheme(s, u.Scheme)
+ }
+ }
+ return s
+ }
+}
+
+// convHeaders converts the OTel environment variable header value s into a
+// mapping of header key to value. If s is invalid a partial result and error
+// are returned.
+func convHeaders(s string) (map[string]string, error) {
+ out := make(map[string]string)
+ var err error
+ for _, header := range strings.Split(s, ",") {
+ rawKey, rawVal, found := strings.Cut(header, "=")
+ if !found {
+ err = errors.Join(err, fmt.Errorf("invalid header: %s", header))
+ continue
+ }
+
+ escKey, e := url.PathUnescape(rawKey)
+ if e != nil {
+ err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey))
+ continue
+ }
+ key := strings.TrimSpace(escKey)
+
+ escVal, e := url.PathUnescape(rawVal)
+ if e != nil {
+ err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal))
+ continue
+ }
+ val := strings.TrimSpace(escVal)
+
+ out[key] = val
+ }
+ return out, err
+}
+
+// convDuration converts s into a duration of milliseconds. If s does not
+// contain an integer, 0 and an error are returned.
+func convDuration(s string) (time.Duration, error) {
+ d, err := strconv.Atoi(s)
+ if err != nil {
+ return 0, err
+ }
+ // OTel durations are defined in milliseconds.
+ return time.Duration(d) * time.Millisecond, nil
+}
+
+// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by
+// the OTLP TLS environment variables. This will load both the rootCAs and
+// certificates used for mTLS.
+//
+// If the filepath defined is invalid or does not contain valid TLS files, an
+// error is passed to the OTel ErrorHandler and no TLS configuration is
+// provided.
+func loadEnvTLS[T *tls.Config]() resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if s.Set {
+ // Passed, valid, options have precedence.
+ return s
+ }
+
+ var rootCAs *x509.CertPool
+ var err error
+ for _, key := range envTLSCert {
+ if v := os.Getenv(key); v != "" {
+ rootCAs, err = loadCertPool(v)
+ break
+ }
+ }
+
+ var certs []tls.Certificate
+ for _, pair := range envTLSClient {
+ cert := os.Getenv(pair.Certificate)
+ key := os.Getenv(pair.Key)
+ if cert != "" && key != "" {
+ var e error
+ certs, e = loadCertificates(cert, key)
+ err = errors.Join(err, e)
+ break
+ }
+ }
+
+ if err != nil {
+ err = fmt.Errorf("failed to load TLS: %w", err)
+ otel.Handle(err)
+ } else if rootCAs != nil || certs != nil {
+ s.Set = true
+ s.Value = &tls.Config{RootCAs: rootCAs, Certificates: certs}
+ }
+ return s
+ }
+}
+
+// readFile is used for testing.
+var readFile = os.ReadFile
+
+// loadCertPool loads and returns the *x509.CertPool found at path if it exists
+// and is valid. Otherwise, nil and an error is returned.
+func loadCertPool(path string) (*x509.CertPool, error) {
+ b, err := readFile(path)
+ if err != nil {
+ return nil, err
+ }
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(b); !ok {
+ return nil, errors.New("certificate not added")
+ }
+ return cp, nil
+}
+
+// loadCertificates loads and returns the tls.Certificate found at path if it
+// exists and is valid. Otherwise, nil and an error is returned.
+func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) {
+ cert, err := readFile(certPath)
+ if err != nil {
+ return nil, err
+ }
+ key, err := readFile(keyPath)
+ if err != nil {
+ return nil, err
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ return nil, err
+ }
+ return []tls.Certificate{crt}, nil
+}
+
+// insecureFromScheme returns the setting indicating whether the connection
+// should use client transport security or not.
+// An empty scheme doesn't force the insecure setting.
+func insecureFromScheme(prev setting[bool], scheme string) setting[bool] {
+ if scheme == "https" {
+ return newSetting(false)
+ } else if len(scheme) > 0 {
+ return newSetting(true)
+ }
+
+ return prev
+}
+
+func compressorToCompression(compressor string) Compression {
+ c, err := convCompression(compressor)
+ if err != nil {
+ otel.Handle(fmt.Errorf("%w, using no compression as default", err))
+ return NoCompression
+ }
+
+ return c
+}
+
+// setting is a configuration setting value.
+type setting[T any] struct {
+ Value T
+ Set bool
+}
+
+// newSetting returns a new setting with the value set.
+func newSetting[T any](value T) setting[T] {
+ return setting[T]{Value: value, Set: true}
+}
+
+// resolver returns an updated setting after applying a resolution operation.
+type resolver[T any] func(setting[T]) setting[T]
+
+// Resolve returns a resolved version of s.
+//
+// It will apply all the passed fn in the order provided, chaining together the
+// return setting to the next input. The setting s is used as the initial
+// argument to the first fn.
+//
+// Each fn needs to validate if it should apply given the Set state of the
+// setting. This will not perform any checks on the set state when chaining
+// function.
+func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] {
+ for _, f := range fn {
+ s = f(s)
+ }
+ return s
+}
+
+// getEnv returns a resolver that will apply an environment variable value
+// associated with the first set key to a setting value. The conv function is
+// used to convert between the environment variable value and the setting type.
+//
+// If the input setting to the resolver is set, the environment variable will
+// not be applied.
+//
+// Any error returned from conv is sent to the OTel ErrorHandler and the
+// setting will not be updated.
+func getEnv[T any](keys []string, conv func(string) (T, error)) resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if s.Set {
+ // Passed, valid, options have precedence.
+ return s
+ }
+
+ for _, key := range keys {
+ if vStr := os.Getenv(key); vStr != "" {
+ v, err := conv(vStr)
+ if err == nil {
+ s.Value = v
+ s.Set = true
+ break
+ }
+ otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err))
+ }
+ }
+ return s
+ }
+}
+
+// fallback returns a resolver that will set a setting value to val if it is not
+// already set.
+//
+// This is usually passed at the end of a resolver chain to ensure a default is
+// applied if the setting has not already been set.
+func fallback[T any](val T) resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if !s.Set {
+ s.Value = val
+ s.Set = true
+ }
+ return s
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go
new file mode 100644
index 000000000..67cb81434
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlploggrpc provides an OTLP log exporter using gRPC. The exporter uses gRPC to
+transport OTLP protobuf payloads.
+
+All Exporters must be created with [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port, and a path.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_LOGS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_LOGS_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without scheme.
+OTEL_EXPORTER_OTLP_LOGS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_LOGS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_LOGS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_LOGS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go
new file mode 100644
index 000000000..66895c3a1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go
@@ -0,0 +1,93 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform"
+ "go.opentelemetry.io/otel/sdk/log"
+ logpb "go.opentelemetry.io/proto/otlp/logs/v1"
+)
+
+type logClient interface {
+ UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error
+ Shutdown(context.Context) error
+}
+
+// Exporter is a OpenTelemetry log Exporter. It transports log data encoded as
+// OTLP protobufs using gRPC.
+// All Exporters must be created with [New].
+type Exporter struct {
+ // Ensure synchronous access to the client across all functionality.
+ clientMu sync.Mutex
+ client logClient
+
+ stopped atomic.Bool
+}
+
+// Compile-time check Exporter implements [log.Exporter].
+var _ log.Exporter = (*Exporter)(nil)
+
+// New returns a new [Exporter].
+//
+// It is recommended to use it with a [BatchProcessor]
+// or other processor exporting records asynchronously.
+func New(_ context.Context, options ...Option) (*Exporter, error) {
+ cfg := newConfig(options)
+ c, err := newClient(cfg)
+ if err != nil {
+ return nil, err
+ }
+ return newExporter(c), nil
+}
+
+func newExporter(c logClient) *Exporter {
+ var e Exporter
+ e.client = c
+ return &e
+}
+
+var transformResourceLogs = transform.ResourceLogs
+
+// Export transforms and transmits log records to an OTLP receiver.
+//
+// This method returns nil and drops records if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, records []log.Record) error {
+ if e.stopped.Load() {
+ return nil
+ }
+
+ otlp := transformResourceLogs(records)
+ if otlp == nil {
+ return nil
+ }
+
+ e.clientMu.Lock()
+ defer e.clientMu.Unlock()
+ return e.client.UploadLogs(ctx, otlp)
+}
+
+// Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform
+// no operation after this is called.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ if e.stopped.Swap(true) {
+ return nil
+ }
+
+ e.clientMu.Lock()
+ defer e.clientMu.Unlock()
+
+ err := e.client.Shutdown(ctx)
+ e.client = newNoopClient()
+ return err
+}
+
+// ForceFlush does nothing. The Exporter holds no state.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
new file mode 100644
index 000000000..f2da12382
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to not retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %w", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go
new file mode 100644
index 000000000..dfeecf596
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go
@@ -0,0 +1,391 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlplog/transform/log.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package transform provides transformation functionality from the
+// sdk/log data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform"
+
+import (
+ "time"
+
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+ lpb "go.opentelemetry.io/proto/otlp/logs/v1"
+ rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+
+ "go.opentelemetry.io/otel/attribute"
+ api "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/log"
+)
+
// ResourceLogs returns a slice of OTLP ResourceLogs generated from records.
//
// Records are grouped first by their resource and then, within each
// resource, by their instrumentation scope, matching the OTLP data model.
func ResourceLogs(records []log.Record) []*lpb.ResourceLogs {
	if len(records) == 0 {
		return nil
	}

	// resMap groups output by resource identity.
	resMap := make(map[attribute.Distinct]*lpb.ResourceLogs)

	// key identifies a (resource, scope) pair; records sharing both are
	// merged into a single ScopeLogs entry via scopeMap.
	type key struct {
		r  attribute.Distinct
		is instrumentation.Scope
	}
	scopeMap := make(map[key]*lpb.ScopeLogs)

	// resources counts distinct resources seen so the result slice can be
	// allocated with exact capacity.
	var resources int
	for _, r := range records {
		res := r.Resource()
		rKey := res.Equivalent()
		scope := r.InstrumentationScope()
		k := key{
			r:  rKey,
			is: scope,
		}
		sl, iOk := scopeMap[k]
		if !iOk {
			// First record seen for this (resource, scope) pair.
			sl = new(lpb.ScopeLogs)
			var emptyScope instrumentation.Scope
			if scope != emptyScope {
				sl.Scope = &cpb.InstrumentationScope{
					Name:       scope.Name,
					Version:    scope.Version,
					Attributes: AttrIter(scope.Attributes.Iter()),
				}
				sl.SchemaUrl = scope.SchemaURL
			}
			scopeMap[k] = sl
		}

		sl.LogRecords = append(sl.LogRecords, LogRecord(r))
		rl, rOk := resMap[rKey]
		if !rOk {
			// First record seen for this resource.
			resources++
			rl = new(lpb.ResourceLogs)
			if res.Len() > 0 {
				rl.Resource = &rpb.Resource{
					Attributes: AttrIter(res.Iter()),
				}
			}
			rl.SchemaUrl = res.SchemaURL()
			resMap[rKey] = rl
		}
		if !iOk {
			// A newly created ScopeLogs is attached to its ResourceLogs
			// exactly once.
			rl.ScopeLogs = append(rl.ScopeLogs, sl)
		}
	}

	// Transform the categorized map into a slice
	resLogs := make([]*lpb.ResourceLogs, 0, resources)
	for _, rl := range resMap {
		resLogs = append(resLogs, rl)
	}

	return resLogs
}
+
// LogRecord returns an OTLP LogRecord generated from record.
func LogRecord(record log.Record) *lpb.LogRecord {
	r := &lpb.LogRecord{
		TimeUnixNano:         timeUnixNano(record.Timestamp()),
		ObservedTimeUnixNano: timeUnixNano(record.ObservedTimestamp()),
		EventName:            record.EventName(),
		SeverityNumber:       SeverityNumber(record.Severity()),
		SeverityText:         record.SeverityText(),
		Body:                 LogAttrValue(record.Body()),
		Attributes:           make([]*cpb.KeyValue, 0, record.AttributesLen()),
		Flags:                uint32(record.TraceFlags()),
		// TODO: DroppedAttributesCount: /* ... */,
	}
	// Copy each record attribute into its OTLP representation.
	record.WalkAttributes(func(kv api.KeyValue) bool {
		r.Attributes = append(r.Attributes, LogAttr(kv))
		return true
	})
	// Trace correlation fields are only populated when the IDs are valid,
	// leaving them unset (nil) otherwise.
	if tID := record.TraceID(); tID.IsValid() {
		r.TraceId = tID[:]
	}
	if sID := record.SpanID(); sID.IsValid() {
		r.SpanId = sID[:]
	}
	return r
}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64. The result is undefined if the Unix
+// time in nanoseconds cannot be represented by an int64 (a date before the
+// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The
+// result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+ nano := t.UnixNano()
+ if nano < 0 {
+ return 0
+ }
+ return uint64(nano) // nolint:gosec // Overflow checked.
+}
+
+// AttrIter transforms an [attribute.Iterator] into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, Attr(iter.Attribute()))
+ }
+ return out
+}
+
+// Attrs transforms a slice of [attribute.KeyValue] into OTLP key-values.
+func Attrs(attrs []attribute.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, Attr(kv))
+ }
+ return out
+}
+
+// Attr transforms an [attribute.KeyValue] into an OTLP key-value.
+func Attr(kv attribute.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{Key: string(kv.Key), Value: AttrValue(kv.Value)}
+}
+
// AttrValue transforms an [attribute.Value] into an OTLP AnyValue. Each
// attribute type maps 1:1 onto its OTLP wire representation; slice types
// become OTLP ArrayValues.
func AttrValue(v attribute.Value) *cpb.AnyValue {
	av := new(cpb.AnyValue)
	switch v.Type() {
	case attribute.BOOL:
		av.Value = &cpb.AnyValue_BoolValue{
			BoolValue: v.AsBool(),
		}
	case attribute.BOOLSLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: boolSliceValues(v.AsBoolSlice()),
			},
		}
	case attribute.INT64:
		av.Value = &cpb.AnyValue_IntValue{
			IntValue: v.AsInt64(),
		}
	case attribute.INT64SLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: int64SliceValues(v.AsInt64Slice()),
			},
		}
	case attribute.FLOAT64:
		av.Value = &cpb.AnyValue_DoubleValue{
			DoubleValue: v.AsFloat64(),
		}
	case attribute.FLOAT64SLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: float64SliceValues(v.AsFloat64Slice()),
			},
		}
	case attribute.STRING:
		av.Value = &cpb.AnyValue_StringValue{
			StringValue: v.AsString(),
		}
	case attribute.STRINGSLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: stringSliceValues(v.AsStringSlice()),
			},
		}
	default:
		// Unrecognized attribute type: encode a marker string rather than
		// silently dropping the value.
		av.Value = &cpb.AnyValue_StringValue{
			StringValue: "INVALID",
		}
	}
	return av
}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+// Attrs transforms a slice of [api.KeyValue] into OTLP key-values.
+func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, LogAttr(kv))
+ }
+ return out
+}
+
+// LogAttr transforms an [api.KeyValue] into an OTLP key-value.
+func LogAttr(attr api.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{
+ Key: attr.Key,
+ Value: LogAttrValue(attr.Value),
+ }
+}
+
+// LogAttrValues transforms a slice of [api.Value] into an OTLP []AnyValue.
+func LogAttrValues(vals []api.Value) []*cpb.AnyValue {
+ if len(vals) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.AnyValue, 0, len(vals))
+ for _, v := range vals {
+ out = append(out, LogAttrValue(v))
+ }
+ return out
+}
+
// LogAttrValue transforms an [api.Value] into an OTLP AnyValue. Slices and
// maps are converted recursively via LogAttrValues and LogAttrs.
func LogAttrValue(v api.Value) *cpb.AnyValue {
	av := new(cpb.AnyValue)
	switch v.Kind() {
	case api.KindBool:
		av.Value = &cpb.AnyValue_BoolValue{
			BoolValue: v.AsBool(),
		}
	case api.KindInt64:
		av.Value = &cpb.AnyValue_IntValue{
			IntValue: v.AsInt64(),
		}
	case api.KindFloat64:
		av.Value = &cpb.AnyValue_DoubleValue{
			DoubleValue: v.AsFloat64(),
		}
	case api.KindString:
		av.Value = &cpb.AnyValue_StringValue{
			StringValue: v.AsString(),
		}
	case api.KindBytes:
		av.Value = &cpb.AnyValue_BytesValue{
			BytesValue: v.AsBytes(),
		}
	case api.KindSlice:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: LogAttrValues(v.AsSlice()),
			},
		}
	case api.KindMap:
		av.Value = &cpb.AnyValue_KvlistValue{
			KvlistValue: &cpb.KeyValueList{
				Values: LogAttrs(v.AsMap()),
			},
		}
	default:
		// Unrecognized value kind: encode a marker string rather than
		// silently dropping the value.
		av.Value = &cpb.AnyValue_StringValue{
			StringValue: "INVALID",
		}
	}
	return av
}
+
// SeverityNumber transforms a [log.Severity] into an OTLP SeverityNumber.
// Each API severity maps 1:1 onto its OTLP counterpart; any unrecognized
// value maps to SEVERITY_NUMBER_UNSPECIFIED.
func SeverityNumber(s api.Severity) lpb.SeverityNumber {
	switch s {
	case api.SeverityTrace:
		return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE
	case api.SeverityTrace2:
		return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE2
	case api.SeverityTrace3:
		return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE3
	case api.SeverityTrace4:
		return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE4
	case api.SeverityDebug:
		return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG
	case api.SeverityDebug2:
		return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG2
	case api.SeverityDebug3:
		return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG3
	case api.SeverityDebug4:
		return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG4
	case api.SeverityInfo:
		return lpb.SeverityNumber_SEVERITY_NUMBER_INFO
	case api.SeverityInfo2:
		return lpb.SeverityNumber_SEVERITY_NUMBER_INFO2
	case api.SeverityInfo3:
		return lpb.SeverityNumber_SEVERITY_NUMBER_INFO3
	case api.SeverityInfo4:
		return lpb.SeverityNumber_SEVERITY_NUMBER_INFO4
	case api.SeverityWarn:
		return lpb.SeverityNumber_SEVERITY_NUMBER_WARN
	case api.SeverityWarn2:
		return lpb.SeverityNumber_SEVERITY_NUMBER_WARN2
	case api.SeverityWarn3:
		return lpb.SeverityNumber_SEVERITY_NUMBER_WARN3
	case api.SeverityWarn4:
		return lpb.SeverityNumber_SEVERITY_NUMBER_WARN4
	case api.SeverityError:
		return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR
	case api.SeverityError2:
		return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR2
	case api.SeverityError3:
		return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR3
	case api.SeverityError4:
		return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR4
	case api.SeverityFatal:
		return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL
	case api.SeverityFatal2:
		return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL2
	case api.SeverityFatal3:
		return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL3
	case api.SeverityFatal4:
		return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL4
	}
	return lpb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go
new file mode 100644
index 000000000..a68ed0591
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
+
// Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use.
func Version() string {
	const version = "0.11.0"
	return version
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/README.md
new file mode 100644
index 000000000..14c240b07
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/README.md
@@ -0,0 +1,3 @@
+# OTLP Log HTTP Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go
new file mode 100644
index 000000000..279b4be4f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go
@@ -0,0 +1,343 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/otel"
+ collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
+ logpb "go.opentelemetry.io/proto/otlp/logs/v1"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry"
+)
+
// client is the log upload client used by the exporter. The zero value is a
// no-op client; uploadLogs, when non-nil, performs the actual export.
type client struct {
	// uploadLogs sends the given resource logs to the backend.
	uploadLogs func(context.Context, []*logpb.ResourceLogs) error
}
+
+func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error {
+ if c.uploadLogs != nil {
+ return c.uploadLogs(ctx, rl)
+ }
+ return nil
+}
+
+func newNoopClient() *client {
+ return &client{}
+}
+
// newHTTPClient creates a new HTTP log client.
//
// The returned client POSTs protobuf-encoded payloads to the scheme, host,
// and path derived from cfg, applying any configured TLS, proxy, headers,
// compression, and retry settings.
func newHTTPClient(cfg config) (*client, error) {
	hc := &http.Client{
		Transport: ourTransport,
		Timeout:   cfg.timeout.Value,
	}

	// Only clone the shared transport when a custom TLS config or proxy is
	// required; otherwise the default transport (and its connection pool) is
	// reused as-is.
	if cfg.tlsCfg.Value != nil || cfg.proxy.Value != nil {
		clonedTransport := ourTransport.Clone()
		hc.Transport = clonedTransport

		if cfg.tlsCfg.Value != nil {
			clonedTransport.TLSClientConfig = cfg.tlsCfg.Value
		}
		if cfg.proxy.Value != nil {
			clonedTransport.Proxy = cfg.proxy.Value
		}
	}

	u := &url.URL{
		Scheme: "https",
		Host:   cfg.endpoint.Value,
		Path:   cfg.path.Value,
	}
	if cfg.insecure.Value {
		u.Scheme = "http"
	}
	// Body is set when this is cloned during upload.
	req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody)
	if err != nil {
		return nil, err
	}

	userAgent := "OTel Go OTLP over HTTP/protobuf logs exporter/" + Version()
	req.Header.Set("User-Agent", userAgent)

	// User-supplied headers are set before Content-Type so they cannot
	// override the protobuf content type required by the protocol.
	if n := len(cfg.headers.Value); n > 0 {
		for k, v := range cfg.headers.Value {
			req.Header.Set(k, v)
		}
	}
	req.Header.Set("Content-Type", "application/x-protobuf")

	c := &httpClient{
		compression: cfg.compression.Value,
		req:         req,
		requestFunc: cfg.retryCfg.Value.RequestFunc(evaluate),
		client:      hc,
	}
	return &client{uploadLogs: c.uploadLogs}, nil
}
+
// httpClient performs the low-level HTTP transport used to upload OTLP logs.
// Instances are created by newHTTPClient and accessed through client.
type httpClient struct {
	// req is cloned for every upload the client makes.
	req         *http.Request
	compression Compression
	requestFunc retry.RequestFunc
	client      *http.Client
}
+
// ourTransport is the HTTP transport shared by all clients that do not need
// custom TLS or proxy settings.
//
// Keep it in sync with Go's http.DefaultTransport from net/http! We have our
// own copy to avoid handling a situation where the DefaultTransport is
// overwritten with some different implementation of http.RoundTripper or it's
// modified by another package.
var ourTransport = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).DialContext,
	ForceAttemptHTTP2:     true,
	MaxIdleConns:          100,
	IdleConnTimeout:       90 * time.Second,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
}
+
// uploadLogs marshals data into an OTLP export request and POSTs it to the
// configured endpoint, retrying via requestFunc according to the retry
// policy. Partial-success responses are reported through otel.Handle rather
// than returned as errors.
func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) error {
	// The Exporter synchronizes access to client methods. This is not called
	// after the Exporter is shutdown. Only thing to do here is send data.

	pbRequest := &collogpb.ExportLogsServiceRequest{ResourceLogs: data}
	body, err := proto.Marshal(pbRequest)
	if err != nil {
		return err
	}
	request, err := c.newRequest(ctx, body)
	if err != nil {
		return err
	}

	return c.requestFunc(ctx, func(iCtx context.Context) error {
		// Bail out before sending if the attempt's context is already done.
		select {
		case <-iCtx.Done():
			return iCtx.Err()
		default:
		}

		request.reset(iCtx)
		resp, err := c.client.Do(request.Request)
		// Temporary transport errors are surfaced as retryable so the retry
		// policy can attempt the upload again.
		var urlErr *url.Error
		if errors.As(err, &urlErr) && urlErr.Temporary() {
			return newResponseError(http.Header{}, err)
		}
		if err != nil {
			return err
		}
		if resp != nil && resp.Body != nil {
			defer func() {
				if err := resp.Body.Close(); err != nil {
					otel.Handle(err)
				}
			}()
		}

		if sc := resp.StatusCode; sc >= 200 && sc <= 299 {
			// Success, do not retry.

			// Read the partial success message, if any.
			var respData bytes.Buffer
			if _, err := io.Copy(&respData, resp.Body); err != nil {
				return err
			}
			if respData.Len() == 0 {
				return nil
			}

			if resp.Header.Get("Content-Type") == "application/x-protobuf" {
				var respProto collogpb.ExportLogsServiceResponse
				if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
					return err
				}

				if respProto.PartialSuccess != nil {
					msg := respProto.PartialSuccess.GetErrorMessage()
					n := respProto.PartialSuccess.GetRejectedLogRecords()
					if n != 0 || msg != "" {
						// Partial success is not an upload failure; report it
						// through the global error handler instead of
						// returning it.
						err := fmt.Errorf("OTLP partial success: %s (%d log records rejected)", msg, n)
						otel.Handle(err)
					}
				}
			}
			return nil
		}
		// Error cases.

		// server may return a message with the response
		// body, so we read it to include in the error
		// message to be returned. It will help in
		// debugging the actual issue.
		var respData bytes.Buffer
		if _, err := io.Copy(&respData, resp.Body); err != nil {
			return err
		}
		respStr := strings.TrimSpace(respData.String())
		if len(respStr) == 0 {
			respStr = "(empty)"
		}
		bodyErr := fmt.Errorf("body: %s", respStr)

		switch resp.StatusCode {
		case http.StatusTooManyRequests,
			http.StatusBadGateway,
			http.StatusServiceUnavailable,
			http.StatusGatewayTimeout:
			// Retryable failure.
			return newResponseError(resp.Header, bodyErr)
		default:
			// Non-retryable failure.
			return fmt.Errorf("failed to send logs to %s: %s (%w)", request.URL, resp.Status, bodyErr)
		}
	})
}
+
// gzPool recycles gzip writers across uploads so their internal buffers are
// not reallocated for every compressed request.
var gzPool = sync.Pool{
	New: func() interface{} {
		w := gzip.NewWriter(io.Discard)
		return w
	},
}
+
// newRequest clones the client's template request with ctx and prepares body
// according to the configured compression. The returned request's bodyReader
// can recreate the body, allowing the same payload to be re-sent on retry.
func (c *httpClient) newRequest(ctx context.Context, body []byte) (request, error) {
	r := c.req.Clone(ctx)
	req := request{Request: r}

	switch c.compression {
	case NoCompression:
		r.ContentLength = (int64)(len(body))
		req.bodyReader = bodyReader(body)
	case GzipCompression:
		// Ensure the content length is not used.
		r.ContentLength = -1
		r.Header.Set("Content-Encoding", "gzip")

		// Reuse a pooled gzip writer; it is returned to the pool when this
		// function exits.
		gz := gzPool.Get().(*gzip.Writer)
		defer gzPool.Put(gz)

		var b bytes.Buffer
		gz.Reset(&b)

		if _, err := gz.Write(body); err != nil {
			return req, err
		}
		// Close needs to be called to ensure body is fully written.
		if err := gz.Close(); err != nil {
			return req, err
		}

		req.bodyReader = bodyReader(b.Bytes())
	}

	return req, nil
}
+
// bodyReader returns a factory producing a fresh ReadCloser over buf on each
// call, so the same payload can be read repeatedly across retries.
func bodyReader(buf []byte) func() io.ReadCloser {
	return func() io.ReadCloser { return io.NopCloser(bytes.NewReader(buf)) }
}
+
// request wraps an http.Request with a resettable body reader so a single
// marshaled payload can be sent multiple times (once per retry attempt).
type request struct {
	*http.Request

	// bodyReader allows the same body to be used for multiple requests.
	bodyReader func() io.ReadCloser
}
+
// reset reinitializes the request Body and uses ctx for the request. The body
// must be recreated per attempt because sending the request consumes it.
func (r *request) reset(ctx context.Context) {
	r.Body = r.bodyReader()
	r.Request = r.WithContext(ctx)
}
+
// retryableError represents a request failure that can be retried.
type retryableError struct {
	// throttle is the integer value parsed from the response's Retry-After
	// header, if one was present (zero otherwise).
	throttle int64
	// err is the underlying cause of the failure; may be nil.
	err error
}
+
// newResponseError returns a retryableError and will extract any explicit
// throttle delay contained in headers. The returned error wraps wrapped
// if it is not nil.
//
// Only the integer (delay-seconds) form of Retry-After is honored; the
// HTTP-date form fails integer parsing and is silently ignored.
func newResponseError(header http.Header, wrapped error) error {
	var rErr retryableError
	if v := header.Get("Retry-After"); v != "" {
		if t, err := strconv.ParseInt(v, 10, 64); err == nil {
			rErr.throttle = t
		}
	}

	rErr.err = wrapped
	return rErr
}
+
+func (e retryableError) Error() string {
+ if e.err != nil {
+ return fmt.Sprintf("retry-able request failure: %v", e.err.Error())
+ }
+
+ return "retry-able request failure"
+}
+
// Unwrap returns the wrapped underlying error (possibly nil), supporting
// errors.Is and errors.As inspection of the cause.
func (e retryableError) Unwrap() error {
	return e.err
}
+
// As supports the errors.As contract: it matches a target of type
// **retryableError, but only when an underlying error is present.
func (e retryableError) As(target interface{}) bool {
	if e.err == nil {
		return false
	}

	switch v := target.(type) {
	case **retryableError:
		*v = &e
		return true
	default:
		return false
	}
}
+
+// evaluate returns if err is retry-able. If it is and it includes an explicit
+// throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+ if err == nil {
+ return false, 0
+ }
+
+ // Do not use errors.As here, this should only be flattened one layer. If
+ // there are several chained errors, all the errors above it will be
+ // discarded if errors.As is used instead.
+ rErr, ok := err.(retryableError) //nolint:errorlint
+ if !ok {
+ return false, 0
+ }
+
+ return true, time.Duration(rErr.throttle)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go
new file mode 100644
index 000000000..bfe768091
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go
@@ -0,0 +1,602 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
// Default values.
var (
	// defaultEndpoint is the OTLP/HTTP default host:port (no scheme or path).
	defaultEndpoint = "localhost:4318"
	// defaultPath is the OTLP log-signal URL path.
	defaultPath = "/v1/logs"
	// defaultTimeout bounds each export attempt.
	defaultTimeout = 10 * time.Second
	// defaultProxy defers to the standard environment proxy variables.
	defaultProxy HTTPTransportProxyFunc = http.ProxyFromEnvironment
	defaultRetryCfg                     = retry.DefaultConfig
)

// Environment variable keys. Where several keys are listed, they are ordered
// most- to least-specific; the first key with a non-empty value wins (see
// getenv).
var (
	envEndpoint = []string{
		"OTEL_EXPORTER_OTLP_LOGS_ENDPOINT",
		"OTEL_EXPORTER_OTLP_ENDPOINT",
	}
	// Transport security is derived from the same endpoint URL scheme.
	envInsecure = envEndpoint

	// Split because these are parsed differently.
	envPathSignal = []string{"OTEL_EXPORTER_OTLP_LOGS_ENDPOINT"}
	envPathOTLP   = []string{"OTEL_EXPORTER_OTLP_ENDPOINT"}

	envHeaders = []string{
		"OTEL_EXPORTER_OTLP_LOGS_HEADERS",
		"OTEL_EXPORTER_OTLP_HEADERS",
	}

	envCompression = []string{
		"OTEL_EXPORTER_OTLP_LOGS_COMPRESSION",
		"OTEL_EXPORTER_OTLP_COMPRESSION",
	}

	envTimeout = []string{
		"OTEL_EXPORTER_OTLP_LOGS_TIMEOUT",
		"OTEL_EXPORTER_OTLP_TIMEOUT",
	}

	envTLSCert = []string{
		"OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE",
		"OTEL_EXPORTER_OTLP_CERTIFICATE",
	}
	// envTLSClient pairs the client certificate and key variables used for
	// mTLS; the signal-specific pair is checked before the generic one.
	envTLSClient = []struct {
		Certificate string
		Key         string
	}{
		{
			"OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE",
			"OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY",
		},
		{
			"OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE",
			"OTEL_EXPORTER_OTLP_CLIENT_KEY",
		},
	}
)
+
// Option applies an option to the Exporter.
type Option interface {
	applyHTTPOption(config) config
}

// fnOpt is a function adapter that implements Option.
type fnOpt func(config) config

// applyHTTPOption applies f to c and returns the updated config.
func (f fnOpt) applyHTTPOption(c config) config { return f(c) }
+
// config holds the Exporter settings. Each field is a setting that records
// both the value and whether it was explicitly assigned, so newConfig can
// layer options over environment variables over defaults.
type config struct {
	endpoint    setting[string]
	path        setting[string]
	insecure    setting[bool]
	tlsCfg      setting[*tls.Config]
	headers     setting[map[string]string]
	compression setting[Compression]
	timeout     setting[time.Duration]
	proxy       setting[HTTPTransportProxyFunc]
	retryCfg    setting[retry.Config]
}
+
// newConfig applies options and then resolves every setting against its
// environment variables and package defaults. Explicitly passed options take
// precedence over the environment, which takes precedence over the defaults
// (each resolver skips settings that are already set).
func newConfig(options []Option) config {
	var c config
	for _, opt := range options {
		c = opt.applyHTTPOption(c)
	}

	c.endpoint = c.endpoint.Resolve(
		getenv[string](envEndpoint, convEndpoint),
		fallback[string](defaultEndpoint),
	)
	// The signal-specific endpoint's path is used verbatim, while the
	// generic OTLP endpoint gets the log path appended (hence the two
	// differently-converted resolvers).
	c.path = c.path.Resolve(
		getenv[string](envPathSignal, convPathExact),
		getenv[string](envPathOTLP, convPath),
		fallback[string](defaultPath),
	)
	c.insecure = c.insecure.Resolve(
		getenv[bool](envInsecure, convInsecure),
	)
	c.tlsCfg = c.tlsCfg.Resolve(
		loadEnvTLS[*tls.Config](),
	)
	c.headers = c.headers.Resolve(
		getenv[map[string]string](envHeaders, convHeaders),
	)
	c.compression = c.compression.Resolve(
		getenv[Compression](envCompression, convCompression),
	)
	c.timeout = c.timeout.Resolve(
		getenv[time.Duration](envTimeout, convDuration),
		fallback[time.Duration](defaultTimeout),
	)
	c.proxy = c.proxy.Resolve(
		fallback[HTTPTransportProxyFunc](defaultProxy),
	)
	c.retryCfg = c.retryCfg.Resolve(
		fallback[retry.Config](defaultRetryCfg),
	)

	return c
}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to. This
+// endpoint is specified as a host and optional port, no path or scheme should
+// be included (see WithInsecure and WithURLPath).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpoint(endpoint string) Option {
+ return fnOpt(func(c config) config {
+ c.endpoint = newSetting(endpoint)
+ return c
+ })
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
func WithEndpointURL(rawURL string) Option {
	// The URL is parsed eagerly, so an invalid value is reported once at
	// option-construction time and the returned Option becomes a no-op.
	u, err := url.Parse(rawURL)
	if err != nil {
		global.Error(err, "otlplog: parse endpoint url", "url", rawURL)
		return fnOpt(func(c config) config { return c })
	}
	return fnOpt(func(c config) config {
		// A single URL determines the endpoint host, the URL path, and
		// (via the scheme) whether transport security is used.
		c.endpoint = newSetting(u.Host)
		c.path = newSetting(u.Path)
		c.insecure = newSetting(u.Scheme != "https")
		return c
	})
}
+
+// Compression describes the compression used for exported payloads.
+type Compression int
+
+const (
+ // NoCompression represents that no compression should be used.
+ NoCompression Compression = iota
+ // GzipCompression represents that gzip compression should be used.
+ GzipCompression
+)
+
+// WithCompression sets the compression strategy the Exporter will use to
+// compress the HTTP body.
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compression strategy will be used.
+func WithCompression(compression Compression) Option {
+ return fnOpt(func(c config) config {
+ c.compression = newSetting(compression)
+ return c
+ })
+}
+
+// WithURLPath sets the URL path the Exporter will send requests to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, the path
+// contained in that variable value will be used. If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "/v1/logs" will be used.
+func WithURLPath(urlPath string) Option {
+ return fnOpt(func(c config) config {
+ c.path = newSetting(urlPath)
+ return c
+ })
+}
+
+// WithTLSClientConfig sets the TLS configuration the Exporter will use for
+// HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, the system default configuration is used.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+ return fnOpt(func(c config) config {
+ c.tlsCfg = newSetting(tlsCfg.Clone())
+ return c
+ })
+}
+
+// WithInsecure disables client transport security for the Exporter's HTTP
+// connection.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+func WithInsecure() Option {
+ return fnOpt(func(c config) config {
+ c.insecure = newSetting(true)
+ return c
+ })
+}
+
+// WithHeaders will send the provided headers with each HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_LOGS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_LOGS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+ return fnOpt(func(c config) config {
+ c.headers = newSetting(headers)
+ return c
+ })
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the log data is
+// dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_LOGS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+ return fnOpt(func(c config) config {
+ c.timeout = newSetting(duration)
+ return c
+ })
+}
+
+// RetryConfig defines configuration for retrying the export of log data that
+// failed.
+type RetryConfig retry.Config
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+ return fnOpt(func(c config) config {
+ c.retryCfg = newSetting(retry.Config(rc))
+ return c
+ })
+}
+
+// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy
+// for a given request. This type is compatible with http.Transport.Proxy and
+// can be used to set a custom proxy function to the OTLP HTTP client.
+type HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+// WithProxy sets the Proxy function the client will use to determine the
+// proxy to use for an HTTP request. If this option is not used, the client
+// will use [http.ProxyFromEnvironment].
+func WithProxy(pf HTTPTransportProxyFunc) Option {
+ return fnOpt(func(c config) config {
+ c.proxy = newSetting(pf)
+ return c
+ })
+}
+
// setting is a configuration setting value paired with whether it has been
// explicitly assigned.
type setting[T any] struct {
	Value T
	Set   bool
}

// newSetting returns a setting holding value and marked as set.
func newSetting[T any](value T) setting[T] {
	return setting[T]{Set: true, Value: value}
}

// resolver maps a setting to an updated setting, applying one resolution
// operation.
type resolver[T any] func(setting[T]) setting[T]

// Resolve returns a resolved version of s.
//
// The resolvers are applied in order, each receiving the previous one's
// output; s seeds the first call. Each resolver is responsible for checking
// the Set state itself — no such check is performed while chaining.
func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] {
	out := s
	for i := range fn {
		out = fn[i](out)
	}
	return out
}
+
// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by
// the OTLP TLS environment variables. This will load both the rootCAs and
// certificates used for mTLS.
//
// If the filepath defined is invalid or does not contain valid TLS files, an
// error is passed to the OTel ErrorHandler and no TLS configuration is
// provided.
func loadEnvTLS[T *tls.Config]() resolver[T] {
	return func(s setting[T]) setting[T] {
		if s.Set {
			// Passed, valid, options have precedence.
			return s
		}

		// Root CAs: the first certificate env var with a value wins,
		// even if loading it then fails.
		var rootCAs *x509.CertPool
		var err error
		for _, key := range envTLSCert {
			if v := os.Getenv(key); v != "" {
				rootCAs, err = loadCertPool(v)
				break
			}
		}

		// Client (mTLS) certificates: the first pair with both the
		// certificate and key env vars set wins. Errors accumulate with
		// any root-CA error above.
		var certs []tls.Certificate
		for _, pair := range envTLSClient {
			cert := os.Getenv(pair.Certificate)
			key := os.Getenv(pair.Key)
			if cert != "" && key != "" {
				var e error
				certs, e = loadCertificates(cert, key)
				err = errors.Join(err, e)
				break
			}
		}

		if err != nil {
			// Report the failure but leave the setting unset so the
			// system default TLS configuration is used.
			err = fmt.Errorf("failed to load TLS: %w", err)
			otel.Handle(err)
		} else if rootCAs != nil || certs != nil {
			s.Set = true
			s.Value = &tls.Config{RootCAs: rootCAs, Certificates: certs}
		}
		return s
	}
}
+
+// readFile is used for testing.
+var readFile = os.ReadFile
+
+// loadCertPool loads and returns the *x509.CertPool found at path if it exists
+// and is valid. Otherwise, nil and an error is returned.
+func loadCertPool(path string) (*x509.CertPool, error) {
+ b, err := readFile(path)
+ if err != nil {
+ return nil, err
+ }
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(b); !ok {
+ return nil, errors.New("certificate not added")
+ }
+ return cp, nil
+}
+
+// loadCertificates loads and returns the tls.Certificate found at path if it
+// exists and is valid. Otherwise, nil and an error is returned.
+func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) {
+ cert, err := readFile(certPath)
+ if err != nil {
+ return nil, err
+ }
+ key, err := readFile(keyPath)
+ if err != nil {
+ return nil, err
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ return nil, err
+ }
+ return []tls.Certificate{crt}, nil
+}
+
// getenv returns a resolver that will apply an environment variable value
// associated with the first set key to a setting value. The conv function is
// used to convert between the environment variable value and the setting type.
//
// If the input setting to the resolver is set, the environment variable will
// not be applied.
//
// Any error returned from conv is sent to the OTel ErrorHandler and the
// setting will not be updated.
func getenv[T any](keys []string, conv func(string) (T, error)) resolver[T] {
	return func(s setting[T]) setting[T] {
		if s.Set {
			// Passed, valid, options have precedence.
			return s
		}

		for _, key := range keys {
			if vStr := os.Getenv(key); vStr != "" {
				v, err := conv(vStr)
				if err == nil {
					s.Value = v
					s.Set = true
					// First successfully converted key wins.
					break
				}
				// Report the bad value and fall through to the
				// next (less specific) key.
				otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err))
			}
		}
		return s
	}
}
+
// convEndpoint extracts the host (and optional port) from the URL string s.
// An invalid URL yields "" and an error.
func convEndpoint(s string) (string, error) {
	parsed, err := url.Parse(s)
	if err != nil {
		return "", err
	}
	return parsed.Host, nil
}
+
// convPathExact extracts the exact path from the URL string s, substituting
// "/" when the URL carries no path. An invalid URL yields "" and an error.
func convPathExact(s string) (string, error) {
	parsed, err := url.Parse(s)
	if err != nil {
		return "", err
	}
	if p := parsed.Path; p != "" {
		return p, nil
	}
	return "/", nil
}
+
// convPath builds an OTLP log endpoint path from the URL string s by
// appending the signal path to any base path present. An invalid URL yields
// "" and an error.
func convPath(s string) (string, error) {
	parsed, err := url.Parse(s)
	if err != nil {
		return "", err
	}
	return parsed.Path + "/v1/logs", nil
}
+
// convInsecure reports whether the URL string s requests an insecure
// connection: every scheme other than "https" is treated as insecure. An
// invalid URL yields false and an error.
func convInsecure(s string) (bool, error) {
	parsed, err := url.Parse(s)
	if err != nil {
		return false, err
	}
	switch parsed.Scheme {
	case "https":
		return false, nil
	default:
		return true, nil
	}
}
+
// convHeaders converts the OTel environment variable header value s into a
// mapping of header key to value. Entries are comma-separated "k=v" pairs
// whose keys and values are URL-path-unescaped and whitespace-trimmed. If s
// is invalid a partial result and a joined error are returned.
func convHeaders(s string) (map[string]string, error) {
	out := make(map[string]string)
	var errs error
	for _, header := range strings.Split(s, ",") {
		rawKey, rawVal, found := strings.Cut(header, "=")
		if !found {
			errs = errors.Join(errs, fmt.Errorf("invalid header: %s", header))
			continue
		}

		escKey, keyErr := url.PathUnescape(rawKey)
		if keyErr != nil {
			errs = errors.Join(errs, fmt.Errorf("invalid header key: %s", rawKey))
			continue
		}

		escVal, valErr := url.PathUnescape(rawVal)
		if valErr != nil {
			errs = errors.Join(errs, fmt.Errorf("invalid header value: %s", rawVal))
			continue
		}

		out[strings.TrimSpace(escKey)] = strings.TrimSpace(escVal)
	}
	return out, errs
}
+
+// convCompression returns the parsed compression encoded in s. NoCompression
+// and an errors are returned if s is unknown.
+func convCompression(s string) (Compression, error) {
+ switch s {
+ case "gzip":
+ return GzipCompression, nil
+ case "none", "":
+ return NoCompression, nil
+ }
+ return NoCompression, fmt.Errorf("unknown compression: %s", s)
+}
+
+// convDuration converts s into a duration of milliseconds. If s does not
+// contain an integer, 0 and an error are returned.
+func convDuration(s string) (time.Duration, error) {
+ d, err := strconv.Atoi(s)
+ if err != nil {
+ return 0, err
+ }
+ // OTel durations are defined in milliseconds.
+ return time.Duration(d) * time.Millisecond, nil
+}
+
+// fallback returns a resolve that will set a setting value to val if it is not
+// already set.
+//
+// This is usually passed at the end of a resolver chain to ensure a default is
+// applied if the setting has not already been set.
+func fallback[T any](val T) resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if !s.Set {
+ s.Value = val
+ s.Set = true
+ }
+ return s
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go
new file mode 100644
index 000000000..2607e3b9b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlploghttp provides an OTLP log exporter. The exporter uses HTTP to
+transport OTLP protobuf payloads.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/logs" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+environment variable and by [WithEndpoint], [WithEndpointURL], [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_LOGS_ENDPOINT (default: "https://localhost:4318/v1/logs") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_LOGS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_LOGS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION (default: none) -
+the compression strategy the exporter uses to compress the HTTP body.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_LOGS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go
new file mode 100644
index 000000000..f1c8d3ae0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go
@@ -0,0 +1,73 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+
+import (
+ "context"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform"
+ "go.opentelemetry.io/otel/sdk/log"
+)
+
// Exporter is a OpenTelemetry log Exporter. It transports log data encoded as
// OTLP protobufs using HTTP.
// Exporter must be created with [New].
type Exporter struct {
	// client performs the OTLP uploads. It is held atomically so Shutdown
	// can swap in a no-op client concurrently with Export calls.
	client atomic.Pointer[client]
	// stopped records that Shutdown was called; Export then drops records.
	stopped atomic.Bool
}
+
+// Compile-time check Exporter implements [log.Exporter].
+var _ log.Exporter = (*Exporter)(nil)
+
// New returns a new [Exporter].
//
// It is recommended to use it with a [BatchProcessor]
// or other processor exporting records asynchronously.
func New(_ context.Context, options ...Option) (*Exporter, error) {
	// The context parameter is unused; configuration and client
	// construction are synchronous.
	cfg := newConfig(options)
	c, err := newHTTPClient(cfg)
	if err != nil {
		return nil, err
	}
	return newExporter(c, cfg)
}

// newExporter wraps c in an Exporter. The config argument is accepted but
// currently unused here — presumably kept for signature parity; confirm
// against the sibling exporter constructors.
func newExporter(c *client, _ config) (*Exporter, error) {
	e := &Exporter{}
	e.client.Store(c)
	return e, nil
}
+
+// Used for testing.
+var transformResourceLogs = transform.ResourceLogs
+
// Export transforms and transmits log records to an OTLP receiver.
func (e *Exporter) Export(ctx context.Context, records []log.Record) error {
	// After Shutdown, records are silently dropped.
	if e.stopped.Load() {
		return nil
	}
	otlp := transformResourceLogs(records)
	if otlp == nil {
		// Nothing to send (no records produced any payload).
		return nil
	}
	return e.client.Load().UploadLogs(ctx, otlp)
}
+
// Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform
// no operation after this is called.
func (e *Exporter) Shutdown(ctx context.Context) error {
	// Only the first call performs the swap; later calls are no-ops.
	// The context is not consulted: shutdown completes immediately.
	if e.stopped.Swap(true) {
		return nil
	}

	// Swap in a no-op client so concurrent Export calls that loaded the
	// stopped flag before it was set still do nothing harmful.
	e.client.Store(newNoopClient())
	return nil
}
+
// ForceFlush does nothing. The Exporter holds no state.
func (e *Exporter) ForceFlush(ctx context.Context) error {
	// Nothing is buffered by this Exporter, so there is nothing to flush.
	return nil
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go
new file mode 100644
index 000000000..661576ce2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to not retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
	if !c.Enabled {
		// Retries disabled: run the request exactly once.
		return func(ctx context.Context, fn func(context.Context) error) error {
			return fn(ctx)
		}
	}

	return func(ctx context.Context, fn func(context.Context) error) error {
		// Do not use NewExponentialBackOff since it calls Reset and the code here
		// must call Reset after changing the InitialInterval (this saves an
		// unnecessary call to Now).
		b := &backoff.ExponentialBackOff{
			InitialInterval:     c.InitialInterval,
			RandomizationFactor: backoff.DefaultRandomizationFactor,
			Multiplier:          backoff.DefaultMultiplier,
			MaxInterval:         c.MaxInterval,
			MaxElapsedTime:      c.MaxElapsedTime,
			Stop:                backoff.Stop,
			Clock:               backoff.SystemClock,
		}
		b.Reset()

		for {
			err := fn(ctx)
			if err == nil {
				return nil
			}

			retryable, throttle := evaluate(err)
			if !retryable {
				return err
			}

			bOff := b.NextBackOff()
			if bOff == backoff.Stop {
				// The backoff budget (MaxElapsedTime) is exhausted.
				return fmt.Errorf("max retry time elapsed: %w", err)
			}

			// Wait for the greater of the backoff or throttle delay.
			var delay time.Duration
			if bOff > throttle {
				delay = bOff
			} else {
				// Honoring the server throttle must not exceed the
				// configured maximum elapsed time either.
				elapsed := b.GetElapsedTime()
				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
					return fmt.Errorf("max retry time would elapse: %w", err)
				}
				delay = throttle
			}

			// A canceled/expired context aborts the retry loop, wrapping
			// both the context error and the last request error.
			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
				return fmt.Errorf("%w: %w", ctxErr, err)
			}
		}
	}
}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go
new file mode 100644
index 000000000..adf407800
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go
@@ -0,0 +1,391 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlplog/transform/log.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package transform provides transformation functionality from the
+// sdk/log data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform"
+
+import (
+ "time"
+
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+ lpb "go.opentelemetry.io/proto/otlp/logs/v1"
+ rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+
+ "go.opentelemetry.io/otel/attribute"
+ api "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/log"
+)
+
+// ResourceLogs returns an slice of OTLP ResourceLogs generated from records.
+func ResourceLogs(records []log.Record) []*lpb.ResourceLogs {
+ if len(records) == 0 {
+ return nil
+ }
+
+ resMap := make(map[attribute.Distinct]*lpb.ResourceLogs)
+
+ type key struct {
+ r attribute.Distinct
+ is instrumentation.Scope
+ }
+ scopeMap := make(map[key]*lpb.ScopeLogs)
+
+ var resources int
+ for _, r := range records {
+ res := r.Resource()
+ rKey := res.Equivalent()
+ scope := r.InstrumentationScope()
+ k := key{
+ r: rKey,
+ is: scope,
+ }
+ sl, iOk := scopeMap[k]
+ if !iOk {
+ sl = new(lpb.ScopeLogs)
+ var emptyScope instrumentation.Scope
+ if scope != emptyScope {
+ sl.Scope = &cpb.InstrumentationScope{
+ Name: scope.Name,
+ Version: scope.Version,
+ Attributes: AttrIter(scope.Attributes.Iter()),
+ }
+ sl.SchemaUrl = scope.SchemaURL
+ }
+ scopeMap[k] = sl
+ }
+
+ sl.LogRecords = append(sl.LogRecords, LogRecord(r))
+ rl, rOk := resMap[rKey]
+ if !rOk {
+ resources++
+ rl = new(lpb.ResourceLogs)
+ if res.Len() > 0 {
+ rl.Resource = &rpb.Resource{
+ Attributes: AttrIter(res.Iter()),
+ }
+ }
+ rl.SchemaUrl = res.SchemaURL()
+ resMap[rKey] = rl
+ }
+ if !iOk {
+ rl.ScopeLogs = append(rl.ScopeLogs, sl)
+ }
+ }
+
+ // Transform the categorized map into a slice
+ resLogs := make([]*lpb.ResourceLogs, 0, resources)
+ for _, rl := range resMap {
+ resLogs = append(resLogs, rl)
+ }
+
+ return resLogs
+}
+
+// LogRecord returns an OTLP LogRecord generated from record.
+func LogRecord(record log.Record) *lpb.LogRecord {
+ r := &lpb.LogRecord{
+ TimeUnixNano: timeUnixNano(record.Timestamp()),
+ ObservedTimeUnixNano: timeUnixNano(record.ObservedTimestamp()),
+ EventName: record.EventName(),
+ SeverityNumber: SeverityNumber(record.Severity()),
+ SeverityText: record.SeverityText(),
+ Body: LogAttrValue(record.Body()),
+ Attributes: make([]*cpb.KeyValue, 0, record.AttributesLen()),
+ Flags: uint32(record.TraceFlags()),
+ // TODO: DroppedAttributesCount: /* ... */,
+ }
+ record.WalkAttributes(func(kv api.KeyValue) bool {
+ r.Attributes = append(r.Attributes, LogAttr(kv))
+ return true
+ })
+ if tID := record.TraceID(); tID.IsValid() {
+ r.TraceId = tID[:]
+ }
+ if sID := record.SpanID(); sID.IsValid() {
+ r.SpanId = sID[:]
+ }
+ return r
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64. The result is undefined if the Unix
+// time in nanoseconds cannot be represented by an int64 (a date before the
+// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The
+// result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+ nano := t.UnixNano()
+ if nano < 0 {
+ return 0
+ }
+ return uint64(nano) // nolint:gosec // Overflow checked.
+}
+
+// AttrIter transforms an [attribute.Iterator] into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, Attr(iter.Attribute()))
+ }
+ return out
+}
+
+// Attrs transforms a slice of [attribute.KeyValue] into OTLP key-values.
+func Attrs(attrs []attribute.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, Attr(kv))
+ }
+ return out
+}
+
+// Attr transforms an [attribute.KeyValue] into an OTLP key-value.
+func Attr(kv attribute.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{Key: string(kv.Key), Value: AttrValue(kv.Value)}
+}
+
+// AttrValue transforms an [attribute.Value] into an OTLP AnyValue.
+func AttrValue(v attribute.Value) *cpb.AnyValue {
+ av := new(cpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &cpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &cpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &cpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+// Attrs transforms a slice of [api.KeyValue] into OTLP key-values.
+func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, LogAttr(kv))
+ }
+ return out
+}
+
+// LogAttr transforms an [api.KeyValue] into an OTLP key-value.
+func LogAttr(attr api.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{
+ Key: attr.Key,
+ Value: LogAttrValue(attr.Value),
+ }
+}
+
+// LogAttrValues transforms a slice of [api.Value] into an OTLP []AnyValue.
+func LogAttrValues(vals []api.Value) []*cpb.AnyValue {
+ if len(vals) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.AnyValue, 0, len(vals))
+ for _, v := range vals {
+ out = append(out, LogAttrValue(v))
+ }
+ return out
+}
+
+// LogAttrValue transforms an [api.Value] into an OTLP AnyValue.
+func LogAttrValue(v api.Value) *cpb.AnyValue {
+ av := new(cpb.AnyValue)
+ switch v.Kind() {
+ case api.KindBool:
+ av.Value = &cpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case api.KindInt64:
+ av.Value = &cpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case api.KindFloat64:
+ av.Value = &cpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case api.KindString:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case api.KindBytes:
+ av.Value = &cpb.AnyValue_BytesValue{
+ BytesValue: v.AsBytes(),
+ }
+ case api.KindSlice:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: LogAttrValues(v.AsSlice()),
+ },
+ }
+ case api.KindMap:
+ av.Value = &cpb.AnyValue_KvlistValue{
+ KvlistValue: &cpb.KeyValueList{
+ Values: LogAttrs(v.AsMap()),
+ },
+ }
+ default:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+// SeverityNumber transforms a [log.Severity] into an OTLP SeverityNumber.
+func SeverityNumber(s api.Severity) lpb.SeverityNumber {
+ switch s {
+ case api.SeverityTrace:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE
+ case api.SeverityTrace2:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE2
+ case api.SeverityTrace3:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE3
+ case api.SeverityTrace4:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE4
+ case api.SeverityDebug:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG
+ case api.SeverityDebug2:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG2
+ case api.SeverityDebug3:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG3
+ case api.SeverityDebug4:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG4
+ case api.SeverityInfo:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_INFO
+ case api.SeverityInfo2:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_INFO2
+ case api.SeverityInfo3:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_INFO3
+ case api.SeverityInfo4:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_INFO4
+ case api.SeverityWarn:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_WARN
+ case api.SeverityWarn2:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_WARN2
+ case api.SeverityWarn3:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_WARN3
+ case api.SeverityWarn4:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_WARN4
+ case api.SeverityError:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR
+ case api.SeverityError2:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR2
+ case api.SeverityError3:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR3
+ case api.SeverityError4:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR4
+ case api.SeverityFatal:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL
+ case api.SeverityFatal2:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL2
+ case api.SeverityFatal3:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL3
+ case api.SeverityFatal4:
+ return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL4
+ }
+ return lpb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go
new file mode 100644
index 000000000..8315200fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+
+// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use.
+func Version() string {
+ return "0.11.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md
new file mode 100644
index 000000000..9184068d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Metric gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
new file mode 100644
index 000000000..e0fa0570a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -0,0 +1,205 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // ourConn keeps track of where conn was created: true if created here in
+ // NewClient, or false if passed with an option. This is important on
+ // Shutdown as the conn should only be closed if we created it. Otherwise,
+ // it is up to the processes that passed the conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ msc colmetricpb.MetricsServiceClient
+}
+
+// newClient creates a new gRPC metric client.
+func newClient(_ context.Context, cfg oconf.Config) (*client, error) {
+ c := &client{
+ exportTimeout: cfg.Metrics.Timeout,
+ requestFunc: cfg.RetryConfig.RequestFunc(retryable),
+ conn: cfg.GRPCConn,
+ }
+
+ if len(cfg.Metrics.Headers) > 0 {
+ c.metadata = metadata.New(cfg.Metrics.Headers)
+ }
+
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the client was
+ // created, create one using the configuration they did provide.
+ userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version()
+ dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
+ dialOpts = append(dialOpts, cfg.DialOptions...)
+
+ conn, err := grpc.NewClient(cfg.Metrics.Endpoint, dialOpts...)
+ if err != nil {
+ return nil, err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
+
+ return c, nil
+}
+
+// Shutdown shuts down the client, freeing all resource.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+func (c *client) Shutdown(ctx context.Context) error {
+ // The otlpmetric.Exporter synchronizes access to client methods and
+ // ensures this is called only once. The only thing that needs to be done
+ // here is to release any computational resources the client holds.
+
+ c.metadata = nil
+ c.requestFunc = nil
+ c.msc = nil
+
+ err := ctx.Err()
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ c.conn = nil
+ return err
+}
+
+// UploadMetrics sends protoMetrics to connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+ // The otlpmetric.Exporter synchronizes access to client methods, and
+ // ensures this is not called after the Exporter is shutdown. Only thing
+ // to do here is send data.
+
+ select {
+ case <-ctx.Done():
+ // Do not upload if the context is already expired.
+ return ctx.Err()
+ default:
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
+ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedDataPoints()
+ if n != 0 || msg != "" {
+ err := internal.MetricPartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function based on the clients configured export timeout.
+//
+// It is the callers responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ md := c.metadata
+ if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
+ md = metadata.Join(md, outMD)
+ }
+
+ ctx = metadata.NewOutgoingContext(ctx, md)
+ }
+
+ return ctx, cancel
+}
+
+// retryable returns if err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ // Additionally, handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay returns if the status is RetryInfo
+// and the duration to wait for if an explicit throttle time is included.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return true, t.RetryDelay.AsDuration()
+ }
+ }
+ return false, 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
new file mode 100644
index 000000000..db6e3714b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
@@ -0,0 +1,266 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Option applies a configuration option to the Exporter.
+type Option interface {
+ applyGRPCOption(oconf.Config) oconf.Config
+}
+
+func asGRPCOptions(opts []Option) []oconf.GRPCOption {
+ converted := make([]oconf.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying the export of metric data
+// that failed.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ oconf.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
+ return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the Exporter's gRPC
+// connection, just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{oconf.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod set the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.ReconnectionPeriod = rp
+ return cfg
+ })}
+}
+
+func compressorToCompression(compressor string) oconf.Compression {
+ if compressor == "gzip" {
+ return oconf.GzipCompression
+ }
+
+ otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+ return oconf.NoCompression
+}
+
+// WithCompressor sets the compressor the gRPC client uses.
+// Supported compressor values: "gzip".
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compressor will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+ return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC requests.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTLSCredentials sets the gRPC connection to use creds.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no TLS credentials will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.Metrics.GRPCCredentials = creds
+ return cfg
+ })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.ServiceConfig = serviceConfig
+ return cfg
+ })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when establishing a
+// gRPC connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.DialOptions = opts
+ return cfg
+ })}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the callers responsibility to close the passed conn. The Exporter
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.GRPCConn = conn
+ return cfg
+ })}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+ return wrappedOption{oconf.WithRetry(retry.Config(settings))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+ return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+ return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
new file mode 100644
index 000000000..dcd8de5df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
@@ -0,0 +1,84 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port, and a path.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT setting overrides
+the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT.
+OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+ - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+ - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+ - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+ - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+ - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
new file mode 100644
index 000000000..3977c1f8a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
@@ -0,0 +1,157 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is a OpenTelemetry metric Exporter using gRPC.
+type Exporter struct {
+ // Ensure synchronous access to the client across all functionality.
+ clientMu sync.Mutex
+ client interface {
+ UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
+ Shutdown(context.Context) error
+ }
+
+ temporalitySelector metric.TemporalitySelector
+ aggregationSelector metric.AggregationSelector
+
+ shutdownOnce sync.Once
+}
+
+func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
+ ts := cfg.Metrics.TemporalitySelector
+ if ts == nil {
+ ts = func(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+ }
+ }
+
+ as := cfg.Metrics.AggregationSelector
+ if as == nil {
+ as = metric.DefaultAggregationSelector
+ }
+
+ return &Exporter{
+ client: c,
+
+ temporalitySelector: ts,
+ aggregationSelector: as,
+ }, nil
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.temporalitySelector(k)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.aggregationSelector(k)
+}
+
+// Export transforms and transmits metric data to an OTLP receiver.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ defer global.Debug("OTLP/gRPC exporter export", "Data", rm)
+
+ otlpRm, err := transform.ResourceMetrics(rm)
+ // Best effort upload of transformable metrics.
+ e.clientMu.Lock()
+ upErr := e.client.UploadMetrics(ctx, otlpRm)
+ e.clientMu.Unlock()
+ if upErr != nil {
+ if err == nil {
+ return fmt.Errorf("failed to upload metrics: %w", upErr)
+ }
+ // Merge the two errors.
+ return fmt.Errorf("failed to upload incomplete metrics (%w): %w", err, upErr)
+ }
+ return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+ // The exporter and client hold no state, nothing to flush.
+ return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ e.clientMu.Lock()
+ client := e.client
+ e.client = shutdownClient{}
+ e.clientMu.Unlock()
+ err = client.Shutdown(ctx)
+ })
+ return err
+}
+
+var errShutdown = errors.New("gRPC exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+ return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+ return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct{ Type string }{Type: "OTLP/gRPC"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using gRPC.
+//
+// If an already established gRPC ClientConn is not passed in options using
+// WithGRPCConn, a connection to the OTLP endpoint will be established based
+// on options. If a connection cannot be establishes in the lifetime of ctx,
+// an error will be returned.
+func New(ctx context.Context, options ...Option) (*Exporter, error) {
+ cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
+ c, err := newClient(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return newExporter(c, cfg)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..261f55026
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,215 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '="), "parse headers", "input", header)
+ continue
+ }
+
+ trimmedName := strings.TrimSpace(n)
+
+ // Validate the key.
+ if !isValidHeaderKey(trimmedName) {
+ global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
+ continue
+ }
+
+ // Only decode the value.
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
+
+func isValidHeaderKey(key string) bool {
+ if key == "" {
+ return false
+ }
+ for _, c := range key {
+ if !isTokenChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isTokenChar(c rune) bool {
+ return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+ unicode.IsDigit(c) ||
+ c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+ c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
new file mode 100644
index 000000000..95e2f4ba3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
new file mode 100644
index 000000000..7ae53f2d1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
@@ -0,0 +1,210 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// DefaultEnvOptionsReader is the default environments reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if an URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Metrics.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
+ withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+ )
+
+ return opts
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
+
+func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "cumulative":
+ fn(cumulativeTemporality)
+ case "delta":
+ fn(deltaTemporality)
+ case "lowmemory":
+ fn(lowMemory)
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
+
+func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+}
+
+func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "explicit_bucket_histogram":
+ fn(metric.DefaultAggregationSelector)
+ case "base2_exponential_bucket_histogram":
+ fn(func(kind metric.InstrumentKind) metric.Aggregation {
+ if kind == metric.InstrumentKindHistogram {
+ return metric.AggregationBase2ExponentialHistogram{
+ MaxSize: 160,
+ MaxScale: 20,
+ NoMinMax: false,
+ }
+ }
+ return metric.DefaultAggregationSelector(kind)
+ })
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
new file mode 100644
index 000000000..2ac8db5a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
@@ -0,0 +1,374 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+const (
+ // DefaultMaxAttempts describes how many times the driver
+ // should retry the sending of the payload in case of a
+ // retryable error.
+ DefaultMaxAttempts int = 5
+ // DefaultMetricsPath is a default URL path for endpoint that
+ // receives metrics.
+ DefaultMetricsPath string = "/v1/metrics"
+ // DefaultBackoff is a default base backoff time used in the
+ // exponential backoff strategy.
+ DefaultBackoff time.Duration = 300 * time.Millisecond
+ // DefaultTimeout is a default max waiting time for the backend to process
+ // each span or metrics batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ TemporalitySelector metric.TemporalitySelector
+ AggregationSelector metric.AggregationSelector
+
+ Proxy HTTPTransportProxyFunc
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Metrics SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
+ return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = "/" + tmp
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+ // Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Metrics.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+ } else if cfg.Metrics.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Metrics.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Metrics.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logics
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = endpoint
+ return cfg
+ })
+}
+
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlpmetric: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Metrics.Endpoint = u.Host
+ cfg.Metrics.URLPath = u.Path
+ cfg.Metrics.Insecure = u.Scheme != "https"
+
+ return cfg
+ })
+}
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Timeout = duration
+ return cfg
+ })
+}
+
+func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.TemporalitySelector = selector
+ return cfg
+ })
+}
+
+func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.AggregationSelector = selector
+ return cfg
+ })
+}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Proxy = pf
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
new file mode 100644
index 000000000..83f6d7fd1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
@@ -0,0 +1,47 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import "time"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+ // Enabled indicates whether to not retry sending batches in case of export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+ // consecutive retries will always be `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+ // Once this value is reached, the data is discarded.
+ MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
new file mode 100644
index 000000000..03e7fbcdf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
@@ -0,0 +1,38 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return CreateTLSConfig(b)
+}
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
new file mode 100644
index 000000000..50e25fdbc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
new file mode 100644
index 000000000..cc3a77055
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to not retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns if an error is retry-able and if an explicit throttle
+// duration should be honored that was included in the error.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %w", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
new file mode 100644
index 000000000..2605c74d0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
@@ -0,0 +1,144 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// AttrIter transforms an attribute iterator into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *cpb.AnyValue {
+ av := new(cpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &cpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &cpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &cpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
new file mode 100644
index 000000000..d31652b4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
@@ -0,0 +1,103 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
// Sentinel errors wrapped by the transform functions when a metricdata
// value has no OTLP equivalent; callers match them with errors.Is.
var (
	errUnknownAggregation = errors.New("unknown aggregation")
	errUnknownTemporality = errors.New("unknown temporality")
)
+
// errMetric pairs an error with the OTLP Metric it was produced for, so the
// metric's identifying fields can be included in the error message.
type errMetric struct {
	m   *mpb.Metric // metric the error relates to
	err error       // underlying cause
}
+
+func (e errMetric) Unwrap() error {
+ return e.err
+}
+
+func (e errMetric) Error() string {
+ format := "invalid metric (name: %q, description: %q, unit: %q): %s"
+ return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
+}
+
+func (e errMetric) Is(target error) bool {
+ return errors.Is(e.err, target)
+}
+
// multiErr is used by the data-type transform functions to wrap multiple
// errors into a single return value. The error message will show all errors
// as a list and scope them by the datatype name that is returning them.
type multiErr struct {
	datatype string  // name of the OTLP data type being transformed
	errs     []error // collected errors, in occurrence order
}
+
+// errOrNil returns nil if e contains no errors, otherwise it returns e.
+func (e *multiErr) errOrNil() error {
+ if len(e.errs) == 0 {
+ return nil
+ }
+ return e
+}
+
+// append adds err to e. If err is a multiErr, its errs are flattened into e.
+func (e *multiErr) append(err error) {
+ // Do not use errors.As here, this should only be flattened one layer. If
+ // there is a *multiErr several steps down the chain, all the errors above
+ // it will be discarded if errors.As is used instead.
+ switch other := err.(type) { //nolint:errorlint
+ case *multiErr:
+ // Flatten err errors into e.
+ e.errs = append(e.errs, other.errs...)
+ default:
+ e.errs = append(e.errs, err)
+ }
+}
+
+func (e *multiErr) Error() string {
+ es := make([]string, len(e.errs))
+ for i, err := range e.errs {
+ es[i] = fmt.Sprintf("* %s", err)
+ }
+
+ format := "%d errors occurred transforming %s:\n\t%s"
+ return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
+}
+
+func (e *multiErr) Unwrap() error {
+ switch len(e.errs) {
+ case 0:
+ return nil
+ case 1:
+ return e.errs[0]
+ }
+
+ // Return a multiErr without the leading error.
+ cp := &multiErr{
+ datatype: e.datatype,
+ errs: make([]error, len(e.errs)-1),
+ }
+ copy(cp.errs, e.errs[1:])
+ return cp
+}
+
+func (e *multiErr) Is(target error) bool {
+ if len(e.errs) == 0 {
+ return false
+ }
+ // Check if the first error is target.
+ return errors.Is(e.errs[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
new file mode 100644
index 000000000..abf7f0219
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
@@ -0,0 +1,350 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package transform provides transformation functionality from the
+// sdk/metric/metricdata data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+ rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
+// contains invalid ScopeMetrics, an error will be returned along with an OTLP
+// ResourceMetrics that contains partial OTLP ScopeMetrics.
+func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+ sms, err := ScopeMetrics(rm.ScopeMetrics)
+ return &mpb.ResourceMetrics{
+ Resource: &rpb.Resource{
+ Attributes: AttrIter(rm.Resource.Iter()),
+ },
+ ScopeMetrics: sms,
+ SchemaUrl: rm.Resource.SchemaURL(),
+ }, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+ errs := &multiErr{datatype: "ScopeMetrics"}
+ out := make([]*mpb.ScopeMetrics, 0, len(sms))
+ for _, sm := range sms {
+ ms, err := Metrics(sm.Metrics)
+ if err != nil {
+ errs.append(err)
+ }
+
+ out = append(out, &mpb.ScopeMetrics{
+ Scope: &cpb.InstrumentationScope{
+ Name: sm.Scope.Name,
+ Version: sm.Scope.Version,
+ Attributes: AttrIter(sm.Scope.Attributes.Iter()),
+ },
+ Metrics: ms,
+ SchemaUrl: sm.Scope.SchemaURL,
+ })
+ }
+ return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+ errs := &multiErr{datatype: "Metrics"}
+ out := make([]*mpb.Metric, 0, len(ms))
+ for _, m := range ms {
+ o, err := metric(m)
+ if err != nil {
+ // Do not include invalid data. Drop the metric, report the error.
+ errs.append(errMetric{m: o, err: err})
+ continue
+ }
+ out = append(out, o)
+ }
+ return out, errs.errOrNil()
+}
+
// metric converts a single metricdata.Metrics into an OTLP Metric by
// dispatching on the dynamic type of its aggregated Data. For an
// unrecognized aggregation the partially populated Metric (identifying
// fields only) is returned with an errUnknownAggregation-wrapped error.
func metric(m metricdata.Metrics) (*mpb.Metric, error) {
	var err error
	out := &mpb.Metric{
		Name:        m.Name,
		Description: m.Description,
		Unit:        m.Unit,
	}
	switch a := m.Data.(type) {
	case metricdata.Gauge[int64]:
		out.Data = Gauge(a)
	case metricdata.Gauge[float64]:
		out.Data = Gauge(a)
	case metricdata.Sum[int64]:
		out.Data, err = Sum(a)
	case metricdata.Sum[float64]:
		out.Data, err = Sum(a)
	case metricdata.Histogram[int64]:
		out.Data, err = Histogram(a)
	case metricdata.Histogram[float64]:
		out.Data, err = Histogram(a)
	case metricdata.ExponentialHistogram[int64]:
		out.Data, err = ExponentialHistogram(a)
	case metricdata.ExponentialHistogram[float64]:
		out.Data, err = ExponentialHistogram(a)
	case metricdata.Summary:
		out.Data = Summary(a)
	default:
		return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
	}
	return out, err
}
+
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+ return &mpb.Metric_Gauge{
+ Gauge: &mpb.Gauge{
+ DataPoints: DataPoints(g.DataPoints),
+ },
+ }
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. An error is returned
+// if the temporality of s is unknown.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+ t, err := Temporality(s.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Sum{
+ Sum: &mpb.Sum{
+ AggregationTemporality: t,
+ IsMonotonic: s.IsMonotonic,
+ DataPoints: DataPoints(s.DataPoints),
+ },
+ }, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+ out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ ndp := &mpb.NumberDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ switch v := any(dPt.Value).(type) {
+ case int64:
+ ndp.Value = &mpb.NumberDataPoint_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ ndp.Value = &mpb.NumberDataPoint_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, ndp)
+ }
+ return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Histogram{
+ Histogram: &mpb.Histogram{
+ AggregationTemporality: t,
+ DataPoints: HistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
+ out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ hdp := &mpb.HistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ BucketCounts: dPt.BucketCounts,
+ ExplicitBounds: dPt.Bounds,
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ hdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ hdp.Max = &vF64
+ }
+ out = append(out, hdp)
+ }
+ return out
+}
+
+// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_ExponentialHistogram{
+ ExponentialHistogram: &mpb.ExponentialHistogram{
+ AggregationTemporality: t,
+ DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
+// from dPts.
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+ out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ ehdp := &mpb.ExponentialHistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ Scale: dPt.Scale,
+ ZeroCount: dPt.ZeroCount,
+ Exemplars: Exemplars(dPt.Exemplars),
+
+ Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
+ Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Max = &vF64
+ }
+ out = append(out, ehdp)
+ }
+ return out
+}
+
+// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
+// from bucket.
+func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+ return &mpb.ExponentialHistogramDataPoint_Buckets{
+ Offset: bucket.Offset,
+ BucketCounts: bucket.Counts,
+ }
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+ switch t {
+ case metricdata.DeltaTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+ case metricdata.CumulativeTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+ default:
+ err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+ }
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64.
+// The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64
+// (a date before the year 1678 or after 2262).
+// timeUnixNano on the zero Time returns 0.
+// The result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+ return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked.
+}
+
+// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
+func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exemplar {
+ out := make([]*mpb.Exemplar, 0, len(exemplars))
+ for _, exemplar := range exemplars {
+ e := &mpb.Exemplar{
+ FilteredAttributes: KeyValues(exemplar.FilteredAttributes),
+ TimeUnixNano: timeUnixNano(exemplar.Time),
+ SpanId: exemplar.SpanID,
+ TraceId: exemplar.TraceID,
+ }
+ switch v := any(exemplar.Value).(type) {
+ case int64:
+ e.Value = &mpb.Exemplar_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ e.Value = &mpb.Exemplar_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, e)
+ }
+ return out
+}
+
+// Summary returns an OTLP Metric_Summary generated from s.
+func Summary(s metricdata.Summary) *mpb.Metric_Summary {
+ return &mpb.Metric_Summary{
+ Summary: &mpb.Summary{
+ DataPoints: SummaryDataPoints(s.DataPoints),
+ },
+ }
+}
+
+// SummaryDataPoints returns a slice of OTLP SummaryDataPoint generated from
+// dPts.
+func SummaryDataPoints(dPts []metricdata.SummaryDataPoint) []*mpb.SummaryDataPoint {
+ out := make([]*mpb.SummaryDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sdp := &mpb.SummaryDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: dPt.Sum,
+ QuantileValues: QuantileValues(dPt.QuantileValues),
+ }
+ out = append(out, sdp)
+ }
+ return out
+}
+
+// QuantileValues returns a slice of OTLP SummaryDataPoint_ValueAtQuantile
+// generated from quantiles.
+func QuantileValues(quantiles []metricdata.QuantileValue) []*mpb.SummaryDataPoint_ValueAtQuantile {
+ out := make([]*mpb.SummaryDataPoint_ValueAtQuantile, 0, len(quantiles))
+ for _, q := range quantiles {
+ quantile := &mpb.SummaryDataPoint_ValueAtQuantile{
+ Quantile: q.Quantile,
+ Value: q.Value,
+ }
+ out = append(out, quantile)
+ }
+ return out
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
new file mode 100644
index 000000000..0b5dec3ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string {
	const version = "1.35.0"
	return version
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/README.md
new file mode 100644
index 000000000..b02cdcbbe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/README.md
@@ -0,0 +1,3 @@
+# OTLP Metric HTTP Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
new file mode 100644
index 000000000..86da30e37
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
@@ -0,0 +1,346 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+ colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
// client sends OTLP metrics payloads to a collector endpoint over HTTP
// using protobuf encoding.
type client struct {
	// req is cloned for every upload the client makes.
	req         *http.Request
	compression Compression        // payload compression (none or gzip)
	requestFunc retry.RequestFunc  // wraps uploads with the configured retry policy
	httpClient  *http.Client
}
+
// Keep it in sync with golang's DefaultTransport from net/http! We
// have our own copy to avoid handling a situation where the
// DefaultTransport is overwritten with some different implementation
// of http.RoundTripper or it's modified by another package.
//
// Shared by all clients that need no TLS/proxy customization so that
// idle connections can be reused across them.
var ourTransport = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).DialContext,
	ForceAttemptHTTP2:     true,
	MaxIdleConns:          100,
	IdleConnTimeout:       90 * time.Second,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
}
+
// newClient creates a new HTTP metric client from cfg. It builds the
// template request (endpoint URL, headers, User-Agent) that is cloned for
// every upload, and returns an error only if the endpoint URL is invalid.
func newClient(cfg oconf.Config) (*client, error) {
	httpClient := &http.Client{
		Transport: ourTransport,
		Timeout:   cfg.Metrics.Timeout,
	}

	// Only clone the shared transport when per-client customization (TLS
	// config or proxy) is required; otherwise share it for connection reuse.
	if cfg.Metrics.TLSCfg != nil || cfg.Metrics.Proxy != nil {
		clonedTransport := ourTransport.Clone()
		httpClient.Transport = clonedTransport

		if cfg.Metrics.TLSCfg != nil {
			clonedTransport.TLSClientConfig = cfg.Metrics.TLSCfg
		}
		if cfg.Metrics.Proxy != nil {
			clonedTransport.Proxy = cfg.Metrics.Proxy
		}
	}

	u := &url.URL{
		Scheme: "https",
		Host:   cfg.Metrics.Endpoint,
		Path:   cfg.Metrics.URLPath,
	}
	if cfg.Metrics.Insecure {
		u.Scheme = "http"
	}
	// Body is set when this is cloned during upload.
	req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody)
	if err != nil {
		return nil, err
	}

	userAgent := "OTel Go OTLP over HTTP/protobuf metrics exporter/" + Version()
	req.Header.Set("User-Agent", userAgent)

	// User headers are applied before Content-Type so they cannot override
	// the protobuf content type required by the protocol.
	if n := len(cfg.Metrics.Headers); n > 0 {
		for k, v := range cfg.Metrics.Headers {
			req.Header.Set(k, v)
		}
	}
	req.Header.Set("Content-Type", "application/x-protobuf")

	return &client{
		compression: Compression(cfg.Metrics.Compression),
		req:         req,
		requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
		httpClient:  httpClient,
	}, nil
}
+
+// Shutdown shuts down the client, freeing all resources.
+func (c *client) Shutdown(ctx context.Context) error {
+ // The otlpmetric.Exporter synchronizes access to client methods and
+ // ensures this is called only once. The only thing that needs to be done
+ // here is to release any computational resources the client holds.
+
+ c.requestFunc = nil
+ c.httpClient = nil
+ return ctx.Err()
+}
+
// UploadMetrics sends protoMetrics to the connected endpoint.
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
	// The otlpmetric.Exporter synchronizes access to client methods, and
	// ensures this is not called after the Exporter is shutdown. Only thing
	// to do here is send data.

	pbRequest := &colmetricpb.ExportMetricsServiceRequest{
		ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
	}
	// Marshal once; the request body is rebuilt from these bytes on every
	// retry attempt via request.reset.
	body, err := proto.Marshal(pbRequest)
	if err != nil {
		return err
	}
	request, err := c.newRequest(ctx, body)
	if err != nil {
		return err
	}

	return c.requestFunc(ctx, func(iCtx context.Context) error {
		// Bail out early if the (possibly retry-deadline-bound) context is
		// already done.
		select {
		case <-iCtx.Done():
			return iCtx.Err()
		default:
		}

		request.reset(iCtx)
		resp, err := c.httpClient.Do(request.Request)
		// Temporary transport errors are reported as retryable.
		var urlErr *url.Error
		if errors.As(err, &urlErr) && urlErr.Temporary() {
			return newResponseError(http.Header{}, err)
		}
		if err != nil {
			return err
		}
		if resp != nil && resp.Body != nil {
			defer func() {
				if err := resp.Body.Close(); err != nil {
					otel.Handle(err)
				}
			}()
		}

		if sc := resp.StatusCode; sc >= 200 && sc <= 299 {
			// Success, do not retry.

			// Read the partial success message, if any.
			var respData bytes.Buffer
			if _, err := io.Copy(&respData, resp.Body); err != nil {
				return err
			}
			if respData.Len() == 0 {
				return nil
			}

			if resp.Header.Get("Content-Type") == "application/x-protobuf" {
				var respProto colmetricpb.ExportMetricsServiceResponse
				if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
					return err
				}

				if respProto.PartialSuccess != nil {
					msg := respProto.PartialSuccess.GetErrorMessage()
					n := respProto.PartialSuccess.GetRejectedDataPoints()
					if n != 0 || msg != "" {
						// Partial success is reported to the global handler
						// rather than returned: the upload itself succeeded.
						err := internal.MetricPartialSuccessError(n, msg)
						otel.Handle(err)
					}
				}
			}
			return nil
		}
		// Error cases.

		// server may return a message with the response
		// body, so we read it to include in the error
		// message to be returned. It will help in
		// debugging the actual issue.
		var respData bytes.Buffer
		if _, err := io.Copy(&respData, resp.Body); err != nil {
			return err
		}
		respStr := strings.TrimSpace(respData.String())
		if len(respStr) == 0 {
			respStr = "(empty)"
		}
		bodyErr := fmt.Errorf("body: %s", respStr)

		switch resp.StatusCode {
		case http.StatusTooManyRequests,
			http.StatusBadGateway,
			http.StatusServiceUnavailable,
			http.StatusGatewayTimeout:
			// Retryable failure.
			return newResponseError(resp.Header, bodyErr)
		default:
			// Non-retryable failure.
			return fmt.Errorf("failed to send metrics to %s: %s (%w)", request.URL, resp.Status, bodyErr)
		}
	})
}
+
+var gzPool = sync.Pool{
+ New: func() interface{} {
+ w := gzip.NewWriter(io.Discard)
+ return w
+ },
+}
+
+func (c *client) newRequest(ctx context.Context, body []byte) (request, error) {
+ r := c.req.Clone(ctx)
+ req := request{Request: r}
+
+ switch c.compression {
+ case NoCompression:
+ r.ContentLength = (int64)(len(body))
+ req.bodyReader = bodyReader(body)
+ case GzipCompression:
+ // Ensure the content length is not used.
+ r.ContentLength = -1
+ r.Header.Set("Content-Encoding", "gzip")
+
+ gz := gzPool.Get().(*gzip.Writer)
+ defer gzPool.Put(gz)
+
+ var b bytes.Buffer
+ gz.Reset(&b)
+
+ if _, err := gz.Write(body); err != nil {
+ return req, err
+ }
+ // Close needs to be called to ensure body is fully written.
+ if err := gz.Close(); err != nil {
+ return req, err
+ }
+
+ req.bodyReader = bodyReader(b.Bytes())
+ }
+
+ return req, nil
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+ return func() io.ReadCloser {
+ return io.NopCloser(bytes.NewReader(buf))
+ }
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+ *http.Request
+
+ // bodyReader allows the same body to be used for multiple requests.
+ bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+ r.Body = r.bodyReader()
+ r.Request = r.Request.WithContext(ctx)
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+ throttle int64
+ err error
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers. The returned error wraps wrapped
+// if it is not nil.
+func newResponseError(header http.Header, wrapped error) error {
+ var rErr retryableError
+ if v := header.Get("Retry-After"); v != "" {
+ if t, err := strconv.ParseInt(v, 10, 64); err == nil {
+ rErr.throttle = t
+ }
+ }
+
+ rErr.err = wrapped
+ return rErr
+}
+
+func (e retryableError) Error() string {
+ if e.err != nil {
+ return "retry-able request failure: " + e.err.Error()
+ }
+
+ return "retry-able request failure"
+}
+
+func (e retryableError) Unwrap() error {
+ return e.err
+}
+
+func (e retryableError) As(target interface{}) bool {
+ if e.err == nil {
+ return false
+ }
+
+ switch v := target.(type) {
+ case **retryableError:
+ *v = &e
+ return true
+ default:
+ return false
+ }
+}
+
+// evaluate returns if err is retry-able. If it is and it includes an explicit
+// throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+ if err == nil {
+ return false, 0
+ }
+
+ // Do not use errors.As here, this should only be flattened one layer. If
+ // there are several chained errors, all the errors above it will be
+ // discarded if errors.As is used instead.
+ rErr, ok := err.(retryableError) //nolint:errorlint
+ if !ok {
+ return false, 0
+ }
+
+ return true, time.Duration(rErr.throttle)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
new file mode 100644
index 000000000..bf05adcf1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
@@ -0,0 +1,224 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/url"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression oconf.Compression
+
+// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+// This type is compatible with http.Transport.Proxy and can be used to set a custom proxy function
+// to the OTLP HTTP client.
+type HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression = Compression(oconf.NoCompression)
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression = Compression(oconf.GzipCompression)
+)
+
+// Option applies an option to the Exporter.
+type Option interface {
+ applyHTTPOption(oconf.Config) oconf.Config
+}
+
+func asHTTPOptions(opts []Option) []oconf.HTTPOption {
+ converted := make([]oconf.HTTPOption, len(opts))
+ for i, o := range opts {
+ converted[i] = oconf.NewHTTPOption(o.applyHTTPOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying the export of metric data
+// that failed.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ oconf.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config {
+ return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to. This
+// endpoint is specified as a host and optional port, no path or scheme should
+// be included (see WithInsecure and WithURLPath).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+//
+// NOTE(review): WithGRPCConn does not exist in this HTTP exporter package; this line appears copied from the gRPC exporter.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{oconf.WithEndpointURL(u)}
+}
+
+// WithCompression sets the compression strategy the Exporter will use to
+// compress the HTTP body.
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compression strategy will be used.
+func WithCompression(compression Compression) Option {
+ return wrappedOption{oconf.WithCompression(oconf.Compression(compression))}
+}
+
+// WithURLPath sets the URL path the Exporter will send requests to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, the path
+// contained in that variable value will be used. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "/v1/metrics" will be used.
+func WithURLPath(urlPath string) Option {
+ return wrappedOption{oconf.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig sets the TLS configuration the Exporter will use for
+// HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, the system default configuration is used.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+ return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure disables client transport security for the Exporter's HTTP
+// connection.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+func WithInsecure() Option {
+ return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithHeaders will send the provided headers with each HTTP request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+ return wrappedOption{oconf.WithRetry(retry.Config(rc))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+ return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+ return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
+
+// WithProxy sets the Proxy function the client will use to determine the
+// proxy to use for an HTTP request. If this option is not used, the client
+// will use [http.ProxyFromEnvironment].
+func WithProxy(pf HTTPTransportProxyFunc) Option {
+ return wrappedOption{oconf.WithProxy(oconf.HTTPTransportProxyFunc(pf))}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
new file mode 100644
index 000000000..de9e71a6e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlpmetrichttp provides an OTLP metrics exporter using HTTP with protobuf payloads.
+By default the telemetry is sent to https://localhost:4318/v1/metrics.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/metrics" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+environment variable and by [WithEndpoint], [WithEndpointURL], and [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4318/v1/metrics") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+compression strategy the exporter uses to compress the HTTP body.
+Supported values: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+ - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+ - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+ - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+ - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+ - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
new file mode 100644
index 000000000..50ac8f86e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
@@ -0,0 +1,152 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is an OpenTelemetry metric Exporter using protobufs over HTTP.
+type Exporter struct {
+ // Ensure synchronous access to the client across all functionality.
+ clientMu sync.Mutex
+ client interface {
+ UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
+ Shutdown(context.Context) error
+ }
+
+ temporalitySelector metric.TemporalitySelector
+ aggregationSelector metric.AggregationSelector
+
+ shutdownOnce sync.Once
+}
+
+func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
+ ts := cfg.Metrics.TemporalitySelector
+ if ts == nil {
+ ts = func(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+ }
+ }
+
+ as := cfg.Metrics.AggregationSelector
+ if as == nil {
+ as = metric.DefaultAggregationSelector
+ }
+
+ return &Exporter{
+ client: c,
+
+ temporalitySelector: ts,
+ aggregationSelector: as,
+ }, nil
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.temporalitySelector(k)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.aggregationSelector(k)
+}
+
+// Export transforms and transmits metric data to an OTLP receiver.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ defer global.Debug("OTLP/HTTP exporter export", "Data", rm)
+
+ otlpRm, err := transform.ResourceMetrics(rm)
+ // Best effort upload of transformable metrics.
+ e.clientMu.Lock()
+ upErr := e.client.UploadMetrics(ctx, otlpRm)
+ e.clientMu.Unlock()
+ if upErr != nil {
+ if err == nil {
+ return fmt.Errorf("failed to upload metrics: %w", upErr)
+ }
+ // Merge the two errors.
+ return fmt.Errorf("failed to upload incomplete metrics (%w): %w", err, upErr)
+ }
+ return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+ // The exporter and client hold no state, nothing to flush.
+ return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ e.clientMu.Lock()
+ client := e.client
+ e.client = shutdownClient{}
+ e.clientMu.Unlock()
+ err = client.Shutdown(ctx)
+ })
+ return err
+}
+
+var errShutdown = errors.New("HTTP exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+ return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+ return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct{ Type string }{Type: "OTLP/HTTP"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using protobufs over HTTP.
+func New(_ context.Context, opts ...Option) (*Exporter, error) {
+ cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...)
+ c, err := newClient(cfg)
+ if err != nil {
+ return nil, err
+ }
+ return newExporter(c, cfg)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..7ac42759f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
@@ -0,0 +1,215 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and it is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '="), "parse headers", "input", header)
+ continue
+ }
+
+ trimmedName := strings.TrimSpace(n)
+
+ // Validate the key.
+ if !isValidHeaderKey(trimmedName) {
+ global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
+ continue
+ }
+
+ // Only decode the value.
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
+
+func isValidHeaderKey(key string) bool {
+ if key == "" {
+ return false
+ }
+ for _, c := range key {
+ if !isTokenChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isTokenChar(c rune) bool {
+ return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+ unicode.IsDigit(c) ||
+ c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+ c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
new file mode 100644
index 000000000..1b379f10c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry\"}" --out=oconf/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf\"}" --out=otest/collector.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
new file mode 100644
index 000000000..89b134a39
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
@@ -0,0 +1,210 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
// DefaultEnvOptionsReader is the default environments reader.
//
// Namespace is prepended to every key the reader looks up (e.g. "ENDPOINT"
// resolves from OTEL_EXPORTER_OTLP_ENDPOINT); ReadFile loads values that
// reference files, such as certificate paths.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
	GetEnv:    os.Getenv,
	ReadFile:  os.ReadFile,
	Namespace: "OTEL_EXPORTER_OTLP",
}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
// getOptionsFromEnv builds the option set described by the
// OTEL_EXPORTER_OTLP_* environment variables read through
// DefaultEnvOptionsReader. Each signal-specific (METRICS_*) variable is
// registered after its generic counterpart, so the specific value wins when
// both are set (options are applied in order).
func getOptionsFromEnv() []GenericOption {
	opts := []GenericOption{}

	// tlsConf accumulates CA and client-certificate settings across several
	// env vars; withTLSConfig below only emits an option if any were set.
	tlsConf := &tls.Config{}
	DefaultEnvOptionsReader.Apply(
		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For OTLP/HTTP endpoint URLs without a per-signal
				// configuration, the passed endpoint is used as a base URL
				// and the signals are sent to these paths relative to that.
				cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For endpoint URLs for OTLP/HTTP per-signal variables, the
				// URL MUST be used as-is without any modification. The only
				// exception is that if an URL contains no path part, the root
				// path / MUST be used.
				path := u.Path
				if path == "" {
					path = "/"
				}
				cfg.Metrics.URLPath = path
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
		withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
	)

	return opts
}
+
// withEndpointForGRPC returns a config function that derives the gRPC dial
// target from u (gRPC targets carry no separate URL path, so host and path
// are joined into the endpoint).
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
	return func(cfg Config) Config {
		// For OTLP/gRPC endpoints, this is the target to which the
		// exporter is going to send telemetry.
		cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
		return cfg
	}
}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
+
// withEnvTemporalityPreference reads the environment variable n and, when
// it holds a recognized value ("cumulative", "delta", or "lowmemory",
// case-insensitive), passes the matching TemporalitySelector to fn.
// Unrecognized values are logged and ignored.
func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
	return func(e *envconfig.EnvOptionsReader) {
		if s, ok := e.GetEnvValue(n); ok {
			switch strings.ToLower(s) {
			case "cumulative":
				fn(cumulativeTemporality)
			case "delta":
				fn(deltaTemporality)
			case "lowmemory":
				fn(lowMemory)
			default:
				global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
			}
		}
	}
}
+
// cumulativeTemporality selects cumulative temporality for every instrument
// kind.
func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
	return metricdata.CumulativeTemporality
}

// deltaTemporality selects delta temporality for counter, histogram, and
// observable-counter instruments, and cumulative temporality for all other
// kinds.
func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
	switch ik {
	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
		return metricdata.DeltaTemporality
	default:
		return metricdata.CumulativeTemporality
	}
}

// lowMemory selects delta temporality for counter and histogram
// instruments only; every other kind stays cumulative.
func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
	switch ik {
	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
		return metricdata.DeltaTemporality
	default:
		return metricdata.CumulativeTemporality
	}
}
+
// withEnvAggPreference reads the environment variable n and, when it holds
// a recognized value, passes the matching AggregationSelector to fn.
// "base2_exponential_bucket_histogram" switches histogram instruments to a
// base-2 exponential bucket aggregation (MaxSize 160, MaxScale 20); other
// instrument kinds keep the default aggregation. Unrecognized values are
// logged and ignored.
func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
	return func(e *envconfig.EnvOptionsReader) {
		if s, ok := e.GetEnvValue(n); ok {
			switch strings.ToLower(s) {
			case "explicit_bucket_histogram":
				fn(metric.DefaultAggregationSelector)
			case "base2_exponential_bucket_histogram":
				fn(func(kind metric.InstrumentKind) metric.Aggregation {
					if kind == metric.InstrumentKindHistogram {
						return metric.AggregationBase2ExponentialHistogram{
							MaxSize:  160,
							MaxScale: 20,
							NoMinMax: false,
						}
					}
					return metric.DefaultAggregationSelector(kind)
				})
			default:
				global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
			}
		}
	}
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
new file mode 100644
index 000000000..db595e49e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
@@ -0,0 +1,374 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
// Defaults shared by the HTTP and gRPC metric exporter configurations.
const (
	// DefaultMaxAttempts describes how many times the driver
	// should retry the sending of the payload in case of a
	// retryable error.
	DefaultMaxAttempts int = 5
	// DefaultMetricsPath is a default URL path for endpoint that
	// receives metrics.
	DefaultMetricsPath string = "/v1/metrics"
	// DefaultBackoff is a default base backoff time used in the
	// exponential backoff strategy.
	DefaultBackoff time.Duration = 300 * time.Millisecond
	// DefaultTimeout is a default max waiting time for the backend to process
	// each span or metrics batch.
	DefaultTimeout time.Duration = 10 * time.Second
)
+
type (
	// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
	// This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
	HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)

	// SignalConfig holds the exporter settings for a single OTLP signal
	// (here: metrics). Fields under the "gRPC configurations" comment and
	// Proxy apply to only one of the two transports.
	SignalConfig struct {
		Endpoint    string
		Insecure    bool
		TLSCfg      *tls.Config
		Headers     map[string]string
		Compression Compression
		Timeout     time.Duration
		URLPath     string

		// gRPC configurations
		GRPCCredentials credentials.TransportCredentials

		TemporalitySelector metric.TemporalitySelector
		AggregationSelector metric.AggregationSelector

		Proxy HTTPTransportProxyFunc
	}

	// Config is the cross-transport exporter configuration produced by
	// NewHTTPConfig or NewGRPCConfig.
	Config struct {
		// Signal specific configurations
		Metrics SignalConfig

		RetryConfig retry.Config

		// gRPC configurations
		ReconnectionPeriod time.Duration
		ServiceConfig      string
		DialOptions        []grpc.DialOption
		GRPCConn           *grpc.ClientConn
	}
)
+
// NewHTTPConfig returns a new Config with all settings applied from opts and
// any unset setting using the default HTTP config values.
//
// Precedence, lowest to highest: built-in defaults, environment variables,
// then explicit opts. The URL path is normalized last so every source of
// the value receives the same cleanup.
func NewHTTPConfig(opts ...HTTPOption) Config {
	cfg := Config{
		Metrics: SignalConfig{
			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
			URLPath:     DefaultMetricsPath,
			Compression: NoCompression,
			Timeout:     DefaultTimeout,

			TemporalitySelector: metric.DefaultTemporalitySelector,
			AggregationSelector: metric.DefaultAggregationSelector,
		},
		RetryConfig: retry.DefaultConfig,
	}
	cfg = ApplyHTTPEnvConfigs(cfg)
	for _, opt := range opts {
		cfg = opt.ApplyHTTPOption(cfg)
	}
	cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
	return cfg
}
+
// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// defaultPath is returned instead. The result is always absolute.
func cleanPath(urlPath string, defaultPath string) string {
	cleaned := path.Clean(strings.TrimSpace(urlPath))
	if cleaned == "." {
		return defaultPath
	}
	if path.IsAbs(cleaned) {
		return cleaned
	}
	return "/" + cleaned
}
+
// NewGRPCConfig returns a new Config with all settings applied from opts and
// any unset setting using the default gRPC config values.
//
// Precedence, lowest to highest: built-in defaults, environment variables,
// then explicit opts. Dial options implied by the resulting settings
// (service config, credentials, compression, reconnection backoff) are
// appended afterwards, so they reflect the final values.
func NewGRPCConfig(opts ...GRPCOption) Config {
	cfg := Config{
		Metrics: SignalConfig{
			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
			URLPath:     DefaultMetricsPath,
			Compression: NoCompression,
			Timeout:     DefaultTimeout,

			TemporalitySelector: metric.DefaultTemporalitySelector,
			AggregationSelector: metric.DefaultAggregationSelector,
		},
		RetryConfig: retry.DefaultConfig,
	}
	cfg = ApplyGRPCEnvConfigs(cfg)
	for _, opt := range opts {
		cfg = opt.ApplyGRPCOption(cfg)
	}

	if cfg.ServiceConfig != "" {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
	}
	// Prioritize GRPCCredentials over Insecure (passing both is an error).
	if cfg.Metrics.GRPCCredentials != nil {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
	} else if cfg.Metrics.Insecure {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
	} else {
		// Default to using the host's root CA.
		creds := credentials.NewTLS(nil)
		cfg.Metrics.GRPCCredentials = creds
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
	}
	if cfg.Metrics.Compression == GzipCompression {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
	}
	if cfg.ReconnectionPeriod != 0 {
		p := grpc.ConnectParams{
			Backoff:           backoff.DefaultConfig,
			MinConnectTimeout: cfg.ReconnectionPeriod,
		}
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
	}

	return cfg
}
+
type (
	// GenericOption applies an option to the HTTP or gRPC driver.
	// Construct one with newGenericOption or newSplitOption.
	GenericOption interface {
		ApplyHTTPOption(Config) Config
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// HTTPOption applies an option to the HTTP driver.
	HTTPOption interface {
		ApplyHTTPOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// GRPCOption applies an option to the gRPC driver.
	GRPCOption interface {
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}
)
+
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
	// fn transforms the Config identically for either transport.
	fn func(Config) Config
}

func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
	return g.fn(cfg)
}

func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
	return g.fn(cfg)
}

func (genericOption) private() {}

// newGenericOption returns a GenericOption that applies fn for both the
// HTTP and gRPC drivers.
func newGenericOption(fn func(cfg Config) Config) GenericOption {
	return &genericOption{fn: fn}
}
+
// splitOption is an option that applies different logics
// for gRPC and HTTP.
type splitOption struct {
	httpFn func(Config) Config
	grpcFn func(Config) Config
}

func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
	return g.grpcFn(cfg)
}

func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
	return g.httpFn(cfg)
}

func (splitOption) private() {}

// newSplitOption returns a GenericOption that applies httpFn for the HTTP
// driver and grpcFn for the gRPC driver.
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
+
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
	fn func(Config) Config
}

func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
	return h.fn(cfg)
}

func (httpOption) private() {}

// NewHTTPOption returns an HTTPOption that applies fn to the HTTP
// configuration.
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
	return &httpOption{fn: fn}
}
+
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
	fn func(Config) Config
}

func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
	return h.fn(cfg)
}

func (grpcOption) private() {}

// NewGRPCOption returns a GRPCOption that applies fn to the gRPC
// configuration.
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
	return &grpcOption{fn: fn}
}
+
// Generic Options

// WithEndpoint sets the target endpoint (host and optional port) the
// exporter sends metrics to, leaving the URL path and security settings
// untouched.
func WithEndpoint(endpoint string) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Endpoint = endpoint
		return cfg
	})
}

// WithEndpointURL parses v as a URL and applies its host, path, and scheme;
// any scheme other than "https" marks the connection insecure. A parse
// failure is logged and the config is returned unchanged.
func WithEndpointURL(v string) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		u, err := url.Parse(v)
		if err != nil {
			global.Error(err, "otlpmetric: parse endpoint url", "url", v)
			return cfg
		}

		cfg.Metrics.Endpoint = u.Host
		cfg.Metrics.URLPath = u.Path
		cfg.Metrics.Insecure = u.Scheme != "https"

		return cfg
	})
}

// WithCompression sets the compression applied to exported payloads.
func WithCompression(compression Compression) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Compression = compression
		return cfg
	})
}

// WithURLPath sets the URL path metrics are posted to.
func WithURLPath(urlPath string) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.URLPath = urlPath
		return cfg
	})
}

// WithRetry sets the retry policy used when an export fails.
func WithRetry(rc retry.Config) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.RetryConfig = rc
		return cfg
	})
}
+
// WithTLSClientConfig sets the TLS configuration: the HTTP driver stores a
// clone of tlsCfg, while the gRPC driver converts it into transport
// credentials.
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
	return newSplitOption(func(cfg Config) Config {
		cfg.Metrics.TLSCfg = tlsCfg.Clone()
		return cfg
	}, func(cfg Config) Config {
		cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
		return cfg
	})
}

// WithInsecure disables transport security for the connection.
func WithInsecure() GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Insecure = true
		return cfg
	})
}

// WithSecure enables transport security for the connection.
func WithSecure() GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Insecure = false
		return cfg
	})
}

// WithHeaders sets the headers sent with every export request.
func WithHeaders(headers map[string]string) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Headers = headers
		return cfg
	})
}

// WithTimeout sets the maximum time the exporter waits for each export.
func WithTimeout(duration time.Duration) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Timeout = duration
		return cfg
	})
}

// WithTemporalitySelector sets the selector that picks the temporality
// reported for each instrument kind.
func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.TemporalitySelector = selector
		return cfg
	})
}

// WithAggregationSelector sets the selector that picks the aggregation used
// for each instrument kind.
func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.AggregationSelector = selector
		return cfg
	})
}

// WithProxy sets the proxy resolution function used by the HTTP transport.
func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Proxy = pf
		return cfg
	})
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
new file mode 100644
index 000000000..ae3d09787
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
@@ -0,0 +1,47 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import "time"
+
// Defaults for the collector address the exporter dials when no explicit
// endpoint is configured.
const (
	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
	DefaultCollectorGRPCPort uint16 = 4317
	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
	DefaultCollectorHTTPPort uint16 = 4318
	// DefaultCollectorHost is the host address the Exporter will attempt
	// connect to if no collector address is provided.
	DefaultCollectorHost string = "localhost"
)

// Compression describes the compression used for payloads sent to the
// collector.
type Compression int

const (
	// NoCompression tells the driver to send payloads without
	// compression.
	NoCompression Compression = iota
	// GzipCompression tells the driver to send payloads after
	// compressing them with gzip.
	GzipCompression
)

// RetrySettings defines configuration for retrying batches in case of export failure
// using an exponential backoff.
//
// NOTE(review): the fields mirror retry.Config; presumably retained for
// compatibility with the shared template — confirm before removing.
type RetrySettings struct {
	// Enabled indicates whether to not retry sending batches in case of export failure.
	Enabled bool
	// InitialInterval the time to wait after the first failure before retrying.
	InitialInterval time.Duration
	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
	// consecutive retries will always be `MaxInterval`.
	MaxInterval time.Duration
	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
	// Once this value is reached, the data is discarded.
	MaxElapsedTime time.Duration
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
new file mode 100644
index 000000000..f603dc605
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
@@ -0,0 +1,38 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return CreateTLSConfig(b)
+}
+
// CreateTLSConfig creates a tls.Config from a raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(certBytes) {
		return nil, errors.New("failed to append certificate to the cert pool")
	}
	return &tls.Config{RootCAs: pool}, nil
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
new file mode 100644
index 000000000..ed93844a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+
+import "fmt"
+
// PartialSuccess represents the underlying error for all handling
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
// error handler belongs to this category.
type PartialSuccess struct {
	ErrorMessage  string
	RejectedItems int64
	RejectedKind  string
}

var _ error = PartialSuccess{}

// Error implements the error interface.
func (ps PartialSuccess) Error() string {
	if ps.ErrorMessage == "" {
		return fmt.Sprintf("OTLP partial success: empty message (%d %s rejected)", ps.RejectedItems, ps.RejectedKind)
	}
	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", ps.ErrorMessage, ps.RejectedItems, ps.RejectedKind)
}

// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
	_, ok := err.(PartialSuccess)
	return ok
}

// TracePartialSuccessError returns an error describing a partial success
// response for the trace signal.
func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
	ps := PartialSuccess{
		ErrorMessage:  errorMessage,
		RejectedItems: itemsRejected,
		RejectedKind:  "spans",
	}
	return ps
}

// MetricPartialSuccessError returns an error describing a partial success
// response for the metric signal.
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
	ps := PartialSuccess{
		ErrorMessage:  errorMessage,
		RejectedItems: itemsRejected,
		RejectedKind:  "metric data points",
	}
	return ps
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
new file mode 100644
index 000000000..a9a08ffe6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
// DefaultConfig are the recommended defaults to use.
//
// Retries are enabled, starting at 5 s intervals, backing off to at most
// 30 s between attempts, and giving up after one minute in total.
var DefaultConfig = Config{
	Enabled:         true,
	InitialInterval: 5 * time.Second,
	MaxInterval:     30 * time.Second,
	MaxElapsedTime:  time.Minute,
}

// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
//
// The zero value disables retrying (Enabled is false).
type Config struct {
	// Enabled indicates whether to not retry sending batches in case of
	// export failure.
	Enabled bool
	// InitialInterval the time to wait after the first failure before
	// retrying.
	InitialInterval time.Duration
	// MaxInterval is the upper bound on backoff interval. Once this value is
	// reached the delay between consecutive retries will always be
	// `MaxInterval`.
	MaxInterval time.Duration
	// MaxElapsedTime is the maximum amount of time (including retries) spent
	// trying to send a request/batch. Once this value is reached, the data
	// is discarded.
	MaxElapsedTime time.Duration
}

// RequestFunc wraps a request with retry logic.
type RequestFunc func(context.Context, func(context.Context) error) error

// EvaluateFunc returns if an error is retry-able and if an explicit throttle
// duration should be honored that was included in the error.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)
+
// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
	if !c.Enabled {
		// Retries disabled: run the request exactly once.
		return func(ctx context.Context, fn func(context.Context) error) error {
			return fn(ctx)
		}
	}

	return func(ctx context.Context, fn func(context.Context) error) error {
		// Do not use NewExponentialBackOff since it calls Reset and the code here
		// must call Reset after changing the InitialInterval (this saves an
		// unnecessary call to Now).
		b := &backoff.ExponentialBackOff{
			InitialInterval:     c.InitialInterval,
			RandomizationFactor: backoff.DefaultRandomizationFactor,
			Multiplier:          backoff.DefaultMultiplier,
			MaxInterval:         c.MaxInterval,
			MaxElapsedTime:      c.MaxElapsedTime,
			Stop:                backoff.Stop,
			Clock:               backoff.SystemClock,
		}
		b.Reset()

		for {
			err := fn(ctx)
			if err == nil {
				return nil
			}

			// Only errors the evaluate function deems retryable are retried.
			retryable, throttle := evaluate(err)
			if !retryable {
				return err
			}

			bOff := b.NextBackOff()
			if bOff == backoff.Stop {
				return fmt.Errorf("max retry time elapsed: %w", err)
			}

			// Wait for the greater of the backoff or throttle delay.
			var delay time.Duration
			if bOff > throttle {
				delay = bOff
			} else {
				// Honoring the throttle must not exceed the overall retry
				// budget; if it would, give up now instead of waiting.
				elapsed := b.GetElapsedTime()
				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
					return fmt.Errorf("max retry time would elapse: %w", err)
				}
				delay = throttle
			}

			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
				return fmt.Errorf("%w: %w", ctxErr, err)
			}
		}
	}
}
+
// Allow override for testing.
var waitFunc = wait

// wait takes the caller's context, and the amount of time to wait. It will
// return nil if the timer fires before or at the same time as the context's
// deadline. This indicates that the call can be retried.
//
// The timer is always stopped before returning so its resources are
// released promptly.
func wait(ctx context.Context, delay time.Duration) error {
	timer := time.NewTimer(delay)
	defer timer.Stop()

	select {
	case <-ctx.Done():
		// Handle the case where the timer and context deadline end
		// simultaneously by prioritizing the timer expiration nil value
		// response.
		select {
		case <-timer.C:
		default:
			return ctx.Err()
		}
	case <-timer.C:
	}

	return nil
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
new file mode 100644
index 000000000..d607da78e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
@@ -0,0 +1,144 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// AttrIter transforms an attribute iterator into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *cpb.AnyValue {
+ av := new(cpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &cpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &cpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &cpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
new file mode 100644
index 000000000..bb6d21f0b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
@@ -0,0 +1,103 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+var (
+ errUnknownAggregation = errors.New("unknown aggregation")
+ errUnknownTemporality = errors.New("unknown temporality")
+)
+
+type errMetric struct {
+ m *mpb.Metric
+ err error
+}
+
+func (e errMetric) Unwrap() error {
+ return e.err
+}
+
+func (e errMetric) Error() string {
+ format := "invalid metric (name: %q, description: %q, unit: %q): %s"
+ return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
+}
+
+func (e errMetric) Is(target error) bool {
+ return errors.Is(e.err, target)
+}
+
+// multiErr is used by the data-type transform functions to wrap multiple
+// errors into a single return value. The error message will show all errors
+// as a list and scope them by the datatype name that is returning them.
+type multiErr struct {
+ datatype string
+ errs []error
+}
+
+// errOrNil returns nil if e contains no errors, otherwise it returns e.
+func (e *multiErr) errOrNil() error {
+ if len(e.errs) == 0 {
+ return nil
+ }
+ return e
+}
+
+// append adds err to e. If err is a multiErr, its errs are flattened into e.
+func (e *multiErr) append(err error) {
+ // Do not use errors.As here, this should only be flattened one layer. If
+ // there is a *multiErr several steps down the chain, all the errors above
+ // it will be discarded if errors.As is used instead.
+ switch other := err.(type) { //nolint:errorlint
+ case *multiErr:
+ // Flatten err errors into e.
+ e.errs = append(e.errs, other.errs...)
+ default:
+ e.errs = append(e.errs, err)
+ }
+}
+
+func (e *multiErr) Error() string {
+ es := make([]string, len(e.errs))
+ for i, err := range e.errs {
+ es[i] = fmt.Sprintf("* %s", err)
+ }
+
+ format := "%d errors occurred transforming %s:\n\t%s"
+ return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
+}
+
+func (e *multiErr) Unwrap() error {
+ switch len(e.errs) {
+ case 0:
+ return nil
+ case 1:
+ return e.errs[0]
+ }
+
+ // Return a multiErr without the leading error.
+ cp := &multiErr{
+ datatype: e.datatype,
+ errs: make([]error, len(e.errs)-1),
+ }
+ copy(cp.errs, e.errs[1:])
+ return cp
+}
+
+func (e *multiErr) Is(target error) bool {
+ if len(e.errs) == 0 {
+ return false
+ }
+ // Check if the first error is target.
+ return errors.Is(e.errs[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
new file mode 100644
index 000000000..8207b15a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
@@ -0,0 +1,350 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package transform provides transformation functionality from the
+// sdk/metric/metricdata data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+
+import (
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+ rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
+// contains invalid ScopeMetrics, an error will be returned along with an OTLP
+// ResourceMetrics that contains partial OTLP ScopeMetrics.
+func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+ sms, err := ScopeMetrics(rm.ScopeMetrics)
+ return &mpb.ResourceMetrics{
+ Resource: &rpb.Resource{
+ Attributes: AttrIter(rm.Resource.Iter()),
+ },
+ ScopeMetrics: sms,
+ SchemaUrl: rm.Resource.SchemaURL(),
+ }, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+ errs := &multiErr{datatype: "ScopeMetrics"}
+ out := make([]*mpb.ScopeMetrics, 0, len(sms))
+ for _, sm := range sms {
+ ms, err := Metrics(sm.Metrics)
+ if err != nil {
+ errs.append(err)
+ }
+
+ out = append(out, &mpb.ScopeMetrics{
+ Scope: &cpb.InstrumentationScope{
+ Name: sm.Scope.Name,
+ Version: sm.Scope.Version,
+ Attributes: AttrIter(sm.Scope.Attributes.Iter()),
+ },
+ Metrics: ms,
+ SchemaUrl: sm.Scope.SchemaURL,
+ })
+ }
+ return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+ errs := &multiErr{datatype: "Metrics"}
+ out := make([]*mpb.Metric, 0, len(ms))
+ for _, m := range ms {
+ o, err := metric(m)
+ if err != nil {
+ // Do not include invalid data. Drop the metric, report the error.
+ errs.append(errMetric{m: o, err: err})
+ continue
+ }
+ out = append(out, o)
+ }
+ return out, errs.errOrNil()
+}
+
+func metric(m metricdata.Metrics) (*mpb.Metric, error) {
+ var err error
+ out := &mpb.Metric{
+ Name: m.Name,
+ Description: m.Description,
+ Unit: m.Unit,
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ out.Data = Gauge(a)
+ case metricdata.Gauge[float64]:
+ out.Data = Gauge(a)
+ case metricdata.Sum[int64]:
+ out.Data, err = Sum(a)
+ case metricdata.Sum[float64]:
+ out.Data, err = Sum(a)
+ case metricdata.Histogram[int64]:
+ out.Data, err = Histogram(a)
+ case metricdata.Histogram[float64]:
+ out.Data, err = Histogram(a)
+ case metricdata.ExponentialHistogram[int64]:
+ out.Data, err = ExponentialHistogram(a)
+ case metricdata.ExponentialHistogram[float64]:
+ out.Data, err = ExponentialHistogram(a)
+ case metricdata.Summary:
+ out.Data = Summary(a)
+ default:
+ return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
+ }
+ return out, err
+}
+
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+ return &mpb.Metric_Gauge{
+ Gauge: &mpb.Gauge{
+ DataPoints: DataPoints(g.DataPoints),
+ },
+ }
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. An error is returned
+// if the temporality of s is unknown.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+ t, err := Temporality(s.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Sum{
+ Sum: &mpb.Sum{
+ AggregationTemporality: t,
+ IsMonotonic: s.IsMonotonic,
+ DataPoints: DataPoints(s.DataPoints),
+ },
+ }, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+ out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ ndp := &mpb.NumberDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ switch v := any(dPt.Value).(type) {
+ case int64:
+ ndp.Value = &mpb.NumberDataPoint_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ ndp.Value = &mpb.NumberDataPoint_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, ndp)
+ }
+ return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Histogram{
+ Histogram: &mpb.Histogram{
+ AggregationTemporality: t,
+ DataPoints: HistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
+ out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ hdp := &mpb.HistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ BucketCounts: dPt.BucketCounts,
+ ExplicitBounds: dPt.Bounds,
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ hdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ hdp.Max = &vF64
+ }
+ out = append(out, hdp)
+ }
+ return out
+}
+
+// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_ExponentialHistogram{
+ ExponentialHistogram: &mpb.ExponentialHistogram{
+ AggregationTemporality: t,
+ DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
+// from dPts.
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+ out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ ehdp := &mpb.ExponentialHistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ Scale: dPt.Scale,
+ ZeroCount: dPt.ZeroCount,
+ Exemplars: Exemplars(dPt.Exemplars),
+
+ Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
+ Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Max = &vF64
+ }
+ out = append(out, ehdp)
+ }
+ return out
+}
+
+// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
+// from bucket.
+func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+ return &mpb.ExponentialHistogramDataPoint_Buckets{
+ Offset: bucket.Offset,
+ BucketCounts: bucket.Counts,
+ }
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+ switch t {
+ case metricdata.DeltaTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+ case metricdata.CumulativeTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+ default:
+ err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+ }
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64.
+// The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64
+// (a date before the year 1678 or after 2262).
+// timeUnixNano on the zero Time returns 0.
+// The result does not depend on the location associated with t.
+func timeUnixNano(t time.Time) uint64 {
+ return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked.
+}
+
+// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
+func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exemplar {
+ out := make([]*mpb.Exemplar, 0, len(exemplars))
+ for _, exemplar := range exemplars {
+ e := &mpb.Exemplar{
+ FilteredAttributes: KeyValues(exemplar.FilteredAttributes),
+ TimeUnixNano: timeUnixNano(exemplar.Time),
+ SpanId: exemplar.SpanID,
+ TraceId: exemplar.TraceID,
+ }
+ switch v := any(exemplar.Value).(type) {
+ case int64:
+ e.Value = &mpb.Exemplar_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ e.Value = &mpb.Exemplar_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, e)
+ }
+ return out
+}
+
+// Summary returns an OTLP Metric_Summary generated from s.
+func Summary(s metricdata.Summary) *mpb.Metric_Summary {
+ return &mpb.Metric_Summary{
+ Summary: &mpb.Summary{
+ DataPoints: SummaryDataPoints(s.DataPoints),
+ },
+ }
+}
+
+// SummaryDataPoints returns a slice of OTLP SummaryDataPoint generated from
+// dPts.
+func SummaryDataPoints(dPts []metricdata.SummaryDataPoint) []*mpb.SummaryDataPoint {
+ out := make([]*mpb.SummaryDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sdp := &mpb.SummaryDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: dPt.Sum,
+ QuantileValues: QuantileValues(dPt.QuantileValues),
+ }
+ out = append(out, sdp)
+ }
+ return out
+}
+
+// QuantileValues returns a slice of OTLP SummaryDataPoint_ValueAtQuantile
+// generated from quantiles.
+func QuantileValues(quantiles []metricdata.QuantileValue) []*mpb.SummaryDataPoint_ValueAtQuantile {
+ out := make([]*mpb.SummaryDataPoint_ValueAtQuantile, 0, len(quantiles))
+ for _, q := range quantiles {
+ quantile := &mpb.SummaryDataPoint_ValueAtQuantile{
+ Quantile: q.Quantile,
+ Value: q.Value,
+ }
+ out = append(out, quantile)
+ }
+ return out
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
new file mode 100644
index 000000000..2a67f5800
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
+func Version() string {
+ return "1.35.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/README.md b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/README.md
new file mode 100644
index 000000000..cd138deef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/README.md
@@ -0,0 +1,3 @@
+# STDOUT Log Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/stdout/stdoutlog)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/stdout/stdoutlog)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/config.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/config.go
new file mode 100644
index 000000000..1b8f8bbb2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/config.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdoutlog // import "go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
+
+import (
+ "io"
+ "os"
+)
+
+var (
+ defaultWriter io.Writer = os.Stdout
+ defaultPrettyPrint = false
+ defaultTimestamps = true
+)
+
+// config contains options for the STDOUT exporter.
+type config struct {
+ // Writer is the destination. If not set, os.Stdout is used.
+ Writer io.Writer
+
+ // PrettyPrint will encode the output into readable JSON. Default is
+ // false.
+ PrettyPrint bool
+
+ // Timestamps specifies if timestamps should be printed. Default is
+ // true.
+ Timestamps bool
+}
+
+// newConfig creates a validated Config configured with options.
+func newConfig(options []Option) config {
+ cfg := config{
+ Writer: defaultWriter,
+ PrettyPrint: defaultPrettyPrint,
+ Timestamps: defaultTimestamps,
+ }
+ for _, opt := range options {
+ cfg = opt.apply(cfg)
+ }
+ return cfg
+}
+
+// Option sets the configuration value for an Exporter.
+type Option interface {
+ apply(config) config
+}
+
+// WithWriter sets the export stream destination.
+func WithWriter(w io.Writer) Option {
+ return writerOption{w}
+}
+
+type writerOption struct {
+ W io.Writer
+}
+
+func (o writerOption) apply(cfg config) config {
+ cfg.Writer = o.W
+ return cfg
+}
+
+// WithPrettyPrint prettifies the emitted output.
+func WithPrettyPrint() Option {
+ return prettyPrintOption(true)
+}
+
+type prettyPrintOption bool
+
+func (o prettyPrintOption) apply(cfg config) config {
+ cfg.PrettyPrint = bool(o)
+ return cfg
+}
+
+// WithoutTimestamps sets the export stream to not include timestamps.
+func WithoutTimestamps() Option {
+ return timestampsOption(false)
+}
+
+type timestampsOption bool
+
+func (o timestampsOption) apply(cfg config) config {
+ cfg.Timestamps = bool(o)
+ return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/doc.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/doc.go
new file mode 100644
index 000000000..d400ab8c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/doc.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package stdoutlog provides an exporter for OpenTelemetry log
+// telemetry.
+//
+// The exporter is intended to be used for testing and debugging, it is not
+// meant for production use. Additionally, it does not provide an interchange
+// format for OpenTelemetry that is supported with any stability or
+// compatibility guarantees. If these are needed features, please use the OTLP
+// exporter instead.
+package stdoutlog // import "go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go
new file mode 100644
index 000000000..e2bf9bfa2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/exporter.go
@@ -0,0 +1,72 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdoutlog // import "go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
+
+import (
+ "context"
+ "encoding/json"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/sdk/log"
+)
+
+var _ log.Exporter = &Exporter{}
+
+// Exporter writes JSON-encoded log records to an [io.Writer] ([os.Stdout] by default).
+// Exporter must be created with [New].
+type Exporter struct {
+ encoder atomic.Pointer[json.Encoder]
+ timestamps bool
+}
+
+// New creates an [Exporter].
+func New(options ...Option) (*Exporter, error) {
+ cfg := newConfig(options)
+
+ enc := json.NewEncoder(cfg.Writer)
+ if cfg.PrettyPrint {
+ enc.SetIndent("", "\t")
+ }
+
+ e := Exporter{
+ timestamps: cfg.Timestamps,
+ }
+ e.encoder.Store(enc)
+
+ return &e, nil
+}
+
+// Export exports log records to writer.
+func (e *Exporter) Export(ctx context.Context, records []log.Record) error {
+ enc := e.encoder.Load()
+ if enc == nil {
+ return nil
+ }
+
+ for _, record := range records {
+ // Honor context cancellation.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // Encode record, one by one.
+ recordJSON := e.newRecordJSON(record)
+ if err := enc.Encode(recordJSON); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Shutdown shuts down the Exporter.
+// Calls to Export will perform no operation after this is called.
+func (e *Exporter) Shutdown(context.Context) error {
+ e.encoder.Store(nil)
+ return nil
+}
+
+// ForceFlush performs no action.
+func (e *Exporter) ForceFlush(context.Context) error {
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go
new file mode 100644
index 000000000..43aba8a5c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go
@@ -0,0 +1,132 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdoutlog // import "go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
+
+import (
+ "encoding/json"
+ "errors"
+ "time"
+
+ "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ sdklog "go.opentelemetry.io/otel/sdk/log"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func newValue(v log.Value) value {
+ return value{Value: v}
+}
+
+type value struct {
+ log.Value
+}
+
+// MarshalJSON implements a custom marshal function to encode log.Value.
+func (v value) MarshalJSON() ([]byte, error) {
+ var jsonVal struct {
+ Type string
+ Value interface{}
+ }
+ jsonVal.Type = v.Kind().String()
+
+ switch v.Kind() {
+ case log.KindString:
+ jsonVal.Value = v.AsString()
+ case log.KindInt64:
+ jsonVal.Value = v.AsInt64()
+ case log.KindFloat64:
+ jsonVal.Value = v.AsFloat64()
+ case log.KindBool:
+ jsonVal.Value = v.AsBool()
+ case log.KindBytes:
+ jsonVal.Value = v.AsBytes()
+ case log.KindMap:
+ m := v.AsMap()
+ values := make([]keyValue, 0, len(m))
+ for _, kv := range m {
+ values = append(values, keyValue{
+ Key: kv.Key,
+ Value: newValue(kv.Value),
+ })
+ }
+
+ jsonVal.Value = values
+ case log.KindSlice:
+ s := v.AsSlice()
+ values := make([]value, 0, len(s))
+ for _, e := range s {
+ values = append(values, newValue(e))
+ }
+
+ jsonVal.Value = values
+ case log.KindEmpty:
+ jsonVal.Value = nil
+ default:
+ return nil, errors.New("invalid Kind")
+ }
+
+ return json.Marshal(jsonVal)
+}
+
+type keyValue struct {
+ Key string
+ Value value
+}
+
+// recordJSON is a JSON-serializable representation of a Record.
+type recordJSON struct {
+ Timestamp *time.Time `json:",omitempty"`
+ ObservedTimestamp *time.Time `json:",omitempty"`
+ EventName string `json:",omitempty"`
+ Severity log.Severity
+ SeverityText string
+ Body value
+ Attributes []keyValue
+ TraceID trace.TraceID
+ SpanID trace.SpanID
+ TraceFlags trace.TraceFlags
+ Resource *resource.Resource
+ Scope instrumentation.Scope
+ DroppedAttributes int
+}
+
+func (e *Exporter) newRecordJSON(r sdklog.Record) recordJSON {
+ res := r.Resource()
+ newRecord := recordJSON{
+ EventName: r.EventName(),
+ Severity: r.Severity(),
+ SeverityText: r.SeverityText(),
+ Body: newValue(r.Body()),
+
+ TraceID: r.TraceID(),
+ SpanID: r.SpanID(),
+ TraceFlags: r.TraceFlags(),
+
+ Attributes: make([]keyValue, 0, r.AttributesLen()),
+
+ Resource: &res,
+ Scope: r.InstrumentationScope(),
+
+ DroppedAttributes: r.DroppedAttributes(),
+ }
+
+ r.WalkAttributes(func(kv log.KeyValue) bool {
+ newRecord.Attributes = append(newRecord.Attributes, keyValue{
+ Key: kv.Key,
+ Value: newValue(kv.Value),
+ })
+ return true
+ })
+
+ if e.timestamps {
+ timestamp := r.Timestamp()
+ newRecord.Timestamp = &timestamp
+
+ observedTimestamp := r.ObservedTimestamp()
+ newRecord.ObservedTimestamp = &observedTimestamp
+ }
+
+ return newRecord
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/README.md b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/README.md
new file mode 100644
index 000000000..5d9191ca6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/README.md
@@ -0,0 +1,3 @@
+# STDOUT Metric Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/config.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/config.go
new file mode 100644
index 000000000..cb1421074
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/config.go
@@ -0,0 +1,132 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+
+import (
+ "encoding/json"
+ "io"
+ "os"
+
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// config contains options for the exporter.
+type config struct {
+ prettyPrint bool
+ encoder *encoderHolder
+ temporalitySelector metric.TemporalitySelector
+ aggregationSelector metric.AggregationSelector
+ redactTimestamps bool
+}
+
+// newConfig creates a validated config configured with options.
+func newConfig(options ...Option) config {
+ cfg := config{}
+ for _, opt := range options {
+ cfg = opt.apply(cfg)
+ }
+
+ if cfg.encoder == nil {
+ enc := json.NewEncoder(os.Stdout)
+ cfg.encoder = &encoderHolder{encoder: enc}
+ }
+
+ if cfg.prettyPrint {
+ if e, ok := cfg.encoder.encoder.(*json.Encoder); ok {
+ e.SetIndent("", "\t")
+ }
+ }
+
+ if cfg.temporalitySelector == nil {
+ cfg.temporalitySelector = metric.DefaultTemporalitySelector
+ }
+
+ if cfg.aggregationSelector == nil {
+ cfg.aggregationSelector = metric.DefaultAggregationSelector
+ }
+
+ return cfg
+}
+
+// Option sets exporter option values.
+type Option interface {
+ apply(config) config
+}
+
+type optionFunc func(config) config
+
+func (o optionFunc) apply(c config) config {
+ return o(c)
+}
+
+// WithEncoder sets the exporter to use encoder to encode all the metric
+// data-types to an output.
+func WithEncoder(encoder Encoder) Option {
+ return optionFunc(func(c config) config {
+ if encoder != nil {
+ c.encoder = &encoderHolder{encoder: encoder}
+ }
+ return c
+ })
+}
+
+// WithWriter sets the export stream destination.
+// Using this option overrides any previously set encoder.
+func WithWriter(w io.Writer) Option {
+ return WithEncoder(json.NewEncoder(w))
+}
+
+// WithPrettyPrint prettifies the emitted output.
+// This option only works if the encoder is a *json.Encoder, as is the case
+// when using `WithWriter`.
+func WithPrettyPrint() Option {
+ return optionFunc(func(c config) config {
+ c.prettyPrint = true
+ return c
+ })
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the exporter will use
+// to determine the Temporality of an instrument based on its kind. If this
+// option is not used, the exporter will use the DefaultTemporalitySelector
+// from the go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+ return temporalitySelectorOption{selector: selector}
+}
+
+type temporalitySelectorOption struct {
+ selector metric.TemporalitySelector
+}
+
+func (t temporalitySelectorOption) apply(c config) config {
+ c.temporalitySelector = t.selector
+ return c
+}
+
+// WithAggregationSelector sets the AggregationSelector the exporter will use
+// to determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the exporter will use the
+// DefaultAggregationSelector from the go.opentelemetry.io/otel/sdk/metric
+// package or the aggregation explicitly passed for a view matching an
+// instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+ return aggregationSelectorOption{selector: selector}
+}
+
+type aggregationSelectorOption struct {
+ selector metric.AggregationSelector
+}
+
+func (t aggregationSelectorOption) apply(c config) config {
+ c.aggregationSelector = t.selector
+ return c
+}
+
+// WithoutTimestamps sets all timestamps to zero in the output stream.
+func WithoutTimestamps() Option {
+ return optionFunc(func(c config) config {
+ c.redactTimestamps = true
+ return c
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/doc.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/doc.go
new file mode 100644
index 000000000..c0388034b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/doc.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package stdoutmetric provides an exporter for OpenTelemetry metric
+// telemetry.
+//
+// The exporter is intended to be used for testing and debugging, it is not
+// meant for production use. Additionally, it does not provide an interchange
+// format for OpenTelemetry that is supported with any stability or
+// compatibility guarantees. If these are needed features, please use the OTLP
+// exporter instead.
+package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/encoder.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/encoder.go
new file mode 100644
index 000000000..e6ada7b0d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/encoder.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+
+import (
+ "errors"
+)
+
+// Encoder encodes and outputs OpenTelemetry metric data-types as human
+// readable text.
+type Encoder interface {
+ // Encode handles the encoding and writing of OpenTelemetry metric data.
+ Encode(v any) error
+}
+
+// encoderHolder is the concrete type used to wrap an Encoder so it can be
+// used as a atomic.Value type.
+type encoderHolder struct {
+ encoder Encoder
+}
+
+func (e encoderHolder) Encode(v any) error { return e.encoder.Encode(v) }
+
+// shutdownEncoder is used when the exporter is shutdown. It always returns
+// errShutdown when Encode is called.
+type shutdownEncoder struct{}
+
+var errShutdown = errors.New("exporter shutdown")
+
+func (shutdownEncoder) Encode(any) error { return errShutdown }
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go
new file mode 100644
index 000000000..fc155d79f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// exporter is an OpenTelemetry metric exporter.
+type exporter struct {
+ encVal atomic.Value // encoderHolder
+
+ shutdownOnce sync.Once
+
+ temporalitySelector metric.TemporalitySelector
+ aggregationSelector metric.AggregationSelector
+
+ redactTimestamps bool
+}
+
+// New returns a configured metric exporter.
+//
+// If no options are passed, the default exporter returned will use a JSON
+// encoder with tab indentations that output to STDOUT.
+func New(options ...Option) (metric.Exporter, error) {
+ cfg := newConfig(options...)
+ exp := &exporter{
+ temporalitySelector: cfg.temporalitySelector,
+ aggregationSelector: cfg.aggregationSelector,
+ redactTimestamps: cfg.redactTimestamps,
+ }
+ exp.encVal.Store(*cfg.encoder)
+ return exp, nil
+}
+
+func (e *exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.temporalitySelector(k)
+}
+
+func (e *exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.aggregationSelector(k)
+}
+
+func (e *exporter) Export(ctx context.Context, data *metricdata.ResourceMetrics) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ if e.redactTimestamps {
+ redactTimestamps(data)
+ }
+
+ global.Debug("STDOUT exporter export", "Data", data)
+
+ return e.encVal.Load().(encoderHolder).Encode(data)
+}
+
+func (e *exporter) ForceFlush(context.Context) error {
+ // exporter holds no state, nothing to flush.
+ return nil
+}
+
+func (e *exporter) Shutdown(context.Context) error {
+ e.shutdownOnce.Do(func() {
+ e.encVal.Store(encoderHolder{
+ encoder: shutdownEncoder{},
+ })
+ })
+ return nil
+}
+
+func (e *exporter) MarshalLog() interface{} {
+ return struct{ Type string }{Type: "STDOUT"}
+}
+
+func redactTimestamps(orig *metricdata.ResourceMetrics) {
+ for i, sm := range orig.ScopeMetrics {
+ metrics := sm.Metrics
+ for j, m := range metrics {
+ data := m.Data
+ orig.ScopeMetrics[i].Metrics[j].Data = redactAggregationTimestamps(data)
+ }
+ }
+}
+
+var errUnknownAggType = errors.New("unknown aggregation type")
+
+func redactAggregationTimestamps(orig metricdata.Aggregation) metricdata.Aggregation {
+ switch a := orig.(type) {
+ case metricdata.Sum[float64]:
+ return metricdata.Sum[float64]{
+ Temporality: a.Temporality,
+ DataPoints: redactDataPointTimestamps(a.DataPoints),
+ IsMonotonic: a.IsMonotonic,
+ }
+ case metricdata.Sum[int64]:
+ return metricdata.Sum[int64]{
+ Temporality: a.Temporality,
+ DataPoints: redactDataPointTimestamps(a.DataPoints),
+ IsMonotonic: a.IsMonotonic,
+ }
+ case metricdata.Gauge[float64]:
+ return metricdata.Gauge[float64]{
+ DataPoints: redactDataPointTimestamps(a.DataPoints),
+ }
+ case metricdata.Gauge[int64]:
+ return metricdata.Gauge[int64]{
+ DataPoints: redactDataPointTimestamps(a.DataPoints),
+ }
+ case metricdata.Histogram[int64]:
+ return metricdata.Histogram[int64]{
+ Temporality: a.Temporality,
+ DataPoints: redactHistogramTimestamps(a.DataPoints),
+ }
+ case metricdata.Histogram[float64]:
+ return metricdata.Histogram[float64]{
+ Temporality: a.Temporality,
+ DataPoints: redactHistogramTimestamps(a.DataPoints),
+ }
+ default:
+ global.Error(errUnknownAggType, fmt.Sprintf("%T", a))
+ return orig
+ }
+}
+
+func redactHistogramTimestamps[T int64 | float64](hdp []metricdata.HistogramDataPoint[T]) []metricdata.HistogramDataPoint[T] {
+ out := make([]metricdata.HistogramDataPoint[T], len(hdp))
+ for i, dp := range hdp {
+ out[i] = metricdata.HistogramDataPoint[T]{
+ Attributes: dp.Attributes,
+ Count: dp.Count,
+ Sum: dp.Sum,
+ Bounds: dp.Bounds,
+ BucketCounts: dp.BucketCounts,
+ Min: dp.Min,
+ Max: dp.Max,
+ }
+ }
+ return out
+}
+
+func redactDataPointTimestamps[T int64 | float64](sdp []metricdata.DataPoint[T]) []metricdata.DataPoint[T] {
+ out := make([]metricdata.DataPoint[T], len(sdp))
+ for i, dp := range sdp {
+ out[i] = metricdata.DataPoint[T]{
+ Attributes: dp.Attributes,
+ Value: dp.Value,
+ }
+ }
+ return out
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md
new file mode 100644
index 000000000..f84dee7ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/README.md
@@ -0,0 +1,3 @@
+# STDOUT Trace Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/stdout/stdouttrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/stdout/stdouttrace)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go
new file mode 100644
index 000000000..0ba3424e2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/config.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+
+import (
+ "io"
+ "os"
+)
+
+var (
+ defaultWriter = os.Stdout
+ defaultPrettyPrint = false
+ defaultTimestamps = true
+)
+
+// config contains options for the STDOUT exporter.
+type config struct {
+ // Writer is the destination. If not set, os.Stdout is used.
+ Writer io.Writer
+
+ // PrettyPrint will encode the output into readable JSON. Default is
+ // false.
+ PrettyPrint bool
+
+ // Timestamps specifies if timestamps should be printed. Default is
+ // true.
+ Timestamps bool
+}
+
+// newConfig creates a validated Config configured with options.
+func newConfig(options ...Option) config {
+ cfg := config{
+ Writer: defaultWriter,
+ PrettyPrint: defaultPrettyPrint,
+ Timestamps: defaultTimestamps,
+ }
+ for _, opt := range options {
+ cfg = opt.apply(cfg)
+ }
+ return cfg
+}
+
+// Option sets the value of an option for a Config.
+type Option interface {
+ apply(config) config
+}
+
+// WithWriter sets the export stream destination.
+func WithWriter(w io.Writer) Option {
+ return writerOption{w}
+}
+
+type writerOption struct {
+ W io.Writer
+}
+
+func (o writerOption) apply(cfg config) config {
+ cfg.Writer = o.W
+ return cfg
+}
+
+// WithPrettyPrint prettifies the emitted output.
+func WithPrettyPrint() Option {
+ return prettyPrintOption(true)
+}
+
+type prettyPrintOption bool
+
+func (o prettyPrintOption) apply(cfg config) config {
+ cfg.PrettyPrint = bool(o)
+ return cfg
+}
+
+// WithoutTimestamps sets the export stream to not include timestamps.
+func WithoutTimestamps() Option {
+ return timestampsOption(false)
+}
+
+type timestampsOption bool
+
+func (o timestampsOption) apply(cfg config) config {
+ cfg.Timestamps = bool(o)
+ return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go
new file mode 100644
index 000000000..eff7730cd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/doc.go
@@ -0,0 +1,6 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package stdouttrace contains an OpenTelemetry exporter for tracing
+// telemetry to be written to an output destination as JSON.
+package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go
new file mode 100644
index 000000000..bdb915ba8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+
+import (
+ "context"
+ "encoding/json"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+var zeroTime time.Time
+
+var _ trace.SpanExporter = &Exporter{}
+
+// New creates an Exporter with the passed options.
+func New(options ...Option) (*Exporter, error) {
+ cfg := newConfig(options...)
+
+ enc := json.NewEncoder(cfg.Writer)
+ if cfg.PrettyPrint {
+ enc.SetIndent("", "\t")
+ }
+
+ return &Exporter{
+ encoder: enc,
+ timestamps: cfg.Timestamps,
+ }, nil
+}
+
+// Exporter is an implementation of trace.SpanSyncer that writes spans to stdout.
+type Exporter struct {
+ encoder *json.Encoder
+ encoderMu sync.Mutex
+ timestamps bool
+
+ stoppedMu sync.RWMutex
+ stopped bool
+}
+
+// ExportSpans writes spans in json format to stdout.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ e.stoppedMu.RLock()
+ stopped := e.stopped
+ e.stoppedMu.RUnlock()
+ if stopped {
+ return nil
+ }
+
+ if len(spans) == 0 {
+ return nil
+ }
+
+ stubs := tracetest.SpanStubsFromReadOnlySpans(spans)
+
+ e.encoderMu.Lock()
+ defer e.encoderMu.Unlock()
+ for i := range stubs {
+ stub := &stubs[i]
+ // Remove timestamps
+ if !e.timestamps {
+ stub.StartTime = zeroTime
+ stub.EndTime = zeroTime
+ for j := range stub.Events {
+ ev := &stub.Events[j]
+ ev.Time = zeroTime
+ }
+ }
+
+ // Encode span stubs, one by one
+ if err := e.encoder.Encode(stub); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Shutdown is called to stop the exporter, it performs no action.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ e.stoppedMu.Lock()
+ e.stopped = true
+ e.stoppedMu.Unlock()
+
+ return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct {
+ Type string
+ WithTimestamps bool
+ }{
+ Type: "stdout",
+ WithTimestamps: e.timestamps,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/log/DESIGN.md
new file mode 100644
index 000000000..47d39d34b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/DESIGN.md
@@ -0,0 +1,634 @@
+# Logs API
+
+## Abstract
+
+`go.opentelemetry.io/otel/log` provides
+[Logs API](https://opentelemetry.io/docs/specs/otel/logs/api/).
+
+The prototype was created in
+[#4725](https://github.com/open-telemetry/opentelemetry-go/pull/4725).
+
+## Background
+
+The key challenge is to create a performant API compliant with the [specification](https://opentelemetry.io/docs/specs/otel/logs/api/)
+with an intuitive and user friendly design.
+Performance is seen as one of the most important characteristics of logging libraries in Go.
+
+## Design
+
+This proposed design aims to:
+
+- be specification compliant,
+- be similar to Trace and Metrics API,
+- take advantage of both OpenTelemetry and `slog` experience to achieve acceptable performance.
+
+### Module structure
+
+The API is published as a single `go.opentelemetry.io/otel/log` Go module.
+
+The package structure is similar to Trace API and Metrics API.
+The Go module consists of the following packages:
+
+- `go.opentelemetry.io/otel/log`
+- `go.opentelemetry.io/otel/log/embedded`
+- `go.opentelemetry.io/otel/log/logtest`
+- `go.opentelemetry.io/otel/log/noop`
+
+Rejected alternative:
+
+- [Reuse slog](#reuse-slog)
+
+### LoggerProvider
+
+The [`LoggerProvider` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#loggerprovider)
+is defined as `LoggerProvider` interface in [provider.go](provider.go).
+
+The specification may add new operations to `LoggerProvider`.
+The interface may have methods added without a package major version bump.
+This embeds `embedded.LoggerProvider` to help inform an API implementation
+author about this non-standard API evolution.
+This approach is already used in Trace API and Metrics API.
+
+#### LoggerProvider.Logger
+
+The `Logger` method implements the [`Get a Logger` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#get-a-logger).
+
+The required `name` parameter is accepted as a `string` method argument.
+
+The `LoggerOption` options are defined to support optional parameters.
+
+Implementation requirements:
+
+- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#concurrency-requirements)
+ the method to be safe to be called concurrently.
+
+- The method should use some default name if the passed name is empty
+ in order to meet the [specification's SDK requirement](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logger-creation)
+ to return a working logger when an invalid name is passed
+ as well as to resemble the behavior of getting tracers and meters.
+
+`Logger` can be extended by adding new `LoggerOption` options
+and adding new exported fields to the `LoggerConfig` struct.
+This design is already used in Trace API for getting tracers
+and in Metrics API for getting meters.
+
+Rejected alternative:
+
+- [Passing struct as parameter to LoggerProvider.Logger](#passing-struct-as-parameter-to-loggerproviderlogger).
+
+### Logger
+
+The [`Logger` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#logger)
+is defined as `Logger` interface in [logger.go](logger.go).
+
+The specification may add new operations to `Logger`.
+The interface may have methods added without a package major version bump.
+This embeds `embedded.Logger` to help inform an API implementation
+author about this non-standard API evolution.
+This approach is already used in Trace API and Metrics API.
+
+### Logger.Emit
+
+The `Emit` method implements the [`Emit a LogRecord` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord).
+
+[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/)
+is accepted as a `context.Context` method argument.
+
+Calls to `Emit` are supposed to be on the hot path.
+Therefore, in order to reduce the number of heap allocations,
+the [`LogRecord` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord),
+is defined as `Record` struct in [record.go](record.go).
+
+[`Timestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-timestamp)
+is accessed using the following methods:
+
+```go
+func (r *Record) Timestamp() time.Time
+func (r *Record) SetTimestamp(t time.Time)
+```
+
+[`ObservedTimestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-observedtimestamp)
+is accessed using the following methods:
+
+```go
+func (r *Record) ObservedTimestamp() time.Time
+func (r *Record) SetObservedTimestamp(t time.Time)
+```
+
+[`EventName`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-eventname)
+is accessed using the following methods:
+
+```go
+func (r *Record) EventName() string
+func (r *Record) SetEventName(s string)
+```
+
+[`SeverityNumber`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber)
+is accessed using the following methods:
+
+```go
+func (r *Record) Severity() Severity
+func (r *Record) SetSeverity(s Severity)
+```
+
+`Severity` type is defined in [severity.go](severity.go).
+The constants are based on
+[Displaying Severity recommendation](https://opentelemetry.io/docs/specs/otel/logs/data-model/#displaying-severity).
+Additionally, `Severity[Level]` constants are defined to make the API more readable and user friendly.
+
+[`SeverityText`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext)
+is accessed using the following methods:
+
+```go
+func (r *Record) SeverityText() string
+func (r *Record) SetSeverityText(s string)
+```
+
+[`Body`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
+is accessed using the following methods:
+
+```go
+func (r *Record) Body() Value
+func (r *Record) SetBody(v Value)
+```
+
+[Log record attributes](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes)
+are accessed using the following methods:
+
+```go
+func (r *Record) WalkAttributes(f func(KeyValue) bool)
+func (r *Record) AddAttributes(attrs ...KeyValue)
+```
+
+`Record` has an `AttributesLen` method that returns
+the number of attributes to allow slice preallocation
+when converting records to a different representation:
+
+```go
+func (r *Record) AttributesLen() int
+```
+
+The records attributes design and implementation is based on
+[`slog.Record`](https://pkg.go.dev/log/slog#Record).
+It allows achieving high-performance access and manipulation of the attributes
+while keeping the API user friendly.
+It relieves the user from making his own improvements
+for reducing the number of allocations when passing attributes.
+
+The abstractions described in
+[the specification](https://opentelemetry.io/docs/specs/otel/logs/#new-first-party-application-logs)
+are defined in [keyvalue.go](keyvalue.go).
+
+`Value` is representing `any`.
+`KeyValue` is representing a key(string)-value(`any`) pair.
+
+`Kind` is an enumeration used for specifying the underlying value type.
+`KindEmpty` is used for an empty (zero) value.
+`KindBool` is used for boolean value.
+`KindFloat64` is used for a double precision floating point (IEEE 754-1985) value.
+`KindInt64` is used for a signed integer value.
+`KindString` is used for a string value.
+`KindBytes` is used for a slice of bytes (in spec: A byte array).
+`KindSlice` is used for a slice of values (in spec: an array (a list) of any values).
+`KindMap` is used for a slice of key-value pairs (in spec: `map<string, any>`).
+
+These types are defined in `go.opentelemetry.io/otel/log` package
+as they are tightly coupled with the API and different from common attributes.
+
+The internal implementation of `Value` is based on
+[`slog.Value`](https://pkg.go.dev/log/slog#Value)
+and the API is mostly inspired by
+[`attribute.Value`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute#Value).
+The benchmarks[^1] show that the implementation is more performant than
+[`attribute.Value`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute#Value).
+
+The value accessors (`func (v Value) As[Kind]` methods) must not panic,
+as it would violate the [specification](https://opentelemetry.io/docs/specs/otel/error-handling/):
+
+> API methods MUST NOT throw unhandled exceptions when used incorrectly by end
+> users. The API and SDK SHOULD provide safe defaults for missing or invalid
+> arguments. [...] Whenever the library suppresses an error that would otherwise
+> have been exposed to the user, the library SHOULD log the error using
+> language-specific conventions.
+
+Therefore, the value accessors should return a zero value
+and log an error when a bad accessor is called.
+
+The `Severity`, `Kind`, `Value`, `KeyValue` may implement
+the [`fmt.Stringer`](https://pkg.go.dev/fmt#Stringer) interface.
+However, it is not needed for the first stable release
+and the `String` methods can be added later.
+
+The caller must not subsequently mutate the record passed to `Emit`.
+This would allow the implementation to not clone the record,
+but simply retain, modify or discard it.
+The implementation may still choose to clone the record or copy its attributes
+if it needs to retain or modify it,
+e.g. in case of asynchronous processing to eliminate the possibility of data races,
+because the user can technically reuse the record and add new attributes
+after the call (even when the documentation says that the caller must not do it).
+
+Implementation requirements:
+
+- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#concurrency-requirements)
+ the method to be safe to be called concurrently.
+
+- The method must not interrupt the record processing if the context is canceled
+ per ["ignoring context cancellation" guideline](../CONTRIBUTING.md#ignoring-context-cancellation).
+
+- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord)
+ to use the current time as the observed timestamp if the passed one is empty.
+
+- The method should handle the trace context passed via `ctx` argument in order to meet the
+ [specification's SDK requirement](https://opentelemetry.io/docs/specs/otel/logs/sdk/#readablelogrecord)
+ to populate the trace context fields from the resolved context.
+
+`Emit` can be extended by adding new exported fields to the `Record` struct.
+
+Rejected alternatives:
+
+- [Record as interface](#record-as-interface)
+- [Options as parameter to Logger.Emit](#options-as-parameter-to-loggeremit)
+- [Passing record as pointer to Logger.Emit](#passing-record-as-pointer-to-loggeremit)
+- [Logger.WithAttributes](#loggerwithattributes)
+- [Record attributes as slice](#record-attributes-as-slice)
+- [Use any instead of defining Value](#use-any-instead-of-defining-value)
+- [Severity type encapsulating number and text](#severity-type-encapsulating-number-and-text)
+- [Reuse attribute package](#reuse-attribute-package)
+- [Mix receiver types for Record](#mix-receiver-types-for-record)
+- [Add XYZ method to Logger](#add-xyz-method-to-logger)
+- [Rename KeyValue to Attr](#rename-keyvalue-to-attr)
+
+### Logger.Enabled
+
+The `Enabled` method implements the [`Enabled` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#enabled).
+
+[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/)
+is accepted as a `context.Context` method argument.
+
+Calls to `Enabled` are supposed to be on the hot path and the list of arguments
+can be extended in the future. Therefore, in order to reduce the number of heap
+allocations and make it possible to handle new arguments, `Enabled` accepts
+an `EnabledParameters` struct, defined in [logger.go](logger.go), as the second
+method argument.
+
+The `EnabledParameters` uses fields, instead of getters and setters, to allow
+simpler usage which allows configuring the `EnabledParameters` in the same line
+where `Enabled` is called.
+
+### noop package
+
+The `go.opentelemetry.io/otel/log/noop` package provides
+[Logs API No-Op Implementation](https://opentelemetry.io/docs/specs/otel/logs/noop/).
+
+### Trace context correlation
+
+The bridge implementation should do its best to pass
+the `ctx` containing the trace context from the caller
+so it can later be passed via `Logger.Emit`.
+
+It is not expected that users (caller or bridge implementation) reconstruct
+a `context.Context`. Reconstructing a `context.Context` with
+[`trace.ContextWithSpanContext`](https://pkg.go.dev/go.opentelemetry.io/otel/trace#ContextWithSpanContext)
+and [`trace.NewSpanContext`](https://pkg.go.dev/go.opentelemetry.io/otel/trace#NewSpanContext)
+would usually involve more memory allocations.
+
+The logging libraries which have recording methods that accepts `context.Context`,
+such as [`slog`](https://pkg.go.dev/log/slog),
+[`logrus`](https://pkg.go.dev/github.com/sirupsen/logrus),
+[`zerolog`](https://pkg.go.dev/github.com/rs/zerolog),
+make passing the trace context trivial.
+
+However, some libraries do not accept a `context.Context` in their recording methods.
+Structured logging libraries,
+such as [`logr`](https://pkg.go.dev/github.com/go-logr/logr)
+and [`zap`](https://pkg.go.dev/go.uber.org/zap),
+offer passing `any` type as a log attribute/field.
+Therefore, their bridge implementations can define a "special" log attributes/field
+that will be used to capture the trace context.
+
+[The prototype](https://github.com/open-telemetry/opentelemetry-go/pull/4725)
+has bridge implementations that handle trace context correlation efficiently.
+
+## Benchmarking
+
+The benchmarks take inspiration from [`slog`](https://pkg.go.dev/log/slog),
+because for the Go team it was also critical to create API that would be fast
+and interoperable with existing logging packages.[^2][^3]
+
+The benchmark results can be found in [the prototype](https://github.com/open-telemetry/opentelemetry-go/pull/4725).
+
+## Rejected alternatives
+
+### Reuse slog
+
+The API must not be coupled to [`slog`](https://pkg.go.dev/log/slog),
+nor any other logging library.
+
+The API needs to evolve orthogonally to `slog`.
+
+`slog` is not compliant with the [Logs API](https://opentelemetry.io/docs/specs/otel/logs/api/).
+and we cannot expect the Go team to make `slog` compliant with it.
+
+The interoperability can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge).
+
+You can read more about OpenTelemetry Logs design on [opentelemetry.io](https://opentelemetry.io/docs/concepts/signals/logs/).
+
+### Record as interface
+
+`Record` is defined as a `struct` because of the following reasons.
+
+Log record is a value object without any behavior.
+It is used as data input for Logger methods.
+
+The log record resembles the instrument config structs like [metric.Float64CounterConfig](https://pkg.go.dev/go.opentelemetry.io/otel/metric#Float64CounterConfig).
+
+Using `struct` instead of `interface` improves the performance as e.g.
+indirect calls are less optimized,
+usage of interfaces tend to increase heap allocations.[^3]
+
+### Options as parameter to Logger.Emit
+
+One of the initial ideas was to have:
+
+```go
+type Logger interface{
+ embedded.Logger
+ Emit(ctx context.Context, options ...RecordOption)
+}
+```
+
+The main reason was that design would be similar
+to the [Meter API](https://pkg.go.dev/go.opentelemetry.io/otel/metric#Meter)
+for creating instruments.
+
+However, passing `Record` directly, instead of using options,
+is more performant as it reduces heap allocations.[^4]
+
+Another advantage of passing `Record` is that API would not have functions like `NewRecord(options...)`,
+which would be used by the SDK and not by the users.
+
+Finally, the definition would be similar to [`slog.Handler.Handle`](https://pkg.go.dev/log/slog#Handler)
+that was designed to provide optimization opportunities.[^2]
+
+### Passing record as pointer to Logger.Emit
+
+So far the benchmarks do not show differences that would
+favor passing the record via pointer (and vice versa).
+
+Passing via value feels safer because of the following reasons.
+
+The user would not be able to pass `nil`.
+Therefore, it reduces the possibility to have a nil pointer dereference.
+
+It should reduce the possibility of a heap allocation.
+
+It follows the design of [`slog.Handler`](https://pkg.go.dev/log/slog#Handler).
+
+It follows one of Google's Go Style Decisions
+to prefer [passing values](https://google.github.io/styleguide/go/decisions#pass-values).
+
+### Passing struct as parameter to LoggerProvider.Logger
+
+Similarly to `Logger.Emit`, we could have something like:
+
+```go
+type LoggerProvider interface{
+ embedded.LoggerProvider
+ Logger(name string, config LoggerConfig)
+}
+```
+
+The drawback of this idea would be that this would be
+a different design from Trace and Metrics API.
+
+The performance of acquiring a logger is not as critical
+as the performance of emitting a log record. While a single
+HTTP/RPC handler could write hundreds of logs, it should not
+create a new logger for each log entry.
+The bridge implementation should reuse loggers whenever possible.
+
+### Logger.WithAttributes
+
+We could add `WithAttributes` to the `Logger` interface.
+Then `Record` could be a simple struct with only exported fields.
+The idea was that the SDK would implement the performance improvements
+instead of doing it in the API.
+This would allow having different optimization strategies.
+
+During the analysis[^5], it turned out that the main problem of this proposal
+is that the variadic slice passed to an interface method is always heap allocated.
+
+Moreover, the logger returned by `WithAttributes` was allocated on the heap.
+
+Lastly, the proposal was not specification compliant.
+
+### Record attributes as slice
+
+One of the proposals[^6] was to have `Record` as a simple struct:
+
+```go
+type Record struct {
+ Timestamp time.Time
+ ObservedTimestamp time.Time
+ EventName string
+ Severity Severity
+ SeverityText string
+ Body Value
+ Attributes []KeyValue
+}
+```
+
+The bridge implementations could use [`sync.Pool`](https://pkg.go.dev/sync#Pool)
+for reducing the number of allocations when passing attributes.
+
+The benchmarks results were better.
+
+In such a design, most bridges would have a `sync.Pool`
+to reduce the number of heap allocations.
+However, the `sync.Pool` will not work correctly with API implementations
+that would take ownership of the record
+(e.g. implementations that do not copy records for asynchronous processing).
+The current design, even in case of improper API implementation,
+has lower chances of encountering a bug as most bridges would
+create a record, pass it, and forget about it.
+
+For reference, here is the reason why `slog` does not use `sync.Pool`[^3]
+as well:
+
+> We can use a sync pool for records though we decided not to.
+You can but it's a bad idea for us. Why?
+Because users have control of Records.
+Handler writers can get their hands on a record
+and we'd have to ask them to free it
+or try to free it magically at some point.
+But either way, they could get themselves in trouble by freeing it twice
+or holding on to one after they free it.
+That's a use after free bug and that's why `zerolog` was problematic for us.
+`zerolog` as part of its speed exposes a pool allocated value to users
+if you use `zerolog` the normal way, that you'll see in all the examples,
+you will never encounter a problem.
+But if you do something a little out of the ordinary you can get
+use after free bugs and we just didn't want to put that in the standard library.
+
+Therefore, we decided to not follow the proposal as it is
+less user friendly (users and bridges would use e.g. a `sync.Pool` to reduce
+the number of heap allocation), less safe (more prone to use after free bugs
+and race conditions), and the benchmark differences were not significant.
+
+### Use any instead of defining Value
+
+[Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
+defines Body to be `any`.
+One could propose to define `Body` (and attribute values) as `any`
+instead of defining a new type (`Value`).
+
+First of all, [`any` type defined in the specification](https://opentelemetry.io/docs/specs/otel/logs/data-model/#type-any)
+is not the same as `any` (`interface{}`) in Go.
+
+Moreover, using `any` as a field would decrease the performance.[^7]
+
+Notice it will be still possible to add following kind and factories
+in a backwards compatible way:
+
+```go
+const KindMap Kind
+
+func AnyValue(value any) KeyValue
+
+func Any(key string, value any) KeyValue
+```
+
+However, currently, it would not be specification compliant.
+
+### Severity type encapsulating number and text
+
+We could combine severity into a single field defining a type:
+
+```go
+type Severity struct {
+ Number SeverityNumber
+ Text string
+}
+```
+
+However, the [Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition)
+defines it as independent fields.
+It should be more user friendly to have them separated.
+Especially when having getter and setter methods, setting one value
+when the other is already set would be unpleasant.
+
+### Reuse attribute package
+
+It was tempting to reuse the existing
+[`go.opentelemetry.io/otel/attribute`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) package
+for defining log attributes and body.
+
+However, this would be wrong because [the log attribute definition](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes)
+is different from [the common attribute definition](https://opentelemetry.io/docs/specs/otel/common/#attribute).
+
+Moreover, there is nothing telling that [the body definition](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
+has anything in common with a common attribute value.
+
+Therefore, we define new types representing the abstract types defined
+in the [Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#definitions-used-in-this-document).
+
+### Mix receiver types for Record
+
+Methods of [`slog.Record`](https://pkg.go.dev/log/slog#Record)
+have different receiver types.
+
+In `log/slog` GitHub issue we can only find that the reason is:[^8]
+
+>> some receiver of Record struct is by value
+> Passing Records by value means they incur no heap allocation.
+> That improves performance overall, even though they are copied.
+
+However, the benchmarks do not show any noticeable differences.[^9]
+
+The compiler is smart enough to not make a heap allocation for any of these methods.
+The use of a pointer receiver does not cause any heap allocation.
+From Go FAQ:[^10]
+
+> In the current compilers, if a variable has its address taken,
+> that variable is a candidate for allocation on the heap.
+> However, a basic escape analysis recognizes some cases
+> when such variables will not live past the return from the function
+> and can reside on the stack.
+
+The [Understanding Allocations: the Stack and the Heap](https://www.youtube.com/watch?v=ZMZpH4yT7M0)
+presentation by Jacob Walker describes the escape analysis with details.
+
+Moreover, also from Go FAQ:[^10]
+
+> Also, if a local variable is very large,
+> it might make more sense to store it on the heap rather than the stack.
+
+Therefore, even if we use a value receiver and the value is very large
+it may be heap allocated.
+
+Both [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments#receiver-type)
+and [Google's Go Style Decisions](https://google.github.io/styleguide/go/decisions#receiver-type)
+highly recommend making the methods for a type either all pointer methods
+or all value methods. Google's Go Style Decisions even goes further and says:
+
+> There is a lot of misinformation about whether passing a value or a pointer
+> to a function can affect performance.
+> The compiler can choose to pass pointers to values on the stack
+> as well as copying values on the stack,
+> but these considerations should not outweigh the readability
+> and correctness of the code in most circumstances.
+> When the performance does matter, it is important to profile both approaches
+> with a realistic benchmark before deciding that one approach outperforms the other.
+
+Because the benchmarks[^9] do not prove any performance difference
+and the general recommendation is to not mix receiver types,
+we decided to use pointer receivers for all `Record` methods.
+
+### Add XYZ method to Logger
+
+The `Logger` does not have methods like `SetSeverity`, etc.
+as the Logs API needs to follow (be compliant with)
+the [specification](https://opentelemetry.io/docs/specs/otel/logs/api/).
+
+### Rename KeyValue to Attr
+
+There was a proposal to rename `KeyValue` to `Attr` (or `Attribute`).[^11]
+New developers may not intuitively know that `log.KeyValue` is an attribute in
+the OpenTelemetry parlance.
+
+During the discussion we agreed to keep the `KeyValue` name.
+
+The type is used in multiple semantics:
+
+- as a log attribute,
+- as a map item,
+- as a log record Body.
+
+As for map item semantics, this type is a key-value pair, not an attribute.
+Naming the type as `Attr` would convey semantical meaning
+that would not be correct for a map.
+
+We expect that most of the Logs API users will be OpenTelemetry contributors.
+We plan to implement bridges for the most popular logging libraries ourselves.
+Given we will all have the context needed to disambiguate these overlapping
+names, developers' confusion should not be an issue.
+
+For bridges not developed by us,
+developers will likely look at our existing bridges for inspiration.
+Our correct use of these types will be a reference to them.
+
+At last, we provide `ValueFromAttribute` and `KeyValueFromAttribute`
+to offer reuse of `attribute.Value` and `attribute.KeyValue`.
+
+[^1]: [Handle structured body and attributes](https://github.com/pellared/opentelemetry-go/pull/7)
+[^2]: Jonathan Amsterdam, [The Go Blog: Structured Logging with slog](https://go.dev/blog/slog)
+[^3]: Jonathan Amsterdam, [GopherCon Europe 2023: A Fast Structured Logging Package](https://www.youtube.com/watch?v=tC4Jt3i62ns)
+[^4]: [Emit definition discussion with benchmarks](https://github.com/open-telemetry/opentelemetry-go/pull/4725#discussion_r1400869566)
+[^5]: [Logger.WithAttributes analysis](https://github.com/pellared/opentelemetry-go/pull/3)
+[^6]: [Record attributes as field and use sync.Pool for reducing allocations](https://github.com/pellared/opentelemetry-go/pull/4) and [Record attributes based on slog.Record](https://github.com/pellared/opentelemetry-go/pull/6)
+[^7]: [Record.Body as any](https://github.com/pellared/opentelemetry-go/pull/5)
+[^8]: [log/slog: structured, leveled logging](https://github.com/golang/go/issues/56345#issuecomment-1302563756)
+[^9]: [Record with pointer receivers only](https://github.com/pellared/opentelemetry-go/pull/8)
+[^10]: [Go FAQ: Stack or heap](https://go.dev/doc/faq#stack_or_heap)
+[^11]: [Rename KeyValue to Attr discussion](https://github.com/open-telemetry/opentelemetry-go/pull/4809#discussion_r1476080093)
diff --git a/vendor/go.opentelemetry.io/otel/log/LICENSE b/vendor/go.opentelemetry.io/otel/log/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/log/README.md b/vendor/go.opentelemetry.io/otel/log/README.md
new file mode 100644
index 000000000..3f7142711
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/README.md
@@ -0,0 +1,3 @@
+# Log API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log)](https://pkg.go.dev/go.opentelemetry.io/otel/log)
diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go
new file mode 100644
index 000000000..18cbd1cb2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/doc.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package log provides the OpenTelemetry Logs API.
+
+This package is intended to be used by bridges between existing logging
+libraries and OpenTelemetry. Users should not directly use this package as a
+logging library. Instead, install one of the bridges listed in the
+[registry], and use the associated logging library.
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy, all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+ - Compilation failure
+ - Panic
+ - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/log/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/log/embedded] in their implementation. For example,
+
+ import "go.opentelemetry.io/otel/log/embedded"
+
+ type LoggerProvider struct {
+ embedded.LoggerProvider
+ // ...
+ }
+
+If an author wants the default behavior of their implementations to be a panic,
+they need to embed the API interface directly.
+
+ import "go.opentelemetry.io/otel/log"
+
+ type LoggerProvider struct {
+ log.LoggerProvider
+ // ...
+ }
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/log].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/log/noop]:
+
+ import "go.opentelemetry.io/otel/log/noop"
+
+ type LoggerProvider struct {
+ noop.LoggerProvider
+ // ...
+ }
+
+It is strongly recommended that authors only embed
+go.opentelemetry.io/otel/log/noop if they choose this default behavior. That
+implementation is the only one OpenTelemetry authors can guarantee will fully
+implement all the API interfaces when a user updates their API.
+
+[registry]: https://opentelemetry.io/ecosystem/registry/?language=go&component=log-bridge
+*/
+package log // import "go.opentelemetry.io/otel/log"
diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/README.md b/vendor/go.opentelemetry.io/otel/log/embedded/README.md
new file mode 100644
index 000000000..bae4ac68f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/embedded/README.md
@@ -0,0 +1,3 @@
+# Log Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/log/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go
new file mode 100644
index 000000000..a3714c4c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry Logs
+// Bridge API].
+//
+// Implementers of the [OpenTelemetry Logs Bridge API] can embed the relevant
+// type from this package into their implementation directly. Doing so will
+// result in a compilation error for users when the [OpenTelemetry Logs Bridge
+// API] is extended (which is something that can happen without a major version
+// bump of the API package).
+//
+// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log
+package embedded // import "go.opentelemetry.io/otel/log/embedded"
+
+// LoggerProvider is embedded in the [Logs Bridge API LoggerProvider].
+//
+// Embed this interface in your implementation of the [Logs Bridge API
+// LoggerProvider] if you want users to experience a compilation error,
+// signaling they need to update to your latest implementation, when the [Logs
+// Bridge API LoggerProvider] interface is extended (which is something that
+// can happen without a major version bump of the API package).
+//
+// [Logs Bridge API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider
+type LoggerProvider interface{ loggerProvider() }
+
+// Logger is embedded in [Logs Bridge API Logger].
+//
+// Embed this interface in your implementation of the [Logs Bridge API Logger]
+// if you want users to experience a compilation error, signaling they need to
+// update to your latest implementation, when the [Logs Bridge API Logger]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+//
+// [Logs Bridge API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger
+type Logger interface{ logger() }
diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go
new file mode 100644
index 000000000..73e4e7dca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go
@@ -0,0 +1,443 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=Kind -trimprefix=Kind
+
+package log // import "go.opentelemetry.io/otel/log"
+
+import (
+ "bytes"
+ "cmp"
+ "errors"
+ "fmt"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// errKind is logged when a Value is decoded to an incompatible type.
+var errKind = errors.New("invalid Kind")
+
+// Kind is the kind of a [Value].
+type Kind int
+
+// Kind values.
+const (
+ KindEmpty Kind = iota
+ KindBool
+ KindFloat64
+ KindInt64
+ KindString
+ KindBytes
+ KindSlice
+ KindMap
+)
+
+// A Value represents a structured log value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+ // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+ // then the value of Value is in num as described above. Otherwise, it
+ // contains the value wrapped in the appropriate type.
+ any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for KindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for KindMap Values.
+ mapptr *KeyValue
+)
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ // This can be later converted back to int64 (overflow not checked).
+ return Value{num: uint64(v), any: KindInt64} // nolint:gosec
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: KindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: KindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...KeyValue) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ global.Error(errKind, "AsString", "Kind", v.Kind())
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != KindInt64 {
+ global.Error(errKind, "AsInt64", "Kind", v.Kind())
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != KindBool {
+ global.Error(errKind, "AsBool", "Kind", v.Kind())
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != KindFloat64 {
+ global.Error(errKind, "AsFloat64", "Kind", v.Kind())
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// KindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ global.Error(errKind, "AsBytes", "Kind", v.Kind())
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the Value
+// is not KindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ global.Error(errKind, "AsSlice", "Kind", v.Kind())
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the Value
+// is not KindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []KeyValue.
+func (v Value) AsMap() []KeyValue {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*KeyValue)(sp), v.num)
+ }
+ global.Error(errKind, "AsMap", "Kind", v.Kind())
+ return nil
+}
+
+// asMap returns the value held by v as a []KeyValue. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []KeyValue {
+ return unsafe.Slice((*KeyValue)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() Kind {
+ switch x := v.any.(type) {
+ case Kind:
+ return x
+ case stringptr:
+ return KindString
+ case bytesptr:
+ return KindBytes
+ case sliceptr:
+ return KindSlice
+ case mapptr:
+ return KindMap
+ default:
+ return KindEmpty
+ }
+}
+
+// Empty returns if v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == KindEmpty }
+
+// Equal returns if v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case KindInt64, KindBool:
+ return v.num == w.num
+ case KindString:
+ return v.asString() == w.asString()
+ case KindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case KindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case KindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, KeyValue.Equal)
+ case KindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case KindEmpty:
+ return true
+ default:
+ global.Error(errKind, "Equal", "Kind", k1)
+ return false
+ }
+}
+
+func sortMap(m []KeyValue) []KeyValue {
+ sm := make([]KeyValue, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b KeyValue) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case KindString:
+ return v.asString()
+ case KindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case KindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case KindBool:
+ return strconv.FormatBool(v.asBool())
+ case KindBytes:
+ return fmt.Sprint(v.asBytes())
+ case KindMap:
+ return fmt.Sprint(v.asMap())
+ case KindSlice:
+ return fmt.Sprint(v.asSlice())
+ case KindEmpty:
+ return "<nil>"
+ default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a log.Kind is not handled. It is
+		// preferable to have users open an issue asking why their attributes
+		// have an "unhandled: " prefix than to say that their code is panicking.
+ return fmt.Sprintf("<unhandled log.Kind: %s>", v.Kind())
+ }
+}
+
+// A KeyValue is a key-value pair used to represent a log attribute (a
+// superset of [go.opentelemetry.io/otel/attribute.KeyValue]) and map item.
+type KeyValue struct {
+ Key string
+ Value Value
+}
+
+// Equal returns if a is equal to b.
+func (a KeyValue) Equal(b KeyValue) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
+
+// String returns a KeyValue for a string value.
+func String(key, value string) KeyValue {
+ return KeyValue{key, StringValue(value)}
+}
+
+// Int64 returns a KeyValue for an int64 value.
+func Int64(key string, value int64) KeyValue {
+ return KeyValue{key, Int64Value(value)}
+}
+
+// Int returns a KeyValue for an int value.
+func Int(key string, value int) KeyValue {
+ return KeyValue{key, IntValue(value)}
+}
+
+// Float64 returns a KeyValue for a float64 value.
+func Float64(key string, value float64) KeyValue {
+ return KeyValue{key, Float64Value(value)}
+}
+
+// Bool returns a KeyValue for a bool value.
+func Bool(key string, value bool) KeyValue {
+ return KeyValue{key, BoolValue(value)}
+}
+
+// Bytes returns a KeyValue for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) KeyValue {
+ return KeyValue{key, BytesValue(value)}
+}
+
+// Slice returns a KeyValue for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) KeyValue {
+ return KeyValue{key, SliceValue(value...)}
+}
+
+// Map returns a KeyValue for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...KeyValue) KeyValue {
+ return KeyValue{key, MapValue(value...)}
+}
+
+// Empty returns a KeyValue with an empty value.
+func Empty(key string) KeyValue {
+ return KeyValue{key, Value{}}
+}
+
+// String returns key-value pair as a string, formatted like "key:value".
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (a KeyValue) String() string {
+ return fmt.Sprintf("%s:%s", a.Key, a.Value)
+}
+
+// ValueFromAttribute converts [attribute.Value] to [Value].
+func ValueFromAttribute(value attribute.Value) Value {
+ switch value.Type() {
+ case attribute.INVALID:
+ return Value{}
+ case attribute.BOOL:
+ return BoolValue(value.AsBool())
+ case attribute.BOOLSLICE:
+ val := value.AsBoolSlice()
+ res := make([]Value, 0, len(val))
+ for _, v := range val {
+ res = append(res, BoolValue(v))
+ }
+ return SliceValue(res...)
+ case attribute.INT64:
+ return Int64Value(value.AsInt64())
+ case attribute.INT64SLICE:
+ val := value.AsInt64Slice()
+ res := make([]Value, 0, len(val))
+ for _, v := range val {
+ res = append(res, Int64Value(v))
+ }
+ return SliceValue(res...)
+ case attribute.FLOAT64:
+ return Float64Value(value.AsFloat64())
+ case attribute.FLOAT64SLICE:
+ val := value.AsFloat64Slice()
+ res := make([]Value, 0, len(val))
+ for _, v := range val {
+ res = append(res, Float64Value(v))
+ }
+ return SliceValue(res...)
+ case attribute.STRING:
+ return StringValue(value.AsString())
+ case attribute.STRINGSLICE:
+ val := value.AsStringSlice()
+ res := make([]Value, 0, len(val))
+ for _, v := range val {
+ res = append(res, StringValue(v))
+ }
+ return SliceValue(res...)
+ }
+ // This code should never be reached
+ // as log attributes are a superset of standard attributes.
+ panic("unknown attribute type")
+}
+
+// KeyValueFromAttribute converts [attribute.KeyValue] to [KeyValue].
+func KeyValueFromAttribute(kv attribute.KeyValue) KeyValue {
+ return KeyValue{
+ Key: string(kv.Key),
+ Value: ValueFromAttribute(kv.Value),
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/log/kind_string.go b/vendor/go.opentelemetry.io/otel/log/kind_string.go
new file mode 100644
index 000000000..bdfaa1866
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/kind_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type=Kind -trimprefix=Kind"; DO NOT EDIT.
+
+package log
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[KindEmpty-0]
+ _ = x[KindBool-1]
+ _ = x[KindFloat64-2]
+ _ = x[KindInt64-3]
+ _ = x[KindString-4]
+ _ = x[KindBytes-5]
+ _ = x[KindSlice-6]
+ _ = x[KindMap-7]
+}
+
+const _Kind_name = "EmptyBoolFloat64Int64StringBytesSliceMap"
+
+var _Kind_index = [...]uint8{0, 5, 9, 16, 21, 27, 32, 37, 40}
+
+func (i Kind) String() string {
+ if i < 0 || i >= Kind(len(_Kind_index)-1) {
+ return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go
new file mode 100644
index 000000000..1205f08e2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/logger.go
@@ -0,0 +1,140 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/log"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/log/embedded"
+)
+
+// Logger emits log records.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Logger interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Logger
+
+ // Emit emits a log record.
+ //
+ // The record may be held by the implementation. Callers should not mutate
+ // the record after passed.
+ //
+ // Implementations of this method need to be safe for a user to call
+ // concurrently.
+ Emit(ctx context.Context, record Record)
+
+ // Enabled returns whether the Logger emits for the given context and
+ // param.
+ //
+ // This is useful for users that want to know if a [Record]
+ // will be processed or dropped before they perform complex operations to
+ // construct the [Record].
+ //
+	// The passed param is likely to be partial record information being
+	// provided (e.g. a param with only the Severity set).
+ // If a Logger needs more information than is provided, it
+ // is said to be in an indeterminate state (see below).
+ //
+ // The returned value will be true when the Logger will emit for the
+ // provided context and param, and will be false if the Logger will not
+ // emit. The returned value may be true or false in an indeterminate state.
+ // An implementation should default to returning true for an indeterminate
+ // state, but may return false if valid reasons in particular circumstances
+ // exist (e.g. performance, correctness).
+ //
+ // The param should not be held by the implementation. A copy should be
+ // made if the param needs to be held after the call returns.
+ //
+ // Implementations of this method need to be safe for a user to call
+ // concurrently.
+ Enabled(ctx context.Context, param EnabledParameters) bool
+}
+
+// LoggerOption applies configuration options to a [Logger].
+type LoggerOption interface {
+ // applyLogger is used to set a LoggerOption value of a LoggerConfig.
+ applyLogger(LoggerConfig) LoggerConfig
+}
+
+// LoggerConfig contains options for a [Logger].
+type LoggerConfig struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ version string
+ schemaURL string
+ attrs attribute.Set
+}
+
+// NewLoggerConfig returns a new [LoggerConfig] with all the options applied.
+func NewLoggerConfig(options ...LoggerOption) LoggerConfig {
+ var c LoggerConfig
+ for _, opt := range options {
+ c = opt.applyLogger(c)
+ }
+ return c
+}
+
+// InstrumentationVersion returns the version of the library providing
+// instrumentation.
+func (cfg LoggerConfig) InstrumentationVersion() string {
+ return cfg.version
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (cfg LoggerConfig) InstrumentationAttributes() attribute.Set {
+ return cfg.attrs
+}
+
+// SchemaURL returns the schema URL of the library providing instrumentation.
+func (cfg LoggerConfig) SchemaURL() string {
+ return cfg.schemaURL
+}
+
+type loggerOptionFunc func(LoggerConfig) LoggerConfig
+
+func (fn loggerOptionFunc) applyLogger(cfg LoggerConfig) LoggerConfig {
+ return fn(cfg)
+}
+
+// WithInstrumentationVersion returns a [LoggerOption] that sets the
+// instrumentation version of a [Logger].
+func WithInstrumentationVersion(version string) LoggerOption {
+ return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
+ config.version = version
+ return config
+ })
+}
+
+// WithInstrumentationAttributes returns a [LoggerOption] that sets the
+// instrumentation attributes of a [Logger].
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption {
+ return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
+// WithSchemaURL returns a [LoggerOption] that sets the schema URL for a
+// [Logger].
+func WithSchemaURL(schemaURL string) LoggerOption {
+ return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
+ config.schemaURL = schemaURL
+ return config
+ })
+}
+
+// EnabledParameters represents payload for [Logger]'s Enabled method.
+type EnabledParameters struct {
+ Severity Severity
+}
diff --git a/vendor/go.opentelemetry.io/otel/log/noop/README.md b/vendor/go.opentelemetry.io/otel/log/noop/README.md
new file mode 100644
index 000000000..da08ea638
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/noop/README.md
@@ -0,0 +1,3 @@
+# Log Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/log/noop)
diff --git a/vendor/go.opentelemetry.io/otel/log/noop/noop.go b/vendor/go.opentelemetry.io/otel/log/noop/noop.go
new file mode 100644
index 000000000..f45a7c7e0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/noop/noop.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the [OpenTelemetry Logs Bridge
+// API] that produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the [OpenTelemetry Logs Bridge API] will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// [OpenTelemetry Logs Bridge API]. Doing so will mean the implementation
+// defaults to no operation for methods it does not implement.
+//
+// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log
+package noop // import "go.opentelemetry.io/otel/log/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/log/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+ _ log.LoggerProvider = LoggerProvider{}
+ _ log.Logger = Logger{}
+)
+
+// LoggerProvider is an OpenTelemetry No-Op LoggerProvider.
+type LoggerProvider struct{ embedded.LoggerProvider }
+
+// NewLoggerProvider returns a LoggerProvider that does not record any telemetry.
+func NewLoggerProvider() LoggerProvider {
+ return LoggerProvider{}
+}
+
+// Logger returns an OpenTelemetry Logger that does not record any telemetry.
+func (LoggerProvider) Logger(string, ...log.LoggerOption) log.Logger {
+ return Logger{}
+}
+
+// Logger is an OpenTelemetry No-Op Logger.
+type Logger struct{ embedded.Logger }
+
+// Emit does nothing.
+func (Logger) Emit(context.Context, log.Record) {}
+
+// Enabled returns false. No log records are ever emitted.
+func (Logger) Enabled(context.Context, log.EnabledParameters) bool { return false }
diff --git a/vendor/go.opentelemetry.io/otel/log/provider.go b/vendor/go.opentelemetry.io/otel/log/provider.go
new file mode 100644
index 000000000..5c8ca328f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/provider.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/log"
+
+import "go.opentelemetry.io/otel/log/embedded"
+
+// LoggerProvider provides access to [Logger].
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type LoggerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.LoggerProvider
+
+ // Logger returns a new [Logger] with the provided name and configuration.
+ //
+ // The name needs to uniquely identify the source of logged code. It is
+ // recommended that name is the Go package name of the library using a log
+ // bridge (note: this is not the name of the bridge package). Most
+ // commonly, this means a bridge will need to accept this value from its
+ // users.
+ //
+ // If name is empty, implementations need to provide a default name.
+ //
+ // The version of the packages using a bridge can be critical information
+ // to include when logging. The bridge should accept this version
+ // information and use the [WithInstrumentationVersion] option to configure
+ // the Logger appropriately.
+ //
+ // Implementations of this method need to be safe for a user to call
+ // concurrently.
+ Logger(name string, options ...LoggerOption) Logger
+}
diff --git a/vendor/go.opentelemetry.io/otel/log/record.go b/vendor/go.opentelemetry.io/otel/log/record.go
new file mode 100644
index 000000000..4d2f32d0f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/record.go
@@ -0,0 +1,144 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/log"
+
+import (
+ "slices"
+ "time"
+)
+
+// attributesInlineCount is the number of attributes that are efficiently
+// stored in an array within a Record. This value is borrowed from slog which
+// performed a quantitative survey of log library use and found this value to
+// cover 95% of all use-cases (https://go.dev/blog/slog#performance).
+const attributesInlineCount = 5
+
+// Record represents a log record.
+// A log record with non-empty event name is interpreted as an event record.
+type Record struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ eventName string
+ timestamp time.Time
+ observedTimestamp time.Time
+ severity Severity
+ severityText string
+ body Value
+
+ // The fields below are for optimizing the implementation of Attributes and
+ // AddAttributes. This design is borrowed from the slog Record type:
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.22.0:src/log/slog/record.go;l=20
+
+ // Allocation optimization: an inline array sized to hold
+ // the majority of log calls (based on examination of open-source
+ // code). It holds the start of the list of attributes.
+ front [attributesInlineCount]KeyValue
+
+ // The number of attributes in front.
+ nFront int
+
+ // The list of attributes except for those in front.
+ // Invariants:
+ // - len(back) > 0 if nFront == len(front)
+ // - Unused array elements are zero-ed. Used to detect mistakes.
+ back []KeyValue
+}
+
+// EventName returns the event name.
+// A log record with non-empty event name is interpreted as an event record.
+func (r *Record) EventName() string {
+ return r.eventName
+}
+
+// SetEventName sets the event name.
+// A log record with non-empty event name is interpreted as an event record.
+func (r *Record) SetEventName(s string) {
+ r.eventName = s
+}
+
+// Timestamp returns the time when the log record occurred.
+func (r *Record) Timestamp() time.Time {
+ return r.timestamp
+}
+
+// SetTimestamp sets the time when the log record occurred.
+func (r *Record) SetTimestamp(t time.Time) {
+ r.timestamp = t
+}
+
+// ObservedTimestamp returns the time when the log record was observed.
+func (r *Record) ObservedTimestamp() time.Time {
+ return r.observedTimestamp
+}
+
+// SetObservedTimestamp sets the time when the log record was observed.
+func (r *Record) SetObservedTimestamp(t time.Time) {
+ r.observedTimestamp = t
+}
+
+// Severity returns the [Severity] of the log record.
+func (r *Record) Severity() Severity {
+ return r.severity
+}
+
+// SetSeverity sets the [Severity] level of the log record.
+func (r *Record) SetSeverity(level Severity) {
+ r.severity = level
+}
+
+// SeverityText returns severity (also known as log level) text. This is the
+// original string representation of the severity as it is known at the source.
+func (r *Record) SeverityText() string {
+ return r.severityText
+}
+
+// SetSeverityText sets severity (also known as log level) text. This is the
+// original string representation of the severity as it is known at the source.
+func (r *Record) SetSeverityText(text string) {
+ r.severityText = text
+}
+
+// Body returns the body of the log record.
+func (r *Record) Body() Value {
+ return r.body
+}
+
+// SetBody sets the body of the log record.
+func (r *Record) SetBody(v Value) {
+ r.body = v
+}
+
+// WalkAttributes walks all attributes the log record holds by calling f
+// on each [KeyValue] in the [Record]. Iteration stops if f returns false.
+func (r *Record) WalkAttributes(f func(KeyValue) bool) {
+ for i := 0; i < r.nFront; i++ {
+ if !f(r.front[i]) {
+ return
+ }
+ }
+ for _, a := range r.back {
+ if !f(a) {
+ return
+ }
+ }
+}
+
+// AddAttributes adds attributes to the log record.
+func (r *Record) AddAttributes(attrs ...KeyValue) {
+ var i int
+ for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
+ a := attrs[i]
+ r.front[r.nFront] = a
+ r.nFront++
+ }
+
+ r.back = slices.Grow(r.back, len(attrs[i:]))
+ r.back = append(r.back, attrs[i:]...)
+}
+
+// AttributesLen returns the number of attributes in the log record.
+func (r *Record) AttributesLen() int {
+ return r.nFront + len(r.back)
+}
diff --git a/vendor/go.opentelemetry.io/otel/log/severity.go b/vendor/go.opentelemetry.io/otel/log/severity.go
new file mode 100644
index 000000000..0240fd5ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/severity.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=Severity -linecomment
+
+package log // import "go.opentelemetry.io/otel/log"
+
+// Severity represents a log record severity (also known as log level). Smaller
+// numerical values correspond to less severe log records (such as debug
+// events), larger numerical values correspond to more severe log records (such
+// as errors and critical events).
+type Severity int
+
+// Severity values defined by OpenTelemetry.
+const (
+ // SeverityUndefined represents an unset Severity.
+ SeverityUndefined Severity = 0 // UNDEFINED
+
+ // A fine-grained debugging log record. Typically disabled in default
+ // configurations.
+ SeverityTrace1 Severity = 1 // TRACE
+ SeverityTrace2 Severity = 2 // TRACE2
+ SeverityTrace3 Severity = 3 // TRACE3
+ SeverityTrace4 Severity = 4 // TRACE4
+
+ // A debugging log record.
+ SeverityDebug1 Severity = 5 // DEBUG
+ SeverityDebug2 Severity = 6 // DEBUG2
+ SeverityDebug3 Severity = 7 // DEBUG3
+ SeverityDebug4 Severity = 8 // DEBUG4
+
+ // An informational log record. Indicates that an event happened.
+ SeverityInfo1 Severity = 9 // INFO
+ SeverityInfo2 Severity = 10 // INFO2
+ SeverityInfo3 Severity = 11 // INFO3
+ SeverityInfo4 Severity = 12 // INFO4
+
+ // A warning log record. Not an error but is likely more important than an
+ // informational event.
+ SeverityWarn1 Severity = 13 // WARN
+ SeverityWarn2 Severity = 14 // WARN2
+ SeverityWarn3 Severity = 15 // WARN3
+ SeverityWarn4 Severity = 16 // WARN4
+
+ // An error log record. Something went wrong.
+ SeverityError1 Severity = 17 // ERROR
+ SeverityError2 Severity = 18 // ERROR2
+ SeverityError3 Severity = 19 // ERROR3
+ SeverityError4 Severity = 20 // ERROR4
+
+ // A fatal log record such as application or system crash.
+ SeverityFatal1 Severity = 21 // FATAL
+ SeverityFatal2 Severity = 22 // FATAL2
+ SeverityFatal3 Severity = 23 // FATAL3
+ SeverityFatal4 Severity = 24 // FATAL4
+
+ // Convenience definitions for the base severity of each level.
+ SeverityTrace = SeverityTrace1
+ SeverityDebug = SeverityDebug1
+ SeverityInfo = SeverityInfo1
+ SeverityWarn = SeverityWarn1
+ SeverityError = SeverityError1
+ SeverityFatal = SeverityFatal1
+)
diff --git a/vendor/go.opentelemetry.io/otel/log/severity_string.go b/vendor/go.opentelemetry.io/otel/log/severity_string.go
new file mode 100644
index 000000000..4c20fa5e8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/log/severity_string.go
@@ -0,0 +1,47 @@
+// Code generated by "stringer -type=Severity -linecomment"; DO NOT EDIT.
+
+package log
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SeverityUndefined-0]
+ _ = x[SeverityTrace1-1]
+ _ = x[SeverityTrace2-2]
+ _ = x[SeverityTrace3-3]
+ _ = x[SeverityTrace4-4]
+ _ = x[SeverityDebug1-5]
+ _ = x[SeverityDebug2-6]
+ _ = x[SeverityDebug3-7]
+ _ = x[SeverityDebug4-8]
+ _ = x[SeverityInfo1-9]
+ _ = x[SeverityInfo2-10]
+ _ = x[SeverityInfo3-11]
+ _ = x[SeverityInfo4-12]
+ _ = x[SeverityWarn1-13]
+ _ = x[SeverityWarn2-14]
+ _ = x[SeverityWarn3-15]
+ _ = x[SeverityWarn4-16]
+ _ = x[SeverityError1-17]
+ _ = x[SeverityError2-18]
+ _ = x[SeverityError3-19]
+ _ = x[SeverityError4-20]
+ _ = x[SeverityFatal1-21]
+ _ = x[SeverityFatal2-22]
+ _ = x[SeverityFatal3-23]
+ _ = x[SeverityFatal4-24]
+}
+
+const _Severity_name = "UNDEFINEDTRACETRACE2TRACE3TRACE4DEBUGDEBUG2DEBUG3DEBUG4INFOINFO2INFO3INFO4WARNWARN2WARN3WARN4ERRORERROR2ERROR3ERROR4FATALFATAL2FATAL3FATAL4"
+
+var _Severity_index = [...]uint8{0, 9, 14, 20, 26, 32, 37, 43, 49, 55, 59, 64, 69, 74, 78, 83, 88, 93, 98, 104, 110, 116, 121, 127, 133, 139}
+
+func (i Severity) String() string {
+ if i < 0 || i >= Severity(len(_Severity_index)-1) {
+ return "Severity(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Severity_name[_Severity_index[i]:_Severity_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md
new file mode 100644
index 000000000..2e0fb15e2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md
@@ -0,0 +1,176 @@
+# Logs SDK
+
+## Abstract
+
+`go.opentelemetry.io/otel/sdk/log` provides a Logs SDK compliant with the
+[specification](https://opentelemetry.io/docs/specs/otel/logs/sdk/).
+
+The prototype was created in
+[#4955](https://github.com/open-telemetry/opentelemetry-go/pull/4955).
+
+## Background
+
+The goal is to design the exported API of the SDK to have low performance
+overhead. Most importantly, have a design that reduces the amount of heap
+allocations and even make it possible to have a zero-allocation implementation.
+Eliminating the amount of heap allocations reduces the GC pressure which can
+produce some of the largest improvements in performance.[^1]
+
+The main and recommended use case is to configure the SDK to use an OTLP
+exporter with a batch processor.[^2] Therefore, the implementation aims to be
+high-performant in this scenario. Some users that require high throughput may
+also want to use e.g. a [user_events](https://docs.kernel.org/trace/user_events.html),
+[LTTng](https://lttng.org/docs/v2.13/#doc-tracing-your-own-user-application)
+or [ETW](https://learn.microsoft.com/en-us/windows/win32/etw/about-event-tracing)
+exporter with a simple processor. Users may also want to use
+[OTLP File](https://opentelemetry.io/docs/specs/otel/protocol/file-exporter/)
+or [Standard Output](https://opentelemetry.io/docs/specs/otel/logs/sdk_exporters/stdout/)
+exporter in order to emit logs to standard output/error or files.
+
+## Modules structure
+
+The SDK is published as a single `go.opentelemetry.io/otel/sdk/log` Go module.
+
+The exporters are going to be published as the following Go modules:
+
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`
+
+## LoggerProvider
+
+The [LoggerProvider](https://opentelemetry.io/docs/specs/otel/logs/sdk/#loggerprovider)
+is implemented as `LoggerProvider` struct in [provider.go](provider.go).
+
+## LogRecord limits
+
+The [LogRecord limits](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecord-limits)
+can be configured using the following options:
+
+```go
+func WithAttributeCountLimit(limit int) LoggerProviderOption
+func WithAttributeValueLengthLimit(limit int) LoggerProviderOption
+```
+
+The limits can be also configured using the `OTEL_LOGRECORD_*` environment variables as
+[defined by the specification](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#logrecord-limits).
+
+### Processor
+
+The [LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor)
+is defined as `Processor` interface in [processor.go](processor.go).
+
+The user sets processors for the `LoggerProvider` using
+`func WithProcessor(processor Processor) LoggerProviderOption`.
+
+The user can configure custom processors and decorate built-in processors.
+
+The specification may add new operations to the
+[LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor).
+If it happens, [CONTRIBUTING.md](../../CONTRIBUTING.md#how-to-change-other-interfaces)
+describes how the SDK can be extended in a backwards-compatible way.
+
+### SimpleProcessor
+
+The [Simple processor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#simple-processor)
+is implemented as `SimpleProcessor` struct in [simple.go](simple.go).
+
+### BatchProcessor
+
+The [Batching processor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#batching-processor)
+is implemented as `BatchProcessor` struct in [batch.go](batch.go).
+
+The `Batcher` can be also configured using the `OTEL_BLRP_*` environment variables as
+[defined by the specification](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#batch-logrecord-processor).
+
+### Exporter
+
+The [LogRecordExporter](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter)
+is defined as `Exporter` interface in [exporter.go](exporter.go).
+
+The slice passed to `Export` must not be retained by the implementation
+(like e.g. [`io.Writer`](https://pkg.go.dev/io#Writer))
+so that the caller can reuse the passed slice
+(e.g. using [`sync.Pool`](https://pkg.go.dev/sync#Pool))
+to avoid heap allocations on each call.
+
+The specification may add new operations to the
+[LogRecordExporter](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter).
+If it happens, [CONTRIBUTING.md](../../CONTRIBUTING.md#how-to-change-other-interfaces)
+describes how the SDK can be extended in a backwards-compatible way.
+
+### Record
+
+The [ReadWriteLogRecord](https://opentelemetry.io/docs/specs/otel/logs/sdk/#readwritelogrecord)
+is defined as `Record` struct in [record.go](record.go).
+
+The `Record` is designed similarly to [`log.Record`](https://pkg.go.dev/go.opentelemetry.io/otel/log#Record)
+in order to reduce the number of heap allocations when processing attributes.
+
+The SDK does not have an additional definition of
+[ReadableLogRecord](https://opentelemetry.io/docs/specs/otel/logs/sdk/#readablelogrecord)
+as the specification does not say that the exporters must not be able to modify
+the log records. It simply requires them to be able to read the log records.
+Having less abstractions reduces the API surface and makes the design simpler.
+
+## Benchmarking
+
+The benchmarks are supposed to test end-to-end scenarios
+and avoid I/O that could affect the stability of the results.
+
+The benchmark results can be found in [the prototype](https://github.com/open-telemetry/opentelemetry-go/pull/4955).
+
+## Rejected alternatives
+
+### Represent both LogRecordProcessor and LogRecordExporter as Exporter
+
+Because the [LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor)
+and the [LogRecordExporter](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter)
+abstractions are so similar, there was a proposal to unify them under
+single `Exporter` interface.[^3]
+
+However, introducing a `Processor` interface makes it easier
+to create custom processor decorators[^4]
+and makes the design more aligned with the specification.
+
+### Embed log.Record
+
+Because [`Record`](#record) and [`log.Record`](https://pkg.go.dev/go.opentelemetry.io/otel/log#Record)
+are very similar, there was a proposal to embed `log.Record` in `Record` definition.
+
+[`log.Record`](https://pkg.go.dev/go.opentelemetry.io/otel/log#Record)
+supports only adding attributes.
+In the SDK, we also need to be able to modify the attributes (e.g. removal)
+provided via API.
+
+Moreover, it is safer to have these abstractions decoupled.
+E.g. there can be a need for some fields that can be set via API and cannot be modified by the processors.
+
+### Processor.OnEmit to accept Record values
+
+There was a proposal to make the [Processor](#processor)'s `OnEmit`
+to accept a [Record](#record) value instead of a pointer to reduce allocations
+as well as to have design similar to [`slog.Handler`](https://pkg.go.dev/log/slog#Handler).
+
+There have been long discussions within the OpenTelemetry Specification SIG[^5]
+about whether such a design would comply with the specification. The summary
+was that the current processor design flaws are present in other languages as
+well. Therefore, it would be favorable to introduce new processing concepts
+(e.g. chaining processors) in the specification that would coexist with the
+current "mutable" processor design.
+
+The performance disadvantages caused by using a pointer (which at the time of
+writing causes an additional heap allocation) may be mitigated by future
+versions of the Go compiler, thanks to improved escape analysis and
+profile-guided optimization (PGO)[^6].
+
+On the other hand, [Processor](#processor)'s `Enabled` is fine to accept
+a [Record](#record) value as the processors should not mutate the passed
+parameters.
+
+[^1]: [A Guide to the Go Garbage Collector](https://tip.golang.org/doc/gc-guide)
+[^2]: [OpenTelemetry Logging](https://opentelemetry.io/docs/specs/otel/logs)
+[^3]: [Conversation on representing LogRecordProcessor and LogRecordExporter via a single Exporter interface](https://github.com/open-telemetry/opentelemetry-go/pull/4954#discussion_r1515050480)
+[^4]: [Introduce Processor](https://github.com/pellared/opentelemetry-go/pull/9)
+[^5]: [Log record mutations do not have to be visible in next registered processors](https://github.com/open-telemetry/opentelemetry-specification/pull/4067)
+[^6]: [Profile-guided optimization](https://go.dev/doc/pgo)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/README.md b/vendor/go.opentelemetry.io/otel/sdk/log/README.md
new file mode 100644
index 000000000..729aca091
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/README.md
@@ -0,0 +1,3 @@
+# Log SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/log)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/log)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go
new file mode 100644
index 000000000..28c969262
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go
@@ -0,0 +1,477 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "context"
+ "errors"
+ "slices"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+const (
+ dfltMaxQSize = 2048
+ dfltExpInterval = time.Second
+ dfltExpTimeout = 30 * time.Second
+ dfltExpMaxBatchSize = 512
+ dfltExpBufferSize = 1
+
+ envarMaxQSize = "OTEL_BLRP_MAX_QUEUE_SIZE"
+ envarExpInterval = "OTEL_BLRP_SCHEDULE_DELAY"
+ envarExpTimeout = "OTEL_BLRP_EXPORT_TIMEOUT"
+ envarExpMaxBatchSize = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
+)
+
+// Compile-time check BatchProcessor implements Processor.
+var _ Processor = (*BatchProcessor)(nil)
+
+// BatchProcessor is a processor that exports batches of log records.
+//
+// Use [NewBatchProcessor] to create a BatchProcessor. An empty BatchProcessor
+// is shut down by default, no records will be batched or exported.
+type BatchProcessor struct {
+ // The BatchProcessor is designed to provide the highest throughput of
+ // log records possible while being compatible with OpenTelemetry. The
+ // entry point of log records is the OnEmit method. This method is designed
+ // to receive records as fast as possible while still honoring shutdown
+ // commands. All records received are enqueued to queue.
+ //
+ // In order to block OnEmit as little as possible, a separate "poll"
+ // goroutine is spawned at the creation of a BatchProcessor. This
+ // goroutine is responsible for batching the queue at regular polled
+ // intervals, or when it is directly signaled to.
+ //
+ // To keep the polling goroutine from backing up, all batches it makes are
+ // exported with a bufferedExporter. This exporter allows the poll
+ // goroutine to enqueue an export payload that will be handled in a
+ // separate goroutine dedicated to the export. This asynchronous behavior
+ // allows the poll goroutine to maintain accurate interval polling.
+ //
+ // ___BatchProcessor____ __Poll Goroutine__ __Export Goroutine__
+ // || || || || || ||
+ // || ********** || || || || ********** ||
+ // || Records=>* OnEmit * || || | - ticker || || * export * ||
+ // || ********** || || | - trigger || || ********** ||
+ // || || || || | || || || ||
+ // || || || || | || || || ||
+ // || __________\/___ || || |*********** || || ______/\_______ ||
+ // || (____queue______)>=||=||===|* batch *===||=||=>[_export_buffer_] ||
+ // || || || |*********** || || ||
+ // ||_____________________|| ||__________________|| ||____________________||
+ //
+ //
+ // The "release valve" in this processing is the record queue. This queue
+ // is a ring buffer. It will overwrite the oldest records first when writes
+ // to OnEmit are made faster than the queue can be flushed. If batches
+ // cannot be flushed to the export buffer, the records will remain in the
+ // queue.
+
+ // exporter is the bufferedExporter all batches are exported with.
+ exporter *bufferExporter
+
+ // q is the active queue of records that have not yet been exported.
+ q *queue
+ // batchSize is the minimum number of records needed before an export is
+ // triggered (unless the interval expires).
+ batchSize int
+
+ // pollTrigger triggers the poll goroutine to flush a batch from the queue.
+ // This is sent to when it is known that the queue contains at least one
+ // complete batch.
+ //
+ // When a send is made to the channel, the poll loop will be reset after
+ // the flush. If there is still enough records in the queue for another
+ // batch the reset of the poll loop will automatically re-trigger itself.
+ // There is no need for the original sender to monitor and resend.
+ pollTrigger chan struct{}
+ // pollKill kills the poll goroutine. This is only expected to be closed
+ // once by the Shutdown method.
+ pollKill chan struct{}
+ // pollDone signals the poll goroutine has completed.
+ pollDone chan struct{}
+
+ // stopped holds the stopped state of the BatchProcessor.
+ stopped atomic.Bool
+
+ noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+// NewBatchProcessor decorates the provided exporter
+// so that the log records are batched before exporting.
+//
+// All of the exporter's methods are called synchronously.
+func NewBatchProcessor(exporter Exporter, opts ...BatchProcessorOption) *BatchProcessor {
+ cfg := newBatchConfig(opts)
+ if exporter == nil {
+ // Do not panic on nil export.
+ exporter = defaultNoopExporter
+ }
+ // Order is important here. Wrap the timeoutExporter with the chunkExporter
+ // to ensure each export completes in timeout (instead of all chunked
+ // exports).
+ exporter = newTimeoutExporter(exporter, cfg.expTimeout.Value)
+ // Use a chunkExporter to ensure ForceFlush and Shutdown calls are batched
+ // appropriately on export.
+ exporter = newChunkExporter(exporter, cfg.expMaxBatchSize.Value)
+
+ b := &BatchProcessor{
+ exporter: newBufferExporter(exporter, cfg.expBufferSize.Value),
+
+ q: newQueue(cfg.maxQSize.Value),
+ batchSize: cfg.expMaxBatchSize.Value,
+ pollTrigger: make(chan struct{}, 1),
+ pollKill: make(chan struct{}),
+ }
+ b.pollDone = b.poll(cfg.expInterval.Value)
+ return b
+}
+
+// poll spawns a goroutine to handle interval polling and batch exporting. The
+// returned done chan is closed when the spawned goroutine completes.
+func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) {
+ done = make(chan struct{})
+
+ ticker := time.NewTicker(interval)
+ // TODO: investigate using a sync.Pool instead of cloning.
+ buf := make([]Record, b.batchSize)
+ go func() {
+ defer close(done)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ case <-b.pollTrigger:
+ ticker.Reset(interval)
+ case <-b.pollKill:
+ return
+ }
+
+ if d := b.q.Dropped(); d > 0 {
+ global.Warn("dropped log records", "dropped", d)
+ }
+
+ qLen := b.q.TryDequeue(buf, func(r []Record) bool {
+ ok := b.exporter.EnqueueExport(r)
+ if ok {
+ buf = slices.Clone(buf)
+ }
+ return ok
+ })
+ if qLen >= b.batchSize {
+ // There is another full batch ready. Immediately trigger
+ // another export attempt.
+ select {
+ case b.pollTrigger <- struct{}{}:
+ default:
+ // Another flush signal already received.
+ }
+ }
+ }
+ }()
+ return done
+}
+
+// OnEmit batches provided log record.
+func (b *BatchProcessor) OnEmit(_ context.Context, r *Record) error {
+ if b.stopped.Load() || b.q == nil {
+ return nil
+ }
+ // The record is cloned so that changes done by subsequent processors
+ // are not going to lead to a data race.
+ if n := b.q.Enqueue(r.Clone()); n >= b.batchSize {
+ select {
+ case b.pollTrigger <- struct{}{}:
+ default:
+ // Flush chan full. The poll goroutine will handle this by
+ // re-sending any trigger until the queue has less than batchSize
+ // records.
+ }
+ }
+ return nil
+}
+
+// Shutdown flushes queued log records and shuts down the decorated exporter.
+func (b *BatchProcessor) Shutdown(ctx context.Context) error {
+ if b.stopped.Swap(true) || b.q == nil {
+ return nil
+ }
+
+ // Stop the poll goroutine.
+ close(b.pollKill)
+ select {
+ case <-b.pollDone:
+ case <-ctx.Done():
+ // Out of time.
+ return errors.Join(ctx.Err(), b.exporter.Shutdown(ctx))
+ }
+
+ // Flush remaining queued before exporter shutdown.
+ err := b.exporter.Export(ctx, b.q.Flush())
+ return errors.Join(err, b.exporter.Shutdown(ctx))
+}
+
+var errPartialFlush = errors.New("partial flush: export buffer full")
+
+// Used for testing.
+var ctxErr = func(ctx context.Context) error {
+ return ctx.Err()
+}
+
+// ForceFlush flushes queued log records and flushes the decorated exporter.
+func (b *BatchProcessor) ForceFlush(ctx context.Context) error {
+ if b.stopped.Load() || b.q == nil {
+ return nil
+ }
+
+ buf := make([]Record, b.q.cap)
+ notFlushed := func() bool {
+ var flushed bool
+ _ = b.q.TryDequeue(buf, func(r []Record) bool {
+ flushed = b.exporter.EnqueueExport(r)
+ return flushed
+ })
+ return !flushed
+ }
+ var err error
+ // For as long as ctx allows, try to make a single flush of the queue.
+ for notFlushed() {
+ // Use ctxErr instead of calling ctx.Err directly so we can test
+ // the partial error return.
+ if e := ctxErr(ctx); e != nil {
+ err = errors.Join(e, errPartialFlush)
+ break
+ }
+ }
+ return errors.Join(err, b.exporter.ForceFlush(ctx))
+}
+
+// queue holds a queue of logging records.
+//
+// When the queue becomes full, the oldest records in the queue are
+// overwritten.
+type queue struct {
+ sync.Mutex
+
+ dropped atomic.Uint64
+ cap, len int
+ read, write *ring
+}
+
+func newQueue(size int) *queue {
+ r := newRing(size)
+ return &queue{
+ cap: size,
+ read: r,
+ write: r,
+ }
+}
+
+// Dropped returns the number of Records dropped during enqueueing since the
+// last time Dropped was called.
+func (q *queue) Dropped() uint64 {
+ return q.dropped.Swap(0)
+}
+
+// Enqueue adds r to the queue. The queue size, including the addition of r, is
+// returned.
+//
+// If enqueueing r will exceed the capacity of q, the oldest Record held in q
+// will be dropped and r retained.
+func (q *queue) Enqueue(r Record) int {
+ q.Lock()
+ defer q.Unlock()
+
+ q.write.Value = r
+ q.write = q.write.Next()
+
+ q.len++
+ if q.len > q.cap {
+ // Overflow. Advance read to be the new "oldest".
+ q.len = q.cap
+ q.read = q.read.Next()
+ q.dropped.Add(1)
+ }
+ return q.len
+}
+
+// TryDequeue attempts to dequeue up to len(buf) Records. The available Records
+// will be assigned into buf and passed to write. If write fails, returning
+// false, the Records will not be removed from the queue. If write succeeds,
+// returning true, the dequeued Records are removed from the queue. The number
+// of Records remaining in the queue are returned.
+//
+// When write is called the lock of q is held. The write function must not call
+// other methods of this q that acquire the lock.
+func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int {
+ q.Lock()
+ defer q.Unlock()
+
+ origRead := q.read
+
+ n := min(len(buf), q.len)
+ for i := 0; i < n; i++ {
+ buf[i] = q.read.Value
+ q.read = q.read.Next()
+ }
+
+ if write(buf[:n]) {
+ q.len -= n
+ } else {
+ q.read = origRead
+ }
+ return q.len
+}
+
+// Flush returns all the Records held in the queue and resets it to be
+// empty.
+func (q *queue) Flush() []Record {
+ q.Lock()
+ defer q.Unlock()
+
+ out := make([]Record, q.len)
+ for i := range out {
+ out[i] = q.read.Value
+ q.read = q.read.Next()
+ }
+ q.len = 0
+
+ return out
+}
+
+type batchConfig struct {
+ maxQSize setting[int]
+ expInterval setting[time.Duration]
+ expTimeout setting[time.Duration]
+ expMaxBatchSize setting[int]
+ expBufferSize setting[int]
+}
+
+func newBatchConfig(options []BatchProcessorOption) batchConfig {
+ var c batchConfig
+ for _, o := range options {
+ c = o.apply(c)
+ }
+
+ c.maxQSize = c.maxQSize.Resolve(
+ clearLessThanOne[int](),
+ getenv[int](envarMaxQSize),
+ clearLessThanOne[int](),
+ fallback[int](dfltMaxQSize),
+ )
+ c.expInterval = c.expInterval.Resolve(
+ clearLessThanOne[time.Duration](),
+ getenv[time.Duration](envarExpInterval),
+ clearLessThanOne[time.Duration](),
+ fallback[time.Duration](dfltExpInterval),
+ )
+ c.expTimeout = c.expTimeout.Resolve(
+ clearLessThanOne[time.Duration](),
+ getenv[time.Duration](envarExpTimeout),
+ clearLessThanOne[time.Duration](),
+ fallback[time.Duration](dfltExpTimeout),
+ )
+ c.expMaxBatchSize = c.expMaxBatchSize.Resolve(
+ clearLessThanOne[int](),
+ getenv[int](envarExpMaxBatchSize),
+ clearLessThanOne[int](),
+ clampMax[int](c.maxQSize.Value),
+ fallback[int](dfltExpMaxBatchSize),
+ )
+ c.expBufferSize = c.expBufferSize.Resolve(
+ clearLessThanOne[int](),
+ fallback[int](dfltExpBufferSize),
+ )
+
+ return c
+}
+
+// BatchProcessorOption applies a configuration to a [BatchProcessor].
+type BatchProcessorOption interface {
+ apply(batchConfig) batchConfig
+}
+
+type batchOptionFunc func(batchConfig) batchConfig
+
+func (fn batchOptionFunc) apply(c batchConfig) batchConfig {
+ return fn(c)
+}
+
+// WithMaxQueueSize sets the maximum queue size used by the Batcher.
+// After the size is reached log records are dropped.
+//
+// If the OTEL_BLRP_MAX_QUEUE_SIZE environment variable is set,
+// and this option is not passed, that variable value will be used.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, 2048 will be used.
+// The default value is also used when the provided value is less than one.
+func WithMaxQueueSize(size int) BatchProcessorOption {
+ return batchOptionFunc(func(cfg batchConfig) batchConfig {
+ cfg.maxQSize = newSetting(size)
+ return cfg
+ })
+}
+
+// WithExportInterval sets the maximum duration between batched exports.
+//
+// If the OTEL_BLRP_SCHEDULE_DELAY environment variable is set,
+// and this option is not passed, that variable value will be used.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, 1s will be used.
+// The default value is also used when the provided value is less than one.
+func WithExportInterval(d time.Duration) BatchProcessorOption {
+ return batchOptionFunc(func(cfg batchConfig) batchConfig {
+ cfg.expInterval = newSetting(d)
+ return cfg
+ })
+}
+
+// WithExportTimeout sets the duration after which a batched export is canceled.
+//
+// If the OTEL_BLRP_EXPORT_TIMEOUT environment variable is set,
+// and this option is not passed, that variable value will be used.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, 30s will be used.
+// The default value is also used when the provided value is less than one.
+func WithExportTimeout(d time.Duration) BatchProcessorOption {
+ return batchOptionFunc(func(cfg batchConfig) batchConfig {
+ cfg.expTimeout = newSetting(d)
+ return cfg
+ })
+}
+
+// WithExportMaxBatchSize sets the maximum batch size of every export.
+// A batch will be split into multiple exports to not exceed this size.
+//
+// If the OTEL_BLRP_MAX_EXPORT_BATCH_SIZE environment variable is set,
+// and this option is not passed, that variable value will be used.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, 512 will be used.
+// The default value is also used when the provided value is less than one.
+func WithExportMaxBatchSize(size int) BatchProcessorOption {
+ return batchOptionFunc(func(cfg batchConfig) batchConfig {
+ cfg.expMaxBatchSize = newSetting(size)
+ return cfg
+ })
+}
+
+// WithExportBufferSize sets the batch buffer size.
+// Batches will be temporarily kept in a memory buffer until they are exported.
+//
+// By default, a value of 1 will be used.
+// The default value is also used when the provided value is less than one.
+func WithExportBufferSize(size int) BatchProcessorOption {
+ return batchOptionFunc(func(cfg batchConfig) batchConfig {
+ cfg.expBufferSize = newSetting(size)
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
new file mode 100644
index 000000000..6a1f1b0e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package log provides the OpenTelemetry Logs SDK.
+
+See https://opentelemetry.io/docs/concepts/signals/logs/ for information
+about the concept of OpenTelemetry Logs and
+https://opentelemetry.io/docs/concepts/components/ for more information
+about OpenTelemetry SDKs.
+
+The entry point for the log package is [NewLoggerProvider].
+[LoggerProvider] is the object that all Bridge API calls use to create
+Loggers, and ultimately emit log records.
+Also, it is an object that should be used to
+control the life-cycle (start, flush, and shutdown) of the Logs SDK.
+
+A LoggerProvider needs to be configured to process the log records. This is
+done by configuring it with a [Processor] implementation using [WithProcessor].
+The log package provides the [BatchProcessor] and [SimpleProcessor]
+that are configured with an [Exporter] implementation which
+exports the log records to given destination. See
+[go.opentelemetry.io/otel/exporters] for exporters that can be used with these
+Processors.
+
+The data generated by a LoggerProvider needs to include information about its
+origin. A LoggerProvider needs to be configured with a Resource, by using
+[WithResource], to include this information. This Resource
+should be used to describe the unique runtime environment instrumented code
+is being run on. That way when multiple instances of the code are collected
+at a single endpoint their origin is decipherable.
+
+See [go.opentelemetry.io/otel/log] for more information about
+the OpenTelemetry Logs Bridge API.
+*/
+package log // import "go.opentelemetry.io/otel/sdk/log"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
new file mode 100644
index 000000000..e4e3c5402
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
@@ -0,0 +1,321 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel"
+)
+
+// Exporter handles the delivery of log records to external receivers.
+type Exporter interface {
+ // Export transmits log records to a receiver.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // All retry logic must be contained in this function. The SDK does not
+ // implement any retry logic. All errors returned by this function are
+ // considered unrecoverable and will be reported to a configured error
+ // Handler.
+ //
+ // Implementations must not retain the records slice.
+ //
+ // Before modifying a Record, the implementation must use Record.Clone
+ // to create a copy that shares no state with the original.
+ //
+ // Export should never be called concurrently with other Export calls.
+ // However, it may be called concurrently with other methods.
+ Export(ctx context.Context, records []Record) error
+
+ // Shutdown is called when the SDK shuts down. Any cleanup or release of
+ // resources held by the exporter should be done in this call.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // After Shutdown is called, calls to Export, Shutdown, or ForceFlush
+ // should perform no operation and return nil error.
+ //
+ // Shutdown may be called concurrently with itself or with other methods.
+ Shutdown(ctx context.Context) error
+
+ // ForceFlush exports log records to the configured Exporter that have not yet
+ // been exported.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // ForceFlush may be called concurrently with itself or with other methods.
+ ForceFlush(ctx context.Context) error
+}
+
+var defaultNoopExporter = &noopExporter{}
+
+type noopExporter struct{}
+
+func (noopExporter) Export(context.Context, []Record) error { return nil }
+
+func (noopExporter) Shutdown(context.Context) error { return nil }
+
+func (noopExporter) ForceFlush(context.Context) error { return nil }
+
+// chunkExporter wraps an Exporter's Export method so it is called with
+// appropriately sized export payloads. Any payload larger than a defined size
+// is chunked into smaller payloads and exported sequentially.
+type chunkExporter struct {
+ Exporter
+
+ // size is the maximum batch size exported.
+ size int
+}
+
+// newChunkExporter wraps exporter. Calls to the Export will have their records
+// payload chunked so they do not exceed size. If size is less than or equal
+// to 0, exporter is returned directly.
+func newChunkExporter(exporter Exporter, size int) Exporter {
+ if size <= 0 {
+ return exporter
+ }
+ return &chunkExporter{Exporter: exporter, size: size}
+}
+
+// Export exports records in chunks no larger than c.size.
+func (c chunkExporter) Export(ctx context.Context, records []Record) error {
+ n := len(records)
+ for i, j := 0, min(c.size, n); i < n; i, j = i+c.size, min(j+c.size, n) {
+ if err := c.Exporter.Export(ctx, records[i:j]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// timeoutExporter wraps an Exporter and ensures any call to Export will have a
+// timeout for the context.
+type timeoutExporter struct {
+ Exporter
+
+ // timeout is the maximum time an export is attempted.
+ timeout time.Duration
+}
+
+// newTimeoutExporter wraps exporter with an Exporter that limits the context
+// lifetime passed to Export to be timeout. If timeout is less than or equal to
+// zero, exporter will be returned directly.
+func newTimeoutExporter(exp Exporter, timeout time.Duration) Exporter {
+ if timeout <= 0 {
+ return exp
+ }
+ return &timeoutExporter{Exporter: exp, timeout: timeout}
+}
+
+// Export sets the timeout of ctx before calling the Exporter e wraps.
+func (e *timeoutExporter) Export(ctx context.Context, records []Record) error {
+ ctx, cancel := context.WithTimeout(ctx, e.timeout)
+ defer cancel()
+ return e.Exporter.Export(ctx, records)
+}
+
+// exportSync exports all data from input using exporter in a spawned
+// goroutine. The returned chan will be closed when the spawned goroutine
+// completes.
+func exportSync(input <-chan exportData, exporter Exporter) (done chan struct{}) {
+ done = make(chan struct{})
+ go func() {
+ defer close(done)
+ for data := range input {
+ data.DoExport(exporter.Export)
+ }
+ }()
+ return done
+}
+
+// exportData is data related to an export.
+type exportData struct {
+ ctx context.Context
+ records []Record
+
+ // respCh is the channel any error returned from the export will be sent
+ // on. If this is nil, and the export error is non-nil, the error will
+ // passed to the OTel error handler.
+ respCh chan<- error
+}
+
+// DoExport calls exportFn with the data contained in e. The error response
+// will be returned on e's respCh if not nil. The error will be handled by the
+// default OTel error handle if it is not nil and respCh is nil or full.
+func (e exportData) DoExport(exportFn func(context.Context, []Record) error) {
+ if len(e.records) == 0 {
+ e.respond(nil)
+ return
+ }
+
+ e.respond(exportFn(e.ctx, e.records))
+}
+
+func (e exportData) respond(err error) {
+ select {
+ case e.respCh <- err:
+ default:
+ // e.respCh is nil or busy, default to otel.Handler.
+ if err != nil {
+ otel.Handle(err)
+ }
+ }
+}
+
+// bufferExporter provides asynchronous and synchronous export functionality by
+// buffering export requests.
+type bufferExporter struct {
+ Exporter
+
+ input chan exportData
+ inputMu sync.Mutex
+
+ done chan struct{}
+ stopped atomic.Bool
+}
+
+// newBufferExporter returns a new bufferExporter that wraps exporter. The
+// returned bufferExporter will buffer at most size number of export requests.
+// If size is less than zero, zero will be used (i.e. only synchronous
+// exporting will be supported).
+func newBufferExporter(exporter Exporter, size int) *bufferExporter {
+ if size < 0 {
+ size = 0
+ }
+ input := make(chan exportData, size)
+ return &bufferExporter{
+ Exporter: exporter,
+
+ input: input,
+ done: exportSync(input, exporter),
+ }
+}
+
+var errStopped = errors.New("exporter stopped")
+
+func (e *bufferExporter) enqueue(ctx context.Context, records []Record, rCh chan<- error) error {
+ data := exportData{ctx, records, rCh}
+
+ e.inputMu.Lock()
+ defer e.inputMu.Unlock()
+
+ // Check stopped before enqueueing now that e.inputMu is held. This
+ // prevents sends on a closed chan when Shutdown is called concurrently.
+ if e.stopped.Load() {
+ return errStopped
+ }
+
+ select {
+ case e.input <- data:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return nil
+}
+
+// EnqueueExport enqueues an export of records to be performed
+// asynchronously. This will return true if the records are
+// successfully enqueued (or the bufferExporter is shut down), false otherwise.
+//
+// The passed records are held after this call returns.
+func (e *bufferExporter) EnqueueExport(records []Record) bool {
+ if len(records) == 0 {
+ // Nothing to enqueue, do not waste input space.
+ return true
+ }
+
+ data := exportData{ctx: context.Background(), records: records}
+
+ e.inputMu.Lock()
+ defer e.inputMu.Unlock()
+
+ // Check stopped before enqueueing now that e.inputMu is held. This
+ // prevents sends on a closed chan when Shutdown is called concurrently.
+ if e.stopped.Load() {
+ return true
+ }
+
+ select {
+ case e.input <- data:
+ return true
+ default:
+ return false
+ }
+}
+
+// Export synchronously exports records in the context of ctx. This will not
+// return until the export has been completed.
+func (e *bufferExporter) Export(ctx context.Context, records []Record) error {
+ if len(records) == 0 {
+ return nil
+ }
+
+ resp := make(chan error, 1)
+ err := e.enqueue(ctx, records, resp)
+ if err != nil {
+ if errors.Is(err, errStopped) {
+ return nil
+ }
+ return fmt.Errorf("%w: dropping %d records", err, len(records))
+ }
+
+ select {
+ case err := <-resp:
+ return err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// ForceFlush flushes buffered exports. Any existing exports that are buffered
+// are flushed before this returns.
+func (e *bufferExporter) ForceFlush(ctx context.Context) error {
+ resp := make(chan error, 1)
+ err := e.enqueue(ctx, nil, resp)
+ if err != nil {
+ if errors.Is(err, errStopped) {
+ return nil
+ }
+ return err
+ }
+
+ select {
+ case <-resp:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return e.Exporter.ForceFlush(ctx)
+}
+
+// Shutdown shuts down e.
+//
+// Any buffered exports are flushed before this returns.
+//
+// All calls to EnqueueExport or Export will return nil without any export
+// after this is called.
+func (e *bufferExporter) Shutdown(ctx context.Context) error {
+ if e.stopped.Swap(true) {
+ return nil
+ }
+ e.inputMu.Lock()
+ defer e.inputMu.Unlock()
+
+ // No more sends will be made.
+ close(e.input)
+ select {
+ case <-e.done:
+ case <-ctx.Done():
+ return errors.Join(ctx.Err(), e.Exporter.Shutdown(ctx))
+ }
+ return e.Exporter.Shutdown(ctx)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go
new file mode 100644
index 000000000..5b99a4a99
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go
@@ -0,0 +1,62 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// FilterProcessor is a [Processor] that knows, and can identify, what [Record]
+// it will process or drop when it is passed to [Processor.OnEmit].
+//
+// This is useful for users that want to know if a [log.Record]
+// will be processed or dropped before they perform complex operations to
+// construct the [log.Record].
+//
+// The SDK's Logger.Enabled returns false
+// if all the registered Processors implement FilterProcessor
+// and they all return false.
+//
+// Processor implementations that choose to support this by satisfying this
+// interface are expected to re-evaluate the [Record] passed to [Processor.OnEmit],
+// it is not expected that the caller to OnEmit will use the functionality
+// from this interface prior to calling OnEmit.
+//
+// See the [go.opentelemetry.io/contrib/processors/minsev] for an example use-case.
+// It provides a Processor used to filter out [Record]
+// that has a [log.Severity] below a threshold.
+type FilterProcessor interface {
+ // Enabled returns whether the Processor will process for the given context
+ // and param.
+ //
+ // The passed param is likely to be a partial record information being
+ // provided (e.g a param with only the Severity set).
+ // If a Processor needs more information than is provided, it
+ // is said to be in an indeterminate state (see below).
+ //
+ // The returned value will be true when the Processor will process for the
+ // provided context and param, and will be false if the Logger will not
+ // emit. The returned value may be true or false in an indeterminate state.
+ // An implementation should default to returning true for an indeterminate
+ // state, but may return false if valid reasons in particular circumstances
+ // exist (e.g. performance, correctness).
+ //
+ // The param should not be held by the implementation. A copy should be
+ // made if the param needs to be held after the call returns.
+ //
+ // Implementations of this method need to be safe for a user to call
+ // concurrently.
+ Enabled(ctx context.Context, param EnabledParameters) bool
+}
+
+// EnabledParameters represents payload for [FilterProcessor]'s Enabled method.
+type EnabledParameters struct {
+ Resource resource.Resource
+ InstrumentationScope instrumentation.Scope
+ Severity log.Severity
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
new file mode 100644
index 000000000..6211d5d92
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
@@ -0,0 +1,110 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/log/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+var now = time.Now
+
+// Compile-time check logger implements log.Logger.
+var _ log.Logger = (*logger)(nil)
+
+type logger struct {
+ embedded.Logger
+
+ provider *LoggerProvider
+ instrumentationScope instrumentation.Scope
+}
+
+func newLogger(p *LoggerProvider, scope instrumentation.Scope) *logger {
+ return &logger{
+ provider: p,
+ instrumentationScope: scope,
+ }
+}
+
+func (l *logger) Emit(ctx context.Context, r log.Record) {
+ newRecord := l.newRecord(ctx, r)
+ for _, p := range l.provider.processors {
+ if err := p.OnEmit(ctx, &newRecord); err != nil {
+ otel.Handle(err)
+ }
+ }
+}
+
+// Enabled returns true if at least one Processor held by the LoggerProvider
+// that created the logger will process for the provided context and param.
+//
+// If it is not possible to definitively determine the param will be
+// processed, true will be returned by default. A value of false will only be
+// returned if it can be positively verified that no Processor will process.
+func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool {
+ p := EnabledParameters{
+ Resource: *l.provider.resource,
+ InstrumentationScope: l.instrumentationScope,
+ Severity: param.Severity,
+ }
+
+ // If there are more Processors than FilterProcessors,
+ // which means not all Processors are FilterProcessors,
+ // we cannot be sure that all Processors will drop the record.
+ // Therefore, return true.
+ //
+ // If all Processors are FilterProcessors, check if any is enabled.
+ return len(l.provider.processors) > len(l.provider.fltrProcessors) || anyEnabled(ctx, p, l.provider.fltrProcessors)
+}
+
+func anyEnabled(ctx context.Context, param EnabledParameters, fltrs []FilterProcessor) bool {
+ for _, f := range fltrs {
+ if f.Enabled(ctx, param) {
+ // At least one Processor will process the Record.
+ return true
+ }
+ }
+ // No Processor will process the record
+ return false
+}
+
+func (l *logger) newRecord(ctx context.Context, r log.Record) Record {
+ sc := trace.SpanContextFromContext(ctx)
+
+ newRecord := Record{
+ eventName: r.EventName(),
+ timestamp: r.Timestamp(),
+ observedTimestamp: r.ObservedTimestamp(),
+ severity: r.Severity(),
+ severityText: r.SeverityText(),
+ body: r.Body(),
+
+ traceID: sc.TraceID(),
+ spanID: sc.SpanID(),
+ traceFlags: sc.TraceFlags(),
+
+ resource: l.provider.resource,
+ scope: &l.instrumentationScope,
+ attributeValueLengthLimit: l.provider.attributeValueLengthLimit,
+ attributeCountLimit: l.provider.attributeCountLimit,
+ }
+
+ // This field SHOULD be set once the event is observed by OpenTelemetry.
+ if newRecord.observedTimestamp.IsZero() {
+ newRecord.observedTimestamp = now()
+ }
+
+ r.WalkAttributes(func(kv log.KeyValue) bool {
+ newRecord.AddAttributes(kv)
+ return true
+ })
+
+ return newRecord
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go
new file mode 100644
index 000000000..c9b306f23
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "context"
+)
+
+// Processor handles the processing of log records.
+//
+// Any of the Processor's methods may be called concurrently with itself
+// or with other methods. It is the responsibility of the Processor to manage
+// this concurrency.
+//
+// See [FilterProcessor] for information about how a Processor can support filtering.
+type Processor interface {
+ // OnEmit is called when a Record is emitted.
+ //
+ // OnEmit will be called independent of Enabled. Implementations need to
+ // validate the arguments themselves before processing.
+ //
+ // Implementation should not interrupt the record processing
+ // if the context is canceled.
+ //
+ // All retry logic must be contained in this function. The SDK does not
+ // implement any retry logic. All errors returned by this function are
+ // considered unrecoverable and will be reported to a configured error
+ // Handler.
+ //
+ // The SDK invokes the processors sequentially in the same order as
+ // they were registered using WithProcessor.
+ // Implementations may synchronously modify the record so that the changes
+ // are visible in the next registered processor.
+ // Notice that Record is not concurrent safe. Therefore, asynchronous
+ // processing may cause race conditions. Use Record.Clone
+ // to create a copy that shares no state with the original.
+ OnEmit(ctx context.Context, record *Record) error
+
+ // Shutdown is called when the SDK shuts down. Any cleanup or release of
+ // resources held by the exporter should be done in this call.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // After Shutdown is called, calls to Export, Shutdown, or ForceFlush
+ // should perform no operation and return nil error.
+ Shutdown(ctx context.Context) error
+
+ // ForceFlush exports log records to the configured Exporter that have not yet
+ // been exported.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ ForceFlush(ctx context.Context) error
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go
new file mode 100644
index 000000000..096944ea1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go
@@ -0,0 +1,256 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/log/embedded"
+ "go.opentelemetry.io/otel/log/noop"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+const (
+ defaultAttrCntLim = 128
+ defaultAttrValLenLim = -1
+
+ envarAttrCntLim = "OTEL_LOGRECORD_ATTRIBUTE_COUNT_LIMIT"
+ envarAttrValLenLim = "OTEL_LOGRECORD_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+)
+
+type providerConfig struct {
+ resource *resource.Resource
+ processors []Processor
+ fltrProcessors []FilterProcessor
+ attrCntLim setting[int]
+ attrValLenLim setting[int]
+}
+
+func newProviderConfig(opts []LoggerProviderOption) providerConfig {
+ var c providerConfig
+ for _, opt := range opts {
+ c = opt.apply(c)
+ }
+
+ if c.resource == nil {
+ c.resource = resource.Default()
+ }
+
+ c.attrCntLim = c.attrCntLim.Resolve(
+ getenv[int](envarAttrCntLim),
+ fallback[int](defaultAttrCntLim),
+ )
+
+ c.attrValLenLim = c.attrValLenLim.Resolve(
+ getenv[int](envarAttrValLenLim),
+ fallback[int](defaultAttrValLenLim),
+ )
+
+ return c
+}
+
+// LoggerProvider handles the creation and coordination of Loggers. All Loggers
+// created by a LoggerProvider will be associated with the same Resource.
+type LoggerProvider struct {
+ embedded.LoggerProvider
+
+ resource *resource.Resource
+ processors []Processor
+ fltrProcessors []FilterProcessor
+ attributeCountLimit int
+ attributeValueLengthLimit int
+
+ loggersMu sync.Mutex
+ loggers map[instrumentation.Scope]*logger
+
+ stopped atomic.Bool
+
+ noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+// Compile-time check LoggerProvider implements log.LoggerProvider.
+var _ log.LoggerProvider = (*LoggerProvider)(nil)
+
+// NewLoggerProvider returns a new and configured LoggerProvider.
+//
+// By default, the returned LoggerProvider is configured with the default
+// Resource and no Processors. Processors cannot be added after a LoggerProvider is
+// created. This means the returned LoggerProvider, one created with no
+// Processors, will perform no operations.
+func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider {
+ cfg := newProviderConfig(opts)
+ return &LoggerProvider{
+ resource: cfg.resource,
+ processors: cfg.processors,
+ fltrProcessors: cfg.fltrProcessors,
+ attributeCountLimit: cfg.attrCntLim.Value,
+ attributeValueLengthLimit: cfg.attrValLenLim.Value,
+ }
+}
+
+// Logger returns a new [log.Logger] with the provided name and configuration.
+//
+// If p is shut down, a [noop.Logger] instance is returned.
+//
+// This method can be called concurrently.
+func (p *LoggerProvider) Logger(name string, opts ...log.LoggerOption) log.Logger {
+ if name == "" {
+ global.Warn("Invalid Logger name.", "name", name)
+ }
+
+ if p.stopped.Load() {
+ return noop.NewLoggerProvider().Logger(name, opts...)
+ }
+
+ cfg := log.NewLoggerConfig(opts...)
+ scope := instrumentation.Scope{
+ Name: name,
+ Version: cfg.InstrumentationVersion(),
+ SchemaURL: cfg.SchemaURL(),
+ Attributes: cfg.InstrumentationAttributes(),
+ }
+
+ p.loggersMu.Lock()
+ defer p.loggersMu.Unlock()
+
+ if p.loggers == nil {
+ l := newLogger(p, scope)
+ p.loggers = map[instrumentation.Scope]*logger{scope: l}
+ return l
+ }
+
+ l, ok := p.loggers[scope]
+ if !ok {
+ l = newLogger(p, scope)
+ p.loggers[scope] = l
+ }
+
+ return l
+}
+
+// Shutdown shuts down the provider and all processors.
+//
+// This method can be called concurrently.
+func (p *LoggerProvider) Shutdown(ctx context.Context) error {
+ stopped := p.stopped.Swap(true)
+ if stopped {
+ return nil
+ }
+
+ var err error
+ for _, p := range p.processors {
+ err = errors.Join(err, p.Shutdown(ctx))
+ }
+ return err
+}
+
+// ForceFlush flushes all processors.
+//
+// This method can be called concurrently.
+func (p *LoggerProvider) ForceFlush(ctx context.Context) error {
+ if p.stopped.Load() {
+ return nil
+ }
+
+ var err error
+ for _, p := range p.processors {
+ err = errors.Join(err, p.ForceFlush(ctx))
+ }
+ return err
+}
+
+// LoggerProviderOption applies a configuration option value to a LoggerProvider.
+type LoggerProviderOption interface {
+ apply(providerConfig) providerConfig
+}
+
+type loggerProviderOptionFunc func(providerConfig) providerConfig
+
+func (fn loggerProviderOptionFunc) apply(c providerConfig) providerConfig {
+ return fn(c)
+}
+
+// WithResource associates a Resource with a LoggerProvider. This Resource
+// represents the entity producing telemetry and is associated with all Loggers
+// the LoggerProvider will create.
+//
+// By default, if this Option is not used, the default Resource from the
+// go.opentelemetry.io/otel/sdk/resource package will be used.
+func WithResource(res *resource.Resource) LoggerProviderOption {
+ return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {
+ var err error
+ cfg.resource, err = resource.Merge(resource.Environment(), res)
+ if err != nil {
+ otel.Handle(err)
+ }
+ return cfg
+ })
+}
+
+// WithProcessor associates Processor with a LoggerProvider.
+//
+// By default, if this option is not used, the LoggerProvider will perform no
+// operations; no data will be exported without a processor.
+//
+// The SDK invokes the processors sequentially in the same order as they were
+// registered.
+//
+// For production, use [NewBatchProcessor] to batch log records before they are exported.
+// For testing and debugging, use [NewSimpleProcessor] to synchronously export log records.
+//
+// See [FilterProcessor] for information about how a Processor can support filtering.
+func WithProcessor(processor Processor) LoggerProviderOption {
+ return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {
+ cfg.processors = append(cfg.processors, processor)
+ if f, ok := processor.(FilterProcessor); ok {
+ cfg.fltrProcessors = append(cfg.fltrProcessors, f)
+ }
+ return cfg
+ })
+}
+
+// WithAttributeCountLimit sets the maximum allowed log record attribute count.
+// Any attribute added to a log record once this limit is reached will be dropped.
+//
+// Setting this to zero means no attributes will be recorded.
+//
+// Setting this to a negative value means no limit is applied.
+//
+// If the OTEL_LOGRECORD_ATTRIBUTE_COUNT_LIMIT environment variable is set,
+// and this option is not passed, that variable value will be used.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, 128 will be used.
+func WithAttributeCountLimit(limit int) LoggerProviderOption {
+ return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {
+ cfg.attrCntLim = newSetting(limit)
+ return cfg
+ })
+}
+
+// WithAttributeValueLengthLimit sets the maximum allowed attribute value length.
+//
+// This limit only applies to string and string slice attribute values.
+// Any string longer than this value will be truncated to this length.
+//
+// Setting this to a negative value means no limit is applied.
+//
+// If the OTEL_LOGRECORD_ATTRIBUTE_VALUE_LENGTH_LIMIT environment variable is set,
+// and this option is not passed, that variable value will be used.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no limit (-1) will be used.
+func WithAttributeValueLengthLimit(limit int) LoggerProviderOption {
+ return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {
+ cfg.attrValLenLim = newSetting(limit)
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/record.go b/vendor/go.opentelemetry.io/otel/sdk/log/record.go
new file mode 100644
index 000000000..a13fcac7b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/record.go
@@ -0,0 +1,518 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "slices"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/log"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// attributesInlineCount is the number of attributes that are efficiently
+// stored in an array within a Record. This value is borrowed from slog which
+// performed a quantitative survey of log library use and found this value to
+// cover 95% of all use-cases (https://go.dev/blog/slog#performance).
+const attributesInlineCount = 5
+
+var logAttrDropped = sync.OnceFunc(func() {
+ global.Warn("limit reached: dropping log Record attributes")
+})
+
+// indexPool is a pool of index maps used for de-duplication.
+var indexPool = sync.Pool{
+ New: func() any { return make(map[string]int) },
+}
+
+func getIndex() map[string]int {
+ return indexPool.Get().(map[string]int)
+}
+
+func putIndex(index map[string]int) {
+ clear(index)
+ indexPool.Put(index)
+}
+
+// Record is a log record emitted by the Logger.
+// A log record with non-empty event name is interpreted as an event record.
+//
+// Do not create instances of Record on your own in production code.
+// You can use [go.opentelemetry.io/otel/sdk/log/logtest.RecordFactory]
+// for testing purposes.
+type Record struct {
+ // Do not embed the log.Record. Attributes need to be overwrite-able and
+ // deep-copying needs to be possible.
+
+ eventName string
+ timestamp time.Time
+ observedTimestamp time.Time
+ severity log.Severity
+ severityText string
+ body log.Value
+
+ // The fields below are for optimizing the implementation of Attributes and
+ // AddAttributes. This design is borrowed from the slog Record type:
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.22.0:src/log/slog/record.go;l=20
+
+ // Allocation optimization: an inline array sized to hold
+ // the majority of log calls (based on examination of open-source
+ // code). It holds the start of the list of attributes.
+ front [attributesInlineCount]log.KeyValue
+
+ // The number of attributes in front.
+ nFront int
+
+ // The list of attributes except for those in front.
+ // Invariants:
+ // - len(back) > 0 if nFront == len(front)
+ // - Unused array elements are zero-ed. Used to detect mistakes.
+ back []log.KeyValue
+
+ // dropped is the count of attributes that have been dropped when limits
+ // were reached.
+ dropped int
+
+ traceID trace.TraceID
+ spanID trace.SpanID
+ traceFlags trace.TraceFlags
+
+ // resource represents the entity that collected the log.
+ resource *resource.Resource
+
+ // scope is the Scope that the Logger was created with.
+ scope *instrumentation.Scope
+
+ attributeValueLengthLimit int
+ attributeCountLimit int
+
+ noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+func (r *Record) addDropped(n int) {
+ logAttrDropped()
+ r.dropped += n
+}
+
+func (r *Record) setDropped(n int) {
+ logAttrDropped()
+ r.dropped = n
+}
+
+// EventName returns the event name.
+// A log record with non-empty event name is interpreted as an event record.
+func (r *Record) EventName() string {
+ return r.eventName
+}
+
+// SetEventName sets the event name.
+// A log record with non-empty event name is interpreted as an event record.
+func (r *Record) SetEventName(s string) {
+ r.eventName = s
+}
+
+// Timestamp returns the time when the log record occurred.
+func (r *Record) Timestamp() time.Time {
+ return r.timestamp
+}
+
+// SetTimestamp sets the time when the log record occurred.
+func (r *Record) SetTimestamp(t time.Time) {
+ r.timestamp = t
+}
+
+// ObservedTimestamp returns the time when the log record was observed.
+func (r *Record) ObservedTimestamp() time.Time {
+ return r.observedTimestamp
+}
+
+// SetObservedTimestamp sets the time when the log record was observed.
+func (r *Record) SetObservedTimestamp(t time.Time) {
+ r.observedTimestamp = t
+}
+
+// Severity returns the severity of the log record.
+func (r *Record) Severity() log.Severity {
+ return r.severity
+}
+
+// SetSeverity sets the severity level of the log record.
+func (r *Record) SetSeverity(level log.Severity) {
+ r.severity = level
+}
+
+// SeverityText returns severity (also known as log level) text. This is the
+// original string representation of the severity as it is known at the source.
+func (r *Record) SeverityText() string {
+ return r.severityText
+}
+
+// SetSeverityText sets severity (also known as log level) text. This is the
+// original string representation of the severity as it is known at the source.
+func (r *Record) SetSeverityText(text string) {
+ r.severityText = text
+}
+
+// Body returns the body of the log record.
+func (r *Record) Body() log.Value {
+ return r.body
+}
+
+// SetBody sets the body of the log record.
+func (r *Record) SetBody(v log.Value) {
+ r.body = v
+}
+
+// WalkAttributes walks all attributes the log record holds by calling f on
+// each [log.KeyValue] in the [Record]. Iteration stops if f returns false.
+func (r *Record) WalkAttributes(f func(log.KeyValue) bool) {
+ for i := 0; i < r.nFront; i++ {
+ if !f(r.front[i]) {
+ return
+ }
+ }
+ for _, a := range r.back {
+ if !f(a) {
+ return
+ }
+ }
+}
+
+// AddAttributes adds attributes to the log record.
+// Attributes in attrs will overwrite any attribute already added to r with the same key.
+func (r *Record) AddAttributes(attrs ...log.KeyValue) {
+ n := r.AttributesLen()
+ if n == 0 {
+ // Avoid the more complex duplicate map lookups below.
+ var drop int
+ attrs, drop = dedup(attrs)
+ r.setDropped(drop)
+
+ attrs, drop = head(attrs, r.attributeCountLimit)
+ r.addDropped(drop)
+
+ r.addAttrs(attrs)
+ return
+ }
+
+ // Used to find duplicates between attrs and existing attributes in r.
+ rIndex := r.attrIndex()
+ defer putIndex(rIndex)
+
+ // Unique attrs that need to be added to r. This uses the same underlying
+ // array as attrs.
+ //
+ // Note, do not iterate attrs twice by just calling dedup(attrs) here.
+ unique := attrs[:0]
+ // Used to find duplicates within attrs itself. The index value is the
+ // index of the element in unique.
+ uIndex := getIndex()
+ defer putIndex(uIndex)
+
+ // Deduplicate attrs within the scope of all existing attributes.
+ for _, a := range attrs {
+ // Last-value-wins for any duplicates in attrs.
+ idx, found := uIndex[a.Key]
+ if found {
+ r.addDropped(1)
+ unique[idx] = a
+ continue
+ }
+
+ idx, found = rIndex[a.Key]
+ if found {
+ // New attrs overwrite any existing with the same key.
+ r.addDropped(1)
+ if idx < 0 {
+ r.front[-(idx + 1)] = a
+ } else {
+ r.back[idx] = a
+ }
+ } else {
+ // Unique attribute.
+ unique = append(unique, a)
+ uIndex[a.Key] = len(unique) - 1
+ }
+ }
+ attrs = unique
+
+ if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit {
+ // Truncate the now unique attributes to comply with limit.
+ //
+ // Do not use head(attrs, r.attributeCountLimit - n) here. If
+ // (r.attributeCountLimit - n) <= 0 attrs needs to be emptied.
+ last := max(0, r.attributeCountLimit-n)
+ r.addDropped(len(attrs) - last)
+ attrs = attrs[:last]
+ }
+
+ r.addAttrs(attrs)
+}
+
+// attrIndex returns an index map for all attributes in the Record r. The index
+// maps the attribute key to location the attribute is stored. If the value is
+// < 0 then -(value + 1) (e.g. -1 -> 0, -2 -> 1, -3 -> 2) represents the index
+// in r.nFront. Otherwise, the index is the exact index of r.back.
+//
+// The returned index is taken from the indexPool. It is the caller's
+// responsibility to return the index to that pool (putIndex) when done.
+func (r *Record) attrIndex() map[string]int {
+ index := getIndex()
+ for i := 0; i < r.nFront; i++ {
+ key := r.front[i].Key
+ index[key] = -i - 1 // stored in front: negative index.
+ }
+ for i := 0; i < len(r.back); i++ {
+ key := r.back[i].Key
+ index[key] = i // stored in back: positive index.
+ }
+ return index
+}
+
+// addAttrs adds attrs to the Record r. This does not validate any limits or
+// duplication of attributes, these tasks are left to the caller to handle
+// prior to calling.
+func (r *Record) addAttrs(attrs []log.KeyValue) {
+ var i int
+ for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
+ a := attrs[i]
+ r.front[r.nFront] = r.applyAttrLimits(a)
+ r.nFront++
+ }
+
+ for j, a := range attrs[i:] {
+ attrs[i+j] = r.applyAttrLimits(a)
+ }
+ r.back = slices.Grow(r.back, len(attrs[i:]))
+ r.back = append(r.back, attrs[i:]...)
+}
+
+// SetAttributes sets (and overrides) attributes to the log record.
+func (r *Record) SetAttributes(attrs ...log.KeyValue) {
+ var drop int
+ attrs, drop = dedup(attrs)
+ r.setDropped(drop)
+
+ attrs, drop = head(attrs, r.attributeCountLimit)
+ r.addDropped(drop)
+
+ r.nFront = 0
+ var i int
+ for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
+ a := attrs[i]
+ r.front[r.nFront] = r.applyAttrLimits(a)
+ r.nFront++
+ }
+
+ r.back = slices.Clone(attrs[i:])
+ for i, a := range r.back {
+ r.back[i] = r.applyAttrLimits(a)
+ }
+}
+
+// head returns the first n values of kvs along with the number of elements
+// dropped. If n is less than or equal to zero, kvs is returned with 0.
+func head(kvs []log.KeyValue, n int) (out []log.KeyValue, dropped int) {
+ if n > 0 && len(kvs) > n {
+ return kvs[:n], len(kvs) - n
+ }
+ return kvs, 0
+}
+
+// dedup deduplicates kvs front-to-back with the last value saved.
+func dedup(kvs []log.KeyValue) (unique []log.KeyValue, dropped int) {
+ index := getIndex()
+ defer putIndex(index)
+
+ unique = kvs[:0] // Use the same underlying array as kvs.
+ for _, a := range kvs {
+ idx, found := index[a.Key]
+ if found {
+ dropped++
+ unique[idx] = a
+ } else {
+ unique = append(unique, a)
+ index[a.Key] = len(unique) - 1
+ }
+ }
+ return unique, dropped
+}
+
+// AttributesLen returns the number of attributes in the log record.
+func (r *Record) AttributesLen() int {
+ return r.nFront + len(r.back)
+}
+
+// DroppedAttributes returns the number of attributes dropped due to limits
+// being reached.
+func (r *Record) DroppedAttributes() int {
+ return r.dropped
+}
+
+// TraceID returns the trace ID or empty array.
+func (r *Record) TraceID() trace.TraceID {
+ return r.traceID
+}
+
+// SetTraceID sets the trace ID.
+func (r *Record) SetTraceID(id trace.TraceID) {
+ r.traceID = id
+}
+
+// SpanID returns the span ID or empty array.
+func (r *Record) SpanID() trace.SpanID {
+ return r.spanID
+}
+
+// SetSpanID sets the span ID.
+func (r *Record) SetSpanID(id trace.SpanID) {
+ r.spanID = id
+}
+
+// TraceFlags returns the trace flags.
+func (r *Record) TraceFlags() trace.TraceFlags {
+ return r.traceFlags
+}
+
+// SetTraceFlags sets the trace flags.
+func (r *Record) SetTraceFlags(flags trace.TraceFlags) {
+ r.traceFlags = flags
+}
+
+// Resource returns the entity that collected the log.
+func (r *Record) Resource() resource.Resource {
+ if r.resource == nil {
+ return *resource.Empty()
+ }
+ return *r.resource
+}
+
+// InstrumentationScope returns the scope that the Logger was created with.
+func (r *Record) InstrumentationScope() instrumentation.Scope {
+ if r.scope == nil {
+ return instrumentation.Scope{}
+ }
+ return *r.scope
+}
+
+// Clone returns a copy of the record with no shared state. The original record
+// and the clone can both be modified without interfering with each other.
+func (r *Record) Clone() Record {
+ res := *r
+ res.back = slices.Clone(r.back)
+ return res
+}
+
+func (r *Record) applyAttrLimits(attr log.KeyValue) log.KeyValue {
+ attr.Value = r.applyValueLimits(attr.Value)
+ return attr
+}
+
+func (r *Record) applyValueLimits(val log.Value) log.Value {
+ switch val.Kind() {
+ case log.KindString:
+ s := val.AsString()
+ if len(s) > r.attributeValueLengthLimit {
+ val = log.StringValue(truncate(r.attributeValueLengthLimit, s))
+ }
+ case log.KindSlice:
+ sl := val.AsSlice()
+ for i := range sl {
+ sl[i] = r.applyValueLimits(sl[i])
+ }
+ val = log.SliceValue(sl...)
+ case log.KindMap:
+ // Deduplicate then truncate. Do not do at the same time to avoid
+ // wasted truncation operations.
+ kvs, dropped := dedup(val.AsMap())
+ r.addDropped(dropped)
+ for i := range kvs {
+ kvs[i] = r.applyAttrLimits(kvs[i])
+ }
+ val = log.MapValue(kvs...)
+ }
+ return val
+}
+
+// truncate returns a truncated version of s such that it contains less than
+// the limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains less than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/ring.go b/vendor/go.opentelemetry.io/otel/sdk/log/ring.go
new file mode 100644
index 000000000..5e84cb164
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/ring.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+// A ring is an element of a circular list, or ring. Rings do not have a
+// beginning or end; a pointer to any ring element serves as reference to the
+// entire ring. Empty rings are represented as nil ring pointers. The zero
+// value for a ring is a one-element ring with a nil Value.
+//
+// This is copied from the "container/ring" package. It uses a Record type for
+// Value instead of any to avoid allocations.
+type ring struct {
+ next, prev *ring
+ Value Record
+}
+
+func (r *ring) init() *ring {
+ r.next = r
+ r.prev = r
+ return r
+}
+
+// Next returns the next ring element. r must not be empty.
+func (r *ring) Next() *ring {
+ if r.next == nil {
+ return r.init()
+ }
+ return r.next
+}
+
+// Prev returns the previous ring element. r must not be empty.
+func (r *ring) Prev() *ring {
+ if r.next == nil {
+ return r.init()
+ }
+ return r.prev
+}
+
+// newRing creates a ring of n elements.
+func newRing(n int) *ring {
+ if n <= 0 {
+ return nil
+ }
+ r := new(ring)
+ p := r
+ for i := 1; i < n; i++ {
+ p.next = &ring{prev: p}
+ p = p.next
+ }
+ p.next = r
+ r.prev = p
+ return r
+}
+
+// Len computes the number of elements in ring r. It executes in time
+// proportional to the number of elements.
+func (r *ring) Len() int {
+ n := 0
+ if r != nil {
+ n = 1
+ for p := r.Next(); p != r; p = p.next {
+ n++
+ }
+ }
+ return n
+}
+
+// Do calls function f on each element of the ring, in forward order. The
+// behavior of Do is undefined if f changes *r.
+func (r *ring) Do(f func(Record)) {
+ if r != nil {
+ f(r.Value)
+ for p := r.Next(); p != r; p = p.next {
+ f(p.Value)
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/setting.go b/vendor/go.opentelemetry.io/otel/sdk/log/setting.go
new file mode 100644
index 000000000..72107a9e5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/setting.go
@@ -0,0 +1,119 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "time"
+
+ "go.opentelemetry.io/otel"
+)
+
+// setting is a configuration setting value.
+type setting[T any] struct {
+ Value T
+ Set bool
+}
+
+// newSetting returns a new [setting] with the value set.
+func newSetting[T any](value T) setting[T] {
+ return setting[T]{Value: value, Set: true}
+}
+
+// resolver returns an updated setting after applying a resolution operation.
+type resolver[T any] func(setting[T]) setting[T]
+
+// Resolve returns a resolved version of s.
+//
+// It will apply all the passed fn in the order provided, chaining together the
+// return setting to the next input. The setting s is used as the initial
+// argument to the first fn.
+//
+// Each fn needs to validate if it should apply given the Set state of the
+// setting. This will not perform any checks on the set state when chaining
+// functions.
+func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] {
+ for _, f := range fn {
+ s = f(s)
+ }
+ return s
+}
+
+// clampMax returns a resolver that will ensure a setting value is no greater
+// than n. If it is, the value is set to n.
+func clampMax[T ~int | ~int64](n T) resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if s.Value > n {
+ s.Value = n
+ }
+ return s
+ }
+}
+
+// clearLessThanOne returns a resolver that will clear a setting value and
+// change its set state to false if its value is less than 1.
+func clearLessThanOne[T ~int | ~int64]() resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if s.Value < 1 {
+ s.Value = 0
+ s.Set = false
+ }
+ return s
+ }
+}
+
+// getenv returns a resolver that will apply an integer environment variable
+// value associated with key to a setting value.
+//
+// If the input setting to the resolver is set, the environment variable will
+// not be applied.
+//
+// If the environment variable value associated with key is not an integer, an
+// error will be sent to the OTel error handler and the setting will not be
+// updated.
+//
+// If the setting value is a [time.Duration] type, the environment variable
+// will be interpreted as a duration of milliseconds.
+func getenv[T ~int | ~int64](key string) resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if s.Set {
+		// Passed, valid options have precedence.
+ return s
+ }
+
+ if v := os.Getenv(key); v != "" {
+ n, err := strconv.Atoi(v)
+ if err != nil {
+ otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, v, err))
+ } else {
+ switch any(s.Value).(type) {
+ case time.Duration:
+				// OTel duration env vars are in milliseconds.
+ s.Value = T(time.Duration(n) * time.Millisecond)
+ default:
+ s.Value = T(n)
+ }
+ s.Set = true
+ }
+ }
+ return s
+ }
+}
+
+// fallback returns a resolver that will set a setting value to val if it is not
+// already set.
+//
+// This is usually passed at the end of a resolver chain to ensure a default is
+// applied if the setting has not already been set.
+func fallback[T any](val T) resolver[T] {
+ return func(s setting[T]) setting[T] {
+ if !s.Set {
+ s.Value = val
+ s.Set = true
+ }
+ return s
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/simple.go b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go
new file mode 100644
index 000000000..002e52cae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package log // import "go.opentelemetry.io/otel/sdk/log"
+
+import (
+ "context"
+ "sync"
+)
+
+// Compile-time check SimpleProcessor implements Processor.
+var _ Processor = (*SimpleProcessor)(nil)
+
+// SimpleProcessor is a processor that synchronously exports log records.
+//
+// Use [NewSimpleProcessor] to create a SimpleProcessor.
+type SimpleProcessor struct {
+ mu sync.Mutex
+ exporter Exporter
+
+ noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+// NewSimpleProcessor is a simple Processor adapter.
+//
+// This Processor is not recommended for production use due to its synchronous
+// nature, which makes it suitable for testing, debugging, or demonstrating
+// other features, but can lead to slow performance and high computational
+// overhead. For production environments, it is recommended to use
+// [NewBatchProcessor] instead. However, there may be exceptions where certain
+// [Exporter] implementations perform better with this Processor.
+func NewSimpleProcessor(exporter Exporter, _ ...SimpleProcessorOption) *SimpleProcessor {
+ return &SimpleProcessor{exporter: exporter}
+}
+
+var simpleProcRecordsPool = sync.Pool{
+ New: func() any {
+ records := make([]Record, 1)
+ return &records
+ },
+}
+
+// OnEmit synchronously exports the provided log record using the configured exporter.
+func (s *SimpleProcessor) OnEmit(ctx context.Context, r *Record) error {
+ if s.exporter == nil {
+ return nil
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ records := simpleProcRecordsPool.Get().(*[]Record)
+ (*records)[0] = *r
+ defer func() {
+ simpleProcRecordsPool.Put(records)
+ }()
+
+ return s.exporter.Export(ctx, *records)
+}
+
+// Shutdown shuts down the exporter.
+func (s *SimpleProcessor) Shutdown(ctx context.Context) error {
+ if s.exporter == nil {
+ return nil
+ }
+
+ return s.exporter.Shutdown(ctx)
+}
+
+// ForceFlush flushes the exporter.
+func (s *SimpleProcessor) ForceFlush(ctx context.Context) error {
+ if s.exporter == nil {
+ return nil
+ }
+
+ return s.exporter.ForceFlush(ctx)
+}
+
+// SimpleProcessorOption applies a configuration to a [SimpleProcessor].
+type SimpleProcessorOption interface {
+ apply()
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md
new file mode 100644
index 000000000..0678d6564
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md
@@ -0,0 +1,3 @@
+# SDK Trace test
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace/tracetest)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace/tracetest)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
new file mode 100644
index 000000000..07117495a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package tracetest is a testing helper package for the SDK. Users can
+// configure no-op or in-memory exporters to verify different SDK behaviors or
+// custom instrumentation.
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/sdk/trace"
+)
+
+var _ trace.SpanExporter = (*NoopExporter)(nil)
+
+// NewNoopExporter returns a new no-op exporter.
+func NewNoopExporter() *NoopExporter {
+ return new(NoopExporter)
+}
+
+// NoopExporter is an exporter that drops all received spans and performs no
+// action.
+type NoopExporter struct{}
+
+// ExportSpans handles export of spans by dropping them.
+func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil }
+
+// Shutdown stops the exporter by doing nothing.
+func (nsb *NoopExporter) Shutdown(context.Context) error { return nil }
+
+var _ trace.SpanExporter = (*InMemoryExporter)(nil)
+
+// NewInMemoryExporter returns a new InMemoryExporter.
+func NewInMemoryExporter() *InMemoryExporter {
+ return new(InMemoryExporter)
+}
+
+// InMemoryExporter is an exporter that stores all received spans in-memory.
+type InMemoryExporter struct {
+ mu sync.Mutex
+ ss SpanStubs
+}
+
+// ExportSpans handles export of spans by storing them in memory.
+func (imsb *InMemoryExporter) ExportSpans(_ context.Context, spans []trace.ReadOnlySpan) error {
+ imsb.mu.Lock()
+ defer imsb.mu.Unlock()
+ imsb.ss = append(imsb.ss, SpanStubsFromReadOnlySpans(spans)...)
+ return nil
+}
+
+// Shutdown stops the exporter by clearing spans held in memory.
+func (imsb *InMemoryExporter) Shutdown(context.Context) error {
+ imsb.Reset()
+ return nil
+}
+
+// Reset the current in-memory storage.
+func (imsb *InMemoryExporter) Reset() {
+ imsb.mu.Lock()
+ defer imsb.mu.Unlock()
+ imsb.ss = nil
+}
+
+// GetSpans returns the current in-memory stored spans.
+func (imsb *InMemoryExporter) GetSpans() SpanStubs {
+ imsb.mu.Lock()
+ defer imsb.mu.Unlock()
+ ret := make(SpanStubs, len(imsb.ss))
+ copy(ret, imsb.ss)
+ return ret
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
new file mode 100644
index 000000000..732669a17
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+ "context"
+ "sync"
+
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// SpanRecorder records started and ended spans.
+type SpanRecorder struct {
+ startedMu sync.RWMutex
+ started []sdktrace.ReadWriteSpan
+
+ endedMu sync.RWMutex
+ ended []sdktrace.ReadOnlySpan
+}
+
+var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil)
+
+// NewSpanRecorder returns a new initialized SpanRecorder.
+func NewSpanRecorder() *SpanRecorder {
+ return new(SpanRecorder)
+}
+
+// OnStart records started spans.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
+ sr.startedMu.Lock()
+ defer sr.startedMu.Unlock()
+ sr.started = append(sr.started, s)
+}
+
+// OnEnd records completed spans.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) {
+ sr.endedMu.Lock()
+ defer sr.endedMu.Unlock()
+ sr.ended = append(sr.ended, s)
+}
+
+// Shutdown does nothing.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) Shutdown(context.Context) error {
+ return nil
+}
+
+// ForceFlush does nothing.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) ForceFlush(context.Context) error {
+ return nil
+}
+
+// Started returns a copy of all started spans that have been recorded.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan {
+ sr.startedMu.RLock()
+ defer sr.startedMu.RUnlock()
+ dst := make([]sdktrace.ReadWriteSpan, len(sr.started))
+ copy(dst, sr.started)
+ return dst
+}
+
+// Reset clears the recorded spans.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) Reset() {
+ sr.startedMu.Lock()
+ sr.endedMu.Lock()
+ defer sr.startedMu.Unlock()
+ defer sr.endedMu.Unlock()
+
+ sr.started = nil
+ sr.ended = nil
+}
+
+// Ended returns a copy of all ended spans that have been recorded.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) Ended() []sdktrace.ReadOnlySpan {
+ sr.endedMu.RLock()
+ defer sr.endedMu.RUnlock()
+ dst := make([]sdktrace.ReadOnlySpan, len(sr.ended))
+ copy(dst, sr.ended)
+ return dst
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
new file mode 100644
index 000000000..cd2cc30ca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
@@ -0,0 +1,166 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SpanStubs is a slice of SpanStub used for testing an SDK.
+type SpanStubs []SpanStub
+
+// SpanStubsFromReadOnlySpans returns SpanStubs populated from ro.
+func SpanStubsFromReadOnlySpans(ro []tracesdk.ReadOnlySpan) SpanStubs {
+ if len(ro) == 0 {
+ return nil
+ }
+
+ s := make(SpanStubs, 0, len(ro))
+ for _, r := range ro {
+ s = append(s, SpanStubFromReadOnlySpan(r))
+ }
+
+ return s
+}
+
+// Snapshots returns s as a slice of ReadOnlySpans.
+func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan {
+ if len(s) == 0 {
+ return nil
+ }
+
+ ro := make([]tracesdk.ReadOnlySpan, len(s))
+ for i := 0; i < len(s); i++ {
+ ro[i] = s[i].Snapshot()
+ }
+ return ro
+}
+
+// SpanStub is a stand-in for a Span.
+type SpanStub struct {
+ Name string
+ SpanContext trace.SpanContext
+ Parent trace.SpanContext
+ SpanKind trace.SpanKind
+ StartTime time.Time
+ EndTime time.Time
+ Attributes []attribute.KeyValue
+ Events []tracesdk.Event
+ Links []tracesdk.Link
+ Status tracesdk.Status
+ DroppedAttributes int
+ DroppedEvents int
+ DroppedLinks int
+ ChildSpanCount int
+ Resource *resource.Resource
+ InstrumentationScope instrumentation.Scope
+
+ // Deprecated: use InstrumentationScope instead.
+	InstrumentationLibrary instrumentation.Library //nolint:staticcheck // This field needs to be defined for backwards compatibility
+}
+
+// SpanStubFromReadOnlySpan returns a SpanStub populated from ro.
+func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub {
+ if ro == nil {
+ return SpanStub{}
+ }
+
+ return SpanStub{
+ Name: ro.Name(),
+ SpanContext: ro.SpanContext(),
+ Parent: ro.Parent(),
+ SpanKind: ro.SpanKind(),
+ StartTime: ro.StartTime(),
+ EndTime: ro.EndTime(),
+ Attributes: ro.Attributes(),
+ Events: ro.Events(),
+ Links: ro.Links(),
+ Status: ro.Status(),
+ DroppedAttributes: ro.DroppedAttributes(),
+ DroppedEvents: ro.DroppedEvents(),
+ DroppedLinks: ro.DroppedLinks(),
+ ChildSpanCount: ro.ChildSpanCount(),
+ Resource: ro.Resource(),
+ InstrumentationScope: ro.InstrumentationScope(),
+ InstrumentationLibrary: ro.InstrumentationScope(),
+ }
+}
+
+// Snapshot returns a read-only copy of the SpanStub.
+func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan {
+ scopeOrLibrary := s.InstrumentationScope
+ if scopeOrLibrary.Name == "" && scopeOrLibrary.Version == "" && scopeOrLibrary.SchemaURL == "" {
+ scopeOrLibrary = s.InstrumentationLibrary
+ }
+
+ return spanSnapshot{
+ name: s.Name,
+ spanContext: s.SpanContext,
+ parent: s.Parent,
+ spanKind: s.SpanKind,
+ startTime: s.StartTime,
+ endTime: s.EndTime,
+ attributes: s.Attributes,
+ events: s.Events,
+ links: s.Links,
+ status: s.Status,
+ droppedAttributes: s.DroppedAttributes,
+ droppedEvents: s.DroppedEvents,
+ droppedLinks: s.DroppedLinks,
+ childSpanCount: s.ChildSpanCount,
+ resource: s.Resource,
+ instrumentationScope: scopeOrLibrary,
+ }
+}
+
+type spanSnapshot struct {
+ // Embed the interface to implement the private method.
+ tracesdk.ReadOnlySpan
+
+ name string
+ spanContext trace.SpanContext
+ parent trace.SpanContext
+ spanKind trace.SpanKind
+ startTime time.Time
+ endTime time.Time
+ attributes []attribute.KeyValue
+ events []tracesdk.Event
+ links []tracesdk.Link
+ status tracesdk.Status
+ droppedAttributes int
+ droppedEvents int
+ droppedLinks int
+ childSpanCount int
+ resource *resource.Resource
+ instrumentationScope instrumentation.Scope
+}
+
+func (s spanSnapshot) Name() string { return s.name }
+func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext }
+func (s spanSnapshot) Parent() trace.SpanContext { return s.parent }
+func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind }
+func (s spanSnapshot) StartTime() time.Time { return s.startTime }
+func (s spanSnapshot) EndTime() time.Time { return s.endTime }
+func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes }
+func (s spanSnapshot) Links() []tracesdk.Link { return s.links }
+func (s spanSnapshot) Events() []tracesdk.Event { return s.events }
+func (s spanSnapshot) Status() tracesdk.Status { return s.status }
+func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes }
+func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks }
+func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents }
+func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount }
+func (s spanSnapshot) Resource() *resource.Resource { return s.resource }
+func (s spanSnapshot) InstrumentationScope() instrumentation.Scope {
+ return s.instrumentationScope
+}
+
+func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+ return s.instrumentationScope
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go
deleted file mode 100644
index aab73ffe1..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/http.go
+++ /dev/null
@@ -1,394 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/semconv/internal/v4"
-
-import (
- "fmt"
- "net/http"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
-)
-
-// HTTPConv are the HTTP semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type HTTPConv struct {
- NetConv *NetConv
-
- EnduserIDKey attribute.Key
- HTTPClientIPKey attribute.Key
- NetProtocolNameKey attribute.Key
- NetProtocolVersionKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPResponseContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- UserAgentOriginalKey attribute.Key
-}
-
-// ClientResponse returns attributes for an HTTP response received by a client
-// from a server. The following attributes are returned if the related values
-// are defined in resp: "http.status.code", "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// append(ClientResponse(resp), ClientRequest(resp.Request)...)
-func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
- var n int
- if resp.StatusCode > 0 {
- n++
- }
- if resp.ContentLength > 0 {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- if resp.StatusCode > 0 {
- attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
- }
- if resp.ContentLength > 0 {
- attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
- }
- return attrs
-}
-
-// ClientRequest returns attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.url", "http.flavor",
-// "http.method", "net.peer.name". The following attributes are returned if the
-// related values are defined in req: "net.peer.port", "http.user_agent",
-// "http.request_content_length", "enduser.id".
-func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue {
- n := 3 // URL, peer name, proto, and method.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- if req.ContentLength > 0 {
- n++
- }
- userID, _, hasUserID := req.BasicAuth()
- if hasUserID {
- n++
- }
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.proto(req.Proto))
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, c.HTTPURLKey.String(u))
-
- attrs = append(attrs, c.NetConv.PeerName(peer))
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if l := req.ContentLength; l > 0 {
- attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
- }
-
- if hasUserID {
- attrs = append(attrs, c.EnduserIDKey.String(userID))
- }
-
- return attrs
-}
-
-// ServerRequest returns attributes for an HTTP request received by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.flavor", "http.target", "net.host.name". The following attributes are
-// returned if they related values are defined in req: "net.host.port",
-// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
-// "http.client_ip".
-func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
- // TODO: This currently does not add the specification required
- // `http.target` attribute. It has too high of a cardinality to safely be
- // added. An alternate should be added, or this comment removed, when it is
- // addressed by the specification. If it is ultimately decided to continue
- // not including the attribute, the HTTPTargetKey field of the HTTPConv
- // should be removed as well.
-
- n := 4 // Method, scheme, proto, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- peer, peerPort := splitHostPort(req.RemoteAddr)
- if peer != "" {
- n++
- if peerPort > 0 {
- n++
- }
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- userID, _, hasUserID := req.BasicAuth()
- if hasUserID {
- n++
- }
- clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP != "" {
- n++
- }
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.proto(req.Proto))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
- if peerPort > 0 {
- attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if hasUserID {
- attrs = append(attrs, c.EnduserIDKey.String(userID))
- }
-
- if clientIP != "" {
- attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
- }
-
- return attrs
-}
-
-func (c *HTTPConv) method(method string) attribute.KeyValue {
- if method == "" {
- return c.HTTPMethodKey.String(http.MethodGet)
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return c.HTTPSchemeHTTPS
- }
- return c.HTTPSchemeHTTP
-}
-
-func (c *HTTPConv) proto(proto string) attribute.KeyValue {
- switch proto {
- case "HTTP/1.0":
- return c.NetProtocolVersionKey.String("1.0")
- case "HTTP/1.1":
- return c.NetProtocolVersionKey.String("1.1")
- case "HTTP/2":
- return c.NetProtocolVersionKey.String("2.0")
- case "HTTP/3":
- return c.NetProtocolVersionKey.String("3.0")
- default:
- return c.NetProtocolNameKey.String(proto)
- }
-}
-
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
-// Return the request host and port from the first non-empty source.
-func firstHostPort(source ...string) (host string, port int) {
- for _, hostport := range source {
- host, port = splitHostPort(hostport)
- if host != "" || port > 0 {
- break
- }
- }
- return
-}
-
-// RequestHeader returns the contents of h as OpenTelemetry attributes.
-func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue {
- return c.header("http.request.header", h)
-}
-
-// ResponseHeader returns the contents of h as OpenTelemetry attributes.
-func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue {
- return c.header("http.response.header", h)
-}
-
-func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue {
- key := func(k string) attribute.Key {
- k = strings.ToLower(k)
- k = strings.ReplaceAll(k, "-", "_")
- k = fmt.Sprintf("%s.%s", prefix, k)
- return attribute.Key(k)
- }
-
- attrs := make([]attribute.KeyValue, 0, len(h))
- for k, v := range h {
- attrs = append(attrs, key(k).StringSlice(v))
- }
- return attrs
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) {
- stat, valid := validateHTTPStatusCode(code)
- if !valid {
- return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- return stat, ""
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) {
- stat, valid := validateHTTPStatusCode(code)
- if !valid {
- return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
-
- if code/100 == 4 {
- return codes.Unset, ""
- }
- return stat, ""
-}
-
-type codeRange struct {
- fromInclusive int
- toInclusive int
-}
-
-func (r codeRange) contains(code int) bool {
- return r.fromInclusive <= code && code <= r.toInclusive
-}
-
-var validRangesPerCategory = map[int][]codeRange{
- 1: {
- {http.StatusContinue, http.StatusEarlyHints},
- },
- 2: {
- {http.StatusOK, http.StatusAlreadyReported},
- {http.StatusIMUsed, http.StatusIMUsed},
- },
- 3: {
- {http.StatusMultipleChoices, http.StatusUseProxy},
- {http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
- },
- 4: {
- {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
- {http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
- {http.StatusPreconditionRequired, http.StatusTooManyRequests},
- {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
- {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
- },
- 5: {
- {http.StatusInternalServerError, http.StatusLoopDetected},
- {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
- },
-}
-
-// validateHTTPStatusCode validates the HTTP status code and returns
-// corresponding span status code. If the `code` is not a valid HTTP status
-// code, returns span status Error and false.
-func validateHTTPStatusCode(code int) (codes.Code, bool) {
- category := code / 100
- ranges, ok := validRangesPerCategory[category]
- if !ok {
- return codes.Error, false
- }
- ok = false
- for _, crange := range ranges {
- ok = crange.contains(code)
- if ok {
- break
- }
- }
- if !ok {
- return codes.Error, false
- }
- if category > 0 && category < 4 {
- return codes.Unset, true
- }
- return codes.Error, true
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go
deleted file mode 100644
index f240b9af0..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/v4/net.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/otel/semconv/internal/v4"
-
-import (
- "net"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// NetConv are the network semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type NetConv struct {
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetSockFamilyKey attribute.Key
- NetSockPeerAddrKey attribute.Key
- NetSockPeerPortKey attribute.Key
- NetSockHostAddrKey attribute.Key
- NetSockHostPortKey attribute.Key
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportInProc attribute.KeyValue
-}
-
-func (c *NetConv) Transport(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.NetTransportTCP
- case "udp", "udp4", "udp6":
- return c.NetTransportUDP
- case "unix", "unixgram", "unixpacket":
- return c.NetTransportInProc
- default:
- // "ip:*", "ip4:*", and "ip6:*" all are considered other.
- return c.NetTransportOther
- }
-}
-
-// Host returns attributes for a network host address.
-func (c *NetConv) Host(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.HostName(h))
- if p > 0 {
- attrs = append(attrs, c.HostPort(p))
- }
- return attrs
-}
-
-// Server returns attributes for a network listener listening at address. See
-// net.Listen for information about acceptable address values, address should
-// be the same as the one used to create ln. If ln is nil, only network host
-// attributes will be returned that describe address. Otherwise, the socket
-// level information about ln will also be included.
-func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue {
- if ln == nil {
- return c.Host(address)
- }
-
- lAddr := ln.Addr()
- if lAddr == nil {
- return c.Host(address)
- }
-
- hostName, hostPort := splitHostPort(address)
- sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
- network := lAddr.Network()
- sockFamily := family(network, sockHostAddr)
-
- n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
- n += positiveInt(hostPort, sockHostPort)
- attr := make([]attribute.KeyValue, 0, n)
- if hostName != "" {
- attr = append(attr, c.HostName(hostName))
- if hostPort > 0 {
- // Only if net.host.name is set should net.host.port be.
- attr = append(attr, c.HostPort(hostPort))
- }
- }
- if network != "" {
- attr = append(attr, c.Transport(network))
- }
- if sockFamily != "" {
- attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
- }
- if sockHostAddr != "" {
- attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
- if sockHostPort > 0 {
- // Only if net.sock.host.addr is set should net.sock.host.port be.
- attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
- }
- }
- return attr
-}
-
-func (c *NetConv) HostName(name string) attribute.KeyValue {
- return c.NetHostNameKey.String(name)
-}
-
-func (c *NetConv) HostPort(port int) attribute.KeyValue {
- return c.NetHostPortKey.Int(port)
-}
-
-// Client returns attributes for a client network connection to address. See
-// net.Dial for information about acceptable address values, address should be
-// the same as the one used to create conn. If conn is nil, only network peer
-// attributes will be returned that describe address. Otherwise, the socket
-// level information about conn will also be included.
-func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue {
- if conn == nil {
- return c.Peer(address)
- }
-
- lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr()
-
- var network string
- switch {
- case lAddr != nil:
- network = lAddr.Network()
- case rAddr != nil:
- network = rAddr.Network()
- default:
- return c.Peer(address)
- }
-
- peerName, peerPort := splitHostPort(address)
- var (
- sockFamily string
- sockPeerAddr string
- sockPeerPort int
- sockHostAddr string
- sockHostPort int
- )
-
- if lAddr != nil {
- sockHostAddr, sockHostPort = splitHostPort(lAddr.String())
- }
-
- if rAddr != nil {
- sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String())
- }
-
- switch {
- case sockHostAddr != "":
- sockFamily = family(network, sockHostAddr)
- case sockPeerAddr != "":
- sockFamily = family(network, sockPeerAddr)
- }
-
- n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily)
- n += positiveInt(peerPort, sockPeerPort, sockHostPort)
- attr := make([]attribute.KeyValue, 0, n)
- if peerName != "" {
- attr = append(attr, c.PeerName(peerName))
- if peerPort > 0 {
- // Only if net.peer.name is set should net.peer.port be.
- attr = append(attr, c.PeerPort(peerPort))
- }
- }
- if network != "" {
- attr = append(attr, c.Transport(network))
- }
- if sockFamily != "" {
- attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
- }
- if sockPeerAddr != "" {
- attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr))
- if sockPeerPort > 0 {
- // Only if net.sock.peer.addr is set should net.sock.peer.port be.
- attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort))
- }
- }
- if sockHostAddr != "" {
- attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
- if sockHostPort > 0 {
- // Only if net.sock.host.addr is set should net.sock.host.port be.
- attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
- }
- }
- return attr
-}
-
-func family(network, address string) string {
- switch network {
- case "unix", "unixgram", "unixpacket":
- return "unix"
- default:
- if ip := net.ParseIP(address); ip != nil {
- if ip.To4() == nil {
- return "inet6"
- }
- return "inet"
- }
- }
- return ""
-}
-
-func nonZeroStr(strs ...string) int {
- var n int
- for _, str := range strs {
- if str != "" {
- n++
- }
- }
- return n
-}
-
-func positiveInt(ints ...int) int {
- var n int
- for _, i := range ints {
- if i > 0 {
- n++
- }
- }
- return n
-}
-
-// Peer returns attributes for a network peer address.
-func (c *NetConv) Peer(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.PeerName(h))
- if p > 0 {
- attrs = append(attrs, c.PeerPort(p))
- }
- return attrs
-}
-
-func (c *NetConv) PeerName(name string) attribute.KeyValue {
- return c.NetPeerNameKey.String(name)
-}
-
-func (c *NetConv) PeerPort(port int) attribute.KeyValue {
- return c.NetPeerPortKey.Int(port)
-}
-
-func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue {
- return c.NetSockPeerAddrKey.String(addr)
-}
-
-func (c *NetConv) SockPeerPort(port int) attribute.KeyValue {
- return c.NetSockPeerPortKey.Int(port)
-}
-
-// splitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
-func splitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndex(hostport, ":"); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
- return host, int(p) // nolint: gosec // Bit size of 16 checked above.
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md
deleted file mode 100644
index 96b4b0d0b..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.20.0 HTTP conv
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go
deleted file mode 100644
index 8f261a9db..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/httpconv/http.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package httpconv provides OpenTelemetry HTTP semantic conventions for
-// tracing telemetry.
-package httpconv // import "go.opentelemetry.io/otel/semconv/v1.20.0/httpconv"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/semconv/internal/v4"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-var (
- nc = &internal.NetConv{
- NetHostNameKey: semconv.NetHostNameKey,
- NetHostPortKey: semconv.NetHostPortKey,
- NetPeerNameKey: semconv.NetPeerNameKey,
- NetPeerPortKey: semconv.NetPeerPortKey,
- NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
- NetSockPeerPortKey: semconv.NetSockPeerPortKey,
- NetTransportOther: semconv.NetTransportOther,
- NetTransportTCP: semconv.NetTransportTCP,
- NetTransportUDP: semconv.NetTransportUDP,
- NetTransportInProc: semconv.NetTransportInProc,
- }
-
- hc = &internal.HTTPConv{
- NetConv: nc,
-
- EnduserIDKey: semconv.EnduserIDKey,
- HTTPClientIPKey: semconv.HTTPClientIPKey,
- NetProtocolNameKey: semconv.NetProtocolNameKey,
- NetProtocolVersionKey: semconv.NetProtocolVersionKey,
- HTTPMethodKey: semconv.HTTPMethodKey,
- HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
- HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
- HTTPRouteKey: semconv.HTTPRouteKey,
- HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
- HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
- HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
- HTTPTargetKey: semconv.HTTPTargetKey,
- HTTPURLKey: semconv.HTTPURLKey,
- UserAgentOriginalKey: semconv.UserAgentOriginalKey,
- }
-)
-
-// ClientResponse returns trace attributes for an HTTP response received by a
-// client from a server. It will return the following attributes if the related
-// values are defined in resp: "http.status.code",
-// "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// append(ClientResponse(resp), ClientRequest(resp.Request)...)
-func ClientResponse(resp *http.Response) []attribute.KeyValue {
- return hc.ClientResponse(resp)
-}
-
-// ClientRequest returns trace attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.url",
-// "net.protocol.(name|version)", "http.method", "net.peer.name".
-// The following attributes are returned if the related values are defined
-// in req: "net.peer.port", "http.user_agent", "http.request_content_length",
-// "enduser.id".
-func ClientRequest(req *http.Request) []attribute.KeyValue {
- return hc.ClientRequest(req)
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func ClientStatus(code int) (codes.Code, string) {
- return hc.ClientStatus(code)
-}
-
-// ServerRequest returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// ""net.protocol.(name|version)", "http.target", "net.host.name".
-// The following attributes are returned if they related values are defined
-// in req: "net.host.port", "net.sock.peer.addr", "net.sock.peer.port",
-// "user_agent.original", "enduser.id", "http.client_ip".
-func ServerRequest(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequest(server, req)
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func ServerStatus(code int) (codes.Code, string) {
- return hc.ServerStatus(code)
-}
-
-// RequestHeader returns the contents of h as attributes.
-//
-// Instrumentation should require an explicit configuration of which headers to
-// captured and then prune what they pass here. Including all headers can be a
-// security risk - explicit configuration helps avoid leaking sensitive
-// information.
-//
-// The User-Agent header is already captured in the user_agent.original attribute
-// from ClientRequest and ServerRequest. Instrumentation may provide an option
-// to capture that header here even though it is not recommended. Otherwise,
-// instrumentation should filter that out of what is passed.
-func RequestHeader(h http.Header) []attribute.KeyValue {
- return hc.RequestHeader(h)
-}
-
-// ResponseHeader returns the contents of h as attributes.
-//
-// Instrumentation should require an explicit configuration of which headers to
-// captured and then prune what they pass here. Including all headers can be a
-// security risk - explicit configuration helps avoid leaking sensitive
-// information.
-//
-// The User-Agent header is already captured in the user_agent.original attribute
-// from ClientRequest and ServerRequest. Instrumentation may provide an option
-// to capture that header here even though it is not recommended. Otherwise,
-// instrumentation should filter that out of what is passed.
-func ResponseHeader(h http.Header) []attribute.KeyValue {
- return hc.ResponseHeader(h)
-}