diff options
Diffstat (limited to 'vendor/google.golang.org/grpc')
122 files changed, 28293 insertions, 0 deletions
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 000000000..9d4213ebc --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000..52338d004 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,60 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single +  concern**. We often times receive PRs that are trying to fix several things at +  a time, but only one fix is considered acceptable, nothing gets merged and +  both author's & review's time is wasted. Create more PRs to address different +  concerns and everyone will be happy. 
+ +- The grpc package should only depend on standard Go packages and a small number +  of exceptions. If your contribution introduces new dependencies which are NOT +  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a +  discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If +  you are suggesting a behavioral or API change, consider starting with a [gRFC +  proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made +  and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line +  to address an issue. PRs with irrelevant changes won't be merged. If you do +  want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments +  that you'll need to address before merging. We expect you to be reasonably +  responsive to those comments, otherwise the PR will be closed after 2-3 weeks +  of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs +  with messy commit history are difficult to review and won't be merged. Use +  `rebase -i upstream/master` to curate your commit history and/or to bring in +  latest changes from master (but avoid rebasing in the middle of a code +  review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we +  can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We +  recommend you **run tests locally** before creating your PR to catch breakages +  early on. 
+  - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors +  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests +  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 000000000..d6ff26747 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + +                                 Apache License +                           Version 2.0, January 2004 +                        http://www.apache.org/licenses/ + +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +   1. Definitions. + +      "License" shall mean the terms and conditions for use, reproduction, +      and distribution as defined by Sections 1 through 9 of this document. + +      "Licensor" shall mean the copyright owner or entity authorized by +      the copyright owner that is granting the License. + +      "Legal Entity" shall mean the union of the acting entity and all +      other entities that control, are controlled by, or are under common +      control with that entity. For the purposes of this definition, +      "control" means (i) the power, direct or indirect, to cause the +      direction or management of such entity, whether by contract or +      otherwise, or (ii) ownership of fifty percent (50%) or more of the +      outstanding shares, or (iii) beneficial ownership of such entity. 
+ +      "You" (or "Your") shall mean an individual or Legal Entity +      exercising permissions granted by this License. + +      "Source" form shall mean the preferred form for making modifications, +      including but not limited to software source code, documentation +      source, and configuration files. + +      "Object" form shall mean any form resulting from mechanical +      transformation or translation of a Source form, including but +      not limited to compiled object code, generated documentation, +      and conversions to other media types. + +      "Work" shall mean the work of authorship, whether in Source or +      Object form, made available under the License, as indicated by a +      copyright notice that is included in or attached to the work +      (an example is provided in the Appendix below). + +      "Derivative Works" shall mean any work, whether in Source or Object +      form, that is based on (or derived from) the Work and for which the +      editorial revisions, annotations, elaborations, or other modifications +      represent, as a whole, an original work of authorship. For the purposes +      of this License, Derivative Works shall not include works that remain +      separable from, or merely link (or bind by name) to the interfaces of, +      the Work and Derivative Works thereof. + +      "Contribution" shall mean any work of authorship, including +      the original version of the Work and any modifications or additions +      to that Work or Derivative Works thereof, that is intentionally +      submitted to Licensor for inclusion in the Work by the copyright owner +      or by an individual or Legal Entity authorized to submit on behalf of +      the copyright owner. 
For the purposes of this definition, "submitted" +      means any form of electronic, verbal, or written communication sent +      to the Licensor or its representatives, including but not limited to +      communication on electronic mailing lists, source code control systems, +      and issue tracking systems that are managed by, or on behalf of, the +      Licensor for the purpose of discussing and improving the Work, but +      excluding communication that is conspicuously marked or otherwise +      designated in writing by the copyright owner as "Not a Contribution." + +      "Contributor" shall mean Licensor and any individual or Legal Entity +      on behalf of whom a Contribution has been received by Licensor and +      subsequently incorporated within the Work. + +   2. Grant of Copyright License. Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      copyright license to reproduce, prepare Derivative Works of, +      publicly display, publicly perform, sublicense, and distribute the +      Work and such Derivative Works in Source or Object form. + +   3. Grant of Patent License. Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      (except as stated in this section) patent license to make, have made, +      use, offer to sell, sell, import, and otherwise transfer the Work, +      where such license applies only to those patent claims licensable +      by such Contributor that are necessarily infringed by their +      Contribution(s) alone or by combination of their Contribution(s) +      with the Work to which such Contribution(s) was submitted. 
If You +      institute patent litigation against any entity (including a +      cross-claim or counterclaim in a lawsuit) alleging that the Work +      or a Contribution incorporated within the Work constitutes direct +      or contributory patent infringement, then any patent licenses +      granted to You under this License for that Work shall terminate +      as of the date such litigation is filed. + +   4. Redistribution. You may reproduce and distribute copies of the +      Work or Derivative Works thereof in any medium, with or without +      modifications, and in Source or Object form, provided that You +      meet the following conditions: + +      (a) You must give any other recipients of the Work or +          Derivative Works a copy of this License; and + +      (b) You must cause any modified files to carry prominent notices +          stating that You changed the files; and + +      (c) You must retain, in the Source form of any Derivative Works +          that You distribute, all copyright, patent, trademark, and +          attribution notices from the Source form of the Work, +          excluding those notices that do not pertain to any part of +          the Derivative Works; and + +      (d) If the Work includes a "NOTICE" text file as part of its +          distribution, then any Derivative Works that You distribute must +          include a readable copy of the attribution notices contained +          within such NOTICE file, excluding those notices that do not +          pertain to any part of the Derivative Works, in at least one +          of the following places: within a NOTICE text file distributed +          as part of the Derivative Works; within the Source form or +          documentation, if provided along with the Derivative Works; or, +          within a display generated by the Derivative Works, if and +          wherever such third-party notices normally appear. 
The contents +          of the NOTICE file are for informational purposes only and +          do not modify the License. You may add Your own attribution +          notices within Derivative Works that You distribute, alongside +          or as an addendum to the NOTICE text from the Work, provided +          that such additional attribution notices cannot be construed +          as modifying the License. + +      You may add Your own copyright statement to Your modifications and +      may provide additional or different license terms and conditions +      for use, reproduction, or distribution of Your modifications, or +      for any such Derivative Works as a whole, provided Your use, +      reproduction, and distribution of the Work otherwise complies with +      the conditions stated in this License. + +   5. Submission of Contributions. Unless You explicitly state otherwise, +      any Contribution intentionally submitted for inclusion in the Work +      by You to the Licensor shall be under the terms and conditions of +      this License, without any additional terms or conditions. +      Notwithstanding the above, nothing herein shall supersede or modify +      the terms of any separate license agreement you may have executed +      with Licensor regarding such Contributions. + +   6. Trademarks. This License does not grant permission to use the trade +      names, trademarks, service marks, or product names of the Licensor, +      except as required for reasonable and customary use in describing the +      origin of the Work and reproducing the content of the NOTICE file. + +   7. Disclaimer of Warranty. 
Unless required by applicable law or +      agreed to in writing, Licensor provides the Work (and each +      Contributor provides its Contributions) on an "AS IS" BASIS, +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +      implied, including, without limitation, any warranties or conditions +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +      PARTICULAR PURPOSE. You are solely responsible for determining the +      appropriateness of using or redistributing the Work and assume any +      risks associated with Your exercise of permissions under this License. + +   8. Limitation of Liability. In no event and under no legal theory, +      whether in tort (including negligence), contract, or otherwise, +      unless required by applicable law (such as deliberate and grossly +      negligent acts) or agreed to in writing, shall any Contributor be +      liable to You for damages, including any direct, indirect, special, +      incidental, or consequential damages of any character arising as a +      result of this License or out of the use or inability to use the +      Work (including but not limited to damages for loss of goodwill, +      work stoppage, computer failure or malfunction, or any and all +      other commercial damages or losses), even if such Contributor +      has been advised of the possibility of such damages. + +   9. Accepting Warranty or Additional Liability. While redistributing +      the Work or Derivative Works thereof, You may choose to offer, +      and charge a fee for, acceptance of support, warranty, indemnity, +      or other liability obligations and/or rights consistent with this +      License. 
However, in accepting such obligations, You may act only +      on Your own behalf and on Your sole responsibility, not on behalf +      of any other Contributor, and only if You agree to indemnify, +      defend, and hold each Contributor harmless for any liability +      incurred by, or claims asserted against, such Contributor by reason +      of your accepting any such warranty or additional liability. + +   END OF TERMS AND CONDITIONS + +   APPENDIX: How to apply the Apache License to your work. + +      To apply the Apache License to your work, attach the following +      boilerplate notice, with the fields enclosed by brackets "[]" +      replaced with your own identifying information. (Don't include +      the brackets!)  The text should be enclosed in the appropriate +      comment syntax for the file format. We also recommend that a +      file or class name and description of purpose be included on the +      same "printed page" as the copyright notice for easier +      identification within third-party archives. + +   Copyright [yyyy] [name of copyright owner] + +   Licensed under the Apache License, Version 2.0 (the "License"); +   you may not use this file except in compliance with the License. +   You may obtain a copy of the License at + +       http://www.apache.org/licenses/LICENSE-2.0 + +   Unless required by applicable law or agreed to in writing, software +   distributed under the License is distributed on an "AS IS" BASIS, +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +   See the License for the specific language governing permissions and +   limitations under the License. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 000000000..c6672c0a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,28 @@ +This page lists all active maintainers of this repository. 
If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) + +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..1f8960922 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,46 @@ +all: vet test testrace + +build: +	go build google.golang.org/grpc/... + +clean: +	go clean -i google.golang.org/grpc/... + +deps: +	GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: +	@ if ! which protoc > /dev/null; then \ +		echo "error: protoc not installed" >&2; \ +		exit 1; \ +	fi +	go generate google.golang.org/grpc/... + +test: +	go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... 
+ +testsubmodule: +	cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... +	cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: +	go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: +	GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps +	./vet.sh + +vetdeps: +	./vet.sh -install + +.PHONY: \ +	all \ +	build \ +	clean \ +	proto \ +	test \ +	testrace \ +	vet \ +	vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 000000000..0e6ae69a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,141 @@ +# gRPC-Go + +[](https://travis-ci.org/grpc/grpc-go) +[][API] +[](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. 
+ +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" +``` + +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. + +Otherwise, to install the `grpc-go` package, run the following command: + +```console +$ go get -u google.golang.org/grpc +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. + +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API +  reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + +  ```sh +  git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc +  ``` + +  You will need to do the same for all of grpc's dependencies in `golang.org`, +  e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go +  mod` to create aliases for golang.org packages.  In your project's directory: + +  ```sh +  go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest +  go mod tidy +  go mod vendor +  go build -mod=vendor +  ``` + +  Again, this will need to be done for all transitive dependencies hosted on +  golang.org as well. 
For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +#### If you are using Go modules: + +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files.  For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +```go +module <your module name> + +require ( +    google.golang.org/grpc v1.27.0 +) +``` + +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. *.proto +``` + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured +    your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). +    If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), +    to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. 
Turn on +logging on __both client and server__, and see if there are any transport +errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..be6e10870 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 000000000..02f5dc531 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. 
+// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs.  Keys must be hashable, and users should define their own +// types for keys.  Values should not be modified after they are added to an +// Attributes or if they were received from one.  If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. +type Attributes struct { +	m map[interface{}]interface{} +} + +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { +	return &Attributes{m: map[interface{}]interface{}{key: value}} +} + +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair.  If the same key appears multiple times, the +// last value overwrites all previous values for that key.  To remove an +// existing key, use a nil value.  value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { +	if a == nil { +		return New(key, value) +	} +	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} +	for k, v := range a.m { +		n.m[k] = v +	} +	n.m[key] = value +	return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key.  The returned value should not be modified. +func (a *Attributes) Value(key interface{}) interface{} { +	if a == nil { +		return nil +	} +	return a.m[key] +} + +// Equal returns whether a and o are equivalent.  If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes.  
If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { +	if a == nil && o == nil { +		return true +	} +	if a == nil || o == nil { +		return false +	} +	if len(a.m) != len(o.m) { +		return false +	} +	for k, v := range a.m { +		ov, ok := o.m[k] +		if !ok { +			// o missing element of a +			return false +		} +		if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { +			if !eq.Equal(ov) { +				return false +			} +		} else if v != ov { +			// Fallback to a standard equality check if Value is unimplemented. +			return false +		} +	} +	return true +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 000000000..29475e31c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. 
+ +package grpc + +import ( +	"time" + +	"google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ +	MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { +	// MaxDelay is the upper bound of backoff delay. +	MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { +	// Backoff specifies the configuration options for connection backoff. +	Backoff backoff.Config +	// MinConnectTimeout is the minimum amount of time we are willing to give a +	// connection to complete. +	MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 000000000..0787d0b50 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { +	// BaseDelay is the amount of time to backoff after the first failure. +	BaseDelay time.Duration +	// Multiplier is the factor with which to multiply backoffs after a +	// failed retry. Should ideally be greater than 1. +	Multiplier float64 +	// Jitter is the factor with which backoffs are randomized. +	Jitter float64 +	// MaxDelay is the upper bound of backoff delay. +	MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ +	BaseDelay:  1.0 * time.Second, +	Multiplier: 1.6, +	Jitter:     0.2, +	MaxDelay:   120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000..09d61dd1b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,404 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( +	"context" +	"encoding/json" +	"errors" +	"net" +	"strings" + +	"google.golang.org/grpc/channelz" +	"google.golang.org/grpc/connectivity" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/internal" +	"google.golang.org/grpc/metadata" +	"google.golang.org/grpc/resolver" +	"google.golang.org/grpc/serviceconfig" +) + +var ( +	// m is a map from name to balancer builder. +	m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder.  If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { +	m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. 
+func unregisterForTesting(name string) { +	delete(m, name) +} + +func init() { +	internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { +	if b, ok := m[strings.ToLower(name)]; ok { +		return b +	} +	return nil +} + +// A SubConn represents a single connection to a gRPC backend service. +// +// Each SubConn contains a list of addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect.  If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. +// +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. +type SubConn interface { +	// UpdateAddresses updates the addresses used in this SubConn. +	// gRPC checks if currently-connected address is still in the new list. +	// If it's in the list, the connection will be kept. +	// If it's not in the list, the connection will gracefully closed, and +	// a new connection will be created. +	// +	// This will trigger a state transition for the SubConn. +	// +	// Deprecated: This method is now part of the ClientConn interface and will +	// eventually be removed from here. 
+	UpdateAddresses([]resolver.Address) +	// Connect starts the connecting for this SubConn. +	Connect() +	// GetOrBuildProducer returns a reference to the existing Producer for this +	// ProducerBuilder in this SubConn, or, if one does not currently exist, +	// creates a new one and returns it.  Returns a close function which must +	// be called when the Producer is no longer needed. +	GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { +	// CredsBundle is the credentials bundle that will be used in the created +	// SubConn. If it's nil, the original creds from grpc DialOptions will be +	// used. +	// +	// Deprecated: Use the Attributes field in resolver.Address to pass +	// arbitrary data to the credential handshaker. +	CredsBundle credentials.Bundle +	// HealthCheckEnabled indicates whether health check service should be +	// enabled on this SubConn +	HealthCheckEnabled bool +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { +	// State contains the connectivity state of the balancer, which is used to +	// determine the state of the ClientConn. +	ConnectivityState connectivity.State +	// Picker is used to choose connections (SubConns) for RPCs. +	Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { +	// NewSubConn is called by balancer to create a new SubConn. +	// It doesn't block and wait for the connections to be established. +	// Behaviors of the SubConn can be controlled by options. 
+	NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) +	// RemoveSubConn removes the SubConn from ClientConn. +	// The SubConn will be shutdown. +	RemoveSubConn(SubConn) +	// UpdateAddresses updates the addresses used in the passed in SubConn. +	// gRPC checks if the currently connected address is still in the new list. +	// If so, the connection will be kept. Else, the connection will be +	// gracefully closed, and a new connection will be created. +	// +	// This will trigger a state transition for the SubConn. +	UpdateAddresses(SubConn, []resolver.Address) + +	// UpdateState notifies gRPC that the balancer's internal state has +	// changed. +	// +	// gRPC will update the connectivity state of the ClientConn, and will call +	// Pick on the new Picker to pick new SubConns. +	UpdateState(State) + +	// ResolveNow is called by balancer to notify gRPC to do a name resolving. +	ResolveNow(resolver.ResolveNowOptions) + +	// Target returns the dial target for this ClientConn. +	// +	// Deprecated: Use the Target field in the BuildOptions instead. +	Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { +	// DialCreds is the transport credentials to use when communicating with a +	// remote load balancer server. Balancer implementations which do not +	// communicate with a remote load balancer server can ignore this field. +	DialCreds credentials.TransportCredentials +	// CredsBundle is the credentials bundle to use when communicating with a +	// remote load balancer server. Balancer implementations which do not +	// communicate with a remote load balancer server can ignore this field. +	CredsBundle credentials.Bundle +	// Dialer is the custom dialer to use when communicating with a remote load +	// balancer server. Balancer implementations which do not communicate with a +	// remote load balancer server can ignore this field. 
+	Dialer func(context.Context, string) (net.Conn, error) +	// Authority is the server name to use as part of the authentication +	// handshake when communicating with a remote load balancer server. Balancer +	// implementations which do not communicate with a remote load balancer +	// server can ignore this field. +	Authority string +	// ChannelzParentID is the parent ClientConn's channelz ID. +	ChannelzParentID *channelz.Identifier +	// CustomUserAgent is the custom user agent set on the parent ClientConn. +	// The balancer should set the same custom user agent if it creates a +	// ClientConn. +	CustomUserAgent string +	// Target contains the parsed address info of the dial target. It is the +	// same resolver.Target as passed to the resolver. See the documentation for +	// the resolver.Target type for details about what it contains. +	Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { +	// Build creates a new balancer with the ClientConn. +	Build(cc ClientConn, opts BuildOptions) Balancer +	// Name returns the name of balancers built by this builder. +	// It will be used to pick balancers (for example in service config). +	Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { +	// ParseConfig parses the JSON load balancer config provided into an +	// internal form or returns an error if the config is invalid.  For future +	// compatibility reasons, unknown fields in the config should be ignored. +	ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { +	// FullMethodName is the method name that NewClientStream() is called +	// with. The canonical format is /service/Method. +	FullMethodName string +	// Ctx is the RPC's context, and may contain relevant RPC-level information +	// like the outgoing header metadata. 
+	Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { +	// Err is the rpc error the RPC finished with. It could be nil. +	Err error +	// Trailer contains the metadata from the RPC's trailer, if present. +	Trailer metadata.MD +	// BytesSent indicates if any bytes have been sent to the server. +	BytesSent bool +	// BytesReceived indicates if any byte has been received from the server. +	BytesReceived bool +	// ServerLoad is the load received from server. It's usually sent as part of +	// trailing metadata. +	// +	// The only supported type now is *orca_v3.LoadReport. +	ServerLoad interface{} +} + +var ( +	// ErrNoSubConnAvailable indicates no SubConn is available for pick(). +	// gRPC will block the RPC until a new picker is available via UpdateState(). +	ErrNoSubConnAvailable = errors.New("no SubConn is available") +	// ErrTransientFailure indicates all SubConns are in TransientFailure. +	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail. +	// +	// Deprecated: return an appropriate error based on the last resolution or +	// connection attempt instead.  The behavior is the same for any non-gRPC +	// status error. +	ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { +	// SubConn is the connection to use for this pick, if its state is Ready. +	// If the state is not Ready, gRPC will block the RPC until a new Picker is +	// provided by the balancer (using ClientConn.UpdateState).  The SubConn +	// must be one returned by ClientConn.NewSubConn. +	SubConn SubConn + +	// Done is called when the RPC is completed.  If the SubConn is not ready, +	// this will be called with a nil parameter.  If the SubConn is not a valid +	// type, Done may not be called.  May be nil if the balancer does not wish +	// to be notified when the RPC completes. 
+	Done func(DoneInfo) + +	// Metadata provides a way for LB policies to inject arbitrary per-call +	// metadata. Any metadata returned here will be merged with existing +	// metadata added by the client application. +	// +	// LB policies with child policies are responsible for propagating metadata +	// injected by their children to the ClientConn, as part of Pick(). +	Metatada metadata.MD +} + +// TransientFailureError returns e.  It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { +	// Pick returns the connection to use for this RPC and related information. +	// +	// Pick should not block.  If the balancer needs to do I/O or any blocking +	// or time-consuming work to service this call, it should return +	// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when +	// the Picker is updated (using ClientConn.UpdateState). +	// +	// If an error is returned: +	// +	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new +	//   Picker is provided by the balancer (using ClientConn.UpdateState). +	// +	// - If the error is a status error (implemented by the grpc/status +	//   package), gRPC will terminate the RPC with the code and message +	//   provided. +	// +	// - For all other errors, wait for ready RPCs will wait, but non-wait for +	//   ready RPCs will be terminated with this error's Error() string and +	//   status code Unavailable. +	Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. 
+// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine.  There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { +	// UpdateClientConnState is called by gRPC when the state of the ClientConn +	// changes.  If the error returned is ErrBadResolverState, the ClientConn +	// will begin calling ResolveNow on the active name resolver with +	// exponential backoff until a subsequent call to UpdateClientConnState +	// returns a nil error.  Any other errors are currently ignored. +	UpdateClientConnState(ClientConnState) error +	// ResolverError is called by gRPC when the name resolver reports an error. +	ResolverError(error) +	// UpdateSubConnState is called by gRPC when the state of a SubConn +	// changes. +	UpdateSubConnState(SubConn, SubConnState) +	// Close closes the balancer. The balancer is not required to call +	// ClientConn.RemoveSubConn for its existing SubConns. +	Close() +} + +// ExitIdler is an optional interface for balancers to implement.  If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle.  If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { +	// ExitIdle instructs the LB policy to reconnect to backends / exit the +	// IDLE state, if appropriate and possible.  Note that SubConns that enter +	// the IDLE state will not reconnect until SubConn.Connect is called. +	ExitIdle() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { +	// ConnectivityState is the connectivity state of the SubConn. 
+	ConnectivityState connectivity.State +	// ConnectionError is set if the ConnectivityState is TransientFailure, +	// describing the reason the SubConn failed.  Otherwise, it is nil. +	ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { +	ResolverState resolver.State +	// The parsed load balancing configuration returned by the builder's +	// ParseConfig method, if implemented. +	BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer.  It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { +	// Build creates a Producer.  The first parameter is always a +	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the +	// associated SubConn), but is declared as interface{} to avoid a +	// dependency cycle.  Should also return a close function that will be +	// called when all references to the Producer have been given up. +	Build(grpcClientConnInterface interface{}) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers.  It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000..3929c26d3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,254 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( +	"errors" +	"fmt" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/connectivity" +	"google.golang.org/grpc/grpclog" +	"google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { +	name          string +	pickerBuilder PickerBuilder +	config        Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +	bal := &baseBalancer{ +		cc:            cc, +		pickerBuilder: bb.pickerBuilder, + +		subConns: resolver.NewAddressMap(), +		scStates: make(map[balancer.SubConn]connectivity.State), +		csEvltr:  &balancer.ConnectivityStateEvaluator{}, +		config:   bb.config, +		state:    connectivity.Connecting, +	} +	// Initialize picker to a picker that always returns +	// ErrNoSubConnAvailable, because when state of a SubConn changes, we +	// may call UpdateState with this picker. 
+	bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) +	return bal +} + +func (bb *baseBuilder) Name() string { +	return bb.name +} + +type baseBalancer struct { +	cc            balancer.ClientConn +	pickerBuilder PickerBuilder + +	csEvltr *balancer.ConnectivityStateEvaluator +	state   connectivity.State + +	subConns *resolver.AddressMap +	scStates map[balancer.SubConn]connectivity.State +	picker   balancer.Picker +	config   Config + +	resolverErr error // the last error reported by the resolver; cleared on successful resolution +	connErr     error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { +	b.resolverErr = err +	if b.subConns.Len() == 0 { +		b.state = connectivity.TransientFailure +	} + +	if b.state != connectivity.TransientFailure { +		// The picker will not change since the balancer does not currently +		// report an error. +		return +	} +	b.regeneratePicker() +	b.cc.UpdateState(balancer.State{ +		ConnectivityState: b.state, +		Picker:            b.picker, +	}) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { +	// TODO: handle s.ResolverState.ServiceConfig? +	if logger.V(2) { +		logger.Info("base.baseBalancer: got new ClientConn state: ", s) +	} +	// Successful resolution; clear resolver error and ensure we return nil. +	b.resolverErr = nil +	// addrsSet is the set converted from addrs, it's used for quick lookup of an address. +	addrsSet := resolver.NewAddressMap() +	for _, a := range s.ResolverState.Addresses { +		addrsSet.Set(a, nil) +		if _, ok := b.subConns.Get(a); !ok { +			// a is a new address (not existing in b.subConns). 
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) +			if err != nil { +				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) +				continue +			} +			b.subConns.Set(a, sc) +			b.scStates[sc] = connectivity.Idle +			b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) +			sc.Connect() +		} +	} +	for _, a := range b.subConns.Keys() { +		sci, _ := b.subConns.Get(a) +		sc := sci.(balancer.SubConn) +		// a was removed by resolver. +		if _, ok := addrsSet.Get(a); !ok { +			b.cc.RemoveSubConn(sc) +			b.subConns.Delete(a) +			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown. +			// The entry will be deleted in UpdateSubConnState. +		} +	} +	// If resolver state contains no addresses, return an error so ClientConn +	// will trigger re-resolve. Also records this as an resolver error, so when +	// the overall state turns transient failure, the error message will have +	// the zero address information. +	if len(s.ResolverState.Addresses) == 0 { +		b.ResolverError(errors.New("produced zero addresses")) +		return balancer.ErrBadResolverState +	} + +	b.regeneratePicker() +	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +	return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error.  Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { +	// connErr must always be non-nil unless there are no SubConns, in which +	// case resolverErr must be non-nil. 
+	if b.connErr == nil { +		return fmt.Errorf("last resolver error: %v", b.resolverErr) +	} +	if b.resolverErr == nil { +		return fmt.Errorf("last connection error: %v", b.connErr) +	} +	return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +//   - errPicker if the balancer is in TransientFailure, +//   - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { +	if b.state == connectivity.TransientFailure { +		b.picker = NewErrPicker(b.mergeErrors()) +		return +	} +	readySCs := make(map[balancer.SubConn]SubConnInfo) + +	// Filter out all ready SCs from full subConn map. +	for _, addr := range b.subConns.Keys() { +		sci, _ := b.subConns.Get(addr) +		sc := sci.(balancer.SubConn) +		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { +			readySCs[sc] = SubConnInfo{Address: addr} +		} +	} +	b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +	s := state.ConnectivityState +	if logger.V(2) { +		logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) +	} +	oldS, ok := b.scStates[sc] +	if !ok { +		if logger.V(2) { +			logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) +		} +		return +	} +	if oldS == connectivity.TransientFailure && +		(s == connectivity.Connecting || s == connectivity.Idle) { +		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or +		// CONNECTING transitions to prevent the aggregated state from being +		// always CONNECTING when many backends exist but are all down. 
+		if s == connectivity.Idle { +			sc.Connect() +		} +		return +	} +	b.scStates[sc] = s +	switch s { +	case connectivity.Idle: +		sc.Connect() +	case connectivity.Shutdown: +		// When an address was removed by resolver, b called RemoveSubConn but +		// kept the sc's state in scStates. Remove state for this sc here. +		delete(b.scStates, sc) +	case connectivity.TransientFailure: +		// Save error to be reported via picker. +		b.connErr = state.ConnectionError +	} + +	b.state = b.csEvltr.RecordTransition(oldS, s) + +	// Regenerate picker when one of the following happens: +	//  - this sc entered or left ready +	//  - the aggregated state of balancer is TransientFailure +	//    (may need to update error message) +	if (s == connectivity.Ready) != (oldS == connectivity.Ready) || +		b.state == connectivity.TransientFailure { +		b.regeneratePicker() +	} +	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { +	return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { +	err error // Pick() always returns this err. 
+} + +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +	return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000..e31d76e33 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { +	// Build returns a picker that will be used by gRPC to pick a SubConn. +	Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. 
+type PickerBuildInfo struct { +	// ReadySCs is a map from all ready SubConns to the Addresses used to +	// create them. +	ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { +	Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { +	// HealthCheck indicates whether health checking should be enabled for this specific balancer. +	HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { +	return &baseBuilder{ +		name:          name, +		pickerBuilder: pb, +		config:        config, +	} +} diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 000000000..c33413581 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. 
+type ConnectivityStateEvaluator struct { +	numReady            uint64 // Number of addrConns in ready state. +	numConnecting       uint64 // Number of addrConns in connecting state. +	numTransientFailure uint64 // Number of addrConns in transient failure state. +	numIdle             uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +//   - If at least one SubConn in Ready, the aggregated state is Ready; +//   - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +//   - Else if at least one SubConn is Idle, the aggregated state is Idle; +//   - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { +	// Update counters. +	for idx, state := range []connectivity.State{oldState, newState} { +		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. +		switch state { +		case connectivity.Ready: +			cse.numReady += updateVal +		case connectivity.Connecting: +			cse.numConnecting += updateVal +		case connectivity.TransientFailure: +			cse.numTransientFailure += updateVal +		case connectivity.Idle: +			cse.numIdle += updateVal +		} +	} +	return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { +	// Evaluate. 
+	if cse.numReady > 0 { +		return connectivity.Ready +	} +	if cse.numConnecting > 0 { +		return connectivity.Connecting +	} +	if cse.numIdle > 0 { +		return connectivity.Idle +	} +	return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 000000000..4ecfa1c21 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( +	"google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { +	// BalancerAddresses contains the remote load balancer address(es).  If +	// set, overrides any resolver-provided addresses with Type of GRPCLB. +	BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s.  s's +// data should not be mutated after calling Set. 
+func Set(state resolver.State, s *State) resolver.State { +	state.Attributes = state.Attributes.WithValue(key, s) +	return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { +	s, _ := state.Attributes.Value(key).(*State) +	return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..f7031ad22 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( +	"sync/atomic" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/balancer/base" +	"google.golang.org/grpc/grpclog" +	"google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +var logger = grpclog.Component("roundrobin") + +// newBuilder creates a new roundrobin balancer builder. 
+func newBuilder() balancer.Builder { +	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { +	balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { +	logger.Infof("roundrobinPicker: Build called with info: %v", info) +	if len(info.ReadySCs) == 0 { +		return base.NewErrPicker(balancer.ErrNoSubConnAvailable) +	} +	scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) +	for sc := range info.ReadySCs { +		scs = append(scs, sc) +	} +	return &rrPicker{ +		subConns: scs, +		// Start at a random index, as the same RR balancer rebuilds a new +		// picker when SubConn states change, and we don't want to apply excess +		// load to the first server in the list. +		next: uint32(grpcrand.Intn(len(scs))), +	} +} + +type rrPicker struct { +	// subConns is the snapshot of the roundrobin balancer when this picker was +	// created. The slice is immutable. Each Get() will do a round robin +	// selection from it and return the selected SubConn. +	subConns []balancer.SubConn +	next     uint32 +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { +	subConnsLen := uint32(len(p.subConns)) +	nextIndex := atomic.AddUint32(&p.next, 1) + +	sc := p.subConns[nextIndex%subConnsLen] +	return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 000000000..0359956d3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,481 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"context" +	"fmt" +	"strings" +	"sync" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/connectivity" +	"google.golang.org/grpc/internal/balancer/gracefulswitch" +	"google.golang.org/grpc/internal/buffer" +	"google.golang.org/grpc/internal/channelz" +	"google.golang.org/grpc/internal/grpcsync" +	"google.golang.org/grpc/resolver" +	"google.golang.org/grpc/status" +) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { +	cc *ClientConn + +	// Since these fields are accessed only from handleXxx() methods which are +	// synchronized by the watcher goroutine, we do not need a mutex to protect +	// these fields. +	balancer        *gracefulswitch.Balancer +	curBalancerName string + +	updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). 
+	resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. +	closed   *grpcsync.Event   // Indicates if close has been called. +	done     *grpcsync.Event   // Indicates if close has completed its work. +} + +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { +	ccb := &ccBalancerWrapper{ +		cc:       cc, +		updateCh: buffer.NewUnbounded(), +		resultCh: buffer.NewUnbounded(), +		closed:   grpcsync.NewEvent(), +		done:     grpcsync.NewEvent(), +	} +	go ccb.watcher() +	ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) +	return ccb +} + +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { +	ccs *balancer.ClientConnState +} + +type scStateUpdate struct { +	sc    balancer.SubConn +	state connectivity.State +	err   error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { +	err error +} + +type switchToUpdate struct { +	name string +} + +type subConnUpdate struct { +	acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. 
+func (ccb *ccBalancerWrapper) watcher() { +	for { +		select { +		case u := <-ccb.updateCh.Get(): +			ccb.updateCh.Load() +			if ccb.closed.HasFired() { +				break +			} +			switch update := u.(type) { +			case *ccStateUpdate: +				ccb.handleClientConnStateChange(update.ccs) +			case *scStateUpdate: +				ccb.handleSubConnStateChange(update) +			case *exitIdleUpdate: +				ccb.handleExitIdle() +			case *resolverErrorUpdate: +				ccb.handleResolverError(update.err) +			case *switchToUpdate: +				ccb.handleSwitchTo(update.name) +			case *subConnUpdate: +				ccb.handleRemoveSubConn(update.acbw) +			default: +				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) +			} +		case <-ccb.closed.Done(): +		} + +		if ccb.closed.HasFired() { +			ccb.handleClose() +			return +		} +	} +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { +	ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + +	var res interface{} +	select { +	case res = <-ccb.resultCh.Get(): +		ccb.resultCh.Load() +	case <-ccb.closed.Done(): +		// Return early if the balancer wrapper is closed while we are waiting for +		// the underlying balancer to process a ClientConnState update. +		return nil +	} +	// If the returned error is nil, attempting to type assert to error leads to +	// panic. So, this needs to handled separately. +	if res == nil { +		return nil +	} +	return res.(error) +} + +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
+// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { +	if ccb.curBalancerName != grpclbName { +		// Filter any grpclb addresses since we don't have the grpclb balancer. +		var addrs []resolver.Address +		for _, addr := range ccs.ResolverState.Addresses { +			if addr.Type == resolver.GRPCLB { +				continue +			} +			addrs = append(addrs, addr) +		} +		ccs.ResolverState.Addresses = addrs +	} +	ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) +} + +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { +	// When updating addresses for a SubConn, if the address in use is not in +	// the new addresses, the old ac will be tearDown() and a new ac will be +	// created. tearDown() generates a state change with Shutdown state, we +	// don't want the balancer to receive this state change. So before +	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and +	// this function will be called with (nil, Shutdown). We don't need to call +	// balancer method in this case. +	if sc == nil { +		return +	} +	ccb.updateCh.Put(&scStateUpdate{ +		sc:    sc, +		state: s, +		err:   err, +	}) +} + +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { +	ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { +	ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { +	if ccb.cc.GetState() != connectivity.Idle { +		return +	} +	ccb.balancer.ExitIdle() +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { +	ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { +	ccb.balancer.ResolverError(err) +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { +	ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { +	// TODO: Other languages use case-insensitive balancer registries. We should +	// switch as well. See: https://github.com/grpc/grpc-go/issues/5288. +	if strings.EqualFold(ccb.curBalancerName, name) { +		return +	} + +	// TODO: Ensure that name is a registered LB policy when we get here. 
+	// We currently only validate the `loadBalancingConfig` field. We need to do +	// the same for the `loadBalancingPolicy` field and reject the service config +	// if the specified policy is not registered. +	builder := balancer.Get(name) +	if builder == nil { +		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) +		builder = newPickfirstBuilder() +	} else { +		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) +	} + +	if err := ccb.balancer.SwitchTo(builder); err != nil { +		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) +		return +	} +	ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. +func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { +	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { +	ccb.closed.Fire() +	<-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { +	ccb.balancer.Close() +	ccb.done.Fire() +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { +	if len(addrs) <= 0 { +		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") +	} +	ac, err := ccb.cc.newAddrConn(addrs, opts) +	if err != nil { +		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) +		return nil, err +	} +	acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} +	acbw.ac.mu.Lock() +	ac.acbw = acbw +	acbw.ac.mu.Unlock() +	return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +	// Before we switched the 
ccBalancerWrapper to use gracefulswitch.Balancer, it +	// was required to handle the RemoveSubConn() method asynchronously by pushing +	// the update onto the update channel. This was done to avoid a deadlock as +	// switchBalancer() was holding cc.mu when calling Close() on the old +	// balancer, which would in turn call RemoveSubConn(). +	// +	// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this +	// asynchronously is probably not required anymore since the switchTo() method +	// handles the balancer switch by pushing the update onto the channel. +	// TODO(easwars): Handle this inline. +	acbw, ok := sc.(*acBalancerWrapper) +	if !ok { +		return +	} +	ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { +	acbw, ok := sc.(*acBalancerWrapper) +	if !ok { +		return +	} +	acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { +	// Update picker before updating state.  Even though the ordering here does +	// not matter, it can lead to multiple calls of Pick in the common start-up +	// case where we wait for ready and then perform an RPC.  If the picker is +	// updated later, we could call the "connecting" picker when the state is +	// updated, and then call the "ready" picker after the picker gets updated. +	ccb.cc.blockingpicker.updatePicker(s.Picker) +	ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { +	ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { +	return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. 
+type acBalancerWrapper struct { +	mu        sync.Mutex +	ac        *addrConn +	producers map[balancer.ProducerBuilder]*refCountedProducer +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { +	acbw.mu.Lock() +	defer acbw.mu.Unlock() +	if len(addrs) <= 0 { +		acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) +		return +	} +	if !acbw.ac.tryUpdateAddrs(addrs) { +		cc := acbw.ac.cc +		opts := acbw.ac.scopts +		acbw.ac.mu.Lock() +		// Set old ac.acbw to nil so the Shutdown state update will be ignored +		// by balancer. +		// +		// TODO(bar) the state transition could be wrong when tearDown() old ac +		// and creating new ac, fix the transition. +		acbw.ac.acbw = nil +		acbw.ac.mu.Unlock() +		acState := acbw.ac.getState() +		acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + +		if acState == connectivity.Shutdown { +			return +		} + +		newAC, err := cc.newAddrConn(addrs, opts) +		if err != nil { +			channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) +			return +		} +		acbw.ac = newAC +		newAC.mu.Lock() +		newAC.acbw = acbw +		newAC.mu.Unlock() +		if acState != connectivity.Idle { +			go newAC.connect() +		} +	} +} + +func (acbw *acBalancerWrapper) Connect() { +	acbw.mu.Lock() +	defer acbw.mu.Unlock() +	go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { +	acbw.mu.Lock() +	defer acbw.mu.Unlock() +	return acbw.ac +} + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn.  If the addrConn is not +// ready, returns errSubConnNotReady. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { +	transport := acbw.ac.getReadyTransport() +	if transport == nil { +		return nil, errSubConnNotReady +	} +	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) 
+} + +// Invoke performs a unary RPC.  If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +	cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) +	if err != nil { +		return err +	} +	if err := cs.SendMsg(args); err != nil { +		return err +	} +	return cs.RecvMsg(reply) +} + +type refCountedProducer struct { +	producer balancer.Producer +	refs     int    // number of current refs to the producer +	close    func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { +	acbw.mu.Lock() +	defer acbw.mu.Unlock() + +	// Look up existing producer from this builder. +	pData := acbw.producers[pb] +	if pData == nil { +		// Not found; create a new one and add it to the producers map. +		p, close := pb.Build(acbw) +		pData = &refCountedProducer{producer: p, close: close} +		acbw.producers[pb] = pData +	} +	// Account for this new reference. +	pData.refs++ + +	// Return a cleanup function wrapped in a OnceFunc to remove this reference +	// and delete the refCountedProducer from the map if the total reference +	// count goes to zero. +	unref := func() { +		acbw.mu.Lock() +		pData.refs-- +		if pData.refs == 0 { +			defer pData.close() // Run outside the acbw mutex +			delete(acbw.producers, pb) +		} +		acbw.mu.Unlock() +	} +	return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 000000000..66d141fce --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1183 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//     http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// 	protoc-gen-go v1.28.1 +// 	protoc        v3.14.0 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( +	protoreflect "google.golang.org/protobuf/reflect/protoreflect" +	protoimpl "google.golang.org/protobuf/runtime/protoimpl" +	durationpb "google.golang.org/protobuf/types/known/durationpb" +	timestamppb "google.golang.org/protobuf/types/known/timestamppb" +	reflect "reflect" +	sync "sync" +) + +const ( +	// Verify that this generated code is sufficiently up-to-date. +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) +	// Verify that runtime/protoimpl is sufficiently up-to-date. +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. 
+type GrpcLogEntry_EventType int32 + +const ( +	GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 +	// Header sent from client to server +	GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 +	// Header sent from server to client +	GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 +	// Message sent from client to server +	GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 +	// Message sent from server to client +	GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 +	// A signal that client is done sending +	GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 +	// Trailer indicates the end of the RPC. +	// On client side, this event means a trailer was either received +	// from the network or the gRPC library locally generated a status +	// to inform the application about a failure. +	// On server side, this event means the server application requested +	// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after +	// this due to races on server side. +	GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 +	// A signal that the RPC is cancelled. On client side, this +	// indicates the client application requests a cancellation. +	// On server side, this indicates that cancellation was detected. +	// Note: This marks the end of the RPC. Events may arrive after +	// this due to races. For example, on client side a trailer +	// may arrive even though the application requested to cancel the RPC. +	GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. 
+var ( +	GrpcLogEntry_EventType_name = map[int32]string{ +		0: "EVENT_TYPE_UNKNOWN", +		1: "EVENT_TYPE_CLIENT_HEADER", +		2: "EVENT_TYPE_SERVER_HEADER", +		3: "EVENT_TYPE_CLIENT_MESSAGE", +		4: "EVENT_TYPE_SERVER_MESSAGE", +		5: "EVENT_TYPE_CLIENT_HALF_CLOSE", +		6: "EVENT_TYPE_SERVER_TRAILER", +		7: "EVENT_TYPE_CANCEL", +	} +	GrpcLogEntry_EventType_value = map[string]int32{ +		"EVENT_TYPE_UNKNOWN":           0, +		"EVENT_TYPE_CLIENT_HEADER":     1, +		"EVENT_TYPE_SERVER_HEADER":     2, +		"EVENT_TYPE_CLIENT_MESSAGE":    3, +		"EVENT_TYPE_SERVER_MESSAGE":    4, +		"EVENT_TYPE_CLIENT_HALF_CLOSE": 5, +		"EVENT_TYPE_SERVER_TRAILER":    6, +		"EVENT_TYPE_CANCEL":            7, +	} +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { +	p := new(GrpcLogEntry_EventType) +	*p = x +	return p +} + +func (x GrpcLogEntry_EventType) String() string { +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { +	return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { +	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { +	return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { +	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( +	GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 +	GrpcLogEntry_LOGGER_CLIENT  GrpcLogEntry_Logger = 1 +	GrpcLogEntry_LOGGER_SERVER  GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. 
+var ( +	GrpcLogEntry_Logger_name = map[int32]string{ +		0: "LOGGER_UNKNOWN", +		1: "LOGGER_CLIENT", +		2: "LOGGER_SERVER", +	} +	GrpcLogEntry_Logger_value = map[string]int32{ +		"LOGGER_UNKNOWN": 0, +		"LOGGER_CLIENT":  1, +		"LOGGER_SERVER":  2, +	} +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { +	p := new(GrpcLogEntry_Logger) +	*p = x +	return p +} + +func (x GrpcLogEntry_Logger) String() string { +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { +	return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { +	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { +	return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { +	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( +	Address_TYPE_UNKNOWN Address_Type = 0 +	// address is in 1.2.3.4 form +	Address_TYPE_IPV4 Address_Type = 1 +	// address is in IPv6 canonical form (RFC5952 section 4) +	// The scope is NOT included in the address string. +	Address_TYPE_IPV6 Address_Type = 2 +	// address is UDS string +	Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. 
+var ( +	Address_Type_name = map[int32]string{ +		0: "TYPE_UNKNOWN", +		1: "TYPE_IPV4", +		2: "TYPE_IPV6", +		3: "TYPE_UNIX", +	} +	Address_Type_value = map[string]int32{ +		"TYPE_UNKNOWN": 0, +		"TYPE_IPV4":    1, +		"TYPE_IPV6":    2, +		"TYPE_UNIX":    3, +	} +) + +func (x Address_Type) Enum() *Address_Type { +	p := new(Address_Type) +	*p = x +	return p +} + +func (x Address_Type) String() string { +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { +	return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { +	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { +	return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { +	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { +	state         protoimpl.MessageState +	sizeCache     protoimpl.SizeCache +	unknownFields protoimpl.UnknownFields + +	// The timestamp of the binary log message +	Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +	// Uniquely identifies a call. The value must not be 0 in order to disambiguate +	// from an unset value. +	// Each call may have several log entries, they will all have the same call_id. +	// Nothing is guaranteed about their value other than they are unique across +	// different RPCs in the same gRPC process. +	CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` +	// The entry sequence id for this call. The first GrpcLogEntry has a +	// value of 1, to disambiguate from an unset value. 
The purpose of +	// this field is to detect missing entries in environments where +	// durability or ordering is not guaranteed. +	SequenceIdWithinCall uint64                 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` +	Type                 GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` +	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum +	// The logger uses one of the following fields to record the payload, +	// according to the type of the log entry. +	// +	// Types that are assignable to Payload: +	// +	//	*GrpcLogEntry_ClientHeader +	//	*GrpcLogEntry_ServerHeader +	//	*GrpcLogEntry_Message +	//	*GrpcLogEntry_Trailer +	Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` +	// true if payload does not represent the full message or metadata. +	PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` +	// Peer address information, will only be recorded on the first +	// incoming event. On client side, peer is logged on +	// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in +	// the case of trailers-only. On server side, peer is always +	// logged on EVENT_TYPE_CLIENT_HEADER. 
+	Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { +	*x = GrpcLogEntry{} +	if protoimpl.UnsafeEnabled { +		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) +		ms.StoreMessageInfo(mi) +	} +} + +func (x *GrpcLogEntry) String() string { +	return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] +	if protoimpl.UnsafeEnabled && x != nil { +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) +		if ms.LoadMessageInfo() == nil { +			ms.StoreMessageInfo(mi) +		} +		return ms +	} +	return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. +func (*GrpcLogEntry) Descriptor() ([]byte, []int) { +	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} +} + +func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { +	if x != nil { +		return x.Timestamp +	} +	return nil +} + +func (x *GrpcLogEntry) GetCallId() uint64 { +	if x != nil { +		return x.CallId +	} +	return 0 +} + +func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { +	if x != nil { +		return x.SequenceIdWithinCall +	} +	return 0 +} + +func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType { +	if x != nil { +		return x.Type +	} +	return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { +	if x != nil { +		return x.Logger +	} +	return GrpcLogEntry_LOGGER_UNKNOWN +} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { +	if m != nil { +		return m.Payload +	} +	return nil +} + +func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { +	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { +		return x.ClientHeader +	} +	return nil +} + +func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { +	if x, ok := 
x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { +		return x.ServerHeader +	} +	return nil +} + +func (x *GrpcLogEntry) GetMessage() *Message { +	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { +		return x.Message +	} +	return nil +} + +func (x *GrpcLogEntry) GetTrailer() *Trailer { +	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { +		return x.Trailer +	} +	return nil +} + +func (x *GrpcLogEntry) GetPayloadTruncated() bool { +	if x != nil { +		return x.PayloadTruncated +	} +	return false +} + +func (x *GrpcLogEntry) GetPeer() *Address { +	if x != nil { +		return x.Peer +	} +	return nil +} + +type isGrpcLogEntry_Payload interface { +	isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { +	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { +	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { +	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE +	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { +	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +type ClientHeader struct { +	state         protoimpl.MessageState +	sizeCache     protoimpl.SizeCache +	unknownFields protoimpl.UnknownFields + +	// This contains only the metadata from the application. +	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +	// The name of the RPC method, which looks something like: +	// /<service>/<method> +	// Note the leading "/" character. 
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` +	// A single process may be used to run multiple virtual +	// servers with different identities. +	// The authority is the name of such a server identitiy. +	// It is typically a portion of the URI in the form of +	// <host> or <host>:<port> . +	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` +	// the RPC timeout +	Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { +	*x = ClientHeader{} +	if protoimpl.UnsafeEnabled { +		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) +		ms.StoreMessageInfo(mi) +	} +} + +func (x *ClientHeader) String() string { +	return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { +	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] +	if protoimpl.UnsafeEnabled && x != nil { +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) +		if ms.LoadMessageInfo() == nil { +			ms.StoreMessageInfo(mi) +		} +		return ms +	} +	return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. 
func (*ClientHeader) Descriptor() ([]byte, []int) {
	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
}

// GetMetadata returns the application-supplied header metadata, or nil if the
// receiver is nil.
func (x *ClientHeader) GetMetadata() *Metadata {
	if x != nil {
		return x.Metadata
	}
	return nil
}

// GetMethodName returns the full RPC method name ("/<service>/<method>"), or
// "" if the receiver is nil.
func (x *ClientHeader) GetMethodName() string {
	if x != nil {
		return x.MethodName
	}
	return ""
}

// GetAuthority returns the server authority (<host> or <host>:<port>), or ""
// if the receiver is nil.
func (x *ClientHeader) GetAuthority() string {
	if x != nil {
		return x.Authority
	}
	return ""
}

// GetTimeout returns the RPC timeout, or nil if unset or the receiver is nil.
func (x *ClientHeader) GetTimeout() *durationpb.Duration {
	if x != nil {
		return x.Timeout
	}
	return nil
}

// ServerHeader is the binary-log payload recorded for server-header events
// (see GrpcLogEntry.Payload).
//
// NOTE(review): this file is protoc-gen-go output; regenerate from
// binarylog.proto rather than editing by hand.
type ServerHeader struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// This contains only the metadata from the application.
	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
}

// Reset zeroes the message; when the unsafe fast path is enabled it re-stores
// the cached message type info that the zeroing wiped.
func (x *ServerHeader) Reset() {
	*x = ServerHeader{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *ServerHeader) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ServerHeader) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily attaching
// the cached type info on the unsafe fast path.
func (x *ServerHeader) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
func (*ServerHeader) Descriptor() ([]byte, []int) {
	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
}

// GetMetadata returns the application-supplied header metadata, or nil if the
// receiver is nil.
func (x *ServerHeader) GetMetadata() *Metadata {
	if x != nil {
		return x.Metadata
	}
	return nil
}

// Trailer is the binary-log payload carrying the server's trailing metadata
// and final RPC status (see GrpcLogEntry.Payload).
type Trailer struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// This contains only the metadata from the application.
	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
	// The gRPC status code.
	StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
	// An original status message before any transport specific
	// encoding.
	StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
	// The value of the 'grpc-status-details-bin' metadata key. If
	// present, this is always an encoded 'google.rpc.Status' message.
	StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
}

// Reset zeroes the message; when the unsafe fast path is enabled it re-stores
// the cached message type info that the zeroing wiped.
func (x *Trailer) Reset() {
	*x = Trailer{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *Trailer) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Trailer) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily attaching
// the cached type info on the unsafe fast path.
func (x *Trailer) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
func (*Trailer) Descriptor() ([]byte, []int) {
	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
}

// GetMetadata returns the trailing metadata, or nil if the receiver is nil.
func (x *Trailer) GetMetadata() *Metadata {
	if x != nil {
		return x.Metadata
	}
	return nil
}

// GetStatusCode returns the gRPC status code, or 0 if the receiver is nil.
func (x *Trailer) GetStatusCode() uint32 {
	if x != nil {
		return x.StatusCode
	}
	return 0
}

// GetStatusMessage returns the pre-encoding status message, or "" if the
// receiver is nil.
func (x *Trailer) GetStatusMessage() string {
	if x != nil {
		return x.StatusMessage
	}
	return ""
}

// GetStatusDetails returns the raw 'grpc-status-details-bin' value, or nil if
// the receiver is nil.
func (x *Trailer) GetStatusDetails() []byte {
	if x != nil {
		return x.StatusDetails
	}
	return nil
}

// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
type Message struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Length of the message. It may not be the same as the length of the
	// data field, as the logging payload can be truncated or omitted.
	Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
	// May be truncated or omitted.
	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
}

// Reset zeroes the message; when the unsafe fast path is enabled it re-stores
// the cached message type info that the zeroing wiped.
func (x *Message) Reset() {
	*x = Message{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *Message) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Message) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily attaching
// the cached type info on the unsafe fast path.
func (x *Message) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
}

// GetLength returns the original (pre-truncation) message length, or 0 if the
// receiver is nil.
func (x *Message) GetLength() uint32 {
	if x != nil {
		return x.Length
	}
	return 0
}

// GetData returns the (possibly truncated) message bytes, or nil if the
// receiver is nil.
func (x *Message) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}

// A list of metadata pairs, used in the payload of client header,
// server header, and server trailer.
// Implementations may omit some entries to honor the header limits
// of GRPC_BINARY_LOG_CONFIG.
//
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
//   - entries handled by grpc that are not user visible, such as those
//     that begin with 'grpc-' (with exception of grpc-trace-bin)
//     or keys like 'lb-token'
//   - transport specific entries, including but not limited to:
//     ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
//   - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because
// grpc-trace-bin is managed by low level client side mechanisms
// inaccessible from the application level. On server side, the
// header is just a normal metadata key.
// The pair will not count towards the size limit.
type Metadata struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
}

// Reset zeroes the message; when the unsafe fast path is enabled it re-stores
// the cached message type info that the zeroing wiped.
func (x *Metadata) Reset() {
	*x = Metadata{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *Metadata) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Metadata) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily attaching
// the cached type info on the unsafe fast path.
func (x *Metadata) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
func (*Metadata) Descriptor() ([]byte, []int) {
	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
}

// GetEntry returns the metadata key/value pairs, or nil if the receiver is
// nil.
func (x *Metadata) GetEntry() []*MetadataEntry {
	if x != nil {
		return x.Entry
	}
	return nil
}

// A metadata key value pair
type MetadataEntry struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

// Reset zeroes the message; when the unsafe fast path is enabled it re-stores
// the cached message type info that the zeroing wiped.
func (x *MetadataEntry) Reset() {
	*x = MetadataEntry{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *MetadataEntry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*MetadataEntry) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily attaching
// the cached type info on the unsafe fast path.
func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
func (*MetadataEntry) Descriptor() ([]byte, []int) {
	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
}

// GetKey returns the metadata key, or "" if the receiver is nil.
func (x *MetadataEntry) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

// GetValue returns the metadata value bytes, or nil if the receiver is nil.
func (x *MetadataEntry) GetValue() []byte {
	if x != nil {
		return x.Value
	}
	return nil
}

// Address information
type Address struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Type    Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
	Address string       `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
	// only for TYPE_IPV4 and TYPE_IPV6
	IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
}

// Reset zeroes the message; when the unsafe fast path is enabled it re-stores
// the cached message type info that the zeroing wiped.
func (x *Address) Reset() {
	*x = Address{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *Address) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Address) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily attaching
// the cached type info on the unsafe fast path.
func (x *Address) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Address.ProtoReflect.Descriptor instead.
+func (*Address) Descriptor() ([]byte, []int) { +	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { +	if x != nil { +		return x.Type +	} +	return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { +	if x != nil { +		return x.Address +	} +	return "" +} + +func (x *Address) GetIpPort() uint32 { +	if x != nil { +		return x.IpPort +	} +	return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ +	0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, +	0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, +	0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, +	0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, +	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, +	0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, +	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, +	0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, +	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, +	0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, +	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, +	0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, +	0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, +	0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, +	0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 
0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, +	0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, +	0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, +	0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, +	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, +	0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, +	0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, +	0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, +	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, +	0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, +	0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, +	0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, +	0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, +	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, +	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, +	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, +	0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, +	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, +	0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, +	0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, +	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, +	
0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, +	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, +	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, +	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, +	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, +	0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, +	0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, +	0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, +	0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, +	0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, +	0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, +	0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, +	0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, +	0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, +	0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, +	0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, +	0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, +	0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, +	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, +	0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, +	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 
0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, +	0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, +	0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, +	0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, +	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, +	0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, +	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, +	0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, +	0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, +	0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, +	0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, +	0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, +	0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, +	0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, +	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, +	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, +	0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, +	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, +	0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, +	0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, +	0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 
0x33, 0x0a, 0x07, 0x74, +	0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, +	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, +	0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, +	0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, +	0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, +	0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, +	0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, +	0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, +	0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, +	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, +	0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, +	0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, +	0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, +	0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, +	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, +	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, +	0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, +	0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, +	0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, +	0x74, 0x68, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, +	0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, +	0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, +	0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, +	0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, +	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, +	0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, +	0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, +	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, +	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, +	0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, +	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, +	0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, +	0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, +	0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, +	0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, +	0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, +	0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, +	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, +	0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, +	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 
0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, +	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, +	0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, +	0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, +	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, +	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, +	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, +	0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, +	0x6f, 0x33, +} + +var ( +	file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once +	file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { +	file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { +		file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) +	}) +	return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +	(GrpcLogEntry_EventType)(0),   // 0: grpc.binarylog.v1.GrpcLogEntry.EventType +	(GrpcLogEntry_Logger)(0),      // 1: grpc.binarylog.v1.GrpcLogEntry.Logger +	(Address_Type)(0),             // 2: grpc.binarylog.v1.Address.Type +	(*GrpcLogEntry)(nil),          // 3: grpc.binarylog.v1.GrpcLogEntry +	(*ClientHeader)(nil),          // 4: grpc.binarylog.v1.ClientHeader +	(*ServerHeader)(nil),          // 5: grpc.binarylog.v1.ServerHeader +	(*Trailer)(nil),               // 6: grpc.binarylog.v1.Trailer +	(*Message)(nil),    
           // 7: grpc.binarylog.v1.Message +	(*Metadata)(nil),              // 8: grpc.binarylog.v1.Metadata +	(*MetadataEntry)(nil),         // 9: grpc.binarylog.v1.MetadataEntry +	(*Address)(nil),               // 10: grpc.binarylog.v1.Address +	(*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp +	(*durationpb.Duration)(nil),   // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ +	11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp +	0,  // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType +	1,  // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger +	4,  // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader +	5,  // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader +	7,  // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message +	6,  // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer +	10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address +	8,  // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata +	12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration +	8,  // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata +	8,  // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata +	9,  // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry +	2,  // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type +	14, // [14:14] is the sub-list for method output_type +	14, // [14:14] is the sub-list for method input_type +	14, // [14:14] is the sub-list for extension type_name +	14, // [14:14] is the sub-list for extension extendee +	0,  // [0:14] is the 
sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { +	if File_grpc_binlog_v1_binarylog_proto != nil { +		return +	} +	if !protoimpl.UnsafeEnabled { +		file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*GrpcLogEntry); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +		file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*ClientHeader); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +		file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*ServerHeader); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +		file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*Trailer); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +		file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*Message); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +		file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*Metadata); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +		
file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*MetadataEntry); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +		file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { +			switch v := v.(*Address); i { +			case 0: +				return &v.state +			case 1: +				return &v.sizeCache +			case 2: +				return &v.unknownFields +			default: +				return nil +			} +		} +	} +	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ +		(*GrpcLogEntry_ClientHeader)(nil), +		(*GrpcLogEntry_ServerHeader)(nil), +		(*GrpcLogEntry_Message)(nil), +		(*GrpcLogEntry_Trailer)(nil), +	} +	type x struct{} +	out := protoimpl.TypeBuilder{ +		File: protoimpl.DescBuilder{ +			GoPackagePath: reflect.TypeOf(x{}).PkgPath(), +			RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, +			NumEnums:      3, +			NumMessages:   8, +			NumExtensions: 0, +			NumServices:   0, +		}, +		GoTypes:           file_grpc_binlog_v1_binarylog_proto_goTypes, +		DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, +		EnumInfos:         file_grpc_binlog_v1_binarylog_proto_enumTypes, +		MessageInfos:      file_grpc_binlog_v1_binarylog_proto_msgTypes, +	}.Build() +	File_grpc_binlog_v1_binarylog_proto = out.File +	file_grpc_binlog_v1_binarylog_proto_rawDesc = nil +	file_grpc_binlog_v1_binarylog_proto_goTypes = nil +	file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..9e20e4d38 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received.  This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +	// allow interceptor to see all applicable call options, which means those +	// configured as defaults from dial option as well as per-call options +	opts = combine(cc.dopts.callOptions, opts) + +	if cc.dopts.unaryInt != nil { +		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) +	} +	return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { +	// we don't use append because o1 could have extra capacity whose +	// elements would be overwritten, which could cause inadvertent +	// sharing (and race conditions) between concurrent calls +	if len(o1) == 0 { +		return o2 +	} else if len(o2) == 0 { +		return o1 +	} +	ret := make([]CallOption, len(o1)+len(o2)) +	copy(ret, o1) +	copy(ret[len(o1):], o2) +	return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received.  This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +	return cc.Invoke(ctx, method, args, reply, opts...) 
+} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) +	if err != nil { +		return err +	} +	if err := cs.SendMsg(req); err != nil { +		return err +	} +	return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..32b7fa579 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. 
+type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..d607d4e9e --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1647 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"context" +	"errors" +	"fmt" +	"math" +	"net/url" +	"reflect" +	"strings" +	"sync" +	"sync/atomic" +	"time" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/balancer/base" +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/connectivity" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/internal/backoff" +	"google.golang.org/grpc/internal/channelz" +	"google.golang.org/grpc/internal/grpcsync" +	iresolver "google.golang.org/grpc/internal/resolver" +	"google.golang.org/grpc/internal/transport" +	"google.golang.org/grpc/keepalive" +	"google.golang.org/grpc/resolver" +	"google.golang.org/grpc/serviceconfig" +	"google.golang.org/grpc/status" + +	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin. +	_ "google.golang.org/grpc/internal/resolver/dns"         // To register dns resolver. +	_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. +	_ "google.golang.org/grpc/internal/resolver/unix"        // To register unix resolver. 
+) + +const ( +	// minimum time to give a connection to complete +	minConnectTimeout = 20 * time.Second +	// must match grpclbName in grpclb/grpclb.go +	grpclbName = "grpclb" +) + +var ( +	// ErrClientConnClosing indicates that the operation is illegal because +	// the ClientConn is closing. +	// +	// Deprecated: this error should not be relied upon by users; use the status +	// code of Canceled instead. +	ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") +	// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. +	errConnDrain = errors.New("grpc: the connection is drained") +	// errConnClosing indicates that the connection is closing. +	errConnClosing = errors.New("grpc: the connection is closing") +	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default +	// service config. +	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( +	// errNoTransportSecurity indicates that there is no transport security +	// being set for ClientConn. Users should either set one or explicitly +	// call WithInsecure DialOption to disable security. +	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") +	// errTransportCredsAndBundle indicates that creds bundle is used together +	// with other individual Transport Credentials. +	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") +	// errNoTransportCredsInBundle indicated that the configured creds bundle +	// returned a transport credentials which was nil. 
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") +	// errTransportCredentialsMissing indicates that users want to transmit +	// security information (e.g., OAuth2 token) which requires secure +	// connection on an insecure connection. +	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") +) + +const ( +	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 +	defaultClientMaxSendMessageSize    = math.MaxInt32 +	// http2IOBufSize specifies the buffer size for sending frames. +	defaultWriteBufSize = 32 * 1024 +	defaultReadBufSize  = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { +	return DialContext(context.Background(), target, opts...) +} + +type defaultConfigSelector struct { +	sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { +	return &iresolver.RPCConfig{ +		Context:      rpcInfo.Context, +		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), +	}, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. 
to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +	cc := &ClientConn{ +		target:            target, +		csMgr:             &connectivityStateManager{}, +		conns:             make(map[*addrConn]struct{}), +		dopts:             defaultDialOptions(), +		blockingpicker:    newPickerWrapper(), +		czData:            new(channelzData), +		firstResolveEvent: grpcsync.NewEvent(), +	} +	cc.retryThrottler.Store((*retryThrottler)(nil)) +	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) +	cc.ctx, cc.cancel = context.WithCancel(context.Background()) + +	for _, opt := range extraDialOptions { +		opt.apply(&cc.dopts) +	} + +	for _, opt := range opts { +		opt.apply(&cc.dopts) +	} + +	chainUnaryClientInterceptors(cc) +	chainStreamClientInterceptors(cc) + +	defer func() { +		if err != nil { +			cc.Close() +		} +	}() + +	pid := cc.dopts.channelzParentID +	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) +	ted := &channelz.TraceEventDesc{ +		Desc:     "Channel created", +		Severity: channelz.CtInfo, +	} +	if cc.dopts.channelzParentID != nil { +		ted.Parent = &channelz.TraceEventDesc{ +			Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), +			Severity: channelz.CtInfo, +		} +	} +	channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) +	cc.csMgr.channelzID = cc.channelzID + +	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { +		return nil, errNoTransportSecurity +	} +	if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { +		return nil, errTransportCredsAndBundle +	} +	if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { +		return nil, errNoTransportCredsInBundle +	} +	transportCreds := cc.dopts.copts.TransportCredentials +	if transportCreds == nil { +		transportCreds = 
cc.dopts.copts.CredsBundle.TransportCredentials() +	} +	if transportCreds.Info().SecurityProtocol == "insecure" { +		for _, cd := range cc.dopts.copts.PerRPCCredentials { +			if cd.RequireTransportSecurity() { +				return nil, errTransportCredentialsMissing +			} +		} +	} + +	if cc.dopts.defaultServiceConfigRawJSON != nil { +		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) +		if scpr.Err != nil { +			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) +		} +		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) +	} +	cc.mkp = cc.dopts.copts.KeepaliveParams + +	if cc.dopts.copts.UserAgent != "" { +		cc.dopts.copts.UserAgent += " " + grpcUA +	} else { +		cc.dopts.copts.UserAgent = grpcUA +	} + +	if cc.dopts.timeout > 0 { +		var cancel context.CancelFunc +		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) +		defer cancel() +	} +	defer func() { +		select { +		case <-ctx.Done(): +			switch { +			case ctx.Err() == err: +				conn = nil +			case err == nil || !cc.dopts.returnLastError: +				conn, err = nil, ctx.Err() +			default: +				conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) +			} +		default: +		} +	}() + +	scSet := false +	if cc.dopts.scChan != nil { +		// Try to get an initial service config. +		select { +		case sc, ok := <-cc.dopts.scChan: +			if ok { +				cc.sc = &sc +				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) +				scSet = true +			} +		default: +		} +	} +	if cc.dopts.bs == nil { +		cc.dopts.bs = backoff.DefaultExponential +	} + +	// Determine the resolver to use. 
+	resolverBuilder, err := cc.parseTargetAndFindResolver() +	if err != nil { +		return nil, err +	} +	cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) +	if err != nil { +		return nil, err +	} +	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + +	if cc.dopts.scChan != nil && !scSet { +		// Blocking wait for the initial service config. +		select { +		case sc, ok := <-cc.dopts.scChan: +			if ok { +				cc.sc = &sc +				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) +			} +		case <-ctx.Done(): +			return nil, ctx.Err() +		} +	} +	if cc.dopts.scChan != nil { +		go cc.scWatcher() +	} + +	var credsClone credentials.TransportCredentials +	if creds := cc.dopts.copts.TransportCredentials; creds != nil { +		credsClone = creds.Clone() +	} +	cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ +		DialCreds:        credsClone, +		CredsBundle:      cc.dopts.copts.CredsBundle, +		Dialer:           cc.dopts.copts.Dialer, +		Authority:        cc.authority, +		CustomUserAgent:  cc.dopts.copts.UserAgent, +		ChannelzParentID: cc.channelzID, +		Target:           cc.parsedTarget, +	}) + +	// Build the resolver. +	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) +	if err != nil { +		return nil, fmt.Errorf("failed to build resolver: %v", err) +	} +	cc.mu.Lock() +	cc.resolverWrapper = rWrapper +	cc.mu.Unlock() + +	// A blocking dial blocks until the clientConn is ready. +	if cc.dopts.block { +		for { +			cc.Connect() +			s := cc.GetState() +			if s == connectivity.Ready { +				break +			} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { +				if err = cc.connectionError(); err != nil { +					terr, ok := err.(interface { +						Temporary() bool +					}) +					if ok && !terr.Temporary() { +						return nil, err +					} +				} +			} +			if !cc.WaitForStateChange(ctx, s) { +				// ctx got timeout or canceled. 
+				if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { +					return nil, err +				} +				return nil, ctx.Err() +			} +		} +	} + +	return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { +	interceptors := cc.dopts.chainUnaryInts +	// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will +	// be executed before any other chained interceptors. +	if cc.dopts.unaryInt != nil { +		interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) +	} +	var chainedInt UnaryClientInterceptor +	if len(interceptors) == 0 { +		chainedInt = nil +	} else if len(interceptors) == 1 { +		chainedInt = interceptors[0] +	} else { +		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { +			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) +		} +	} +	cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { +	if curr == len(interceptors)-1 { +		return finalInvoker +	} +	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) +	} +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { +	interceptors := cc.dopts.chainStreamInts +	// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will +	// be executed before any other chained interceptors. 
+	if cc.dopts.streamInt != nil { +		interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) +	} +	var chainedInt StreamClientInterceptor +	if len(interceptors) == 0 { +		chainedInt = nil +	} else if len(interceptors) == 1 { +		chainedInt = interceptors[0] +	} else { +		chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { +			return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) +		} +	} +	cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { +	if curr == len(interceptors)-1 { +		return finalStreamer +	} +	return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { +		return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) +	} +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { +	mu         sync.Mutex +	state      connectivity.State +	notifyChan chan struct{} +	channelzID *channelz.Identifier +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { +	csm.mu.Lock() +	defer csm.mu.Unlock() +	if csm.state == connectivity.Shutdown { +		return +	} +	if csm.state == state { +		return +	} +	csm.state = state +	channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) +	if csm.notifyChan != nil { +		// There are other goroutines waiting on this channel. 
+		close(csm.notifyChan) +		csm.notifyChan = nil +	} +} + +func (csm *connectivityStateManager) getState() connectivity.State { +	csm.mu.Lock() +	defer csm.mu.Unlock() +	return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { +	csm.mu.Lock() +	defer csm.mu.Unlock() +	if csm.notifyChan == nil { +		csm.notifyChan = make(chan struct{}) +	} +	return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs.  It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { +	// Invoke performs a unary RPC and returns after the response is received +	// into reply. +	Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error +	// NewStream begins a streaming RPC. +	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { +	ctx    context.Context    // Initialized using the background context at dial time. +	cancel context.CancelFunc // Cancelled on close. + +	// The following are initialized at dial time, and are read-only after that. 
+	target          string               // User's dial target. +	parsedTarget    resolver.Target      // See parseTargetAndFindResolver(). +	authority       string               // See determineAuthority(). +	dopts           dialOptions          // Default and user specified dial options. +	channelzID      *channelz.Identifier // Channelz identifier for the channel. +	balancerWrapper *ccBalancerWrapper   // Uses gracefulswitch.balancer underneath. + +	// The following provide their own synchronization, and therefore don't +	// require cc.mu to be held to access them. +	csMgr              *connectivityStateManager +	blockingpicker     *pickerWrapper +	safeConfigSelector iresolver.SafeConfigSelector +	czData             *channelzData +	retryThrottler     atomic.Value // Updated from service config. + +	// firstResolveEvent is used to track whether the name resolver sent us at +	// least one update. RPCs block on this event. +	firstResolveEvent *grpcsync.Event + +	// mu protects the following fields. +	// TODO: split mu so the same mutex isn't used for everything. +	mu              sync.RWMutex +	resolverWrapper *ccResolverWrapper         // Initialized in Dial; cleared in Close. +	sc              *ServiceConfig             // Latest service config received from the resolver. +	conns           map[*addrConn]struct{}     // Set to nil on close. +	mkp             keepalive.ClientParameters // May be updated upon receipt of a GoAway. + +	lceMu               sync.Mutex // protects lastConnectionError +	lastConnectionError error +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { +	// Grab the notify channel before re-checking the state so that a state +	// transition occurring between the check and the select below cannot be +	// missed. +	ch := cc.csMgr.getNotifyChan() +	if cc.csMgr.getState() != sourceState { +		return true +	} +	select { +	case <-ctx.Done(): +		return false +	case <-ch: +		return true +	} +} + +// GetState returns the connectivity.State of ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) GetState() connectivity.State { +	return cc.csMgr.getState() +} + +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle.  Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { +	cc.balancerWrapper.exitIdle() +} + +// scWatcher applies service configs received on dopts.scChan until that +// channel is closed or the ClientConn's context is done. +func (cc *ClientConn) scWatcher() { +	for { +		select { +		case sc, ok := <-cc.dopts.scChan: +			if !ok { +				return +			} +			cc.mu.Lock() +			// TODO: load balance policy runtime change is ignored. +			// We may revisit this decision in the future. +			cc.sc = &sc +			cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) +			cc.mu.Unlock() +		case <-cc.ctx.Done(): +			return +		} +	} +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires.  Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { +	// This is on the RPC path, so we use a fast path to avoid the +	// more-expensive "select" below after the resolver has returned once.
+	if cc.firstResolveEvent.HasFired() { +		return nil +	} +	select { +	case <-cc.firstResolveEvent.Done(): +		return nil +	case <-ctx.Done(): +		return status.FromContextError(ctx.Err()).Err() +	case <-cc.ctx.Done(): +		return ErrClientConnClosing +	} +} + +var emptyServiceConfig *ServiceConfig + +func init() { +	cfg := parseServiceConfig("{}") +	if cfg.Err != nil { +		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) +	} +	emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { +	if cc.sc != nil { +		cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) +		return +	} +	if cc.dopts.defaultServiceConfig != nil { +		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) +	} else { +		cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) +	} +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +	defer cc.firstResolveEvent.Fire() +	cc.mu.Lock() +	// Check if the ClientConn is already closed. Some fields (e.g. +	// balancerWrapper) are set to nil when closing the ClientConn, and could +	// cause nil pointer panic if we don't have this check. +	if cc.conns == nil { +		cc.mu.Unlock() +		return nil +	} + +	if err != nil { +		// May need to apply the initial service config in case the resolver +		// doesn't support service configs, or doesn't provide a service config +		// with the new addresses. +		cc.maybeApplyDefaultServiceConfig(nil) + +		cc.balancerWrapper.resolverError(err) + +		// No addresses are valid with err set; return early. 
+		cc.mu.Unlock() +		return balancer.ErrBadResolverState +	} + +	var ret error +	if cc.dopts.disableServiceConfig { +		channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) +		cc.maybeApplyDefaultServiceConfig(s.Addresses) +	} else if s.ServiceConfig == nil { +		cc.maybeApplyDefaultServiceConfig(s.Addresses) +		// TODO: do we need to apply a failing LB policy if there is no +		// default, per the error handling design? +	} else { +		if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { +			configSelector := iresolver.GetConfigSelector(s) +			if configSelector != nil { +				if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { +					channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") +				} +			} else { +				configSelector = &defaultConfigSelector{sc} +			} +			cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) +		} else { +			ret = balancer.ErrBadResolverState +			if cc.sc == nil { +				// Apply the failing LB only if we haven't received valid service config +				// from the name resolver in the past. +				cc.applyFailingLB(s.ServiceConfig) +				cc.mu.Unlock() +				return ret +			} +		} +	} + +	var balCfg serviceconfig.LoadBalancingConfig +	if cc.sc != nil && cc.sc.lbConfig != nil { +		balCfg = cc.sc.lbConfig.cfg +	} +	bw := cc.balancerWrapper +	cc.mu.Unlock() + +	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) +	if ret == nil { +		ret = uccsErr // prefer ErrBadResolver state since any other error is +		// currently meaningless to the caller. +	} +	return ret +} + +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. 
Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. +// +// Caller must hold cc.mu. +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +	var err error +	if sc.Err != nil { +		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) +	} else { +		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) +	} +	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) +	cc.blockingpicker.updatePicker(base.NewErrPicker(err)) +	cc.csMgr.updateState(connectivity.TransientFailure) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +	cc.balancerWrapper.updateSubConnState(sc, s, err) +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +	ac := &addrConn{ +		state:        connectivity.Idle, +		cc:           cc, +		addrs:        addrs, +		scopts:       opts, +		dopts:        cc.dopts, +		czData:       new(channelzData), +		resetBackoff: make(chan struct{}), +	} +	ac.ctx, ac.cancel = context.WithCancel(cc.ctx) +	// Track ac in cc. This needs to be done before any getTransport(...) is called. 
+	cc.mu.Lock() +	defer cc.mu.Unlock() +	if cc.conns == nil { +		return nil, ErrClientConnClosing +	} + +	var err error +	ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") +	if err != nil { +		return nil, err +	} +	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ +		Desc:     "Subchannel created", +		Severity: channelz.CtInfo, +		Parent: &channelz.TraceEventDesc{ +			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), +			Severity: channelz.CtInfo, +		}, +	}) + +	cc.conns[ac] = struct{}{} +	return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { +	cc.mu.Lock() +	if cc.conns == nil { +		cc.mu.Unlock() +		return +	} +	delete(cc.conns, ac) +	cc.mu.Unlock() +	ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { +	return &channelz.ChannelInternalMetric{ +		State:                    cc.GetState(), +		Target:                   cc.target, +		CallsStarted:             atomic.LoadInt64(&cc.czData.callsStarted), +		CallsSucceeded:           atomic.LoadInt64(&cc.czData.callsSucceeded), +		CallsFailed:              atomic.LoadInt64(&cc.czData.callsFailed), +		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), +	} +} + +// Target returns the target string of the ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func (cc *ClientConn) Target() string { +	return cc.target +} + +// incrCallsStarted bumps the channelz started-call counter and records the +// start time of the most recent call. +func (cc *ClientConn) incrCallsStarted() { +	atomic.AddInt64(&cc.czData.callsStarted, 1) +	atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +// incrCallsSucceeded bumps the channelz succeeded-call counter. +func (cc *ClientConn) incrCallsSucceeded() { +	atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +// incrCallsFailed bumps the channelz failed-call counter. +func (cc *ClientConn) incrCallsFailed() { +	atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { +	ac.mu.Lock() +	if ac.state == connectivity.Shutdown { +		if logger.V(2) { +			logger.Infof("connect called on shutdown addrConn; ignoring.") +		} +		ac.mu.Unlock() +		return errConnClosing +	} +	if ac.state != connectivity.Idle { +		if logger.V(2) { +			logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) +		} +		ac.mu.Unlock() +		return nil +	} +	// Update connectivity state within the lock to prevent subsequent or +	// concurrent calls from resetting the transport more than once. +	ac.updateConnectivityState(connectivity.Connecting, nil) +	ac.mu.Unlock() + +	ac.resetTransport() +	return nil +} + +// equalAddresses reports whether a and b hold equal addresses in the same +// order. +func equalAddresses(a, b []resolver.Address) bool { +	if len(a) != len(b) { +		return false +	} +	for i, v := range a { +		if !v.Equal(b[i]) { +			return false +		} +	} +	return true +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens.
+// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +//   - If true, it updates ac.addrs and returns true. The ac will keep using +//     the existing connection. +//   - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +	ac.mu.Lock() +	defer ac.mu.Unlock() +	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) +	if ac.state == connectivity.Shutdown || +		ac.state == connectivity.TransientFailure || +		ac.state == connectivity.Idle { +		ac.addrs = addrs +		return true +	} + +	if equalAddresses(ac.addrs, addrs) { +		return true +	} + +	if ac.state == connectivity.Connecting { +		return false +	} + +	// ac.state is Ready, try to find the connected address. +	var curAddrFound bool +	for _, a := range addrs { +		a.ServerName = ac.cc.getServerName(a) +		if reflect.DeepEqual(ac.curAddr, a) { +			curAddrFound = true +			break +		} +	} +	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) +	if curAddrFound { +		ac.addrs = addrs +	} + +	return curAddrFound +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. 
+func (cc *ClientConn) getServerName(addr resolver.Address) string { +	if cc.dopts.authority != "" { +		return cc.dopts.authority +	} +	if addr.ServerName != "" { +		return addr.ServerName +	} +	return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { +	if sc == nil { +		return MethodConfig{} +	} +	if m, ok := sc.Methods[method]; ok { +		return m +	} +	i := strings.LastIndex(method, "/") +	if m, ok := sc.Methods[method[:i+1]]; ok { +		return m +	} +	return sc.Methods[""] +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { +	// TODO: Avoid the locking here. +	cc.mu.RLock() +	defer cc.mu.RUnlock() +	return getMethodConfig(cc.sc, method) +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { +	cc.mu.RLock() +	defer cc.mu.RUnlock() +	if cc.sc == nil { +		return nil +	} +	return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { +	return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ +		Ctx:            ctx, +		FullMethodName: method, +	}) +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { +	if sc == nil { +		// should never reach here. 
+		return +	} +	cc.sc = sc +	if configSelector != nil { +		cc.safeConfigSelector.UpdateConfigSelector(configSelector) +	} + +	if cc.sc.retryThrottling != nil { +		newThrottler := &retryThrottler{ +			tokens: cc.sc.retryThrottling.MaxTokens, +			max:    cc.sc.retryThrottling.MaxTokens, +			thresh: cc.sc.retryThrottling.MaxTokens / 2, +			ratio:  cc.sc.retryThrottling.TokenRatio, +		} +		cc.retryThrottler.Store(newThrottler) +	} else { +		cc.retryThrottler.Store((*retryThrottler)(nil)) +	} + +	var newBalancerName string +	if cc.sc != nil && cc.sc.lbConfig != nil { +		newBalancerName = cc.sc.lbConfig.name +	} else { +		var isGRPCLB bool +		for _, a := range addrs { +			if a.Type == resolver.GRPCLB { +				isGRPCLB = true +				break +			} +		} +		if isGRPCLB { +			newBalancerName = grpclbName +		} else if cc.sc != nil && cc.sc.LB != nil { +			newBalancerName = *cc.sc.LB +		} else { +			newBalancerName = PickFirstBalancerName +		} +	} +	cc.balancerWrapper.switchTo(newBalancerName) +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { +	cc.mu.RLock() +	r := cc.resolverWrapper +	cc.mu.RUnlock() +	if r == nil { +		return +	} +	go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately.  It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used.  Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func (cc *ClientConn) ResetConnectBackoff() { +	cc.mu.Lock() +	conns := cc.conns +	cc.mu.Unlock() +	for ac := range conns { +		ac.resetConnectBackoff() +	} +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { +	defer cc.cancel() + +	cc.mu.Lock() +	if cc.conns == nil { +		cc.mu.Unlock() +		return ErrClientConnClosing +	} +	conns := cc.conns +	cc.conns = nil +	cc.csMgr.updateState(connectivity.Shutdown) + +	rWrapper := cc.resolverWrapper +	cc.resolverWrapper = nil +	bWrapper := cc.balancerWrapper +	cc.mu.Unlock() + +	// The order of closing matters here since the balancer wrapper assumes the +	// picker is closed before it is closed. +	cc.blockingpicker.close() +	if bWrapper != nil { +		bWrapper.close() +	} +	if rWrapper != nil { +		rWrapper.close() +	} + +	for ac := range conns { +		ac.tearDown(ErrClientConnClosing) +	} +	ted := &channelz.TraceEventDesc{ +		Desc:     "Channel deleted", +		Severity: channelz.CtInfo, +	} +	if cc.dopts.channelzParentID != nil { +		ted.Parent = &channelz.TraceEventDesc{ +			Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), +			Severity: channelz.CtInfo, +		} +	} +	channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add +	// trace reference to the entity being deleted, and thus prevent it from being +	// deleted right away. +	channelz.RemoveEntry(cc.channelzID) + +	return nil +} + +// addrConn is a network connection to a given address. 
+type addrConn struct { +	ctx    context.Context +	cancel context.CancelFunc + +	cc     *ClientConn +	dopts  dialOptions +	acbw   balancer.SubConn +	scopts balancer.NewSubConnOptions + +	// transport is set when there's a viable transport (note: ac state may not be READY as LB channel +	// health checking may require server to report healthy to set ac to READY), and is reset +	// to nil when the current transport should no longer be used to create a stream (e.g. after GoAway +	// is received, transport is closed, ac has been torn down). +	transport transport.ClientTransport // The current transport. + +	mu      sync.Mutex +	curAddr resolver.Address   // The current address. +	addrs   []resolver.Address // All addresses that the resolver resolved to. + +	// Use updateConnectivityState for updating addrConn's connectivity state. +	state connectivity.State + +	backoffIdx   int // Needs to be stateful for resetConnectBackoff. +	resetBackoff chan struct{} + +	channelzID *channelz.Identifier +	czData     *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { +	if ac.state == s { +		return +	} +	ac.state = s +	channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) +	ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { +	switch r { +	case transport.GoAwayTooManyPings: +		v := 2 * ac.dopts.copts.KeepaliveParams.Time +		ac.cc.mu.Lock() +		if v > ac.cc.mkp.Time { +			ac.cc.mkp.Time = v +		} +		ac.cc.mu.Unlock() +	} +} + +func (ac *addrConn) resetTransport() { +	ac.mu.Lock() +	if ac.state == connectivity.Shutdown { +		ac.mu.Unlock() +		return +	} + +	addrs := ac.addrs +	backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) +	// This will be the duration that dial gets to finish. 
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return
+		}
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-ac.ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if ac.state != connectivity.Shutdown {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
+}
+
+// tryAllAddrs tries to create a connection to the addresses, and stops at
+// the first successful one. It returns an error if no address was successfully
+// connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { +	var firstConnErr error +	for _, addr := range addrs { +		ac.mu.Lock() +		if ac.state == connectivity.Shutdown { +			ac.mu.Unlock() +			return errConnClosing +		} + +		ac.cc.mu.RLock() +		ac.dopts.copts.KeepaliveParams = ac.cc.mkp +		ac.cc.mu.RUnlock() + +		copts := ac.dopts.copts +		if ac.scopts.CredsBundle != nil { +			copts.CredsBundle = ac.scopts.CredsBundle +		} +		ac.mu.Unlock() + +		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + +		err := ac.createTransport(addr, copts, connectDeadline) +		if err == nil { +			return nil +		} +		if firstConnErr == nil { +			firstConnErr = err +		} +		ac.cc.updateConnectionError(err) +	} + +	// Couldn't connect to any address. +	return firstConnErr +} + +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +	addr.ServerName = ac.cc.getServerName(addr) +	hctx, hcancel := context.WithCancel(ac.ctx) + +	onClose := func(r transport.GoAwayReason) { +		ac.mu.Lock() +		defer ac.mu.Unlock() +		// adjust params based on GoAwayReason +		ac.adjustParams(r) +		if ac.state == connectivity.Shutdown { +			// Already shut down.  tearDown() already cleared the transport and +			// canceled hctx via ac.ctx, and we expected this connection to be +			// closed, so do nothing here. +			return +		} +		hcancel() +		if ac.transport == nil { +			// We're still connecting to this address, which could error.  Do +			// not update the connectivity state or resolve; these will happen +			// at the end of the tryAllAddrs connection loop in the event of an +			// error. +			return +		} +		ac.transport = nil +		// Refresh the name resolver on any connection loss. 
+		ac.cc.resolveNow(resolver.ResolveNowOptions{}) +		// Always go idle and wait for the LB policy to initiate a new +		// connection attempt. +		ac.updateConnectivityState(connectivity.Idle, nil) +	} + +	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) +	defer cancel() +	copts.ChannelzParentID = ac.channelzID + +	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) +	if err != nil { +		if logger.V(2) { +			logger.Infof("Creating new client transport to %q: %v", addr, err) +		} +		// newTr is either nil, or closed. +		hcancel() +		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) +		return err +	} + +	ac.mu.Lock() +	defer ac.mu.Unlock() +	if ac.state == connectivity.Shutdown { +		// This can happen if the subConn was removed while in `Connecting` +		// state. tearDown() would have set the state to `Shutdown`, but +		// would not have closed the transport since ac.transport would not +		// have been set at that point. +		// +		// We run this in a goroutine because newTr.Close() calls onClose() +		// inline, which requires locking ac.mu. +		// +		// The error we pass to Close() is immaterial since there are no open +		// streams at this point, so no trailers with error details will be sent +		// out. We just need to pass a non-nil error. +		go newTr.Close(transport.ErrConnClosing) +		return nil +	} +	if hctx.Err() != nil { +		// onClose was already called for this connection, but the connection +		// was successfully established first.  Consider it a success and set +		// the new state to Idle. +		ac.updateConnectivityState(connectivity.Idle, nil) +		return nil +	} +	ac.curAddr = addr +	ac.transport = newTr +	ac.startHealthCheck(hctx) // Will set state to READY if appropriate. 
+	return nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { +	var healthcheckManagingState bool +	defer func() { +		if !healthcheckManagingState { +			ac.updateConnectivityState(connectivity.Ready, nil) +		} +	}() + +	if ac.cc.dopts.disableHealthCheck { +		return +	} +	healthCheckConfig := ac.cc.healthCheckConfig() +	if healthCheckConfig == nil { +		return +	} +	if !ac.scopts.HealthCheckEnabled { +		return +	} +	healthCheckFunc := ac.cc.dopts.healthCheckFunc +	if healthCheckFunc == nil { +		// The health package is not imported to set health check function. +		// +		// TODO: add a link to the health check doc in the error message. +		channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") +		return +	} + +	healthcheckManagingState = true + +	// Set up the health check helper functions. 
+	currentTr := ac.transport +	newStream := func(method string) (interface{}, error) { +		ac.mu.Lock() +		if ac.transport != currentTr { +			ac.mu.Unlock() +			return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") +		} +		ac.mu.Unlock() +		return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) +	} +	setConnectivityState := func(s connectivity.State, lastErr error) { +		ac.mu.Lock() +		defer ac.mu.Unlock() +		if ac.transport != currentTr { +			return +		} +		ac.updateConnectivityState(s, lastErr) +	} +	// Start the health checking stream. +	go func() { +		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) +		if err != nil { +			if status.Code(err) == codes.Unimplemented { +				channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") +			} else { +				channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) +			} +		} +	}() +} + +func (ac *addrConn) resetConnectBackoff() { +	ac.mu.Lock() +	close(ac.resetBackoff) +	ac.backoffIdx = 0 +	ac.resetBackoff = make(chan struct{}) +	ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { +	ac.mu.Lock() +	defer ac.mu.Unlock() +	if ac.state == connectivity.Ready { +		return ac.transport +	} +	return nil +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. 
+func (ac *addrConn) tearDown(err error) { +	ac.mu.Lock() +	if ac.state == connectivity.Shutdown { +		ac.mu.Unlock() +		return +	} +	curTr := ac.transport +	ac.transport = nil +	// We have to set the state to Shutdown before anything else to prevent races +	// between setting the state and logic that waits on context cancellation / etc. +	ac.updateConnectivityState(connectivity.Shutdown, nil) +	ac.cancel() +	ac.curAddr = resolver.Address{} +	if err == errConnDrain && curTr != nil { +		// GracefulClose(...) may be executed multiple times when +		// i) receiving multiple GoAway frames from the server; or +		// ii) there are concurrent name resolver/Balancer triggered +		// address removal and GoAway. +		// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. +		ac.mu.Unlock() +		curTr.GracefulClose() +		ac.mu.Lock() +	} +	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ +		Desc:     "Subchannel deleted", +		Severity: channelz.CtInfo, +		Parent: &channelz.TraceEventDesc{ +			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), +			Severity: channelz.CtInfo, +		}, +	}) +	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add +	// trace reference to the entity being deleted, and thus prevent it from +	// being deleted right away. 
+	channelz.RemoveEntry(ac.channelzID) +	ac.mu.Unlock() +} + +func (ac *addrConn) getState() connectivity.State { +	ac.mu.Lock() +	defer ac.mu.Unlock() +	return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { +	ac.mu.Lock() +	addr := ac.curAddr.Addr +	ac.mu.Unlock() +	return &channelz.ChannelInternalMetric{ +		State:                    ac.getState(), +		Target:                   addr, +		CallsStarted:             atomic.LoadInt64(&ac.czData.callsStarted), +		CallsSucceeded:           atomic.LoadInt64(&ac.czData.callsSucceeded), +		CallsFailed:              atomic.LoadInt64(&ac.czData.callsFailed), +		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), +	} +} + +func (ac *addrConn) incrCallsStarted() { +	atomic.AddInt64(&ac.czData.callsStarted, 1) +	atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { +	atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { +	atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { +	max    float64 +	thresh float64 +	ratio  float64 + +	mu     sync.Mutex +	tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. 
+func (rt *retryThrottler) throttle() bool { +	if rt == nil { +		return false +	} +	rt.mu.Lock() +	defer rt.mu.Unlock() +	rt.tokens-- +	if rt.tokens < 0 { +		rt.tokens = 0 +	} +	return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { +	if rt == nil { +		return +	} +	rt.mu.Lock() +	defer rt.mu.Unlock() +	rt.tokens += rt.ratio +	if rt.tokens > rt.max { +		rt.tokens = rt.max +	} +} + +type channelzChannel struct { +	cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { +	return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { +	for _, rb := range cc.dopts.resolvers { +		if scheme == rb.Scheme() { +			return rb +		} +	} +	return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { +	cc.lceMu.Lock() +	cc.lastConnectionError = err +	cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { +	cc.lceMu.Lock() +	defer cc.lceMu.Unlock() +	return cc.lastConnectionError +} + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +	channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + +	var rb resolver.Builder +	parsedTarget, err := parseTarget(cc.target) +	if err != nil { +		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) +	} else { +		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) +		rb = cc.getResolver(parsedTarget.URL.Scheme) +		if rb != nil { +			cc.parsedTarget = parsedTarget +			return rb, nil +		} +	} + +	// We are here because the user's dial target did not contain a scheme or +	// 
specified an unregistered scheme. We should fallback to the default +	// scheme, except when a custom dialer is specified in which case, we should +	// always use passthrough scheme. +	defScheme := resolver.GetDefaultScheme() +	channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) +	canonicalTarget := defScheme + ":///" + cc.target + +	parsedTarget, err = parseTarget(canonicalTarget) +	if err != nil { +		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) +		return nil, err +	} +	channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) +	rb = cc.getResolver(parsedTarget.URL.Scheme) +	if rb == nil { +		return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) +	} +	cc.parsedTarget = parsedTarget +	return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and url. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { +	u, err := url.Parse(target) +	if err != nil { +		return resolver.Target{}, err +	} + +	return resolver.Target{ +		Scheme:    u.Scheme, +		Authority: u.Host, +		URL:       *u, +	}, nil +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { +	// Historically, we had two options for users to specify the serverName or +	// authority for a channel. One was through the transport credentials +	// (either in its constructor, or through the OverrideServerName() method). 
+	// The other option (for cases where WithInsecure() dial option was used) +	// was to use the WithAuthority() dial option. +	// +	// A few things have changed since: +	// - `insecure` package with an implementation of the `TransportCredentials` +	//   interface for the insecure case +	// - WithAuthority() dial option support for secure credentials +	authorityFromCreds := "" +	if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { +		authorityFromCreds = creds.Info().ServerName +	} +	authorityFromDialOption := dopts.authority +	if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { +		return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) +	} + +	switch { +	case authorityFromDialOption != "": +		return authorityFromDialOption, nil +	case authorityFromCreds != "": +		return authorityFromCreds, nil +	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): +		// TODO: remove when the unix resolver implements optional interface to +		// return channel authority. +		return "localhost", nil +	case strings.HasPrefix(endpoint, ":"): +		return "localhost" + endpoint, nil +	default: +		// TODO: Define an optional interface on the resolver builder to return +		// the channel authority given the user's dial target. For resolvers +		// which don't implement this interface, we will use the endpoint from +		// "scheme://authority/endpoint" as the default authority. +		return endpoint, nil +	} +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 000000000..129776547 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"google.golang.org/grpc/encoding" +	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { +	Marshal(v interface{}) ([]byte, error) +	Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { +	// Marshal returns the wire format of v. +	Marshal(v interface{}) ([]byte, error) +	// Unmarshal parses the wire format into v. +	Unmarshal(data []byte, v interface{}) error +	// String returns the name of the Codec implementation.  This is unused by +	// gRPC. 
+	String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 000000000..4cdc6ba7c --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..0b206a578 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package codes + +import "strconv" + +func (c Code) String() string { +	switch c { +	case OK: +		return "OK" +	case Canceled: +		return "Canceled" +	case Unknown: +		return "Unknown" +	case InvalidArgument: +		return "InvalidArgument" +	case DeadlineExceeded: +		return "DeadlineExceeded" +	case NotFound: +		return "NotFound" +	case AlreadyExists: +		return "AlreadyExists" +	case PermissionDenied: +		return "PermissionDenied" +	case ResourceExhausted: +		return "ResourceExhausted" +	case FailedPrecondition: +		return "FailedPrecondition" +	case Aborted: +		return "Aborted" +	case OutOfRange: +		return "OutOfRange" +	case Unimplemented: +		return "Unimplemented" +	case Internal: +		return "Internal" +	case Unavailable: +		return "Unavailable" +	case DataLoss: +		return "DataLoss" +	case Unauthenticated: +		return "Unauthenticated" +	default: +		return "Code(" + strconv.FormatInt(int64(c), 10) + ")" +	} +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..11b106182 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. 
+package codes // import "google.golang.org/grpc/codes" + +import ( +	"fmt" +	"strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( +	// OK is returned on success. +	OK Code = 0 + +	// Canceled indicates the operation was canceled (typically by the caller). +	// +	// The gRPC framework will generate this error code when cancellation +	// is requested. +	Canceled Code = 1 + +	// Unknown error. An example of where this error may be returned is +	// if a Status value received from another address space belongs to +	// an error-space that is not known in this address space. Also +	// errors raised by APIs that do not return enough error information +	// may be converted to this error. +	// +	// The gRPC framework will generate this error code in the above two +	// mentioned cases. +	Unknown Code = 2 + +	// InvalidArgument indicates client specified an invalid argument. +	// Note that this differs from FailedPrecondition. It indicates arguments +	// that are problematic regardless of the state of the system +	// (e.g., a malformed file name). +	// +	// This error code will not be generated by the gRPC framework. +	InvalidArgument Code = 3 + +	// DeadlineExceeded means operation expired before completion. +	// For operations that change the state of the system, this error may be +	// returned even if the operation has completed successfully. For +	// example, a successful response from a server could have been delayed +	// long enough for the deadline to expire. +	// +	// The gRPC framework will generate this error code when the deadline is +	// exceeded. +	DeadlineExceeded Code = 4 + +	// NotFound means some requested entity (e.g., file or directory) was +	// not found. +	// +	// This error code will not be generated by the gRPC framework. +	NotFound Code = 5 + +	// AlreadyExists means an attempt to create an entity failed because one +	// already exists. 
+	// +	// This error code will not be generated by the gRPC framework. +	AlreadyExists Code = 6 + +	// PermissionDenied indicates the caller does not have permission to +	// execute the specified operation. It must not be used for rejections +	// caused by exhausting some resource (use ResourceExhausted +	// instead for those errors). It must not be +	// used if the caller cannot be identified (use Unauthenticated +	// instead for those errors). +	// +	// This error code will not be generated by the gRPC core framework, +	// but expect authentication middleware to use it. +	PermissionDenied Code = 7 + +	// ResourceExhausted indicates some resource has been exhausted, perhaps +	// a per-user quota, or perhaps the entire file system is out of space. +	// +	// This error code will be generated by the gRPC framework in +	// out-of-memory and server overload situations, or when a message is +	// larger than the configured maximum size. +	ResourceExhausted Code = 8 + +	// FailedPrecondition indicates operation was rejected because the +	// system is not in a state required for the operation's execution. +	// For example, directory to be deleted may be non-empty, an rmdir +	// operation is applied to a non-directory, etc. +	// +	// A litmus test that may help a service implementor in deciding +	// between FailedPrecondition, Aborted, and Unavailable: +	//  (a) Use Unavailable if the client can retry just the failing call. +	//  (b) Use Aborted if the client should retry at a higher-level +	//      (e.g., restarting a read-modify-write sequence). +	//  (c) Use FailedPrecondition if the client should not retry until +	//      the system state has been explicitly fixed. E.g., if an "rmdir" +	//      fails because the directory is non-empty, FailedPrecondition +	//      should be returned since the client should not retry unless +	//      they have first fixed up the directory by deleting files from it. 
+	//  (d) Use FailedPrecondition if the client performs conditional +	//      REST Get/Update/Delete on a resource and the resource on the +	//      server does not match the condition. E.g., conflicting +	//      read-modify-write on the same resource. +	// +	// This error code will not be generated by the gRPC framework. +	FailedPrecondition Code = 9 + +	// Aborted indicates the operation was aborted, typically due to a +	// concurrency issue like sequencer check failures, transaction aborts, +	// etc. +	// +	// See litmus test above for deciding between FailedPrecondition, +	// Aborted, and Unavailable. +	// +	// This error code will not be generated by the gRPC framework. +	Aborted Code = 10 + +	// OutOfRange means operation was attempted past the valid range. +	// E.g., seeking or reading past end of file. +	// +	// Unlike InvalidArgument, this error indicates a problem that may +	// be fixed if the system state changes. For example, a 32-bit file +	// system will generate InvalidArgument if asked to read at an +	// offset that is not in the range [0,2^32-1], but it will generate +	// OutOfRange if asked to read from an offset past the current +	// file size. +	// +	// There is a fair bit of overlap between FailedPrecondition and +	// OutOfRange. We recommend using OutOfRange (the more specific +	// error) when it applies so that callers who are iterating through +	// a space can easily look for an OutOfRange error to detect when +	// they are done. +	// +	// This error code will not be generated by the gRPC framework. +	OutOfRange Code = 11 + +	// Unimplemented indicates operation is not implemented or not +	// supported/enabled in this service. +	// +	// This error code will be generated by the gRPC framework. Most +	// commonly, you will see this error code when a method implementation +	// is missing on the server. It can also be generated for unknown +	// compression algorithms or a disagreement as to whether an RPC should +	// be streaming. 
+	Unimplemented Code = 12 + +	// Internal errors. Means some invariants expected by underlying +	// system has been broken. If you see one of these errors, +	// something is very broken. +	// +	// This error code will be generated by the gRPC framework in several +	// internal error conditions. +	Internal Code = 13 + +	// Unavailable indicates the service is currently unavailable. +	// This is a most likely a transient condition and may be corrected +	// by retrying with a backoff. Note that it is not always safe to retry +	// non-idempotent operations. +	// +	// See litmus test above for deciding between FailedPrecondition, +	// Aborted, and Unavailable. +	// +	// This error code will be generated by the gRPC framework during +	// abrupt shutdown of a server process or network connection. +	Unavailable Code = 14 + +	// DataLoss indicates unrecoverable data loss or corruption. +	// +	// This error code will not be generated by the gRPC framework. +	DataLoss Code = 15 + +	// Unauthenticated indicates the request does not have valid +	// authentication credentials for the operation. +	// +	// The gRPC framework will generate this error code when the +	// authentication metadata is invalid or a Credentials callback fails, +	// but also expect authentication middleware to generate it. 
+	Unauthenticated Code = 16 + +	_maxCode = 17 +) + +var strToCode = map[string]Code{ +	`"OK"`: OK, +	`"CANCELLED"`:/* [sic] */ Canceled, +	`"UNKNOWN"`:             Unknown, +	`"INVALID_ARGUMENT"`:    InvalidArgument, +	`"DEADLINE_EXCEEDED"`:   DeadlineExceeded, +	`"NOT_FOUND"`:           NotFound, +	`"ALREADY_EXISTS"`:      AlreadyExists, +	`"PERMISSION_DENIED"`:   PermissionDenied, +	`"RESOURCE_EXHAUSTED"`:  ResourceExhausted, +	`"FAILED_PRECONDITION"`: FailedPrecondition, +	`"ABORTED"`:             Aborted, +	`"OUT_OF_RANGE"`:        OutOfRange, +	`"UNIMPLEMENTED"`:       Unimplemented, +	`"INTERNAL"`:            Internal, +	`"UNAVAILABLE"`:         Unavailable, +	`"DATA_LOSS"`:           DataLoss, +	`"UNAUTHENTICATED"`:     Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { +	// From json.Unmarshaler: By convention, to approximate the behavior of +	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as +	// a no-op. +	if string(b) == "null" { +		return nil +	} +	if c == nil { +		return fmt.Errorf("nil receiver passed to UnmarshalJSON") +	} + +	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { +		if ci >= _maxCode { +			return fmt.Errorf("invalid code: %q", ci) +		} + +		*c = Code(ci) +		return nil +	} + +	if jc, ok := strToCode[string(b)]; ok { +		*c = jc +		return nil +	} +	return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..4a8992642 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +package connectivity + +import ( +	"google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { +	switch s { +	case Idle: +		return "IDLE" +	case Connecting: +		return "CONNECTING" +	case Ready: +		return "READY" +	case TransientFailure: +		return "TRANSIENT_FAILURE" +	case Shutdown: +		return "SHUTDOWN" +	default: +		logger.Errorf("unknown connectivity state: %d", s) +		return "INVALID_STATE" +	} +} + +const ( +	// Idle indicates the ClientConn is idle. +	Idle State = iota +	// Connecting indicates the ClientConn is connecting. +	Connecting +	// Ready indicates the ClientConn is ready for work. +	Ready +	// TransientFailure indicates the ClientConn has seen a failure but expects to recover. +	TransientFailure +	// Shutdown indicates the ClientConn has started shutting down. +	Shutdown +) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( +	// ServingModeStarting indicates that the server is starting up. +	ServingModeStarting ServingMode = iota +	// ServingModeServing indicates that the server contains all required +	// configuration and is serving RPCs. 
+	ServingModeServing +	// ServingModeNotServing indicates that the server is not accepting new +	// connections. Existing connections will be closed gracefully, allowing +	// in-progress RPCs to complete. A server enters this mode when it does not +	// contain the required configuration to serve RPCs. +	ServingModeNotServing +) + +func (s ServingMode) String() string { +	switch s { +	case ServingModeStarting: +		return "STARTING" +	case ServingModeServing: +		return "SERVING" +	case ServingModeNotServing: +		return "NOT_SERVING" +	default: +		logger.Errorf("unknown serving mode: %d", s) +		return "INVALID_MODE" +	} +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..5feac3aa0 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. 
+package credentials // import "google.golang.org/grpc/credentials" + +import ( +	"context" +	"errors" +	"fmt" +	"net" + +	"github.com/golang/protobuf/proto" +	"google.golang.org/grpc/attributes" +	icredentials "google.golang.org/grpc/internal/credentials" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { +	// GetRequestMetadata gets the current request metadata, refreshing tokens +	// if required. This should be called by the transport layer on each +	// request, and the data should be populated in headers or other +	// context. If a status code is returned, it will be used as the status for +	// the RPC (restricted to an allowable set of codes as defined by gRFC +	// A54). uri is the URI of the entry point for the request.  When supported +	// by the underlying implementation, ctx can be used for timeout and +	// cancellation. Additionally, RequestInfo data will be available via ctx +	// to this call.  TODO(zhaoq): Define the set of the qualified keys instead +	// of leaving it as an arbitrary string. +	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) +	// RequireTransportSecurity indicates whether the credentials requires +	// transport security. +	RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( +	// InvalidSecurityLevel indicates an invalid security level. +	// The zero SecurityLevel value is invalid for backward compatibility. +	InvalidSecurityLevel SecurityLevel = iota +	// NoSecurity indicates a connection is insecure. +	NoSecurity +	// IntegrityOnly indicates a connection only provides integrity protection. +	IntegrityOnly +	// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. 
+	PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { +	switch s { +	case NoSecurity: +		return "NoSecurity" +	case IntegrityOnly: +		return "IntegrityOnly" +	case PrivacyAndIntegrity: +		return "PrivacyAndIntegrity" +	} +	return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { +	SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { +	return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { +	// ProtocolVersion is the gRPC wire protocol version. +	ProtocolVersion string +	// SecurityProtocol is the security protocol in use. +	SecurityProtocol string +	// SecurityVersion is the security protocol version.  It is a static version string from the +	// credentials, not a value that reflects per-connection protocol negotiation.  To retrieve +	// details about the credentials used for a connection, use the Peer's AuthInfo field instead. +	// +	// Deprecated: please use Peer.AuthInfo. +	SecurityVersion string +	// ServerName is the user-configured server name. +	ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { +	AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. 
+var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { +	// ClientHandshake does the authentication handshake specified by the +	// corresponding authentication protocol on rawConn for clients. It returns +	// the authenticated connection and the corresponding auth information +	// about the connection.  The auth information should embed CommonAuthInfo +	// to return additional information about the credentials. Implementations +	// must use the provided context to implement timely cancellation.  gRPC +	// will try to reconnect if the error returned is a temporary error +	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).  If the +	// returned error is a wrapper error, implementations should make sure that +	// the error implements Temporary() to have the correct retry behaviors. +	// Additionally, ClientHandshakeInfo data will be available via the context +	// passed to this call. +	// +	// The second argument to this method is the `:authority` header value used +	// while creating new streams on this connection after authentication +	// succeeds. Implementations must use this as the server name during the +	// authentication handshake. +	// +	// If the returned net.Conn is closed, it MUST close the net.Conn provided. +	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) +	// ServerHandshake does the authentication handshake for servers. It returns +	// the authenticated connection and the corresponding auth information about +	// the connection. The auth information should embed CommonAuthInfo to return additional information +	// about the credentials. +	// +	// If the returned net.Conn is closed, it MUST close the net.Conn provided. 
+	ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) +	// Info provides the ProtocolInfo of this TransportCredentials. +	Info() ProtocolInfo +	// Clone makes a copy of this TransportCredentials. +	Clone() TransportCredentials +	// OverrideServerName specifies the value used for the following: +	// - verifying the hostname on the returned certificates +	// - as SNI in the client's handshake to support virtual hosting +	// - as the value for `:authority` header at stream creation time +	// +	// Deprecated: use grpc.WithAuthority instead. Will be supported +	// throughout 1.x. +	OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { +	// TransportCredentials returns the transport credentials from the Bundle. +	// +	// Implementations must return non-nil transport credentials. If transport +	// security is not needed by the Bundle, implementations may choose to +	// return insecure.NewCredentials(). +	TransportCredentials() TransportCredentials + +	// PerRPCCredentials returns the per-RPC credentials from the Bundle. +	// +	// May be nil if per-RPC credentials are not needed. +	PerRPCCredentials() PerRPCCredentials + +	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the +	// existing Bundle may cause races. +	// +	// NewWithMode returns nil if the requested mode is not supported. +	NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { +	// The method passed to Invoke or NewStream for this RPC. 
(For proto methods, this has the format "/some.Service/Method") +	Method string +	// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) +	AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { +	ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) +	return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { +	// Attributes contains the attributes for the address. It could be provided +	// by the gRPC, resolver, balancer etc. +	Attributes *attributes.Attributes +} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { +	chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) +	return chi +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. 
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { +	type internalInfo interface { +		GetCommonAuthInfo() CommonAuthInfo +	} +	if ai == nil { +		return errors.New("AuthInfo is nil") +	} +	if ci, ok := ai.(internalInfo); ok { +		// CommonAuthInfo.SecurityLevel has an invalid value. +		if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { +			return nil +		} +		if ci.GetCommonAuthInfo().SecurityLevel < level { +			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) +		} +	} +	// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. +	return nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { +	GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { +	isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. 
+type OtherChannelzSecurityValue struct { +	ChannelzSecurityValue +	Name  string +	Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..82bee1443 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( +	"context" +	"net" + +	"google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { +	return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. 
+type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { +	return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { +	return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { +	return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { +	credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { +	return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { +	return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { +	return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { +	return nil +} + +// TransportCredentials returns the underlying insecure transport credential. 
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials { +	return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 000000000..877b7cd21 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,236 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( +	"context" +	"crypto/tls" +	"crypto/x509" +	"fmt" +	"net" +	"net/url" +	"os" + +	credinternal "google.golang.org/grpc/internal/credentials" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { +	State tls.ConnectionState +	CommonAuthInfo +	// This API is experimental. +	SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { +	return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { +	v := &TLSChannelzSecurityValue{ +		StandardName: cipherSuiteLookup[t.State.CipherSuite], +	} +	// Currently there's no way to get LocalCertificate info from tls package. 
+	if len(t.State.PeerCertificates) > 0 { +		v.RemoteCertificate = t.State.PeerCertificates[0].Raw +	} +	return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { +	// TLS configuration +	config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { +	return ProtocolInfo{ +		SecurityProtocol: "tls", +		SecurityVersion:  "1.2", +		ServerName:       c.config.ServerName, +	} +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { +	// use local cfg to avoid clobbering ServerName if using multiple endpoints +	cfg := credinternal.CloneTLSConfig(c.config) +	if cfg.ServerName == "" { +		serverName, _, err := net.SplitHostPort(authority) +		if err != nil { +			// If the authority had no host port or if the authority cannot be parsed, use it as-is. +			serverName = authority +		} +		cfg.ServerName = serverName +	} +	conn := tls.Client(rawConn, cfg) +	errChannel := make(chan error, 1) +	go func() { +		errChannel <- conn.Handshake() +		close(errChannel) +	}() +	select { +	case err := <-errChannel: +		if err != nil { +			conn.Close() +			return nil, nil, err +		} +	case <-ctx.Done(): +		conn.Close() +		return nil, nil, ctx.Err() +	} +	tlsInfo := TLSInfo{ +		State: conn.ConnectionState(), +		CommonAuthInfo: CommonAuthInfo{ +			SecurityLevel: PrivacyAndIntegrity, +		}, +	} +	id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) +	if id != nil { +		tlsInfo.SPIFFEID = id +	} +	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { +	conn := tls.Server(rawConn, c.config) +	if err := conn.Handshake(); err != nil { +		conn.Close() +		return nil, nil, err +	} +	tlsInfo := TLSInfo{ +		State: conn.ConnectionState(), +		CommonAuthInfo: CommonAuthInfo{ +			SecurityLevel: PrivacyAndIntegrity, +		}, +	} +	id := 
credinternal.SPIFFEIDFromState(conn.ConnectionState()) +	if id != nil { +		tlsInfo.SPIFFEID = id +	} +	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { +	return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { +	c.config.ServerName = serverNameOverride +	return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { +	tc := &tlsCreds{credinternal.CloneTLSConfig(c)} +	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) +	return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { +	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. 
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { +	b, err := os.ReadFile(certFile) +	if err != nil { +		return nil, err +	} +	cp := x509.NewCertPool() +	if !cp.AppendCertsFromPEM(b) { +		return nil, fmt.Errorf("credentials: failed to append certificates") +	} +	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { +	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { +	cert, err := tls.LoadX509KeyPair(certFile, keyFile) +	if err != nil { +		return nil, err +	} +	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type TLSChannelzSecurityValue struct { +	ChannelzSecurityValue +	StandardName      string +	LocalCertificate  []byte +	RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ +	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA", +	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA", +	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA", +	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA", +	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256", +	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384", +	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", +	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", +	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", +	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA", +	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", +	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", +	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", +	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", +	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", +	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", +	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV", +	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256", +	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", +	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", +	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", +	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +	tls.TLS_AES_128_GCM_SHA256:                  "TLS_AES_128_GCM_SHA256", +	tls.TLS_AES_256_GCM_SHA384:                  "TLS_AES_256_GCM_SHA384", +	tls.TLS_CHACHA20_POLY1305_SHA256:            "TLS_CHACHA20_POLY1305_SHA256", +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 000000000..4866da101 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,637 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"context" +	"net" +	"time" + +	"google.golang.org/grpc/backoff" +	"google.golang.org/grpc/channelz" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/credentials/insecure" +	"google.golang.org/grpc/internal" +	internalbackoff "google.golang.org/grpc/internal/backoff" +	"google.golang.org/grpc/internal/binarylog" +	"google.golang.org/grpc/internal/transport" +	"google.golang.org/grpc/keepalive" +	"google.golang.org/grpc/resolver" +	"google.golang.org/grpc/stats" +) + +func init() { +	internal.AddGlobalDialOptions = func(opt ...DialOption) { +		extraDialOptions = append(extraDialOptions, opt...) 
+	} +	internal.ClearGlobalDialOptions = func() { +		extraDialOptions = nil +	} +	internal.WithBinaryLogger = withBinaryLogger +	internal.JoinDialOptions = newJoinDialOption +} + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { +	unaryInt  UnaryClientInterceptor +	streamInt StreamClientInterceptor + +	chainUnaryInts  []UnaryClientInterceptor +	chainStreamInts []StreamClientInterceptor + +	cp                          Compressor +	dc                          Decompressor +	bs                          internalbackoff.Strategy +	block                       bool +	returnLastError             bool +	timeout                     time.Duration +	scChan                      <-chan ServiceConfig +	authority                   string +	binaryLogger                binarylog.Logger +	copts                       transport.ConnectOptions +	callOptions                 []CallOption +	channelzParentID            *channelz.Identifier +	disableServiceConfig        bool +	disableRetry                bool +	disableHealthCheck          bool +	healthCheckFunc             internal.HealthChecker +	minConnectTimeout           func() time.Duration +	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. +	defaultServiceConfigRawJSON *string +	resolvers                   []resolver.Builder +} + +// DialOption configures how we set up the connection. +type DialOption interface { +	apply(*dialOptions) +} + +var extraDialOptions []DialOption + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { +	f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { +	fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { +	return &funcDialOption{ +		f: f, +	} +} + +type joinDialOption struct { +	opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { +	for _, opt := range jdo.opts { +		opt.apply(do) +	} +} + +func newJoinDialOption(opts ...DialOption) DialOption { +	return &joinDialOption{opts: opts} +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. +func WithWriteBufferSize(s int) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.WriteBufferSize = s +	}) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. +func WithReadBufferSize(s int) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.ReadBufferSize = s +	}) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. 
+func WithInitialWindowSize(s int32) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.InitialWindowSize = s +	}) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.InitialConnWindowSize = s +	}) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.  Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { +	return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.callOptions = append(o.callOptions, cos...) +	}) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.  Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { +	return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead.  Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.cp = cp +	}) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression.  
If incoming response messages are encoded +// using the decompressor's Type(), it will be used.  Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message.  If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead.  Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.dc = dc +	}) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md.  Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.scChan = c +	}) +} + +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.bs = internalbackoff.Exponential{Config: p.Backoff} +		o.minConnectTimeout = func() time.Duration { +			return p.MinConnectTimeout +		} +	}) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. 
Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { +	return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { +	bc := backoff.DefaultConfig +	bc.MaxDelay = b.MaxDelay +	return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.bs = bs +	}) +} + +// WithBlock returns a DialOption which makes callers of Dial block until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.block = true +	}) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithReturnConnectionError() DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.block = true +		o.returnLastError = true +	}) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Under the hood, it uses insecure.NewCredentials(). 
+// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. +func WithInsecure() DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.TransportCredentials = insecure.NewCredentials() +	}) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.UseProxy = false +	}) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.TransportCredentials = creds +	}) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) +	}) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func WithCredentialsBundle(b credentials.Bundle) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.CredsBundle = b +	}) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead.  Will be supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.timeout = d +	}) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.Dialer = f +	}) +} + +func init() { +	internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead.  Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { +	return WithContextDialer( +		func(ctx context.Context, addr string) (net.Conn, error) { +			if deadline, ok := ctx.Deadline(); ok { +				return f(addr, time.Until(deadline)) +			} +			return f(addr, 0) +		}) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. 
+func WithStatsHandler(h stats.Handler) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		if h == nil { +			logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") +			// Do not allow a nil stats handler, which would otherwise cause +			// panics. +			return +		} +		o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) +	}) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.binaryLogger = bl +	}) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FailOnNonTempDialError(f bool) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.FailOnNonTempDialError = f +	}) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.UserAgent = s +	}) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. 
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { +	if kp.Time < internal.KeepaliveMinPingTime { +		logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) +		kp.Time = internal.KeepaliveMinPingTime +	} +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.KeepaliveParams = kp +	}) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.unaryInt = f +	}) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) +	}) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.streamInt = f +	}) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. 
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.chainStreamInts = append(o.chainStreamInts, interceptors...) +	}) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header and as the server name in authentication handshake. +func WithAuthority(a string) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.authority = a +	}) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id *channelz.Identifier) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.channelzParentID = id +	}) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.disableServiceConfig = true +	}) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used, or +// +// 2. The name resolver does not provide a service config or provides an +// invalid service config. +// +// The parameter s is the JSON representation of the default service config. 
+// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go +func WithDefaultServiceConfig(s string) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.defaultServiceConfigRawJSON = &s +	}) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them.  This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +func WithDisableRetry() DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.disableRetry = true +	}) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.copts.MaxHeaderListSize = &s +	}) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithDisableHealthCheck() DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.disableHealthCheck = true +	}) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. 
+func withHealthCheckFunc(f internal.HealthChecker) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.healthCheckFunc = f +	}) +} + +func defaultDialOptions() dialOptions { +	return dialOptions{ +		healthCheckFunc: internal.HealthCheckFunc, +		copts: transport.ConnectOptions{ +			WriteBufferSize: defaultWriteBufSize, +			ReadBufferSize:  defaultReadBufSize, +			UseProxy:        true, +		}, +	} +} + +// withGetMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purpose only. +func withMinConnectDeadline(f func() time.Duration) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.minConnectTimeout = f +	}) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register.  They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithResolvers(rs ...resolver.Builder) DialOption { +	return newFuncDialOption(func(o *dialOptions) { +		o.resolvers = append(o.resolvers, rs...) +	}) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..0022859ad --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000..07a586135 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package encoding + +import ( +	"io" +	"strings" + +	"google.golang.org/grpc/internal/grpcutil" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. 
+const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { +	// Compress writes the data written to wc to w after compressing it.  If an +	// error occurs while initializing the compressor, that error is returned +	// instead. +	Compress(w io.Writer) (io.WriteCloser, error) +	// Decompress reads data from r, decompresses it, and provides the +	// uncompressed data via the returned io.Reader.  If an error occurs while +	// initializing the decompressor, that error is returned instead. +	Decompress(r io.Reader) (io.Reader, error) +	// Name is the name of the compression codec and is used to set the content +	// coding header.  The result must be static; the result cannot change +	// between calls. +	Name() string +	// If a Compressor implements +	// DecompressedSize(compressedBytes []byte) int, gRPC will call it +	// to determine the size of the buffer allocated for the result of decompression. +	// Return -1 to indicate unknown size. +	// +	// Experimental +	// +	// Notice: This API is EXPERIMENTAL and may be changed or removed in a +	// later release. +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name.  It can +// be activated when sending an RPC via grpc.UseCompressor().  It will be +// automatically accessed when receiving a message based on the content coding +// header.  Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe.  If multiple Compressors are +// registered with the same name, the one registered last will take effect. 
+func RegisterCompressor(c Compressor) { +	registeredCompressor[c.Name()] = c +	if !grpcutil.IsCompressorNameRegistered(c.Name()) { +		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) +	} +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { +	return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages.  Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { +	// Marshal returns the wire format of v. +	Marshal(v interface{}) ([]byte, error) +	// Unmarshal parses the wire format into v. +	Unmarshal(data []byte, v interface{}) error +	// Name returns the name of the Codec implementation. The returned string +	// will be used as part of content type in transmission.  The result must be +	// static; the result cannot change between calls. +	Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec.  This +// is case-insensitive, and is stored and looked up as lowercase.  If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe.  If multiple Codecs are +// registered with the same name, the one registered last will take effect. 
+func RegisterCodec(codec Codec) { +	if codec == nil { +		panic("cannot register a nil Codec") +	} +	if codec.Name() == "" { +		panic("cannot register Codec with empty string result for Name()") +	} +	contentSubtype := strings.ToLower(codec.Name()) +	registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { +	return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go new file mode 100644 index 000000000..a3bb173c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gzip implements and registers the gzip compressor +// during the initialization. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package gzip + +import ( +	"compress/gzip" +	"encoding/binary" +	"fmt" +	"io" +	"sync" + +	"google.golang.org/grpc/encoding" +) + +// Name is the name registered for the gzip compressor. 
+const Name = "gzip" + +func init() { +	c := &compressor{} +	c.poolCompressor.New = func() interface{} { +		return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} +	} +	encoding.RegisterCompressor(c) +} + +type writer struct { +	*gzip.Writer +	pool *sync.Pool +} + +// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). +// NOTE: this function must only be called during initialization time (i.e. in an init() function), +// and is not thread-safe. +// +// The error returned will be nil if the specified level is valid. +func SetLevel(level int) error { +	if level < gzip.DefaultCompression || level > gzip.BestCompression { +		return fmt.Errorf("grpc: invalid gzip compression level: %d", level) +	} +	c := encoding.GetCompressor(Name).(*compressor) +	c.poolCompressor.New = func() interface{} { +		w, err := gzip.NewWriterLevel(io.Discard, level) +		if err != nil { +			panic(err) +		} +		return &writer{Writer: w, pool: &c.poolCompressor} +	} +	return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { +	z := c.poolCompressor.Get().(*writer) +	z.Writer.Reset(w) +	return z, nil +} + +func (z *writer) Close() error { +	defer z.pool.Put(z) +	return z.Writer.Close() +} + +type reader struct { +	*gzip.Reader +	pool *sync.Pool +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { +	z, inPool := c.poolDecompressor.Get().(*reader) +	if !inPool { +		newZ, err := gzip.NewReader(r) +		if err != nil { +			return nil, err +		} +		return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil +	} +	if err := z.Reset(r); err != nil { +		c.poolDecompressor.Put(z) +		return nil, err +	} +	return z, nil +} + +func (z *reader) Read(p []byte) (n int, err error) { +	n, err = z.Reader.Read(p) +	if err == io.EOF { +		z.pool.Put(z) +	} +	return n, err +} + +// RFC1952 specifies that the last four bytes "contains the size of +// the original (uncompressed) input 
data modulo 2^32."
+// gRPC has a max message size of 2GB so we don't need to worry about wraparound.
+func (c *compressor) DecompressedSize(buf []byte) int {
+	last := len(buf)
+	if last < 4 {
+		return -1
+	}
+	return int(binary.LittleEndian.Uint32(buf[last-4 : last]))
+}
+
+func (c *compressor) Name() string {
+	return Name
+}
+
+type compressor struct {
+	poolCompressor   sync.Pool
+	poolDecompressor sync.Pool
+}
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
new file mode 100644
index 000000000..3009b35af
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proto defines the protobuf codec. Importing this package will
+// register the codec.
+package proto
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the proto codec.
+const Name = "proto"
+
+func init() {
+	encoding.RegisterCodec(codec{})
+}
+
+// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type codec struct{} + +func (codec) Marshal(v interface{}) ([]byte, error) { +	vv, ok := v.(proto.Message) +	if !ok { +		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) +	} +	return proto.Marshal(vv) +} + +func (codec) Unmarshal(data []byte, v interface{}) error { +	vv, ok := v.(proto.Message) +	if !ok { +		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) +	} +	return proto.Unmarshal(data, vv) +} + +func (codec) Name() string { +	return Name +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 000000000..8358dd6e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( +	"fmt" + +	"google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { +	name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { +	args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +	grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { +	args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +	grpclog.WarningDepth(depth+1, args...) 
+} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { +	args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +	grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { +	args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +	grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { +	c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { +	c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { +	c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { +	c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { +	c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { +	c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { +	c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { +	c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { +	c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { +	c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { +	c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { +	c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { +	return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. 
+func Component(componentName string) DepthLoggerV2 { +	if cData, ok := cache[componentName]; ok { +		return cData +	} +	c := &componentData{componentName} +	cache[componentName] = c +	return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..c8bb2be34 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import ( +	"os" + +	"google.golang.org/grpc/internal/grpclog" +) + +func init() { +	SetLoggerV2(newLoggerV2()) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { +	return grpclog.Logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { +	grpclog.Logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. 
+func Infof(format string, args ...interface{}) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...interface{}) {
+	grpclog.Logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...interface{}) {
+	grpclog.Logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+	grpclog.Logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+	grpclog.Logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+	grpclog.Logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+	grpclog.Logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+	grpclog.Logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+	grpclog.Logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+	grpclog.Logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+	grpclog.Logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger.
Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...interface{}) { +	grpclog.Logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { +	grpclog.Logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...interface{}) { +	grpclog.Logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 000000000..ef06a4822 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import "google.golang.org/grpc/internal/grpclog" + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { +	Fatal(args ...interface{}) +	Fatalf(format string, args ...interface{}) +	Fatalln(args ...interface{}) +	Print(args ...interface{}) +	Printf(format string, args ...interface{}) +	Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. 
+func SetLogger(l Logger) { +	grpclog.Logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { +	Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { +	g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { +	g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { +	g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { +	g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { +	g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +	g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { +	g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { +	g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +	g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { +	// Returns true for all verbose level. +	return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 000000000..5de66e40d --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,258 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclog + +import ( +	"encoding/json" +	"fmt" +	"io" +	"log" +	"os" +	"strconv" +	"strings" + +	"google.golang.org/grpc/internal/grpclog" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { +	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +	Info(args ...interface{}) +	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +	Infoln(args ...interface{}) +	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +	Infof(format string, args ...interface{}) +	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +	Warning(args ...interface{}) +	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +	Warningln(args ...interface{}) +	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +	Warningf(format string, args ...interface{}) +	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +	Error(args ...interface{}) +	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +	Errorln(args ...interface{}) +	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +	Errorf(format string, args ...interface{}) +	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. +	// gRPC ensures that all Fatal logs will exit with os.Exit(1). +	// Implementations may also call os.Exit() with a non-zero exit code. +	Fatal(args ...interface{}) +	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +	// gRPC ensures that all Fatal logs will exit with os.Exit(1). +	// Implementations may also call os.Exit() with a non-zero exit code. +	Fatalln(args ...interface{}) +	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +	// gRPC ensures that all Fatal logs will exit with os.Exit(1). 
+	// Implementations may also call os.Exit() with a non-zero exit code. +	Fatalf(format string, args ...interface{}) +	// V reports whether verbosity level l is at least the requested verbose level. +	V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { +	if _, ok := l.(*componentData); ok { +		panic("cannot use component logger as grpclog logger") +	} +	grpclog.Logger = l +	grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( +	// infoLog indicates Info severity. +	infoLog int = iota +	// warningLog indicates Warning severity. +	warningLog +	// errorLog indicates Error severity. +	errorLog +	// fatalLog indicates Fatal severity. +	fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ +	infoLog:    "INFO", +	warningLog: "WARNING", +	errorLog:   "ERROR", +	fatalLog:   "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { +	m          []*log.Logger +	v          int +	jsonFormat bool +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { +	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. 
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { +	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { +	verbose    int +	jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { +	var m []*log.Logger +	flag := log.LstdFlags +	if c.jsonFormat { +		flag = 0 +	} +	m = append(m, log.New(infoW, "", flag)) +	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) +	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. +	m = append(m, log.New(ew, "", flag)) +	m = append(m, log.New(ew, "", flag)) +	return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { +	errorW := io.Discard +	warningW := io.Discard +	infoW := io.Discard + +	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") +	switch logLevel { +	case "", "ERROR", "error": // If env is unset, set level to ERROR. +		errorW = os.Stderr +	case "WARNING", "warning": +		warningW = os.Stderr +	case "INFO", "info": +		infoW = os.Stderr +	} + +	var v int +	vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") +	if vl, err := strconv.Atoi(vLevel); err == nil { +		v = vl +	} + +	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + +	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ +		verbose:    v, +		jsonFormat: jsonFormat, +	}) +} + +func (g *loggerT) output(severity int, s string) { +	sevStr := severityName[severity] +	if !g.jsonFormat { +		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) +		return +	} +	// TODO: we can also include the logging component, but that needs more +	// (API) changes. 
+	b, _ := json.Marshal(map[string]string{ +		"severity": sevStr, +		"message":  s, +	}) +	g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...interface{}) { +	g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...interface{}) { +	g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { +	g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...interface{}) { +	g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...interface{}) { +	g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { +	g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...interface{}) { +	g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...interface{}) { +	g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { +	g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...interface{}) { +	g.output(fatalLog, fmt.Sprint(args...)) +	os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...interface{}) { +	g.output(fatalLog, fmt.Sprintln(args...)) +	os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { +	g.output(fatalLog, fmt.Sprintf(format, args...)) +	os.Exit(1) +} + +func (g *loggerT) V(l int) bool { +	return l <= g.v +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { +	LoggerV2 +	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. 
+	InfoDepth(depth int, args ...interface{}) +	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. +	WarningDepth(depth int, args ...interface{}) +	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. +	ErrorDepth(depth int, args ...interface{}) +	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. +	FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..bb96ef57b --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. 
When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. 
+type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { +	// Server is the service implementation the user provides. This is read-only. +	Server interface{} +	// FullMethod is the full RPC method string, i.e., /package.service/method. +	FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { +	// FullMethod is the full RPC method string, i.e., /package.service/method. +	FullMethod string +	// IsClientStream indicates whether the RPC is a client streaming RPC. +	IsClientStream bool +	// IsServerStream indicates whether the RPC is a server streaming RPC. 
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 000000000..5fc0ee3da
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { +	// Config contains all options to configure the backoff algorithm. +	Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { +	if retries == 0 { +		return bc.Config.BaseDelay +	} +	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) +	for backoff < max && retries > 0 { +		backoff *= bc.Config.Multiplier +		retries-- +	} +	if backoff > max { +		backoff = max +	} +	// Randomize backoff delays so that if a cluster of requests start at +	// the same time, they won't operate in lockstep. +	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) +	if backoff < 0 { +		return 0 +	} +	return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..08666f62a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( +	"errors" +	"fmt" +	"sync" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/balancer/base" +	"google.golang.org/grpc/connectivity" +	"google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { +	return &Balancer{ +		cc:    cc, +		bOpts: opts, +	} +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { +	bOpts balancer.BuildOptions +	cc    balancer.ClientConn + +	// mu protects the following fields and all fields within balancerCurrent +	// and balancerPending. mu does not need to be held when calling into the +	// child balancers, as all calls into these children happen only as a direct +	// result of a call into the gracefulSwitchBalancer, which are also +	// guaranteed to be synchronous. There is one exception: an UpdateState call +	// from a child balancer when current and pending are populated can lead to +	// calling Close() on the current. To prevent that racing with an +	// UpdateSubConnState from the channel, we hold currentMu during Close and +	// UpdateSubConnState calls. 
+	mu              sync.Mutex +	balancerCurrent *balancerWrapper +	balancerPending *balancerWrapper +	closed          bool // set to true when this balancer is closed + +	// currentMu must be locked before mu. This mutex guards against this +	// sequence of events: UpdateSubConnState() called, finds the +	// balancerCurrent, gives up lock, updateState comes in, causes Close() on +	// balancerCurrent before the UpdateSubConnState is called on the +	// balancerCurrent. +	currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { +	gsb.cc.UpdateState(gsb.balancerPending.lastState) +	cur := gsb.balancerCurrent +	gsb.balancerCurrent = gsb.balancerPending +	gsb.balancerPending = nil +	go func() { +		gsb.currentMu.Lock() +		defer gsb.currentMu.Unlock() +		cur.Close() +	}() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { +	return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. 
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { +	gsb.mu.Lock() +	if gsb.closed { +		gsb.mu.Unlock() +		return errBalancerClosed +	} +	bw := &balancerWrapper{ +		gsb: gsb, +		lastState: balancer.State{ +			ConnectivityState: connectivity.Connecting, +			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable), +		}, +		subconns: make(map[balancer.SubConn]bool), +	} +	balToClose := gsb.balancerPending // nil if there is no pending balancer +	if gsb.balancerCurrent == nil { +		gsb.balancerCurrent = bw +	} else { +		gsb.balancerPending = bw +	} +	gsb.mu.Unlock() +	balToClose.Close() +	// This function takes a builder instead of a balancer because builder.Build +	// can call back inline, and this utility needs to handle the callbacks. +	newBalancer := builder.Build(bw, gsb.bOpts) +	if newBalancer == nil { +		// This is illegal and should never happen; we clear the balancerWrapper +		// we were constructing if it happens to avoid a potential panic. +		gsb.mu.Lock() +		if gsb.balancerPending != nil { +			gsb.balancerPending = nil +		} else { +			gsb.balancerCurrent = nil +		} +		gsb.mu.Unlock() +		return balancer.ErrBadResolverState +	} + +	// This write doesn't need to take gsb.mu because this field never gets read +	// or written to on any calls from the current or pending. Calls from grpc +	// to this balancer are guaranteed to be called synchronously, so this +	// bw.Balancer field will never be forwarded to until this SwitchTo() +	// function returns. +	bw.Balancer = newBalancer +	return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { +	gsb.mu.Lock() +	defer gsb.mu.Unlock() +	if gsb.balancerPending != nil { +		return gsb.balancerPending +	} +	return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. 
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { +	// The resolver data is only relevant to the most recent LB Policy. +	balToUpdate := gsb.latestBalancer() +	if balToUpdate == nil { +		return errBalancerClosed +	} +	// Perform this call without gsb.mu to prevent deadlocks if the child calls +	// back into the channel. The latest balancer can never be closed during a +	// call from the channel, even without gsb.mu held. +	return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { +	// The resolver data is only relevant to the most recent LB Policy. +	balToUpdate := gsb.latestBalancer() +	if balToUpdate == nil { +		return +	} +	// Perform this call without gsb.mu to prevent deadlocks if the child calls +	// back into the channel. The latest balancer can never be closed during a +	// call from the channel, even without gsb.mu held. +	balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { +	balToUpdate := gsb.latestBalancer() +	if balToUpdate == nil { +		return +	} +	// There is no need to protect this read with a mutex, as the write to the +	// Balancer field happens in SwitchTo, which completes before this can be +	// called. +	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { +		ei.ExitIdle() +		return +	} +	gsb.mu.Lock() +	defer gsb.mu.Unlock() +	for sc := range balToUpdate.subconns { +		sc.Connect() +	} +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +	gsb.currentMu.Lock() +	defer gsb.currentMu.Unlock() +	gsb.mu.Lock() +	// Forward update to the appropriate child.  
Even if there is a pending +	// balancer, the current balancer should continue to get SubConn updates to +	// maintain the proper state while the pending is still connecting. +	var balToUpdate *balancerWrapper +	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { +		balToUpdate = gsb.balancerCurrent +	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { +		balToUpdate = gsb.balancerPending +	} +	gsb.mu.Unlock() +	if balToUpdate == nil { +		// SubConn belonged to a stale lb policy that has not yet fully closed, +		// or the balancer was already closed. +		return +	} +	balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { +	gsb.mu.Lock() +	gsb.closed = true +	currentBalancerToClose := gsb.balancerCurrent +	gsb.balancerCurrent = nil +	pendingBalancerToClose := gsb.balancerPending +	gsb.balancerPending = nil +	gsb.mu.Unlock() + +	currentBalancerToClose.Close() +	pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. 
+type balancerWrapper struct { +	balancer.Balancer +	gsb *Balancer + +	lastState balancer.State +	subconns  map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +	if state.ConnectivityState == connectivity.Shutdown { +		bw.gsb.mu.Lock() +		delete(bw.subconns, sc) +		bw.gsb.mu.Unlock() +	} +	// There is no need to protect this read with a mutex, as the write to the +	// Balancer field happens in SwitchTo, which completes before this can be +	// called. +	bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held.  Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { +	// before Close is called. +	if bw == nil { +		return +	} +	// There is no need to protect this read with a mutex, as Close() is +	// impossible to be called concurrently with the write in SwitchTo(). The +	// callsites of Close() for this balancer in Graceful Switch Balancer will +	// never be called until SwitchTo() returns. +	bw.Balancer.Close() +	bw.gsb.mu.Lock() +	for sc := range bw.subconns { +		bw.gsb.cc.RemoveSubConn(sc) +	} +	bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { +	// Hold the mutex for this entire call to ensure it cannot occur +	// concurrently with other updateState() calls. This causes updates to +	// lastState and calls to cc.UpdateState to happen atomically. 
+	bw.gsb.mu.Lock() +	defer bw.gsb.mu.Unlock() +	bw.lastState = state + +	if !bw.gsb.balancerCurrentOrPending(bw) { +		return +	} + +	if bw == bw.gsb.balancerCurrent { +		// In the case that the current balancer exits READY, and there is a pending +		// balancer, you can forward the pending balancer's cached State up to +		// ClientConn and swap the pending into the current. This is because there +		// is no reason to gracefully switch from and keep using the old policy as +		// the ClientConn is not connected to any backends. +		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { +			bw.gsb.swap() +			return +		} +		// Even if there is a pending balancer waiting to be gracefully switched to, +		// continue to forward current balancer updates to the Client Conn. Ignoring +		// state + picker from the current would cause undefined behavior/cause the +		// system to behave incorrectly from the current LB policies perspective. +		// Also, the current LB is still being used by grpc to choose SubConns per +		// RPC, and thus should use the most updated form of the current balancer. +		bw.gsb.cc.UpdateState(state) +		return +	} +	// This method is now dealing with a state update from the pending balancer. +	// If the current balancer is currently in a state other than READY, the new +	// policy can be swapped into place immediately. This is because there is no +	// reason to gracefully switch from and keep using the old policy as the +	// ClientConn is not connected to any backends. 
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { +		bw.gsb.swap() +	} +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { +	bw.gsb.mu.Lock() +	if !bw.gsb.balancerCurrentOrPending(bw) { +		bw.gsb.mu.Unlock() +		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) +	} +	bw.gsb.mu.Unlock() + +	sc, err := bw.gsb.cc.NewSubConn(addrs, opts) +	if err != nil { +		return nil, err +	} +	bw.gsb.mu.Lock() +	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call +		bw.gsb.cc.RemoveSubConn(sc) +		bw.gsb.mu.Unlock() +		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) +	} +	bw.subconns[sc] = true +	bw.gsb.mu.Unlock() +	return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { +	// Ignore ResolveNow requests from anything other than the most recent +	// balancer, because older balancers were already removed from the config. 
+	if bw != bw.gsb.latestBalancer() { +		return +	} +	bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { +	bw.gsb.mu.Lock() +	if !bw.gsb.balancerCurrentOrPending(bw) { +		bw.gsb.mu.Unlock() +		return +	} +	bw.gsb.mu.Unlock() +	bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { +	bw.gsb.mu.Lock() +	if !bw.gsb.balancerCurrentOrPending(bw) { +		bw.gsb.mu.Unlock() +		return +	} +	bw.gsb.mu.Unlock() +	bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { +	return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 000000000..3a905d966 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( +	"google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { +	// Parse parses loads from metadata. +	Parse(md metadata.MD) interface{} +} + +var parser Parser + +// SetParser sets the load parser. 
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
+func Parse(md metadata.MD) interface{} {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 000000000..809d73cca
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,189 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+// Logger is the global binary logger. It can be used to get binary logger for
+// each method.
+type Logger interface {
+	GetMethodLogger(methodName string) MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of this should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a MethodLogger for each individual method.
+var binLogger Logger
+
+var grpclogLogger = grpclog.Component("binarylog")
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) { +	binLogger = l +} + +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { +	return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each MethodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) MethodLogger { +	if binLogger == nil { +		return nil +	} +	return binLogger.GetMethodLogger(methodName) +} + +func init() { +	const envStr = "GRPC_BINARY_LOG_FILTER" +	configStr := os.Getenv(envStr) +	binLogger = NewLoggerFromConfigString(configStr) +} + +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { +	// Max length of header and message. +	Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { +	All      *MethodLoggerConfig +	Services map[string]*MethodLoggerConfig +	Methods  map[string]*MethodLoggerConfig + +	Blacklist map[string]struct{} +} + +type logger struct { +	config LoggerConfig +} + +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { +	return &logger{config: config} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { +	return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { +	if l.config.All != nil { +		return fmt.Errorf("conflicting global rules found") +	} +	l.config.All = ml +	return nil +} + +// Set method logger for "service/*". +// +// New MethodLogger with same service overrides the old one. 
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Services[service]; ok {
+		return fmt.Errorf("conflicting service rules for service %v found", service)
+	}
+	if l.config.Services == nil {
+		l.config.Services = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Services[service] = ml
+	return nil
+}
+
+// Set method logger for "service/method".
+//
+// A conflicting rule for the same method returns an error instead of
+// overriding the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Methods == nil {
+		l.config.Methods = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Methods[method] = ml
+	return nil
+}
+
+// Set blacklist method for "-service/method".
+func (l *logger) setBlacklist(method string) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Blacklist == nil {
+		l.config.Blacklist = make(map[string]struct{})
+	}
+	l.config.Blacklist[method] = struct{}{}
+	return nil
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func (l *logger) GetMethodLogger(methodName string) MethodLogger { +	s, m, err := grpcutil.ParseMethod(methodName) +	if err != nil { +		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) +		return nil +	} +	if ml, ok := l.config.Methods[s+"/"+m]; ok { +		return NewTruncatingMethodLogger(ml.Header, ml.Message) +	} +	if _, ok := l.config.Blacklist[s+"/"+m]; ok { +		return nil +	} +	if ml, ok := l.config.Services[s]; ok { +		return NewTruncatingMethodLogger(ml.Header, ml.Message) +	} +	if l.config.All == nil { +		return nil +	} +	return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 000000000..1ee00a39a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". 
This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( +	// AllLogger is a logger that logs all headers/messages for all RPCs. It's +	// for testing only. +	AllLogger = NewLoggerFromConfigString("*") +	// MdToMetadataProto converts metadata to a binary logging proto message. +	// It's for testing only. +	MdToMetadataProto = mdToMetadataProto +	// AddrToProto converts an address to a binary logging proto message. It's +	// for testing only. +	AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 000000000..f9e80e27a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( +	"errors" +	"fmt" +	"regexp" +	"strconv" +	"strings" +) + +// NewLoggerFromConfigString reads the string and build a logger. It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +//   - "" Nothing will be logged +//   - "*" All headers and messages will be fully logged. +//   - "*{h}" Only headers will be logged. 
+//   - "*{m:256}" Only the first 256 bytes of each message will be logged. +//   - "Foo/*" Logs every method in service Foo +//   - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +//   - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +//     /Foo/Bar, logs all headers and messages in every other method in service +//     Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { +	if s == "" { +		return nil +	} +	l := newEmptyLogger() +	methods := strings.Split(s, ",") +	for _, method := range methods { +		if err := l.fillMethodLoggerWithConfigString(method); err != nil { +			grpclogLogger.Warningf("failed to parse binary log config: %v", err) +			return nil +		} +	} +	return l +} + +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { +	// "" is invalid. +	if config == "" { +		return errors.New("empty string is not a valid method binary logging config") +	} + +	// "-service/method", blacklist, no * or {} allowed. 
+	if config[0] == '-' { +		s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) +		if err != nil { +			return fmt.Errorf("invalid config: %q, %v", config, err) +		} +		if m == "*" { +			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") +		} +		if suffix != "" { +			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") +		} +		if err := l.setBlacklist(s + "/" + m); err != nil { +			return fmt.Errorf("invalid config: %v", err) +		} +		return nil +	} + +	// "*{h:256;m:256}" +	if config[0] == '*' { +		hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) +		if err != nil { +			return fmt.Errorf("invalid config: %q, %v", config, err) +		} +		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { +			return fmt.Errorf("invalid config: %v", err) +		} +		return nil +	} + +	s, m, suffix, err := parseMethodConfigAndSuffix(config) +	if err != nil { +		return fmt.Errorf("invalid config: %q, %v", config, err) +	} +	hdr, msg, err := parseHeaderMessageLengthConfig(suffix) +	if err != nil { +		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) +	} +	if m == "*" { +		if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { +			return fmt.Errorf("invalid config: %v", err) +		} +	} else { +		if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { +			return fmt.Errorf("invalid config: %v", err) +		} +	} +	return nil +} + +const ( +	// TODO: this const is only used by env_config now. But could be useful for +	// other config. Move to binarylog.go if necessary. +	maxUInt = ^uint64(0) + +	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for +	// expected output. +	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + +	// For suffix from above, "{h:123,m:123}". See test for expected output. 
+	optionalLengthRegexpStr      = `(?::(\d+))?` // Optional ":123". +	headerConfigRegexpStr        = `^{h` + optionalLengthRegexpStr + `}$` +	messageConfigRegexpStr       = `^{m` + optionalLengthRegexpStr + `}$` +	headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( +	longMethodConfigRegexp    = regexp.MustCompile(longMethodConfigRegexpStr) +	headerConfigRegexp        = regexp.MustCompile(headerConfigRegexpStr) +	messageConfigRegexp       = regexp.MustCompile(messageConfigRegexpStr) +	headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { +	// Regexp result: +	// +	// in:  "p.s/m{h:123,m:123}", +	// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, +	match := longMethodConfigRegexp.FindStringSubmatch(c) +	if match == nil { +		return "", "", "", fmt.Errorf("%q contains invalid substring", c) +	} +	service = match[1] +	method = match[2] +	suffix = match[3] +	return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { +	if c == "" { +		return maxUInt, maxUInt, nil +	} +	// Header config only. +	if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { +		if s := match[1]; s != "" { +			hdrLenStr, err = strconv.ParseUint(s, 10, 64) +			if err != nil { +				return 0, 0, fmt.Errorf("failed to convert %q to uint", s) +			} +			return hdrLenStr, 0, nil +		} +		return maxUInt, 0, nil +	} + +	// Message config only. 
+	if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { +		if s := match[1]; s != "" { +			msgLenStr, err = strconv.ParseUint(s, 10, 64) +			if err != nil { +				return 0, 0, fmt.Errorf("failed to convert %q to uint", s) +			} +			return 0, msgLenStr, nil +		} +		return 0, maxUInt, nil +	} + +	// Header and message config both. +	if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { +		// Both hdr and msg are specified, but one or two of them might be empty. +		hdrLenStr = maxUInt +		msgLenStr = maxUInt +		if s := match[1]; s != "" { +			hdrLenStr, err = strconv.ParseUint(s, 10, 64) +			if err != nil { +				return 0, 0, fmt.Errorf("failed to convert %q to uint", s) +			} +		} +		if s := match[2]; s != "" { +			msgLenStr, err = strconv.ParseUint(s, 10, 64) +			if err != nil { +				return 0, 0, fmt.Errorf("failed to convert %q to uint", s) +			} +		} +		return hdrLenStr, msgLenStr, nil +	} +	return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 000000000..d71e44177 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,435 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package binarylog
+
+import (
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+type callIDGenerator struct {
+	id uint64
+}
+
+func (g *callIDGenerator) next() uint64 {
+	id := atomic.AddUint64(&g.id, 1)
+	return id
+}
+
+// reset is for testing only, and doesn't need to be thread safe.
+func (g *callIDGenerator) reset() {
+	g.id = 0
+}
+
+var idGen callIDGenerator
+
+// MethodLogger is the sub-logger for each method.
+type MethodLogger interface {
+	Log(LogEntryConfig)
+}
+
+// TruncatingMethodLogger is a method logger that truncates headers and messages
+// based on configured fields.
+type TruncatingMethodLogger struct {
+	headerMaxLen, messageMaxLen uint64
+
+	callID          uint64
+	idWithinCallGen *callIDGenerator
+
+	sink Sink // TODO(blog): make this pluggable.
+}
+
+// NewTruncatingMethodLogger returns a new truncating method logger.
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+	return &TruncatingMethodLogger{
+		headerMaxLen:  h,
+		messageMaxLen: m,
+
+		callID:          idGen.next(),
+		idWithinCallGen: &callIDGenerator{},
+
+		sink: DefaultSink, // TODO(blog): make it pluggable.
+	}
+}
+
+// Build is an internal only method for building the proto message out of the
+// input event. It's made public to enable other libraries to reuse as much logic
+// in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { +	m := c.toProto() +	timestamp, _ := ptypes.TimestampProto(time.Now()) +	m.Timestamp = timestamp +	m.CallId = ml.callID +	m.SequenceIdWithinCall = ml.idWithinCallGen.next() + +	switch pay := m.Payload.(type) { +	case *binlogpb.GrpcLogEntry_ClientHeader: +		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) +	case *binlogpb.GrpcLogEntry_ServerHeader: +		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) +	case *binlogpb.GrpcLogEntry_Message: +		m.PayloadTruncated = ml.truncateMessage(pay.Message) +	} +	return m +} + +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { +	ml.sink.Write(ml.Build(c)) +} + +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { +	if ml.headerMaxLen == maxUInt { +		return false +	} +	var ( +		bytesLimit = ml.headerMaxLen +		index      int +	) +	// At the end of the loop, index will be the first entry where the total +	// size is greater than the limit: +	// +	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. +	for ; index < len(mdPb.Entry); index++ { +		entry := mdPb.Entry[index] +		if entry.Key == "grpc-trace-bin" { +			// "grpc-trace-bin" is a special key. It's kept in the log entry, +			// but not counted towards the size limit. 
+			continue +		} +		currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) +		if currentEntryLen > bytesLimit { +			break +		} +		bytesLimit -= currentEntryLen +	} +	truncated = index < len(mdPb.Entry) +	mdPb.Entry = mdPb.Entry[:index] +	return truncated +} + +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { +	if ml.messageMaxLen == maxUInt { +		return false +	} +	if ml.messageMaxLen >= uint64(len(msgPb.Data)) { +		return false +	} +	msgPb.Data = msgPb.Data[:ml.messageMaxLen] +	return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { +	toProto() *binlogpb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { +	OnClientSide bool +	Header       metadata.MD +	MethodName   string +	Authority    string +	Timeout      time.Duration +	// PeerAddr is required only when it's on server side. +	PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { +	// This function doesn't need to set all the fields (e.g. seq ID). The Log +	// function will set the fields when necessary. +	clientHeader := &binlogpb.ClientHeader{ +		Metadata:   mdToMetadataProto(c.Header), +		MethodName: c.MethodName, +		Authority:  c.Authority, +	} +	if c.Timeout > 0 { +		clientHeader.Timeout = ptypes.DurationProto(c.Timeout) +	} +	ret := &binlogpb.GrpcLogEntry{ +		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, +		Payload: &binlogpb.GrpcLogEntry_ClientHeader{ +			ClientHeader: clientHeader, +		}, +	} +	if c.OnClientSide { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT +	} else { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER +	} +	if c.PeerAddr != nil { +		ret.Peer = addrToProto(c.PeerAddr) +	} +	return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. 
+type ServerHeader struct { +	OnClientSide bool +	Header       metadata.MD +	// PeerAddr is required only when it's on client side. +	PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { +	ret := &binlogpb.GrpcLogEntry{ +		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, +		Payload: &binlogpb.GrpcLogEntry_ServerHeader{ +			ServerHeader: &binlogpb.ServerHeader{ +				Metadata: mdToMetadataProto(c.Header), +			}, +		}, +	} +	if c.OnClientSide { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT +	} else { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER +	} +	if c.PeerAddr != nil { +		ret.Peer = addrToProto(c.PeerAddr) +	} +	return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { +	OnClientSide bool +	// Message can be a proto.Message or []byte. Other messages formats are not +	// supported. +	Message interface{} +} + +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { +	var ( +		data []byte +		err  error +	) +	if m, ok := c.Message.(proto.Message); ok { +		data, err = proto.Marshal(m) +		if err != nil { +			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) +		} +	} else if b, ok := c.Message.([]byte); ok { +		data = b +	} else { +		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") +	} +	ret := &binlogpb.GrpcLogEntry{ +		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, +		Payload: &binlogpb.GrpcLogEntry_Message{ +			Message: &binlogpb.Message{ +				Length: uint32(len(data)), +				Data:   data, +			}, +		}, +	} +	if c.OnClientSide { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT +	} else { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER +	} +	return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { +	OnClientSide bool +	// Message can be a proto.Message or []byte. Other messages formats are not +	// supported. 
+	Message interface{} +} + +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { +	var ( +		data []byte +		err  error +	) +	if m, ok := c.Message.(proto.Message); ok { +		data, err = proto.Marshal(m) +		if err != nil { +			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) +		} +	} else if b, ok := c.Message.([]byte); ok { +		data = b +	} else { +		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") +	} +	ret := &binlogpb.GrpcLogEntry{ +		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, +		Payload: &binlogpb.GrpcLogEntry_Message{ +			Message: &binlogpb.Message{ +				Length: uint32(len(data)), +				Data:   data, +			}, +		}, +	} +	if c.OnClientSide { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT +	} else { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER +	} +	return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. +type ClientHalfClose struct { +	OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { +	ret := &binlogpb.GrpcLogEntry{ +		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +		Payload: nil, // No payload here. +	} +	if c.OnClientSide { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT +	} else { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER +	} +	return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { +	OnClientSide bool +	Trailer      metadata.MD +	// Err is the status error. +	Err error +	// PeerAddr is required only when it's on client side and the RPC is trailer +	// only. 
+	PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { +	st, ok := status.FromError(c.Err) +	if !ok { +		grpclogLogger.Info("binarylogging: error in trailer is not a status error") +	} +	var ( +		detailsBytes []byte +		err          error +	) +	stProto := st.Proto() +	if stProto != nil && len(stProto.Details) != 0 { +		detailsBytes, err = proto.Marshal(stProto) +		if err != nil { +			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) +		} +	} +	ret := &binlogpb.GrpcLogEntry{ +		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, +		Payload: &binlogpb.GrpcLogEntry_Trailer{ +			Trailer: &binlogpb.Trailer{ +				Metadata:      mdToMetadataProto(c.Trailer), +				StatusCode:    uint32(st.Code()), +				StatusMessage: st.Message(), +				StatusDetails: detailsBytes, +			}, +		}, +	} +	if c.OnClientSide { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT +	} else { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER +	} +	if c.PeerAddr != nil { +		ret.Peer = addrToProto(c.PeerAddr) +	} +	return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { +	OnClientSide bool +} + +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { +	ret := &binlogpb.GrpcLogEntry{ +		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, +		Payload: nil, +	} +	if c.OnClientSide { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT +	} else { +		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER +	} +	return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { +	switch key { +	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": +		return true +	case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. 
+		return false +	} +	return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { +	ret := &binlogpb.Metadata{} +	for k, vv := range md { +		if metadataKeyOmit(k) { +			continue +		} +		for _, v := range vv { +			ret.Entry = append(ret.Entry, +				&binlogpb.MetadataEntry{ +					Key:   k, +					Value: []byte(v), +				}, +			) +		} +	} +	return ret +} + +func addrToProto(addr net.Addr) *binlogpb.Address { +	ret := &binlogpb.Address{} +	switch a := addr.(type) { +	case *net.TCPAddr: +		if a.IP.To4() != nil { +			ret.Type = binlogpb.Address_TYPE_IPV4 +		} else if a.IP.To16() != nil { +			ret.Type = binlogpb.Address_TYPE_IPV6 +		} else { +			ret.Type = binlogpb.Address_TYPE_UNKNOWN +			// Do not set address and port fields. +			break +		} +		ret.Address = a.IP.String() +		ret.IpPort = uint32(a.Port) +	case *net.UnixAddr: +		ret.Type = binlogpb.Address_TYPE_UNIX +		ret.Address = a.String() +	default: +		ret.Type = binlogpb.Address_TYPE_UNKNOWN +	} +	return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 000000000..264de387c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package binarylog + +import ( +	"bufio" +	"encoding/binary" +	"io" +	"sync" +	"time" + +	"github.com/golang/protobuf/proto" +	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +) + +var ( +	// DefaultSink is the sink where the logs will be written to. It's exported +	// for the binarylog package to update. +	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. +type Sink interface { +	// Write will be called to write the log entry into the sink. +	// +	// It should be thread-safe so it can be called in parallel. +	Write(*binlogpb.GrpcLogEntry) error +	// Close will be called when the Sink is replaced by a new Sink. +	Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error                       { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. 
+func newWriterSink(w io.Writer) Sink { +	return &writerSink{out: w} +} + +type writerSink struct { +	out io.Writer +} + +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { +	b, err := proto.Marshal(e) +	if err != nil { +		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) +		return err +	} +	hdr := make([]byte, 4) +	binary.BigEndian.PutUint32(hdr, uint32(len(b))) +	if _, err := ws.out.Write(hdr); err != nil { +		return err +	} +	if _, err := ws.out.Write(b); err != nil { +		return err +	} +	return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufferedSink struct { +	mu             sync.Mutex +	closer         io.Closer +	out            Sink          // out is built on buf. +	buf            *bufio.Writer // buf is kept for flush. +	flusherStarted bool + +	writeTicker *time.Ticker +	done        chan struct{} +} + +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { +	fs.mu.Lock() +	defer fs.mu.Unlock() +	if !fs.flusherStarted { +		// Start the write loop when Write is called. 
+		fs.startFlushGoroutine() +		fs.flusherStarted = true +	} +	if err := fs.out.Write(e); err != nil { +		return err +	} +	return nil +} + +const ( +	bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { +	fs.writeTicker = time.NewTicker(bufFlushDuration) +	go func() { +		for { +			select { +			case <-fs.done: +				return +			case <-fs.writeTicker.C: +			} +			fs.mu.Lock() +			if err := fs.buf.Flush(); err != nil { +				grpclogLogger.Warningf("failed to flush to Sink: %v", err) +			} +			fs.mu.Unlock() +		} +	}() +} + +func (fs *bufferedSink) Close() error { +	fs.mu.Lock() +	defer fs.mu.Unlock() +	if fs.writeTicker != nil { +		fs.writeTicker.Stop() +	} +	close(fs.done) +	if err := fs.buf.Flush(); err != nil { +		grpclogLogger.Warningf("failed to flush to Sink: %v", err) +	} +	if err := fs.closer.Close(); err != nil { +		grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) +	} +	if err := fs.out.Close(); err != nil { +		grpclogLogger.Warningf("failed to close the Sink: %v", err) +	} +	return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { +	bufW := bufio.NewWriter(o) +	return &bufferedSink{ +		closer: o, +		out:    newWriterSink(bufW), +		buf:    bufW, +		done:   make(chan struct{}), +	} +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 000000000..9f6a0c120 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { +	c       chan interface{} +	mu      sync.Mutex +	backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { +	return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. 
+func (b *Unbounded) Put(t interface{}) { +	b.mu.Lock() +	if len(b.backlog) == 0 { +		select { +		case b.c <- t: +			b.mu.Unlock() +			return +		default: +		} +	} +	b.backlog = append(b.backlog, t) +	b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { +	b.mu.Lock() +	if len(b.backlog) > 0 { +		select { +		case b.c <- b.backlog[0]: +			b.backlog[0] = nil +			b.backlog = b.backlog[1:] +		default: +		} +	} +	b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +func (b *Unbounded) Get() <-chan interface{} { +	return b.c +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 000000000..777cbcd79 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,789 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. 
+// +// All APIs in this package are experimental. +package channelz + +import ( +	"context" +	"errors" +	"fmt" +	"sort" +	"sync" +	"sync/atomic" +	"time" + +	"google.golang.org/grpc/grpclog" +) + +const ( +	defaultMaxTraceEntry int32 = 30 +) + +var ( +	db    dbWrapper +	idGen idGenerator +	// EntryPerPage defines the number of channelz entries to be shown on a web page. +	EntryPerPage  = int64(50) +	curState      int32 +	maxTraceEntry = defaultMaxTraceEntry +) + +// TurnOn turns on channelz data collection. +func TurnOn() { +	if !IsOn() { +		db.set(newChannelMap()) +		idGen.reset() +		atomic.StoreInt32(&curState, 1) +	} +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { +	return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { +	atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { +	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { +	i := atomic.LoadInt32(&maxTraceEntry) +	return int(i) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. +type dbWrapper struct { +	mu sync.RWMutex +	DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { +	d.mu.Lock() +	d.DB = db +	d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { +	d.mu.RLock() +	defer d.mu.RUnlock() +	return d.DB +} + +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. +// +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. 
This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { +	db.set(newChannelMap()) +	idGen.reset() + +	return func() error { +		cm := db.get() +		if cm == nil { +			return nil +		} + +		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +		defer cancel() +		ticker := time.NewTicker(10 * time.Millisecond) +		defer ticker.Stop() +		for { +			cm.mu.RLock() +			topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) +			cm.mu.RUnlock() + +			if err := ctx.Err(); err != nil { +				return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) +			} +			if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { +				return nil +			} +			<-ticker.C +		} +	} +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { +	return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. 
The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { +	return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { +	return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { +	return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { +	return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { +	return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *ServerMetric { +	return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { +	id := idGen.genID() +	var parent int64 +	isTopChannel := true +	if pid != nil { +		isTopChannel = false +		parent = pid.Int() +	} + +	if !IsOn() { +		return newIdentifer(RefChannel, id, pid) +	} + +	cn := &channel{ +		refName:     ref, +		c:           c, +		subChans:    make(map[int64]string), +		nestedChans: make(map[int64]string), +		id:          id, +		pid:         parent, +		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, +	} +	db.get().addChannel(id, cn, isTopChannel, parent) +	return newIdentifer(RefChannel, id, pid) +} + +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { +	if pid == nil { +		return nil, errors.New("a SubChannel's parent id cannot be nil") +	} +	id := idGen.genID() +	if !IsOn() { +		return newIdentifer(RefSubChannel, id, pid), nil +	} + +	sc := &subChannel{ +		refName: ref, +		c:       c, +		sockets: make(map[int64]string), +		id:      id, +		pid:     pid.Int(), +		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, +	} +	db.get().addSubChannel(id, sc, pid.Int()) +	return newIdentifer(RefSubChannel, id, pid), nil +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterServer(s Server, ref string) *Identifier { +	id := idGen.genID() +	if !IsOn() { +		return newIdentifer(RefServer, id, nil) +	} + +	svr := &server{ +		refName:       ref, +		s:             s, +		sockets:       make(map[int64]string), +		listenSockets: make(map[int64]string), +		id:            id, +	} +	db.get().addServer(id, svr) +	return newIdentifer(RefServer, id, nil) +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { +	if pid == nil { +		return nil, errors.New("a ListenSocket's parent id cannot be 0") +	} +	id := idGen.genID() +	if !IsOn() { +		return newIdentifer(RefListenSocket, id, pid), nil +	} + +	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} +	db.get().addListenSocket(id, ls, pid.Int()) +	return newIdentifer(RefListenSocket, id, pid), nil +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { +	if pid == nil { +		return nil, errors.New("a NormalSocket's parent id cannot be 0") +	} +	id := idGen.genID() +	if !IsOn() { +		return newIdentifer(RefNormalSocket, id, pid), nil +	} + +	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} +	db.get().addNormalSocket(id, ns, pid.Int()) +	return newIdentifer(RefNormalSocket, id, pid), nil +} + +// RemoveEntry removes an entry with unique channelz tracking id to be id from +// channelz database. +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { +	if !IsOn() { +		return +	} +	db.get().removeEntry(id.Int()) +} + +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEventDesc struct { +	Desc     string +	Severity Severity +	Parent   *TraceEventDesc +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { +	// Log only the trace description associated with the bottom most entity. +	switch desc.Severity { +	case CtUnknown, CtInfo: +		l.InfoDepth(depth+1, withParens(id)+desc.Desc) +	case CtWarning: +		l.WarningDepth(depth+1, withParens(id)+desc.Desc) +	case CtError: +		l.ErrorDepth(depth+1, withParens(id)+desc.Desc) +	} + +	if getMaxTraceEntry() == 0 { +		return +	} +	if IsOn() { +		db.get().traceEvent(id.Int(), desc) +	} +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. 
Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. +type channelMap struct { +	mu               sync.RWMutex +	topLevelChannels map[int64]struct{} +	servers          map[int64]*server +	channels         map[int64]*channel +	subChannels      map[int64]*subChannel +	listenSockets    map[int64]*listenSocket +	normalSockets    map[int64]*normalSocket +} + +func newChannelMap() *channelMap { +	return &channelMap{ +		topLevelChannels: make(map[int64]struct{}), +		channels:         make(map[int64]*channel), +		listenSockets:    make(map[int64]*listenSocket), +		normalSockets:    make(map[int64]*normalSocket), +		servers:          make(map[int64]*server), +		subChannels:      make(map[int64]*subChannel), +	} +} + +func (c *channelMap) addServer(id int64, s *server) { +	c.mu.Lock() +	s.cm = c +	c.servers[id] = s +	c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { +	c.mu.Lock() +	cn.cm = c +	cn.trace.cm = c +	c.channels[id] = cn +	if isTopChannel { +		c.topLevelChannels[id] = struct{}{} +	} else { +		c.findEntry(pid).addChild(id, cn) +	} +	c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { +	c.mu.Lock() +	sc.cm = c +	sc.trace.cm = c +	c.subChannels[id] = sc +	c.findEntry(pid).addChild(id, sc) +	c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { +	c.mu.Lock() +	ls.cm = c +	c.listenSockets[id] = ls +	c.findEntry(pid).addChild(id, ls) +	c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { +	c.mu.Lock() +	ns.cm = c +	c.normalSockets[id] = ns +	c.findEntry(pid).addChild(id, ns) +	c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. 
+// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { +	c.mu.Lock() +	c.findEntry(id).triggerDelete() +	c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { +	e := c.findEntry(id) +	if v, ok := e.(tracedChannel); ok { +		v.decrTraceRefCount() +		e.deleteSelfIfReady() +	} +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { +	var v entry +	var ok bool +	if v, ok = c.channels[id]; ok { +		return v +	} +	if v, ok = c.subChannels[id]; ok { +		return v +	} +	if v, ok = c.servers[id]; ok { +		return v +	} +	if v, ok = c.listenSockets[id]; ok { +		return v +	} +	if v, ok = c.normalSockets[id]; ok { +		return v +	} +	return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. 
+func (c *channelMap) deleteEntry(id int64) { +	var ok bool +	if _, ok = c.normalSockets[id]; ok { +		delete(c.normalSockets, id) +		return +	} +	if _, ok = c.subChannels[id]; ok { +		delete(c.subChannels, id) +		return +	} +	if _, ok = c.channels[id]; ok { +		delete(c.channels, id) +		delete(c.topLevelChannels, id) +		return +	} +	if _, ok = c.listenSockets[id]; ok { +		delete(c.listenSockets, id) +		return +	} +	if _, ok = c.servers[id]; ok { +		delete(c.servers, id) +		return +	} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { +	c.mu.Lock() +	child := c.findEntry(id) +	childTC, ok := child.(tracedChannel) +	if !ok { +		c.mu.Unlock() +		return +	} +	childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) +	if desc.Parent != nil { +		parent := c.findEntry(child.getParentID()) +		var chanType RefChannelType +		switch child.(type) { +		case *channel: +			chanType = RefChannel +		case *subChannel: +			chanType = RefSubChannel +		} +		if parentTC, ok := parent.(tracedChannel); ok { +			parentTC.getChannelTrace().append(&TraceEvent{ +				Desc:      desc.Parent.Desc, +				Severity:  desc.Parent.Severity, +				Timestamp: time.Now(), +				RefID:     id, +				RefName:   childTC.getRefName(), +				RefType:   chanType, +			}) +			childTC.incrTraceRefCount() +		} +	} +	c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int           { return len(s) } +func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { +	n := make(map[int64]string) +	for k, v := range m { +		n[k] = v +	} +	return n +} + +func min(a, b int64) int64 { +	if a < b { +		return a +	} +	return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { +	if maxResults <= 0 { +		maxResults = EntryPerPage +	} +	c.mu.RLock() +	l := 
int64(len(c.topLevelChannels)) +	ids := make([]int64, 0, l) +	cns := make([]*channel, 0, min(l, maxResults)) + +	for k := range c.topLevelChannels { +		ids = append(ids, k) +	} +	sort.Sort(int64Slice(ids)) +	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) +	count := int64(0) +	var end bool +	var t []*ChannelMetric +	for i, v := range ids[idx:] { +		if count == maxResults { +			break +		} +		if cn, ok := c.channels[v]; ok { +			cns = append(cns, cn) +			t = append(t, &ChannelMetric{ +				NestedChans: copyMap(cn.nestedChans), +				SubChans:    copyMap(cn.subChans), +			}) +			count++ +		} +		if i == len(ids[idx:])-1 { +			end = true +			break +		} +	} +	c.mu.RUnlock() +	if count == 0 { +		end = true +	} + +	for i, cn := range cns { +		t[i].ChannelData = cn.c.ChannelzMetric() +		t[i].ID = cn.id +		t[i].RefName = cn.refName +		t[i].Trace = cn.trace.dumpData() +	} +	return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { +	if maxResults <= 0 { +		maxResults = EntryPerPage +	} +	c.mu.RLock() +	l := int64(len(c.servers)) +	ids := make([]int64, 0, l) +	ss := make([]*server, 0, min(l, maxResults)) +	for k := range c.servers { +		ids = append(ids, k) +	} +	sort.Sort(int64Slice(ids)) +	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) +	count := int64(0) +	var end bool +	var s []*ServerMetric +	for i, v := range ids[idx:] { +		if count == maxResults { +			break +		} +		if svr, ok := c.servers[v]; ok { +			ss = append(ss, svr) +			s = append(s, &ServerMetric{ +				ListenSockets: copyMap(svr.listenSockets), +			}) +			count++ +		} +		if i == len(ids[idx:])-1 { +			end = true +			break +		} +	} +	c.mu.RUnlock() +	if count == 0 { +		end = true +	} + +	for i, svr := range ss { +		s[i].ServerData = svr.s.ChannelzMetric() +		s[i].ID = svr.id +		s[i].RefName = svr.refName +	} +	return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, 
bool) { +	if maxResults <= 0 { +		maxResults = EntryPerPage +	} +	var svr *server +	var ok bool +	c.mu.RLock() +	if svr, ok = c.servers[id]; !ok { +		// server with id doesn't exist. +		c.mu.RUnlock() +		return nil, true +	} +	svrskts := svr.sockets +	l := int64(len(svrskts)) +	ids := make([]int64, 0, l) +	sks := make([]*normalSocket, 0, min(l, maxResults)) +	for k := range svrskts { +		ids = append(ids, k) +	} +	sort.Sort(int64Slice(ids)) +	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) +	count := int64(0) +	var end bool +	for i, v := range ids[idx:] { +		if count == maxResults { +			break +		} +		if ns, ok := c.normalSockets[v]; ok { +			sks = append(sks, ns) +			count++ +		} +		if i == len(ids[idx:])-1 { +			end = true +			break +		} +	} +	c.mu.RUnlock() +	if count == 0 { +		end = true +	} +	s := make([]*SocketMetric, 0, len(sks)) +	for _, ns := range sks { +		sm := &SocketMetric{} +		sm.SocketData = ns.s.ChannelzMetric() +		sm.ID = ns.id +		sm.RefName = ns.refName +		s = append(s, sm) +	} +	return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { +	cm := &ChannelMetric{} +	var cn *channel +	var ok bool +	c.mu.RLock() +	if cn, ok = c.channels[id]; !ok { +		// channel with id doesn't exist. +		c.mu.RUnlock() +		return nil +	} +	cm.NestedChans = copyMap(cn.nestedChans) +	cm.SubChans = copyMap(cn.subChans) +	// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when +	// holding the lock to prevent potential data race. +	chanCopy := cn.c +	c.mu.RUnlock() +	cm.ChannelData = chanCopy.ChannelzMetric() +	cm.ID = cn.id +	cm.RefName = cn.refName +	cm.Trace = cn.trace.dumpData() +	return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { +	cm := &SubChannelMetric{} +	var sc *subChannel +	var ok bool +	c.mu.RLock() +	if sc, ok = c.subChannels[id]; !ok { +		// subchannel with id doesn't exist. 
+		c.mu.RUnlock() +		return nil +	} +	cm.Sockets = copyMap(sc.sockets) +	// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when +	// holding the lock to prevent potential data race. +	chanCopy := sc.c +	c.mu.RUnlock() +	cm.ChannelData = chanCopy.ChannelzMetric() +	cm.ID = sc.id +	cm.RefName = sc.refName +	cm.Trace = sc.trace.dumpData() +	return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { +	sm := &SocketMetric{} +	c.mu.RLock() +	if ls, ok := c.listenSockets[id]; ok { +		c.mu.RUnlock() +		sm.SocketData = ls.s.ChannelzMetric() +		sm.ID = ls.id +		sm.RefName = ls.refName +		return sm +	} +	if ns, ok := c.normalSockets[id]; ok { +		c.mu.RUnlock() +		sm.SocketData = ns.s.ChannelzMetric() +		sm.ID = ns.id +		sm.RefName = ns.refName +		return sm +	} +	c.mu.RUnlock() +	return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { +	sm := &ServerMetric{} +	var svr *server +	var ok bool +	c.mu.RLock() +	if svr, ok = c.servers[id]; !ok { +		c.mu.RUnlock() +		return nil +	} +	sm.ListenSockets = copyMap(svr.listenSockets) +	c.mu.RUnlock() +	sm.ID = svr.id +	sm.RefName = svr.refName +	sm.ServerData = svr.s.ChannelzMetric() +	return sm +} + +type idGenerator struct { +	id int64 +} + +func (i *idGenerator) reset() { +	atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { +	return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000..c9a27acd3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { +	typ RefChannelType +	id  int64 +	str string +	pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { +	return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { +	return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel:    [Channel #parent-channel-number Channel #channel-number] +// Sub channel:       [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { +	return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { +	if (id != nil) != (other != nil) { +		return false +	} +	if id == nil && other == nil { +		return true +	} +	return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. 
+func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { +	return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { +	str := fmt.Sprintf("%s #%d", typ, id) +	if pid != nil { +		str = fmt.Sprintf("%s %s", pid, str) +	} +	return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..8e13a3d2c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( +	"fmt" + +	"google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +func withParens(id *Identifier) string { +	return "[" + id.String() + "] " +} + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +	AddTraceEvent(l, id, 1, &TraceEventDesc{ +		Desc:     fmt.Sprint(args...), +		Severity: CtInfo, +	}) +} + +// Infof logs and adds a trace event if channelz is on. 
+func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +	AddTraceEvent(l, id, 1, &TraceEventDesc{ +		Desc:     fmt.Sprintf(format, args...), +		Severity: CtInfo, +	}) +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +	AddTraceEvent(l, id, 1, &TraceEventDesc{ +		Desc:     fmt.Sprint(args...), +		Severity: CtWarning, +	}) +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +	AddTraceEvent(l, id, 1, &TraceEventDesc{ +		Desc:     fmt.Sprintf(format, args...), +		Severity: CtWarning, +	}) +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +	AddTraceEvent(l, id, 1, &TraceEventDesc{ +		Desc:     fmt.Sprint(args...), +		Severity: CtError, +	}) +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +	AddTraceEvent(l, id, 1, &TraceEventDesc{ +		Desc:     fmt.Sprintf(format, args...), +		Severity: CtError, +	}) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 000000000..7b2f350e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,722 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( +	"net" +	"sync" +	"sync/atomic" +	"time" + +	"google.golang.org/grpc/connectivity" +	"google.golang.org/grpc/credentials" +) + +// entry represents a node in the channelz database. +type entry interface { +	// addChild adds a child e, whose channelz id is id to child list +	addChild(id int64, e entry) +	// deleteChild deletes a child with channelz id to be id from child list +	deleteChild(id int64) +	// triggerDelete tries to delete self from channelz database. However, if child +	// list is not empty, then deletion from the database is on hold until the last +	// child is deleted from database. +	triggerDelete() +	// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child +	// list is now empty. If both conditions are met, then delete self from database. +	deleteSelfIfReady() +	// getParentID returns parent ID of the entry. 0 value parent ID means no parent. +	getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { +	idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { +	// Note: It is possible for a normal program to reach here under race condition. +	// For example, there could be a race between ClientConn.Close() info being propagated +	// to addrConn and http2Client. ClientConn.Close() cancel the context and result +	// in http2Client to error. The error info is then caught by transport monitor +	// and before addrConn.tearDown() is called in side ClientConn.Close(). 
Therefore, +	// the addrConn will create a new transport. And when registering the new transport in +	// channelz, its parent addrConn could have already been torn down and deleted +	// from channelz tracking, and thus reach the code here. +	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { +	// It is possible for a normal program to reach here under race condition. +	// Refer to the example described in addChild(). +	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { +	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { +	// code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { +	return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { +	// ID is the channelz id of this channel. +	ID int64 +	// RefName is the human readable reference string of this channel. +	RefName string +	// ChannelData contains channel internal metric reported by the channel through +	// ChannelzMetric(). +	ChannelData *ChannelInternalMetric +	// NestedChans tracks the nested channel type children of this channel in the format of +	// a map from nested channel channelz id to corresponding reference string. +	NestedChans map[int64]string +	// SubChans tracks the subchannel type children of this channel in the format of a +	// map from subchannel channelz id to corresponding reference string. 
+	SubChans map[int64]string +	// Sockets tracks the socket type children of this channel in the format of a map +	// from socket channelz id to corresponding reference string. +	// Note current grpc implementation doesn't allow channel having sockets directly, +	// therefore, this is field is unused. +	Sockets map[int64]string +	// Trace contains the most recent traced events. +	Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { +	// ID is the channelz id of this subchannel. +	ID int64 +	// RefName is the human readable reference string of this subchannel. +	RefName string +	// ChannelData contains subchannel internal metric reported by the subchannel +	// through ChannelzMetric(). +	ChannelData *ChannelInternalMetric +	// NestedChans tracks the nested channel type children of this subchannel in the format of +	// a map from nested channel channelz id to corresponding reference string. +	// Note current grpc implementation doesn't allow subchannel to have nested channels +	// as children, therefore, this field is unused. +	NestedChans map[int64]string +	// SubChans tracks the subchannel type children of this subchannel in the format of a +	// map from subchannel channelz id to corresponding reference string. +	// Note current grpc implementation doesn't allow subchannel to have subchannels +	// as children, therefore, this field is unused. +	SubChans map[int64]string +	// Sockets tracks the socket type children of this subchannel in the format of a map +	// from socket channelz id to corresponding reference string. +	Sockets map[int64]string +	// Trace contains the most recent traced events. +	Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). 
+type ChannelInternalMetric struct { +	// current connectivity state of the channel. +	State connectivity.State +	// The target this channel originally tried to connect to.  May be absent +	Target string +	// The number of calls started on the channel. +	CallsStarted int64 +	// The number of calls that have completed with an OK status. +	CallsSucceeded int64 +	// The number of calls that have a completed with a non-OK status. +	CallsFailed int64 +	// The last time a call was started on the channel. +	LastCallStartedTimestamp time.Time +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { +	// EventNum is the number of events that ever got traced (i.e. including those that have been deleted) +	EventNum int64 +	// CreationTime is the creation time of the trace. +	CreationTime time.Time +	// Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the +	// oldest one) +	Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { +	// Desc is a simple description of the trace event. +	Desc string +	// Severity states the severity of this trace event. +	Severity Severity +	// Timestamp is the event time. +	Timestamp time.Time +	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is +	// involved in this event. +	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) +	RefID int64 +	// RefName is the reference name for the entity that gets referenced in the event. +	RefName string +	// RefType indicates the referenced entity type, i.e Channel or SubChannel. +	RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. 
+type Channel interface { +	ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { +	return &ChannelInternalMetric{} +} + +type channel struct { +	refName     string +	c           Channel +	closeCalled bool +	nestedChans map[int64]string +	subChans    map[int64]string +	id          int64 +	pid         int64 +	cm          *channelMap +	trace       *channelTrace +	// traceRefCount is the number of trace events that reference this channel. +	// Non-zero traceRefCount means the trace of this channel cannot be deleted. +	traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { +	switch v := e.(type) { +	case *subChannel: +		c.subChans[id] = v.refName +	case *channel: +		c.nestedChans[id] = v.refName +	default: +		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) +	} +} + +func (c *channel) deleteChild(id int64) { +	delete(c.subChans, id) +	delete(c.nestedChans, id) +	c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { +	c.closeCalled = true +	c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { +	return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (c *channel) deleteSelfFromTree() (deleted bool) { +	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { +		return false +	} +	// not top channel +	if c.pid != 0 { +		c.cm.findEntry(c.pid).deleteChild(c.id) +	} +	return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { +	if c.getTraceRefCount() != 0 { +		c.c = &dummyChannel{} +		return false +	} +	return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +//     parent's child list. +//  2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +//     will return entry not found error. 
+func (c *channel) deleteSelfIfReady() { +	if !c.deleteSelfFromTree() { +		return +	} +	if !c.deleteSelfFromMap() { +		return +	} +	c.cm.deleteEntry(c.id) +	c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { +	return c.trace +} + +func (c *channel) incrTraceRefCount() { +	atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { +	atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { +	i := atomic.LoadInt32(&c.traceRefCount) +	return int(i) +} + +func (c *channel) getRefName() string { +	return c.refName +} + +type subChannel struct { +	refName       string +	c             Channel +	closeCalled   bool +	sockets       map[int64]string +	id            int64 +	pid           int64 +	cm            *channelMap +	trace         *channelTrace +	traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { +	if v, ok := e.(*normalSocket); ok { +		sc.sockets[id] = v.refName +	} else { +		logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) +	} +} + +func (sc *subChannel) deleteChild(id int64) { +	delete(sc.sockets, id) +	sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { +	sc.closeCalled = true +	sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { +	return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) { +	if !sc.closeCalled || len(sc.sockets) != 0 { +		return false +	} +	sc.cm.findEntry(sc.pid).deleteChild(sc.id) +	return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { +	if sc.getTraceRefCount() != 0 { +		// free the grpc struct (i.e. addrConn) +		sc.c = &dummyChannel{} +		return false +	} +	return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +//  1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +//     its parent's child list. +//  2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +//     by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { +	if !sc.deleteSelfFromTree() { +		return +	} +	if !sc.deleteSelfFromMap() { +		return +	} +	sc.cm.deleteEntry(sc.id) +	sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { +	return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { +	atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { +	atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { +	i := atomic.LoadInt32(&sc.traceRefCount) +	return int(i) +} + +func (sc *subChannel) getRefName() string { +	return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { +	// ID is the channelz id of this socket. +	ID int64 +	// RefName is the human readable reference string of this socket. +	RefName string +	// SocketData contains socket internal metric reported by the socket through +	// ChannelzMetric(). +	SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { +	// The number of streams that have been started. +	StreamsStarted int64 +	// The number of streams that have ended successfully: +	// On client side, receiving frame with eos bit set. +	// On server side, sending frame with eos bit set. +	StreamsSucceeded int64 +	// The number of streams that have ended unsuccessfully: +	// On client side, termination without receiving frame with eos bit set. +	// On server side, termination without sending frame with eos bit set. +	StreamsFailed int64 +	// The number of messages successfully sent on this socket. +	MessagesSent     int64 +	MessagesReceived int64 +	// The number of keep alives sent.  This is typically implemented with HTTP/2 +	// ping messages. 
+	KeepAlivesSent int64 +	// The last time a stream was created by this endpoint.  Usually unset for +	// servers. +	LastLocalStreamCreatedTimestamp time.Time +	// The last time a stream was created by the remote endpoint.  Usually unset +	// for clients. +	LastRemoteStreamCreatedTimestamp time.Time +	// The last time a message was sent by this endpoint. +	LastMessageSentTimestamp time.Time +	// The last time a message was received by this endpoint. +	LastMessageReceivedTimestamp time.Time +	// The amount of window, granted to the local endpoint by the remote endpoint. +	// This may be slightly out of date due to network latency.  This does NOT +	// include stream level or TCP level flow control info. +	LocalFlowControlWindow int64 +	// The amount of window, granted to the remote endpoint by the local endpoint. +	// This may be slightly out of date due to network latency.  This does NOT +	// include stream level or TCP level flow control info. +	RemoteFlowControlWindow int64 +	// The locally bound address. +	LocalAddr net.Addr +	// The remote bound address.  May be absent. +	RemoteAddr net.Addr +	// Optional, represents the name of the remote endpoint, if different than +	// the original target name. +	RemoteName    string +	SocketOptions *SocketOptionData +	Security      credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. 
+type Socket interface { +	ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { +	refName string +	s       Socket +	id      int64 +	pid     int64 +	cm      *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { +	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { +	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { +	ls.cm.deleteEntry(ls.id) +	ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { +	logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { +	return ls.pid +} + +type normalSocket struct { +	refName string +	s       Socket +	id      int64 +	pid     int64 +	cm      *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { +	logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { +	logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { +	ns.cm.deleteEntry(ns.id) +	ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { +	logger.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { +	return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { +	// ID is the channelz id of this server. +	ID int64 +	// RefName is the human readable reference string of this server. +	RefName string +	// ServerData contains server internal metric reported by the server through +	// ChannelzMetric(). 
+	ServerData *ServerInternalMetric +	// ListenSockets tracks the listener socket type children of this server in the +	// format of a map from socket channelz id to corresponding reference string. +	ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { +	// The number of incoming calls started on the server. +	CallsStarted int64 +	// The number of incoming calls that have completed with an OK status. +	CallsSucceeded int64 +	// The number of incoming calls that have a completed with a non-OK status. +	CallsFailed int64 +	// The last time a call was started on the server. +	LastCallStartedTimestamp time.Time +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. +type Server interface { +	ChannelzMetric() *ServerInternalMetric +} + +type server struct { +	refName       string +	s             Server +	closeCalled   bool +	sockets       map[int64]string +	listenSockets map[int64]string +	id            int64 +	cm            *channelMap +} + +func (s *server) addChild(id int64, e entry) { +	switch v := e.(type) { +	case *normalSocket: +		s.sockets[id] = v.refName +	case *listenSocket: +		s.listenSockets[id] = v.refName +	default: +		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) +	} +} + +func (s *server) deleteChild(id int64) { +	delete(s.sockets, id) +	delete(s.listenSockets, id) +	s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { +	s.closeCalled = true +	s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { +	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { +		return +	} +	s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { +	return 0 +} + +type tracedChannel interface { +	getChannelTrace() *channelTrace +	incrTraceRefCount() +	decrTraceRefCount() +	getRefName() string +} + +type channelTrace 
struct { +	cm          *channelMap +	createdTime time.Time +	eventCount  int64 +	mu          sync.Mutex +	events      []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { +	c.mu.Lock() +	if len(c.events) == getMaxTraceEntry() { +		del := c.events[0] +		c.events = c.events[1:] +		if del.RefID != 0 { +			// start recursive cleanup in a goroutine to not block the call originated from grpc. +			go func() { +				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. +				c.cm.mu.Lock() +				c.cm.decrTraceRefCount(del.RefID) +				c.cm.mu.Unlock() +			}() +		} +	} +	e.Timestamp = time.Now() +	c.events = append(c.events, e) +	c.eventCount++ +	c.mu.Unlock() +} + +func (c *channelTrace) clear() { +	c.mu.Lock() +	for _, e := range c.events { +		if e.RefID != 0 { +			// caller should have already held the c.cm.mu lock. +			c.cm.decrTraceRefCount(e.RefID) +		} +	} +	c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( +	// CtUnknown indicates unknown severity of a trace event. +	CtUnknown Severity = iota +	// CtInfo indicates info level severity of a trace event. +	CtInfo +	// CtWarning indicates warning level severity of a trace event. +	CtWarning +	// CtError indicates error level severity of a trace event. +	CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( +	// RefUnknown indicates an unknown entity type, the zero value for this type. +	RefUnknown RefChannelType = iota +	// RefChannel indicates the referenced entity is a Channel. +	RefChannel +	// RefSubChannel indicates the referenced entity is a SubChannel. +	RefSubChannel +	// RefServer indicates the referenced entity is a Server. 
+	RefServer +	// RefListenSocket indicates the referenced entity is a ListenSocket. +	RefListenSocket +	// RefNormalSocket indicates the referenced entity is a NormalSocket. +	RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ +	RefUnknown:      "Unknown", +	RefChannel:      "Channel", +	RefSubChannel:   "SubChannel", +	RefServer:       "Server", +	RefListenSocket: "ListenSocket", +	RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { +	return refChannelTypeToString[r] +} + +func (c *channelTrace) dumpData() *ChannelTrace { +	c.mu.Lock() +	ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} +	ct.Events = c.events[:len(c.events)] +	c.mu.Unlock() +	return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 000000000..1b1c4cce3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( +	"syscall" + +	"golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. 
+type SocketOptionData struct { +	Linger      *unix.Linger +	RecvTimeout *unix.Timeval +	SendTimeout *unix.Timeval +	TCPInfo     *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { +	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { +		s.Linger = v +	} +	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { +		s.RecvTimeout = v +	} +	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { +		s.SendTimeout = v +	} +	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { +		s.TCPInfo = v +	} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 000000000..8b06eed1a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,43 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( +	"sync" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. 
+// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) { +	once.Do(func() { +		logger.Warning("Channelz: socket options are not supported on non-linux environments") +	}) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go new file mode 100644 index 000000000..8d194e44e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -0,0 +1,37 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( +	"syscall" +) + +// GetSocketOption gets the socket option info of the conn. 
+func GetSocketOption(socket interface{}) *SocketOptionData { +	c, ok := socket.(syscall.Conn) +	if !ok { +		return nil +	} +	data := &SocketOptionData{} +	if rawConn, err := c.SyscallConn(); err == nil { +		rawConn.Control(data.Getsockopt) +		return data +	} +	return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go new file mode 100644 index 000000000..837ddc402 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -0,0 +1,27 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c interface{}) *SocketOptionData { +	return nil +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 000000000..32c9b5903 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( +	"context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +	return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { +	return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +	return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +	return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 000000000..25ade6230 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( +	"crypto/tls" +	"crypto/x509" +	"net/url" + +	"google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { +	if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { +		return nil +	} +	return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { +	if cert == nil || cert.URIs == nil { +		return nil +	} +	var spiffeID *url.URL +	for _, uri := range cert.URIs { +		if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { +			continue +		} +		// From this point, we assume the uri is intended for a SPIFFE ID. 
+		if len(uri.String()) > 2048 { +			logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") +			return nil +		} +		if len(uri.Host) == 0 || len(uri.Path) == 0 { +			logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") +			return nil +		} +		if len(uri.Host) > 255 { +			logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") +			return nil +		} +		// A valid SPIFFE certificate can only have exactly one URI SAN field. +		if len(cert.URIs) > 1 { +			logger.Warning("invalid SPIFFE ID: multiple URI SANs") +			return nil +		} +		spiffeID = uri +	} +	return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 000000000..2919632d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( +	"net" +	"syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. 
So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { +	net.Conn +	// sysConn is a type alias of syscall.Conn. It's necessary because the name +	// `Conn` collides with `net.Conn`. +	sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { +	sysConn, ok := rawConn.(syscall.Conn) +	if !ok { +		return newConn +	} +	return &syscallConn{ +		Conn:    newConn, +		sysConn: sysConn, +	} +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 000000000..f792fd22c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( +	"crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. 
+func AppendH2ToNextProtos(ps []string) []string { +	for _, p := range ps { +		if p == alpnProtoStrH2 { +			return ps +		} +	} +	ret := make([]string, 0, len(ps)+1) +	ret = append(ret, ps...) +	return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { +	if cfg == nil { +		return &tls.Config{} +	} + +	return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..5ba9d94d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( +	"os" +	"strconv" +	"strings" +) + +var ( +	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). +	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) +	// AdvertiseCompressors is set if registered compressor should be advertised +	// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). 
+	AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) +	// RingHashCap indicates the maximum ring size which defaults to 4096 +	// entries but may be overridden by setting the environment variable +	// "GRPC_RING_HASH_CAP".  This does not override the default bounds +	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). +	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) +) + +func boolFromEnv(envVar string, def bool) bool { +	if def { +		// The default is true; return true unless the variable is "false". +		return !strings.EqualFold(os.Getenv(envVar), "false") +	} +	// The default is false; return false unless the variable is "true". +	return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { +	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) +	if err != nil { +		return def +	} +	if v < min { +		return min +	} +	if v > max { +		return max +	} +	return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000..821dd0a7c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package envconfig + +import "os" + +const ( +	envObservabilityConfig     = "GRPC_GCP_OBSERVABILITY_CONFIG" +	envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( +	// ObservabilityConfig is the json configuration for the gcp/observability +	// package specified directly in the envObservabilityConfig env var. +	ObservabilityConfig = os.Getenv(envObservabilityConfig) +	// ObservabilityConfigFile is the json configuration for the +	// gcp/observability specified in a file with the location specified in +	// envObservabilityConfigFile env var. +	ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..04136882c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( +	"os" +) + +const ( +	// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. +	// Do not use this and read from env directly. Its value is read and kept in +	// variable XDSBootstrapFileName. +	// +	// When both bootstrap FileName and FileContent are set, FileName is used. 
+	XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" +	// XDSBootstrapFileContentEnv is the env variable to set bootstrap file +	// content. Do not use this and read from env directly. Its value is read +	// and kept in variable XDSBootstrapFileContent. +	// +	// When both bootstrap FileName and FileContent are set, FileName is used. +	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( +	// XDSBootstrapFileName holds the name of the file which contains xDS +	// bootstrap configuration. Users can specify the location of the bootstrap +	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP". +	// +	// When both bootstrap FileName and FileContent are set, FileName is used. +	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) +	// XDSBootstrapFileContent holds the content of the xDS bootstrap +	// configuration. Users can specify the bootstrap config by setting the +	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". +	// +	// When both bootstrap FileName and FileContent are set, FileName is used. +	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) +	// XDSRingHash indicates whether ring hash support is enabled, which can be +	// disabled by setting the environment variable +	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". +	XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) +	// XDSClientSideSecurity is used to control processing of security +	// configuration on the client-side. +	// +	// Note that there is no env var protection for the server-side because we +	// have a brand new API on the server-side and users explicitly need to use +	// the new API to get security integration on the server. 
+	XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) +	// XDSAggregateAndDNS indicates whether processing of aggregated cluster +	// and DNS cluster is enabled, which can be enabled by setting the +	// environment variable +	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to +	// "true". +	XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) + +	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, +	// which can be disabled by setting the environment variable +	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". +	XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) +	// XDSOutlierDetection indicates whether outlier detection support is +	// enabled, which can be disabled by setting the environment variable +	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". +	XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) +	// XDSFederation indicates whether federation support is enabled, which can +	// be enabled by setting the environment variable +	// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". +	XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) + +	// XDSRLS indicates whether processing of Cluster Specifier plugins and +	// support for the RLS CLuster Specifier is enabled, which can be enabled by +	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to +	// "true". +	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) + +	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
+	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..b68e26a36 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( +	"os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { +	if DepthLogger != nil { +		DepthLogger.InfoDepth(depth, args...) +	} else { +		Logger.Infoln(args...) +	} +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { +	if DepthLogger != nil { +		DepthLogger.WarningDepth(depth, args...) +	} else { +		Logger.Warningln(args...) +	} +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { +	if DepthLogger != nil { +		DepthLogger.ErrorDepth(depth, args...) +	} else { +		Logger.Errorln(args...) 
+	} +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { +	if DepthLogger != nil { +		DepthLogger.FatalDepth(depth, args...) +	} else { +		Logger.Fatalln(args...) +	} +	os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { +	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +	Info(args ...interface{}) +	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +	Infoln(args ...interface{}) +	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +	Infof(format string, args ...interface{}) +	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +	Warning(args ...interface{}) +	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +	Warningln(args ...interface{}) +	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +	Warningf(format string, args ...interface{}) +	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +	Error(args ...interface{}) +	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +	Errorln(args ...interface{}) +	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +	Errorf(format string, args ...interface{}) +	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. +	// gRPC ensures that all Fatal logs will exit with os.Exit(1). +	// Implementations may also call os.Exit() with a non-zero exit code. +	Fatal(args ...interface{}) +	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +	// gRPC ensures that all Fatal logs will exit with os.Exit(1). +	// Implementations may also call os.Exit() with a non-zero exit code. 
+	Fatalln(args ...interface{}) +	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +	// gRPC ensures that all Fatal logs will exit with os.Exit(1). +	// Implementations may also call os.Exit() with a non-zero exit code. +	Fatalf(format string, args ...interface{}) +	// V reports whether verbosity level l is at least the requested verbose level. +	V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { +	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. +	InfoDepth(depth int, args ...interface{}) +	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. +	WarningDepth(depth int, args ...interface{}) +	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. +	ErrorDepth(depth int, args ...interface{}) +	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. +	FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 000000000..82af70e96 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( +	"fmt" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { +	logger DepthLoggerV2 +	prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +	if pl != nil { +		// Handle nil, so the tests can pass in a nil logger. +		format = pl.prefix + format +		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) +		return +	} +	InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +	if pl != nil { +		format = pl.prefix + format +		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) +		return +	} +	WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +	if pl != nil { +		format = pl.prefix + format +		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) +		return +	} +	ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +	if !Logger.V(2) { +		return +	} +	if pl != nil { +		// Handle nil, so the tests can pass in a nil logger. 
+		format = pl.prefix + format +		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) +		return +	} +	InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +	return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000..517ea7064 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( +	"math/rand" +	"sync" +	"time" +) + +var ( +	r  = rand.New(rand.NewSource(time.Now().UnixNano())) +	mu sync.Mutex +) + +// Int implements rand.Int on the grpcrand global source. +func Int() int { +	mu.Lock() +	defer mu.Unlock() +	return r.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { +	mu.Lock() +	defer mu.Unlock() +	return r.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. 
+func Intn(n int) int { +	mu.Lock() +	defer mu.Unlock() +	return r.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { +	mu.Lock() +	defer mu.Unlock() +	return r.Int31n(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { +	mu.Lock() +	defer mu.Unlock() +	return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { +	mu.Lock() +	defer mu.Unlock() +	return r.Uint64() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 000000000..fbe697c37 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( +	"sync" +	"sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { +	fired int32 +	c     chan struct{} +	o     sync.Once +} + +// Fire causes e to complete.  It is safe to call multiple times, and +// concurrently.  It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. 
+func (e *Event) Fire() bool { +	ret := false +	e.o.Do(func() { +		atomic.StoreInt32(&e.fired, 1) +		close(e.c) +		ret = true +	}) +	return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { +	return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { +	return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { +	return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 000000000..6635f7bca --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( +	"sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. 
+func OnceFunc(f func()) func() { +	var once sync.Once +	return func() { +		once.Do(f) +	} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000..9f4090967 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( +	"strings" + +	"google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { +	for _, compressor := range RegisteredCompressorNames { +		if compressor == name { +			return true +		} +	} +	return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. 
+func RegisteredCompressors() string { +	if !envconfig.AdvertiseCompressors { +		return "" +	} +	return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 000000000..b25b0baec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( +	"strconv" +	"time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { +	if d%r > 0 { +		return int64(d/r + 1) +	} +	return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { +	// TODO: This is simplistic and not bandwidth efficient. Improve it. 
+	if t <= 0 { +		return "0n" +	} +	if d := div(t, time.Nanosecond); d <= maxTimeoutValue { +		return strconv.FormatInt(d, 10) + "n" +	} +	if d := div(t, time.Microsecond); d <= maxTimeoutValue { +		return strconv.FormatInt(d, 10) + "u" +	} +	if d := div(t, time.Millisecond); d <= maxTimeoutValue { +		return strconv.FormatInt(d, 10) + "m" +	} +	if d := div(t, time.Second); d <= maxTimeoutValue { +		return strconv.FormatInt(d, 10) + "S" +	} +	if d := div(t, time.Minute); d <= maxTimeoutValue { +		return strconv.FormatInt(d, 10) + "M" +	} +	// Note that maxTimeoutValue * time.Hour > MaxInt64. +	return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 000000000..e2f948e8f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 000000000..6f22bd891 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( +	"context" + +	"google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { +	return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists.  The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { +	md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) +	return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 000000000..ec62b4775 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( +	"errors" +	"strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +func ParseMethod(methodName string) (service, method string, _ error) { +	if !strings.HasPrefix(methodName, "/") { +		return "", "", errors.New("invalid method name: should start with /") +	} +	methodName = methodName[1:] + +	pos := strings.LastIndex(methodName, "/") +	if pos < 0 { +		return "", "", errors.New("invalid method name: suffix /method is missing") +	} +	return methodName[:pos], methodName[pos+1:], nil +} + +// baseContentType is the base content-type for gRPC.  This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";".  See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type.  The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. 
+// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { +	if contentType == baseContentType { +		return "", true +	} +	if !strings.HasPrefix(contentType, baseContentType) { +		return "", false +	} +	// guaranteed since != baseContentType and has baseContentType prefix +	switch contentType[len(baseContentType)] { +	case '+', ';': +		// this will return true for "application/grpc+" or "application/grpc;" +		// which the previous validContentType function tested to be valid, so we +		// just say that no content-subtype is specified in this case +		return contentType[len(baseContentType)+1:], true +	default: +		return "", false +	} +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { +	if contentSubtype == "" { +		return baseContentType +	} +	return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 000000000..7a092b2b8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full text matches the regex provided. 
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool { +	if len(text) == 0 { +		return re.MatchString(text) +	} +	re.Longest() +	rem := re.FindString(text) +	return len(rem) == len(text) +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..0a76d9de6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,160 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package.  It must not import any grpc +// symbols to avoid circular dependencies. +package internal + +import ( +	"context" +	"time" + +	"google.golang.org/grpc/connectivity" +	"google.golang.org/grpc/serviceconfig" +) + +var ( +	// WithHealthCheckFunc is set by dialoptions.go +	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption +	// HealthCheckFunc is used to provide client-side LB channel health checking +	HealthCheckFunc HealthChecker +	// BalancerUnregister is exported by package balancer to unregister a balancer. +	BalancerUnregister func(name string) +	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by +	// default, but tests may wish to set it lower for convenience. 
+	KeepaliveMinPingTime = 10 * time.Second +	// ParseServiceConfig parses a JSON representation of the service config. +	ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult +	// EqualServiceConfigForTesting is for testing service config generation and +	// parsing. Both a and b should be returned by ParseServiceConfig. +	// This function compares the config without rawJSON stripped, in case the +	// there's difference in white space. +	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool +	// GetCertificateProviderBuilder returns the registered builder for the +	// given name. This is set by package certprovider for use from xDS +	// bootstrap code while parsing certificate provider configs in the +	// bootstrap file. +	GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder +	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo +	// stored in the passed in attributes. This is set by +	// credentials/xds/xds.go. +	GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo +	// GetServerCredentials returns the transport credentials configured on a +	// gRPC server. An xDS-enabled server needs to know what type of credentials +	// is configured on the underlying gRPC server. This is set by server.go. +	GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials +	// DrainServerTransports initiates a graceful close of existing connections +	// on a gRPC server accepted on the provided listener address. An +	// xDS-enabled server invokes this method on a grpc.Server when a particular +	// listener moves to "not-serving" mode. +	DrainServerTransports interface{} // func(*grpc.Server, string) +	// AddGlobalServerOptions adds an array of ServerOption that will be +	// effective globally for newly created servers. The priority will be: 1. +	// user-provided; 2. this method; 3. default values. 
+	AddGlobalServerOptions interface{} // func(opt ...ServerOption) +	// ClearGlobalServerOptions clears the array of extra ServerOption. This +	// method is useful in testing and benchmarking. +	ClearGlobalServerOptions func() +	// AddGlobalDialOptions adds an array of DialOption that will be effective +	// globally for newly created client channels. The priority will be: 1. +	// user-provided; 2. this method; 3. default values. +	AddGlobalDialOptions interface{} // func(opt ...DialOption) +	// ClearGlobalDialOptions clears the array of extra DialOption. This +	// method is useful in testing and benchmarking. +	ClearGlobalDialOptions func() +	// JoinDialOptions combines the dial options passed as arguments into a +	// single dial option. +	JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption +	// JoinServerOptions combines the server options passed as arguments into a +	// single server option. +	JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + +	// WithBinaryLogger returns a DialOption that specifies the binary logger +	// for a ClientConn. +	WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption +	// BinaryLogger returns a ServerOption that can set the binary logger for a +	// server. +	BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + +	// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using +	// the provided xds bootstrap config instead of the global configuration from +	// the supported environment variables.  The resolver.Builder is meant to be +	// used in conjunction with the grpc.WithResolvers DialOption. +	// +	// Testing Only +	// +	// This function should ONLY be used for testing and may not work with some +	// other features, including the CSDS service. 
+	NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + +	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster +	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment +	// variable. +	// +	// TODO: Remove this function once the RLS env var is removed. +	RegisterRLSClusterSpecifierPluginForTesting func() + +	// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster +	// Specifier Plugin for testing purposes. This is needed because there is no way +	// to unregister the RLS Cluster Specifier Plugin after registering it solely +	// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). +	// +	// TODO: Remove this function once the RLS env var is removed. +	UnregisterRLSClusterSpecifierPluginForTesting func() + +	// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing +	// purposes, regardless of the RBAC environment variable. +	// +	// TODO: Remove this function once the RBAC env var is removed. +	RegisterRBACHTTPFilterForTesting func() + +	// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for +	// testing purposes. This is needed because there is no way to unregister the +	// HTTP Filter after registering it solely for testing purposes using +	// RegisterRBACHTTPFilterForTesting(). +	// +	// TODO: Remove this function once the RBAC env var is removed. +	UnregisterRBACHTTPFilterForTesting func() +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). 
+// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( +	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. +	CredsBundleModeFallback = "fallback" +	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer +	// mode. +	CredsBundleModeBalancer = "balancer" +	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode +	// that supports backend returned by grpclb balancer. +	CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..b2980f8ac --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. 
+// +// This package is experimental. +package metadata + +import ( +	"fmt" +	"strings" + +	"google.golang.org/grpc/metadata" +	"google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { +	om, ok := o.(mdValue) +	if !ok { +		return false +	} +	if len(m) != len(om) { +		return false +	} +	for k, v := range m { +		ov := om[k] +		if len(ov) != len(v) { +			return false +		} +		for i, ve := range v { +			if ov[i] != ve { +				return false +			} +		} +	} +	return true +} + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { +	attrs := addr.Attributes +	if attrs == nil { +		return nil +	} +	md, _ := attrs.Value(mdKey).(mdValue) +	return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { +	addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) +	return addr +} + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { +	for k, vals := range md { +		// pseudo-header will be ignored +		if k[0] == ':' { +			continue +		} +		// check key, for i that saving a conversion if not using for range +		for i := 0; i < len(k); i++ { +			r := k[i] +			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' 
&& r != '-' && r != '_' { +				return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) +			} +		} +		if strings.HasSuffix(k, "-bin") { +			continue +		} +		// check value +		for _, val := range vals { +			if hasNotPrintable(val) { +				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) +			} +		} +	} +	return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { +	// for i that saving a conversion if not using for range +	for i := 0; i < len(msg); i++ { +		if msg[i] < 0x20 || msg[i] > 0x7E { +			return true +		} +	} +	return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..0177af4b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( +	"bytes" +	"encoding/json" +	"fmt" + +	"github.com/golang/protobuf/jsonpb" +	protov1 "github.com/golang/protobuf/proto" +	"google.golang.org/protobuf/encoding/protojson" +	protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = "  " + +// ToJSON marshals the input into a json string. 
+// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { +	switch ee := e.(type) { +	case protov1.Message: +		mm := jsonpb.Marshaler{Indent: jsonIndent} +		ret, err := mm.MarshalToString(ee) +		if err != nil { +			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 +			// messages are not imported, and this will fail because the message +			// is not found. +			return fmt.Sprintf("%+v", ee) +		} +		return ret +	case protov2.Message: +		mm := protojson.MarshalOptions{ +			Multiline: true, +			Indent:    jsonIndent, +		} +		ret, err := mm.Marshal(ee) +		if err != nil { +			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 +			// messages are not imported, and this will fail because the message +			// is not found. +			return fmt.Sprintf("%+v", ee) +		} +		return string(ret) +	default: +		ret, err := json.MarshalIndent(ee, "", jsonIndent) +		if err != nil { +			return fmt.Sprintf("%+v", ee) +		} +		return string(ret) +	} +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { +	var out bytes.Buffer +	err := json.Indent(&out, b, "", jsonIndent) +	if err != nil { +		return string(b) +	} +	return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..c7a18a948 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( +	"context" +	"sync" + +	"google.golang.org/grpc/internal/serviceconfig" +	"google.golang.org/grpc/metadata" +	"google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { +	// Selects the configuration for the RPC, or terminates it using the error. +	// This error will be converted by the gRPC library to a status error with +	// code UNKNOWN if it is not returned as a status error. +	SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { +	// Context is the user's context for the RPC and contains headers and +	// application timeout.  It is passed for interception purposes and for +	// efficiency reasons.  SelectConfig should not be blocking. +	Context context.Context +	Method  string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { +	// The context to use for the remainder of the RPC; can pass info to LB +	// policy or affect timeout or metadata. +	Context      context.Context +	MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC +	OnCommitted  func()                     // Called when the RPC has been committed (retries no longer possible) +	Interceptor  ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. 
+type ClientStream interface { +	// Header returns the header metadata received from the server if there +	// is any. It blocks if the metadata is not ready to read. +	Header() (metadata.MD, error) +	// Trailer returns the trailer metadata from the server, if there is any. +	// It must only be called after stream.CloseAndRecv has returned, or +	// stream.Recv has returned a non-nil error (including io.EOF). +	Trailer() metadata.MD +	// CloseSend closes the send direction of the stream. It closes the stream +	// when non-nil error is met. It is also not safe to call CloseSend +	// concurrently with SendMsg. +	CloseSend() error +	// Context returns the context for this stream. +	// +	// It should not be called until after Header or RecvMsg has returned. Once +	// called, subsequent client-side retries are disabled. +	Context() context.Context +	// SendMsg is generally called by generated code. On error, SendMsg aborts +	// the stream. If the error was generated by the client, the status is +	// returned directly; otherwise, io.EOF is returned and the status of +	// the stream may be discovered using RecvMsg. +	// +	// SendMsg blocks until: +	//   - There is sufficient flow control to schedule m with the transport, or +	//   - The stream is done, or +	//   - The stream breaks. +	// +	// SendMsg does not wait until the message is received by the server. An +	// untimely stream closure may result in lost messages. To ensure delivery, +	// users should ensure the RPC completed successfully using RecvMsg. +	// +	// It is safe to have a goroutine calling SendMsg and another goroutine +	// calling RecvMsg on the same stream at the same time, but it is not safe +	// to call SendMsg on the same stream in different goroutines. It is also +	// not safe to call CloseSend concurrently with SendMsg. +	SendMsg(m interface{}) error +	// RecvMsg blocks until it receives a message into m or the stream is +	// done. It returns io.EOF when the stream completes successfully. 
On +	// any other error, the stream is aborted and the error contains the RPC +	// status. +	// +	// It is safe to have a goroutine calling SendMsg and another goroutine +	// calling RecvMsg on the same stream at the same time, but it is not +	// safe to call RecvMsg on the same stream in different goroutines. +	RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { +	// NewStream produces a ClientStream for an RPC which may optionally use +	// the provided function to produce a stream for delegation.  Note: +	// RPCInfo.Context should not be used (will be nil). +	// +	// done is invoked when the RPC is finished using its connection, or could +	// not be assigned a connection.  RPC operations may still occur on +	// ClientStream after done is called, since the interceptor is invoked by +	// application-layer operations.  done must never be nil when called. +	NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. +type ServerInterceptor interface { +	// AllowRPC checks if an incoming RPC is allowed to proceed based on +	// information about connection RPC was received on, and HTTP Headers. This +	// information will be piped into context. +	AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { +	state.Attributes = state.Attributes.WithValue(csKey, cs) +	return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. 
+func GetConfigSelector(state resolver.State) ConfigSelector { +	cs, _ := state.Attributes.Value(csKey).(ConfigSelector) +	return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { +	mu sync.RWMutex +	cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { +	scs.mu.Lock() +	defer scs.mu.Unlock() +	scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { +	scs.mu.RLock() +	defer scs.mu.RUnlock() +	return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..09a667f33 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,458 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. 
+package dns + +import ( +	"context" +	"encoding/json" +	"errors" +	"fmt" +	"net" +	"os" +	"strconv" +	"strings" +	"sync" +	"time" + +	grpclbstate "google.golang.org/grpc/balancer/grpclb/state" +	"google.golang.org/grpc/grpclog" +	"google.golang.org/grpc/internal/backoff" +	"google.golang.org/grpc/internal/envconfig" +	"google.golang.org/grpc/internal/grpcrand" +	"google.golang.org/grpc/resolver" +	"google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records.  Must not be changed after init time. +var EnableSRVLookups = false + +var logger = grpclog.Component("dns") + +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( +	newTimer           = time.NewTimer +	newTimerDNSResRate = time.NewTimer +) + +func init() { +	resolver.Register(NewBuilder()) +} + +const ( +	defaultPort       = "443" +	defaultDNSSvrPort = "53" +	golang            = "GO" +	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup. +	txtPrefix = "_grpc_config." +	// In DNS, service config is encoded in a TXT record via the mechanism +	// described in RFC-1464 using the attribute name grpc_config. +	txtAttribute = "grpc_config=" +) + +var ( +	errMissingAddr = errors.New("dns resolver: missing address") + +	// Addresses ending with a colon that is supposed to be the separator +	// between host and port is not allowed.  E.g. "::" is a valid address as +	// it is an IPv6 address (host only) and "[::]:" is invalid as it ends with +	// a colon as the host and port separator +	errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( +	defaultResolver netResolver = net.DefaultResolver +	// To prevent excessive re-resolution, we enforce a rate limit on DNS +	// resolution requests. 
+	minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { +	return func(ctx context.Context, network, address string) (net.Conn, error) { +		var dialer net.Dialer +		return dialer.DialContext(ctx, network, authority) +	} +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { +	host, port, err := parseTarget(authority, defaultDNSSvrPort) +	if err != nil { +		return nil, err +	} + +	authorityWithPort := net.JoinHostPort(host, port) + +	return &net.Resolver{ +		PreferGo: true, +		Dial:     customAuthorityDialler(authorityWithPort), +	}, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { +	return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { +	host, port, err := parseTarget(target.Endpoint(), defaultPort) +	if err != nil { +		return nil, err +	} + +	// IP address. +	if ipAddr, ok := formatIP(host); ok { +		addr := []resolver.Address{{Addr: ipAddr + ":" + port}} +		cc.UpdateState(resolver.State{Addresses: addr}) +		return deadResolver{}, nil +	} + +	// DNS address (non-IP). 
+	ctx, cancel := context.WithCancel(context.Background()) +	d := &dnsResolver{ +		host:                 host, +		port:                 port, +		ctx:                  ctx, +		cancel:               cancel, +		cc:                   cc, +		rn:                   make(chan struct{}, 1), +		disableServiceConfig: opts.DisableServiceConfig, +	} + +	if target.URL.Host == "" { +		d.resolver = defaultResolver +	} else { +		d.resolver, err = customAuthorityResolver(target.URL.Host) +		if err != nil { +			return nil, err +		} +	} + +	d.wg.Add(1) +	go d.watcher() +	return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { +	return "dns" +} + +type netResolver interface { +	LookupHost(ctx context.Context, host string) (addrs []string, err error) +	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) +	LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { +	host     string +	port     string +	resolver netResolver +	ctx      context.Context +	cancel   context.CancelFunc +	cc       resolver.ClientConn +	// rn channel is used by ResolveNow() to force an immediate resolution of the target. +	rn chan struct{} +	// wg is used to enforce Close() to return after the watcher() goroutine has finished. +	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we +	// replace the real lookup functions with mocked ones to facilitate testing. 
+	// If Close() doesn't wait for the watcher() goroutine to finish, the race detector sometimes
+	// will warn that lookup (READ of the lookup function pointers) inside the watcher() goroutine
+	// has a data race with replaceNetFunc (WRITE of the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	backoffIndex := 1
+	for {
+		state, err := d.lookup()
+		if err != nil {
+			// Report error to the underlying grpc.ClientConn.
+			d.cc.ReportError(err)
+		} else {
+			err = d.cc.UpdateState(*state)
+		}
+
+		var timer *time.Timer
+		if err == nil {
+			// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
+			// to prevent constantly re-resolving.
+			backoffIndex = 1
+			timer = newTimerDNSResRate(minDNSResRate)
+			select {
+			case <-d.ctx.Done():
+				timer.Stop()
+				return
+			case <-d.rn:
+			}
+		} else {
+			// Poll on an error found in DNS Resolver or an error received from ClientConn.
+			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) +			backoffIndex++ +		} +		select { +		case <-d.ctx.Done(): +			timer.Stop() +			return +		case <-timer.C: +		} +	} +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { +	if !EnableSRVLookups { +		return nil, nil +	} +	var newAddrs []resolver.Address +	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) +	if err != nil { +		err = handleDNSError(err, "SRV") // may become nil +		return nil, err +	} +	for _, s := range srvs { +		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) +		if err != nil { +			err = handleDNSError(err, "A") // may become nil +			if err == nil { +				// If there are other SRV records, look them up and ignore this +				// one that does not exist. +				continue +			} +			return nil, err +		} +		for _, a := range lbAddrs { +			ip, ok := formatIP(a) +			if !ok { +				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) +			} +			addr := ip + ":" + strconv.Itoa(int(s.Port)) +			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) +		} +	} +	return newAddrs, nil +} + +func handleDNSError(err error, lookupType string) error { +	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { +		// Timeouts and temporary errors should be communicated to gRPC to +		// attempt another DNS query (with backoff).  Other errors should be +		// suppressed (they may represent the absence of a TXT record). 
+		return nil +	} +	if err != nil { +		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) +		logger.Info(err) +	} +	return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { +	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) +	if err != nil { +		if envconfig.TXTErrIgnore { +			return nil +		} +		if err = handleDNSError(err, "TXT"); err != nil { +			return &serviceconfig.ParseResult{Err: err} +		} +		return nil +	} +	var res string +	for _, s := range ss { +		res += s +	} + +	// TXT record must have "grpc_config=" attribute in order to be used as service config. +	if !strings.HasPrefix(res, txtAttribute) { +		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) +		// This is not an error; it is the equivalent of not having a service config. +		return nil +	} +	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) +	return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { +	addrs, err := d.resolver.LookupHost(d.ctx, d.host) +	if err != nil { +		err = handleDNSError(err, "A") +		return nil, err +	} +	newAddrs := make([]resolver.Address, 0, len(addrs)) +	for _, a := range addrs { +		ip, ok := formatIP(a) +		if !ok { +			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) +		} +		addr := ip + ":" + d.port +		newAddrs = append(newAddrs, resolver.Address{Addr: addr}) +	} +	return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { +	srv, srvErr := d.lookupSRV() +	addrs, hostErr := d.lookupHost() +	if hostErr != nil && (srvErr != nil || len(srv) == 0) { +		return nil, hostErr +	} + +	state := resolver.State{Addresses: addrs} +	if len(srv) > 0 { +		state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) +	} +	if !d.disableServiceConfig { +		state.ServiceConfig = d.lookupTXT() +	} +	return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an 
IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { +	ip := net.ParseIP(addr) +	if ip == nil { +		return "", false +	} +	if ip.To4() != nil { +		return addr, true +	} +	return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { +	if target == "" { +		return "", "", errMissingAddr +	} +	if ip := net.ParseIP(target); ip != nil { +		// target is an IPv4 or IPv6(without brackets) address +		return target, defaultPort, nil +	} +	if host, port, err = net.SplitHostPort(target); err == nil { +		if port == "" { +			// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. +			return "", "", errEndsWithColon +		} +		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port +		if host == "" { +			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
+			host = "localhost" +		} +		return host, port, nil +	} +	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { +		// target doesn't have port +		return host, port, nil +	} +	return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { +	ClientLanguage *[]string        `json:"clientLanguage,omitempty"` +	Percentage     *int             `json:"percentage,omitempty"` +	ClientHostName *[]string        `json:"clientHostName,omitempty"` +	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { +	if a == nil { +		return true +	} +	for _, c := range *a { +		if c == b { +			return true +		} +	} +	return false +} + +func chosenByPercentage(a *int) bool { +	if a == nil { +		return true +	} +	return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { +	if js == "" { +		return "" +	} +	var rcs []rawChoice +	err := json.Unmarshal([]byte(js), &rcs) +	if err != nil { +		logger.Warningf("dns: error parsing service config json: %v", err) +		return "" +	} +	cliHostname, err := os.Hostname() +	if err != nil { +		logger.Warningf("dns: error getting client hostname: %v", err) +		return "" +	} +	var sc string +	for _, c := range rcs { +		if !containsString(c.ClientLanguage, golang) || +			!chosenByPercentage(c.Percentage) || +			!containsString(c.ClientHostName, cliHostname) || +			c.ServiceConfig == nil { +			continue +		} +		sc = string(*c.ServiceConfig) +		break +	} +	return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..afac56572 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import ( +	"errors" + +	"google.golang.org/grpc/resolver" +) + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { +	if target.Endpoint() == "" && opts.Dialer == nil { +		return nil, errors.New("passthrough: received empty target in Build()") +	} +	r := &passthroughResolver{ +		target: target, +		cc:     cc, +	} +	r.start() +	return r, nil +} + +func (*passthroughBuilder) Scheme() string { +	return scheme +} + +type passthroughResolver struct { +	target resolver.Target +	cc     resolver.ClientConn +} + +func (r *passthroughResolver) start() { +	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { +	resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..160911687 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 
+1,74 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( +	"fmt" + +	"google.golang.org/grpc/internal/transport/networktype" +	"google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { +	scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { +	if target.URL.Host != "" { +		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) +	} + +	// gRPC was parsing the dial target manually before PR #4817, and we +	// switched to using url.Parse() in that PR. To avoid breaking existing +	// resolver implementations we ended up stripping the leading "/" from the +	// endpoint. This obviously does not work for the "unix" scheme. Hence we +	// end up using the parsed URL instead. +	endpoint := target.URL.Path +	if endpoint == "" { +		endpoint = target.URL.Opaque +	} +	addr := resolver.Address{Addr: endpoint} +	if b.scheme == unixAbstractScheme { +		// We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do +		// not want trailing \0 in address. 
+		addr.Addr = "@" + addr.Addr +	} +	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) +	return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { +	return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { +	resolver.Register(&builder{scheme: unixScheme}) +	resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..51e733e49 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( +	"encoding/json" +	"fmt" +	"time" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/grpclog" +	externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. 
+// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { +	Name   string +	Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { +	if bc.Config == nil { +		// If config is nil, return empty config `{}`. +		return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil +	} +	c, err := json.Marshal(bc.Config) +	if err != nil { +		return nil, err +	} +	return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +//   - If the config for the first supported policy is invalid, the whole service +//     config is invalid. +//   - If the list doesn't contain any supported policy, the whole service config +//     is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { +	var ir intermediateBalancerConfig +	err := json.Unmarshal(b, &ir) +	if err != nil { +		return err +	} + +	var names []string +	for i, lbcfg := range ir { +		if len(lbcfg) != 1 { +			return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) +		} + +		var ( +			name    string +			jsonCfg json.RawMessage +		) +		// Get the key:value pair from the map. We have already made sure that +		// the map contains a single entry. 
+		for name, jsonCfg = range lbcfg { +		} + +		names = append(names, name) +		builder := balancer.Get(name) +		if builder == nil { +			// If the balancer is not registered, move on to the next config. +			// This is not an error. +			continue +		} +		bc.Name = name + +		parser, ok := builder.(balancer.ConfigParser) +		if !ok { +			if string(jsonCfg) != "{}" { +				logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) +			} +			// Stop at this, though the builder doesn't support parsing config. +			return nil +		} + +		cfg, err := parser.ParseConfig(jsonCfg) +		if err != nil { +			return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) +		} +		bc.Config = cfg +		return nil +	} +	// This is reached when the for loop iterates over all entries, but didn't +	// return. This means we had a loadBalancingConfig slice but did not +	// encounter a registered policy. The config is considered invalid in this +	// case. +	return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { +	// WaitForReady indicates whether RPCs sent to this method should wait until +	// the connection is ready by default (!failfast). The value specified via the +	// gRPC client API will override the value set here. +	WaitForReady *bool +	// Timeout is the default timeout for RPCs sent to this method. The actual +	// deadline used will be the minimum of the value specified here and the value +	// set by the application via the gRPC client API.  If either one is not set, +	// then the other will be used.  If neither is set, then the RPC has no deadline. +	Timeout *time.Duration +	// MaxReqSize is the maximum allowed payload size for an individual request in a +	// stream (client->server) in bytes. 
The size which is measured is the serialized +	// payload after per-message compression (but before stream compression) in bytes. +	// The actual value used is the minimum of the value specified here and the value set +	// by the application via the gRPC client API. If either one is not set, then the other +	// will be used.  If neither is set, then the built-in default is used. +	MaxReqSize *int +	// MaxRespSize is the maximum allowed payload size for an individual response in a +	// stream (server->client) in bytes. +	MaxRespSize *int +	// RetryPolicy configures retry options for the method. +	RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { +	// MaxAttempts is the maximum number of attempts, including the original RPC. +	// +	// This field is required and must be two or greater. +	MaxAttempts int + +	// Exponential backoff parameters. The initial retry attempt will occur at +	// random(0, initialBackoff). In general, the nth attempt will occur at +	// random(0, +	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). +	// +	// These fields are required and must be greater than zero. +	InitialBackoff    time.Duration +	MaxBackoff        time.Duration +	BackoffMultiplier float64 + +	// The set of status codes which may be retried. +	// +	// Status codes are specified as strings, e.g., "UNAVAILABLE". +	// +	// This field is required and must be non-empty. +	// Note: a set is used to store this for easy lookup. 
+	RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..b0ead4f54 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC.  These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto.  gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( +	"errors" +	"fmt" + +	"github.com/golang/protobuf/proto" +	"github.com/golang/protobuf/ptypes" +	spb "google.golang.org/genproto/googleapis/rpc/status" +	"google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details.  It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { +	s *spb.Status +} + +// New returns a Status representing c and msg. 
+func New(c codes.Code, msg string) *Status { +	return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { +	return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { +	return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg.  If c is OK, returns nil. +func Err(c codes.Code, msg string) error { +	return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { +	return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { +	if s == nil || s.s == nil { +		return codes.OK +	} +	return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { +	if s == nil || s.s == nil { +		return "" +	} +	return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { +	if s == nil { +		return nil +	} +	return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { +	if s.Code() == codes.OK { +		return nil +	} +	return &Error{s: s} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { +	if s.Code() == codes.OK { +		return nil, errors.New("no error details for status with code OK") +	} +	// s.Code() != OK implies that s.Proto() != nil. 
+	p := s.Proto() +	for _, detail := range details { +		any, err := ptypes.MarshalAny(detail) +		if err != nil { +			return nil, err +		} +		p.Details = append(p.Details, any) +	} +	return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { +	if s == nil || s.s == nil { +		return nil +	} +	details := make([]interface{}, 0, len(s.s.Details)) +	for _, any := range s.s.Details { +		detail := &ptypes.DynamicAny{} +		if err := ptypes.UnmarshalAny(any, detail); err != nil { +			details = append(details, err) +			continue +		} +		details = append(details, detail.Message) +	} +	return details +} + +func (s *Status) String() string { +	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { +	s *Status +} + +func (e *Error) Error() string { +	return e.s.String() +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { +	return e.s +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { +	tse, ok := target.(*Error) +	if !ok { +		return false +	} +	return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. 
+func IsRestrictedControlPlaneCode(s *Status) bool { +	switch s.Code() { +	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: +		return true +	} +	return false +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 000000000..b3a72276d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( +	"fmt" +	"net" +	"syscall" +	"time" + +	"golang.org/x/sys/unix" +	"google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { +	var ts unix.Timespec +	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { +		logger.Fatal(err) +	} +	return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage + +// GetRusage returns the resource usage of current process. 
+func GetRusage() *Rusage { +	rusage := new(Rusage) +	syscall.Getrusage(syscall.RUSAGE_SELF, rusage) +	return rusage +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +	var ( +		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec +		utimeDiffus = latest.Utime.Usec - first.Utime.Usec +		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec +		stimeDiffus = latest.Stime.Usec - first.Stime.Usec +	) + +	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 +	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + +	return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +	tcpconn, ok := conn.(*net.TCPConn) +	if !ok { +		// not a TCP connection. exit early +		return nil +	} +	rawConn, err := tcpconn.SyscallConn() +	if err != nil { +		return fmt.Errorf("error getting raw connection: %v", err) +	} +	err = rawConn.Control(func(fd uintptr) { +		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) +	}) +	if err != nil { +		return fmt.Errorf("error setting option on socket: %v", err) +	} + +	return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { +	tcpconn, ok := conn.(*net.TCPConn) +	if !ok { +		err = fmt.Errorf("conn is not *net.TCPConn. 
got %T", conn) +		return +	} +	rawConn, err := tcpconn.SyscallConn() +	if err != nil { +		err = fmt.Errorf("error getting raw connection: %v", err) +		return +	} +	err = rawConn.Control(func(fd uintptr) { +		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) +	}) +	if err != nil { +		err = fmt.Errorf("error getting option on socket: %v", err) +		return +	} + +	return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 000000000..999f52cd7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,77 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. +package syscall + +import ( +	"net" +	"sync" +	"time" + +	"google.golang.org/grpc/grpclog" +) + +var once sync.Once +var logger = grpclog.Component("core") + +func log() { +	once.Do(func() { +		logger.Info("CPU time info is unavailable on non-linux environments.") +	}) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. 
+func GetCPUTime() int64 { +	log() +	return 0 +} + +// Rusage is an empty struct under non-linux environments. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux environments. +func GetRusage() *Rusage { +	log() +	return nil +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. It a no-op function for non-linux environments. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +	log() +	return 0, 0 +} + +// SetTCPUserTimeout is a no-op function under non-linux environments. +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +	log() +	return nil +} + +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported +func GetTCPUserTimeout(conn net.Conn) (int, error) { +	log() +	return -1, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 000000000..070680edb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( +	"sync" +	"time" +) + +const ( +	// bdpLimit is the maximum value the flow control windows will be increased +	// to.  
TCP typically limits this to 4MB, but some systems go up to 16MB. +	// Since this is only a limit, it is safe to make it optimistic. +	bdpLimit = (1 << 20) * 16 +	// alpha is a constant factor used to keep a moving average +	// of RTTs. +	alpha = 0.9 +	// If the current bdp sample is greater than or equal to +	// our beta * our estimated bdp and the current bandwidth +	// sample is the maximum bandwidth observed so far, we +	// increase our bbp estimate by a factor of gamma. +	beta = 0.66 +	// To put our bdp to be smaller than or equal to twice the real BDP, +	// we should multiply our current sample with 4/3, however to round things out +	// we use 2 as the multiplication factor. +	gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { +	// sentAt is the time when the ping was sent. +	sentAt time.Time + +	mu sync.Mutex +	// bdp is the current bdp estimate. +	bdp uint32 +	// sample is the number of bytes received in one measurement cycle. +	sample uint32 +	// bwMax is the maximum bandwidth noted so far (bytes/sec). +	bwMax float64 +	// bool to keep track of the beginning of a new measurement cycle. +	isSent bool +	// Callback to update the window sizes. +	updateFlowControl func(n uint32) +	// sampleCount is the number of samples taken so far. +	sampleCount uint64 +	// round trip time (seconds) +	rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { +	if bdpPing.data != d { +		return +	} +	b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. 
This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { +	b.mu.Lock() +	defer b.mu.Unlock() +	if b.bdp == bdpLimit { +		return false +	} +	if !b.isSent { +		b.isSent = true +		b.sample = n +		b.sentAt = time.Time{} +		b.sampleCount++ +		return true +	} +	b.sample += n +	return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { +	// Check if the ping acked for was the bdp ping. +	if bdpPing.data != d { +		return +	} +	b.mu.Lock() +	rttSample := time.Since(b.sentAt).Seconds() +	if b.sampleCount < 10 { +		// Bootstrap rtt with an average of first 10 rtt samples. +		b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) +	} else { +		// Heed to the recent past more. +		b.rtt += (rttSample - b.rtt) * float64(alpha) +	} +	b.isSent = false +	// The number of bytes accumulated so far in the sample is smaller +	// than or equal to 1.5 times the real BDP on a saturated connection. +	bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) +	if bwCurrent > b.bwMax { +		b.bwMax = bwCurrent +	} +	// If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is +	// greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we +	// should update our perception of the network BDP. 
+	if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { +		sampleFloat := float64(b.sample) +		b.bdp = uint32(gamma * sampleFloat) +		if b.bdp > bdpLimit { +			b.bdp = bdpLimit +		} +		bdp := b.bdp +		b.mu.Unlock() +		b.updateFlowControl(bdp) +		return +	} +	b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 000000000..9097385e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,998 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( +	"bytes" +	"errors" +	"fmt" +	"runtime" +	"strconv" +	"sync" +	"sync/atomic" + +	"golang.org/x/net/http2" +	"golang.org/x/net/http2/hpack" +	"google.golang.org/grpc/internal/grpcutil" +	"google.golang.org/grpc/status" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { +	e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { +	it   interface{} +	next *itemNode +} + +type itemList struct { +	head *itemNode +	tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { +	n := &itemNode{it: i} +	if il.tail == nil { +		il.head, il.tail = n, n +		return +	} +	il.tail.next = n +	il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. 
+func (il *itemList) peek() interface{} { +	return il.head.it +} + +func (il *itemList) dequeue() interface{} { +	if il.head == nil { +		return nil +	} +	i := il.head.it +	il.head = il.head.next +	if il.head == nil { +		il.tail = nil +	} +	return i +} + +func (il *itemList) dequeueAll() *itemNode { +	h := il.head +	il.head, il.tail = nil, nil +	return h +} + +func (il *itemList) isEmpty() bool { +	return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport.  These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { +	isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { +	streamID uint32 +	wq       *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { +	streamID   uint32 +	hf         []hpack.HeaderField +	endStream  bool               // Valid on server side. +	initStream func(uint32) error // Used only on the client side. +	onWrite    func() +	wq         *writeQuota    // write quota for the stream created. +	cleanup    *cleanupStream // Valid on the server side. 
+	onOrphaned func(error)    // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { +	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { +	streamID uint32 +	rst      bool +	rstCode  http2.ErrCode +	onWrite  func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type earlyAbortStream struct { +	httpStatus     uint32 +	streamID       uint32 +	contentSubtype string +	status         *status.Status +	rst            bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + +type dataFrame struct { +	streamID  uint32 +	endStream bool +	h         []byte +	d         []byte +	// onEachWrite is called every time +	// a part of d is written out. +	onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { +	streamID  uint32 +	increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { +	streamID  uint32 +	increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { +	return false // window updates are throttled by thresholds +} + +type incomingSettings struct { +	ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { +	ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { +	code      http2.ErrCode +	debugData []byte +	headsUp   bool +	closeConn error // if set, loopyWriter will exit, resulting in conn closure +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { +	ack  bool +	data [8]byte +} + +func (*ping) 
isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { +	resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server).  The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( +	active outStreamState = iota +	empty +	waitingOnStreamQuota +) + +type outStream struct { +	id               uint32 +	state            outStreamState +	itl              *itemList +	bytesOutStanding int +	wq               *writeQuota + +	next *outStream +	prev *outStream +} + +func (s *outStream) deleteSelf() { +	if s.prev != nil { +		s.prev.next = s.next +	} +	if s.next != nil { +		s.next.prev = s.prev +	} +	s.next, s.prev = nil, nil +} + +type outStreamList struct { +	// Following are sentinel objects that mark the +	// beginning and end of the list. They do not +	// contain any item lists. All valid objects are +	// inserted in between them. +	// This is needed so that an outStream object can +	// deleteSelf() in O(1) time without knowing which +	// list it belongs to. +	head *outStream +	tail *outStream +} + +func newOutStreamList() *outStreamList { +	head, tail := new(outStream), new(outStream) +	head.next = tail +	tail.prev = head +	return &outStreamList{ +		head: head, +		tail: tail, +	} +} + +func (l *outStreamList) enqueue(s *outStream) { +	e := l.tail.prev +	e.next = s +	s.prev = e +	s.next = l.tail +	l.tail.prev = s +} + +// remove from the beginning of the list. 
+func (l *outStreamList) dequeue() *outStream { +	b := l.head.next +	if b == l.tail { +		return nil +	} +	b.deleteSelf() +	return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { +	ch              chan struct{} +	done            <-chan struct{} +	mu              sync.Mutex +	consumerWaiting bool +	list            *itemList +	err             error + +	// transportResponseFrames counts the number of queued items that represent +	// the response of an action initiated by the peer.  trfChan is created +	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is +	// closed and nilled when transportResponseFrames drops below the +	// threshold.  Both fields are protected by mu. +	transportResponseFrames int +	trfChan                 atomic.Value // chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { +	return &controlBuffer{ +		ch:   make(chan struct{}, 1), +		list: &itemList{}, +		done: done, +	} +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. 
+func (c *controlBuffer) throttle() { +	ch, _ := c.trfChan.Load().(chan struct{}) +	if ch != nil { +		select { +		case <-ch: +		case <-c.done: +		} +	} +} + +func (c *controlBuffer) put(it cbItem) error { +	_, err := c.executeAndPut(nil, it) +	return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +	var wakeUp bool +	c.mu.Lock() +	if c.err != nil { +		c.mu.Unlock() +		return false, c.err +	} +	if f != nil { +		if !f(it) { // f wasn't successful +			c.mu.Unlock() +			return false, nil +		} +	} +	if c.consumerWaiting { +		wakeUp = true +		c.consumerWaiting = false +	} +	c.list.enqueue(it) +	if it.isTransportResponseFrame() { +		c.transportResponseFrames++ +		if c.transportResponseFrames == maxQueuedTransportResponseFrames { +			// We are adding the frame that puts us over the threshold; create +			// a throttling channel. +			c.trfChan.Store(make(chan struct{})) +		} +	} +	c.mu.Unlock() +	if wakeUp { +		select { +		case c.ch <- struct{}{}: +		default: +		} +	} +	return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +	c.mu.Lock() +	if c.err != nil { +		c.mu.Unlock() +		return false, c.err +	} +	if !f(it) { // f wasn't successful +		c.mu.Unlock() +		return false, nil +	} +	c.mu.Unlock() +	return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { +	for { +		c.mu.Lock() +		if c.err != nil { +			c.mu.Unlock() +			return nil, c.err +		} +		if !c.list.isEmpty() { +			h := c.list.dequeue().(cbItem) +			if h.isTransportResponseFrame() { +				if c.transportResponseFrames == maxQueuedTransportResponseFrames { +					// We are removing the frame that put us over the +					// threshold; close and clear the throttling channel. 
+					ch := c.trfChan.Load().(chan struct{}) +					close(ch) +					c.trfChan.Store((chan struct{})(nil)) +				} +				c.transportResponseFrames-- +			} +			c.mu.Unlock() +			return h, nil +		} +		if !block { +			c.mu.Unlock() +			return nil, nil +		} +		c.consumerWaiting = true +		c.mu.Unlock() +		select { +		case <-c.ch: +		case <-c.done: +			return nil, errors.New("transport closed by client") +		} +	} +} + +func (c *controlBuffer) finish() { +	c.mu.Lock() +	if c.err != nil { +		c.mu.Unlock() +		return +	} +	c.err = ErrConnClosing +	// There may be headers for streams in the control buffer. +	// These streams need to be cleaned out since the transport +	// is still not aware of these yet. +	for head := c.list.dequeueAll(); head != nil; head = head.next { +		hdr, ok := head.it.(*headerFrame) +		if !ok { +			continue +		} +		if hdr.onOrphaned != nil { // It will be nil on the server-side. +			hdr.onOrphaned(ErrConnClosing) +		} +	} +	// In case throttle() is currently in flight, it needs to be unblocked. +	// Otherwise, the transport may not close, since the transport is closed by +	// the reader encountering the connection error. +	ch, _ := c.trfChan.Load().(chan struct{}) +	if ch != nil { +		close(ch) +	} +	c.trfChan.Store((chan struct{})(nil)) +	c.mu.Unlock() +} + +type side int + +const ( +	clientSide side = iota +	serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. +// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resemebling to a round-robin scheduling over all streams. 
While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. +type loopyWriter struct { +	side      side +	cbuf      *controlBuffer +	sendQuota uint32 +	oiws      uint32 // outbound initial window size. +	// estdStreams is map of all established streams that are not cleaned-up yet. +	// On client-side, this is all streams whose headers were sent out. +	// On server-side, this is all streams whose headers were received. +	estdStreams map[uint32]*outStream // Established streams. +	// activeStreams is a linked-list of all streams that have data to send and some +	// stream-level flow control quota. +	// Each of these streams internally have a list of data items(and perhaps trailers +	// on the server-side) to be sent out. +	activeStreams *outStreamList +	framer        *framer +	hBuf          *bytes.Buffer  // The buffer for HPACK encoding. +	hEnc          *hpack.Encoder // HPACK encoder. +	bdpEst        *bdpEstimator +	draining      bool + +	// Side-specific handlers +	ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +	var buf bytes.Buffer +	l := &loopyWriter{ +		side:          s, +		cbuf:          cbuf, +		sendQuota:     defaultWindowSize, +		oiws:          defaultWindowSize, +		estdStreams:   make(map[uint32]*outStream), +		activeStreams: newOutStreamList(), +		framer:        fr, +		hBuf:          &buf, +		hEnc:          hpack.NewEncoder(&buf), +		bdpEst:        bdpEst, +	} +	return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. 
+// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { +	// Always flush the writer before exiting in case there are pending frames +	// to be sent. +	defer l.framer.writer.Flush() +	for { +		it, err := l.cbuf.get(true) +		if err != nil { +			return err +		} +		if err = l.handle(it); err != nil { +			return err +		} +		if _, err = l.processData(); err != nil { +			return err +		} +		gosched := true +	hasdata: +		for { +			it, err := l.cbuf.get(false) +			if err != nil { +				return err +			} +			if it != nil { +				if err = l.handle(it); err != nil { +					return err +				} +				if _, err = l.processData(); err != nil { +					return err +				} +				continue hasdata +			} +			isEmpty, err := l.processData() +			if err != nil { +				return err +			} +			if !isEmpty { +				continue hasdata +			} +			if gosched { +				gosched = false +				if l.framer.writer.offset < minBatchSize { +					runtime.Gosched() +					continue hasdata +				} +			} +			l.framer.writer.Flush() +			break hasdata +		} +	} +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { +	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +	// Otherwise update the quota. 
+	if w.streamID == 0 { +		l.sendQuota += w.increment +		return nil +	} +	// Find the stream and update it. +	if str, ok := l.estdStreams[w.streamID]; ok { +		str.bytesOutStanding -= int(w.increment) +		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { +			str.state = active +			l.activeStreams.enqueue(str) +			return nil +		} +	} +	return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { +	return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { +	if err := l.applySettings(s.ss); err != nil { +		return err +	} +	return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +	str := &outStream{ +		id:    h.streamID, +		state: empty, +		itl:   &itemList{}, +		wq:    h.wq, +	} +	l.estdStreams[h.streamID] = str +	return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { +	if l.side == serverSide { +		str, ok := l.estdStreams[h.streamID] +		if !ok { +			if logger.V(logLevel) { +				logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) +			} +			return nil +		} +		// Case 1.A: Server is responding back with headers. +		if !h.endStream { +			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) +		} +		// else:  Case 1.B: Server wants to close stream. + +		if str.state != empty { // either active or waiting on stream quota. +			// add it str's list of items. +			str.itl.enqueue(h) +			return nil +		} +		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { +			return err +		} +		return l.cleanupStreamHandler(h.cleanup) +	} +	// Case 2: Client wants to originate stream. 
+	str := &outStream{ +		id:    h.streamID, +		state: empty, +		itl:   &itemList{}, +		wq:    h.wq, +	} +	return l.originateStream(str, h) +} + +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { +	// l.draining is set when handling GoAway. In which case, we want to avoid +	// creating new streams. +	if l.draining { +		// TODO: provide a better error with the reason we are in draining. +		hdr.onOrphaned(errStreamDrain) +		return nil +	} +	if err := hdr.initStream(str.id); err != nil { +		return err +	} +	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { +		return err +	} +	l.estdStreams[str.id] = str +	return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { +	if onWrite != nil { +		onWrite() +	} +	l.hBuf.Reset() +	for _, f := range hf { +		if err := l.hEnc.WriteField(f); err != nil { +			if logger.V(logLevel) { +				logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) +			} +		} +	} +	var ( +		err               error +		endHeaders, first bool +	) +	first = true +	for !endHeaders { +		size := l.hBuf.Len() +		if size > http2MaxFrameLen { +			size = http2MaxFrameLen +		} else { +			endHeaders = true +		} +		if first { +			first = false +			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ +				StreamID:      streamID, +				BlockFragment: l.hBuf.Next(size), +				EndStream:     endStream, +				EndHeaders:    endHeaders, +			}) +		} else { +			err = l.framer.fr.WriteContinuation( +				streamID, +				endHeaders, +				l.hBuf.Next(size), +			) +		} +		if err != nil { +			return err +		} +	} +	return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { +	str, ok := l.estdStreams[df.streamID] +	if !ok { +		return nil +	} +	// If we got data for a stream it means that +	// stream was originated and the headers were sent out. 
+	str.itl.enqueue(df)
+	if str.state == empty {
+		// The stream now has data to send; make it active.
+		str.state = active
+		l.activeStreams.enqueue(str)
+	}
+	return nil
+}
+
+// pingHandler writes a PING frame. For an outgoing (non-ack) ping it first
+// notifies the BDP estimator (timesnap — presumably recording the send time;
+// see bdpEstimator for the exact semantics).
+func (l *loopyWriter) pingHandler(p *ping) error {
+	if !p.ack {
+		l.bdpEst.timesnap(p.data)
+	}
+	return l.framer.fr.WritePing(p.ack, p.data)
+
+}
+
+// outFlowControlSizeRequestHandler reports the current connection-level send
+// quota on the request's response channel.
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
+	o.resp <- l.sendQuota
+	return nil
+}
+
+// cleanupStreamHandler removes the stream from estdStreams, optionally sends
+// RST_STREAM, and — once draining is on and no established streams remain —
+// returns an error to terminate the run loop.
+func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
+	c.onWrite()
+	if str, ok := l.estdStreams[c.streamID]; ok {
+		// On the server side it could be a trailers-only response or
+		// a RST_STREAM before stream initialization thus the stream might
+		// not be established yet.
+		delete(l.estdStreams, c.streamID)
+		str.deleteSelf()
+	}
+	if c.rst { // If RST_STREAM needs to be sent.
+		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
+			return err
+		}
+	}
+	if l.draining && len(l.estdStreams) == 0 {
+		return errors.New("finished processing active streams while in draining mode")
+	}
+	return nil
+}
+
+// earlyAbortStreamHandler (server side only) writes an immediate
+// trailers-only response carrying the given gRPC status.
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+	if l.side == clientSide {
+		return errors.New("earlyAbortStream not handled on client")
+	}
+	// In case the caller forgets to set the http status, default to 200.
+	if eas.httpStatus == 0 { +		eas.httpStatus = 200 +	} +	headerFields := []hpack.HeaderField{ +		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, +		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, +		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, +		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, +	} + +	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { +		return err +	} +	if eas.rst { +		if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { +			return err +		} +	} +	return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { +	if l.side == clientSide { +		l.draining = true +		if len(l.estdStreams) == 0 { +			return errors.New("received GOAWAY with no active streams") +		} +	} +	return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { +	// Handling of outgoing GoAway is very specific to side. +	if l.ssGoAwayHandler != nil { +		draining, err := l.ssGoAwayHandler(g) +		if err != nil { +			return err +		} +		l.draining = draining +	} +	return nil +} + +func (l *loopyWriter) closeConnectionHandler() error { +	// Exit loopyWriter entirely by returning an error here.  This will lead to +	// the transport closing the connection, and, ultimately, transport +	// closure. 
+	return ErrConnClosing +} + +func (l *loopyWriter) handle(i interface{}) error { +	switch i := i.(type) { +	case *incomingWindowUpdate: +		return l.incomingWindowUpdateHandler(i) +	case *outgoingWindowUpdate: +		return l.outgoingWindowUpdateHandler(i) +	case *incomingSettings: +		return l.incomingSettingsHandler(i) +	case *outgoingSettings: +		return l.outgoingSettingsHandler(i) +	case *headerFrame: +		return l.headerHandler(i) +	case *registerStream: +		return l.registerStreamHandler(i) +	case *cleanupStream: +		return l.cleanupStreamHandler(i) +	case *earlyAbortStream: +		return l.earlyAbortStreamHandler(i) +	case *incomingGoAway: +		return l.incomingGoAwayHandler(i) +	case *dataFrame: +		return l.preprocessData(i) +	case *ping: +		return l.pingHandler(i) +	case *goAway: +		return l.goAwayHandler(i) +	case *outFlowControlSizeRequest: +		return l.outFlowControlSizeRequestHandler(i) +	case closeConnection: +		return l.closeConnectionHandler() +	default: +		return fmt.Errorf("transport: unknown control message type %T", i) +	} +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { +	for _, s := range ss { +		switch s.ID { +		case http2.SettingInitialWindowSize: +			o := l.oiws +			l.oiws = s.Val +			if o < l.oiws { +				// If the new limit is greater make all depleted streams active. +				for _, stream := range l.estdStreams { +					if stream.state == waitingOnStreamQuota { +						stream.state = active +						l.activeStreams.enqueue(stream) +					} +				} +			} +		case http2.SettingHeaderTableSize: +			updateHeaderTblSize(l.hEnc, s.Val) +		} +	} +	return nil +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. 
+func (l *loopyWriter) processData() (bool, error) { +	if l.sendQuota == 0 { +		return true, nil +	} +	str := l.activeStreams.dequeue() // Remove the first stream. +	if str == nil { +		return true, nil +	} +	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. +	// A data item is represented by a dataFrame, since it later translates into +	// multiple HTTP2 data frames. +	// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. +	// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the +	// maximum possible HTTP2 frame size. + +	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame +		// Client sends out empty data frame with endStream = true +		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { +			return false, err +		} +		str.itl.dequeue() // remove the empty data item from stream +		if str.itl.isEmpty() { +			str.state = empty +		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. +			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { +				return false, err +			} +			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { +				return false, nil +			} +		} else { +			l.activeStreams.enqueue(str) +		} +		return false, nil +	} +	var ( +		buf []byte +	) +	// Figure out the maximum size we can send +	maxSize := http2MaxFrameLen +	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. +		str.state = waitingOnStreamQuota +		return false, nil +	} else if maxSize > strQuota { +		maxSize = strQuota +	} +	if maxSize > int(l.sendQuota) { // connection-level flow control. 
+		maxSize = int(l.sendQuota) +	} +	// Compute how much of the header and data we can send within quota and max frame length +	hSize := min(maxSize, len(dataItem.h)) +	dSize := min(maxSize-hSize, len(dataItem.d)) +	if hSize != 0 { +		if dSize == 0 { +			buf = dataItem.h +		} else { +			// We can add some data to grpc message header to distribute bytes more equally across frames. +			// Copy on the stack to avoid generating garbage +			var localBuf [http2MaxFrameLen]byte +			copy(localBuf[:hSize], dataItem.h) +			copy(localBuf[hSize:], dataItem.d[:dSize]) +			buf = localBuf[:hSize+dSize] +		} +	} else { +		buf = dataItem.d +	} + +	size := hSize + dSize + +	// Now that outgoing flow controls are checked we can replenish str's write quota +	str.wq.replenish(size) +	var endStream bool +	// If this is the last data message on this stream and all of it can be written in this iteration. +	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { +		endStream = true +	} +	if dataItem.onEachWrite != nil { +		dataItem.onEachWrite() +	} +	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { +		return false, err +	} +	str.bytesOutStanding += size +	l.sendQuota -= uint32(size) +	dataItem.h = dataItem.h[hSize:] +	dataItem.d = dataItem.d[dSize:] + +	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. +		str.itl.dequeue() +	} +	if str.itl.isEmpty() { +		str.state = empty +	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. +		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { +			return false, err +		} +		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { +			return false, err +		} +	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. +		str.state = waitingOnStreamQuota +	} else { // Otherwise add it back to the list of active streams. 
+		l.activeStreams.enqueue(str) +	} +	return false, nil +} + +func min(a, b int) int { +	if a < b { +		return a +	} +	return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 000000000..bc8ee0747 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( +	"math" +	"time" +) + +const ( +	// The default value of flow control window size in HTTP2 spec. +	defaultWindowSize = 65535 +	// The initial window size for flow control. +	initialWindowSize             = defaultWindowSize // for an RPC +	infinity                      = time.Duration(math.MaxInt64) +	defaultClientKeepaliveTime    = infinity +	defaultClientKeepaliveTimeout = 20 * time.Second +	defaultMaxStreamsClient       = 100 +	defaultMaxConnectionIdle      = infinity +	defaultMaxConnectionAge       = infinity +	defaultMaxConnectionAgeGrace  = infinity +	defaultServerKeepaliveTime    = 2 * time.Hour +	defaultServerKeepaliveTimeout = 20 * time.Second +	defaultKeepalivePolicyMinTime = 5 * time.Minute +	// max window limit set by HTTP2 Specs. +	maxWindowSize = math.MaxInt32 +	// defaultWriteQuota is the default value for number of data +	// bytes that each stream can schedule before some of it being +	// flushed out. 
+	defaultWriteQuota              = 64 * 1024 +	defaultClientMaxHeaderListSize = uint32(16 << 20) +	defaultServerMaxHeaderListSize = uint32(16 << 20) +) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. +var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 000000000..97198c515 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,215 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( +	"fmt" +	"math" +	"sync" +	"sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { +	quota int32 +	// get waits on read from when quota goes less than or equal to zero. +	// replenish writes on it when quota goes positive again. +	ch chan struct{} +	// done is triggered in error case. +	done <-chan struct{} +	// replenish is called by loopyWriter to give quota back to. +	// It is implemented as a field so that it can be updated +	// by tests. 
+	replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { +	w := &writeQuota{ +		quota: sz, +		ch:    make(chan struct{}, 1), +		done:  done, +	} +	w.replenish = w.realReplenish +	return w +} + +func (w *writeQuota) get(sz int32) error { +	for { +		if atomic.LoadInt32(&w.quota) > 0 { +			atomic.AddInt32(&w.quota, -sz) +			return nil +		} +		select { +		case <-w.ch: +			continue +		case <-w.done: +			return errStreamDone +		} +	} +} + +func (w *writeQuota) realReplenish(n int) { +	sz := int32(n) +	a := atomic.AddInt32(&w.quota, sz) +	b := a - sz +	if b <= 0 && a > 0 { +		select { +		case w.ch <- struct{}{}: +		default: +		} +	} +} + +type trInFlow struct { +	limit               uint32 +	unacked             uint32 +	effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { +	d := n - f.limit +	f.limit = n +	f.updateEffectiveWindowSize() +	return d +} + +func (f *trInFlow) onData(n uint32) uint32 { +	f.unacked += n +	if f.unacked >= f.limit/4 { +		w := f.unacked +		f.unacked = 0 +		f.updateEffectiveWindowSize() +		return w +	} +	f.updateEffectiveWindowSize() +	return 0 +} + +func (f *trInFlow) reset() uint32 { +	w := f.unacked +	f.unacked = 0 +	f.updateEffectiveWindowSize() +	return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { +	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { +	return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { +	mu sync.Mutex +	// The inbound flow control limit for pending data. +	limit uint32 +	// pendingData is the overall data which have been received but not been +	// consumed by applications. +	pendingData uint32 +	// The amount of data the application has consumed but grpc has not sent +	// window update for them. Used to reduce window update frequency. 
+	pendingUpdate uint32 +	// delta is the extra window update given by receiver when an application +	// is reading data bigger in size than the inFlow limit. +	delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) { +	f.mu.Lock() +	f.limit = n +	f.mu.Unlock() +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { +	if n > uint32(math.MaxInt32) { +		n = uint32(math.MaxInt32) +	} +	f.mu.Lock() +	defer f.mu.Unlock() +	// estSenderQuota is the receiver's view of the maximum number of bytes the sender +	// can send without a window update. +	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) +	// estUntransmittedData is the maximum number of bytes the sends might not have put +	// on the wire yet. A value of 0 or less means that we have already received all or +	// more bytes than the application is requesting to read. +	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. +	// This implies that unless we send a window update, the sender won't be able to send all the bytes +	// for this message. Therefore we must send an update over the limit since there's an active read +	// request from the application. +	if estUntransmittedData > estSenderQuota { +		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. +		if f.limit+n > maxWindowSize { +			f.delta = maxWindowSize - f.limit +		} else { +			// Send a window update for the whole message and not just the difference between +			// estUntransmittedData and estSenderQuota. This will be helpful in case the message +			// is padded; We will fallback on the current available window(at least a 1/4th of the limit). +			f.delta = n +		} +		return f.delta +	} +	return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. 
+func (f *inFlow) onData(n uint32) error { +	f.mu.Lock() +	f.pendingData += n +	if f.pendingData+f.pendingUpdate > f.limit+f.delta { +		limit := f.limit +		rcvd := f.pendingData + f.pendingUpdate +		f.mu.Unlock() +		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) +	} +	f.mu.Unlock() +	return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { +	f.mu.Lock() +	if f.pendingData == 0 { +		f.mu.Unlock() +		return 0 +	} +	f.pendingData -= n +	if n > f.delta { +		n -= f.delta +		f.delta = 0 +	} else { +		f.delta -= n +		n = 0 +	} +	f.pendingUpdate += n +	if f.pendingUpdate >= f.limit/4 { +		wu := f.pendingUpdate +		f.pendingUpdate = 0 +		f.mu.Unlock() +		return wu +	} +	f.mu.Unlock() +	return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 000000000..e6626bf96 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,477 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. 
It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( +	"bytes" +	"context" +	"errors" +	"fmt" +	"io" +	"net" +	"net/http" +	"strings" +	"sync" +	"time" + +	"github.com/golang/protobuf/proto" +	"golang.org/x/net/http2" +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/internal/grpcutil" +	"google.golang.org/grpc/metadata" +	"google.golang.org/grpc/peer" +	"google.golang.org/grpc/stats" +	"google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +	if r.ProtoMajor != 2 { +		msg := "gRPC requires HTTP/2" +		http.Error(w, msg, http.StatusBadRequest) +		return nil, errors.New(msg) +	} +	if r.Method != "POST" { +		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) +		http.Error(w, msg, http.StatusBadRequest) +		return nil, errors.New(msg) +	} +	contentType := r.Header.Get("Content-Type") +	// TODO: do we assume contentType is lowercase? 
we did before +	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) +	if !validContentType { +		msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) +		http.Error(w, msg, http.StatusUnsupportedMediaType) +		return nil, errors.New(msg) +	} +	if _, ok := w.(http.Flusher); !ok { +		msg := "gRPC requires a ResponseWriter supporting http.Flusher" +		http.Error(w, msg, http.StatusInternalServerError) +		return nil, errors.New(msg) +	} + +	st := &serverHandlerTransport{ +		rw:             w, +		req:            r, +		closedCh:       make(chan struct{}), +		writes:         make(chan func()), +		contentType:    contentType, +		contentSubtype: contentSubtype, +		stats:          stats, +	} + +	if v := r.Header.Get("grpc-timeout"); v != "" { +		to, err := decodeTimeout(v) +		if err != nil { +			msg := fmt.Sprintf("malformed grpc-timeout: %v", err) +			http.Error(w, msg, http.StatusBadRequest) +			return nil, status.Error(codes.Internal, msg) +		} +		st.timeoutSet = true +		st.timeout = to +	} + +	metakv := []string{"content-type", contentType} +	if r.Host != "" { +		metakv = append(metakv, ":authority", r.Host) +	} +	for k, vv := range r.Header { +		k = strings.ToLower(k) +		if isReservedHeader(k) && !isWhitelistedHeader(k) { +			continue +		} +		for _, v := range vv { +			v, err := decodeMetadataHeader(k, v) +			if err != nil { +				msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) +				http.Error(w, msg, http.StatusBadRequest) +				return nil, status.Error(codes.Internal, msg) +			} +			metakv = append(metakv, k, v) +		} +	} +	st.headerMD = metadata.Pairs(metakv...) + +	return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. 
+type serverHandlerTransport struct { +	rw         http.ResponseWriter +	req        *http.Request +	timeoutSet bool +	timeout    time.Duration + +	headerMD metadata.MD + +	closeOnce sync.Once +	closedCh  chan struct{} // closed on Close + +	// writes is a channel of code to run serialized in the +	// ServeHTTP (HandleStreams) goroutine. The channel is closed +	// when WriteStatus is called. +	writes chan func() + +	// block concurrent WriteStatus calls +	// e.g. grpc/(*serverStream).SendMsg/RecvMsg +	writeStatusMu sync.Mutex + +	// we just mirror the request content-type +	contentType string +	// we store both contentType and contentSubtype so we don't keep recreating them +	// TODO make sure this is consistent across handler_server and http2_server +	contentSubtype string + +	stats []stats.Handler +} + +func (ht *serverHandlerTransport) Close(err error) { +	ht.closeOnce.Do(func() { +		if logger.V(logLevel) { +			logger.Infof("Closing serverHandlerTransport: %v", err) +		} +		close(ht.closedCh) +	}) +} + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { +	if a != "" { +		// Per the documentation on net/http.Request.RemoteAddr, if this is +		// set, it's set to the IP:port of the peer (hence, TCP): +		// https://golang.org/pkg/net/http/#Request +		// +		// If we want to support Unix sockets later, we can +		// add our own grpc-specific convention within the +		// grpc codebase to set RemoteAddr to a different +		// format, or probably better: we can attach it to the +		// context and use that from serverHandlerTransport.RemoteAddr. +		return "tcp" +	} +	return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. 
+func (ht *serverHandlerTransport) do(fn func()) error { +	select { +	case <-ht.closedCh: +		return ErrConnClosing +	case ht.writes <- fn: +		return nil +	} +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +	ht.writeStatusMu.Lock() +	defer ht.writeStatusMu.Unlock() + +	headersWritten := s.updateHeaderSent() +	err := ht.do(func() { +		if !headersWritten { +			ht.writePendingHeaders(s) +		} + +		// And flush, in case no header or body has been sent yet. +		// This forces a separation of headers and trailers if this is the +		// first call (for example, in end2end tests's TestNoService). +		ht.rw.(http.Flusher).Flush() + +		h := ht.rw.Header() +		h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) +		if m := st.Message(); m != "" { +			h.Set("Grpc-Message", encodeGrpcMessage(m)) +		} + +		if p := st.Proto(); p != nil && len(p.Details) > 0 { +			stBytes, err := proto.Marshal(p) +			if err != nil { +				// TODO: return error instead, when callers are able to handle it. +				panic(err) +			} + +			h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) +		} + +		if md := s.Trailer(); len(md) > 0 { +			for k, vv := range md { +				// Clients don't tolerate reading restricted headers after some non restricted ones were sent. +				if isReservedHeader(k) { +					continue +				} +				for _, v := range vv { +					// http2 ResponseWriter mechanism to send undeclared Trailers after +					// the headers have possibly been written. +					h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) +				} +			} +		} +	}) + +	if err == nil { // transport has not been closed +		// Note: The trailer fields are compressed with hpack after this call returns. +		// No WireLength field is set here. 
+		for _, sh := range ht.stats { +			sh.HandleRPC(s.Context(), &stats.OutTrailer{ +				Trailer: s.trailer.Copy(), +			}) +		} +	} +	ht.Close(errors.New("finished writing status")) +	return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +	ht.writeCommonHeaders(s) +	ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +	h := ht.rw.Header() +	h["Date"] = nil // suppress Date to make tests happy; TODO: restore +	h.Set("Content-Type", ht.contentType) + +	// Predeclare trailers we'll set later in WriteStatus (after the body). +	// This is a SHOULD in the HTTP RFC, and the way you add (known) +	// Trailers per the net/http.ResponseWriter contract. +	// See https://golang.org/pkg/net/http/#ResponseWriter +	// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +	h.Add("Trailer", "Grpc-Status") +	h.Add("Trailer", "Grpc-Message") +	h.Add("Trailer", "Grpc-Status-Details-Bin") + +	if s.sendCompress != "" { +		h.Set("Grpc-Encoding", s.sendCompress) +	} +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). 
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +	h := ht.rw.Header() + +	s.hdrMu.Lock() +	for k, vv := range s.header { +		if isReservedHeader(k) { +			continue +		} +		for _, v := range vv { +			h.Add(k, encodeMetadataHeader(k, v)) +		} +	} + +	s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +	headersWritten := s.updateHeaderSent() +	return ht.do(func() { +		if !headersWritten { +			ht.writePendingHeaders(s) +		} +		ht.rw.Write(hdr) +		ht.rw.Write(data) +		ht.rw.(http.Flusher).Flush() +	}) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +	if err := s.SetHeader(md); err != nil { +		return err +	} + +	headersWritten := s.updateHeaderSent() +	err := ht.do(func() { +		if !headersWritten { +			ht.writePendingHeaders(s) +		} + +		ht.rw.WriteHeader(200) +		ht.rw.(http.Flusher).Flush() +	}) + +	if err == nil { +		for _, sh := range ht.stats { +			// Note: The header fields are compressed with hpack after this call returns. +			// No WireLength field is set here. +			sh.HandleRPC(s.Context(), &stats.OutHeader{ +				Header:      md.Copy(), +				Compression: s.sendCompress, +			}) +		} +	} +	return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +	// With this transport type there will be exactly 1 stream: this HTTP request. + +	ctx := ht.req.Context() +	var cancel context.CancelFunc +	if ht.timeoutSet { +		ctx, cancel = context.WithTimeout(ctx, ht.timeout) +	} else { +		ctx, cancel = context.WithCancel(ctx) +	} + +	// requestOver is closed when the status has been written via WriteStatus. 
+	requestOver := make(chan struct{}) +	go func() { +		select { +		case <-requestOver: +		case <-ht.closedCh: +		case <-ht.req.Context().Done(): +		} +		cancel() +		ht.Close(errors.New("request is done processing")) +	}() + +	req := ht.req + +	s := &Stream{ +		id:             0, // irrelevant +		requestRead:    func(int) {}, +		cancel:         cancel, +		buf:            newRecvBuffer(), +		st:             ht, +		method:         req.URL.Path, +		recvCompress:   req.Header.Get("grpc-encoding"), +		contentSubtype: ht.contentSubtype, +	} +	pr := &peer.Peer{ +		Addr: ht.RemoteAddr(), +	} +	if req.TLS != nil { +		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} +	} +	ctx = metadata.NewIncomingContext(ctx, ht.headerMD) +	s.ctx = peer.NewContext(ctx, pr) +	for _, sh := range ht.stats { +		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) +		inHeader := &stats.InHeader{ +			FullMethod:  s.method, +			RemoteAddr:  ht.RemoteAddr(), +			Compression: s.recvCompress, +		} +		sh.HandleRPC(s.ctx, inHeader) +	} +	s.trReader = &transportReader{ +		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, +		windowHandler: func(int) {}, +	} + +	// readerDone is closed when the Body.Read-ing goroutine exits. +	readerDone := make(chan struct{}) +	go func() { +		defer close(readerDone) + +		// TODO: minimize garbage, optimize recvBuffer code/ownership +		const readSize = 8196 +		for buf := make([]byte, readSize); ; { +			n, err := req.Body.Read(buf) +			if n > 0 { +				s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) +				buf = buf[n:] +			} +			if err != nil { +				s.buf.put(recvMsg{err: mapRecvMsgError(err)}) +				return +			} +			if len(buf) == 0 { +				buf = make([]byte, readSize) +			} +		} +	}() + +	// startStream is provided by the *grpc.Server's serveStreams. 
+	// It starts a goroutine serving s and exits immediately.
+	// The goroutine that is started is the one that then calls
+	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+	startStream(s)
+
+	// Serve queued write callbacks until the transport closes, then signal
+	// the cancellation goroutine above and reap the body-reading goroutine.
+	ht.runStream()
+	close(requestOver)
+
+	// Wait for reading goroutine to finish.
+	req.Body.Close()
+	<-readerDone
+}
+
+// runStream executes write callbacks from ht.writes on this goroutine until
+// the transport is closed.
+func (ht *serverHandlerTransport) runStream() {
+	for {
+		select {
+		case fn := <-ht.writes:
+			fn()
+		case <-ht.closedCh:
+			return
+		}
+	}
+}
+
+// IncrMsgSent is a no-op for this transport.
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+// IncrMsgRecv is a no-op for this transport.
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
+
+// Drain is not implemented for this transport and panics if called.
+func (ht *serverHandlerTransport) Drain() {
+	panic("Drain() is not implemented")
+}
+
+// mapRecvMsgError returns the non-nil err into the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, in can only be:
+//   - io.EOF
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+func mapRecvMsgError(err error) error {
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return err
+	}
+	if se, ok := err.(http2.StreamError); ok {
+		if code, ok := http2ErrConvTab[se.Code]; ok {
+			return status.Error(code, se.Error())
+		}
+	}
+	// "body closed by handler" is the text net/http produces when the handler
+	// closes the request body; surface it as RPC cancellation.
+	if strings.Contains(err.Error(), "body closed by handler") {
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return connectionErrorf(true, err, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 000000000..79ee8aea0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1800 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( +	"context" +	"fmt" +	"io" +	"math" +	"net" +	"net/http" +	"path/filepath" +	"strconv" +	"strings" +	"sync" +	"sync/atomic" +	"time" + +	"golang.org/x/net/http2" +	"golang.org/x/net/http2/hpack" +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/internal/channelz" +	icredentials "google.golang.org/grpc/internal/credentials" +	"google.golang.org/grpc/internal/grpcsync" +	"google.golang.org/grpc/internal/grpcutil" +	imetadata "google.golang.org/grpc/internal/metadata" +	istatus "google.golang.org/grpc/internal/status" +	"google.golang.org/grpc/internal/syscall" +	"google.golang.org/grpc/internal/transport/networktype" +	"google.golang.org/grpc/keepalive" +	"google.golang.org/grpc/metadata" +	"google.golang.org/grpc/peer" +	"google.golang.org/grpc/resolver" +	"google.golang.org/grpc/stats" +	"google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { +	lastRead  int64 // Keep this field 64-bit aligned. Accessed atomically. +	ctx       context.Context +	cancel    context.CancelFunc +	ctxDone   <-chan struct{} // Cache the ctx.Done() chan. +	userAgent string +	// address contains the resolver returned address for this transport. 
+	// If the `ServerName` field is set, it takes precedence over `CallHdr.Host` +	// passed to `NewStream`, when determining the :authority header. +	address    resolver.Address +	md         metadata.MD +	conn       net.Conn // underlying communication channel +	loopy      *loopyWriter +	remoteAddr net.Addr +	localAddr  net.Addr +	authInfo   credentials.AuthInfo // auth info about the connection + +	readerDone chan struct{} // sync point to enable testing. +	writerDone chan struct{} // sync point to enable testing. +	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) +	// that the server sent GoAway on this transport. +	goAway chan struct{} + +	framer *framer +	// controlBuf delivers all the control related tasks (e.g., window +	// updates, reset streams, and various settings) to the controller. +	// Do not access controlBuf with mu held. +	controlBuf *controlBuffer +	fc         *trInFlow +	// The scheme used: https if TLS is on, http otherwise. +	scheme string + +	isSecure bool + +	perRPCCreds []credentials.PerRPCCredentials + +	kp               keepalive.ClientParameters +	keepaliveEnabled bool + +	statsHandlers []stats.Handler + +	initialWindowSize int32 + +	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE +	maxSendHeaderListSize *uint32 + +	bdpEst *bdpEstimator + +	maxConcurrentStreams  uint32 +	streamQuota           int64 +	streamsQuotaAvailable chan struct{} +	waitingStreams        uint32 +	nextID                uint32 +	registeredCompressors string + +	// Do not access controlBuf with mu held. +	mu            sync.Mutex // guard the following variables +	state         transportState +	activeStreams map[uint32]*Stream +	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. +	prevGoAwayID uint32 +	// goAwayReason records the http2.ErrCode and debug data received with the +	// GoAway frame. 
+	goAwayReason GoAwayReason
+	// goAwayDebugMessage contains a detailed human readable string about a
+	// GoAway frame, useful for error messages.
+	goAwayDebugMessage string
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant. The condition for dormancy is based on the number of active
+	// streams and the `PermitWithoutStream` keepalive client parameter. And
+	// since the number of active streams is guarded by the above mutex, we use
+	// the same for this condition variable as well.
+	kpDormancyCond *sync.Cond
+	// A boolean to track whether the keepalive goroutine is dormant or not.
+	// This is checked before attempting to signal the above condition
+	// variable.
+	kpDormant bool
+
+	// Fields below are for channelz metric collection.
+	channelzID *channelz.Identifier
+	czData     *channelzData
+
+	// onClose is invoked when the transport closes, carrying the GoAwayReason
+	// (GoAwayInvalid when no GOAWAY was involved).
+	onClose func(GoAwayReason)
+
+	bufferPool *bufferPool
+
+	connectionID uint64
+}
+
+// dial opens the underlying connection for a transport: it honors a custom
+// dialer fn when provided (including legacy "unix:"/"unix://" target
+// rewriting), routes tcp targets through an HTTP CONNECT proxy when useProxy
+// is set, and otherwise falls back to net.Dialer.
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+	address := addr.Addr
+	networkType, ok := networktype.Get(addr)
+	if fn != nil {
+		// Special handling for unix scheme with custom dialer. Back in the day,
+		// we did not have a unix resolver and therefore targets with a unix
+		// scheme would end up using the passthrough resolver. So, user's used a
+		// custom dialer in this case and expected the original dial target to
+		// be passed to the custom dialer. Now, we have a unix resolver. But if
+		// a custom dialer is specified, we want to retain the old behavior in
+		// terms of the address being passed to the custom dialer.
+		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+			// Supported unix targets are either "unix://absolute-path" or
+			// "unix:relative-path".
+			if filepath.IsAbs(address) {
+				return fn(ctx, "unix://"+address)
+			}
+			return fn(ctx, "unix:"+address)
+		}
+		return fn(ctx, address)
+	}
+	if !ok {
+		networkType, address = parseDialTarget(address)
+	}
+	if networkType == "tcp" && useProxy {
+		return proxyDial(ctx, address, grpcUA)
+	}
+	return (&net.Dialer{}).DialContext(ctx, networkType, address)
+}
+
+// isTemporary reports whether err is worth retrying; error types that expose
+// neither Temporary() nor Timeout() are conservatively treated as temporary.
+func isTemporary(err error) bool {
+	switch err := err.(type) {
+	case interface {
+		Temporary() bool
+	}:
+		return err.Temporary()
+	case interface {
+		Timeout() bool
+	}:
+		// Timeouts may be resolved upon retry, and are thus treated as
+		// temporary.
+		return err.Timeout()
+	}
+	return true
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. Non-nil error returns if construction
+// fails.
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
+	scheme := "http"
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	// gRPC, resolver, balancer etc. can specify arbitrary data in the
+	// Attributes field of resolver.Address, which is shoved into connectCtx
+	// and passed to the dialer and credential handshaker. This makes it possible for
+	// address specific arbitrary data to reach custom dialers and credential handshakers.
+	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + +	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) +	if err != nil { +		if opts.FailOnNonTempDialError { +			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) +		} +		return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) +	} + +	// Any further errors will close the underlying connection +	defer func(conn net.Conn) { +		if err != nil { +			conn.Close() +		} +	}(conn) + +	// The following defer and goroutine monitor the connectCtx for cancelation +	// and deadline.  On context expiration, the connection is hard closed and +	// this function will naturally fail as a result.  Otherwise, the defer +	// waits for the goroutine to exit to prevent the context from being +	// monitored (and to prevent the connection from ever being closed) after +	// returning from this function. +	ctxMonitorDone := grpcsync.NewEvent() +	newClientCtx, newClientDone := context.WithCancel(connectCtx) +	defer func() { +		newClientDone()         // Awaken the goroutine below if connectCtx hasn't expired. +		<-ctxMonitorDone.Done() // Wait for the goroutine below to exit. +	}() +	go func(conn net.Conn) { +		defer ctxMonitorDone.Fire() // Signal this goroutine has exited. +		<-newClientCtx.Done()       // Block until connectCtx expires or the defer above executes. +		if err := connectCtx.Err(); err != nil { +			// connectCtx expired before exiting the function.  Hard close the connection. +			if logger.V(logLevel) { +				logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) +			} +			conn.Close() +		} +	}(conn) + +	kp := opts.KeepaliveParams +	// Validate keepalive parameters. 
+	if kp.Time == 0 { +		kp.Time = defaultClientKeepaliveTime +	} +	if kp.Timeout == 0 { +		kp.Timeout = defaultClientKeepaliveTimeout +	} +	keepaliveEnabled := false +	if kp.Time != infinity { +		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { +			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) +		} +		keepaliveEnabled = true +	} +	var ( +		isSecure bool +		authInfo credentials.AuthInfo +	) +	transportCreds := opts.TransportCredentials +	perRPCCreds := opts.PerRPCCredentials + +	if b := opts.CredsBundle; b != nil { +		if t := b.TransportCredentials(); t != nil { +			transportCreds = t +		} +		if t := b.PerRPCCredentials(); t != nil { +			perRPCCreds = append(perRPCCreds, t) +		} +	} +	if transportCreds != nil { +		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) +		if err != nil { +			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) +		} +		for _, cd := range perRPCCreds { +			if cd.RequireTransportSecurity() { +				if ci, ok := authInfo.(interface { +					GetCommonAuthInfo() credentials.CommonAuthInfo +				}); ok { +					secLevel := ci.GetCommonAuthInfo().SecurityLevel +					if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { +						return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") +					} +				} +			} +		} +		isSecure = true +		if transportCreds.Info().SecurityProtocol == "tls" { +			scheme = "https" +		} +	} +	dynamicWindow := true +	icwz := int32(initialWindowSize) +	if opts.InitialConnWindowSize >= defaultWindowSize { +		icwz = opts.InitialConnWindowSize +		dynamicWindow = false +	} +	writeBufSize := opts.WriteBufferSize +	readBufSize := opts.ReadBufferSize +	maxHeaderListSize := defaultClientMaxHeaderListSize +	if opts.MaxHeaderListSize != nil { +		maxHeaderListSize = *opts.MaxHeaderListSize +	} +	
t := &http2Client{ +		ctx:                   ctx, +		ctxDone:               ctx.Done(), // Cache Done chan. +		cancel:                cancel, +		userAgent:             opts.UserAgent, +		registeredCompressors: grpcutil.RegisteredCompressors(), +		address:               addr, +		conn:                  conn, +		remoteAddr:            conn.RemoteAddr(), +		localAddr:             conn.LocalAddr(), +		authInfo:              authInfo, +		readerDone:            make(chan struct{}), +		writerDone:            make(chan struct{}), +		goAway:                make(chan struct{}), +		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), +		fc:                    &trInFlow{limit: uint32(icwz)}, +		scheme:                scheme, +		activeStreams:         make(map[uint32]*Stream), +		isSecure:              isSecure, +		perRPCCreds:           perRPCCreds, +		kp:                    kp, +		statsHandlers:         opts.StatsHandlers, +		initialWindowSize:     initialWindowSize, +		nextID:                1, +		maxConcurrentStreams:  defaultMaxStreamsClient, +		streamQuota:           defaultMaxStreamsClient, +		streamsQuotaAvailable: make(chan struct{}, 1), +		czData:                new(channelzData), +		keepaliveEnabled:      keepaliveEnabled, +		bufferPool:            newBufferPool(), +		onClose:               onClose, +	} +	// Add peer information to the http2client context. 
+	t.ctx = peer.NewContext(t.ctx, t.getPeer()) + +	if md, ok := addr.Metadata.(*metadata.MD); ok { +		t.md = *md +	} else if md := imetadata.Get(addr); md != nil { +		t.md = md +	} +	t.controlBuf = newControlBuffer(t.ctxDone) +	if opts.InitialWindowSize >= defaultWindowSize { +		t.initialWindowSize = opts.InitialWindowSize +		dynamicWindow = false +	} +	if dynamicWindow { +		t.bdpEst = &bdpEstimator{ +			bdp:               initialWindowSize, +			updateFlowControl: t.updateFlowControl, +		} +	} +	for _, sh := range t.statsHandlers { +		t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ +			RemoteAddr: t.remoteAddr, +			LocalAddr:  t.localAddr, +		}) +		connBegin := &stats.ConnBegin{ +			Client: true, +		} +		sh.HandleConn(t.ctx, connBegin) +	} +	t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) +	if err != nil { +		return nil, err +	} +	if t.keepaliveEnabled { +		t.kpDormancyCond = sync.NewCond(&t.mu) +		go t.keepalive() +	} + +	// Start the reader goroutine for incoming messages. Each transport has a +	// dedicated goroutine which reads HTTP2 frames from the network. Then it +	// dispatches the frame to the corresponding stream entity.  When the +	// server preface is received, readerErrCh is closed.  If an error occurs +	// first, an error is pushed to the channel.  This must be checked before +	// returning from this function. +	readerErrCh := make(chan error, 1) +	go t.reader(readerErrCh) +	defer func() { +		if err == nil { +			err = <-readerErrCh +		} +		if err != nil { +			t.Close(err) +		} +	}() + +	// Send connection preface to server. 
+	n, err := t.conn.Write(clientPreface) +	if err != nil { +		err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) +		return nil, err +	} +	if n != len(clientPreface) { +		err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) +		return nil, err +	} +	var ss []http2.Setting + +	if t.initialWindowSize != defaultWindowSize { +		ss = append(ss, http2.Setting{ +			ID:  http2.SettingInitialWindowSize, +			Val: uint32(t.initialWindowSize), +		}) +	} +	if opts.MaxHeaderListSize != nil { +		ss = append(ss, http2.Setting{ +			ID:  http2.SettingMaxHeaderListSize, +			Val: *opts.MaxHeaderListSize, +		}) +	} +	err = t.framer.fr.WriteSettings(ss...) +	if err != nil { +		err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) +		return nil, err +	} +	// Adjust the connection flow control window if needed. +	if delta := uint32(icwz - defaultWindowSize); delta > 0 { +		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { +			err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) +			return nil, err +		} +	} + +	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + +	if err := t.framer.writer.Flush(); err != nil { +		return nil, err +	} +	go func() { +		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) +		err := t.loopy.run() +		if logger.V(logLevel) { +			logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) +		} +		// Do not close the transport.  Let reader goroutine handle it since +		// there might be data in the buffers. +		t.conn.Close() +		t.controlBuf.finish() +		close(t.writerDone) +	}() +	return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +	// TODO(zhaoq): Handle uint32 overflow of Stream.id. 
+	s := &Stream{
+		ct:             t,
+		done:           make(chan struct{}),
+		method:         callHdr.Method,
+		sendCompress:   callHdr.SendCompress,
+		buf:            newRecvBuffer(),
+		headerChan:     make(chan struct{}),
+		contentSubtype: callHdr.ContentSubtype,
+		doneFunc:       callHdr.DoneFunc,
+	}
+	s.wq = newWriteQuota(defaultWriteQuota, s.done)
+	s.requestRead = func(n int) {
+		t.adjustWindow(s, uint32(n))
+	}
+	// The client side stream context should have exactly the same life cycle with the user provided context.
+	// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
+	// So we use the original context here instead of creating a copy.
+	s.ctx = ctx
+	// Reads drain s.buf; window updates flow back to the transport, and a read
+	// error tears the stream down via CloseStream. Freed buffers are recycled
+	// through the transport's bufferPool.
+	s.trReader = &transportReader{
+		reader: &recvBufferReader{
+			ctx:     s.ctx,
+			ctxDone: s.ctx.Done(),
+			recv:    s.buf,
+			closeStream: func(err error) {
+				t.CloseStream(s, err)
+			},
+			freeBuffer: t.bufferPool.put,
+		},
+		windowHandler: func(n int) {
+			t.updateWindow(s, uint32(n))
+		},
+	}
+	return s
+}
+
+// getPeer returns the peer for this transport's connection; AuthInfo may be
+// nil when the connection is not secured.
+func (t *http2Client) getPeer() *peer.Peer {
+	return &peer.Peer{
+		Addr:     t.remoteAddr,
+		AuthInfo: t.authInfo, // Can be nil
+	}
+}
+
+// createHeaderFields assembles the hpack header fields for an outgoing RPC:
+// HTTP/2 pseudo-headers, transport-level and per-call credential metadata,
+// compression and timeout headers, stats tags/trace, user metadata from the
+// context, and address metadata, in that order.
+func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
+	aud := t.createAudience(callHdr)
+	ri := credentials.RequestInfo{
+		Method:   callHdr.Method,
+		AuthInfo: t.authInfo,
+	}
+	ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
+	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
+	if err != nil {
+		return nil, err
+	}
+	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
+	// first and create a slice of that exact size.
+	// Make the slice of certain predictable size to reduce allocations made by append.
+	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te +	hfLen += len(authData) + len(callAuthData) +	headerFields := make([]hpack.HeaderField, 0, hfLen) +	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) +	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) +	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) +	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) +	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) +	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) +	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) +	if callHdr.PreviousAttempts > 0 { +		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) +	} + +	registeredCompressors := t.registeredCompressors +	if callHdr.SendCompress != "" { +		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) +		// Include the outgoing compressor name when compressor is not registered +		// via encoding.RegisterCompressor. This is possible when client uses +		// WithCompressor dial option. +		if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { +			if registeredCompressors != "" { +				registeredCompressors += "," +			} +			registeredCompressors += callHdr.SendCompress +		} +	} + +	if registeredCompressors != "" { +		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) +	} +	if dl, ok := ctx.Deadline(); ok { +		// Send out timeout regardless its value. The server can detect timeout context by itself. 
+		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. +		timeout := time.Until(dl) +		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) +	} +	for k, v := range authData { +		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) +	} +	for k, v := range callAuthData { +		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) +	} +	if b := stats.OutgoingTags(ctx); b != nil { +		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) +	} +	if b := stats.OutgoingTrace(ctx); b != nil { +		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) +	} + +	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { +		var k string +		for k, vv := range md { +			// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. +			if isReservedHeader(k) { +				continue +			} +			for _, v := range vv { +				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) +			} +		} +		for _, vv := range added { +			for i, v := range vv { +				if i%2 == 0 { +					k = strings.ToLower(v) +					continue +				} +				// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. +				if isReservedHeader(k) { +					continue +				} +				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) +			} +		} +	} +	for k, vv := range t.md { +		if isReservedHeader(k) { +			continue +		} +		for _, v := range vv { +			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) +		} +	} +	return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { +	// Create an audience string only if needed. 
+	if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
+		return ""
+	}
+	// Construct URI required to get auth request metadata.
+	// Omit port if it is the default one.
+	host := strings.TrimSuffix(callHdr.Host, ":443")
+	pos := strings.LastIndex(callHdr.Method, "/")
+	if pos == -1 {
+		pos = len(callHdr.Method)
+	}
+	return "https://" + host + callHdr.Method[:pos]
+}
+
+// getTrAuthData collects request metadata from the transport-level
+// PerRPCCredentials (dial options / creds bundle) for the given audience.
+// Keys are lowercased (capital header names are illegal in HTTP/2), and
+// credential errors carrying a status code outside the set allowed by
+// gRFC A54 are rewritten to codes.Internal.
+func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
+	if len(t.perRPCCreds) == 0 {
+		return nil, nil
+	}
+	authData := map[string]string{}
+	for _, c := range t.perRPCCreds {
+		data, err := c.GetRequestMetadata(ctx, audience)
+		if err != nil {
+			if st, ok := status.FromError(err); ok {
+				// Restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+				}
+				return nil, err
+			}
+
+			return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
+		}
+		for k, v := range data {
+			// Capital header names are illegal in HTTP/2.
+			k = strings.ToLower(k)
+			authData[k] = v
+		}
+	}
+	return authData, nil
+}
+
+// getCallAuthData collects request metadata from the per-call credentials
+// supplied on callHdr, enforcing transport security when the credentials
+// require it.
+func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
+	var callAuthData map[string]string
+	// Check if credentials.PerRPCCredentials were provided via call options.
+	// Note: if these credentials are provided both via dial options and call
+	// options, then both sets of credentials will be applied.
+	if callCreds := callHdr.Creds; callCreds != nil { +		if callCreds.RequireTransportSecurity() { +			ri, _ := credentials.RequestInfoFromContext(ctx) +			if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { +				return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") +			} +		} +		data, err := callCreds.GetRequestMetadata(ctx, audience) +		if err != nil { +			if st, ok := status.FromError(err); ok { +				// Restrict the code to the list allowed by gRFC A54. +				if istatus.IsRestrictedControlPlaneCode(st) { +					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) +				} +				return nil, err +			} +			return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) +		} +		callAuthData = make(map[string]string, len(data)) +		for k, v := range data { +			// Capital header names are illegal in HTTP/2 +			k = strings.ToLower(k) +			callAuthData[k] = v +		} +	} +	return callAuthData, nil +} + +// NewStreamError wraps an error and reports additional information.  Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire.  However, there are two notable exceptions: +// +//  1. If the stream headers violate the max header list size allowed by the +//     server.  It's possible this could succeed on another transport, even if +//     it's unlikely, but do not transparently retry. +//  2. If the credentials errored when requesting their headers.  In this case, +//     it's possible a retry can fix the problem, but indefinitely transparently +//     retrying is not appropriate as it is likely the credentials, if they can +//     eventually succeed, would need I/O to do so. 
+type NewStreamError struct {
+	Err error
+
+	// AllowTransparentRetry indicates whether the failed RPC may be
+	// transparently retried on another transport (nothing reached the wire).
+	AllowTransparentRetry bool
+}
+
+// Error implements the error interface by delegating to the wrapped error.
+func (e NewStreamError) Error() string {
+	return e.Err.Error()
+}
+
+// NewStream creates a stream and registers it into the transport as "active"
+// streams.  All non-nil errors returned will be *NewStreamError.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
+	ctx = peer.NewContext(ctx, t.getPeer())
+
+	// ServerName field of the resolver returned address takes precedence over
+	// Host field of CallHdr to determine the :authority header. This is because,
+	// the ServerName field takes precedence for server authentication during
+	// TLS handshake, and the :authority header should match the value used
+	// for server authentication.
+	if t.address.ServerName != "" {
+		newCallHdr := *callHdr
+		newCallHdr.Host = t.address.ServerName
+		callHdr = &newCallHdr
+	}
+
+	headerFields, err := t.createHeaderFields(ctx, callHdr)
+	if err != nil {
+		return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
+	}
+	s := t.newStream(ctx, callHdr)
+	// cleanup transitions the stream to done and unblocks pending reads and
+	// header waiters; swapState makes it safe against concurrent invocation.
+	cleanup := func(err error) {
+		if s.swapState(streamDone) == streamDone {
+			// If it was already done, return.
+			return
+		}
+		// The stream was unprocessed by the server.
+		atomic.StoreUint32(&s.unprocessed, 1)
+		s.write(recvMsg{err: err})
+		close(s.done)
+		// If headerChan isn't closed, then close it.
+		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+			close(s.headerChan)
+		}
+	}
+	hdr := &headerFrame{
+		hf:        headerFields,
+		endStream: false,
+		initStream: func(id uint32) error {
+			t.mu.Lock()
+			// TODO: handle transport closure in loopy instead and remove this
+			// initStream is never called when transport is draining.
+			if t.state == closing {
+				t.mu.Unlock()
+				cleanup(ErrConnClosing)
+				return ErrConnClosing
+			}
+			if channelz.IsOn() {
+				atomic.AddInt64(&t.czData.streamsStarted, 1)
+				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+			}
+			// If the keepalive goroutine has gone dormant, wake it up.
+			if t.kpDormant {
+				t.kpDormancyCond.Signal()
+			}
+			t.mu.Unlock()
+			return nil
+		},
+		onOrphaned: cleanup,
+		wq:         s.wq,
+	}
+	// firstTry distinguishes the initial attempt from retries so that
+	// waitingStreams is incremented at most once per RPC.
+	firstTry := true
+	var ch chan struct{}
+	transportDrainRequired := false
+	// checkForStreamQuota reserves one unit of MAX_CONCURRENT_STREAMS quota
+	// and, on success, assigns the next (odd) client stream ID to the header
+	// frame. On failure it registers this RPC as waiting so a later quota
+	// release can wake it via streamsQuotaAvailable.
+	checkForStreamQuota := func(it interface{}) bool {
+		if t.streamQuota <= 0 { // Can go negative if server decreases it.
+			if firstTry {
+				t.waitingStreams++
+			}
+			ch = t.streamsQuotaAvailable
+			return false
+		}
+		if !firstTry {
+			t.waitingStreams--
+		}
+		t.streamQuota--
+		h := it.(*headerFrame)
+		h.streamID = t.nextID
+		t.nextID += 2
+
+		// Drain client transport if nextID > MaxStreamID which signals gRPC that
+		// the connection is closed and a new one must be created for subsequent RPCs.
+		transportDrainRequired = t.nextID > MaxStreamID
+
+		s.id = h.streamID
+		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+		t.mu.Lock()
+		if t.activeStreams == nil { // Can be niled from Close().
+			t.mu.Unlock()
+			return false // Don't create a stream if the transport is already closed.
+		}
+		t.activeStreams[s.id] = s
+		t.mu.Unlock()
+		if t.streamQuota > 0 && t.waitingStreams > 0 {
+			select {
+			case t.streamsQuotaAvailable <- struct{}{}:
+			default:
+			}
+		}
+		return true
+	}
+	var hdrListSizeErr error
+	// checkForHeaderListSize fails stream creation when the cumulative size
+	// of the header fields exceeds the server-advertised
+	// SETTINGS_MAX_HEADER_LIST_SIZE, if one was received.
+	checkForHeaderListSize := func(it interface{}) bool {
+		if t.maxSendHeaderListSize == nil {
+			return true
+		}
+		hdrFrame := it.(*headerFrame)
+		var sz int64
+		for _, f := range hdrFrame.hf {
+			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
+				return false
+			}
+		}
+		return true
+	}
+	for {
+		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
+			return checkForHeaderListSize(it) && checkForStreamQuota(it)
+		}, hdr)
+		if err != nil {
+			// Connection closed.
+			return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
+		}
+		if success {
+			break
+		}
+		if hdrListSizeErr != nil {
+			return nil, &NewStreamError{Err: hdrListSizeErr}
+		}
+		firstTry = false
+		select {
+		case <-ch:
+		case <-ctx.Done():
+			return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
+		case <-t.goAway:
+			return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
+		case <-t.ctx.Done():
+			return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
+		}
+	}
+	if len(t.statsHandlers) != 0 {
+		header, ok := metadata.FromOutgoingContext(ctx)
+		if ok {
+			header.Set("user-agent", t.userAgent)
+		} else {
+			header = metadata.Pairs("user-agent", t.userAgent)
+		}
+		for _, sh := range t.statsHandlers {
+			// Note: The header fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+			// Note: Creating a new stats object to prevent pollution.
+			outHeader := &stats.OutHeader{
+				Client:      true,
+				FullMethod:  callHdr.Method,
+				RemoteAddr:  t.remoteAddr,
+				LocalAddr:   t.localAddr,
+				Compression: callHdr.SendCompress,
+				Header:      header,
+			}
+			sh.HandleRPC(s.ctx, outHeader)
+		}
+	}
+	if transportDrainRequired {
+		if logger.V(logLevel) {
+			logger.Infof("transport: t.nextID > MaxStreamID. Draining")
+		}
+		t.GracefulClose()
+	}
+	return s, nil
+}
+
+// CloseStream clears the footprint of a stream when the stream is not needed any more.
+// This must not be executed in reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// closeStream marks the stream done, records its final status/trailers,
+// queues a cleanup item on controlBuf, and returns the stream's
+// concurrency quota. Safe to call multiple times; only the first call acts.
+func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+	// Set stream status to done.
+	if s.swapState(streamDone) == streamDone {
+		// If it was already done, return.  If multiple closeStream calls
+		// happen simultaneously, wait for the first to finish.
+		<-s.done
+		return
+	}
+	// status and trailers can be updated here without any synchronization because the stream goroutine will
+	// only read it after it sees an io.EOF error from read or write and we'll write those errors
+	// only after updating this.
+	s.status = st
+	if len(mdata) > 0 {
+		s.trailer = mdata
+	}
+	if err != nil {
+		// This will unblock reads eventually.
+		s.write(recvMsg{err: err})
+	}
+	// If headerChan isn't closed, then close it.
+	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+		s.noHeaders = true
+		close(s.headerChan)
+	}
+	// When the cleanup item's onWrite callback runs, the stream is removed
+	// from activeStreams and channelz success/failure is recorded.
+	cleanup := &cleanupStream{
+		streamID: s.id,
+		onWrite: func() {
+			t.mu.Lock()
+			if t.activeStreams != nil {
+				delete(t.activeStreams, s.id)
+			}
+			t.mu.Unlock()
+			if channelz.IsOn() {
+				if eosReceived {
+					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+				} else {
+					atomic.AddInt64(&t.czData.streamsFailed, 1)
+				}
+			}
+		},
+		rst:     rst,
+		rstCode: rstCode,
+	}
+	// Return the concurrent-stream quota taken in NewStream and wake one
+	// waiter, if any.
+	addBackStreamQuota := func(interface{}) bool {
+		t.streamQuota++
+		if t.streamQuota > 0 && t.waitingStreams > 0 {
+			select {
+			case t.streamsQuotaAvailable <- struct{}{}:
+			default:
+			}
+		}
+		return true
+	}
+	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
+	// This will unblock write.
+	close(s.done)
+	if s.doneFunc != nil {
+		s.doneFunc()
+	}
+}
+
+// Close kicks off the shutdown process of the transport. This should be called
+// only once on a transport. Once it is called, the transport should not be
+// accessed any more.
+func (t *http2Client) Close(err error) {
+	t.mu.Lock()
+	// Make sure we only close once.
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if logger.V(logLevel) {
+		logger.Infof("transport: closing: %v", err)
+	}
+	// Call t.onClose ASAP to prevent the client from attempting to create new
+	// streams.
+	if t.state != draining {
+		t.onClose(GoAwayInvalid)
+	}
+	t.state = closing
+	streams := t.activeStreams
+	t.activeStreams = nil
+	if t.kpDormant {
+		// If the keepalive goroutine is blocked on this condition variable, we
+		// should unblock it so that the goroutine eventually exits.
+		t.kpDormancyCond.Signal()
+	}
+	t.mu.Unlock()
+	t.controlBuf.finish()
+	t.cancel()
+	t.conn.Close()
+	channelz.RemoveEntry(t.channelzID)
+	// Append info about previous goaways if there were any, since this may be important
+	// for understanding the root cause for this connection to be closed.
+	_, goAwayDebugMessage := t.GetGoAwayReason()
+
+	var st *status.Status
+	if len(goAwayDebugMessage) > 0 {
+		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+		err = st.Err()
+	} else {
+		st = status.New(codes.Unavailable, err.Error())
+	}
+
+	// Notify all active streams.
+	for _, s := range streams {
+		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
+	}
+	for _, sh := range t.statsHandlers {
+		connEnd := &stats.ConnEnd{
+			Client: true,
+		}
+		sh.HandleConn(t.ctx, connEnd)
+	}
+}
+
+// GracefulClose sets the state to draining, which prevents new streams from
+// being created and causes the transport to be closed when the last active
+// stream is closed.  If there are no active streams, the transport is closed
+// immediately.  This does nothing if the transport is already draining or
+// closing.
+func (t *http2Client) GracefulClose() {
+	t.mu.Lock()
+	// Make sure we move to draining only from active.
+	if t.state == draining || t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if logger.V(logLevel) {
+		logger.Infof("transport: GracefulClose called")
+	}
+	t.onClose(GoAwayInvalid)
+	t.state = draining
+	active := len(t.activeStreams)
+	t.mu.Unlock()
+	if active == 0 {
+		t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
+		return
+	}
+	t.controlBuf.put(&incomingGoAway{})
+}
+
+// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
+// should proceed only if Write returns nil.
+func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if opts.Last {
+		// If it's the last message, update stream state.
+		if !s.compareAndSwapState(streamActive, streamWriteDone) {
+			return errStreamDone
+		}
+	} else if s.getState() != streamActive {
+		return errStreamDone
+	}
+	df := &dataFrame{
+		streamID:  s.id,
+		endStream: opts.Last,
+		h:         hdr,
+		d:         data,
+	}
+	if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
+		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+			return err
+		}
+	}
+	return t.controlBuf.put(df)
+}
+
+// getStream looks up the active stream for the frame's stream ID, or nil.
+func (t *http2Client) getStream(f http2.Frame) *Stream {
+	t.mu.Lock()
+	s := t.activeStreams[f.Header().StreamID]
+	t.mu.Unlock()
+	return s
+}
+
+// adjustWindow sends out extra window update over the initial window size
+// of stream if the application is requesting data larger in size than
+// the window.
+func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+	if w := s.fc.maybeAdjust(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateWindow adjusts the inbound quota for the stream.
+// Window updates will be sent out when the cumulative quota
+// exceeds the corresponding threshold.
+func (t *http2Client) updateWindow(s *Stream, n uint32) {
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the stream based on the current bdp
+// estimation.
+func (t *http2Client) updateFlowControl(n uint32) {
+	updateIWS := func(interface{}) bool {
+		t.initialWindowSize = int32(n)
+		t.mu.Lock()
+		for _, s := range t.activeStreams {
+			s.fc.newLimit(n)
+		}
+		t.mu.Unlock()
+		return true
+	}
+	t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
+	t.controlBuf.put(&outgoingSettings{
+		ss: []http2.Setting{
+			{
+				ID:  http2.SettingInitialWindowSize,
+				Val: n,
+			},
+		},
+	})
+}
+
+// handleData processes an inbound DATA frame: it replenishes the
+// connection-level flow-control window, optionally triggers a BDP ping, and
+// delivers the payload to the matching stream.
+func (t *http2Client) handleData(f *http2.DataFrame) {
+	size := f.Header().Length
+	var sendBDPPing bool
+	if t.bdpEst != nil {
+		sendBDPPing = t.bdpEst.add(size)
+	}
+	// Decouple connection's flow control from application's read.
+	// An update on connection's flow control should not depend on
+	// whether user application has read the data or not. Such a
+	// restriction is already imposed on the stream's flow control,
+	// and therefore the sender will be blocked anyways.
+	// Decoupling the connection flow control will prevent other
+	// active(fast) streams from starving in presence of slow or
+	// inactive streams.
+	//
+	if w := t.fc.onData(size); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{
+			streamID:  0,
+			increment: w,
+		})
+	}
+	if sendBDPPing {
+		// Avoid excessive ping detection (e.g. in an L7 proxy)
+		// by sending a window update prior to the BDP ping.
+
+		if w := t.fc.reset(); w > 0 {
+			t.controlBuf.put(&outgoingWindowUpdate{
+				streamID:  0,
+				increment: w,
+			})
+		}
+
+		t.controlBuf.put(bdpPing)
+	}
+	// Select the right stream to dispatch.
+	s := t.getStream(f)
+	if s == nil {
+		return
+	}
+	if size > 0 {
+		if err := s.fc.onData(size); err != nil {
+			t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
+			return
+		}
+		if f.Header().Flags.Has(http2.FlagDataPadded) {
+			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
+			}
+		}
+		// TODO(bradfitz, zhaoq): A copy is required here because there is no
+		// guarantee f.Data() is consumed before the arrival of next frame.
+		// Can this copy be eliminated?
+		if len(f.Data()) > 0 {
+			buffer := t.bufferPool.get()
+			buffer.Reset()
+			buffer.Write(f.Data())
+			s.write(recvMsg{buffer: buffer})
+		}
+	}
+	// The server has closed the stream without sending trailers.  Record that
+	// the read direction is closed, and set the status appropriately.
+	if f.StreamEnded() {
+		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
+	}
+}
+
+// handleRSTStream maps a received RST_STREAM frame to a gRPC status and
+// terminates the affected stream.
+func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
+	s := t.getStream(f)
+	if s == nil {
+		return
+	}
+	if f.ErrCode == http2.ErrCodeRefusedStream {
+		// The stream was unprocessed by the server.
+		atomic.StoreUint32(&s.unprocessed, 1)
+	}
+	statusCode, ok := http2ErrConvTab[f.ErrCode]
+	if !ok {
+		if logger.V(logLevel) {
+			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
+		}
+		statusCode = codes.Unknown
+	}
+	if statusCode == codes.Canceled {
+		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
+			// Our deadline was already exceeded, and that was likely the cause
+			// of this cancelation.  Alter the status code accordingly.
+			statusCode = codes.DeadlineExceeded
+		}
+	}
+	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+}
+
+// handleSettings applies a (non-ACK) SETTINGS frame from the server. isFirst
+// indicates the server preface, where an absent MAX_CONCURRENT_STREAMS is
+// treated as unlimited.
+func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
+	if f.IsAck() {
+		return
+	}
+	var maxStreams *uint32
+	var ss []http2.Setting
+	var updateFuncs []func()
+	f.ForeachSetting(func(s http2.Setting) error {
+		switch s.ID {
+		case http2.SettingMaxConcurrentStreams:
+			maxStreams = new(uint32)
+			*maxStreams = s.Val
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
+		return nil
+	})
+	if isFirst && maxStreams == nil {
+		maxStreams = new(uint32)
+		*maxStreams = math.MaxUint32
+	}
+	sf := &incomingSettings{
+		ss: ss,
+	}
+	if maxStreams != nil {
+		updateStreamQuota := func() {
+			delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
+			t.maxConcurrentStreams = *maxStreams
+			t.streamQuota += delta
+			if delta > 0 && t.waitingStreams > 0 {
+				close(t.streamsQuotaAvailable) // wake all of them up.
+				t.streamsQuotaAvailable = make(chan struct{}, 1)
+			}
+		}
+		updateFuncs = append(updateFuncs, updateStreamQuota)
+	}
+	t.controlBuf.executeAndPut(func(interface{}) bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, sf)
+}
+
+// handlePing replies to server pings and feeds ping ACKs to the BDP
+// estimator.
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+	if f.IsAck() {
+		// Maybe it's a BDP ping.
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+}
+
+// handleGoAway processes a GOAWAY frame, moving the transport into draining
+// and closing streams the server will not process. See the inline comments
+// for the two-GoAway protocol the server uses.
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
+		if logger.V(logLevel) {
+			logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+		}
+	}
+	id := f.LastStreamID
+	if id > 0 && id%2 == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
+		return
+	}
+	// A client can receive multiple GoAways from the server (see
+	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
+	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+	// sent after an RTT delay with the ID of the last stream the server will
+	// process.
+	//
+	// Therefore, when we get the first GoAway we don't necessarily close any
+	// streams. While in case of second GoAway we close all streams created after
+	// the GoAwayId. This way streams that were in-flight while the GoAway from
+	// server was being sent don't get killed.
+	select {
+	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
+		if id > t.prevGoAwayID {
+			t.mu.Unlock()
+			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
+			return
+		}
+	default:
+		t.setGoAwayReason(f)
+		close(t.goAway)
+		defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
+		// Notify the clientconn about the GOAWAY before we set the state to
+		// draining, to allow the client to stop attempting to create streams
+		// before disallowing new streams on this connection.
+		if t.state != draining {
+			t.onClose(t.goAwayReason)
+			t.state = draining
+		}
+	}
+	// All streams with IDs greater than the GoAwayId
+	// and smaller than the previous GoAway ID should be killed.
+	upperLimit := t.prevGoAwayID
+	if upperLimit == 0 { // This is the first GoAway Frame.
+		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+	}
+
+	t.prevGoAwayID = id
+	if len(t.activeStreams) == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
+		return
+	}
+
+	streamsToClose := make([]*Stream, 0)
+	for streamID, stream := range t.activeStreams {
+		if streamID > id && streamID <= upperLimit {
+			// The stream was unprocessed by the server.
+			atomic.StoreUint32(&stream.unprocessed, 1)
+			streamsToClose = append(streamsToClose, stream)
+		}
+	}
+	t.mu.Unlock()
+	// Called outside t.mu because closeStream can take controlBuf's mu, which
+	// could induce deadlock and is not allowed.
+	for _, stream := range streamsToClose {
+		t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+	}
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+	t.goAwayReason = GoAwayNoReason
+	switch f.ErrCode {
+	case http2.ErrCodeEnhanceYourCalm:
+		if string(f.DebugData()) == "too_many_pings" {
+			t.goAwayReason = GoAwayTooManyPings
+		}
+	}
+	if len(f.DebugData()) == 0 {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+	} else {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+	}
+}
+
+// GetGoAwayReason returns the reason and debug message recorded from the
+// most recently received GOAWAY frame.
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.goAwayReason, t.goAwayDebugMessage
+}
+
+// handleWindowUpdate queues a received WINDOW_UPDATE frame on controlBuf.
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+	s := t.getStream(frame)
+	if s == nil {
+		return
+	}
+	endStream := frame.StreamEnded()
+	atomic.StoreUint32(&s.bytesReceived, 1)
+	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
+
+	if !initialHeader && !endStream {
+		// As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set.
+		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
+		return
+	}
+
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		se := status.New(codes.Internal, "peer header list size exceeded limit")
+		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
+		return
+	}
+
+	var (
+		// If a gRPC Response-Headers has already been received, then it means
+		// that the peer is speaking gRPC and we are in gRPC mode.
+		isGRPC         = !initialHeader
+		mdata          = make(map[string][]string)
+		contentTypeErr = "malformed header: missing HTTP content-type"
+		grpcMessage    string
+		statusGen      *status.Status
+		recvCompress   string
+		httpStatusCode *int
+		httpStatusErr  string
+		rawStatusCode  = codes.Unknown
+		// headerError is set if an error is encountered while parsing the headers
+		headerError string
+	)
+
+	if initialHeader {
+		httpStatusErr = "malformed header: missing HTTP status"
+	}
+
+	// Walk every decoded header field, validating gRPC-reserved headers and
+	// accumulating the rest into mdata.
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+				contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+				break
+			}
+			contentTypeErr = ""
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			isGRPC = true
+		case "grpc-encoding":
+			recvCompress = hf.Value
+		case "grpc-status":
+			code, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			rawStatusCode = codes.Code(uint32(code))
+		case "grpc-message":
+			grpcMessage = decodeGrpcMessage(hf.Value)
+		case "grpc-status-details-bin":
+			var err error
+			statusGen, err = decodeGRPCStatusDetails(hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
+			}
+		case ":status":
+			if hf.Value == "200" {
+				httpStatusErr = ""
+				statusCode := 200
+				httpStatusCode = &statusCode
+				break
+			}
+
+			c, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			statusCode := int(c)
+			httpStatusCode = &statusCode
+
+			httpStatusErr = fmt.Sprintf(
+				"unexpected HTTP status code received from server: %d (%s)",
+				statusCode,
+				http.StatusText(statusCode),
+			)
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	if !isGRPC || httpStatusErr != "" {
+		var code = codes.Internal // when header does not include HTTP status, return INTERNAL
+
+		if httpStatusCode != nil {
+			var ok bool
+			code, ok = HTTPStatusConvTab[*httpStatusCode]
+			if !ok {
+				code = codes.Unknown
+			}
+		}
+		var errs []string
+		if httpStatusErr != "" {
+			errs = append(errs, httpStatusErr)
+		}
+		if contentTypeErr != "" {
+			errs = append(errs, contentTypeErr)
+		}
+		// Verify the HTTP response is a 200.
+		se := status.New(code, strings.Join(errs, "; "))
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	if headerError != "" {
+		se := status.New(codes.Internal, headerError)
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	isHeader := false
+
+	// If headerChan hasn't been closed yet
+	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+		s.headerValid = true
+		if !endStream {
+			// HEADERS frame block carries a Response-Headers.
+			isHeader = true
+			// These values can be set without any synchronization because
+			// stream goroutine will read it only after seeing a closed
+			// headerChan which we'll close after setting this.
+			s.recvCompress = recvCompress
+			if len(mdata) > 0 {
+				s.header = mdata
+			}
+		} else {
+			// HEADERS frame block carries a Trailers-Only.
+			s.noHeaders = true
+		}
+		close(s.headerChan)
+	}
+
+	for _, sh := range t.statsHandlers {
+		if isHeader {
+			inHeader := &stats.InHeader{
+				Client:      true,
+				WireLength:  int(frame.Header().Length),
+				Header:      metadata.MD(mdata).Copy(),
+				Compression: s.recvCompress,
+			}
+			sh.HandleRPC(s.ctx, inHeader)
+		} else {
+			inTrailer := &stats.InTrailer{
+				Client:     true,
+				WireLength: int(frame.Header().Length),
+				Trailer:    metadata.MD(mdata).Copy(),
+			}
+			sh.HandleRPC(s.ctx, inTrailer)
+		}
+	}
+
+	if !endStream {
+		return
+	}
+
+	if statusGen == nil {
+		statusGen = status.New(rawStatusCode, grpcMessage)
+	}
+
+	// if client received END_STREAM from server while stream was still active, send RST_STREAM
+	rst := s.getState() == streamActive
+	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
+}
+
+// readServerPreface reads and handles the initial settings frame from the
+// server.
+func (t *http2Client) readServerPreface() error {
+	frame, err := t.framer.fr.ReadFrame()
+	if err != nil {
+		return connectionErrorf(true, err, "error reading server preface: %v", err)
+	}
+	sf, ok := frame.(*http2.SettingsFrame)
+	if !ok {
+		return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
+	}
+	t.handleSettings(sf, true)
+	return nil
+}
+
+// reader verifies the server preface and reads all subsequent data from
+// network connection.  If the server preface is not read successfully, an
+// error is pushed to errCh; otherwise errCh is closed with no error.
+func (t *http2Client) reader(errCh chan<- error) {
+	defer close(t.readerDone)
+
+	if err := t.readServerPreface(); err != nil {
+		errCh <- err
+		return
+	}
+	close(errCh)
+	if t.keepaliveEnabled {
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+	}
+
+	// loop to keep reading incoming messages on this transport.
+	for {
+		t.controlBuf.throttle()
+		frame, err := t.framer.fr.ReadFrame()
+		if t.keepaliveEnabled {
+			atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+		}
+		if err != nil {
+			// Abort an active stream if the http2.Framer returns a
+			// http2.StreamError. This can happen only if the server's response
+			// is malformed http2.
+			if se, ok := err.(http2.StreamError); ok {
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					// use error detail to provide better err message
+					code := http2ErrConvTab[se.Code]
+					errorDetail := t.framer.fr.ErrorDetail()
+					var msg string
+					if errorDetail != nil {
+						msg = errorDetail.Error()
+					} else {
+						msg = "received invalid frame"
+					}
+					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
+				}
+				continue
+			} else {
+				// Transport error.
+				t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
+				return
+			}
+		}
+		// Dispatch the frame to the matching handler by concrete type.
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			t.operateHeaders(frame)
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame, false)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.GoAwayFrame:
+			t.handleGoAway(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		default:
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+			}
+		}
+	}
+}
+
+// minTime returns the smaller of two durations.
+func minTime(a, b time.Duration) time.Duration {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+	p := &ping{data: [8]byte{}}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	timeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	timer := time.NewTimer(t.kp.Time)
+	for {
+		select {
+		case <-timer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were here.
+				outstandingPing = false
+				// Next timer should fire at kp.Time seconds from lastRead time.
+				timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
+				continue
+			}
+			if outstandingPing && timeoutLeft <= 0 {
+				t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+				return
+			}
+			t.mu.Lock()
+			if t.state == closing {
+				// If the transport is closing, we should exit from the
+				// keepalive goroutine here. If not, we could have a race
+				// between the call to Signal() from Close() and the call to
+				// Wait() here, whereby the keepalive goroutine ends up
+				// blocking on the condition variable which will never be
+				// signalled again.
+				t.mu.Unlock()
+				return
+			}
+			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+				// If a ping was sent out previously (because there were active
+				// streams at that point) which wasn't acked and its timeout
+				// hadn't fired, but we got here and are about to go dormant,
+				// we should make sure that we unconditionally send a ping once
+				// we awaken.
+				outstandingPing = false
+				t.kpDormant = true
+				t.kpDormancyCond.Wait()
+			}
+			t.kpDormant = false
+			t.mu.Unlock()
+
+			// We get here either because we were dormant and a new stream was
+			// created which unblocked the Wait() call, or because the
+			// keepalive timer expired. In both cases, we need to send a ping.
+			if !outstandingPing {
+				if channelz.IsOn() {
+					atomic.AddInt64(&t.czData.kpCount, 1)
+				}
+				t.controlBuf.put(p)
+				timeoutLeft = t.kp.Timeout
+				outstandingPing = true
+			}
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, timeoutLeft)
+			timeoutLeft -= sleepDuration
+			timer.Reset(sleepDuration)
+		case <-t.ctx.Done():
+			if !timer.Stop() {
+				<-timer.C
+			}
+			return
+		}
+	}
+}
+
+// Error returns a channel that is closed when the transport's context is
+// done (e.g. after Close).
+func (t *http2Client) Error() <-chan struct{} {
+	return t.ctx.Done()
+}
+
+// GoAway returns a channel that is closed once a GOAWAY frame has been
+// received (see handleGoAway).
+func (t *http2Client) GoAway() <-chan struct{} {
+	return t.goAway
+}
+
+// ChannelzMetric snapshots the transport's channelz socket metrics.
+func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
+	s := channelz.SocketInternalMetric{
+		StreamsStarted:                  atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                   atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                    atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                  atomic.LoadInt64(&t.czData.kpCount),
+		LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:        time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:    time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
+		LocalFlowControlWindow:          int64(t.fc.getSize()),
+		SocketOptions:                   channelz.GetSocketOption(t.conn),
+		LocalAddr:                       t.localAddr,
+		RemoteAddr:                      t.remoteAddr,
+		// RemoteName :
+	}
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
+	s.RemoteFlowControlWindow = t.getOutFlowWindow()
+	return &s
+}
+
+// RemoteAddr returns the remote address recorded for this transport.
+func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
+
+// IncrMsgSent bumps the sent-message channelz counters.
+func (t *http2Client) IncrMsgSent() {
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+}
+
+// IncrMsgRecv bumps the received-message channelz counters.
+func (t *http2Client) IncrMsgRecv() {
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+}
+
+// getOutFlowWindow asks the controlBuf for the current outbound
+// flow-control window. Returns -1 if the transport is done and -2 if the
+// one-second timeout fires first.
+func (t *http2Client) getOutFlowWindow() int64 {
+	resp := make(chan uint32, 1)
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	t.controlBuf.put(&outFlowControlSizeRequest{resp})
+	select {
+	case sz := <-resp:
+		return int64(sz)
+	case <-t.ctxDone:
+		return -1
+	case <-timer.C:
+		return -2
+	}
+}
+
+// stateForTesting returns the transport state under lock; for tests only.
+func (t *http2Client) stateForTesting() transportState {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.state
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
new file mode 100644
index 000000000..bc3da7067
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -0,0 +1,1461 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/internal/syscall"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+var (
+	// ErrIllegalHeaderWrite indicates that setting header is illegal because of
+	// the stream's state.
+	ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
+	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
+	// than the limit set by peer.
+	ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
+)
+
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
+// http2Server implements the ServerTransport interface with HTTP2.
+type http2Server struct {
+	lastRead    int64 // Keep this field 64-bit aligned. Accessed atomically.
+	ctx         context.Context
+	done        chan struct{}
+	conn        net.Conn
+	loopy       *loopyWriter
+	readerDone  chan struct{} // sync point to enable testing.
+	writerDone  chan struct{} // sync point to enable testing.
+	remoteAddr  net.Addr
+	localAddr   net.Addr
+	authInfo    credentials.AuthInfo // auth info about the connection
+	inTapHandle tap.ServerInHandle
+	framer      *framer
+	// The max number of concurrent streams.
+	maxStreams uint32
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	controlBuf *controlBuffer
+	// fc tracks connection-level inbound flow control.
+	fc         *trInFlow
+	// stats holds the stats handlers registered for this transport.
+	stats      []stats.Handler
+	// Keepalive and max-age parameters for the server.
+	kp keepalive.ServerParameters
+	// Keepalive enforcement policy.
+	kep keepalive.EnforcementPolicy
+	// The time instance last ping was received.
+	lastPingAt time.Time
+	// Number of times the client has violated keepalive ping policy so far.
+	pingStrikes uint8
+	// Flag to signify that number of ping strikes should be reset to 0.
+	// This is set whenever data or header frames are sent.
+	// 1 means yes.
+	resetPingStrikes      uint32 // Accessed atomically.
+	initialWindowSize     int32
+	bdpEst                *bdpEstimator
+	maxSendHeaderListSize *uint32
+
+	mu sync.Mutex // guard the following
+
+	// drainEvent is initialized when Drain() is called the first time. After
+	// which the server writes out the first GoAway(with ID 2^31-1) frame. Then
+	// an independent goroutine will be launched to later send the second
+	// GoAway. During this time we don't want to write another first GoAway(with
+	// ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is
+	// already initialized since draining is already underway.
+	drainEvent    *grpcsync.Event
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// idle is the time instant when the connection went idle.
+	// This is either the beginning of the connection or when the number of
+	// RPCs go down to 0.
+	// When the connection is busy, this value is set to 0.
+	idle time.Time
+
+	// Fields below are for channelz metric collection.
+	channelzID *channelz.Identifier
+	czData     *channelzData
+	bufferPool *bufferPool
+
+	connectionID uint64
+
+	// maxStreamMu guards the maximum stream ID
+	// This lock may not be taken if mu is already held.
+	maxStreamMu sync.Mutex
+	maxStreamID uint32 // max stream ID ever seen
+}
+
+// NewServerTransport creates a http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+	var authInfo credentials.AuthInfo
+	rawConn := conn
+	if config.Credentials != nil {
+		var err error
+		conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+		if err != nil {
+			// ErrConnDispatched means that the connection was dispatched away
+			// from gRPC; those connections should be left open. io.EOF means
+			// the connection was closed before handshaking completed, which can
+			// happen naturally from probers. Return these errors directly.
+			if err == credentials.ErrConnDispatched || err == io.EOF {
+				return nil, err
+			}
+			return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+		}
+	}
+	writeBufSize := config.WriteBufferSize
+	readBufSize := config.ReadBufferSize
+	maxHeaderListSize := defaultServerMaxHeaderListSize
+	if config.MaxHeaderListSize != nil {
+		maxHeaderListSize = *config.MaxHeaderListSize
+	}
+	framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
+	// Send initial settings as connection preface to client.
+	isettings := []http2.Setting{{ +		ID:  http2.SettingMaxFrameSize, +		Val: http2MaxFrameLen, +	}} +	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is +	// permitted in the HTTP2 spec. +	maxStreams := config.MaxStreams +	if maxStreams == 0 { +		maxStreams = math.MaxUint32 +	} else { +		isettings = append(isettings, http2.Setting{ +			ID:  http2.SettingMaxConcurrentStreams, +			Val: maxStreams, +		}) +	} +	dynamicWindow := true +	iwz := int32(initialWindowSize) +	if config.InitialWindowSize >= defaultWindowSize { +		iwz = config.InitialWindowSize +		dynamicWindow = false +	} +	icwz := int32(initialWindowSize) +	if config.InitialConnWindowSize >= defaultWindowSize { +		icwz = config.InitialConnWindowSize +		dynamicWindow = false +	} +	if iwz != defaultWindowSize { +		isettings = append(isettings, http2.Setting{ +			ID:  http2.SettingInitialWindowSize, +			Val: uint32(iwz)}) +	} +	if config.MaxHeaderListSize != nil { +		isettings = append(isettings, http2.Setting{ +			ID:  http2.SettingMaxHeaderListSize, +			Val: *config.MaxHeaderListSize, +		}) +	} +	if config.HeaderTableSize != nil { +		isettings = append(isettings, http2.Setting{ +			ID:  http2.SettingHeaderTableSize, +			Val: *config.HeaderTableSize, +		}) +	} +	if err := framer.fr.WriteSettings(isettings...); err != nil { +		return nil, connectionErrorf(false, err, "transport: %v", err) +	} +	// Adjust the connection flow control window if needed. +	if delta := uint32(icwz - defaultWindowSize); delta > 0 { +		if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { +			return nil, connectionErrorf(false, err, "transport: %v", err) +		} +	} +	kp := config.KeepaliveParams +	if kp.MaxConnectionIdle == 0 { +		kp.MaxConnectionIdle = defaultMaxConnectionIdle +	} +	if kp.MaxConnectionAge == 0 { +		kp.MaxConnectionAge = defaultMaxConnectionAge +	} +	// Add a jitter to MaxConnectionAge. 
+	kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) +	if kp.MaxConnectionAgeGrace == 0 { +		kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace +	} +	if kp.Time == 0 { +		kp.Time = defaultServerKeepaliveTime +	} +	if kp.Timeout == 0 { +		kp.Timeout = defaultServerKeepaliveTimeout +	} +	if kp.Time != infinity { +		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { +			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) +		} +	} +	kep := config.KeepalivePolicy +	if kep.MinTime == 0 { +		kep.MinTime = defaultKeepalivePolicyMinTime +	} + +	done := make(chan struct{}) +	t := &http2Server{ +		ctx:               setConnection(context.Background(), rawConn), +		done:              done, +		conn:              conn, +		remoteAddr:        conn.RemoteAddr(), +		localAddr:         conn.LocalAddr(), +		authInfo:          authInfo, +		framer:            framer, +		readerDone:        make(chan struct{}), +		writerDone:        make(chan struct{}), +		maxStreams:        maxStreams, +		inTapHandle:       config.InTapHandle, +		fc:                &trInFlow{limit: uint32(icwz)}, +		state:             reachable, +		activeStreams:     make(map[uint32]*Stream), +		stats:             config.StatsHandlers, +		kp:                kp, +		idle:              time.Now(), +		kep:               kep, +		initialWindowSize: iwz, +		czData:            new(channelzData), +		bufferPool:        newBufferPool(), +	} +	// Add peer information to the http2server context. 
+	t.ctx = peer.NewContext(t.ctx, t.getPeer()) + +	t.controlBuf = newControlBuffer(t.done) +	if dynamicWindow { +		t.bdpEst = &bdpEstimator{ +			bdp:               initialWindowSize, +			updateFlowControl: t.updateFlowControl, +		} +	} +	for _, sh := range t.stats { +		t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ +			RemoteAddr: t.remoteAddr, +			LocalAddr:  t.localAddr, +		}) +		connBegin := &stats.ConnBegin{} +		sh.HandleConn(t.ctx, connBegin) +	} +	t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) +	if err != nil { +		return nil, err +	} + +	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) +	t.framer.writer.Flush() + +	defer func() { +		if err != nil { +			t.Close(err) +		} +	}() + +	// Check the validity of client preface. +	preface := make([]byte, len(clientPreface)) +	if _, err := io.ReadFull(t.conn, preface); err != nil { +		// In deployments where a gRPC server runs behind a cloud load balancer +		// which performs regular TCP level health checks, the connection is +		// closed immediately by the latter.  Returning io.EOF here allows the +		// grpc server implementation to recognize this scenario and suppress +		// logging to reduce spam. 
+		if err == io.EOF { +			return nil, io.EOF +		} +		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) +	} +	if !bytes.Equal(preface, clientPreface) { +		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) +	} + +	frame, err := t.framer.fr.ReadFrame() +	if err == io.EOF || err == io.ErrUnexpectedEOF { +		return nil, err +	} +	if err != nil { +		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) +	} +	atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) +	sf, ok := frame.(*http2.SettingsFrame) +	if !ok { +		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) +	} +	t.handleSettings(sf) + +	go func() { +		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) +		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler +		err := t.loopy.run() +		if logger.V(logLevel) { +			logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) +		} +		t.conn.Close() +		t.controlBuf.finish() +		close(t.writerDone) +	}() +	go t.keepalive() +	return t, nil +} + +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +	// Acquire max stream ID lock for entire duration +	t.maxStreamMu.Lock() +	defer t.maxStreamMu.Unlock() + +	streamID := frame.Header().StreamID + +	// frame.Truncated is set to true when framer detects that the current header +	// list size hits MaxHeaderListSize limit. 
+	if frame.Truncated { +		t.controlBuf.put(&cleanupStream{ +			streamID: streamID, +			rst:      true, +			rstCode:  http2.ErrCodeFrameSize, +			onWrite:  func() {}, +		}) +		return nil +	} + +	if streamID%2 != 1 || streamID <= t.maxStreamID { +		// illegal gRPC stream id. +		return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) +	} +	t.maxStreamID = streamID + +	buf := newRecvBuffer() +	s := &Stream{ +		id:  streamID, +		st:  t, +		buf: buf, +		fc:  &inFlow{limit: uint32(t.initialWindowSize)}, +	} +	var ( +		// if false, content-type was missing or invalid +		isGRPC      = false +		contentType = "" +		mdata       = make(map[string][]string) +		httpMethod  string +		// these are set if an error is encountered while parsing the headers +		protocolError bool +		headerError   *status.Status + +		timeoutSet bool +		timeout    time.Duration +	) + +	for _, hf := range frame.Fields { +		switch hf.Name { +		case "content-type": +			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) +			if !validContentType { +				contentType = hf.Value +				break +			} +			mdata[hf.Name] = append(mdata[hf.Name], hf.Value) +			s.contentSubtype = contentSubtype +			isGRPC = true +		case "grpc-encoding": +			s.recvCompress = hf.Value +		case ":method": +			httpMethod = hf.Value +		case ":path": +			s.method = hf.Value +		case "grpc-timeout": +			timeoutSet = true +			var err error +			if timeout, err = decodeTimeout(hf.Value); err != nil { +				headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) +			} +		// "Transports must consider requests containing the Connection header +		// as malformed." 
- A41 +		case "connection": +			if logger.V(logLevel) { +				logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") +			} +			protocolError = true +		default: +			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { +				break +			} +			v, err := decodeMetadataHeader(hf.Name, hf.Value) +			if err != nil { +				headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) +				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) +				break +			} +			mdata[hf.Name] = append(mdata[hf.Name], v) +		} +	} + +	// "If multiple Host headers or multiple :authority headers are present, the +	// request must be rejected with an HTTP status code 400 as required by Host +	// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM +	// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 +	// error, this takes precedence over a client not speaking gRPC. 
+	if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { +		errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) +		if logger.V(logLevel) { +			logger.Errorf("transport: %v", errMsg) +		} +		t.controlBuf.put(&earlyAbortStream{ +			httpStatus:     http.StatusBadRequest, +			streamID:       streamID, +			contentSubtype: s.contentSubtype, +			status:         status.New(codes.Internal, errMsg), +			rst:            !frame.StreamEnded(), +		}) +		return nil +	} + +	if protocolError { +		t.controlBuf.put(&cleanupStream{ +			streamID: streamID, +			rst:      true, +			rstCode:  http2.ErrCodeProtocol, +			onWrite:  func() {}, +		}) +		return nil +	} +	if !isGRPC { +		t.controlBuf.put(&earlyAbortStream{ +			httpStatus:     http.StatusUnsupportedMediaType, +			streamID:       streamID, +			contentSubtype: s.contentSubtype, +			status:         status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), +			rst:            !frame.StreamEnded(), +		}) +		return nil +	} +	if headerError != nil { +		t.controlBuf.put(&earlyAbortStream{ +			httpStatus:     http.StatusBadRequest, +			streamID:       streamID, +			contentSubtype: s.contentSubtype, +			status:         headerError, +			rst:            !frame.StreamEnded(), +		}) +		return nil +	} + +	// "If :authority is missing, Host must be renamed to :authority." - A41 +	if len(mdata[":authority"]) == 0 { +		// No-op if host isn't present, no eventual :authority header is a valid +		// RPC. +		if host, ok := mdata["host"]; ok { +			mdata[":authority"] = host +			delete(mdata, "host") +		} +	} else { +		// "If :authority is present, Host must be discarded" - A41 +		delete(mdata, "host") +	} + +	if frame.StreamEnded() { +		// s is just created by the caller. No lock needed. 
+		s.state = streamReadDone +	} +	if timeoutSet { +		s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) +	} else { +		s.ctx, s.cancel = context.WithCancel(t.ctx) +	} + +	// Attach the received metadata to the context. +	if len(mdata) > 0 { +		s.ctx = metadata.NewIncomingContext(s.ctx, mdata) +		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { +			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) +		} +		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { +			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) +		} +	} +	t.mu.Lock() +	if t.state != reachable { +		t.mu.Unlock() +		s.cancel() +		return nil +	} +	if uint32(len(t.activeStreams)) >= t.maxStreams { +		t.mu.Unlock() +		t.controlBuf.put(&cleanupStream{ +			streamID: streamID, +			rst:      true, +			rstCode:  http2.ErrCodeRefusedStream, +			onWrite:  func() {}, +		}) +		s.cancel() +		return nil +	} +	if httpMethod != http.MethodPost { +		t.mu.Unlock() +		errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) +		if logger.V(logLevel) { +			logger.Infof("transport: %v", errMsg) +		} +		t.controlBuf.put(&earlyAbortStream{ +			httpStatus:     405, +			streamID:       streamID, +			contentSubtype: s.contentSubtype, +			status:         status.New(codes.Internal, errMsg), +			rst:            !frame.StreamEnded(), +		}) +		s.cancel() +		return nil +	} +	if t.inTapHandle != nil { +		var err error +		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { +			t.mu.Unlock() +			if logger.V(logLevel) { +				logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) +			} +			stat, ok := status.FromError(err) +			if !ok { +				stat = status.New(codes.PermissionDenied, err.Error()) +			} +			t.controlBuf.put(&earlyAbortStream{ +				httpStatus:     200, +				streamID:       s.id, +				contentSubtype: s.contentSubtype, +				
status:         stat, +				rst:            !frame.StreamEnded(), +			}) +			return nil +		} +	} +	t.activeStreams[streamID] = s +	if len(t.activeStreams) == 1 { +		t.idle = time.Time{} +	} +	t.mu.Unlock() +	if channelz.IsOn() { +		atomic.AddInt64(&t.czData.streamsStarted, 1) +		atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) +	} +	s.requestRead = func(n int) { +		t.adjustWindow(s, uint32(n)) +	} +	s.ctx = traceCtx(s.ctx, s.method) +	for _, sh := range t.stats { +		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) +		inHeader := &stats.InHeader{ +			FullMethod:  s.method, +			RemoteAddr:  t.remoteAddr, +			LocalAddr:   t.localAddr, +			Compression: s.recvCompress, +			WireLength:  int(frame.Header().Length), +			Header:      metadata.MD(mdata).Copy(), +		} +		sh.HandleRPC(s.ctx, inHeader) +	} +	s.ctxDone = s.ctx.Done() +	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) +	s.trReader = &transportReader{ +		reader: &recvBufferReader{ +			ctx:        s.ctx, +			ctxDone:    s.ctxDone, +			recv:       s.buf, +			freeBuffer: t.bufferPool.put, +		}, +		windowHandler: func(n int) { +			t.updateWindow(s, uint32(n)) +		}, +	} +	// Register the stream with loopy. +	t.controlBuf.put(®isterStream{ +		streamID: s.id, +		wq:       s.wq, +	}) +	handle(s) +	return nil +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { +	defer close(t.readerDone) +	for { +		t.controlBuf.throttle() +		frame, err := t.framer.fr.ReadFrame() +		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) +		if err != nil { +			if se, ok := err.(http2.StreamError); ok { +				if logger.V(logLevel) { +					logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) +				} +				t.mu.Lock() +				s := t.activeStreams[se.StreamID] +				t.mu.Unlock() +				if s != nil { +					t.closeStream(s, true, se.Code, false) +				} else { +					t.controlBuf.put(&cleanupStream{ +						streamID: se.StreamID, +						rst:      true, +						rstCode:  se.Code, +						onWrite:  func() {}, +					}) +				} +				continue +			} +			if err == io.EOF || err == io.ErrUnexpectedEOF { +				t.Close(err) +				return +			} +			t.Close(err) +			return +		} +		switch frame := frame.(type) { +		case *http2.MetaHeadersFrame: +			if err := t.operateHeaders(frame, handle, traceCtx); err != nil { +				t.Close(err) +				break +			} +		case *http2.DataFrame: +			t.handleData(frame) +		case *http2.RSTStreamFrame: +			t.handleRSTStream(frame) +		case *http2.SettingsFrame: +			t.handleSettings(frame) +		case *http2.PingFrame: +			t.handlePing(frame) +		case *http2.WindowUpdateFrame: +			t.handleWindowUpdate(frame) +		case *http2.GoAwayFrame: +			// TODO: Handle GoAway from the client appropriately. +		default: +			if logger.V(logLevel) { +				logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) +			} +		} +	} +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +	t.mu.Lock() +	defer t.mu.Unlock() +	if t.activeStreams == nil { +		// The transport is closing. +		return nil, false +	} +	s, ok := t.activeStreams[f.Header().StreamID] +	if !ok { +		// The stream is already done. 
+		return nil, false +	} +	return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { +	if w := s.fc.maybeAdjust(n); w > 0 { +		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) +	} + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { +	if w := s.fc.onRead(n); w > 0 { +		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, +			increment: w, +		}) +	} +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { +	t.mu.Lock() +	for _, s := range t.activeStreams { +		s.fc.newLimit(n) +	} +	t.initialWindowSize = int32(n) +	t.mu.Unlock() +	t.controlBuf.put(&outgoingWindowUpdate{ +		streamID:  0, +		increment: t.fc.newLimit(n), +	}) +	t.controlBuf.put(&outgoingSettings{ +		ss: []http2.Setting{ +			{ +				ID:  http2.SettingInitialWindowSize, +				Val: n, +			}, +		}, +	}) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { +	size := f.Header().Length +	var sendBDPPing bool +	if t.bdpEst != nil { +		sendBDPPing = t.bdpEst.add(size) +	} +	// Decouple connection's flow control from application's read. +	// An update on connection's flow control should not depend on +	// whether user application has read the data or not. Such a +	// restriction is already imposed on the stream's flow control, +	// and therefore the sender will be blocked anyways. +	// Decoupling the connection flow control will prevent other +	// active(fast) streams from starving in presence of slow or +	// inactive streams. 
+	if w := t.fc.onData(size); w > 0 { +		t.controlBuf.put(&outgoingWindowUpdate{ +			streamID:  0, +			increment: w, +		}) +	} +	if sendBDPPing { +		// Avoid excessive ping detection (e.g. in an L7 proxy) +		// by sending a window update prior to the BDP ping. +		if w := t.fc.reset(); w > 0 { +			t.controlBuf.put(&outgoingWindowUpdate{ +				streamID:  0, +				increment: w, +			}) +		} +		t.controlBuf.put(bdpPing) +	} +	// Select the right stream to dispatch. +	s, ok := t.getStream(f) +	if !ok { +		return +	} +	if s.getState() == streamReadDone { +		t.closeStream(s, true, http2.ErrCodeStreamClosed, false) +		return +	} +	if size > 0 { +		if err := s.fc.onData(size); err != nil { +			t.closeStream(s, true, http2.ErrCodeFlowControl, false) +			return +		} +		if f.Header().Flags.Has(http2.FlagDataPadded) { +			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { +				t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) +			} +		} +		// TODO(bradfitz, zhaoq): A copy is required here because there is no +		// guarantee f.Data() is consumed before the arrival of next frame. +		// Can this copy be eliminated? +		if len(f.Data()) > 0 { +			buffer := t.bufferPool.get() +			buffer.Reset() +			buffer.Write(f.Data()) +			s.write(recvMsg{buffer: buffer}) +		} +	} +	if f.StreamEnded() { +		// Received the end of stream from the client. +		s.compareAndSwapState(streamActive, streamReadDone) +		s.write(recvMsg{err: io.EOF}) +	} +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { +	// If the stream is not deleted from the transport's active streams map, then do a regular close stream. +	if s, ok := t.getStream(f); ok { +		t.closeStream(s, false, 0, false) +		return +	} +	// If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
+	t.controlBuf.put(&cleanupStream{ +		streamID: f.Header().StreamID, +		rst:      false, +		rstCode:  0, +		onWrite:  func() {}, +	}) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { +	if f.IsAck() { +		return +	} +	var ss []http2.Setting +	var updateFuncs []func() +	f.ForeachSetting(func(s http2.Setting) error { +		switch s.ID { +		case http2.SettingMaxHeaderListSize: +			updateFuncs = append(updateFuncs, func() { +				t.maxSendHeaderListSize = new(uint32) +				*t.maxSendHeaderListSize = s.Val +			}) +		default: +			ss = append(ss, s) +		} +		return nil +	}) +	t.controlBuf.executeAndPut(func(interface{}) bool { +		for _, f := range updateFuncs { +			f() +		} +		return true +	}, &incomingSettings{ +		ss: ss, +	}) +} + +const ( +	maxPingStrikes     = 2 +	defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { +	if f.IsAck() { +		if f.Data == goAwayPing.data && t.drainEvent != nil { +			t.drainEvent.Fire() +			return +		} +		// Maybe it's a BDP ping. +		if t.bdpEst != nil { +			t.bdpEst.calculate(f.Data) +		} +		return +	} +	pingAck := &ping{ack: true} +	copy(pingAck.data[:], f.Data[:]) +	t.controlBuf.put(pingAck) + +	now := time.Now() +	defer func() { +		t.lastPingAt = now +	}() +	// A reset ping strikes means that we don't need to check for policy +	// violation for this ping and the pingStrikes counter should be set +	// to 0. +	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { +		t.pingStrikes = 0 +		return +	} +	t.mu.Lock() +	ns := len(t.activeStreams) +	t.mu.Unlock() +	if ns < 1 && !t.kep.PermitWithoutStream { +		// Keepalive shouldn't be active thus, this new ping should +		// have come after at least defaultPingTimeout. +		if t.lastPingAt.Add(defaultPingTimeout).After(now) { +			t.pingStrikes++ +		} +	} else { +		// Check if keepalive policy is respected. 
+		if t.lastPingAt.Add(t.kep.MinTime).After(now) { +			t.pingStrikes++ +		} +	} + +	if t.pingStrikes > maxPingStrikes { +		// Send goaway and close the connection. +		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) +	} +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { +	t.controlBuf.put(&incomingWindowUpdate{ +		streamID:  f.Header().StreamID, +		increment: f.Increment, +	}) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { +	for k, vv := range md { +		if isReservedHeader(k) { +			// Clients don't tolerate reading restricted headers after some non restricted ones were sent. +			continue +		} +		for _, v := range vv { +			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) +		} +	} +	return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +	if t.maxSendHeaderListSize == nil { +		return true +	} +	hdrFrame := it.(*headerFrame) +	var sz int64 +	for _, f := range hdrFrame.hf { +		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { +			if logger.V(logLevel) { +				logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) +			} +			return false +		} +	} +	return true +} + +func (t *http2Server) streamContextErr(s *Stream) error { +	select { +	case <-t.done: +		return ErrConnClosing +	default: +	} +	return ContextErr(s.ctx.Err()) +} + +// WriteHeader sends the header metadata md back to the client. 
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +	s.hdrMu.Lock() +	defer s.hdrMu.Unlock() +	if s.getState() == streamDone { +		return t.streamContextErr(s) +	} + +	if s.updateHeaderSent() { +		return ErrIllegalHeaderWrite +	} + +	if md.Len() > 0 { +		if s.header.Len() > 0 { +			s.header = metadata.Join(s.header, md) +		} else { +			s.header = md +		} +	} +	if err := t.writeHeaderLocked(s); err != nil { +		return status.Convert(err).Err() +	} +	return nil +} + +func (t *http2Server) setResetPingStrikes() { +	atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { +	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields +	// first and create a slice of that exact size. +	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. +	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) +	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) +	if s.sendCompress != "" { +		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) +	} +	headerFields = appendHeaderFieldsFromMD(headerFields, s.header) +	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ +		streamID:  s.id, +		hf:        headerFields, +		endStream: false, +		onWrite:   t.setResetPingStrikes, +	}) +	if !success { +		if err != nil { +			return err +		} +		t.closeStream(s, true, http2.ErrCodeInternal, false) +		return ErrHeaderListSizeLimitViolation +	} +	for _, sh := range t.stats { +		// Note: Headers are compressed with hpack after this call returns. +		// No WireLength field is set here. 
+		outHeader := &stats.OutHeader{ +			Header:      s.header.Copy(), +			Compression: s.sendCompress, +		} +		sh.HandleRPC(s.Context(), outHeader) +	} +	return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. +// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +	s.hdrMu.Lock() +	defer s.hdrMu.Unlock() + +	if s.getState() == streamDone { +		return nil +	} + +	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields +	// first and create a slice of that exact size. +	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. +	if !s.updateHeaderSent() {                      // No headers have been sent. +		if len(s.header) > 0 { // Send a separate header frame. +			if err := t.writeHeaderLocked(s); err != nil { +				return err +			} +		} else { // Send a trailer only response. +			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) +			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) +		} +	} +	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) +	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + +	if p := st.Proto(); p != nil && len(p.Details) > 0 { +		stBytes, err := proto.Marshal(p) +		if err != nil { +			// TODO: return error instead, when callers are able to handle it. 
+			logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) +		} else { +			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) +		} +	} + +	// Attach the trailer metadata. +	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) +	trailingHeader := &headerFrame{ +		streamID:  s.id, +		hf:        headerFields, +		endStream: true, +		onWrite:   t.setResetPingStrikes, +	} + +	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) +	if !success { +		if err != nil { +			return err +		} +		t.closeStream(s, true, http2.ErrCodeInternal, false) +		return ErrHeaderListSizeLimitViolation +	} +	// Send a RST_STREAM after the trailers if the client has not already half-closed. +	rst := s.getState() == streamActive +	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) +	for _, sh := range t.stats { +		// Note: The trailer fields are compressed with hpack after this call returns. +		// No WireLength field is set here. +		sh.HandleRPC(s.Context(), &stats.OutTrailer{ +			Trailer: s.trailer.Copy(), +		}) +	} +	return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +	if !s.isHeaderSent() { // Headers haven't been written yet. +		if err := t.WriteHeader(s, nil); err != nil { +			return err +		} +	} else { +		// Writing headers checks for this condition. 
+		if s.getState() == streamDone {
+			return t.streamContextErr(s)
+		}
+	}
+	df := &dataFrame{
+		streamID:    s.id,
+		h:           hdr,
+		d:           data,
+		onEachWrite: t.setResetPingStrikes,
+	}
+	// Reserve write quota for the whole message before handing the frame to
+	// the control buffer; get returns an error once the stream is done, which
+	// is translated via streamContextErr.
+	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+		return t.streamContextErr(s)
+	}
+	return t.controlBuf.put(df)
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
+	defer func() {
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
+	}()
+
+	for {
+		select {
+		case <-idleTimer.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+				t.mu.Unlock()
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
+				continue
+			}
+			val := t.kp.MaxConnectionIdle - time.Since(idle)
+			t.mu.Unlock()
+			if val <= 0 {
+				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+				// Gracefully close the connection.
+				t.Drain()
+				return
+			}
+			idleTimer.Reset(val)
+		case <-ageTimer.C:
+			t.Drain()
+			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
+			select {
+			case <-ageTimer.C:
+				// Close the connection after grace period.
+				if logger.V(logLevel) {
+					logger.Infof("transport: closing server transport due to maximum connection age.")
+				}
+				t.controlBuf.put(closeConnection{})
+			case <-t.done:
+			}
+			return
+		case <-kpTimer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were
+				// here. Setup the timer to fire at kp.Time seconds from
+				// lastRead time and continue.
+				outstandingPing = false
+				kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
+				continue
+			}
+			if outstandingPing && kpTimeoutLeft <= 0 {
+				// NOTE(review): the message says "timeout" but formats t.kp.Time;
+				// t.kp.Timeout looks like the intended value — confirm upstream.
+				t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
+				return
+			}
+			if !outstandingPing {
+				if channelz.IsOn() {
+					atomic.AddInt64(&t.czData.kpCount, 1)
+				}
+				t.controlBuf.put(p)
+				kpTimeoutLeft = t.kp.Timeout
+				outstandingPing = true
+			}
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+			kpTimeoutLeft -= sleepDuration
+			kpTimer.Reset(sleepDuration)
+		case <-t.done:
+			return
+		}
+	}
+}
+
+// Close starts shutting down the http2Server transport.
+// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
+// could cause some resource issue. Revisit this later.
+func (t *http2Server) Close(err error) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if logger.V(logLevel) {
+		logger.Infof("transport: closing: %v", err)
+	}
+	t.state = closing
+	streams := t.activeStreams
+	t.activeStreams = nil
+	t.mu.Unlock()
+	t.controlBuf.finish()
+	// Signal all goroutines selecting on t.done before tearing down the
+	// underlying connection.
+	close(t.done)
+	if err := t.conn.Close(); err != nil && logger.V(logLevel) {
+		logger.Infof("transport: error closing conn during Close: %v", err)
+	}
+	channelz.RemoveEntry(t.channelzID)
+	// Cancel all active streams.
+	for _, s := range streams {
+		s.cancel()
+	}
+	for _, sh := range t.stats {
+		connEnd := &stats.ConnEnd{}
+		sh.HandleConn(t.ctx, connEnd)
+	}
+}
+
+// deleteStream deletes the stream s from transport's active streams.
+func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
+
+	t.mu.Lock()
+	if _, ok := t.activeStreams[s.id]; ok {
+		delete(t.activeStreams, s.id)
+		if len(t.activeStreams) == 0 {
+			// Record when the transport went idle; keepalive reads t.idle to
+			// enforce MaxConnectionIdle.
+			t.idle = time.Now()
+		}
+	}
+	t.mu.Unlock()
+
+	if channelz.IsOn() {
+		if eosReceived {
+			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+		} else {
+			atomic.AddInt64(&t.czData.streamsFailed, 1)
+		}
+	}
+}
+
+// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
+func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	oldState := s.swapState(streamDone)
+	if oldState == streamDone {
+		// If the stream was already done, return.
+		return
+	}
+
+	// Defer the actual stream deletion until loopy has written the trailing
+	// header frame (via the cleanup onWrite callback).
+	hdr.cleanup = &cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite: func() {
+			t.deleteStream(s, eosReceived)
+		},
+	}
+	t.controlBuf.put(hdr)
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed any more.
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	s.swapState(streamDone)
+	t.deleteStream(s, eosReceived)
+
+	t.controlBuf.put(&cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite:  func() {},
+	})
+}
+
+func (t *http2Server) RemoteAddr() net.Addr {
+	return t.remoteAddr
+}
+
+func (t *http2Server) Drain() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.drainEvent != nil {
+		return
+	}
+	t.drainEvent = grpcsync.NewEvent()
+	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
+}
+
+var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+
+// Handles outgoing GoAway and returns true if loopy needs to put itself
+// in draining mode.
+func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+	t.maxStreamMu.Lock()
+	t.mu.Lock()
+	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
+		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
+		// The transport is closing.
+		return false, ErrConnClosing
+	}
+	if !g.headsUp {
+		// Stop accepting more streams now.
+		t.state = draining
+		sid := t.maxStreamID
+		retErr := g.closeConn
+		if len(t.activeStreams) == 0 {
+			retErr = errors.New("second GOAWAY written and no active streams left to process")
+		}
+		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
+		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
+			return false, err
+		}
+		if retErr != nil {
+			// Abruptly close the connection following the GoAway (via
+			// loopywriter).  But flush out what's inside the buffer first.
+			t.framer.writer.Flush()
+			return false, retErr
+		}
+		return true, nil
+	}
+	t.mu.Unlock()
+	t.maxStreamMu.Unlock()
+	// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
+	// Follow that with a ping and wait for the ack to come back or a timer
+	// to expire. During this time accept new streams since they might have
+	// originated before the GoAway reaches the client.
+	// After getting the ack or timer expiration send out another GoAway this
+	// time with an ID of the max stream server intends to process.
+	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+		return false, err
+	}
+	if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
+		return false, err
+	}
+	// Wait (bounded by one minute) for the ping ack or drain event before
+	// queueing the second, final GOAWAY; bail out early if the transport dies.
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		defer timer.Stop()
+		select {
+		case <-t.drainEvent.Done():
+		case <-timer.C:
+		case <-t.done:
+			return
+		}
+		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
+	}()
+	return false, nil
+}
+
+func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
+	s := channelz.SocketInternalMetric{
+		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
+		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
+		LocalFlowControlWindow:           int64(t.fc.getSize()),
+		SocketOptions:                    channelz.GetSocketOption(t.conn),
+		LocalAddr:                        t.localAddr,
+		RemoteAddr:                       t.remoteAddr,
+		// RemoteName :
+	}
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
+	s.RemoteFlowControlWindow = t.getOutFlowWindow()
+	return &s
+}
+
+func (t *http2Server) IncrMsgSent() {
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+}
+
+func (t *http2Server) IncrMsgRecv() {
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+}
+
+// getOutFlowWindow asks loopy for the current outbound flow-control window.
+// It returns -1 if the transport is done and -2 on a one-second timeout.
+func (t *http2Server) getOutFlowWindow() int64 {
+	resp := make(chan uint32, 1)
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	t.controlBuf.put(&outFlowControlSizeRequest{resp})
+	select {
+	case sz := <-resp:
+		return int64(sz)
+	case <-t.done:
+		return -1
+	case <-timer.C:
+		return -2
+	}
+}
+
+func (t *http2Server) getPeer() *peer.Peer {
+	return &peer.Peer{
+		Addr:     t.remoteAddr,
+		AuthInfo: t.authInfo, // Can be nil
+	}
+}
+
+func getJitter(v time.Duration) time.Duration {
+	if v == infinity {
+		return 0
+	}
+	// Generate a jitter between +/- 10% of the value.
+	r := int64(v / 10)
+	j := grpcrand.Int63n(2*r) - r
+	return time.Duration(j)
+}
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+	return conn
+}
+
+// SetConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func setConnection(ctx context.Context, conn net.Conn) context.Context {
+	return context.WithValue(ctx, connectionKey{}, conn)
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
new file mode 100644
index 000000000..2c601a864
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -0,0 +1,412 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// http2MaxFrameLen specifies the max length of a HTTP2 frame.
+	http2MaxFrameLen = 16384 // 16KB frame
+	// https://httpwg.org/specs/rfc7540.html#SettingValues
+	http2InitHeaderTableSize = 4096
+)
+
+var (
+	clientPreface   = []byte(http2.ClientPreface)
+	// http2ErrConvTab maps HTTP/2 error codes to gRPC status codes.
+	http2ErrConvTab = map[http2.ErrCode]codes.Code{
+		http2.ErrCodeNo:                 codes.Internal,
+		http2.ErrCodeProtocol:           codes.Internal,
+		http2.ErrCodeInternal:           codes.Internal,
+		http2.ErrCodeFlowControl:        codes.ResourceExhausted,
+		http2.ErrCodeSettingsTimeout:    codes.Internal,
+		http2.ErrCodeStreamClosed:       codes.Internal,
+		http2.ErrCodeFrameSize:          codes.Internal,
+		http2.ErrCodeRefusedStream:      codes.Unavailable,
+		http2.ErrCodeCancel:             codes.Canceled,
+		http2.ErrCodeCompression:        codes.Internal,
+		http2.ErrCodeConnect:            codes.Internal,
+		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
+		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+		http2.ErrCodeHTTP11Required:     codes.Internal,
+	}
+	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
+	HTTPStatusConvTab = map[int]codes.Code{
+		// 400 Bad Request - INTERNAL.
+		http.StatusBadRequest: codes.Internal,
+		// 401 Unauthorized  - UNAUTHENTICATED.
+		http.StatusUnauthorized: codes.Unauthenticated,
+		// 403 Forbidden - PERMISSION_DENIED.
+		http.StatusForbidden: codes.PermissionDenied,
+		// 404 Not Found - UNIMPLEMENTED.
+		http.StatusNotFound: codes.Unimplemented,
+		// 429 Too Many Requests - UNAVAILABLE.
+		http.StatusTooManyRequests: codes.Unavailable,
+		// 502 Bad Gateway - UNAVAILABLE.
+		http.StatusBadGateway: codes.Unavailable,
+		// 503 Service Unavailable - UNAVAILABLE.
+		http.StatusServiceUnavailable: codes.Unavailable,
+		// 504 Gateway timeout - UNAVAILABLE.
+		http.StatusGatewayTimeout: codes.Unavailable,
+	}
+	logger = grpclog.Component("transport")
+)
+
+// isReservedHeader checks whether hdr belongs to HTTP2 headers
+// reserved by gRPC protocol. Any other headers are classified as the
+// user-specified metadata.
+func isReservedHeader(hdr string) bool {
+	// All pseudo-headers (":path", ":authority", ...) are reserved.
+	if hdr != "" && hdr[0] == ':' {
+		return true
+	}
+	switch hdr {
+	case "content-type",
+		"user-agent",
+		"grpc-message-type",
+		"grpc-encoding",
+		"grpc-message",
+		"grpc-status",
+		"grpc-timeout",
+		"grpc-status-details-bin",
+		// Intentionally exclude grpc-previous-rpc-attempts and
+		// grpc-retry-pushback-ms, which are "reserved", but their API
+		// intentionally works via metadata.
+		"te":
+		return true
+	default:
+		return false
+	}
+}
+
+// isWhitelistedHeader checks whether hdr should be propagated into metadata
+// visible to users, even though it is classified as "reserved", above.
+func isWhitelistedHeader(hdr string) bool {
+	switch hdr {
+	case ":authority", "user-agent":
+		return true
+	default:
+		return false
+	}
+}
+
+const binHdrSuffix = "-bin"
+
+func encodeBinHeader(v []byte) string {
+	return base64.RawStdEncoding.EncodeToString(v)
+}
+
+// decodeBinHeader decodes a base64 metadata value, accepting both padded and
+// unpadded ("raw") encodings: a length divisible by 4 is treated as padded.
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
+// encodeMetadataHeader base64-encodes values for "-bin" keys and passes all
+// other values through unchanged.
+func encodeMetadataHeader(k, v string) string {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		return encodeBinHeader(([]byte)(v))
+	}
+	return v
+}
+
+func decodeMetadataHeader(k, v string) (string, error) {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		b, err := decodeBinHeader(v)
+		return string(b), err
+	}
+	return v, nil
+}
+
+// decodeGRPCStatusDetails parses a grpc-status-details-bin header value into a
+// status.Status (base64 decode, then proto-unmarshal into spb.Status).
+func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
+	v, err := decodeBinHeader(rawDetails)
+	if err != nil {
+		return nil, err
+	}
+	st := &spb.Status{}
+	if err = proto.Unmarshal(v, st); err != nil {
+		return nil, err
+	}
+	return status.FromProto(st), nil
+}
+
+type timeoutUnit uint8
+
+const (
+	hour        timeoutUnit = 'H'
+	minute      timeoutUnit = 'M'
+	second      timeoutUnit = 'S'
+	millisecond timeoutUnit = 'm'
+	microsecond timeoutUnit = 'u'
+	nanosecond  timeoutUnit = 'n'
+)
+
+func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
+	switch u {
+	case hour:
+		return time.Hour, true
+	case minute:
+		return time.Minute, true
+	case second:
+		return time.Second, true
+	case millisecond:
+		return time.Millisecond, true
+	case microsecond:
+		return time.Microsecond, true
+	case nanosecond:
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+// decodeTimeout parses a grpc-timeout header value (up to 8 ASCII digits
+// followed by a single unit character) into a time.Duration.
+func decodeTimeout(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
+	}
+	if size > 9 {
+		// Spec allows for 8 digits plus the unit.
+		return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
+	}
+	unit := timeoutUnit(s[size-1])
+	d, ok := timeoutUnitToDuration(unit)
+	if !ok {
+		return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	const maxHours = math.MaxInt64 / int64(time.Hour)
+	if d == time.Hour && t > maxHours {
+		// This timeout would overflow math.MaxInt64; clamp it.
+		return time.Duration(math.MaxInt64), nil
+	}
+	return d * time.Duration(t), nil
+}
+
+const (
+	spaceByte   = ' '
+	tildeByte   = '~'
+	percentByte = '%'
+)
+
+// encodeGrpcMessage is used to encode status code in header field
+// "grpc-message". It does percent encoding and also replaces invalid utf-8
+// characters with Unicode replacement character.
+//
+// It checks to see if each individual byte in msg is an allowable byte, and
+// then either percent encoding or passing it through. When percent encoding,
+// the byte is converted into hexadecimal notation with a '%' prepended.
+func encodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	// Fast path: scan once; only fall back to the allocating encoder when a
+	// byte actually needs escaping.
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
+			return encodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func encodeGrpcMessageUnchecked(msg string) string {
+	var sb strings.Builder
+	for len(msg) > 0 {
+		r, size := utf8.DecodeRuneInString(msg)
+		for _, b := range []byte(string(r)) {
+			if size > 1 {
+				// If size > 1, r is not ascii. Always do percent encoding.
+				fmt.Fprintf(&sb, "%%%02X", b)
+				continue
+			}
+
+			// The for loop is necessary even if size == 1. r could be
+			// utf8.RuneError.
+			//
+			// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
+			if b >= spaceByte && b <= tildeByte && b != percentByte {
+				sb.WriteByte(b)
+			} else {
+				fmt.Fprintf(&sb, "%%%02X", b)
+			}
+		}
+		msg = msg[size:]
+	}
+	return sb.String()
+}
+
+// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
+func decodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	// Fast path: only allocate via the unchecked decoder when a complete
+	// percent escape ('%' followed by two characters) is present.
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		if msg[i] == percentByte && i+2 < lenMsg {
+			return decodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func decodeGrpcMessageUnchecked(msg string) string {
+	var sb strings.Builder
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if c == percentByte && i+2 < lenMsg {
+			// Malformed escapes are passed through verbatim rather than
+			// rejected.
+			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
+			if err != nil {
+				sb.WriteByte(c)
+			} else {
+				sb.WriteByte(byte(parsed))
+				i += 2
+			}
+		} else {
+			sb.WriteByte(c)
+		}
+	}
+	return sb.String()
+}
+
+// bufWriter is a sticky-error buffered writer: it accumulates writes and
+// flushes once at least batchSize bytes are buffered. The backing buffer is
+// sized 2*batchSize so a single Write can overshoot the threshold safely.
+type bufWriter struct {
+	buf       []byte
+	offset    int
+	batchSize int
+	conn      net.Conn
+	err       error
+}
+
+func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
+	return &bufWriter{
+		buf:       make([]byte, batchSize*2),
+		batchSize: batchSize,
+		conn:      conn,
+	}
+}
+
+func (w *bufWriter) Write(b []byte) (n int, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if w.batchSize == 0 { // Buffer has been disabled.
+		return w.conn.Write(b)
+	}
+	for len(b) > 0 {
+		nn := copy(w.buf[w.offset:], b)
+		b = b[nn:]
+		w.offset += nn
+		n += nn
+		if w.offset >= w.batchSize {
+			err = w.Flush()
+		}
+	}
+	return n, err
+}
+
+func (w *bufWriter) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if w.offset == 0 {
+		return nil
+	}
+	_, w.err = w.conn.Write(w.buf[:w.offset])
+	w.offset = 0
+	return w.err
+}
+
+type framer struct {
+	writer *bufWriter
+	fr     *http2.Framer
+}
+
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
+	if writeBufferSize < 0 {
+		writeBufferSize = 0
+	}
+	var r io.Reader = conn
+	if readBufferSize > 0 {
+		r = bufio.NewReaderSize(r, readBufferSize)
+	}
+	w := newBufWriter(conn, writeBufferSize)
+	f := &framer{
+		writer: w,
+		fr:     http2.NewFramer(w, r),
+	}
+	f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
+	// Opt-in to Frame reuse API on framer to reduce garbage.
+	// Frames aren't safe to read from after a subsequent call to ReadFrame.
+	f.fr.SetReuseFrames()
+	f.fr.MaxHeaderListSize = maxHeaderListSize
+	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
+	return f
+}
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+	// NOTE(review): this local variable shadows the "net" package within the
+	// function; harmless here but worth renaming for clarity.
+	net := "tcp"
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+	// handle unix:addr which will fail with url.Parse
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			return n, target[m1+1:]
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr := t.Path
+		if scheme == "unix" {
+			if addr == "" {
+				addr = t.Host
+			}
+			return scheme, addr
+		}
+	}
+	return net, target
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 000000000..c11b52782
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. Attribute of a resolver.Address.
+package networktype
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+	address.Attributes = address.Attributes.WithValue(key, networkType)
+	return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+	v := address.Attributes.Value(key)
+	if v == nil {
+		return "", false
+	}
+	// The assertion is safe: only Set stores under this key, and it always
+	// stores a string.
+	return v.(string), true
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
new file mode 100644
index 000000000..415961987
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -0,0 +1,142 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+var (
+	// The following variable will be overwritten in the tests.
+	httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+// mapAddress returns the proxy URL (or nil when no proxy applies) for the
+// given address, per the process's HTTP(S)_PROXY environment configuration.
+func mapAddress(address string) (*url.URL, error) {
+	req := &http.Request{
+		URL: &url.URL{
+			Scheme: "https",
+			Host:   address,
+		},
+	}
+	url, err := httpProxyFromEnvironment(req)
+	if err != nil {
+		return nil, err
+	}
+	return url, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's need for the response and stores
+// those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
+// bytes in the buffer.
+type bufConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+// doHTTPConnectHandshake issues an HTTP CONNECT to the proxy over conn and
+// returns a connection suitable for the tunneled traffic. conn is closed on
+// any error via the named-return defer.
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	req := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Host: backendAddr},
+		Header: map[string][]string{"User-Agent": {grpcUA}},
+	}
+	if t := proxyURL.User; t != nil {
+		u := t.Username()
+		p, _ := t.Password()
+		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+	}
+
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+
+	r := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(r, req)
+	if err != nil {
+		return nil, fmt.Errorf("reading server HTTP response: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+		}
+		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+	}
+
+	// Return conn wrapped with r: the bufio.Reader may have buffered bytes
+	// beyond the CONNECT response that must not be lost.
+	return &bufConn{Conn: conn, r: r}, nil
+}
+
+// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
+// is necessary, dials, does the HTTP CONNECT handshake, and returns the
+// connection.
+func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
+	newAddr := addr
+	proxyURL, err := mapAddress(addr)
+	if err != nil {
+		return nil, err
+	}
+	if proxyURL != nil {
+		newAddr = proxyURL.Host
+	}
+
+	conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
+	if err != nil {
+		return
+	}
+	if proxyURL != nil {
+		// proxy is disabled if proxyURL is nil.
+		conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+	}
+	return
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
new file mode 100644
index 000000000..0ac77ea4f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -0,0 +1,823 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package transport defines and implements message oriented communication
+// channel to complete various transactions (e.g., an RPC).  It is meant for
+// grpc-internal usage and is not intended to be imported directly by users.
+package transport
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+// ErrNoHeaders is used as a signal that a trailers only response was received,
+// and is not a real error.
+var ErrNoHeaders = errors.New("stream has no headers")
+
+const logLevel = 2
+
+// bufferPool recycles bytes.Buffers to reduce per-message allocations.
+type bufferPool struct {
+	pool sync.Pool
+}
+
+func newBufferPool() *bufferPool {
+	return &bufferPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(bytes.Buffer)
+			},
+		},
+	}
+}
+
+func (p *bufferPool) get() *bytes.Buffer {
+	return p.pool.Get().(*bytes.Buffer)
+}
+
+func (p *bufferPool) put(b *bytes.Buffer) {
+	p.pool.Put(b)
+}
+
+// recvMsg represents the received msg from the transport. All transport
+// protocol specific info has been removed.
+type recvMsg struct {
+	buffer *bytes.Buffer
+	// nil: received some data
+	// io.EOF: stream is completed. data is nil.
+	// other non-nil error: transport failure. data is nil.
+	err error
+}
+
+// recvBuffer is an unbounded channel of recvMsg structs.
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often and using strict recvMsg
+// structs helps avoid allocation in "recvBuffer.put"
+type recvBuffer struct {
+	c       chan recvMsg
+	mu      sync.Mutex
+	backlog []recvMsg
+	err     error
+}
+
+func newRecvBuffer() *recvBuffer {
+	b := &recvBuffer{
+		c: make(chan recvMsg, 1),
+	}
+	return b
+}
+
+func (b *recvBuffer) put(r recvMsg) {
+	b.mu.Lock()
+	if b.err != nil {
+		b.mu.Unlock()
+		// An error had occurred earlier, don't accept more
+		// data or errors.
+		return
+	}
+	// Latch the first terminal error; once set, subsequent puts are dropped
+	// by the check above.
+	b.err = r.err
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- r:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, r)
+	b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			// Zero the moved element so the backing array does not retain it.
+			b.backlog[0] = recvMsg{}
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+	return b.c
+}
+
+// recvBufferReader implements io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+	closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+	ctx         context.Context
+	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
+	recv        *recvBuffer
+	last        *bytes.Buffer // Stores the remaining data in the previous calls.
+	err         error
+	freeBuffer  func(*bytes.Buffer)
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.last != nil {
+		// Read remaining data left in last call.
+		copied, _ := r.last.Read(p)
+		if r.last.Len() == 0 {
+			r.freeBuffer(r.last)
+			r.last = nil
+		}
+		return copied, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readClient(p)
+	} else {
+		n, r.err = r.read(p)
+	}
+	return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+	// If the context is canceled, then closes the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err())) +		m := <-r.recv.get() +		return r.readAdditional(m, p) +	case m := <-r.recv.get(): +		return r.readAdditional(m, p) +	} +} + +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +	r.recv.load() +	if m.err != nil { +		return 0, m.err +	} +	copied, _ := m.buffer.Read(p) +	if m.buffer.Len() == 0 { +		r.freeBuffer(m.buffer) +		r.last = nil +	} else { +		r.last = m.buffer +	} +	return copied, nil +} + +type streamState uint32 + +const ( +	streamActive    streamState = iota +	streamWriteDone             // EndStream sent +	streamReadDone              // EndStream received +	streamDone                  // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { +	id           uint32 +	st           ServerTransport    // nil for client side Stream +	ct           *http2Client       // nil for server side Stream +	ctx          context.Context    // the associated context of the stream +	cancel       context.CancelFunc // always nil for client side Stream +	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side. +	doneFunc     func()             // invoked at the end of stream on client side. +	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance) +	method       string             // the associated RPC method of the stream +	recvCompress string +	sendCompress string +	buf          *recvBuffer +	trReader     io.Reader +	fc           *inFlow +	wq           *writeQuota + +	// Callback to state application's intentions to read data. This +	// is used to adjust flow control, if needed. +	requestRead func(int) + +	headerChan       chan struct{} // closed to indicate the end of header metadata. +	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times. 
+	// headerValid indicates whether a valid header was received.  Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).  Not valid on server side.
+	headerValid bool
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool { +	return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { +	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { +	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { +	return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() { +	if s.headerChan == nil { +		// On the server headerChan is always nil since a stream originates +		// only after having received headers. +		return +	} +	select { +	case <-s.ctx.Done(): +		// Close the stream to prevent headers/trailers from changing after +		// this function returns. +		s.ct.CloseStream(s, ContextErr(s.ctx.Err())) +		// headerChan could possibly not be closed yet if closeStream raced +		// with operateHeaders; wait until it is closed explicitly here. +		<-s.headerChan +	case <-s.headerChan: +	} +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { +	s.waitOnHeader() +	return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { +	s.sendCompress = str +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { +	return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called.  
It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	s.waitOnHeader()
+
+	if !s.headerValid {
+		return nil, s.status.Err()
+	}
+
+	if s.noHeaders {
+		return nil, ErrNoHeaders
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only.  If the stream ends
+// before headers are received, returns true.  Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after
+// either read or write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase.  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata.
This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int) +	er            error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { +	n, err = t.reader.Read(p) +	if err != nil { +		t.er = err +		return +	} +	t.windowHandler(n) +	return +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { +	return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *Stream) Unprocessed() bool { +	return atomic.LoadUint32(&s.unprocessed) == 1 +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. +func (s *Stream) GoString() string { +	return fmt.Sprintf("<stream: %p, %v>", s, s.method) +} + +// state of transport +type transportState int + +const ( +	reachable transportState = iota +	closing +	draining +) + +// ServerConfig consists of all the configurations to establish a server transport. +type ServerConfig struct { +	MaxStreams            uint32 +	ConnectionTimeout     time.Duration +	Credentials           credentials.TransportCredentials +	InTapHandle           tap.ServerInHandle +	StatsHandlers         []stats.Handler +	KeepaliveParams       keepalive.ServerParameters +	KeepalivePolicy       keepalive.EnforcementPolicy +	InitialWindowSize     int32 +	InitialConnWindowSize int32 +	WriteBufferSize       int +	ReadBufferSize        int +	ChannelzParentID      *channelz.Identifier +	MaxHeaderListSize     *uint32 +	HeaderTableSize       *uint32 +} + +// ConnectOptions covers all relevant options for communicating with the server. +type ConnectOptions struct { +	// UserAgent is the application user agent. +	UserAgent string +	// Dialer specifies how to dial a network address. +	Dialer func(context.Context, string) (net.Conn, error) +	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. 
+	FailOnNonTempDialError bool +	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. +	PerRPCCredentials []credentials.PerRPCCredentials +	// TransportCredentials stores the Authenticator required to setup a client +	// connection. Only one of TransportCredentials and CredsBundle is non-nil. +	TransportCredentials credentials.TransportCredentials +	// CredsBundle is the credentials bundle to be used. Only one of +	// TransportCredentials and CredsBundle is non-nil. +	CredsBundle credentials.Bundle +	// KeepaliveParams stores the keepalive parameters. +	KeepaliveParams keepalive.ClientParameters +	// StatsHandlers stores the handler for stats. +	StatsHandlers []stats.Handler +	// InitialWindowSize sets the initial window size for a stream. +	InitialWindowSize int32 +	// InitialConnWindowSize sets the initial window size for a connection. +	InitialConnWindowSize int32 +	// WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. +	WriteBufferSize int +	// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. +	ReadBufferSize int +	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport. +	ChannelzParentID *channelz.Identifier +	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. +	MaxHeaderListSize *uint32 +	// UseProxy specifies if a proxy should be used. +	UseProxy bool +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { +	return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) +} + +// Options provides additional hints and information for message +// transmission. 
+type Options struct { +	// Last indicates whether this write is the last piece for +	// this stream. +	Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { +	// Host specifies the peer's host. +	Host string + +	// Method specifies the operation to perform. +	Method string + +	// SendCompress specifies the compression algorithm applied on +	// outbound message. +	SendCompress string + +	// Creds specifies credentials.PerRPCCredentials for a call. +	Creds credentials.PerRPCCredentials + +	// ContentSubtype specifies the content-subtype for a request. For example, a +	// content-subtype of "proto" will result in a content-type of +	// "application/grpc+proto". The value of ContentSubtype must be all +	// lowercase, otherwise the behavior is undefined. See +	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +	// for more details. +	ContentSubtype string + +	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + +	DoneFunc func() // called when the stream is finished +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { +	// Close tears down this transport. Once it returns, the transport +	// should not be accessed any more. The caller must make sure this +	// is called only once. +	Close(err error) + +	// GracefulClose starts to tear down the transport: the transport will stop +	// accepting new RPCs and NewStream will return error. Once all streams are +	// finished, the transport will close. +	// +	// It does not block. +	GracefulClose() + +	// Write sends the data for the given stream. A nil stream indicates +	// the write is to be performed on the transport as a whole. +	Write(s *Stream, hdr []byte, data []byte, opts *Options) error + +	// NewStream creates a Stream for an RPC. 
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + +	// CloseStream clears the footprint of a stream when the stream is +	// not needed any more. The err indicates the error incurred when +	// CloseStream is called. Must be called when a stream is finished +	// unless the associated transport is closing. +	CloseStream(stream *Stream, err error) + +	// Error returns a channel that is closed when some I/O error +	// happens. Typically the caller should have a goroutine to monitor +	// this in order to take action (e.g., close the current transport +	// and create a new one) in error case. It should not return nil +	// once the transport is initiated. +	Error() <-chan struct{} + +	// GoAway returns a channel that is closed when ClientTransport +	// receives the draining signal from the server (e.g., GOAWAY frame in +	// HTTP/2). +	GoAway() <-chan struct{} + +	// GetGoAwayReason returns the reason why GoAway frame was received, along +	// with a human readable string with debug info. +	GetGoAwayReason() (GoAwayReason, string) + +	// RemoteAddr returns the remote network address. +	RemoteAddr() net.Addr + +	// IncrMsgSent increments the number of message sent through this transport. +	IncrMsgSent() + +	// IncrMsgRecv increments the number of message received through this transport. +	IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { +	// HandleStreams receives incoming streams using the given handler. +	HandleStreams(func(*Stream), func(context.Context, string) context.Context) + +	// WriteHeader sends the header metadata for the given stream. +	// WriteHeader may not be called on all streams. +	WriteHeader(s *Stream, md metadata.MD) error + +	// Write sends the data for the given stream. 
+	// Write may not be called on all streams. +	Write(s *Stream, hdr []byte, data []byte, opts *Options) error + +	// WriteStatus sends the status of a stream to the client.  WriteStatus is +	// the final call made on a stream and always occurs. +	WriteStatus(s *Stream, st *status.Status) error + +	// Close tears down the transport. Once it is called, the transport +	// should not be accessed any more. All the pending streams and their +	// handlers will be terminated asynchronously. +	Close(err error) + +	// RemoteAddr returns the remote network address. +	RemoteAddr() net.Addr + +	// Drain notifies the client this ServerTransport stops accepting new RPCs. +	Drain() + +	// IncrMsgSent increments the number of message sent through this transport. +	IncrMsgSent() + +	// IncrMsgRecv increments the number of message received through this transport. +	IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +	return ConnectionError{ +		Desc: fmt.Sprintf(format, a...), +		temp: temp, +		err:  e, +	} +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { +	Desc string +	temp bool +	err  error +} + +func (e ConnectionError) Error() string { +	return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { +	return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { +	// Never return nil error here. +	// If the original error is nil, return itself. +	if e.err == nil { +		return e +	} +	return e.err +} + +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. 
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate
+	// an application-layer error.
+	errStreamDone = errors.New("the stream is done")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
+// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
+// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// EoS bit set frame from server.
+	// Server side: The number of streams that have ended successfully by sending +	// frame with EoS bit set. +	streamsSucceeded int64 +	streamsFailed    int64 +	// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type +	// instead of time.Time since it's more costly to atomically update time.Time variable than int64 +	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime. +	lastStreamCreatedTime int64 +	msgSent               int64 +	msgRecv               int64 +	lastMsgSentTime       int64 +	lastMsgRecvTime       int64 +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { +	switch err { +	case context.DeadlineExceeded: +		return status.Error(codes.DeadlineExceeded, err.Error()) +	case context.Canceled: +		return status.Error(codes.Canceled, err.Error()) +	} +	return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 000000000..e8b492774 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package internal + +import ( +	"google.golang.org/grpc/attributes" +	"google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { +	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) +	return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { +	v := attr.Value(handshakeClusterNameKey{}) +	name, ok := v.(string) +	return name, ok +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 000000000..34d31b5e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( +	"time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. 
+// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { +	// After a duration of this time if the client doesn't see any activity it +	// pings the server to see if the transport is still alive. +	// If set below 10s, a minimum value of 10s will be used instead. +	Time time.Duration // The current default value is infinity. +	// After having pinged for keepalive check, the client waits for a duration +	// of Timeout and if no activity is seen even after that the connection is +	// closed. +	Timeout time.Duration // The current default value is 20 seconds. +	// If true, client sends keepalive pings even with no active RPCs. If false, +	// when there are no active RPCs, Time and Timeout will be ignored and no +	// keepalive pings will be sent. +	PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { +	// MaxConnectionIdle is a duration for the amount of time after which an +	// idle connection would be closed by sending a GoAway. Idleness duration is +	// defined since the most recent time the number of outstanding RPCs became +	// zero or the connection establishment. +	MaxConnectionIdle time.Duration // The current default value is infinity. +	// MaxConnectionAge is a duration for the maximum amount of time a +	// connection may exist before it will be closed by sending a GoAway. A +	// random jitter of +/-10% will be added to MaxConnectionAge to spread out +	// connection storms. +	MaxConnectionAge time.Duration // The current default value is infinity. 
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after +	// which the connection will be forcibly closed. +	MaxConnectionAgeGrace time.Duration // The current default value is infinity. +	// After a duration of this time if the server doesn't see any activity it +	// pings the client to see if the transport is still alive. +	// If set below 1s, a minimum value of 1s will be used instead. +	Time time.Duration // The current default value is 2 hours. +	// After having pinged for keepalive check, the server waits for a duration +	// of Timeout and if no activity is seen even after that the connection is +	// closed. +	Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { +	// MinTime is the minimum amount of time a client should wait before sending +	// a keepalive ping. +	MinTime time.Duration // The current default value is 5 minutes. +	// If true, server allows keepalive pings even when there are no active +	// streams(RPCs). If false, and client sends ping when there are no active +	// streams, server will send GOAWAY and close the connection. +	PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 000000000..fb4a88f59 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,288 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( +	"context" +	"fmt" +	"strings" +) + +// DecodeKeyValue returns k, v, nil. +// +// Deprecated: use k and v directly instead. +func DecodeKeyValue(k, v string) (string, string, error) { +	return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +//   - digits: 0-9 +//   - uppercase letters: A-Z (normalized to lower) +//   - lowercase letters: a-z +//   - special characters: -_. +// +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { +	md := make(MD, len(m)) +	for k, val := range m { +		key := strings.ToLower(k) +		md[key] = append(md[key], val) +	} +	return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. 
+// +// Only the following ASCII characters are allowed in keys: +//   - digits: 0-9 +//   - uppercase letters: A-Z (normalized to lower) +//   - lowercase letters: a-z +//   - special characters: -_. +// +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { +	if len(kv)%2 == 1 { +		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) +	} +	md := make(MD, len(kv)/2) +	for i := 0; i < len(kv); i += 2 { +		key := strings.ToLower(kv[i]) +		md[key] = append(md[key], kv[i+1]) +	} +	return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { +	return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { +	return Join(md) +} + +// Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. +func (md MD) Get(k string) []string { +	k = strings.ToLower(k) +	return md[k] +} + +// Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. +func (md MD) Set(k string, vals ...string) { +	if len(vals) == 0 { +		return +	} +	k = strings.ToLower(k) +	md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. +func (md MD) Append(k string, vals ...string) { +	if len(vals) == 0 { +		return +	} +	k = strings.ToLower(k) +	md[k] = append(md[k], vals...) +} + +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { +	k = strings.ToLower(k) +	delete(md, k) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. 
+func Join(mds ...MD) MD { +	out := MD{} +	for _, md := range mds { +		for k, v := range md { +			out[k] = append(out[k], v...) +		} +	} +	return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { +	return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { +	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { +	if len(kv)%2 == 1 { +		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) +	} +	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) +	added := make([][]string, len(md.added)+1) +	copy(added, md.added) +	added[len(added)-1] = make([]string, len(kv)) +	copy(added[len(added)-1], kv) +	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { +	md, ok := ctx.Value(mdIncomingKey{}).(MD) +	if !ok { +		return nil, false +	} +	out := make(MD, len(md)) +	for k, v := range md { +		// We need to manually convert all keys to lower case, because MD is a +		// map, and there's no guarantee that the MD attached to the context is +		// created using our helper functions. 
+		key := strings.ToLower(k) +		out[key] = copyOf(v) +	} +	return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { +	md, ok := ctx.Value(mdIncomingKey{}).(MD) +	if !ok { +		return nil +	} + +	if v, ok := md[key]; ok { +		return copyOf(v) +	} +	for k, v := range md { +		// We need to manually convert all keys to lower case, because MD is a +		// map, and there's no guarantee that the MD attached to the context is +		// created using our helper functions. +		if strings.ToLower(k) == key { +			return copyOf(v) +		} +	} +	return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { +	vals := make([]string, len(v)) +	copy(vals, v) +	return vals +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) +	if !ok { +		return nil, nil, false +	} + +	return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. 
+func FromOutgoingContext(ctx context.Context) (MD, bool) { +	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) +	if !ok { +		return nil, false +	} + +	mdSize := len(raw.md) +	for i := range raw.added { +		mdSize += len(raw.added[i]) / 2 +	} + +	out := make(MD, mdSize) +	for k, v := range raw.md { +		// We need to manually convert all keys to lower case, because MD is a +		// map, and there's no guarantee that the MD attached to the context is +		// created using our helper functions. +		key := strings.ToLower(k) +		out[key] = copyOf(v) +	} +	for _, added := range raw.added { +		if len(added)%2 == 1 { +			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) +		} + +		for i := 0; i < len(added); i += 2 { +			key := strings.ToLower(added[i]) +			out[key] = append(out[key], added[i+1]) +		} +	} +	return out, ok +} + +type rawMD struct { +	md    MD +	added [][]string +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000..e01d219ff --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. 
+package peer + +import ( +	"context" +	"net" + +	"google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { +	// Addr is the peer address. +	Addr net.Addr +	// AuthInfo is the authentication information of the transport. +	// It is nil if there is no transport security being used. +	AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { +	return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { +	p, ok = ctx.Value(peerKey{}).(*Peer) +	return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 000000000..c525dc070 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( +	"context" +	"io" +	"sync" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/internal/channelz" +	istatus "google.golang.org/grpc/internal/status" +	"google.golang.org/grpc/internal/transport" +	"google.golang.org/grpc/status" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { +	mu         sync.Mutex +	done       bool +	blockingCh chan struct{} +	picker     balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { +	return &pickerWrapper{blockingCh: make(chan struct{})} +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePicker(p balancer.Picker) { +	pw.mu.Lock() +	if pw.done { +		pw.mu.Unlock() +		return +	} +	pw.picker = p +	// pw.blockingCh should never be nil. +	close(pw.blockingCh) +	pw.blockingCh = make(chan struct{}) +	pw.mu.Unlock() +} + +// doneChannelzWrapper performs the following: +//   - increments the calls started channelz counter +//   - wraps the done function in the passed in result to increment the calls +//     failed or calls succeeded channelz counter before invoking the actual +//     done function. +func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { +	acw.mu.Lock() +	ac := acw.ac +	acw.mu.Unlock() +	ac.incrCallsStarted() +	done := result.Done +	result.Done = func(b balancer.DoneInfo) { +		if b.Err != nil && b.Err != io.EOF { +			ac.incrCallsFailed() +		} else { +			ac.incrCallsSucceeded() +		} +		if done != nil { +			done(b) +		} +	} +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. 
+// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +	var ch chan struct{} + +	var lastPickErr error +	for { +		pw.mu.Lock() +		if pw.done { +			pw.mu.Unlock() +			return nil, balancer.PickResult{}, ErrClientConnClosing +		} + +		if pw.picker == nil { +			ch = pw.blockingCh +		} +		if ch == pw.blockingCh { +			// This could happen when either: +			// - pw.picker is nil (the previous if condition), or +			// - has called pick on the current picker. +			pw.mu.Unlock() +			select { +			case <-ctx.Done(): +				var errStr string +				if lastPickErr != nil { +					errStr = "latest balancer error: " + lastPickErr.Error() +				} else { +					errStr = ctx.Err().Error() +				} +				switch ctx.Err() { +				case context.DeadlineExceeded: +					return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) +				case context.Canceled: +					return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) +				} +			case <-ch: +			} +			continue +		} + +		ch = pw.blockingCh +		p := pw.picker +		pw.mu.Unlock() + +		pickResult, err := p.Pick(info) +		if err != nil { +			if err == balancer.ErrNoSubConnAvailable { +				continue +			} +			if st, ok := status.FromError(err); ok { +				// Status error: end the RPC unconditionally with this status. +				// First restrict the code to the list allowed by gRFC A54. +				if istatus.IsRestrictedControlPlaneCode(st) { +					err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) +				} +				return nil, balancer.PickResult{}, dropError{error: err} +			} +			// For all other errors, wait for ready RPCs should block and other +			// RPCs should fail with unavailable. 
+			if !failfast { +				lastPickErr = err +				continue +			} +			return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) +		} + +		acw, ok := pickResult.SubConn.(*acBalancerWrapper) +		if !ok { +			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) +			continue +		} +		if t := acw.getAddrConn().getReadyTransport(); t != nil { +			if channelz.IsOn() { +				doneChannelzWrapper(acw, &pickResult) +				return t, pickResult, nil +			} +			return t, pickResult, nil +		} +		if pickResult.Done != nil { +			// Calling done with nil error, no bytes sent and no bytes received. +			// DoneInfo with default value works. +			pickResult.Done(balancer.DoneInfo{}) +		} +		logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") +		// If ok == false, ac.state is not READY. +		// A valid picker always returns READY subConn. This means the state of ac +		// just changed, and picker will be updated shortly. +		// continue back to the beginning of the for loop to repick. +	} +} + +func (pw *pickerWrapper) close() { +	pw.mu.Lock() +	defer pw.mu.Unlock() +	if pw.done { +		return +	} +	pw.done = true +	close(pw.blockingCh) +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { +	error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000..fc91b4d26 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,183 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"errors" +	"fmt" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/connectivity" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { +	return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +	return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { +	return PickFirstBalancerName +} + +type pickfirstBalancer struct { +	state   connectivity.State +	cc      balancer.ClientConn +	subConn balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { +	if logger.V(2) { +		logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) +	} +	if b.subConn == nil { +		b.state = connectivity.TransientFailure +	} + +	if b.state != connectivity.TransientFailure { +		// The picker will not change since the balancer does not currently +		// report an error. +		return +	} +	b.cc.UpdateState(balancer.State{ +		ConnectivityState: connectivity.TransientFailure, +		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)}, +	}) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { +	if len(state.ResolverState.Addresses) == 0 { +		// The resolver reported an empty address list. Treat it like an error by +		// calling b.ResolverError. 
+		if b.subConn != nil { +			// Remove the old subConn. All addresses were removed, so it is no longer +			// valid. +			b.cc.RemoveSubConn(b.subConn) +			b.subConn = nil +		} +		b.ResolverError(errors.New("produced zero addresses")) +		return balancer.ErrBadResolverState +	} + +	if b.subConn != nil { +		b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) +		return nil +	} + +	subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) +	if err != nil { +		if logger.V(2) { +			logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) +		} +		b.state = connectivity.TransientFailure +		b.cc.UpdateState(balancer.State{ +			ConnectivityState: connectivity.TransientFailure, +			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)}, +		}) +		return balancer.ErrBadResolverState +	} +	b.subConn = subConn +	b.state = connectivity.Idle +	b.cc.UpdateState(balancer.State{ +		ConnectivityState: connectivity.Connecting, +		Picker:            &picker{err: balancer.ErrNoSubConnAvailable}, +	}) +	b.subConn.Connect() +	return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { +	if logger.V(2) { +		logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) +	} +	if b.subConn != subConn { +		if logger.V(2) { +			logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") +		} +		return +	} +	b.state = state.ConnectivityState +	if state.ConnectivityState == connectivity.Shutdown { +		b.subConn = nil +		return +	} + +	switch state.ConnectivityState { +	case connectivity.Ready: +		b.cc.UpdateState(balancer.State{ +			ConnectivityState: state.ConnectivityState, +			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}}, +		}) +	case connectivity.Connecting: +		b.cc.UpdateState(balancer.State{ +			ConnectivityState: state.ConnectivityState, +			Picker:            &picker{err: 
balancer.ErrNoSubConnAvailable}, +		}) +	case connectivity.Idle: +		b.cc.UpdateState(balancer.State{ +			ConnectivityState: state.ConnectivityState, +			Picker:            &idlePicker{subConn: subConn}, +		}) +	case connectivity.TransientFailure: +		b.cc.UpdateState(balancer.State{ +			ConnectivityState: state.ConnectivityState, +			Picker:            &picker{err: state.ConnectionError}, +		}) +	} +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { +	if b.subConn != nil && b.state == connectivity.Idle { +		b.subConn.Connect() +	} +} + +type picker struct { +	result balancer.PickResult +	err    error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { +	return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { +	subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { +	i.subConn.Connect() +	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +func init() { +	balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 000000000..cd4554785 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PreparedMsg struct { +	// Struct for preparing msg before sending them +	encodedData []byte +	hdr         []byte +	payload     []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +	ctx := s.Context() +	rpcInfo, ok := rpcInfoFromContext(ctx) +	if !ok { +		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") +	} + +	// check if the context has the relevant information to prepareMsg +	if rpcInfo.preloaderInfo == nil { +		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") +	} +	if rpcInfo.preloaderInfo.codec == nil { +		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") +	} + +	// prepare the msg +	data, err := encode(rpcInfo.preloaderInfo.codec, msg) +	if err != nil { +		return err +	} +	p.encodedData = data +	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) +	if err != nil { +		return err +	} +	p.hdr, p.payload = msgHeader(data, compData) +	return nil +} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 000000000..a6f26c8ab --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +#      http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { +  rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) 
+ +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( +  ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto +  ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto +  ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto +  ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto +  profiling/proto/service.proto +  ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto +  ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( +  $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') +  ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto +  ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto +  ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto +  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto +  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto +  ${WORKDIR}/grpc-proto/grpc/testing/*.proto +  ${WORKDIR}/grpc-proto/grpc/core/*.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +# +# Note that the protos listed here are all for testing purposes. 
All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + +for src in ${SOURCES[@]}; do +  echo "protoc ${src}" +  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ +    -I"." \ +    -I${WORKDIR}/grpc-proto \ +    -I${WORKDIR}/googleapis \ +    -I${WORKDIR}/protobuf/src \ +    ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do +  echo "protoc ${src}" +  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ +    -I"." \ +    -I${WORKDIR}/grpc-proto \ +    -I${WORKDIR}/googleapis \ +    -I${WORKDIR}/protobuf/src \ +    ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 + +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. 
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 000000000..efcb7f3ef --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { +	addr  Address +	value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes.  BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently.  Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { +	// The underlying map is keyed by an Address with fields that we don't care +	// about being set to their zero values. The only fields that we care about +	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to +	// distinguish between addresses with same `Addr` and `ServerName`, but +	// different `Attributes`, we cannot store the `Attributes` in the map key. +	// +	// The comparison operation for structs work as follows: +	//  Struct values are comparable if all their fields are comparable. Two +	//  struct values are equal if their corresponding non-blank fields are equal. 
+	// +	// The value type of the map contains a slice of addresses which match the key +	// in their `Addr` and `ServerName` fields and contain the corresponding value +	// associated with them. +	m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { +	return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { +	return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { +	for i, entry := range l { +		// Attributes are the only thing to match on here, since `Addr` and +		// `ServerName` are already equal. +		if entry.addr.Attributes.Equal(addr.Attributes) { +			return i +		} +	} +	return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +	addrKey := toMapKey(&addr) +	entryList := a.m[addrKey] +	if entry := entryList.find(addr); entry != -1 { +		return entryList[entry].value, true +	} +	return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { +	addrKey := toMapKey(&addr) +	entryList := a.m[addrKey] +	if entry := entryList.find(addr); entry != -1 { +		entryList[entry].value = value +		return +	} +	a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. 
+func (a *AddressMap) Delete(addr Address) { +	addrKey := toMapKey(&addr) +	entryList := a.m[addrKey] +	entry := entryList.find(addr) +	if entry == -1 { +		return +	} +	if len(entryList) == 1 { +		entryList = nil +	} else { +		copy(entryList[entry:], entryList[entry+1:]) +		entryList = entryList[:len(entryList)-1] +	} +	a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { +	ret := 0 +	for _, entryList := range a.m { +		ret += len(entryList) +	} +	return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { +	ret := make([]Address, 0, a.Len()) +	for _, entryList := range a.m { +		for _, entry := range entryList { +			ret = append(ret, entry.addr) +		} +	} +	return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { +	ret := make([]interface{}, 0, a.Len()) +	for _, entryList := range a.m { +		for _, entry := range entryList { +			ret = append(ret, entry.value) +		} +	} +	return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 000000000..654e9ce69 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,308 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. 
+// All APIs in this package are experimental. +package resolver + +import ( +	"context" +	"net" +	"net/url" +	"strings" + +	"google.golang.org/grpc/attributes" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/internal/pretty" +	"google.golang.org/grpc/serviceconfig" +) + +var ( +	// m is a map from scheme to resolver builder. +	m = make(map[string]Builder) +	// defaultScheme is the default scheme to use. +	defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { +	m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. +func Get(scheme string) Builder { +	if b, ok := m[scheme]; ok { +		return b +	} +	return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { +	defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { +	return defaultScheme +} + +// AddressType indicates the address type returned by name resolution. +// +// Deprecated: use Attributes in Address instead. +type AddressType uint8 + +const ( +	// Backend indicates the address is for a backend server. +	// +	// Deprecated: use Attributes in Address instead. 
+	Backend AddressType = iota +	// GRPCLB indicates the address is for a grpclb load balancer. +	// +	// Deprecated: to select the GRPCLB load balancing policy, use a service +	// config with a corresponding loadBalancingConfig.  To supply balancer +	// addresses to the GRPCLB load balancing policy, set State.Attributes +	// using balancer/grpclb/state.Set. +	GRPCLB +) + +// Address represents a server the client connects to. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type Address struct { +	// Addr is the server address on which a connection will be established. +	Addr string + +	// ServerName is the name of this address. +	// If non-empty, the ServerName is used as the transport certification authority for +	// the address, instead of the hostname from the Dial target string. In most cases, +	// this should not be set. +	// +	// If Type is GRPCLB, ServerName should be the name of the remote load +	// balancer, not the name of the backend. +	// +	// WARNING: ServerName must only be populated with trusted values. It +	// is insecure to populate it with data from untrusted inputs since untrusted +	// values could be used to bypass the authority checks performed by TLS. +	ServerName string + +	// Attributes contains arbitrary data about this address intended for +	// consumption by the SubConn. +	Attributes *attributes.Attributes + +	// BalancerAttributes contains arbitrary data about this address intended +	// for consumption by the LB policy.  These attribes do not affect SubConn +	// creation, connection establishment, handshaking, etc. +	BalancerAttributes *attributes.Attributes + +	// Type is the type of this address. +	// +	// Deprecated: use Attributes instead. +	Type AddressType + +	// Metadata is the information associated with Addr, which may be used +	// to make load balancing decision. +	// +	// Deprecated: use Attributes instead. 
+	Metadata interface{} +} + +// Equal returns whether a and o are identical.  Metadata is compared directly, +// not with any recursive introspection. +func (a Address) Equal(o Address) bool { +	return a.Addr == o.Addr && a.ServerName == o.ServerName && +		a.Attributes.Equal(o.Attributes) && +		a.BalancerAttributes.Equal(o.BalancerAttributes) && +		a.Type == o.Type && a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { +	return pretty.ToJSON(a) +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { +	// DisableServiceConfig indicates whether a resolver implementation should +	// fetch service config data. +	DisableServiceConfig bool +	// DialCreds is the transport credentials used by the ClientConn for +	// communicating with the target gRPC service (set via +	// WithTransportCredentials). In cases where a name resolution service +	// requires the same credentials, the resolver may use this field. In most +	// cases though, it is not appropriate, and this field may be ignored. +	DialCreds credentials.TransportCredentials +	// CredsBundle is the credentials bundle used by the ClientConn for +	// communicating with the target gRPC service (set via +	// WithCredentialsBundle). In cases where a name resolution service +	// requires the same credentials, the resolver may use this field. In most +	// cases though, it is not appropriate, and this field may be ignored. +	CredsBundle credentials.Bundle +	// Dialer is the custom dialer used by the ClientConn for dialling the +	// target gRPC service (set via WithDialer). In cases where a name +	// resolution service requires the same dialer, the resolver may use this +	// field. In most cases though, it is not appropriate, and this field may +	// be ignored. 
+	Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { +	// Addresses is the latest set of resolved addresses for the target. +	Addresses []Address + +	// ServiceConfig contains the result from parsing the latest service +	// config.  If it is nil, it indicates no service config is present or the +	// resolver does not provide service configs. +	ServiceConfig *serviceconfig.ParseResult + +	// Attributes contains arbitrary data about the resolver intended for +	// consumption by the load balancing policy. +	Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { +	// UpdateState updates the state of the ClientConn appropriately. +	UpdateState(State) error +	// ReportError notifies the ClientConn that the Resolver encountered an +	// error.  The ClientConn will notify the load balancer and begin calling +	// ResolveNow on the Resolver with exponential backoff. +	ReportError(error) +	// NewAddress is called by resolver to notify ClientConn a new list +	// of resolved addresses. +	// The address list should be the complete list of resolved addresses. +	// +	// Deprecated: Use UpdateState instead. +	NewAddress(addresses []Address) +	// NewServiceConfig is called by resolver to notify ClientConn a new +	// service config. The service config should be provided as a json string. +	// +	// Deprecated: Use UpdateState instead. +	NewServiceConfig(serviceConfig string) +	// ParseServiceConfig parses the provided service config and returns an +	// object that provides the parsed config. 
+	ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. +// +// Examples: +// +//   - "dns://some_authority/foo.bar" +//     Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +//   - "foo.bar" +//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +//   - "unknown_scheme://authority/endpoint" +//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +type Target struct { +	// Deprecated: use URL.Scheme instead. +	Scheme string +	// Deprecated: use URL.Host instead. +	Authority string +	// URL contains the parsed dial target with an optional default scheme added +	// to it if the original dial target contained no scheme or contained an +	// unregistered scheme. Any query params specified in the original dial +	// target can be accessed from here. +	URL url.URL +} + +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { +	endpoint := t.URL.Path +	if endpoint == "" { +		endpoint = t.URL.Opaque +	} +	// For targets of the form "[scheme]://[authority]/endpoint, the endpoint +	// value returned from url.Parse() contains a leading "/". 
Although this is +	// in accordance with RFC 3986, we do not want to break existing resolver +	// implementations which expect the endpoint without the leading "/". So, we +	// end up stripping the leading "/" here. But this will result in an +	// incorrect parsing for something like "unix:///path/to/socket". Since we +	// own the "unix" resolver, we can workaround in the unix resolver by using +	// the `URL` field. +	return strings.TrimPrefix(endpoint, "/") +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { +	// Build creates a new resolver for the given target. +	// +	// gRPC dial calls Build synchronously, and fails if the returned error is +	// not nil. +	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) +	// Scheme returns the scheme supported by this resolver. +	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. +	Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { +	// ResolveNow will be called by gRPC to try to resolve the target name +	// again. It's just a hint, resolver can ignore this if it's not necessary. +	// +	// It could be called multiple times concurrently. +	ResolveNow(ResolveNowOptions) +	// Close closes the resolver. +	Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. 
+func UnregisterForTesting(scheme string) { +	delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 000000000..05a9d4e0b --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"strings" +	"sync" + +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/internal/channelz" +	"google.golang.org/grpc/internal/grpcsync" +	"google.golang.org/grpc/internal/pretty" +	"google.golang.org/grpc/resolver" +	"google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { +	cc         *ClientConn +	resolverMu sync.Mutex +	resolver   resolver.Resolver +	done       *grpcsync.Event +	curState   resolver.State + +	incomingMu sync.Mutex // Synchronizes all the incoming calls. +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. 
+func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +	ccr := &ccResolverWrapper{ +		cc:   cc, +		done: grpcsync.NewEvent(), +	} + +	var credsClone credentials.TransportCredentials +	if creds := cc.dopts.copts.TransportCredentials; creds != nil { +		credsClone = creds.Clone() +	} +	rbo := resolver.BuildOptions{ +		DisableServiceConfig: cc.dopts.disableServiceConfig, +		DialCreds:            credsClone, +		CredsBundle:          cc.dopts.copts.CredsBundle, +		Dialer:               cc.dopts.copts.Dialer, +	} + +	var err error +	// We need to hold the lock here while we assign to the ccr.resolver field +	// to guard against a data race caused by the following code path, +	// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up +	// accessing ccr.resolver which is being assigned here. +	ccr.resolverMu.Lock() +	defer ccr.resolverMu.Unlock() +	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) +	if err != nil { +		return nil, err +	} +	return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { +	ccr.resolverMu.Lock() +	if !ccr.done.HasFired() { +		ccr.resolver.ResolveNow(o) +	} +	ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { +	ccr.resolverMu.Lock() +	ccr.resolver.Close() +	ccr.done.Fire() +	ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { +	ccr.incomingMu.Lock() +	defer ccr.incomingMu.Unlock() +	if ccr.done.HasFired() { +		return nil +	} +	ccr.addChannelzTraceEvent(s) +	ccr.curState = s +	if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { +		return balancer.ErrBadResolverState +	} +	return nil +} + +func (ccr *ccResolverWrapper) ReportError(err error) { +	ccr.incomingMu.Lock() +	defer ccr.incomingMu.Unlock() +	if ccr.done.HasFired() { +		return +	} +	channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) +	
ccr.cc.updateResolverState(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { +	ccr.incomingMu.Lock() +	defer ccr.incomingMu.Unlock() +	if ccr.done.HasFired() { +		return +	} +	ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) +	ccr.curState.Addresses = addrs +	ccr.cc.updateResolverState(ccr.curState, nil) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { +	ccr.incomingMu.Lock() +	defer ccr.incomingMu.Unlock() +	if ccr.done.HasFired() { +		return +	} +	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) +	if ccr.cc.dopts.disableServiceConfig { +		channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") +		return +	} +	scpr := parseServiceConfig(sc) +	if scpr.Err != nil { +		channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) +		return +	} +	ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) +	ccr.curState.ServiceConfig = scpr +	ccr.cc.updateResolverState(ccr.curState, nil) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { +	return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { +	var updates []string +	var oldSC, newSC *ServiceConfig +	var oldOK, newOK bool +	if ccr.curState.ServiceConfig != nil { +		oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) +	} +	if s.ServiceConfig != nil { +		newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) +	} +	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { +		updates = append(updates, "service 
config updated") +	} +	if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { +		updates = append(updates, "resolver returned an empty address list") +	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { +		updates = append(updates, "resolver returned new addresses") +	} +	channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..cb7020ebe --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,916 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"bytes" +	"compress/gzip" +	"context" +	"encoding/binary" +	"fmt" +	"io" +	"math" +	"strings" +	"sync" +	"time" + +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/credentials" +	"google.golang.org/grpc/encoding" +	"google.golang.org/grpc/encoding/proto" +	"google.golang.org/grpc/internal/transport" +	"google.golang.org/grpc/metadata" +	"google.golang.org/grpc/peer" +	"google.golang.org/grpc/stats" +	"google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { +	// Do compresses p into w. 
+	Do(w io.Writer, p []byte) error +	// Type returns the compression algorithm the Compressor uses. +	Type() string +} + +type gzipCompressor struct { +	pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { +	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) +	return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { +	if level < gzip.DefaultCompression || level > gzip.BestCompression { +		return nil, fmt.Errorf("grpc: invalid compression level: %d", level) +	} +	return &gzipCompressor{ +		pool: sync.Pool{ +			New: func() interface{} { +				w, err := gzip.NewWriterLevel(io.Discard, level) +				if err != nil { +					panic(err) +				} +				return w +			}, +		}, +	}, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { +	z := c.pool.Get().(*gzip.Writer) +	defer c.pool.Put(z) +	z.Reset(w) +	if _, err := z.Write(p); err != nil { +		return err +	} +	return z.Close() +} + +func (c *gzipCompressor) Type() string { +	return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { +	// Do reads the data from r and uncompress them. +	Do(r io.Reader) ([]byte, error) +	// Type returns the compression algorithm the Decompressor uses. +	Type() string +} + +type gzipDecompressor struct { +	pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. 
+func NewGZIPDecompressor() Decompressor { +	return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { +	var z *gzip.Reader +	switch maybeZ := d.pool.Get().(type) { +	case nil: +		newZ, err := gzip.NewReader(r) +		if err != nil { +			return nil, err +		} +		z = newZ +	case *gzip.Reader: +		z = maybeZ +		if err := z.Reset(r); err != nil { +			d.pool.Put(z) +			return nil, err +		} +	} + +	defer func() { +		z.Close() +		d.pool.Put(z) +	}() +	return io.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { +	return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { +	compressorType        string +	failFast              bool +	maxReceiveMessageSize *int +	maxSendMessageSize    *int +	creds                 credentials.PerRPCCredentials +	contentSubtype        string +	codec                 baseCodec +	maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { +	return &callInfo{ +		failFast:              true, +		maxRetryRPCBufferSize: 256 * 1024, // 256KB +	} +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { +	// before is called before the call is sent to any server.  If before +	// returns a non-nil error, the RPC fails with that error. +	before(*callInfo) error + +	// after is called after the call has completed.  after cannot return an +	// error, so any failures should be reported via output parameters. +	after(*callInfo, *csAttempt) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error      { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. 
+func Header(md *metadata.MD) CallOption { +	return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type HeaderCallOption struct { +	HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +	*o.HeaderAddr, _ = attempt.s.Header() +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { +	return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TrailerCallOption struct { +	TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +	*o.TrailerAddr = attempt.s.Trailer() +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { +	return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type PeerCallOption struct { +	PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +	if x, ok := peer.FromContext(attempt.s.Context()); ok { +		*o.PeerAddr = *x +	} +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error.  gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data.  Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { +	return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { +	return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type FailFastCallOption struct { +	FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { +	c.failFast = o.FailFast +	return nil +} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. 
+func MaxCallRecvMsgSize(bytes int) CallOption { +	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { +	MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { +	c.maxReceiveMessageSize = &o.MaxRecvMsgSize +	return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. +func MaxCallSendMsgSize(bytes int) CallOption { +	return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { +	MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { +	c.maxSendMessageSize = &o.MaxSendMsgSize +	return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { +	return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type PerRPCCredsCallOption struct { +	Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { +	c.creds = o.Creds +	return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request.  If WithCompressor is also set, UseCompressor has +// higher priority. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { +	return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { +	CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { +	c.compressorType = o.CompressorType +	return nil +} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. 
+// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { +	return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ContentSubtypeCallOption struct { +	ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { +	c.contentSubtype = o.ContentSubtype +	return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { +	return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type ForceCodecCallOption struct {
+	Codec encoding.Codec
+}
+
+// before installs the forced codec on the callInfo; it overrides any codec
+// later derived from the content-subtype.
+func (o ForceCodecCallOption) before(c *callInfo) error {
+	c.codec = o.Codec
+	return nil
+}
+func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
+// an encoding.Codec.
+//
+// Deprecated: use ForceCodec instead.
+func CallCustomCodec(codec Codec) CallOption {
+	return CustomCodecCallOption{Codec: codec}
+}
+
+// CustomCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type CustomCodecCallOption struct {
+	Codec Codec
+}
+
+func (o CustomCodecCallOption) before(c *callInfo) error {
+	c.codec = o.Codec
+	return nil
+}
+func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
+// used for buffering this RPC's requests for retry purposes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func MaxRetryRPCBufferSize(bytes int) CallOption {
+	return MaxRetryRPCBufferSizeCallOption{bytes}
+}
+
+// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
+// memory to be used for caching this RPC for retry purposes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxRetryRPCBufferSizeCallOption struct {
+	MaxRetryRPCBufferSize int
+}
+
+func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
+	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
+	return nil
+}
+func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+	compressionNone payloadFormat = 0 // no compression
+	compressionMade payloadFormat = 1 // compressed
+)
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+	// r is the underlying reader.
+	// See the comment on recvMsg for the permissible
+	// error types.
+	r io.Reader
+
+	// The header of a gRPC message. Find more detail at
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+	header [5]byte
+}
+
+// recvMsg reads a complete gRPC message from the stream.
+//
+// It returns the message and its payload (compression/encoding)
+// format. The caller owns the returned msg memory.
+//
+// If there is an error, possible values are:
+//   - io.EOF, when no messages remain
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+//
+// No other error values or types must be returned, which also means
+// that the underlying io.Reader must not return an incompatible
+// error.
+func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
+	// NOTE(review): a bare Read (not io.ReadFull) assumes the underlying
+	// reader delivers the complete 5-byte header in a single call —
+	// presumably guaranteed by the transport's stream reader; confirm
+	// before substituting a different io.Reader here.
+	if _, err := p.r.Read(p.header[:]); err != nil {
+		return 0, nil, err
+	}
+
+	pf = payloadFormat(p.header[0])
+	length := binary.BigEndian.Uint32(p.header[1:])
+
+	if length == 0 {
+		return pf, nil, nil
+	}
+	if int64(length) > int64(maxInt) {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
+	}
+	if int(length) > maxReceiveMessageSize {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
+	}
+	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
+	// of making it for each message:
+	msg = make([]byte, int(length))
+	if _, err := p.r.Read(msg); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return 0, nil, err
+	}
+	return pf, msg, nil
+}
+
+// encode serializes msg and returns a buffer containing the message, or an
+// error if it is too large to be transmitted by grpc.  If msg is nil, it
+// generates an empty message.
+func encode(c baseCodec, msg interface{}) ([]byte, error) {
+	if msg == nil { // NOTE: typed nils will not be caught by this check
+		return nil, nil
+	}
+	b, err := c.Marshal(msg)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
+	}
+	// The wire format's length prefix is a uint32, so anything larger
+	// cannot be framed.
+	if uint(len(b)) > math.MaxUint32 {
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+	}
+	return b, nil
+}
+
+// compress returns the input bytes compressed by compressor or cp.  If both
+// compressors are nil, returns nil.
+//
+// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
+func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
+	if compressor == nil && cp == nil {
+		return nil, nil
+	}
+	wrapErr := func(err error) error {
+		return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+	}
+	cbuf := &bytes.Buffer{}
+	// The new-style encoding.Compressor takes precedence over the legacy cp.
+	if compressor != nil {
+		z, err := compressor.Compress(cbuf)
+		if err != nil {
+			return nil, wrapErr(err)
+		}
+		if _, err := z.Write(in); err != nil {
+			return nil, wrapErr(err)
+		}
+		if err := z.Close(); err != nil {
+			return nil, wrapErr(err)
+		}
+	} else {
+		if err := cp.Do(cbuf, in); err != nil {
+			return nil, wrapErr(err)
+		}
+	}
+	return cbuf.Bytes(), nil
+}
+
+const (
+	payloadLen = 1
+	sizeLen    = 4
+	headerLen  = payloadLen + sizeLen
+)
+
+// msgHeader returns a 5-byte header for the message being transmitted and the
+// payload, which is compData if non-nil or data otherwise.
+func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+	hdr = make([]byte, headerLen)
+	if compData != nil {
+		hdr[0] = byte(compressionMade)
+		data = compData
+	} else {
+		hdr[0] = byte(compressionNone)
+	}
+
+	// Write length of payload into buf
+	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
+	return hdr, data
+}
+
+// outPayload builds the stats record for an outgoing message; data is the
+// serialized message and payload the on-the-wire (possibly compressed) bytes.
+func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
+	return &stats.OutPayload{
+		Client:     client,
+		Payload:    msg,
+		Data:       data,
+		Length:     len(data),
+		WireLength: len(payload) + headerLen,
+		SentTime:   t,
+	}
+}
+
+// checkRecvPayload validates the received payload format against the
+// negotiated compression settings.
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+	switch pf {
+	case compressionNone:
+	case compressionMade:
+		if recvCompress == "" || recvCompress == encoding.Identity {
+			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
+		}
+		if !haveCompressor {
+			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+		}
+	default:
+		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+	}
+	return nil
+}
+
+type payloadInfo struct {
+	wireLength        int // The compressed length got from wire.
+	uncompressedBytes []byte
+}
+
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
+	pf, d, err := p.recvMsg(maxReceiveMessageSize)
+	if err != nil {
+		return nil, err
+	}
+	if payInfo != nil {
+		payInfo.wireLength = len(d)
+	}
+
+	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
+		return nil, st.Err()
+	}
+
+	var size int
+	if pf == compressionMade {
+		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
+		// use this decompressor as the default.
+		if dc != nil {
+			// NOTE(review): this legacy path decompresses fully before the
+			// size check below, so an oversized message is only rejected
+			// after the memory has already been spent.
+			d, err = dc.Do(bytes.NewReader(d))
+			size = len(d)
+		} else {
+			d, size, err = decompress(compressor, d, maxReceiveMessageSize)
+		}
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
+		}
+		if size > maxReceiveMessageSize {
+			// TODO: Revisit the error code. Currently keep it consistent with java
+			// implementation.
+			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+		}
+	}
+	return d, nil
+}
+
+// Using compressor, decompress d, returning data and size.
+// Optionally, if data will be over maxReceiveMessageSize, just return the size.
+func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { +	dcReader, err := compressor.Decompress(bytes.NewReader(d)) +	if err != nil { +		return nil, 0, err +	} +	if sizer, ok := compressor.(interface { +		DecompressedSize(compressedBytes []byte) int +	}); ok { +		if size := sizer.DecompressedSize(d); size >= 0 { +			if size > maxReceiveMessageSize { +				return nil, size, nil +			} +			// size is used as an estimate to size the buffer, but we +			// will read more data if available. +			// +MinRead so ReadFrom will not reallocate if size is correct. +			buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) +			bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) +			return buf.Bytes(), int(bytesRead), err +		} +	} +	// Read from LimitReader with limit max+1. So if the underlying +	// reader is over limit, the result will be bigger than max. +	d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) +	return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? 
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
+	d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+	if err != nil {
+		return err
+	}
+	if err := c.Unmarshal(d, m); err != nil {
+		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
+	}
+	if payInfo != nil {
+		payInfo.uncompressedBytes = d
+	}
+	return nil
+}
+
+// Information about RPC
+type rpcInfo struct {
+	failfast      bool
+	preloaderInfo *compressorInfo
+}
+
+// Information about Preloader
+// Responsible for storing codec, and compressors
+// If stream (s) has  context s.Context which stores rpcInfo that has non nil
+// pointers to codec, and compressors, then we can use preparedMsg for Async message prep
+// and reuse marshalled bytes
+type compressorInfo struct {
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
+}
+
+// rpcInfoContextKey is the unexported context key for rpcInfo values; an
+// empty struct type guarantees no collisions with other packages' keys.
+type rpcInfoContextKey struct{}
+
+// newContextWithRPCInfo returns ctx annotated with the call's failfast flag
+// and its codec/compressor selections for later retrieval.
+func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
+	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
+		failfast: failfast,
+		preloaderInfo: &compressorInfo{
+			codec: codec,
+			cp:    cp,
+			comp:  comp,
+		},
+	})
+}
+
+// rpcInfoFromContext retrieves the rpcInfo stored by newContextWithRPCInfo,
+// reporting ok=false if none is present.
+func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
+	s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
+	return
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+//
+// Deprecated: use status.Code instead.
+func Code(err error) codes.Code {
+	return status.Code(err)
+}
+
+// ErrorDesc returns the error description of err if it was produced by the rpc system.
+// Otherwise, it returns err.Error() or empty string when err is nil.
+//
+// Deprecated: use status.Convert and Message method instead.
+func ErrorDesc(err error) string {
+	return status.Convert(err).Message()
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+//
+// Deprecated: use status.Errorf instead.
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+	return status.Errorf(c, format, a...)
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	// nil and io.EOF pass through untouched; well-known sentinel errors map
+	// to their canonical gRPC codes.
+	switch err {
+	case nil, io.EOF:
+		return err
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	case io.ErrUnexpectedEOF:
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	switch e := err.(type) {
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	case *transport.NewStreamError:
+		return toRPCErr(e.Err)
+	}
+
+	// Errors already carrying a status are returned as-is.
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+
+	return status.Error(codes.Unknown, err.Error())
+}
+
+// setCallInfoCodec should only be called after CallOptions have been applied.
+func setCallInfoCodec(c *callInfo) error {
+	if c.codec != nil {
+		// codec was already set by a CallOption; use it, but set the content
+		// subtype if it is not set.
+		if c.contentSubtype == "" {
+			// c.codec is a baseCodec to hide the difference between grpc.Codec and
+			// encoding.Codec (Name vs. String method name).  We only support
+			// setting content subtype from encoding.Codec to avoid a behavior
+			// change with the deprecated version.
+			if ec, ok := c.codec.(encoding.Codec); ok {
+				c.contentSubtype = strings.ToLower(ec.Name())
+			}
+		}
+		return nil
+	}
+
+	if c.contentSubtype == "" {
+		// No codec specified in CallOptions; use proto by default.
+		c.codec = encoding.GetCodec(proto.Name)
+		return nil
+	}
+
+	// c.contentSubtype is already lowercased in CallContentSubtype
+	c.codec = encoding.GetCodec(c.contentSubtype)
+	if c.codec == nil {
+		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
+	}
+	return nil
+}
+
+// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
+// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
+// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
+// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
+type channelzData struct {
+	callsStarted   int64
+	callsFailed    int64
+	callsSucceeded int64
+	// lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
+	// time.Time since it's more costly to atomically update time.Time variable than int64 variable.
+	lastCallStartedTime int64
+}
+
+// The SupportPackageIsVersion variables are referenced from generated protocol
+// buffer files to ensure compatibility with the gRPC version used.  The latest
+// support package version is 7.
+//
+// Older versions are kept for compatibility.
+//
+// These constants should not be referenced from any other code.
+const (
+	SupportPackageIsVersion3 = true
+	SupportPackageIsVersion4 = true
+	SupportPackageIsVersion5 = true
+	SupportPackageIsVersion6 = true
+	SupportPackageIsVersion7 = true
+)
+
+const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
new file mode 100644
index 000000000..d5a6e78be
--- /dev/null
+++ b/vendor/google.golang.org/grpc/server.go
@@ -0,0 +1,1971 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/trace"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+const (
+	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
+	defaultServerMaxSendMessageSize    = math.MaxInt32
+
+	// Server transports are tracked in a map which is keyed on listener
+	// address. For regular gRPC traffic, connections are accepted in Serve()
+	// through a call to Accept(), and we use the actual listener address as key
+	// when we add it to the map. But for connections received through
+	// ServeHTTP(), we do not have a listener and hence use this dummy value.
+	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
+)
+
+// init wires server-side hooks into the internal package so that other grpc
+// packages can reach unexported server state without an import cycle.
+func init() {
+	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+		return srv.opts.creds
+	}
+	internal.DrainServerTransports = func(srv *Server, addr string) {
+		srv.drainServerTransports(addr)
+	}
+	internal.AddGlobalServerOptions = func(opt ...ServerOption) {
+		extraServerOptions = append(extraServerOptions, opt...)
+	}
+	internal.ClearGlobalServerOptions = func() {
+		extraServerOptions = nil
+	}
+	internal.BinaryLogger = binaryLogger
+	internal.JoinServerOptions = newJoinServerOption
+}
+
+var statusOK = status.New(codes.OK, "")
+var logger = grpclog.Component("core")
+
+type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+
+// MethodDesc represents an RPC service's method specification.
+type MethodDesc struct {
+	MethodName string
+	Handler    methodHandler
+}
+
+// ServiceDesc represents an RPC service's specification.
+type ServiceDesc struct {
+	ServiceName string
+	// The pointer to the service interface. Used to check whether the user
+	// provided implementation satisfies the interface requirements.
+	HandlerType interface{}
+	Methods     []MethodDesc
+	Streams     []StreamDesc
+	Metadata    interface{}
+}
+
+// serviceInfo wraps information about a service. It is very similar to
+// ServiceDesc and is constructed from it for internal purposes.
+type serviceInfo struct {
+	// Contains the implementation for the methods in this service.
+	serviceImpl interface{}
+	methods     map[string]*MethodDesc
+	streams     map[string]*StreamDesc
+	mdata       interface{}
+}
+
+// serverWorkerData carries the transport, wait group, and stream a server
+// worker goroutine needs to process a single incoming stream.
+type serverWorkerData struct {
+	st     transport.ServerTransport
+	wg     *sync.WaitGroup
+	stream *transport.Stream
+}
+
+// Server is a gRPC server to serve RPC requests.
+type Server struct {
+	opts serverOptions
+
+	mu  sync.Mutex // guards following
+	lis map[net.Listener]bool
+	// conns contains all active server transports. It is a map keyed on a
+	// listener address with the value being the set of active transports
+	// belonging to that listener.
+	conns    map[string]map[transport.ServerTransport]bool
+	serve    bool
+	drain    bool
+	cv       *sync.Cond              // signaled when connections close for GracefulStop
+	services map[string]*serviceInfo // service name -> service info
+	events   trace.EventLog
+
+	quit               *grpcsync.Event
+	done               *grpcsync.Event
+	channelzRemoveOnce sync.Once
+	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
+
+	channelzID *channelz.Identifier
+	czData     *channelzData
+
+	// serverWorkerChannels feed streams to the worker goroutines when
+	// NumStreamWorkers is in effect.
+	serverWorkerChannels []chan *serverWorkerData
+}
+
+// serverOptions is the accumulated result of applying all ServerOptions.
+type serverOptions struct {
+	creds                 credentials.TransportCredentials
+	codec                 baseCodec
+	cp                    Compressor
+	dc                    Decompressor
+	unaryInt              UnaryServerInterceptor
+	streamInt             StreamServerInterceptor
+	chainUnaryInts        []UnaryServerInterceptor
+	chainStreamInts       []StreamServerInterceptor
+	binaryLogger          binarylog.Logger
+	inTapHandle           tap.ServerInHandle
+	statsHandlers         []stats.Handler
+	maxConcurrentStreams  uint32
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	unknownStreamDesc     *StreamDesc
+	keepaliveParams       keepalive.ServerParameters
+	keepalivePolicy       keepalive.EnforcementPolicy
+	initialWindowSize     int32
+	initialConnWindowSize int32
+	writeBufferSize       int
+	readBufferSize        int
+	connectionTimeout     time.Duration
+	maxHeaderListSize     *uint32
+	headerTableSize       *uint32
+	numServerWorkers      uint32
+}
+
+var defaultServerOptions = serverOptions{
+	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
+	maxSendMessageSize:    defaultServerMaxSendMessageSize,
+	connectionTimeout:     120 * time.Second,
+	writeBufferSize:       defaultWriteBufSize,
+	readBufferSize:        defaultReadBufSize,
+}
+var extraServerOptions []ServerOption
+
+// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
+type ServerOption interface {
+	apply(*serverOptions)
+}
+
+// EmptyServerOption does not alter the server configuration. It can be embedded
+// in another structure to build custom server options.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type EmptyServerOption struct{}
+
+func (EmptyServerOption) apply(*serverOptions) {}
+
+// funcServerOption wraps a function that modifies serverOptions into an
+// implementation of the ServerOption interface.
+type funcServerOption struct {
+	f func(*serverOptions)
+}
+
+func (fdo *funcServerOption) apply(do *serverOptions) {
+	fdo.f(do)
+}
+
+// newFuncServerOption is the constructor used by every functional option below.
+func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
+	return &funcServerOption{
+		f: f,
+	}
+}
+
+// joinServerOption provides a way to combine arbitrary number of server
+// options into one.
+type joinServerOption struct {
+	opts []ServerOption
+}
+
+func (mdo *joinServerOption) apply(do *serverOptions) {
+	for _, opt := range mdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinServerOption(opts ...ServerOption) ServerOption {
+	return &joinServerOption{opts: opts}
+}
+
+// WriteBufferSize determines how much data can be batched before doing a write
+// on the wire. The corresponding memory allocation for this buffer will be
+// twice the size to keep syscalls low. The default value for this buffer is
+// 32KB. Zero or negative values will disable the write buffer such that each
+// write will be on underlying connection.
+// Note: A Send call may not directly translate to a write.
+func WriteBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(opts *serverOptions) {
+		opts.writeBufferSize = s
+	})
+}
+
+// ReadBufferSize lets you set the size of read buffer, this determines how much
+// data can be read at most for one read syscall. The default value for this
+// buffer is 32KB. Zero or negative values will disable read buffer for a
+// connection so data framer can access the underlying conn directly.
+func ReadBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(opts *serverOptions) {
+		opts.readBufferSize = s
+	})
+}
+
+// InitialWindowSize returns a ServerOption that sets window size for stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(opts *serverOptions) {
+		opts.initialWindowSize = s
+	})
+}
+
+// InitialConnWindowSize returns a ServerOption that sets window size for a connection.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialConnWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(opts *serverOptions) {
+		opts.initialConnWindowSize = s
+	})
+}
+
+// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
+func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+	// Clamp overly-aggressive ping intervals once, when the option is
+	// constructed, rather than every time it is applied.
+	if kp.Time > 0 && kp.Time < time.Second {
+		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		kp.Time = time.Second
+	}
+	return newFuncServerOption(func(opts *serverOptions) {
+		opts.keepaliveParams = kp
+	})
+}
+
+// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
+func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.keepalivePolicy = kep
+	})
+}
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+func CustomCodec(codec Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codec
+	})
+}
+
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodec(codec encoding.Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codec
+	})
+}
+
+// RPCCompressor returns a ServerOption that sets a compressor for outbound
+// messages.  For backward compatibility, all outbound messages will be sent
+// using this compressor, regardless of incoming message compression.  By
+// default, server messages will be sent using the same compressor with which
+// request messages were sent.
+//
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
+func RPCCompressor(cp Compressor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.cp = cp
+	})
+}
+
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
+// messages.  It has higher priority than decompressors registered via
+// encoding.RegisterCompressor.
+//
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
+func RPCDecompressor(dc Decompressor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.dc = dc
+	})
+}
+
+// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default limit.
+//
+// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
+func MaxMsgSize(m int) ServerOption {
+	// Delegates to the replacement option to keep behavior identical.
+	return MaxRecvMsgSize(m)
+}
+
+// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default 4MB.
+func MaxRecvMsgSize(m int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxReceiveMessageSize = m
+	})
+}
+
+// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
+// If this is not set, gRPC uses the default `math.MaxInt32`.
+func MaxSendMsgSize(m int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxSendMessageSize = m
+	})
+}
+
+// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+// of concurrent streams to each ServerTransport.
+func MaxConcurrentStreams(n uint32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxConcurrentStreams = n
+	})
+}
+
+// Creds returns a ServerOption that sets credentials for server connections.
+func Creds(c credentials.TransportCredentials) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.creds = c
+	})
+}
+
+// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
+// server. Only one unary interceptor can be installed. The construction of multiple
+// interceptors (e.g., chaining) can be implemented at the caller.
+func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		// Setting twice is a programming error, hence the panic.
+		if o.unaryInt != nil {
+			panic("The unary server interceptor was already set and may not be reset.")
+		}
+		o.unaryInt = i
+	})
+}
+
+// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
+// for unary RPCs. The first interceptor will be the outer most,
+// while the last interceptor will be the inner most wrapper around the real call.
+// All unary interceptors added by this method will be chained.
+func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
+
+// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
+// server. Only one stream interceptor can be installed.
+func StreamInterceptor(i StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.streamInt != nil {
+			panic("The stream server interceptor was already set and may not be reset.")
+		}
+		o.streamInt = i
+	})
+}
+
+// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
+// for streaming RPCs. The first interceptor will be the outer most,
+// while the last interceptor will be the inner most wrapper around the real call.
+// All stream interceptors added by this method will be chained.
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
+// InTapHandle returns a ServerOption that sets the tap handle for all the server
+// transport to be created. Only one can be installed.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InTapHandle(h tap.ServerInHandle) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.inTapHandle != nil {
+			panic("The tap handle was already set and may not be reset.")
+		}
+		o.inTapHandle = h
+	})
+}
+
+// StatsHandler returns a ServerOption that sets the stats handler for the server.
+func StatsHandler(h stats.Handler) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+			return
+		}
+		o.statsHandlers = append(o.statsHandlers, h)
+	})
+}
+
+// binaryLogger returns a ServerOption that can set the binary logger for the
+// server.
+func binaryLogger(bl binarylog.Logger) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.binaryLogger = bl
+	})
+}
+
+// UnknownServiceHandler returns a ServerOption that allows for adding a custom
+// unknown service handler. The provided method is a bidi-streaming RPC service
+// handler that will be invoked instead of returning the "unimplemented" gRPC
+// error whenever a request is received for an unregistered service or method.
+// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { +	return newFuncServerOption(func(o *serverOptions) { +		o.unknownStreamDesc = &StreamDesc{ +			StreamName: "unknown_service_handler", +			Handler:    streamHandler, +			// We need to assume that the users of the streamHandler will want to use both. +			ClientStreams: true, +			ServerStreams: true, +		} +	}) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections.  If this is not set, the default is 120 seconds.  A zero or +// negative value will result in an immediate timeout. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { +	return newFuncServerOption(func(o *serverOptions) { +		o.connectionTimeout = d +	}) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { +	return newFuncServerOption(func(o *serverOptions) { +		o.maxHeaderListSize = &s +	}) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { +	return newFuncServerOption(func(o *serverOptions) { +		o.headerTableSize = &s +	}) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. 
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func NumStreamWorkers(numServerWorkers uint32) ServerOption {
	// TODO: If/when this API gets stabilized (i.e. stream workers become the
	// only way streams are processed), change the behavior of the zero value to
	// a sane default. Preliminary experiments suggest that a value equal to the
	// number of CPUs available is most performant; requires thorough testing.
	return newFuncServerOption(func(o *serverOptions) {
		o.numServerWorkers = numServerWorkers
	})
}

// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
// each goroutine stack to live for at least a few seconds in a typical
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16

// serverWorkers blocks on a *transport.Stream channel forever and waits for
// data to be fed by serveStreams. This allows different requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
//
// [1] https://github.com/golang/go/issues/18138
func (s *Server) serverWorker(ch chan *serverWorkerData) {
	// To make sure all server workers don't reset at the same time, choose a
	// random number of iterations before resetting.
	threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
	for completed := 0; completed < threshold; completed++ {
		data, ok := <-ch
		if !ok {
			// Channel closed by stopServerWorkers: the worker exits.
			return
		}
		s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
		data.wg.Done()
	}
	// Threshold reached: replace this worker with a fresh goroutine so the
	// (possibly large) stack of this one can be reclaimed.
	go s.serverWorker(ch)
}

// initServerWorkers creates worker goroutines and channels to process incoming
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
	s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
		s.serverWorkerChannels[i] = make(chan *serverWorkerData)
		go s.serverWorker(s.serverWorkerChannels[i])
	}
}

// stopServerWorkers closes all worker channels; each serverWorker goroutine
// terminates when it observes its channel closed.
func (s *Server) stopServerWorkers() {
	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
		close(s.serverWorkerChannels[i])
	}
}

// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
	// Apply global extra options first so caller-provided options win.
	opts := defaultServerOptions
	for _, o := range extraServerOptions {
		o.apply(&opts)
	}
	for _, o := range opt {
		o.apply(&opts)
	}
	s := &Server{
		lis:      make(map[net.Listener]bool),
		opts:     opts,
		conns:    make(map[string]map[transport.ServerTransport]bool),
		services: make(map[string]*serviceInfo),
		quit:     grpcsync.NewEvent(),
		done:     grpcsync.NewEvent(),
		czData:   new(channelzData),
	}
	// Collapse any configured interceptor chains into the single
	// opts.unaryInt / opts.streamInt slots used on the hot path.
	chainUnaryServerInterceptors(s)
	chainStreamServerInterceptors(s)
	s.cv = sync.NewCond(&s.mu)
	if EnableTracing {
		_, file, line, _ := runtime.Caller(1)
		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
	}

	if s.opts.numServerWorkers > 0 {
		s.initServerWorkers()
	}

	s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
	channelz.Info(logger, s.channelzID, "Server created")
	return s
}

// printf records an event in s's event log, unless s has been stopped.
// REQUIRES s.mu is held.
func (s *Server) printf(format string, a ...interface{}) {
	if s.events != nil {
		s.events.Printf(format, a...)
	}
}

// errorf records an error in s's event log, unless s has been stopped.
// REQUIRES s.mu is held.
func (s *Server) errorf(format string, a ...interface{}) {
	if s.events != nil {
		s.events.Errorf(format, a...)
	}
}

// ServiceRegistrar wraps a single method that supports service registration. It
// enables users to pass concrete types other than grpc.Server to the service
// registration methods exported by the IDL generated code.
type ServiceRegistrar interface {
	// RegisterService registers a service and its implementation to the
	// concrete type implementing this interface.  It may not be called
	// once the server has started serving.
	// desc describes the service and its methods and handlers. impl is the
	// service implementation which is passed to the method handlers.
	RegisterService(desc *ServiceDesc, impl interface{})
}

// RegisterService registers a service and its implementation to the gRPC
// server. It is called from the IDL generated code. This must be called before
// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
// ensure it implements sd.HandlerType.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
	if ss != nil {
		ht := reflect.TypeOf(sd.HandlerType).Elem()
		st := reflect.TypeOf(ss)
		if !st.Implements(ht) {
			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
		}
	}
	s.register(sd, ss)
}

// register records sd/ss in s.services under s.mu. It is fatal to register
// after Serve has been called or to register the same service name twice.
func (s *Server) register(sd *ServiceDesc, ss interface{}) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.printf("RegisterService(%q)", sd.ServiceName)
	if s.serve {
		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
	}
	if _, ok := s.services[sd.ServiceName]; ok {
		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
	}
	info := &serviceInfo{
		serviceImpl: ss,
		methods:     make(map[string]*MethodDesc),
		streams:     make(map[string]*StreamDesc),
		mdata:       sd.Metadata,
	}
	for i := range sd.Methods {
		d := &sd.Methods[i]
		info.methods[d.MethodName] = d
	}
	for i := range sd.Streams {
		d := &sd.Streams[i]
		info.streams[d.StreamName] = d
	}
	s.services[sd.ServiceName] = info
}

// MethodInfo contains the information of an RPC including its method name and type.
type MethodInfo struct {
	// Name is the method name only, without the service name or package name.
	Name string
	// IsClientStream indicates whether the RPC is a client streaming RPC.
	IsClientStream bool
	// IsServerStream indicates whether the RPC is a server streaming RPC.
	IsServerStream bool
}

// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
type ServiceInfo struct {
	Methods []MethodInfo
	// Metadata is the metadata specified in ServiceDesc when registering service.
	Metadata interface{}
}

// GetServiceInfo returns a map from service names to ServiceInfo.
// Service names include the package names, in the form of <package>.<service>.
func (s *Server) GetServiceInfo() map[string]ServiceInfo {
	ret := make(map[string]ServiceInfo)
	for n, srv := range s.services {
		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
		for m := range srv.methods {
			// Unary methods: neither direction streams.
			methods = append(methods, MethodInfo{
				Name:           m,
				IsClientStream: false,
				IsServerStream: false,
			})
		}
		for m, d := range srv.streams {
			methods = append(methods, MethodInfo{
				Name:           m,
				IsClientStream: d.ClientStreams,
				IsServerStream: d.ServerStreams,
			})
		}

		ret[n] = ServiceInfo{
			Methods:  methods,
			Metadata: srv.mdata,
		}
	}
	return ret
}

// ErrServerStopped indicates that the operation is now illegal because of
// the server being stopped.
var ErrServerStopped = errors.New("grpc: the server has been stopped")

// listenSocket wraps a net.Listener with its channelz identifier so the
// socket can be reported to and removed from channelz.
type listenSocket struct {
	net.Listener
	channelzID *channelz.Identifier
}

// ChannelzMetric reports this listen socket's channelz metrics.
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
	return &channelz.SocketInternalMetric{
		SocketOptions: channelz.GetSocketOption(l.Listener),
		LocalAddr:     l.Listener.Addr(),
	}
}

// Close closes the underlying listener and unregisters it from channelz.
func (l *listenSocket) Close() error {
	err := l.Listener.Close()
	channelz.RemoveEntry(l.channelzID)
	channelz.Info(logger, l.channelzID, "ListenSocket deleted")
	return err
}

// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
// this method returns.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	s.printf("serving")
	s.serve = true
	if s.lis == nil {
		// Serve called after Stop or GracefulStop.
		s.mu.Unlock()
		lis.Close()
		return ErrServerStopped
	}

	s.serveWG.Add(1)
	defer func() {
		s.serveWG.Done()
		if s.quit.HasFired() {
			// Stop or GracefulStop called; block until done and return nil.
			<-s.done.Done()
		}
	}()

	ls := &listenSocket{Listener: lis}
	s.lis[ls] = true

	defer func() {
		s.mu.Lock()
		if s.lis != nil && s.lis[ls] {
			ls.Close()
			delete(s.lis, ls)
		}
		s.mu.Unlock()
	}()

	var err error
	ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
	if err != nil {
		s.mu.Unlock()
		return err
	}
	s.mu.Unlock()
	channelz.Info(logger, ls.channelzID, "ListenSocket created")

	var tempDelay time.Duration // how long to sleep on accept failure
	for {
		rawConn, err := lis.Accept()
		if err != nil {
			if ne, ok := err.(interface {
				Temporary() bool
			}); ok && ne.Temporary() {
				// Temporary accept failure: back off exponentially, capped at
				// one second, and retry.
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				s.mu.Lock()
				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
				s.mu.Unlock()
				timer := time.NewTimer(tempDelay)
				select {
				case <-timer.C:
				case <-s.quit.Done():
					timer.Stop()
					return nil
				}
				continue
			}
			s.mu.Lock()
			s.printf("done serving; Accept = %v", err)
			s.mu.Unlock()

			if s.quit.HasFired() {
				return nil
			}
			return err
		}
		tempDelay = 0
		// Start a new goroutine to deal with rawConn so we don't stall this Accept
		// loop goroutine.
		//
		// Make sure we account for the goroutine so GracefulStop doesn't nil out
		// s.conns before this conn can be added.
		s.serveWG.Add(1)
		go func() {
			s.handleRawConn(lis.Addr().String(), rawConn)
			s.serveWG.Done()
		}()
	}
}

// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
	if s.quit.HasFired() {
		rawConn.Close()
		return
	}
	// Bound the entire handshake by the configured connection timeout.
	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))

	// Finish handshaking (HTTP2)
	st := s.newHTTP2Transport(rawConn)
	rawConn.SetDeadline(time.Time{})
	if st == nil {
		return
	}

	if !s.addConn(lisAddr, st) {
		return
	}
	go func() {
		s.serveStreams(st)
		s.removeConn(lisAddr, st)
	}()
}

// drainServerTransports drains every ServerTransport accepted on the given
// listener address.
func (s *Server) drainServerTransports(addr string) {
	s.mu.Lock()
	conns := s.conns[addr]
	for st := range conns {
		st.Drain()
	}
	s.mu.Unlock()
}

// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
	config := &transport.ServerConfig{
		MaxStreams:            s.opts.maxConcurrentStreams,
		ConnectionTimeout:     s.opts.connectionTimeout,
		Credentials:           s.opts.creds,
		InTapHandle:           s.opts.inTapHandle,
		StatsHandlers:         s.opts.statsHandlers,
		KeepaliveParams:       s.opts.keepaliveParams,
		KeepalivePolicy:       s.opts.keepalivePolicy,
		InitialWindowSize:     s.opts.initialWindowSize,
		InitialConnWindowSize: s.opts.initialConnWindowSize,
		WriteBufferSize:       s.opts.writeBufferSize,
		ReadBufferSize:        s.opts.readBufferSize,
		ChannelzParentID:      s.channelzID,
		MaxHeaderListSize:     s.opts.maxHeaderListSize,
		HeaderTableSize:       s.opts.headerTableSize,
	}
	st, err := transport.NewServerTransport(c, config)
	if err != nil {
		s.mu.Lock()
		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
		s.mu.Unlock()
		// ErrConnDispatched means that the connection was dispatched away from
		// gRPC; those connections should be left open.
		if err != credentials.ErrConnDispatched {
			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
			if err != io.EOF {
				channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
			}
			c.Close()
		}
		return nil
	}

	return st
}

// serveStreams dispatches every incoming stream on st to a handler goroutine
// (or a pre-spawned stream worker, if configured) and blocks until all of
// them have completed.
func (s *Server) serveStreams(st transport.ServerTransport) {
	defer st.Close(errors.New("finished serving streams for the server transport"))
	var wg sync.WaitGroup

	var roundRobinCounter uint32
	st.HandleStreams(func(stream *transport.Stream) {
		wg.Add(1)
		if s.opts.numServerWorkers > 0 {
			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
			select {
			case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
			default:
				// If all stream workers are busy, fallback to the default code path.
				go func() {
					s.handleStream(st, stream, s.traceInfo(st, stream))
					wg.Done()
				}()
			}
		} else {
			go func() {
				defer wg.Done()
				s.handleStream(st, stream, s.traceInfo(st, stream))
			}()
		}
	}, func(ctx context.Context, method string) context.Context {
		if !EnableTracing {
			return ctx
		}
		tr := trace.New("grpc.Recv."+methodFamily(method), method)
		return trace.NewContext(ctx, tr)
	})
	wg.Wait()
}

var _ http.Handler = (*Server)(nil)

// ServeHTTP implements the Go standard library's http.Handler
// interface by responding to the gRPC request r, by looking up
// the requested gRPC method in the gRPC server s.
//
// The provided HTTP request must have arrived on an HTTP/2
// connection. When using the Go standard library's server,
// practically this means that the Request must also have arrived
// over TLS.
//
// To share one port (such as 443 for https) between gRPC and an
// existing http.Handler, use a root http.Handler such as:
//
//	if r.ProtoMajor == 2 && strings.HasPrefix(
//		r.Header.Get("Content-Type"), "application/grpc") {
//		grpcServer.ServeHTTP(w, r)
//	} else {
//		yourMux.ServeHTTP(w, r)
//	}
//
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
// available through grpc-go's HTTP/2 server.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
	if err != nil {
		// Errors returned from transport.NewServerHandlerTransport have
		// already been written to w.
		return
	}
	if !s.addConn(listenerAddressForServeHTTP, st) {
		return
	}
	defer s.removeConn(listenerAddressForServeHTTP, st)
	s.serveStreams(st)
}

// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
// If tracing is not enabled, it returns nil.
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
	if !EnableTracing {
		return nil
	}
	tr, ok := trace.FromContext(stream.Context())
	if !ok {
		return nil
	}

	trInfo = &traceInfo{
		tr: tr,
		firstLine: firstLine{
			client:     false,
			remoteAddr: st.RemoteAddr(),
		},
	}
	if dl, ok := stream.Context().Deadline(); ok {
		trInfo.firstLine.deadline = time.Until(dl)
	}
	return trInfo
}

// addConn records st under the given listener address. It returns false if
// the server has already been stopped (in which case st is closed here).
func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.conns == nil {
		st.Close(errors.New("Server.addConn called when server has already been stopped"))
		return false
	}
	if s.drain {
		// Transport added after we drained our existing conns: drain it
		// immediately.
		st.Drain()
	}

	if s.conns[addr] == nil {
		// Create a map entry if this is the first connection on this listener.
		s.conns[addr] = make(map[transport.ServerTransport]bool)
	}
	s.conns[addr][st] = true
	return true
}

// removeConn removes st from the set of live connections for addr and wakes
// up any goroutine waiting on s.cv (e.g. GracefulStop).
func (s *Server) removeConn(addr string, st transport.ServerTransport) {
	s.mu.Lock()
	defer s.mu.Unlock()

	conns := s.conns[addr]
	if conns != nil {
		delete(conns, st)
		if len(conns) == 0 {
			// If the last connection for this address is being removed, also
			// remove the map entry corresponding to the address. This is used
			// in GracefulStop() when waiting for all connections to be closed.
			delete(s.conns, addr)
		}
		s.cv.Broadcast()
	}
}

// channelzMetric snapshots the server-level channelz call counters.
func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
	return &channelz.ServerInternalMetric{
		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
	}
}

// incrCallsStarted atomically bumps the started-call counter and records the
// start timestamp for channelz.
func (s *Server) incrCallsStarted() {
	atomic.AddInt64(&s.czData.callsStarted, 1)
	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
}

// incrCallsSucceeded atomically bumps the succeeded-call counter for channelz.
func (s *Server) incrCallsSucceeded() {
	atomic.AddInt64(&s.czData.callsSucceeded, 1)
}

// incrCallsFailed atomically bumps the failed-call counter for channelz.
func (s *Server) incrCallsFailed() {
	atomic.AddInt64(&s.czData.callsFailed, 1)
}

// sendResponse encodes, optionally compresses, and writes msg on stream,
// enforcing maxSendMessageSize and notifying stats handlers on success.
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
	if err != nil {
		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
		return err
	}
	compData, err := compress(data, cp, comp)
	if err != nil {
		channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
		return err
	}
	hdr, payload := msgHeader(data, compData)
	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > s.opts.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
	}
	err = t.Write(stream, hdr, payload, opts)
	if err == nil {
		for _, sh := range s.opts.statsHandlers {
			sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
		}
	}
	return err
}

// chainUnaryServerInterceptors chains all unary server interceptors into one.
func chainUnaryServerInterceptors(s *Server) {
	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
	// be executed before any other chained interceptors.
	interceptors := s.opts.chainUnaryInts
	if s.opts.unaryInt != nil {
		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
	}

	var chainedInt UnaryServerInterceptor
	if len(interceptors) == 0 {
		chainedInt = nil
	} else if len(interceptors) == 1 {
		chainedInt = interceptors[0]
	} else {
		chainedInt = chainUnaryInterceptors(interceptors)
	}

	s.opts.unaryInt = chainedInt
}

// chainUnaryInterceptors combines two or more unary interceptors into a
// single interceptor that invokes them in order.
func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
		return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
	}
}

// getChainUnaryHandler recursively builds the handler that interceptor curr
// should invoke: the next interceptor in the chain, or finalHandler at the end.
func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
	if curr == len(interceptors)-1 {
		return finalHandler
	}
	return func(ctx context.Context, req interface{}) (interface{}, error) {
		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
	}
}

// processUnaryRPC receives a single request message on stream, invokes the
// registered unary handler (through any configured interceptor), and writes
// the response and status, updating stats/trace/channelz/binlog throughout.
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
	shs := s.opts.statsHandlers
	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
		if channelz.IsOn() {
			s.incrCallsStarted()
		}
		var statsBegin *stats.Begin
		for _, sh := range shs {
			beginTime := time.Now()
			statsBegin = &stats.Begin{
				BeginTime:      beginTime,
				IsClientStream: false,
				IsServerStream: false,
			}
			sh.HandleRPC(stream.Context(), statsBegin)
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&trInfo.firstLine, false)
		}
		// The deferred error handling for tracing, stats handler and channelz are
		// combined into one function to reduce stack usage -- a defer takes ~56-64
		// bytes on the stack, so overflowing the stack will require a stack
		// re-allocation, which is expensive.
		//
		// To maintain behavior similar to separate deferred statements, statements
		// should be executed in the reverse order. That is, tracing first, stats
		// handler second, and channelz last. Note that panics *within* defers will
		// lead to different behavior, but that's an acceptable compromise; that
		// would be undefined behavior territory anyway.
		defer func() {
			if trInfo != nil {
				if err != nil && err != io.EOF {
					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					trInfo.tr.SetError()
				}
				trInfo.tr.Finish()
			}

			for _, sh := range shs {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				sh.HandleRPC(stream.Context(), end)
			}

			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}
	var binlogs []binarylog.MethodLogger
	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
		binlogs = append(binlogs, ml)
	}
	if s.opts.binaryLogger != nil {
		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
			binlogs = append(binlogs, ml)
		}
	}
	if len(binlogs) != 0 {
		ctx := stream.Context()
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ctx); ok {
			logEntry.PeerAddr = peer.Addr
		}
		for _, binlog := range binlogs {
			binlog.Log(logEntry)
		}
	}

	// comp and cp are used for compression.  decomp and dc are used for
	// decompression.  If comp and decomp are both set, they are the same;
	// however they are kept separate to ensure that at most one of the
	// compressor/decompressor variable pairs are set for use later.
	var comp, decomp encoding.Compressor
	var cp Compressor
	var dc Decompressor

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		decomp = encoding.GetCompressor(rc)
		if decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			t.WriteStatus(stream, st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		cp = s.opts.cp
		stream.SetSendCompress(cp.Type())
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		comp = encoding.GetCompressor(rc)
		if comp != nil {
			stream.SetSendCompress(rc)
		}
	}

	var payInfo *payloadInfo
	if len(shs) != 0 || len(binlogs) != 0 {
		payInfo = &payloadInfo{}
	}
	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
	if err != nil {
		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
		}
		return err
	}
	if channelz.IsOn() {
		t.IncrMsgRecv()
	}
	df := func(v interface{}) error {
		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
		}
		for _, sh := range shs {
			sh.HandleRPC(stream.Context(), &stats.InPayload{
				RecvTime:   time.Now(),
				Payload:    v,
				WireLength: payInfo.wireLength + headerLen,
				Data:       d,
				Length:     len(d),
			})
		}
		if len(binlogs) != 0 {
			cm := &binarylog.ClientMessage{
				Message: d,
			}
			for _, binlog := range binlogs {
				binlog.Log(cm)
			}
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
		}
		return nil
	}
	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
	if appErr != nil {
		appStatus, ok := status.FromError(appErr)
		if !ok {
			// Convert non-status application error to a status error with code
			// Unknown, but handle context errors specifically.
			appStatus = status.FromContextError(appErr)
			appErr = appStatus.Err()
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
			trInfo.tr.SetError()
		}
		if e := t.WriteStatus(stream, appStatus); e != nil {
			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
		}
		if len(binlogs) != 0 {
			if h, _ := stream.Header(); h.Len() > 0 {
				// Only log serverHeader if there was header. Otherwise it can
				// be trailer only.
				sh := &binarylog.ServerHeader{
					Header: h,
				}
				for _, binlog := range binlogs {
					binlog.Log(sh)
				}
			}
			st := &binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			}
			for _, binlog := range binlogs {
				binlog.Log(st)
			}
		}
		return appErr
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(stringer("OK"), false)
	}
	opts := &transport.Options{Last: true}

	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
		if err == io.EOF {
			// The entire stream is done (for unary RPC only).
			return err
		}
		if sts, ok := status.FromError(err); ok {
			if e := t.WriteStatus(stream, sts); e != nil {
				channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
			}
		} else {
			switch st := err.(type) {
			case transport.ConnectionError:
				// Nothing to do here.
			default:
				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
			}
		}
		if len(binlogs) != 0 {
			h, _ := stream.Header()
			sh := &binarylog.ServerHeader{
				Header: h,
			}
			st := &binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			}
			for _, binlog := range binlogs {
				binlog.Log(sh)
				binlog.Log(st)
			}
		}
		return err
	}
	if len(binlogs) != 0 {
		h, _ := stream.Header()
		sh := &binarylog.ServerHeader{
			Header: h,
		}
		sm := &binarylog.ServerMessage{
			Message: reply,
		}
		for _, binlog := range binlogs {
			binlog.Log(sh)
			binlog.Log(sm)
		}
	}
	if channelz.IsOn() {
		t.IncrMsgSent()
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
	}
	// TODO: Should we be logging if writing status failed here, like above?
	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
	// error or allow the stats handler to see it?
	err = t.WriteStatus(stream, statusOK)
	if len(binlogs) != 0 {
		st := &binarylog.ServerTrailer{
			Trailer: stream.Trailer(),
			Err:     appErr,
		}
		for _, binlog := range binlogs {
			binlog.Log(st)
		}
	}
	return err
}

// chainStreamServerInterceptors chains all stream server interceptors into one.
func chainStreamServerInterceptors(s *Server) {
	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
	// be executed before any other chained interceptors.
	interceptors := s.opts.chainStreamInts
	if s.opts.streamInt != nil {
		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
	}

	var chainedInt StreamServerInterceptor
	if len(interceptors) == 0 {
		chainedInt = nil
	} else if len(interceptors) == 1 {
		chainedInt = interceptors[0]
	} else {
		chainedInt = chainStreamInterceptors(interceptors)
	}

	s.opts.streamInt = chainedInt
}

// chainStreamInterceptors combines two or more stream interceptors into a
// single interceptor that invokes them in order.
func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
	}
}

// getChainStreamHandler recursively builds the handler that interceptor curr
// should invoke: the next interceptor in the chain, or finalHandler at the end.
func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
	if curr == len(interceptors)-1 {
		return finalHandler
	}
	return func(srv interface{}, stream ServerStream) error {
		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
	}
}

// processStreamingRPC runs a streaming RPC on stream: it builds the
// serverStream, invokes the registered handler (through any configured stream
// interceptor), and reports to stats/trace/channelz/binlog as it goes.
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
	if channelz.IsOn() {
		s.incrCallsStarted()
	}
	shs := s.opts.statsHandlers
	var statsBegin *stats.Begin
	if len(shs) != 0 {
		beginTime := time.Now()
		statsBegin = &stats.Begin{
			BeginTime:      beginTime,
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		for _, sh := range shs {
			sh.HandleRPC(stream.Context(), statsBegin)
		}
	}
	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
	ss := &serverStream{
		ctx:                   ctx,
		t:                     t,
		s:                     stream,
		p:                     &parser{r: stream},
		codec:                 s.getCodec(stream.ContentSubtype()),
		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
		maxSendMessageSize:    s.opts.maxSendMessageSize,
		trInfo:                trInfo,
		statsHandler:          shs,
	}

	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
		// See comment in processUnaryRPC on defers.
		defer func() {
			if trInfo != nil {
				ss.mu.Lock()
				if err != nil && err != io.EOF {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
				ss.trInfo.tr.Finish()
				ss.trInfo.tr = nil
				ss.mu.Unlock()
			}

			if len(shs) != 0 {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				for _, sh := range shs {
					sh.HandleRPC(stream.Context(), end)
				}
			}

			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}

	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
		ss.binlogs = append(ss.binlogs, ml)
	}
	if s.opts.binaryLogger != nil {
		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
			ss.binlogs = append(ss.binlogs, ml)
		}
	}
	if len(ss.binlogs) != 0 {
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ss.Context()); ok {
			logEntry.PeerAddr = peer.Addr
		}
		for _, binlog := range ss.binlogs {
			binlog.Log(logEntry)
		}
	}

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		ss.dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		ss.decomp = encoding.GetCompressor(rc)
		if ss.decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			t.WriteStatus(ss.s, st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		ss.cp = s.opts.cp
		stream.SetSendCompress(s.opts.cp.Type())
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		ss.comp = encoding.GetCompressor(rc)
		if ss.comp != nil {
			stream.SetSendCompress(rc)
		}
	}

	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)

	if trInfo != nil {
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
	}
	var appErr error
	var server interface{}
	if info != nil {
		server = info.serviceImpl
	}
	if s.opts.streamInt == nil {
		appErr = sd.Handler(server, ss)
	} else {
		info := &StreamServerInfo{
			FullMethod:     stream.Method(),
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		appErr = s.opts.streamInt(server, ss, info, sd.Handler)
	}
	if appErr != nil {
		appStatus, ok := status.FromError(appErr)
		if !ok {
			// Convert non-status application error to a status error with code
			// Unknown, but handle context errors specifically.
+			appStatus = status.FromContextError(appErr) +			appErr = appStatus.Err() +		} +		if trInfo != nil { +			ss.mu.Lock() +			ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) +			ss.trInfo.tr.SetError() +			ss.mu.Unlock() +		} +		t.WriteStatus(ss.s, appStatus) +		if len(ss.binlogs) != 0 { +			st := &binarylog.ServerTrailer{ +				Trailer: ss.s.Trailer(), +				Err:     appErr, +			} +			for _, binlog := range ss.binlogs { +				binlog.Log(st) +			} +		} +		// TODO: Should we log an error from WriteStatus here and below? +		return appErr +	} +	if trInfo != nil { +		ss.mu.Lock() +		ss.trInfo.tr.LazyLog(stringer("OK"), false) +		ss.mu.Unlock() +	} +	err = t.WriteStatus(ss.s, statusOK) +	if len(ss.binlogs) != 0 { +		st := &binarylog.ServerTrailer{ +			Trailer: ss.s.Trailer(), +			Err:     appErr, +		} +		for _, binlog := range ss.binlogs { +			binlog.Log(st) +		} +	} +	return err +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +	sm := stream.Method() +	if sm != "" && sm[0] == '/' { +		sm = sm[1:] +	} +	pos := strings.LastIndex(sm, "/") +	if pos == -1 { +		if trInfo != nil { +			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) +			trInfo.tr.SetError() +		} +		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) +		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +			if trInfo != nil { +				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +				trInfo.tr.SetError() +			} +			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) +		} +		if trInfo != nil { +			trInfo.tr.Finish() +		} +		return +	} +	service := sm[:pos] +	method := sm[pos+1:] + +	srv, knownService := s.services[service] +	if knownService { +		if md, ok := srv.methods[method]; ok { +			s.processUnaryRPC(t, stream, srv, md, trInfo) +			return +		} +		if sd, ok := srv.streams[method]; ok { +			
s.processStreamingRPC(t, stream, srv, sd, trInfo) +			return +		} +	} +	// Unknown service, or known server unknown method. +	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { +		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) +		return +	} +	var errDesc string +	if !knownService { +		errDesc = fmt.Sprintf("unknown service %v", service) +	} else { +		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) +	} +	if trInfo != nil { +		trInfo.tr.LazyPrintf("%s", errDesc) +		trInfo.tr.SetError() +	} +	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +		if trInfo != nil { +			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +			trInfo.tr.SetError() +		} +		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) +	} +	if trInfo != nil { +		trInfo.tr.Finish() +	} +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { +	return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type ServerTransportStream interface {
+	Method() string
+	SetHeader(md metadata.MD) error
+	SendHeader(md metadata.MD) error
+	SetTrailer(md metadata.MD) error
+}
+
+// ServerTransportStreamFromContext returns the ServerTransportStream saved in
+// ctx. Returns nil if the given context has no stream associated with it
+// (which implies it is not an RPC invocation context).
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
+	// A failed type assertion yields the interface zero value, so a context
+	// without a stream returns nil rather than panicking.
+	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
+	return s
+}
+
+// Stop stops the gRPC server. It immediately closes all open
+// connections and listeners.
+// It cancels all active RPCs on the server side and the corresponding
+// pending RPCs on the client side will get notified by connection
+// errors.
+func (s *Server) Stop() {
+	s.quit.Fire()
+
+	// On every return path, wait for in-flight Serve goroutines to exit
+	// before signaling completion via s.done.
+	defer func() {
+		s.serveWG.Wait()
+		s.done.Fire()
+	}()
+
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
+
+	// Detach the listener and connection sets under the lock; they are closed
+	// below without holding s.mu.
+	s.mu.Lock()
+	listeners := s.lis
+	s.lis = nil
+	conns := s.conns
+	s.conns = nil
+	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
+	s.cv.Broadcast()
+	s.mu.Unlock()
+
+	for lis := range listeners {
+		lis.Close()
+	}
+	for _, cs := range conns {
+		for st := range cs {
+			st.Close(errors.New("Server.Stop called"))
+		}
+	}
+	if s.opts.numServerWorkers > 0 {
+		s.stopServerWorkers()
+	}
+
+	s.mu.Lock()
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+	s.mu.Unlock()
+}
+
+// GracefulStop stops the gRPC server gracefully. It stops the server from
+// accepting new connections and RPCs and blocks until all the pending RPCs are
+// finished.
+func (s *Server) GracefulStop() {
+	s.quit.Fire()
+	defer s.done.Fire()
+
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
+	s.mu.Lock()
+	// s.conns == nil means the server was already stopped; nothing to do.
+	if s.conns == nil {
+		s.mu.Unlock()
+		return
+	}
+
+	for lis := range s.lis {
+		lis.Close()
+	}
+	s.lis = nil
+	if !s.drain {
+		for _, conns := range s.conns {
+			for st := range conns {
+				st.Drain()
+			}
+		}
+		s.drain = true
+	}
+
+	// Wait for serving threads to be ready to exit.  Only then can we be sure no
+	// new conns will be created.
+	s.mu.Unlock()
+	s.serveWG.Wait()
+	s.mu.Lock()
+
+	// Block until every transport has been removed; Stop interrupts this wait
+	// via s.cv.Broadcast when the two are invoked concurrently.
+	for len(s.conns) != 0 {
+		s.cv.Wait()
+	}
+	s.conns = nil
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+	s.mu.Unlock()
+}
+
+// getCodec returns the codec to use for the given content-subtype, falling
+// back to the registered proto codec when contentSubtype is empty or has no
+// registered codec.
+//
+// contentSubtype must be lowercase; the result is never nil.
+func (s *Server) getCodec(contentSubtype string) baseCodec {
+	if s.opts.codec != nil {
+		return s.opts.codec
+	}
+	if contentSubtype == "" {
+		return encoding.GetCodec(proto.Name)
+	}
+	codec := encoding.GetCodec(contentSubtype)
+	if codec == nil {
+		return encoding.GetCodec(proto.Name)
+	}
+	return codec
+}
+
+// SetHeader sets the header metadata to be sent from the server to the client.
+// The context provided must be the context passed to the server's handler.
+//
+// Streaming RPCs should prefer the SetHeader method of the ServerStream.
+//
+// When called multiple times, all the provided metadata will be merged.  All
+// the metadata will be sent out when one of the following happens:
+//
+//   - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
+//   - The first response message is sent.  For unary handlers, this occurs when
+//     the handler returns; for streaming handlers, this can happen when stream's
+//     SendMsg method is called.
+//   - An RPC status is sent out (error or success).  This occurs when the handler
+//     returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetHeader(ctx context.Context, md metadata.MD) error {
+	// Setting no metadata is a no-op.
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetHeader(md)
+}
+
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list).  The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	// Convert transport-level errors into status-compatible RPC errors.
+	if err := stream.SendHeader(md); err != nil {
+		return toRPCErr(err)
+	}
+	return nil
+}
+
+// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
+// When called more than once, all the provided metadata will be merged.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+	// Setting no metadata is a no-op.
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetTrailer(md)
+}
+
+// Method returns the method string for the server context.  The returned
+// string is in the format of "/service/method".
+func Method(ctx context.Context) (string, bool) {
+	s := ServerTransportStreamFromContext(ctx)
+	if s == nil {
+		return "", false
+	}
+	return s.Method(), true
+}
+
+// channelzServer wraps a *Server so its channelz metrics can be exposed
+// through the ChannelzMetric accessor.
+type channelzServer struct {
+	s *Server
+}
+
+// ChannelzMetric returns the wrapped server's current channelz metrics.
+func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
+	return c.s.channelzMetric()
+}
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
new file mode 100644
index 000000000..f22acace4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -0,0 +1,406 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+// maxInt is the largest value representable by int on this platform.
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig = internalserviceconfig.MethodConfig
+
+// lbConfig pairs a load-balancing policy name with its parsed configuration.
+type lbConfig struct {
+	name string
+	cfg  serviceconfig.LoadBalancingConfig
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+	serviceconfig.Config
+
+	// LB is the load balancer the service providers recommends.  This is
+	// deprecated; lbConfigs is preferred.  If lbConfig and LB are both present,
+	// lbConfig will be used.
+	LB *string
+
+	// lbConfig is the service config's load balancing configuration.  If
+	// lbConfig and LB are both present, lbConfig will be used.
+	lbConfig *lbConfig
+
+	// Methods contains a map for the methods in this service.  If there is an
+	// exact match for a method (i.e. /service/method) in the map, use the
+	// corresponding MethodConfig.  If there's no exact match, look for the
+	// default config for the service (/service/) and use the corresponding
+	// MethodConfig if it exists.  Otherwise, the method has no MethodConfig to
+	// use.
+	Methods map[string]MethodConfig
+
+	// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+	// retry attempts and hedged RPCs when the client’s ratio of failures to
+	// successes exceeds a threshold.
+	//
+	// For each server name, the gRPC client will maintain a token_count which is
+	// initially set to maxTokens, and can take values between 0 and maxTokens.
+	//
+	// Every outgoing RPC (regardless of service or method invoked) will change
+	// token_count as follows:
+	//
+	//   - Every failed RPC will decrement the token_count by 1.
+	//   - Every successful RPC will increment the token_count by tokenRatio.
+	//
+	// If token_count is less than or equal to maxTokens / 2, then RPCs will not
+	// be retried and hedged RPCs will not be sent.
+	retryThrottling *retryThrottlingPolicy
+	// healthCheckConfig must be set as one of the requirement to enable LB channel
+	// health check.
+	healthCheckConfig *healthCheckConfig
+	// rawJSONString stores service config json string that get parsed into
+	// this service config struct.
+	rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+	// serviceName is the service name to use in the health-checking request.
+	ServiceName string
+}
+
+// jsonRetryPolicy is the JSON wire form of a method's retry policy as it
+// appears in a service config; convertRetryPolicy validates it and produces
+// the internal representation.
+type jsonRetryPolicy struct {
+	MaxAttempts          int
+	InitialBackoff       string
+	MaxBackoff           string
+	BackoffMultiplier    float64
+	RetryableStatusCodes []codes.Code
+}
+
+// retryThrottlingPolicy defines the go-native version of the retry throttling
+// policy defined by the service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryThrottlingPolicy struct {
+	// The number of tokens starts at maxTokens. The token_count will always be
+	// between 0 and maxTokens.
+	//
+	// This field is required and must be greater than zero.
+	MaxTokens float64
+	// The amount of tokens to add on each successful RPC. Typically this will
+	// be some number between 0 and 1, e.g., 0.1.
+	//
+	// This field is required and must be greater than zero. Up to 3 decimal
+	// places are supported.
+	TokenRatio float64
+}
+
+// parseDuration parses a JSON-encoded protobuf duration of the form
+// "123.456s": whole and/or fractional seconds with at most 9 fractional
+// digits and a mandatory "s" suffix. A nil input yields (nil, nil).
+func parseDuration(s *string) (*time.Duration, error) {
+	if s == nil {
+		return nil, nil
+	}
+	if !strings.HasSuffix(*s, "s") {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
+	if len(ss) > 2 {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
+	hasDigits := false
+	var d time.Duration
+	if len(ss[0]) > 0 {
+		i, err := strconv.ParseInt(ss[0], 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+		}
+		d = time.Duration(i) * time.Second
+		hasDigits = true
+	}
+	if len(ss) == 2 && len(ss[1]) > 0 {
+		if len(ss[1]) > 9 {
+			return nil, fmt.Errorf("malformed duration %q", *s)
+		}
+		f, err := strconv.ParseInt(ss[1], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+		}
+		// Pad the fractional part to 9 digits so it is in nanoseconds.
+		for i := 9; i > len(ss[1]); i-- {
+			f *= 10
+		}
+		d += time.Duration(f)
+		hasDigits = true
+	}
+	if !hasDigits {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+
+	return &d, nil
+}
+
+// jsonName identifies a method (or a whole service) in the JSON method
+// config; generatePath renders the lookup path it corresponds to.
+type jsonName struct {
+	Service string
+	Method  string
+}
+
+var (
+	errDuplicatedName             = errors.New("duplicated name")
+	errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
+)
+
+// generatePath renders the name as a config lookup path: "/Service/Method",
+// "/Service/" for a service-wide default, or "" when both fields are empty.
+// A method without a service is an error.
+func (j jsonName) generatePath() (string, error) {
+	if j.Service == "" {
+		if j.Method != "" {
+			return "", errEmptyServiceNonEmptyMethod
+		}
+		return "", nil
+	}
+	res := "/" + j.Service + "/"
+	if j.Method != "" {
+		res += j.Method
+	}
+	return res, nil
+}
+
+// TODO(lyuxuan): delete this 
struct after cleaning up old service config implementation.
+type jsonMC struct {
+	Name                    *[]jsonName
+	WaitForReady            *bool
+	Timeout                 *string
+	MaxRequestMessageBytes  *int64
+	MaxResponseMessageBytes *int64
+	RetryPolicy             *jsonRetryPolicy
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonSC struct {
+	LoadBalancingPolicy *string
+	LoadBalancingConfig *internalserviceconfig.BalancerConfig
+	MethodConfig        *[]jsonMC
+	RetryThrottling     *retryThrottlingPolicy
+	HealthCheckConfig   *healthCheckConfig
+}
+
+func init() {
+	internal.ParseServiceConfig = parseServiceConfig
+}
+
+// parseServiceConfig converts a service config JSON string into a
+// *serviceconfig.ParseResult carrying either a *ServiceConfig or an error;
+// exactly one of the two is set.
+func parseServiceConfig(js string) *serviceconfig.ParseResult {
+	if len(js) == 0 {
+		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
+	}
+	var rsc jsonSC
+	err := json.Unmarshal([]byte(js), &rsc)
+	if err != nil {
+		logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+		return &serviceconfig.ParseResult{Err: err}
+	}
+	sc := ServiceConfig{
+		LB:                rsc.LoadBalancingPolicy,
+		Methods:           make(map[string]MethodConfig),
+		retryThrottling:   rsc.RetryThrottling,
+		healthCheckConfig: rsc.HealthCheckConfig,
+		rawJSONString:     js,
+	}
+	if c := rsc.LoadBalancingConfig; c != nil {
+		sc.lbConfig = &lbConfig{
+			name: c.Name,
+			cfg:  c.Config,
+		}
+	}
+
+	if rsc.MethodConfig == nil {
+		return &serviceconfig.ParseResult{Config: &sc}
+	}
+
+	// paths tracks every method path seen so far to reject duplicates.
+	paths := map[string]struct{}{}
+	for _, m := range *rsc.MethodConfig {
+		if m.Name == nil {
+			continue
+		}
+		d, err := parseDuration(m.Timeout)
+		if err != nil {
+			logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+			return &serviceconfig.ParseResult{Err: err}
+		}
+
+		mc := MethodConfig{
+			WaitForReady: m.WaitForReady,
+			Timeout:      d,
+		}
+		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+			logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		// Clamp declared message-size limits to the platform's int range.
+		if m.MaxRequestMessageBytes != nil {
+			if *m.MaxRequestMessageBytes > int64(maxInt) {
+				mc.MaxReqSize = newInt(maxInt)
+			} else {
+				mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+			}
+		}
+		if m.MaxResponseMessageBytes != nil {
+			if *m.MaxResponseMessageBytes > int64(maxInt) {
+				mc.MaxRespSize = newInt(maxInt)
+			} else {
+				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+			}
+		}
+		for i, n := range *m.Name {
+			path, err := n.generatePath()
+			if err != nil {
+				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+
+			if _, ok := paths[path]; ok {
+				err = errDuplicatedName
+				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+			paths[path] = struct{}{}
+			sc.Methods[path] = mc
+		}
+	}
+
+	if sc.retryThrottling != nil {
+		if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
+		}
+		// NOTE(review): the message says "may not be negative", but the check
+		// also rejects zero (tr <= 0).
+		if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)}
+		}
+	}
+	return &serviceconfig.ParseResult{Config: &sc}
+}
+
+// convertRetryPolicy validates a jsonRetryPolicy and converts it to the
+// internal representation. A nil input or an illegal configuration yields
+// (nil, nil) — illegal configs are logged and ignored, not treated as
+// errors. MaxAttempts is capped at 5.
+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
+	if jrp == nil {
+		return nil, nil
+	}
+	ib, err := parseDuration(&jrp.InitialBackoff)
+	if err != nil {
+		return nil, err
+	}
+	mb, err := parseDuration(&jrp.MaxBackoff)
+	if err != nil {
+		return nil, err
+	}
+
+	if jrp.MaxAttempts <= 1 ||
+		*ib <= 0 ||
+		*mb <= 0 ||
+		jrp.BackoffMultiplier <= 0 ||
+		len(jrp.RetryableStatusCodes) == 0 {
+		logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+		return nil, nil
+	}
+
+	rp := &internalserviceconfig.RetryPolicy{
+		MaxAttempts:          jrp.MaxAttempts,
+		InitialBackoff:       *ib,
+		MaxBackoff:           *mb,
+		BackoffMultiplier:    jrp.BackoffMultiplier,
+		RetryableStatusCodes: make(map[codes.Code]bool),
+	}
+	if rp.MaxAttempts > 5 {
+		// TODO(retry): Make the max maxAttempts configurable.
+		rp.MaxAttempts = 5
+	}
+	for _, code := range jrp.RetryableStatusCodes {
+		rp.RetryableStatusCodes[code] = true
+	}
+	return rp, nil
+}
+
+// min returns the argument whose pointed-to value is smaller.
+func min(a, b *int) *int {
+	if *a < *b {
+		return a
+	}
+	return b
+}
+
+// getMaxSize resolves a message-size limit from the method config (mcMax),
+// the channel/server option (doptMax), and a default: the smaller of the two
+// when both are set, whichever one is set otherwise, else the default.
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+	if mcMax == nil && doptMax == nil {
+		return &defaultVal
+	}
+	if mcMax != nil && doptMax != nil {
+		return min(mcMax, doptMax)
+	}
+	if mcMax != nil {
+		return mcMax
+	}
+	return doptMax
+}
+
+// newInt returns a pointer to a copy of b.
+func newInt(b int) *int {
+	return &b
+}
+
+func init() {
+	internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two configs. The rawJSONString field is ignored,
+// because they may diff in white spaces.
+//
+// If any of them is NOT *ServiceConfig, return false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	aa, ok := a.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	bb, ok := b.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	// Blank out rawJSONString for the comparison and restore it afterwards.
+	aaRaw := aa.rawJSONString
+	aa.rawJSONString = ""
+	bbRaw := bb.rawJSONString
+	bb.rawJSONString = ""
+	defer func() {
+		aa.rawJSONString = aaRaw
+		bb.rawJSONString = bbRaw
+	}()
+	// Using reflect.DeepEqual instead of cmp.Equal because many balancer
+	// configs are unexported, and cmp.Equal cannot compare unexported fields
+	// from unexported structs.
+	return reflect.DeepEqual(aa, bb)
+}
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
new file mode 100644
index 000000000..35e7a20a0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig defines types and methods for operating on gRPC
+// service configs.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package serviceconfig
+
+// Config represents an opaque data structure holding a service config.
+type Config interface {
+	isServiceConfig()
+}
+
+// LoadBalancingConfig represents an opaque data structure holding a load
+// balancing config.
+type LoadBalancingConfig interface {
+	isLoadBalancingConfig()
+}
+
+// ParseResult contains a service config or an error.  Exactly one must be
+// non-nil.
+type ParseResult struct {
+	Config Config
+	Err    error
+}
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
new file mode 100644
index 000000000..dc03731e4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"context"
+	"net"
+)
+
+// ConnTagInfo defines the relevant information needed by connection context tagger.
+type ConnTagInfo struct {
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// RPCTagInfo defines the relevant information needed by RPC context tagger.
+type RPCTagInfo struct {
+	// FullMethodName is the RPC method in the format of /package.service/method.
+	FullMethodName string
+	// FailFast indicates if this RPC is failfast.
+	// This field is only valid on client side, it's always false on server side.
+	FailFast bool
+}
+
+// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
+type Handler interface {
+	// TagRPC can attach some information to the given context.
+	// The context used for the rest lifetime of the RPC will be derived from
+	// the returned context.
+	TagRPC(context.Context, *RPCTagInfo) context.Context
+	// HandleRPC processes the RPC stats.
+	HandleRPC(context.Context, RPCStats)
+
+	// TagConn can attach some information to the given context.
+	// The returned context will be used for stats handling.
+	// For conn stats handling, the context used in HandleConn for this
+	// connection will be derived from the context returned.
+	// For RPC stats handling,
+	//  - On server side, the context used in HandleRPC for all RPCs on this
+	// connection will be derived from the context returned.
+	//  - On client side, the context is not derived from the context returned.
+	TagConn(context.Context, *ConnTagInfo) context.Context
+	// HandleConn processes the Conn stats.
+	HandleConn(context.Context, ConnStats)
+}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
new file mode 100644
index 000000000..0285dcc6a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -0,0 +1,319 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats is for collecting and reporting various network and RPC stats.
+// This package is for monitoring purpose only. All fields are read-only.
+// All APIs are experimental.
+package stats // import "google.golang.org/grpc/stats"
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"google.golang.org/grpc/metadata"
+)
+
+// RPCStats contains stats information about RPCs.
+type RPCStats interface {
+	// isRPCStats is unexported, sealing the interface so that only types in
+	// this package can implement it.
+	isRPCStats()
+	// IsClient returns true if this RPCStats is from client side.
+	IsClient() bool
+}
+
+// Begin contains stats when an RPC attempt begins.
+// FailFast is only valid if this Begin is from client side.
+type Begin struct {
+	// Client is true if this Begin is from client side.
+	Client bool
+	// BeginTime is the time when the RPC attempt begins.
+	BeginTime time.Time
+	// FailFast indicates if this RPC is failfast.
+	FailFast bool
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+	// IsTransparentRetryAttempt indicates whether this attempt was initiated
+	// due to transparently retrying a previous attempt.
+	IsTransparentRetryAttempt bool
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *Begin) IsClient() bool { return s.Client }
+
+func (s *Begin) isRPCStats() {}
+
+// InPayload contains the information for an incoming payload.
+type InPayload struct {
+	// Client is true if this InPayload is from client side.
+	Client bool
+	// Payload is the payload with original type.
+	Payload interface{}
+	// Data is the serialized message payload.
+	Data []byte
+	// Length is the length of uncompressed data.
+	Length int
+	// WireLength is the length of data on wire (compressed, signed, encrypted).
+	WireLength int
+	// RecvTime is the time when the payload is received.
+	RecvTime time.Time
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InPayload) IsClient() bool { return s.Client }
+
+func (s *InPayload) isRPCStats() {}
+
+// InHeader contains stats when a header is received.
+type InHeader struct {
+	// Client is true if this InHeader is from client side.
+	Client bool
+	// WireLength is the wire length of header.
+	WireLength int
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata received.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is false.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InHeader) IsClient() bool { return s.Client }
+
+func (s *InHeader) isRPCStats() {}
+
+// InTrailer contains stats when a trailer is received.
+type InTrailer struct {
+	// Client is true if this InTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of trailer.
+	WireLength int
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this InTrailer is from the client side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InTrailer) IsClient() bool { return s.Client }
+
+func (s *InTrailer) isRPCStats() {}
+
+// OutPayload contains the information for an outgoing payload.
+type OutPayload struct {
+	// Client is true if this OutPayload is from client side.
+	Client bool
+	// Payload is the payload with original type.
+	Payload interface{}
+	// Data is the serialized message payload.
+	Data []byte
+	// Length is the length of uncompressed data.
+	Length int
+	// WireLength is the length of data on wire (compressed, signed, encrypted).
+	WireLength int
+	// SentTime is the time when the payload is sent.
+	SentTime time.Time
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutPayload) IsClient() bool { return s.Client }
+
+func (s *OutPayload) isRPCStats() {}
+
+// OutHeader contains stats when a header is sent.
+type OutHeader struct {
+	// Client is true if this OutHeader is from client side.
+	Client bool
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata sent.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is true.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutHeader) IsClient() bool { return s.Client }
+
+func (s *OutHeader) isRPCStats() {}
+
+// OutTrailer contains stats when a trailer is sent.
+type OutTrailer struct {
+	// Client is true if this OutTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of trailer.
+	//
+	// Deprecated: This field is never set. The length is not known when this message is
+	// emitted because the trailer fields are compressed with hpack after that.
+	WireLength int
+	// Trailer contains the trailer metadata sent to the client. This
+	// field is only valid if this OutTrailer is from the server side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutTrailer) IsClient() bool { return s.Client }
+
+func (s *OutTrailer) isRPCStats() {}
+
+// End contains stats when an RPC ends.
+type End struct {
+	// Client is true if this End is from client side.
+	Client bool
+	// BeginTime is the time when the RPC began.
+	BeginTime time.Time
+	// EndTime is the time when the RPC ends.
+	EndTime time.Time
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this End is from the client side.
+	// Deprecated: use Trailer in InTrailer instead.
+	Trailer metadata.MD
+	// Error is the error the RPC ended with. It is an error generated from
+	// status.Status and can be converted back to status.Status using
+	// status.FromError if non-nil.
+	Error error
+}
+
+// IsClient indicates if this is from client side.
+func (s *End) IsClient() bool { return s.Client }
+
+func (s *End) isRPCStats() {}
+
+// ConnStats contains stats information about connections.
+type ConnStats interface { +	isConnStats() +	// IsClient returns true if this ConnStats is from client side. +	IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { +	// Client is true if this ConnBegin is from client side. +	Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { +	// Client is true if this ConnEnd is from client side. +	Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin.  Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release.  New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { +	return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release.  New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
+func Tags(ctx context.Context) []byte { +	b, _ := ctx.Value(incomingTagsKey{}).([]byte) +	return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { +	return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { +	b, _ := ctx.Value(outgoingTagsKey{}).([]byte) +	return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin.  Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release.  New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { +	return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release.  New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { +	b, _ := ctx.Value(incomingTraceKey{}).([]byte) +	return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs).  It is intended for +// gRPC-internal use. 
+func SetIncomingTrace(ctx context.Context, b []byte) context.Context { +	return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC.  It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { +	b, _ := ctx.Value(outgoingTraceKey{}).([]byte) +	return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..623be39f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC.  These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto.  gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. 
+package status + +import ( +	"context" +	"errors" +	"fmt" + +	spb "google.golang.org/genproto/googleapis/rpc/status" + +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details.  It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { +	return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { +	return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg.  If c is OK, returns nil. +func Error(c codes.Code, msg string) error { +	return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { +	return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s.  If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { +	return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { +	return status.FromProto(s) +} + +// FromError returns a Status representation of err. +// +//   - If err was produced by this package or implements the method `GRPCStatus() +//     *Status`, the appropriate Status is returned. +// +//   - If err is nil, a Status is returned with codes.OK and no message. +// +//   - Otherwise, err is an error not compatible with this package.  In this +//     case, a Status is returned with codes.Unknown and err's Error() message, +//     and ok is false. 
+func FromError(err error) (s *Status, ok bool) { +	if err == nil { +		return nil, true +	} +	if se, ok := err.(interface { +		GRPCStatus() *Status +	}); ok { +		return se.GRPCStatus(), true +	} +	return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { +	s, _ := FromError(err) +	return s +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { +	// Don't use FromError to avoid allocation of OK status. +	if err == nil { +		return codes.OK +	} +	if se, ok := err.(interface { +		GRPCStatus() *Status +	}); ok { +		return se.GRPCStatus().Code() +	} +	return codes.Unknown +} + +// FromContextError converts a context error or wrapped context error into a +// Status.  It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. +func FromContextError(err error) *Status { +	if err == nil { +		return nil +	} +	if errors.Is(err, context.DeadlineExceeded) { +		return New(codes.DeadlineExceeded, err.Error()) +	} +	if errors.Is(err, context.Canceled) { +		return New(codes.Canceled, err.Error()) +	} +	return New(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..93231af2a --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1740 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"context" +	"errors" +	"io" +	"math" +	"strconv" +	"sync" +	"time" + +	"golang.org/x/net/trace" +	"google.golang.org/grpc/balancer" +	"google.golang.org/grpc/codes" +	"google.golang.org/grpc/encoding" +	"google.golang.org/grpc/internal/balancerload" +	"google.golang.org/grpc/internal/binarylog" +	"google.golang.org/grpc/internal/channelz" +	"google.golang.org/grpc/internal/grpcrand" +	"google.golang.org/grpc/internal/grpcutil" +	imetadata "google.golang.org/grpc/internal/metadata" +	iresolver "google.golang.org/grpc/internal/resolver" +	"google.golang.org/grpc/internal/serviceconfig" +	istatus "google.golang.org/grpc/internal/status" +	"google.golang.org/grpc/internal/transport" +	"google.golang.org/grpc/metadata" +	"google.golang.org/grpc/peer" +	"google.golang.org/grpc/stats" +	"google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification.  Used +// on the server when registering services and on the client when initiating +// new streams. 
+type StreamDesc struct { +	// StreamName and Handler are only used when registering handlers on a +	// server. +	StreamName string        // the name of the method excluding the service +	Handler    StreamHandler // the handler called for the method + +	// ServerStreams and ClientStreams are used for registering handlers on a +	// server as well as defining RPC behavior when passed to NewClientStream +	// and ClientConn.NewStream.  At least one must be true. +	ServerStreams bool // indicates the server can perform streaming sends +	ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { +	// Deprecated: See ClientStream and ServerStream documentation instead. +	Context() context.Context +	// Deprecated: See ClientStream and ServerStream documentation instead. +	SendMsg(m interface{}) error +	// Deprecated: See ClientStream and ServerStream documentation instead. +	RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { +	// Header returns the header metadata received from the server if there +	// is any. It blocks if the metadata is not ready to read. +	Header() (metadata.MD, error) +	// Trailer returns the trailer metadata from the server, if there is any. +	// It must only be called after stream.CloseAndRecv has returned, or +	// stream.Recv has returned a non-nil error (including io.EOF). +	Trailer() metadata.MD +	// CloseSend closes the send direction of the stream. It closes the stream +	// when non-nil error is met. It is also not safe to call CloseSend +	// concurrently with SendMsg. +	CloseSend() error +	// Context returns the context for this stream. 
+	// +	// It should not be called until after Header or RecvMsg has returned. Once +	// called, subsequent client-side retries are disabled. +	Context() context.Context +	// SendMsg is generally called by generated code. On error, SendMsg aborts +	// the stream. If the error was generated by the client, the status is +	// returned directly; otherwise, io.EOF is returned and the status of +	// the stream may be discovered using RecvMsg. +	// +	// SendMsg blocks until: +	//   - There is sufficient flow control to schedule m with the transport, or +	//   - The stream is done, or +	//   - The stream breaks. +	// +	// SendMsg does not wait until the message is received by the server. An +	// untimely stream closure may result in lost messages. To ensure delivery, +	// users should ensure the RPC completed successfully using RecvMsg. +	// +	// It is safe to have a goroutine calling SendMsg and another goroutine +	// calling RecvMsg on the same stream at the same time, but it is not safe +	// to call SendMsg on the same stream in different goroutines. It is also +	// not safe to call CloseSend concurrently with SendMsg. +	SendMsg(m interface{}) error +	// RecvMsg blocks until it receives a message into m or the stream is +	// done. It returns io.EOF when the stream completes successfully. On +	// any other error, the stream is aborted and the error contains the RPC +	// status. +	// +	// It is safe to have a goroutine calling SendMsg and another goroutine +	// calling RecvMsg on the same stream at the same time, but it is not +	// safe to call RecvMsg on the same stream in different goroutines. +	RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +//  1. Call Close on the ClientConn. +//  2. 
Cancel the context provided. +//  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +//     client-streaming RPC, for instance, might use the helper function +//     CloseAndRecv (note that CloseSend does not Recv, therefore is not +//     guaranteed to release all resources). +//  4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { +	// allow interceptor to see all applicable call options, which means those +	// configured as defaults from dial option as well as per-call options +	opts = combine(cc.dopts.callOptions, opts) + +	if cc.dopts.streamInt != nil { +		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) +	} +	return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { +	return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { +	if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { +		if err := imetadata.Validate(md); err != nil { +			return nil, status.Error(codes.Internal, err.Error()) +		} +	} +	if channelz.IsOn() { +		cc.incrCallsStarted() +		defer func() { +			if err != nil { +				cc.incrCallsFailed() +			} +		}() +	} +	// Provide an opportunity for the first RPC to see the first service config +	// provided by the resolver. 
+	if err := cc.waitForResolvedAddrs(ctx); err != nil { +		return nil, err +	} + +	var mc serviceconfig.MethodConfig +	var onCommit func() +	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { +		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) +	} + +	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} +	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) +	if err != nil { +		if st, ok := status.FromError(err); ok { +			// Restrict the code to the list allowed by gRFC A54. +			if istatus.IsRestrictedControlPlaneCode(st) { +				err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) +			} +			return nil, err +		} +		return nil, toRPCErr(err) +	} + +	if rpcConfig != nil { +		if rpcConfig.Context != nil { +			ctx = rpcConfig.Context +		} +		mc = rpcConfig.MethodConfig +		onCommit = rpcConfig.OnCommitted +		if rpcConfig.Interceptor != nil { +			rpcInfo.Context = nil +			ns := newStream +			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { +				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) +				if err != nil { +					return nil, toRPCErr(err) +				} +				return cs, nil +			} +		} +	} + +	return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { +	c := defaultCallInfo() +	if mc.WaitForReady != nil { +		c.failFast = !*mc.WaitForReady +	} + +	// Possible context leak: +	// The cancel function for the child context we create will only be called +	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if +	// an error is generated by SendMsg. +	// https://github.com/grpc/grpc-go/issues/1818. 
+	var cancel context.CancelFunc +	if mc.Timeout != nil && *mc.Timeout >= 0 { +		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) +	} else { +		ctx, cancel = context.WithCancel(ctx) +	} +	defer func() { +		if err != nil { +			cancel() +		} +	}() + +	for _, o := range opts { +		if err := o.before(c); err != nil { +			return nil, toRPCErr(err) +		} +	} +	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) +	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) +	if err := setCallInfoCodec(c); err != nil { +		return nil, err +	} + +	callHdr := &transport.CallHdr{ +		Host:           cc.authority, +		Method:         method, +		ContentSubtype: c.contentSubtype, +		DoneFunc:       doneFunc, +	} + +	// Set our outgoing compression according to the UseCompressor CallOption, if +	// set.  In that case, also find the compressor from the encoding package. +	// Otherwise, use the compressor configured by the WithCompressor DialOption, +	// if set. 
+	var cp Compressor +	var comp encoding.Compressor +	if ct := c.compressorType; ct != "" { +		callHdr.SendCompress = ct +		if ct != encoding.Identity { +			comp = encoding.GetCompressor(ct) +			if comp == nil { +				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) +			} +		} +	} else if cc.dopts.cp != nil { +		callHdr.SendCompress = cc.dopts.cp.Type() +		cp = cc.dopts.cp +	} +	if c.creds != nil { +		callHdr.Creds = c.creds +	} + +	cs := &clientStream{ +		callHdr:      callHdr, +		ctx:          ctx, +		methodConfig: &mc, +		opts:         opts, +		callInfo:     c, +		cc:           cc, +		desc:         desc, +		codec:        c.codec, +		cp:           cp, +		comp:         comp, +		cancel:       cancel, +		firstAttempt: true, +		onCommit:     onCommit, +	} +	if !cc.dopts.disableRetry { +		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) +	} +	if ml := binarylog.GetMethodLogger(method); ml != nil { +		cs.binlogs = append(cs.binlogs, ml) +	} +	if cc.dopts.binaryLogger != nil { +		if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { +			cs.binlogs = append(cs.binlogs, ml) +		} +	} + +	// Pick the transport to use and create a new stream on the transport. +	// Assign cs.attempt upon success. +	op := func(a *csAttempt) error { +		if err := a.getTransport(); err != nil { +			return err +		} +		if err := a.newStream(); err != nil { +			return err +		} +		// Because this operation is always called either here (while creating +		// the clientStream) or by the retry code while locked when replaying +		// the operation, it is safe to access cs.attempt directly. 
+		cs.attempt = a +		return nil +	} +	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { +		return nil, err +	} + +	if len(cs.binlogs) != 0 { +		md, _ := metadata.FromOutgoingContext(ctx) +		logEntry := &binarylog.ClientHeader{ +			OnClientSide: true, +			Header:       md, +			MethodName:   method, +			Authority:    cs.cc.authority, +		} +		if deadline, ok := ctx.Deadline(); ok { +			logEntry.Timeout = time.Until(deadline) +			if logEntry.Timeout < 0 { +				logEntry.Timeout = 0 +			} +		} +		for _, binlog := range cs.binlogs { +			binlog.Log(logEntry) +		} +	} + +	if desc != unaryStreamDesc { +		// Listen on cc and stream contexts to cleanup when the user closes the +		// ClientConn or cancels the stream context.  In all other cases, an error +		// should already be injected into the recv buffer by the transport, which +		// the client will eventually receive, and then we will cancel the stream's +		// context in clientStream.finish. +		go func() { +			select { +			case <-cc.ctx.Done(): +				cs.finish(ErrClientConnClosing) +			case <-ctx.Done(): +				cs.finish(toRPCErr(ctx.Err())) +			} +		}() +	} +	return cs, nil +} + +// newAttemptLocked creates a new csAttempt without a transport or stream. 
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { +	if err := cs.ctx.Err(); err != nil { +		return nil, toRPCErr(err) +	} +	if err := cs.cc.ctx.Err(); err != nil { +		return nil, ErrClientConnClosing +	} + +	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) +	method := cs.callHdr.Method +	var beginTime time.Time +	shs := cs.cc.dopts.copts.StatsHandlers +	for _, sh := range shs { +		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) +		beginTime = time.Now() +		begin := &stats.Begin{ +			Client:                    true, +			BeginTime:                 beginTime, +			FailFast:                  cs.callInfo.failFast, +			IsClientStream:            cs.desc.ClientStreams, +			IsServerStream:            cs.desc.ServerStreams, +			IsTransparentRetryAttempt: isTransparent, +		} +		sh.HandleRPC(ctx, begin) +	} + +	var trInfo *traceInfo +	if EnableTracing { +		trInfo = &traceInfo{ +			tr: trace.New("grpc.Sent."+methodFamily(method), method), +			firstLine: firstLine{ +				client: true, +			}, +		} +		if deadline, ok := ctx.Deadline(); ok { +			trInfo.firstLine.deadline = time.Until(deadline) +		} +		trInfo.tr.LazyLog(&trInfo.firstLine, false) +		ctx = trace.NewContext(ctx, trInfo.tr) +	} + +	if cs.cc.parsedTarget.URL.Scheme == "xds" { +		// Add extra metadata (metadata that will be added by transport) to context +		// so the balancer can see them. 
+	ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+		"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+	))
+
+	return &csAttempt{
+		ctx:           ctx,
+		beginTime:     beginTime,
+		cs:            cs,
+		dc:            cs.cc.dopts.dc,
+		statsHandlers: shs,
+		trInfo:        trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	var err error
+	a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
+		return err
+	}
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+	}
+	return nil
+}
+
+func (a *csAttempt) newStream() error {
+	cs := a.cs
+	cs.callHdr.PreviousAttempts = cs.numRetries
+
+	// Merge metadata stored in PickResult, if any, with existing call metadata.
+	// It is safe to overwrite the csAttempt's context here, since all state
+	// maintained in it is local to the attempt. When the attempt has to be
+	// retried, a new instance of csAttempt will be created.
+	if a.pickResult.Metatada != nil {
+		// We currently do not have a function in the metadata package which
+		// merges given metadata with existing metadata in a context. Existing
+		// function `AppendToOutgoingContext()` takes a variadic argument of key
+		// value pairs.
+		//
+		// TODO: Make it possible to retrieve key value pairs from metadata.MD
+		// in a form passable to AppendToOutgoingContext(), or create a version
+		// of AppendToOutgoingContext() that accepts a metadata.MD.
+		md, _ := metadata.FromOutgoingContext(a.ctx)
+		md = metadata.Join(md, a.pickResult.Metatada)
+		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+	}
+
+	s, err := a.t.NewStream(a.ctx, cs.callHdr)
+	if err != nil {
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
+			return err +		} + +		if nse.AllowTransparentRetry { +			a.allowTransparentRetry = true +		} + +		// Unwrap and convert error. +		return toRPCErr(nse.Err) +	} +	a.s = s +	a.p = &parser{r: s} +	return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { +	callHdr  *transport.CallHdr +	opts     []CallOption +	callInfo *callInfo +	cc       *ClientConn +	desc     *StreamDesc + +	codec baseCodec +	cp    Compressor +	comp  encoding.Compressor + +	cancel context.CancelFunc // cancels all attempts + +	sentLast bool // sent an end stream + +	methodConfig *MethodConfig + +	ctx context.Context // the application's context, wrapped by stats/tracing + +	retryThrottler *retryThrottler // The throttler active when the RPC began. + +	binlogs []binarylog.MethodLogger +	// serverHeaderBinlogged is a boolean for whether server header has been +	// logged. Server header will be logged when the first time one of those +	// happens: stream.Header(), stream.Recv(). +	// +	// It's only read and used by Recv() and Header(), so it doesn't need to be +	// synchronized. +	serverHeaderBinlogged bool + +	mu                      sync.Mutex +	firstAttempt            bool // if true, transparent retry is valid +	numRetries              int  // exclusive of transparent retry attempt(s) +	numRetriesSincePushback int  // retries since pushback; to reset backoff +	finished                bool // TODO: replace with atomic cmpxchg or sync.Once? +	// attempt is the active client stream attempt. +	// The only place where it is written is the newAttemptLocked method and this method never writes nil. +	// So, attempt can be nil only inside newClientStream function when clientStream is first created. +	// One of the first things done after clientStream's creation, is to call newAttemptLocked which either +	// assigns a non nil value to the attempt or returns an error. 
If an error is returned from newAttemptLocked, +	// then newClientStream calls finish on the clientStream and returns. So, finish method is the only +	// place where we need to check if the attempt is nil. +	attempt *csAttempt +	// TODO(hedging): hedging will have multiple attempts simultaneously. +	committed  bool // active attempt committed for retry? +	onCommit   func() +	buffer     []func(a *csAttempt) error // operations to replay on retry +	bufferSize int                        // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { +	ctx        context.Context +	cs         *clientStream +	t          transport.ClientTransport +	s          *transport.Stream +	p          *parser +	pickResult balancer.PickResult + +	finished  bool +	dc        Decompressor +	decomp    encoding.Compressor +	decompSet bool + +	mu sync.Mutex // guards trInfo.tr +	// trInfo may be nil (if EnableTracing is false). +	// trInfo.tr is set when created (if EnableTracing is true), +	// and cleared when the finish method is called. +	trInfo *traceInfo + +	statsHandlers []stats.Handler +	beginTime     time.Time + +	// set for newStream errors that may be transparently retried +	allowTransparentRetry bool +	// set for pick errors that are returned as a status +	drop bool +} + +func (cs *clientStream) commitAttemptLocked() { +	if !cs.committed && cs.onCommit != nil { +		cs.onCommit() +	} +	cs.committed = true +	cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { +	cs.mu.Lock() +	cs.commitAttemptLocked() +	cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation.  If the RPC should be +// retried, the bool indicates whether it is being retried transparently. 
+func (a *csAttempt) shouldRetry(err error) (bool, error) { +	cs := a.cs + +	if cs.finished || cs.committed || a.drop { +		// RPC is finished or committed or was dropped by the picker; cannot retry. +		return false, err +	} +	if a.s == nil && a.allowTransparentRetry { +		return true, nil +	} +	// Wait for the trailers. +	unprocessed := false +	if a.s != nil { +		<-a.s.Done() +		unprocessed = a.s.Unprocessed() +	} +	if cs.firstAttempt && unprocessed { +		// First attempt, stream unprocessed: transparently retry. +		return true, nil +	} +	if cs.cc.dopts.disableRetry { +		return false, err +	} + +	pushback := 0 +	hasPushback := false +	if a.s != nil { +		if !a.s.TrailersOnly() { +			return false, err +		} + +		// TODO(retry): Move down if the spec changes to not check server pushback +		// before considering this a failure for throttling. +		sps := a.s.Trailer()["grpc-retry-pushback-ms"] +		if len(sps) == 1 { +			var e error +			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { +				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) +				cs.retryThrottler.throttle() // This counts as a failure for throttling. +				return false, err +			} +			hasPushback = true +		} else if len(sps) > 1 { +			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) +			cs.retryThrottler.throttle() // This counts as a failure for throttling. +			return false, err +		} +	} + +	var code codes.Code +	if a.s != nil { +		code = a.s.Status().Code() +	} else { +		code = status.Code(err) +	} + +	rp := cs.methodConfig.RetryPolicy +	if rp == nil || !rp.RetryableStatusCodes[code] { +		return false, err +	} + +	// Note: the ordering here is important; we count this as a failure +	// only if the code matched a retryable code. 
+	if cs.retryThrottler.throttle() { +		return false, err +	} +	if cs.numRetries+1 >= rp.MaxAttempts { +		return false, err +	} + +	var dur time.Duration +	if hasPushback { +		dur = time.Millisecond * time.Duration(pushback) +		cs.numRetriesSincePushback = 0 +	} else { +		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) +		cur := float64(rp.InitialBackoff) * fact +		if max := float64(rp.MaxBackoff); cur > max { +			cur = max +		} +		dur = time.Duration(grpcrand.Int63n(int64(cur))) +		cs.numRetriesSincePushback++ +	} + +	// TODO(dfawley): we could eagerly fail here if dur puts us past the +	// deadline, but unsure if it is worth doing. +	t := time.NewTimer(dur) +	select { +	case <-t.C: +		cs.numRetries++ +		return false, nil +	case <-cs.ctx.Done(): +		t.Stop() +		return false, status.FromContextError(cs.ctx.Err()).Err() +	} +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { +	for { +		attempt.finish(toRPCErr(lastErr)) +		isTransparent, err := attempt.shouldRetry(lastErr) +		if err != nil { +			cs.commitAttemptLocked() +			return err +		} +		cs.firstAttempt = false +		attempt, err = cs.newAttemptLocked(isTransparent) +		if err != nil { +			// Only returns error if the clientconn is closed or the context of +			// the stream is canceled. +			return err +		} +		// Note that the first op in the replay buffer always sets cs.attempt +		// if it is able to pick a transport and create a stream. +		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { +			return nil +		} +	} +} + +func (cs *clientStream) Context() context.Context { +	cs.commitAttempt() +	// No need to lock before using attempt, since we know it is committed and +	// cannot change. 
+	if cs.attempt.s != nil { +		return cs.attempt.s.Context() +	} +	return cs.ctx +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { +	cs.mu.Lock() +	for { +		if cs.committed { +			cs.mu.Unlock() +			// toRPCErr is used in case the error from the attempt comes from +			// NewClientStream, which intentionally doesn't return a status +			// error to allow for further inspection; all other errors should +			// already be status errors. +			return toRPCErr(op(cs.attempt)) +		} +		if len(cs.buffer) == 0 { +			// For the first op, which controls creation of the stream and +			// assigns cs.attempt, we need to create a new attempt inline +			// before executing the first op.  On subsequent ops, the attempt +			// is created immediately before replaying the ops. +			var err error +			if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { +				cs.mu.Unlock() +				cs.finish(err) +				return err +			} +		} +		a := cs.attempt +		cs.mu.Unlock() +		err := op(a) +		cs.mu.Lock() +		if a != cs.attempt { +			// We started another attempt already. +			continue +		} +		if err == io.EOF { +			<-a.s.Done() +		} +		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { +			onSuccess() +			cs.mu.Unlock() +			return err +		} +		if err := cs.retryLocked(a, err); err != nil { +			cs.mu.Unlock() +			return err +		} +	} +} + +func (cs *clientStream) Header() (metadata.MD, error) { +	var m metadata.MD +	noHeader := false +	err := cs.withRetry(func(a *csAttempt) error { +		var err error +		m, err = a.s.Header() +		if err == transport.ErrNoHeaders { +			noHeader = true +			return nil +		} +		return toRPCErr(err) +	}, cs.commitAttemptLocked) + +	if err != nil { +		cs.finish(err) +		return nil, err +	} + +	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { +		// Only log if binary log is on and header has not been logged, and +		// there is actually headers to log. 
+		logEntry := &binarylog.ServerHeader{ +			OnClientSide: true, +			Header:       m, +			PeerAddr:     nil, +		} +		if peer, ok := peer.FromContext(cs.Context()); ok { +			logEntry.PeerAddr = peer.Addr +		} +		cs.serverHeaderBinlogged = true +		for _, binlog := range cs.binlogs { +			binlog.Log(logEntry) +		} +	} +	return m, nil +} + +func (cs *clientStream) Trailer() metadata.MD { +	// On RPC failure, we never need to retry, because usage requires that +	// RecvMsg() returned a non-nil error before calling this function is valid. +	// We would have retried earlier if necessary. +	// +	// Commit the attempt anyway, just in case users are not following those +	// directions -- it will prevent races and should not meaningfully impact +	// performance. +	cs.commitAttempt() +	if cs.attempt.s == nil { +		return nil +	} +	return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { +	for _, f := range cs.buffer { +		if err := f(attempt); err != nil { +			return err +		} +	} +	return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +	// Note: we still will buffer if retry is disabled (for transparent retries). +	if cs.committed { +		return +	} +	cs.bufferSize += sz +	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { +		cs.commitAttemptLocked() +		return +	} +	cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { +	defer func() { +		if err != nil && err != io.EOF { +			// Call finish on the client stream for errors generated by this SendMsg +			// call, as these indicate problems created by this client.  (Transport +			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real +			// error will be returned from RecvMsg eventually in that case, or be +			// retried.) 
+			cs.finish(err) +		} +	}() +	if cs.sentLast { +		return status.Errorf(codes.Internal, "SendMsg called after CloseSend") +	} +	if !cs.desc.ClientStreams { +		cs.sentLast = true +	} + +	// load hdr, payload, data +	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) +	if err != nil { +		return err +	} + +	// TODO(dfawley): should we be checking len(data) instead? +	if len(payload) > *cs.callInfo.maxSendMessageSize { +		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) +	} +	op := func(a *csAttempt) error { +		return a.sendMsg(m, hdr, payload, data) +	} +	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) +	if len(cs.binlogs) != 0 && err == nil { +		cm := &binarylog.ClientMessage{ +			OnClientSide: true, +			Message:      data, +		} +		for _, binlog := range cs.binlogs { +			binlog.Log(cm) +		} +	} +	return err +} + +func (cs *clientStream) RecvMsg(m interface{}) error { +	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { +		// Call Header() to binary log header if it's not already logged. +		cs.Header() +	} +	var recvInfo *payloadInfo +	if len(cs.binlogs) != 0 { +		recvInfo = &payloadInfo{} +	} +	err := cs.withRetry(func(a *csAttempt) error { +		return a.recvMsg(m, recvInfo) +	}, cs.commitAttemptLocked) +	if len(cs.binlogs) != 0 && err == nil { +		sm := &binarylog.ServerMessage{ +			OnClientSide: true, +			Message:      recvInfo.uncompressedBytes, +		} +		for _, binlog := range cs.binlogs { +			binlog.Log(sm) +		} +	} +	if err != nil || !cs.desc.ServerStreams { +		// err != nil or non-server-streaming indicates end of stream. +		cs.finish(err) + +		if len(cs.binlogs) != 0 { +			// finish will not log Trailer. Log Trailer here. 
+			logEntry := &binarylog.ServerTrailer{ +				OnClientSide: true, +				Trailer:      cs.Trailer(), +				Err:          err, +			} +			if logEntry.Err == io.EOF { +				logEntry.Err = nil +			} +			if peer, ok := peer.FromContext(cs.Context()); ok { +				logEntry.PeerAddr = peer.Addr +			} +			for _, binlog := range cs.binlogs { +				binlog.Log(logEntry) +			} +		} +	} +	return err +} + +func (cs *clientStream) CloseSend() error { +	if cs.sentLast { +		// TODO: return an error and finish the stream instead, due to API misuse? +		return nil +	} +	cs.sentLast = true +	op := func(a *csAttempt) error { +		a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) +		// Always return nil; io.EOF is the only error that might make sense +		// instead, but there is no need to signal the client to call RecvMsg +		// as the only use left for the stream after CloseSend is to call +		// RecvMsg.  This also matches historical behavior. +		return nil +	} +	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) +	if len(cs.binlogs) != 0 { +		chc := &binarylog.ClientHalfClose{ +			OnClientSide: true, +		} +		for _, binlog := range cs.binlogs { +			binlog.Log(chc) +		} +	} +	// We never returned an error here for reasons. +	return nil +} + +func (cs *clientStream) finish(err error) { +	if err == io.EOF { +		// Ending a stream with EOF indicates a success. +		err = nil +	} +	cs.mu.Lock() +	if cs.finished { +		cs.mu.Unlock() +		return +	} +	cs.finished = true +	cs.commitAttemptLocked() +	if cs.attempt != nil { +		cs.attempt.finish(err) +		// after functions all rely upon having a stream. +		if cs.attempt.s != nil { +			for _, o := range cs.opts { +				o.after(cs.callInfo, cs.attempt) +			} +		} +	} +	cs.mu.Unlock() +	// For binary logging. only log cancel in finish (could be caused by RPC ctx +	// canceled or ClientConn closed). Trailer will be logged in RecvMsg. +	// +	// Only one of cancel or trailer needs to be logged. 
In the cases where +	// users don't call RecvMsg, users must have already canceled the RPC. +	if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { +		c := &binarylog.Cancel{ +			OnClientSide: true, +		} +		for _, binlog := range cs.binlogs { +			binlog.Log(c) +		} +	} +	if err == nil { +		cs.retryThrottler.successfulRPC() +	} +	if channelz.IsOn() { +		if err != nil { +			cs.cc.incrCallsFailed() +		} else { +			cs.cc.incrCallsSucceeded() +		} +	} +	cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +	cs := a.cs +	if a.trInfo != nil { +		a.mu.Lock() +		if a.trInfo.tr != nil { +			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) +		} +		a.mu.Unlock() +	} +	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { +		if !cs.desc.ClientStreams { +			// For non-client-streaming RPCs, we return nil instead of EOF on error +			// because the generated code requires it.  finish is not called; RecvMsg() +			// will call it with the stream's status independently. +			return nil +		} +		return io.EOF +	} +	for _, sh := range a.statsHandlers { +		sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) +	} +	if channelz.IsOn() { +		a.t.IncrMsgSent() +	} +	return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +	cs := a.cs +	if len(a.statsHandlers) != 0 && payInfo == nil { +		payInfo = &payloadInfo{} +	} + +	if !a.decompSet { +		// Block until we receive headers containing received message encoding. +		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { +			if a.dc == nil || a.dc.Type() != ct { +				// No configured decompressor, or it does not match the incoming +				// message encoding; attempt to find a registered compressor that does. +				a.dc = nil +				a.decomp = encoding.GetCompressor(ct) +			} +		} else { +			// No compression is used; disable our decompressor. 
+			a.dc = nil +		} +		// Only initialize this state once per stream. +		a.decompSet = true +	} +	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) +	if err != nil { +		if err == io.EOF { +			if statusErr := a.s.Status().Err(); statusErr != nil { +				return statusErr +			} +			return io.EOF // indicates successful end of stream. +		} + +		return toRPCErr(err) +	} +	if a.trInfo != nil { +		a.mu.Lock() +		if a.trInfo.tr != nil { +			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) +		} +		a.mu.Unlock() +	} +	for _, sh := range a.statsHandlers { +		sh.HandleRPC(a.ctx, &stats.InPayload{ +			Client:   true, +			RecvTime: time.Now(), +			Payload:  m, +			// TODO truncate large payload. +			Data:       payInfo.uncompressedBytes, +			WireLength: payInfo.wireLength + headerLen, +			Length:     len(payInfo.uncompressedBytes), +		}) +	} +	if channelz.IsOn() { +		a.t.IncrMsgRecv() +	} +	if cs.desc.ServerStreams { +		// Subsequent messages should be received by subsequent RecvMsg calls. +		return nil +	} +	// Special handling for non-server-stream rpcs. +	// This recv expects EOF or errors, so we don't collect inPayload. +	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) +	if err == nil { +		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) +	} +	if err == io.EOF { +		return a.s.Status().Err() // non-server streaming Recv returns nil on success +	} +	return toRPCErr(err) +} + +func (a *csAttempt) finish(err error) { +	a.mu.Lock() +	if a.finished { +		a.mu.Unlock() +		return +	} +	a.finished = true +	if err == io.EOF { +		// Ending a stream with EOF indicates a success. 
+		err = nil +	} +	var tr metadata.MD +	if a.s != nil { +		a.t.CloseStream(a.s, err) +		tr = a.s.Trailer() +	} + +	if a.pickResult.Done != nil { +		br := false +		if a.s != nil { +			br = a.s.BytesReceived() +		} +		a.pickResult.Done(balancer.DoneInfo{ +			Err:           err, +			Trailer:       tr, +			BytesSent:     a.s != nil, +			BytesReceived: br, +			ServerLoad:    balancerload.Parse(tr), +		}) +	} +	for _, sh := range a.statsHandlers { +		end := &stats.End{ +			Client:    true, +			BeginTime: a.beginTime, +			EndTime:   time.Now(), +			Trailer:   tr, +			Error:     err, +		} +		sh.HandleRPC(a.ctx, end) +	} +	if a.trInfo != nil && a.trInfo.tr != nil { +		if err == nil { +			a.trInfo.tr.LazyPrintf("RPC: [OK]") +		} else { +			a.trInfo.tr.LazyPrintf("RPC: [%v]", err) +			a.trInfo.tr.SetError() +		} +		a.trInfo.tr.Finish() +		a.trInfo.tr = nil +	} +	a.mu.Unlock() +} + +// newClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid race, transport is specified separately, instead +// of using ac.transpot. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { +	if t == nil { +		// TODO: return RPC error here? +		return nil, errors.New("transport provided is nil") +	} +	// defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. +	c := &callInfo{} + +	// Possible context leak: +	// The cancel function for the child context we create will only be called +	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if +	// an error is generated by SendMsg. 
+	// https://github.com/grpc/grpc-go/issues/1818. +	ctx, cancel := context.WithCancel(ctx) +	defer func() { +		if err != nil { +			cancel() +		} +	}() + +	for _, o := range opts { +		if err := o.before(c); err != nil { +			return nil, toRPCErr(err) +		} +	} +	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) +	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) +	if err := setCallInfoCodec(c); err != nil { +		return nil, err +	} + +	callHdr := &transport.CallHdr{ +		Host:           ac.cc.authority, +		Method:         method, +		ContentSubtype: c.contentSubtype, +	} + +	// Set our outgoing compression according to the UseCompressor CallOption, if +	// set.  In that case, also find the compressor from the encoding package. +	// Otherwise, use the compressor configured by the WithCompressor DialOption, +	// if set. +	var cp Compressor +	var comp encoding.Compressor +	if ct := c.compressorType; ct != "" { +		callHdr.SendCompress = ct +		if ct != encoding.Identity { +			comp = encoding.GetCompressor(ct) +			if comp == nil { +				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) +			} +		} +	} else if ac.cc.dopts.cp != nil { +		callHdr.SendCompress = ac.cc.dopts.cp.Type() +		cp = ac.cc.dopts.cp +	} +	if c.creds != nil { +		callHdr.Creds = c.creds +	} + +	// Use a special addrConnStream to avoid retry. 
+	as := &addrConnStream{ +		callHdr:  callHdr, +		ac:       ac, +		ctx:      ctx, +		cancel:   cancel, +		opts:     opts, +		callInfo: c, +		desc:     desc, +		codec:    c.codec, +		cp:       cp, +		comp:     comp, +		t:        t, +	} + +	s, err := as.t.NewStream(as.ctx, as.callHdr) +	if err != nil { +		err = toRPCErr(err) +		return nil, err +	} +	as.s = s +	as.p = &parser{r: s} +	ac.incrCallsStarted() +	if desc != unaryStreamDesc { +		// Listen on cc and stream contexts to cleanup when the user closes the +		// ClientConn or cancels the stream context.  In all other cases, an error +		// should already be injected into the recv buffer by the transport, which +		// the client will eventually receive, and then we will cancel the stream's +		// context in clientStream.finish. +		go func() { +			select { +			case <-ac.ctx.Done(): +				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) +			case <-ctx.Done(): +				as.finish(toRPCErr(ctx.Err())) +			} +		}() +	} +	return as, nil +} + +type addrConnStream struct { +	s         *transport.Stream +	ac        *addrConn +	callHdr   *transport.CallHdr +	cancel    context.CancelFunc +	opts      []CallOption +	callInfo  *callInfo +	t         transport.ClientTransport +	ctx       context.Context +	sentLast  bool +	desc      *StreamDesc +	codec     baseCodec +	cp        Compressor +	comp      encoding.Compressor +	decompSet bool +	dc        Decompressor +	decomp    encoding.Compressor +	p         *parser +	mu        sync.Mutex +	finished  bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { +	m, err := as.s.Header() +	if err != nil { +		as.finish(toRPCErr(err)) +	} +	return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { +	return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { +	if as.sentLast { +		// TODO: return an error and finish the stream instead, due to API misuse? 
+		return nil +	} +	as.sentLast = true + +	as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) +	// Always return nil; io.EOF is the only error that might make sense +	// instead, but there is no need to signal the client to call RecvMsg +	// as the only use left for the stream after CloseSend is to call +	// RecvMsg.  This also matches historical behavior. +	return nil +} + +func (as *addrConnStream) Context() context.Context { +	return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { +	defer func() { +		if err != nil && err != io.EOF { +			// Call finish on the client stream for errors generated by this SendMsg +			// call, as these indicate problems created by this client.  (Transport +			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real +			// error will be returned from RecvMsg eventually in that case, or be +			// retried.) +			as.finish(err) +		} +	}() +	if as.sentLast { +		return status.Errorf(codes.Internal, "SendMsg called after CloseSend") +	} +	if !as.desc.ClientStreams { +		as.sentLast = true +	} + +	// load hdr, payload, data +	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) +	if err != nil { +		return err +	} + +	// TODO(dfawley): should we be checking len(data) instead? +	if len(payld) > *as.callInfo.maxSendMessageSize { +		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) +	} + +	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { +		if !as.desc.ClientStreams { +			// For non-client-streaming RPCs, we return nil instead of EOF on error +			// because the generated code requires it.  finish is not called; RecvMsg() +			// will call it with the stream's status independently. 
+			return nil +		} +		return io.EOF +	} + +	if channelz.IsOn() { +		as.t.IncrMsgSent() +	} +	return nil +} + +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +	defer func() { +		if err != nil || !as.desc.ServerStreams { +			// err != nil or non-server-streaming indicates end of stream. +			as.finish(err) +		} +	}() + +	if !as.decompSet { +		// Block until we receive headers containing received message encoding. +		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { +			if as.dc == nil || as.dc.Type() != ct { +				// No configured decompressor, or it does not match the incoming +				// message encoding; attempt to find a registered compressor that does. +				as.dc = nil +				as.decomp = encoding.GetCompressor(ct) +			} +		} else { +			// No compression is used; disable our decompressor. +			as.dc = nil +		} +		// Only initialize this state once per stream. +		as.decompSet = true +	} +	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) +	if err != nil { +		if err == io.EOF { +			if statusErr := as.s.Status().Err(); statusErr != nil { +				return statusErr +			} +			return io.EOF // indicates successful end of stream. +		} +		return toRPCErr(err) +	} + +	if channelz.IsOn() { +		as.t.IncrMsgRecv() +	} +	if as.desc.ServerStreams { +		// Subsequent messages should be received by subsequent RecvMsg calls. +		return nil +	} + +	// Special handling for non-server-stream rpcs. +	// This recv expects EOF or errors, so we don't collect inPayload. 
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) +	if err == nil { +		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) +	} +	if err == io.EOF { +		return as.s.Status().Err() // non-server streaming Recv returns nil on success +	} +	return toRPCErr(err) +} + +func (as *addrConnStream) finish(err error) { +	as.mu.Lock() +	if as.finished { +		as.mu.Unlock() +		return +	} +	as.finished = true +	if err == io.EOF { +		// Ending a stream with EOF indicates a success. +		err = nil +	} +	if as.s != nil { +		as.t.CloseStream(as.s, err) +	} + +	if err != nil { +		as.ac.incrCallsFailed() +	} else { +		as.ac.incrCallsSucceeded() +	} +	as.cancel() +	as.mu.Unlock() +} + +// ServerStream defines the server-side behavior of a streaming RPC. +// +// Errors returned from ServerStream methods are compatible with the status +// package.  However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. +type ServerStream interface { +	// SetHeader sets the header metadata. It may be called multiple times. +	// When call multiple times, all the provided metadata will be merged. +	// All the metadata will be sent out when one of the following happens: +	//  - ServerStream.SendHeader() is called; +	//  - The first response is sent out; +	//  - An RPC status is sent out (error or success). +	SetHeader(metadata.MD) error +	// SendHeader sends the header metadata. +	// The provided md and headers set by SetHeader() will be sent. +	// It fails if called multiple times. +	SendHeader(metadata.MD) error +	// SetTrailer sets the trailer metadata which will be sent with the RPC status. +	// When called more than once, all the provided metadata will be merged. +	SetTrailer(metadata.MD) +	// Context returns the context for this stream. +	Context() context.Context +	// SendMsg sends a message. 
On error, SendMsg aborts the stream and the +	// error is returned directly. +	// +	// SendMsg blocks until: +	//   - There is sufficient flow control to schedule m with the transport, or +	//   - The stream is done, or +	//   - The stream breaks. +	// +	// SendMsg does not wait until the message is received by the client. An +	// untimely stream closure may result in lost messages. +	// +	// It is safe to have a goroutine calling SendMsg and another goroutine +	// calling RecvMsg on the same stream at the same time, but it is not safe +	// to call SendMsg on the same stream in different goroutines. +	// +	// It is not safe to modify the message after calling SendMsg. Tracing +	// libraries and stats handlers may use the message lazily. +	SendMsg(m interface{}) error +	// RecvMsg blocks until it receives a message into m or the stream is +	// done. It returns io.EOF when the client has performed a CloseSend. On +	// any non-EOF error, the stream is aborted and the error contains the +	// RPC status. +	// +	// It is safe to have a goroutine calling SendMsg and another goroutine +	// calling RecvMsg on the same stream at the same time, but it is not +	// safe to call RecvMsg on the same stream in different goroutines. +	RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { +	ctx   context.Context +	t     transport.ServerTransport +	s     *transport.Stream +	p     *parser +	codec baseCodec + +	cp     Compressor +	dc     Decompressor +	comp   encoding.Compressor +	decomp encoding.Compressor + +	maxReceiveMessageSize int +	maxSendMessageSize    int +	trInfo                *traceInfo + +	statsHandler []stats.Handler + +	binlogs []binarylog.MethodLogger +	// serverHeaderBinlogged indicates whether server header has been logged. It +	// will happen when one of the following two happens: stream.SendHeader(), +	// stream.Send(). +	// +	// It's only checked in send and sendHeader, doesn't need to be +	// synchronized. 
+	serverHeaderBinlogged bool + +	mu sync.Mutex // protects trInfo.tr after the service handler runs. +} + +func (ss *serverStream) Context() context.Context { +	return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { +	if md.Len() == 0 { +		return nil +	} +	err := imetadata.Validate(md) +	if err != nil { +		return status.Error(codes.Internal, err.Error()) +	} +	return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { +	err := imetadata.Validate(md) +	if err != nil { +		return status.Error(codes.Internal, err.Error()) +	} + +	err = ss.t.WriteHeader(ss.s, md) +	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { +		h, _ := ss.s.Header() +		sh := &binarylog.ServerHeader{ +			Header: h, +		} +		ss.serverHeaderBinlogged = true +		for _, binlog := range ss.binlogs { +			binlog.Log(sh) +		} +	} +	return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { +	if md.Len() == 0 { +		return +	} +	if err := imetadata.Validate(md); err != nil { +		logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) +	} +	ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { +	defer func() { +		if ss.trInfo != nil { +			ss.mu.Lock() +			if ss.trInfo.tr != nil { +				if err == nil { +					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) +				} else { +					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +					ss.trInfo.tr.SetError() +				} +			} +			ss.mu.Unlock() +		} +		if err != nil && err != io.EOF { +			st, _ := status.FromError(toRPCErr(err)) +			ss.t.WriteStatus(ss.s, st) +			// Non-user specified status was sent out. This should be an error +			// case (as a server side Cancel maybe). +			// +			// This is not handled specifically now. User will return a final +			// status from the service handler, we will log that error instead. +			// This behavior is similar to an interceptor. 
+		} +		if channelz.IsOn() && err == nil { +			ss.t.IncrMsgSent() +		} +	}() + +	// load hdr, payload, data +	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) +	if err != nil { +		return err +	} + +	// TODO(dfawley): should we be checking len(data) instead? +	if len(payload) > ss.maxSendMessageSize { +		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) +	} +	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { +		return toRPCErr(err) +	} +	if len(ss.binlogs) != 0 { +		if !ss.serverHeaderBinlogged { +			h, _ := ss.s.Header() +			sh := &binarylog.ServerHeader{ +				Header: h, +			} +			ss.serverHeaderBinlogged = true +			for _, binlog := range ss.binlogs { +				binlog.Log(sh) +			} +		} +		sm := &binarylog.ServerMessage{ +			Message: data, +		} +		for _, binlog := range ss.binlogs { +			binlog.Log(sm) +		} +	} +	if len(ss.statsHandler) != 0 { +		for _, sh := range ss.statsHandler { +			sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) +		} +	} +	return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { +	defer func() { +		if ss.trInfo != nil { +			ss.mu.Lock() +			if ss.trInfo.tr != nil { +				if err == nil { +					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) +				} else if err != io.EOF { +					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +					ss.trInfo.tr.SetError() +				} +			} +			ss.mu.Unlock() +		} +		if err != nil && err != io.EOF { +			st, _ := status.FromError(toRPCErr(err)) +			ss.t.WriteStatus(ss.s, st) +			// Non-user specified status was sent out. This should be an error +			// case (as a server side Cancel maybe). +			// +			// This is not handled specifically now. User will return a final +			// status from the service handler, we will log that error instead. +			// This behavior is similar to an interceptor. 
+		} +		if channelz.IsOn() && err == nil { +			ss.t.IncrMsgRecv() +		} +	}() +	var payInfo *payloadInfo +	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { +		payInfo = &payloadInfo{} +	} +	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { +		if err == io.EOF { +			if len(ss.binlogs) != 0 { +				chc := &binarylog.ClientHalfClose{} +				for _, binlog := range ss.binlogs { +					binlog.Log(chc) +				} +			} +			return err +		} +		if err == io.ErrUnexpectedEOF { +			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) +		} +		return toRPCErr(err) +	} +	if len(ss.statsHandler) != 0 { +		for _, sh := range ss.statsHandler { +			sh.HandleRPC(ss.s.Context(), &stats.InPayload{ +				RecvTime: time.Now(), +				Payload:  m, +				// TODO truncate large payload. +				Data:       payInfo.uncompressedBytes, +				WireLength: payInfo.wireLength + headerLen, +				Length:     len(payInfo.uncompressedBytes), +			}) +		} +	} +	if len(ss.binlogs) != 0 { +		cm := &binarylog.ClientMessage{ +			Message: payInfo.uncompressedBytes, +		} +		for _, binlog := range ss.binlogs { +			binlog.Log(cm) +		} +	} +	return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { +	return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +	if preparedMsg, ok := m.(*PreparedMsg); ok { +		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil +	} +	// The input interface is not a prepared msg. 
+	// Marshal and Compress the data at this point +	data, err = encode(codec, m) +	if err != nil { +		return nil, nil, nil, err +	} +	compData, err := compress(data, cp, comp) +	if err != nil { +		return nil, nil, nil, err +	} +	hdr, payload = msgHeader(data, compData) +	return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 000000000..bfa5dfa40 --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +package tap + +import ( +	"context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { +	// FullMethodName is the string of grpc method (in the format of +	// /package.service/method). +	FullMethodName string +	// TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client.  
If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..07a2d26b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( +	"bytes" +	"fmt" +	"io" +	"net" +	"strings" +	"sync" +	"time" + +	"golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. 
+var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { +	m = strings.TrimPrefix(m, "/") // remove leading slash +	if i := strings.Index(m, "/"); i >= 0 { +		m = m[:i] // remove everything from second slash +	} +	return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { +	tr        trace.Trace +	firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { +	mu         sync.Mutex +	client     bool // whether this is a client (outgoing) RPC +	remoteAddr net.Addr +	deadline   time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { +	f.mu.Lock() +	f.remoteAddr = addr +	f.mu.Unlock() +} + +func (f *firstLine) String() string { +	f.mu.Lock() +	defer f.mu.Unlock() + +	var line bytes.Buffer +	io.WriteString(&line, "RPC: ") +	if f.client { +		io.WriteString(&line, "to") +	} else { +		io.WriteString(&line, "from") +	} +	fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) +	if f.deadline != 0 { +		fmt.Fprint(&line, f.deadline) +	} else { +		io.WriteString(&line, "none") +	} +	return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { +	if l > len(x) { +		return x +	} +	return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { +	sent bool        // whether this is an outgoing payload +	msg  interface{} // e.g. a proto.Message +	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? 
+} + +func (p payload) String() string { +	if p.sent { +		return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) +	} +	return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { +	format string +	a      []interface{} +} + +func (f *fmtStringer) String() string { +	return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 000000000..fe552c315 --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.53.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 000000000..3728aed04 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,217 @@ +#!/bin/bash + +set -ex  # Exit on error; debugging enabled. +set -o pipefail  # Fail a pipe if any sub-command fails. + +# not makes sure the command passed to it does not exit with a return code of 0. +not() { +  # This is required instead of the earlier (! $COMMAND) because subshells and +  # pipefail don't work the same on Darwin as in Linux. +  ! 
"$@" +} + +die() { +  echo "$@" >&2 +  exit 1 +} + +fail_on_output() { +  tee /dev/stderr | not read +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. +cleanup() { +  git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version + +if [[ "$1" = "-install" ]]; then +  # Install the pinned versions as defined in module tools. +  pushd ./test/tools +  go install \ +    golang.org/x/lint/golint \ +    golang.org/x/tools/cmd/goimports \ +    honnef.co/go/tools/cmd/staticcheck \ +    github.com/client9/misspell/cmd/misspell +  popd +  if [[ -z "${VET_SKIP_PROTO}" ]]; then +    if [[ "${TRAVIS}" = "true" ]]; then +      PROTOBUF_VERSION=3.14.0 +      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip +      pushd /home/travis +      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} +      unzip ${PROTOC_FILENAME} +      bin/protoc --version +      popd +    elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then +      PROTOBUF_VERSION=3.14.0 +      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip +      pushd /home/runner/go +      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} +      unzip ${PROTOC_FILENAME} +      bin/protoc --version +      popd +    elif not which protoc > /dev/null; then +      die "Please install protoc into your path" +    fi +  fi +  exit 0 +elif [[ "$#" -ne 0 ]]; then +  die "Unknown argument(s): $*" +fi + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then +  PATH="/home/travis/bin:${PATH}" make proto && \ +    git status --porcelain 2>&1 | fail_on_output || \ +    (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then +  exit 0 +fi + +# - Ensure all source files contain a copyright message. 
+# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go + +# - Do not import x/net/context. +not git grep -l 'x/net/context' -- "*.go" + +# - Do not import math/rand for real library code.  Use internal/grpcrand for +#   thread safety. +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not call grpclog directly. Use grpclog.Component instead. +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + +# - Ensure all ptypes proto packages are renamed when importing. +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +misspell -error . + +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do +  MOD_DIR=$(dirname ${MOD_FILE}) +  pushd ${MOD_DIR} +  go vet -all ./... | fail_on_output +  gofmt -s -d -l . 2>&1 | fail_on_output +  goimports -l . 2>&1 | not grep -vE "\.pb\.go" +  golint ./... 
2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + +  go mod tidy -compat=1.17 +  git status --porcelain 2>&1 | fail_on_output || \ +    (git status; git --no-pager diff; exit 1) +  popd +done + +# - Collection of static analysis checks +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. +SC_OUT="$(mktemp)" +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" +# Only ignore the following deprecated types/fields/functions. +not grep -Fv '.CredsBundle +.HeaderMap +.Metadata is deprecated: use Attributes +.NewAddress +.NewServiceConfig +.Type is deprecated: use Attributes +BuildVersion is deprecated +balancer.ErrTransientFailure +balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.CustomCodec +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.ServiceConfig +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +info.SecurityVersion +proto is deprecated +proto.InternalMessageInfo is deprecated +proto.EnumName is deprecated +proto.ErrInternalBadWireType is deprecated +proto.FileDescriptor is deprecated +proto.Marshaler is deprecated +proto.MessageType is deprecated +proto.RegisterEnum is deprecated +proto.RegisterFile is deprecated +proto.RegisterType is deprecated +proto.RegisterExtension is deprecated +proto.RegisteredExtension is deprecated +proto.RegisteredExtensions is deprecated +proto.RegisterMapType is deprecated +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB +Target is 
deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" + +# - special golint on package comments. +lint_package_comment_per_package() { +  # Number of files in this go package. +  fileCount=$(go list -f '{{len .GoFiles}}' $1) +  if [ ${fileCount} -eq 0 ]; then +    return 0 +  fi +  # Number of package errors generated by golint. +  lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") +  # golint complains about every file that's missing the package comment. If the +  # number of files for this package is greater than the number of errors, there's +  # at least one file with package comment, good. Otherwise, fail. +  if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then +    echo "Package $1 (with ${fileCount} files) is missing package comment" +    return 1 +  fi +} +lint_package_comment() { +  set +ex + +  count=0 +  for i in $(go list ./...); do +    lint_package_comment_per_package "$i" +    ((count += $?)) +  done + +  set -ex +  return $count +} +lint_package_comment + +echo SUCCESS  | 
