Diffstat (limited to 'vendor/golang.org/x/net')
-rw-r--r--  vendor/golang.org/x/net/http2/flow.go      | 88
-rw-r--r--  vendor/golang.org/x/net/http2/server.go    | 85
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go | 86
3 files changed, 144 insertions, 115 deletions
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
index b51f0e0cf..750ac52f2 100644
--- a/vendor/golang.org/x/net/http2/flow.go
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -6,23 +6,91 @@
package http2
-// flow is the flow control window's size.
-type flow struct {
+// inflowMinRefresh is the minimum number of bytes we'll send for a
+// flow control window update.
+const inflowMinRefresh = 4 << 10
+
+// inflow accounts for an inbound flow control window.
+// It tracks both the latest window sent to the peer (used for enforcement)
+// and the accumulated unsent window.
+type inflow struct {
+ avail int32
+ unsent int32
+}
+
+// init sets the initial window.
+func (f *inflow) init(n int32) {
+ f.avail = n
+}
+
+// add adds n bytes to the window, with a maximum window size of max,
+// indicating that the peer can now send us more data.
+// For example, the user read from a {Request,Response} body and consumed
+// some of the buffered data, so the peer can now send more.
+// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer.
+// Window updates are accumulated and sent when the unsent capacity
+// is at least inflowMinRefresh or will at least double the peer's available window.
+func (f *inflow) add(n int) (connAdd int32) {
+ if n < 0 {
+ panic("negative update")
+ }
+ unsent := int64(f.unsent) + int64(n)
+ // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets."
+ // RFC 7540 Section 6.9.1.
+ const maxWindow = 1<<31 - 1
+ if unsent+int64(f.avail) > maxWindow {
+ panic("flow control update exceeds maximum window size")
+ }
+ f.unsent = int32(unsent)
+ if f.unsent < inflowMinRefresh && f.unsent < f.avail {
+ // If there aren't at least inflowMinRefresh bytes of window to send,
+ // and this update won't at least double the window, buffer the update for later.
+ return 0
+ }
+ f.avail += f.unsent
+ f.unsent = 0
+ return int32(unsent)
+}
+
+// take attempts to take n bytes from the peer's flow control window.
+// It reports whether the window has available capacity.
+func (f *inflow) take(n uint32) bool {
+ if n > uint32(f.avail) {
+ return false
+ }
+ f.avail -= int32(n)
+ return true
+}
+
+// takeInflows attempts to take n bytes from two inflows,
+// typically connection-level and stream-level flows.
+// It reports whether both windows have available capacity.
+func takeInflows(f1, f2 *inflow, n uint32) bool {
+ if n > uint32(f1.avail) || n > uint32(f2.avail) {
+ return false
+ }
+ f1.avail -= int32(n)
+ f2.avail -= int32(n)
+ return true
+}
+
+// outflow is the outbound flow control window's size.
+type outflow struct {
_ incomparable
// n is the number of DATA bytes we're allowed to send.
- // A flow is kept both on a conn and a per-stream.
+ // An outflow is kept both on a conn and a per-stream.
n int32
- // conn points to the shared connection-level flow that is
- // shared by all streams on that conn. It is nil for the flow
+ // conn points to the shared connection-level outflow that is
+ // shared by all streams on that conn. It is nil for the outflow
// that's on the conn directly.
- conn *flow
+ conn *outflow
}
-func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf }
-func (f *flow) available() int32 {
+func (f *outflow) available() int32 {
n := f.n
if f.conn != nil && f.conn.n < n {
n = f.conn.n
@@ -30,7 +98,7 @@ func (f *flow) available() int32 {
return n
}
-func (f *flow) take(n int32) {
+func (f *outflow) take(n int32) {
if n > f.available() {
panic("internal error: took too much")
}
@@ -42,7 +110,7 @@ func (f *flow) take(n int32) {
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
-func (f *flow) add(n int32) bool {
+func (f *outflow) add(n int32) bool {
sum := f.n + n
if (sum > n) == (f.n > 0) {
f.n = sum
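
For illustration, the batching rule that inflow.add introduces can be exercised outside the package with a local copy of the type (inflow is unexported, so the names below are stand-ins that mirror the diff rather than anything the http2 package exposes):

package main

import "fmt"

const inflowMinRefresh = 4 << 10 // minimum WINDOW_UPDATE increment, as in the diff

type inflow struct {
	avail  int32 // window most recently advertised to the peer
	unsent int32 // refunds accumulated but not yet advertised
}

func (f *inflow) init(n int32) { f.avail = n }

// add buffers small refunds and only reports an increment to send once
// at least inflowMinRefresh bytes (or enough to double the advertised
// window) have accumulated, mirroring (*inflow).add above.
func (f *inflow) add(n int) int32 {
	unsent := int64(f.unsent) + int64(n)
	if unsent+int64(f.avail) > 1<<31-1 {
		panic("flow control update exceeds maximum window size")
	}
	f.unsent = int32(unsent)
	if f.unsent < inflowMinRefresh && f.unsent < f.avail {
		return 0 // not worth a frame yet
	}
	f.avail += f.unsent
	f.unsent = 0
	return int32(unsent)
}

func main() {
	var f inflow
	f.init(64 << 10) // advertise a 64 KiB window

	fmt.Println(f.add(1 << 10)) // 0: the 1 KiB refund is buffered
	fmt.Println(f.add(3 << 10)) // 4096: the buffered total reached inflowMinRefresh
}

The first refund is below inflowMinRefresh and smaller than the advertised window, so it is buffered; the second pushes the unsent total to 4 KiB and yields a single WINDOW_UPDATE increment of 4096.
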
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 4eb7617fa..b624dc0a7 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -448,7 +448,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// configured value for inflow, that will be updated when we send a
// WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize)
- sc.inflow.add(initialWindowSize)
+ sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
@@ -563,8 +563,8 @@ type serverConn struct {
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan bodyReadMsg // from handlers -> serve
serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
+ flow outflow // conn-wide (not stream-specific) outbound flow control
+ inflow inflow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
@@ -641,10 +641,10 @@ type stream struct {
cancelCtx func()
// owned by serverConn's serve loop:
- bodyBytes int64 // body bytes seen so far
- declBodyBytes int64 // or -1 if undeclared
- flow flow // limits writing from Handler to client
- inflow flow // what the client is allowed to POST/etc to us
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow outflow // limits writing from Handler to client
+ inflow inflow // what the client is allowed to POST/etc to us
state streamState
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
@@ -1503,7 +1503,7 @@ func (sc *serverConn) processFrame(f Frame) error {
if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
if f, ok := f.(*DataFrame); ok {
- if sc.inflow.available() < int32(f.Length) {
+ if !sc.inflow.take(f.Length) {
return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
}
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
@@ -1775,14 +1775,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
// But still enforce their connection-level flow control,
// and return any flow control bytes since we're not going
// to consume them.
- if sc.inflow.available() < int32(f.Length) {
+ if !sc.inflow.take(f.Length) {
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
- // Deduct the flow control from inflow, since we're
- // going to immediately add it back in
- // sendWindowUpdate, which also schedules sending the
- // frames.
- sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
if st != nil && st.resetQueued {
@@ -1797,10 +1792,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
- if sc.inflow.available() < int32(f.Length) {
+ if !sc.inflow.take(f.Length) {
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
- sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
@@ -1811,10 +1805,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if f.Length > 0 {
// Check whether the client has flow control quota.
- if st.inflow.available() < int32(f.Length) {
+ if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
}
- st.inflow.take(int32(f.Length))
if len(data) > 0 {
wrote, err := st.body.Write(data)
@@ -1830,10 +1823,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Return any padded flow control now, since we won't
// refund it later on body reads.
- if pad := int32(f.Length) - int32(len(data)); pad > 0 {
- sc.sendWindowUpdate32(nil, pad)
- sc.sendWindowUpdate32(st, pad)
- }
+ // Call sendWindowUpdate even if there is no padding,
+ // to return buffered flow control credit if the sent
+ // window has shrunk.
+ pad := int32(f.Length) - int32(len(data))
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
}
if f.StreamEnded() {
st.endStream()
@@ -2105,8 +2100,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+ st.inflow.init(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout != 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
@@ -2388,47 +2382,28 @@ func (sc *serverConn) noteBodyRead(st *stream, n int) {
}
// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
- sc.serveG.check()
- // "The legal range for the increment to the flow control
- // window is 1 to 2^31-1 (2,147,483,647) octets."
- // A Go Read call on 64-bit machines could in theory read
- // a larger Read than this. Very unlikely, but we handle it here
- // rather than elsewhere for now.
- const maxUint31 = 1<<31 - 1
- for n > maxUint31 {
- sc.sendWindowUpdate32(st, maxUint31)
- n -= maxUint31
- }
- sc.sendWindowUpdate32(st, int32(n))
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.sendWindowUpdate(st, int(n))
}
// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
sc.serveG.check()
- if n == 0 {
- return
- }
- if n < 0 {
- panic("negative update")
- }
var streamID uint32
- if st != nil {
+ var send int32
+ if st == nil {
+ send = sc.inflow.add(n)
+ } else {
streamID = st.id
+ send = st.inflow.add(n)
+ }
+ if send == 0 {
+ return
}
sc.writeFrame(FrameWriteRequest{
- write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ write: writeWindowUpdate{streamID: streamID, n: uint32(send)},
stream: st,
})
- var ok bool
- if st == nil {
- ok = sc.inflow.add(n)
- } else {
- ok = st.inflow.add(n)
- }
- if !ok {
- panic("internal error; sent too many window updates without decrements?")
- }
}
// requestBody is the Handler's Request.Body type.
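
The server paths above all funnel enforcement through takeInflows: a DATA frame's full Length (payload plus padding) must fit in both the connection-level and the stream-level receive windows, and both are debited together. A minimal sketch with local stand-in types:

package main

import "fmt"

type inflow struct {
	avail  int32
	unsent int32
}

func (f *inflow) init(n int32) { f.avail = n }

// takeInflows mirrors the helper added in flow.go: either both windows
// can cover n, or neither is touched.
func takeInflows(f1, f2 *inflow, n uint32) bool {
	if n > uint32(f1.avail) || n > uint32(f2.avail) {
		return false
	}
	f1.avail -= int32(n)
	f2.avail -= int32(n)
	return true
}

func main() {
	var connWin, streamWin inflow
	connWin.init(1 << 20)    // connection-level receive window
	streamWin.init(64 << 10) // stream-level receive window

	fmt.Println(takeInflows(&connWin, &streamWin, 16<<10)) // true: both windows have room
	fmt.Println(takeInflows(&connWin, &streamWin, 64<<10)) // false: stream window can't cover it
}

Since the new inflow type no longer carries a pointer to the connection-level window (the old st.inflow.conn link set in newStream), the coupling between the two windows is now explicit at each call site.
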
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 30f706e6c..b43ec10cf 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -47,10 +47,6 @@ const (
// we buffer per stream.
transportDefaultStreamFlow = 4 << 20
- // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
- // a stream-level WINDOW_UPDATE for at a time.
- transportDefaultStreamMinRefresh = 4 << 10
-
defaultUserAgent = "Go-http-client/2.0"
// initialMaxConcurrentStreams is a connections maxConcurrentStreams until
@@ -310,8 +306,8 @@ type ClientConn struct {
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow flow // our conn-level flow control quota (cs.flow is per stream)
- inflow flow // peer's conn-level flow control
+ flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+ inflow inflow // peer's conn-level flow control
doNotReuse bool // whether conn is marked to not be reused for any future requests
closing bool
closed bool
@@ -376,10 +372,10 @@ type clientStream struct {
respHeaderRecv chan struct{} // closed when headers are received
res *http.Response // set if respHeaderRecv is closed
- flow flow // guarded by cc.mu
- inflow flow // guarded by cc.mu
- bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
- readErr error // sticky read error; owned by transportResponseBody.Read
+ flow outflow // guarded by cc.mu
+ inflow inflow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
reqBody io.ReadCloser
reqBodyContentLength int64 // -1 means unknown
@@ -811,7 +807,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+ cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
cc.Close()
@@ -2073,8 +2069,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
- cs.inflow.add(transportDefaultStreamFlow)
- cs.inflow.setConnFlow(&cc.inflow)
+ cs.inflow.init(transportDefaultStreamFlow)
cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
@@ -2533,21 +2528,10 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
}
cc.mu.Lock()
- var connAdd, streamAdd int32
- // Check the conn-level first, before the stream-level.
- if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
- connAdd = transportDefaultConnFlow - v
- cc.inflow.add(connAdd)
- }
+ connAdd := cc.inflow.add(n)
+ var streamAdd int32
if err == nil { // No need to refresh if the stream is over or failed.
- // Consider any buffered body data (read from the conn but not
- // consumed by the client) when computing flow control for this
- // stream.
- v := int(cs.inflow.available()) + cs.bufPipe.Len()
- if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
- streamAdd = int32(transportDefaultStreamFlow - v)
- cs.inflow.add(streamAdd)
- }
+ streamAdd = cs.inflow.add(n)
}
cc.mu.Unlock()
@@ -2575,17 +2559,15 @@ func (b transportResponseBody) Close() error {
if unread > 0 {
cc.mu.Lock()
// Return connection-level flow control.
- if unread > 0 {
- cc.inflow.add(int32(unread))
- }
+ connAdd := cc.inflow.add(unread)
cc.mu.Unlock()
// TODO(dneil): Acquiring this mutex can block indefinitely.
// Move flow control return to a goroutine?
cc.wmu.Lock()
// Return connection-level flow control.
- if unread > 0 {
- cc.fr.WriteWindowUpdate(0, uint32(unread))
+ if connAdd > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
}
cc.bw.Flush()
cc.wmu.Unlock()
@@ -2628,13 +2610,18 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
- cc.inflow.add(int32(f.Length))
+ ok := cc.inflow.take(f.Length)
+ connAdd := cc.inflow.add(int(f.Length))
cc.mu.Unlock()
-
- cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(f.Length))
- cc.bw.Flush()
- cc.wmu.Unlock()
+ if !ok {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ if connAdd > 0 {
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
}
return nil
}
@@ -2665,9 +2652,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
// Check connection-level flow control.
cc.mu.Lock()
- if cs.inflow.available() >= int32(f.Length) {
- cs.inflow.take(int32(f.Length))
- } else {
+ if !takeInflows(&cc.inflow, &cs.inflow, f.Length) {
cc.mu.Unlock()
return ConnectionError(ErrCodeFlowControl)
}
@@ -2689,19 +2674,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
}
- if refund > 0 {
- cc.inflow.add(int32(refund))
- if !didReset {
- cs.inflow.add(int32(refund))
- }
+ sendConn := cc.inflow.add(refund)
+ var sendStream int32
+ if !didReset {
+ sendStream = cs.inflow.add(refund)
}
cc.mu.Unlock()
- if refund > 0 {
+ if sendConn > 0 || sendStream > 0 {
cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(refund))
- if !didReset {
- cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+ if sendConn > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(sendConn))
+ }
+ if sendStream > 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream))
}
cc.bw.Flush()
cc.wmu.Unlock()
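
A rough sketch of the net effect on the transport: Response.Body refunds are routed through inflow.add, so many small reads coalesce into far fewer WINDOW_UPDATE frames. The inflow copy below is local and simplified (the maximum-window check is omitted):

package main

import "fmt"

const inflowMinRefresh = 4 << 10

type inflow struct {
	avail  int32
	unsent int32
}

func (f *inflow) init(n int32) { f.avail = n }

func (f *inflow) add(n int) int32 {
	f.unsent += int32(n)
	if f.unsent < inflowMinRefresh && f.unsent < f.avail {
		return 0 // buffer the refund; no frame yet
	}
	send := f.unsent
	f.avail += send
	f.unsent = 0
	return send
}

func main() {
	var win inflow
	win.init(4 << 20) // stream receive window (transportDefaultStreamFlow)

	frames := 0
	for read := 0; read < 1<<20; read += 512 { // consume 1 MiB in 512-byte reads
		if win.add(512) > 0 {
			frames++
		}
	}
	fmt.Println("WINDOW_UPDATE frames:", frames) // 256 rather than one per read (2048)
}

Each 512-byte refund is buffered until 4 KiB accumulates, so 1 MiB of reads produces 256 stream-level updates instead of one per read; previously the transport batched only at the stream level via transportDefaultStreamMinRefresh, and topped up the connection-level window whenever it fell below half of transportDefaultConnFlow.
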