Diffstat (limited to 'vendor/golang.org/x/net/http2')
-rw-r--r--  vendor/golang.org/x/net/http2/h2c/h2c.go    | 11
-rw-r--r--  vendor/golang.org/x/net/http2/server.go     | 50
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go  | 69
3 files changed, 85 insertions(+), 45 deletions(-)
diff --git a/vendor/golang.org/x/net/http2/h2c/h2c.go b/vendor/golang.org/x/net/http2/h2c/h2c.go
index c3df711d9..2b77ffdaf 100644
--- a/vendor/golang.org/x/net/http2/h2c/h2c.go
+++ b/vendor/golang.org/x/net/http2/h2c/h2c.go
@@ -70,6 +70,15 @@ func NewHandler(h http.Handler, s *http2.Server) http.Handler {
 	}
 }
 
+// extractServer extracts existing http.Server instance from http.Request or create an empty http.Server
+func extractServer(r *http.Request) *http.Server {
+	server, ok := r.Context().Value(http.ServerContextKey).(*http.Server)
+	if ok {
+		return server
+	}
+	return new(http.Server)
+}
+
 // ServeHTTP implement the h2c support that is enabled by h2c.GetH2CHandler.
 func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	// Handle h2c with prior knowledge (RFC 7540 Section 3.4)
@@ -87,6 +96,7 @@
 		defer conn.Close()
 		s.s.ServeConn(conn, &http2.ServeConnOpts{
 			Context:          r.Context(),
+			BaseConfig:       extractServer(r),
 			Handler:          s.Handler,
 			SawClientPreface: true,
 		})
@@ -104,6 +114,7 @@
 		defer conn.Close()
 		s.s.ServeConn(conn, &http2.ServeConnOpts{
 			Context:        r.Context(),
+			BaseConfig:     extractServer(r),
 			Handler:        s.Handler,
 			UpgradeRequest: r,
 			Settings:       settings,
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index fd873b9af..43cc2a34a 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -143,7 +143,7 @@ type Server struct {
 }
 
 func (s *Server) initialConnRecvWindowSize() int32 {
-	if s.MaxUploadBufferPerConnection > initialWindowSize {
+	if s.MaxUploadBufferPerConnection >= initialWindowSize {
 		return s.MaxUploadBufferPerConnection
 	}
 	return 1 << 20
@@ -869,9 +869,7 @@ func (sc *serverConn) serve() {
 
 	// Each connection starts with initialWindowSize inflow tokens.
 	// If a higher value is configured, we add more tokens.
-	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
-		sc.sendWindowUpdate(nil, int(diff))
-	}
+	sc.sendWindowUpdate(nil)
 
 	if err := sc.readPreface(); err != nil {
 		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
@@ -1588,7 +1586,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 	if p := st.body; p != nil {
 		// Return any buffered unread bytes worth of conn-level flow control.
 		// See golang.org/issue/16481
-		sc.sendWindowUpdate(nil, p.Len())
+		sc.sendWindowUpdate(nil)
 		p.CloseWithError(err)
 	}
 
@@ -1736,7 +1734,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// sendWindowUpdate, which also schedules sending the
 		// frames.
 		sc.inflow.take(int32(f.Length))
-		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+		sc.sendWindowUpdate(nil) // conn-level
 
 		if st != nil && st.resetQueued {
 			// Already have a stream error in flight. Don't send another.
@@ -1754,7 +1752,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
 		sc.inflow.take(int32(f.Length))
-		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+		sc.sendWindowUpdate(nil) // conn-level
 		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
 
 		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
@@ -1772,7 +1770,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		if len(data) > 0 {
 			wrote, err := st.body.Write(data)
 			if err != nil {
-				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
+				sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote))
 				return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
 			}
 			if wrote != len(data) {
@@ -2099,12 +2097,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
-	bodyOpen := !f.StreamEnded()
-	if rp.method == "HEAD" && bodyOpen {
-		// HEAD requests can't have bodies
-		return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
-	}
-
 	rp.header = make(http.Header)
 	for _, hf := range f.RegularFields() {
 		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
@@ -2117,6 +2109,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 	if err != nil {
 		return nil, nil, err
 	}
+	bodyOpen := !f.StreamEnded()
 	if bodyOpen {
 		if vv, ok := rp.header["Content-Length"]; ok {
 			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
@@ -2329,17 +2322,32 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
 
 func (sc *serverConn) noteBodyRead(st *stream, n int) {
 	sc.serveG.check()
-	sc.sendWindowUpdate(nil, n) // conn-level
+	sc.sendWindowUpdate(nil) // conn-level
 	if st.state != stateHalfClosedRemote && st.state != stateClosed {
 		// Don't send this WINDOW_UPDATE if the stream is closed
 		// remotely.
-		sc.sendWindowUpdate(st, n)
+		sc.sendWindowUpdate(st)
 	}
 }
 
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+func (sc *serverConn) sendWindowUpdate(st *stream) {
 	sc.serveG.check()
+
+	var n int32
+	if st == nil {
+		if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 {
+			return
+		} else {
+			n = windowSize - avail
+		}
+	} else {
+		if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 {
+			return
+		} else {
+			n = windowSize - avail
+		}
+	}
 	// "The legal range for the increment to the flow control
 	// window is 1 to 2^31-1 (2,147,483,647) octets."
 	// A Go Read call on 64-bit machines could in theory read
@@ -2505,6 +2513,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 		rws.writeHeader(200)
 	}
 
+	if rws.handlerDone {
+		rws.promoteUndeclaredTrailers()
+	}
+
 	isHeadResp := rws.req.Method == "HEAD"
 	if !rws.sentHeader {
 		rws.sentHeader = true
@@ -2576,10 +2588,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 		return 0, nil
 	}
 
-	if rws.handlerDone {
-		rws.promoteUndeclaredTrailers()
-	}
-
 	// only send trailers if they have actually been defined by the
 	// server handler.
 	hasNonemptyTrailers := rws.hasNonemptyTrailers()
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 90fdc28cf..c5d005bba 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -258,7 +258,8 @@ func (t *Transport) initConnPool() {
 // HTTP/2 server.
 type ClientConn struct {
 	t             *Transport
-	tconn         net.Conn             // usually *tls.Conn, except specialized impls
+	tconn         net.Conn // usually *tls.Conn, except specialized impls
+	tconnClosed   bool
 	tlsState      *tls.ConnectionState // nil only for specialized impls
 	reused        uint32               // whether conn is being reused; atomic
 	singleUse     bool                 // whether being used for a single http.Request
@@ -344,8 +345,8 @@ type clientStream struct {
 	readErr     error // sticky read error; owned by transportResponseBody.Read
 
 	reqBody              io.ReadCloser
-	reqBodyContentLength int64 // -1 means unknown
-	reqBodyClosed        bool  // body has been closed; guarded by cc.mu
+	reqBodyContentLength int64         // -1 means unknown
+	reqBodyClosed        chan struct{} // guarded by cc.mu; non-nil on Close, closed when done
 
 	// owned by writeRequest:
 	sentEndStream bool // sent an END_STREAM flag to the peer
@@ -385,9 +386,8 @@ func (cs *clientStream) abortStreamLocked(err error) {
 		cs.abortErr = err
 		close(cs.abort)
 	})
-	if cs.reqBody != nil && !cs.reqBodyClosed {
-		cs.reqBody.Close()
-		cs.reqBodyClosed = true
+	if cs.reqBody != nil {
+		cs.closeReqBodyLocked()
 	}
 	// TODO(dneil): Clean up tests where cs.cc.cond is nil.
 	if cs.cc.cond != nil {
@@ -400,13 +400,24 @@ func (cs *clientStream) abortRequestBodyWrite() {
 	cc := cs.cc
 	cc.mu.Lock()
 	defer cc.mu.Unlock()
-	if cs.reqBody != nil && !cs.reqBodyClosed {
-		cs.reqBody.Close()
-		cs.reqBodyClosed = true
+	if cs.reqBody != nil && cs.reqBodyClosed == nil {
+		cs.closeReqBodyLocked()
 		cc.cond.Broadcast()
 	}
 }
 
+func (cs *clientStream) closeReqBodyLocked() {
+	if cs.reqBodyClosed != nil {
+		return
+	}
+	cs.reqBodyClosed = make(chan struct{})
+	reqBodyClosed := cs.reqBodyClosed
+	go func() {
+		cs.reqBody.Close()
+		close(reqBodyClosed)
+	}()
+}
+
 type stickyErrWriter struct {
 	conn    net.Conn
 	timeout time.Duration
@@ -921,10 +932,10 @@ func (cc *ClientConn) onIdleTimeout() {
 	cc.closeIfIdle()
 }
 
-func (cc *ClientConn) closeConn() error {
+func (cc *ClientConn) closeConn() {
 	t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
 	defer t.Stop()
-	return cc.tconn.Close()
+	cc.tconn.Close()
 }
 
 // A tls.Conn.Close can hang for a long time if the peer is unresponsive.
@@ -990,7 +1001,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
 	shutdownEnterWaitStateHook()
 	select {
 	case <-done:
-		return cc.closeConn()
+		cc.closeConn()
+		return nil
 	case <-ctx.Done():
 		cc.mu.Lock()
 		// Free the goroutine above
@@ -1027,7 +1039,7 @@ func (cc *ClientConn) sendGoAway() error {
 
 // closes the client connection immediately. In-flight requests are interrupted.
 // err is sent to streams.
-func (cc *ClientConn) closeForError(err error) error {
+func (cc *ClientConn) closeForError(err error) {
 	cc.mu.Lock()
 	cc.closed = true
 	for _, cs := range cc.streams {
@@ -1035,7 +1047,7 @@ func (cc *ClientConn) closeForError(err error) error {
 	}
 	cc.cond.Broadcast()
 	cc.mu.Unlock()
-	return cc.closeConn()
+	cc.closeConn()
 }
 
 // Close closes the client connection immediately.
@@ -1043,16 +1055,17 @@ func (cc *ClientConn) closeForError(err error) error {
 // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
 func (cc *ClientConn) Close() error {
 	err := errors.New("http2: client connection force closed via ClientConn.Close")
-	return cc.closeForError(err)
+	cc.closeForError(err)
+	return nil
 }
 
 // closes the client connection immediately. In-flight requests are interrupted.
-func (cc *ClientConn) closeForLostPing() error {
+func (cc *ClientConn) closeForLostPing() {
 	err := errors.New("http2: client connection lost")
 	if f := cc.t.CountError; f != nil {
 		f("conn_close_lost_ping")
 	}
-	return cc.closeForError(err)
+	cc.closeForError(err)
 }
 
 // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
@@ -1430,11 +1443,19 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
 	// and in multiple cases: server replies <=299 and >299
 	// while still writing request body
 	cc.mu.Lock()
+	mustCloseBody := false
+	if cs.reqBody != nil && cs.reqBodyClosed == nil {
+		mustCloseBody = true
+		cs.reqBodyClosed = make(chan struct{})
+	}
 	bodyClosed := cs.reqBodyClosed
-	cs.reqBodyClosed = true
 	cc.mu.Unlock()
-	if !bodyClosed && cs.reqBody != nil {
+	if mustCloseBody {
 		cs.reqBody.Close()
+		close(bodyClosed)
+	}
+	if bodyClosed != nil {
+		<-bodyClosed
 	}
 
 	if err != nil && cs.sentEndStream {
@@ -1614,7 +1635,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
 		}
 		if err != nil {
 			cc.mu.Lock()
-			bodyClosed := cs.reqBodyClosed
+			bodyClosed := cs.reqBodyClosed != nil
 			cc.mu.Unlock()
 			switch {
 			case bodyClosed:
@@ -1709,7 +1730,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
 		if cc.closed {
 			return 0, errClientConnClosed
 		}
-		if cs.reqBodyClosed {
+		if cs.reqBodyClosed != nil {
 			return 0, errStopReqBodyWrite
 		}
 		select {
@@ -2005,7 +2026,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
 	// wake up RoundTrip if there is a pending request.
 	cc.cond.Broadcast()
 
-	closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives()
+	closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
 	if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
 		if VerboseLogs {
 			cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
@@ -2081,6 +2102,7 @@ func (rl *clientConnReadLoop) cleanup() {
 		err = io.ErrUnexpectedEOF
 	}
 	cc.closed = true
+
 	for _, cs := range cc.streams {
 		select {
 		case <-cs.peerClosed:
@@ -2674,7 +2696,6 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
 		if fn := cc.t.CountError; fn != nil {
 			fn("recv_goaway_" + f.ErrCode.stringToken())
 		}
-
 	}
 	cc.setGoAway(f)
 	return nil
@@ -3028,7 +3049,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
 	cc.mu.Lock()
 	ci.WasIdle = len(cc.streams) == 0 && reused
 	if ci.WasIdle && !cc.lastActive.IsZero() {
-		ci.IdleTime = time.Now().Sub(cc.lastActive)
+		ci.IdleTime = time.Since(cc.lastActive)
 	}
 	cc.mu.Unlock()
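Note on the h2c.go change above: the new extractServer helper reads the enclosing *http.Server out of the request context (net/http stores it there under http.ServerContextKey) and ServeConn now receives it as ServeConnOpts.BaseConfig. Previously the h2c path served upgraded HTTP/2 connections against a zero-value http.Server, so configuration on the outer server was dropped. The following is a minimal sketch of how that plumbing surfaces to callers; the handler, address, and timeout values are illustrative and not part of the diff.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "proto: %s\n", r.Proto)
	})

	srv := &http.Server{
		Addr:         ":8080", // illustrative
		Handler:      h2c.NewHandler(handler, &http2.Server{}),
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	// net/http puts srv into each request's context under http.ServerContextKey;
	// with this change the h2c handler forwards it as ServeConnOpts.BaseConfig,
	// so cleartext HTTP/2 connections inherit the same base configuration.
	log.Fatal(srv.ListenAndServe())
}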
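Note on the server.go flow-control change: sendWindowUpdate no longer takes a byte count. It compares the currently available inflow window against the configured receive window (initialConnRecvWindowSize for the connection, initialStreamRecvWindowSize for a stream) and stays quiet while more than half the window remains, otherwise it refills the window in a single WINDOW_UPDATE. Below is a standalone sketch of that batching strategy; flowWindow and its fields are illustrative stand-ins for the package's unexported inflow bookkeeping, not names from the diff.

// Package flowsketch illustrates the "refill once below half" window-update
// strategy the server.go hunks switch to.
package flowsketch

type flowWindow struct {
	configured int32 // configured receive window, e.g. Server.MaxUploadBufferPerConnection
	available  int32 // tokens not yet consumed by the peer
}

// take records n bytes of DATA received from the peer.
func (w *flowWindow) take(n int32) { w.available -= n }

// refill reports the WINDOW_UPDATE increment to send, or 0 to send nothing.
// Batching updates this way avoids emitting a WINDOW_UPDATE per DATA frame.
func (w *flowWindow) refill() int32 {
	if w.available > w.configured/2 {
		return 0
	}
	n := w.configured - w.available
	w.available = w.configured
	return n
}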
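Note on the transport.go change: clientStream.reqBodyClosed turns from a bool into a chan struct{}. closeReqBodyLocked starts the body's Close in its own goroutine, since Close can block and must not run while holding cc.mu, and the channel is closed when that Close returns so cleanupWriteRequest can wait for it to finish. The sketch below shows the same request-then-wait pattern in isolation; the stream type and field names are illustrative, not the package's own.

// Package closesketch illustrates closing a request body asynchronously and
// letting another goroutine wait for completion without holding the mutex.
package closesketch

import (
	"io"
	"sync"
)

type stream struct {
	mu         sync.Mutex
	body       io.ReadCloser
	bodyClosed chan struct{} // nil until a close has been requested
}

// closeBodyLocked starts closing the body; callers must hold mu.
func (s *stream) closeBodyLocked() {
	if s.bodyClosed != nil {
		return // close already in flight
	}
	done := make(chan struct{})
	s.bodyClosed = done
	go func() {
		s.body.Close() // may block; never runs under mu
		close(done)
	}()
}

// waitBodyClosed blocks until a previously requested close has finished.
func (s *stream) waitBodyClosed() {
	s.mu.Lock()
	done := s.bodyClosed
	s.mu.Unlock()
	if done != nil {
		<-done
	}
}

The same transport.go hunks also drop the unused error results from closeConn, closeForError, and closeForLostPing, make forgetStreamID close an idle connection that has already received a GOAWAY, and replace time.Now().Sub(cc.lastActive) with the equivalent time.Since(cc.lastActive).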
