Diffstat (limited to 'src/pkg/net/http/httputil')
-rw-r--r--  src/pkg/net/http/httputil/chunked.go            |  74
-rw-r--r--  src/pkg/net/http/httputil/chunked_test.go       | 120
-rw-r--r--  src/pkg/net/http/httputil/dump.go               |  35
-rw-r--r--  src/pkg/net/http/httputil/dump_test.go          |  87
-rw-r--r--  src/pkg/net/http/httputil/httputil.go           |  32
-rw-r--r--  src/pkg/net/http/httputil/persist.go            |  21
-rw-r--r--  src/pkg/net/http/httputil/reverseproxy.go       |   4
-rw-r--r--  src/pkg/net/http/httputil/reverseproxy_test.go  |  16
8 files changed, 318 insertions, 71 deletions
diff --git a/src/pkg/net/http/httputil/chunked.go b/src/pkg/net/http/httputil/chunked.go
index b66d40951..9632bfd19 100644
--- a/src/pkg/net/http/httputil/chunked.go
+++ b/src/pkg/net/http/httputil/chunked.go
@@ -4,15 +4,14 @@
 
 // The wire protocol for HTTP's "chunked" Transfer-Encoding.
 
-// This code is a duplicate of ../chunked.go with these edits:
-//    s/newChunked/NewChunked/g
-//    s/package http/package httputil/
+// This code is duplicated in net/http and net/http/httputil.
 // Please make any changes in both files.
 
 package httputil
 
 import (
     "bufio"
+    "bytes"
     "errors"
     "fmt"
     "io"
@@ -22,13 +21,13 @@ const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
 
 var ErrLineTooLong = errors.New("header line too long")
 
-// NewChunkedReader returns a new chunkedReader that translates the data read from r
+// newChunkedReader returns a new chunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The chunkedReader returns io.EOF when the final 0-length chunk is read.
 //
-// NewChunkedReader is not needed by normal applications. The http package
+// newChunkedReader is not needed by normal applications. The http package
 // automatically decodes chunking when reading response bodies.
-func NewChunkedReader(r io.Reader) io.Reader {
+func newChunkedReader(r io.Reader) io.Reader {
     br, ok := r.(*bufio.Reader)
     if !ok {
         br = bufio.NewReader(r)
@@ -59,26 +58,45 @@ func (cr *chunkedReader) beginChunk() {
     }
 }
 
-func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
-    if cr.err != nil {
-        return 0, cr.err
+func (cr *chunkedReader) chunkHeaderAvailable() bool {
+    n := cr.r.Buffered()
+    if n > 0 {
+        peek, _ := cr.r.Peek(n)
+        return bytes.IndexByte(peek, '\n') >= 0
     }
-    if cr.n == 0 {
-        cr.beginChunk()
-        if cr.err != nil {
-            return 0, cr.err
+    return false
+}
+
+func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
+    for cr.err == nil {
+        if cr.n == 0 {
+            if n > 0 && !cr.chunkHeaderAvailable() {
+                // We've read enough. Don't potentially block
+                // reading a new chunk header.
+                break
+            }
+            cr.beginChunk()
+            continue
         }
-    }
-    if uint64(len(b)) > cr.n {
-        b = b[0:cr.n]
-    }
-    n, cr.err = cr.r.Read(b)
-    cr.n -= uint64(n)
-    if cr.n == 0 && cr.err == nil {
-        // end of chunk (CRLF)
-        if _, cr.err = io.ReadFull(cr.r, cr.buf[:]); cr.err == nil {
-            if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
-                cr.err = errors.New("malformed chunked encoding")
+        if len(b) == 0 {
+            break
+        }
+        rbuf := b
+        if uint64(len(rbuf)) > cr.n {
+            rbuf = rbuf[:cr.n]
+        }
+        var n0 int
+        n0, cr.err = cr.r.Read(rbuf)
+        n += n0
+        b = b[n0:]
+        cr.n -= uint64(n0)
+        // If we're at the end of a chunk, read the next two
+        // bytes to verify they are "\r\n".
+        if cr.n == 0 && cr.err == nil {
+            if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
+                if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
+                    cr.err = errors.New("malformed chunked encoding")
+                }
             }
         }
     }
@@ -117,16 +135,16 @@ func isASCIISpace(b byte) bool {
     return b == ' ' || b == '\t' || b == '\n' || b == '\r'
 }
 
-// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
 // "chunked" format before writing them to w. Closing the returned chunkedWriter
 // sends the final 0-length chunk that marks the end of the stream.
 //
-// NewChunkedWriter is not needed by normal applications. The http
+// newChunkedWriter is not needed by normal applications. The http
 // package adds chunking automatically if handlers don't set a
-// Content-Length header. Using NewChunkedWriter inside a handler
+// Content-Length header. Using newChunkedWriter inside a handler
 // would result in double chunking or chunking with a Content-Length
 // length, both of which are wrong.
-func NewChunkedWriter(w io.Writer) io.WriteCloser {
+func newChunkedWriter(w io.Writer) io.WriteCloser {
     return &chunkedWriter{w}
 }
 
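For reference while reading the chunked.go changes above: outside callers keep using the exported wrappers (re-added in the new httputil.go further down), so the public behaviour is unchanged. A minimal usage sketch, not part of this patch, using only the exported net/http/httputil API:

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "net/http/httputil"
)

func main() {
    // Encode two writes as HTTP/1.1 chunks, then decode them back.
    var buf bytes.Buffer
    w := httputil.NewChunkedWriter(&buf)
    w.Write([]byte("hello, "))
    w.Write([]byte("world"))
    w.Close() // writes the final 0-length chunk

    r := httputil.NewChunkedReader(&buf)
    body, err := ioutil.ReadAll(r) // the reader reports io.EOF at the 0-length chunk
    if err != nil {
        fmt.Println("read error:", err)
        return
    }
    fmt.Printf("%q\n", body) // "hello, world"
}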
diff --git a/src/pkg/net/http/httputil/chunked_test.go b/src/pkg/net/http/httputil/chunked_test.go
index a06bffad5..a7a577468 100644
--- a/src/pkg/net/http/httputil/chunked_test.go
+++ b/src/pkg/net/http/httputil/chunked_test.go
@@ -2,26 +2,25 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// This code is a duplicate of ../chunked_test.go with these edits:
-//    s/newChunked/NewChunked/g
-//    s/package http/package httputil/
+// This code is duplicated in net/http and net/http/httputil.
 // Please make any changes in both files.
 
 package httputil
 
 import (
+    "bufio"
     "bytes"
     "fmt"
     "io"
     "io/ioutil"
-    "runtime"
+    "strings"
     "testing"
 )
 
 func TestChunk(t *testing.T) {
     var b bytes.Buffer
 
-    w := NewChunkedWriter(&b)
+    w := newChunkedWriter(&b)
     const chunk1 = "hello, "
     const chunk2 = "world! 0123456789abcdef"
     w.Write([]byte(chunk1))
@@ -32,7 +31,7 @@ func TestChunk(t *testing.T) {
         t.Fatalf("chunk writer wrote %q; want %q", g, e)
     }
 
-    r := NewChunkedReader(&b)
+    r := newChunkedReader(&b)
     data, err := ioutil.ReadAll(r)
     if err != nil {
         t.Logf(`data: "%s"`, data)
@@ -43,37 +42,102 @@
     }
 }
 
+func TestChunkReadMultiple(t *testing.T) {
+    // Bunch of small chunks, all read together.
+    {
+        var b bytes.Buffer
+        w := newChunkedWriter(&b)
+        w.Write([]byte("foo"))
+        w.Write([]byte("bar"))
+        w.Close()
+
+        r := newChunkedReader(&b)
+        buf := make([]byte, 10)
+        n, err := r.Read(buf)
+        if n != 6 || err != io.EOF {
+            t.Errorf("Read = %d, %v; want 6, EOF", n, err)
+        }
+        buf = buf[:n]
+        if string(buf) != "foobar" {
+            t.Errorf("Read = %q; want %q", buf, "foobar")
+        }
+    }
+
+    // One big chunk followed by a little chunk, but the small bufio.Reader size
+    // should prevent the second chunk header from being read.
+    {
+        var b bytes.Buffer
+        w := newChunkedWriter(&b)
+        // fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
+        // the same as the bufio ReaderSize below (the minimum), so even
+        // though we're going to try to Read with a buffer larger enough to also
+        // receive "foo", the second chunk header won't be read yet.
+        const fillBufChunk = "0123456789a"
+        const shortChunk = "foo"
+        w.Write([]byte(fillBufChunk))
+        w.Write([]byte(shortChunk))
+        w.Close()
+
+        r := newChunkedReader(bufio.NewReaderSize(&b, 16))
+        buf := make([]byte, len(fillBufChunk)+len(shortChunk))
+        n, err := r.Read(buf)
+        if n != len(fillBufChunk) || err != nil {
+            t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk))
+        }
+        buf = buf[:n]
+        if string(buf) != fillBufChunk {
+            t.Errorf("Read = %q; want %q", buf, fillBufChunk)
+        }
+
+        n, err = r.Read(buf)
+        if n != len(shortChunk) || err != io.EOF {
+            t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk))
+        }
+    }
+
+    // And test that we see an EOF chunk, even though our buffer is already full:
+    {
+        r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n")))
+        buf := make([]byte, 3)
+        n, err := r.Read(buf)
+        if n != 3 || err != io.EOF {
+            t.Errorf("Read = %d, %v; want 3, EOF", n, err)
+        }
+        if string(buf) != "foo" {
+            t.Errorf("buf = %q; want foo", buf)
+        }
+    }
+}
+
 func TestChunkReaderAllocs(t *testing.T) {
-    // temporarily set GOMAXPROCS to 1 as we are testing memory allocations
-    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+    if testing.Short() {
+        t.Skip("skipping in short mode")
+    }
     var buf bytes.Buffer
-    w := NewChunkedWriter(&buf)
+    w := newChunkedWriter(&buf)
     a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
     w.Write(a)
     w.Write(b)
     w.Write(c)
     w.Close()
 
-    r := NewChunkedReader(&buf)
     readBuf := make([]byte, len(a)+len(b)+len(c)+1)
-
-    var ms runtime.MemStats
-    runtime.ReadMemStats(&ms)
-    m0 := ms.Mallocs
-
-    n, err := io.ReadFull(r, readBuf)
-
-    runtime.ReadMemStats(&ms)
-    mallocs := ms.Mallocs - m0
-    if mallocs > 1 {
-        t.Errorf("%d mallocs; want <= 1", mallocs)
-    }
-
-    if n != len(readBuf)-1 {
-        t.Errorf("read %d bytes; want %d", n, len(readBuf)-1)
-    }
-    if err != io.ErrUnexpectedEOF {
-        t.Errorf("read error = %v; want ErrUnexpectedEOF", err)
+    byter := bytes.NewReader(buf.Bytes())
+    bufr := bufio.NewReader(byter)
+    mallocs := testing.AllocsPerRun(100, func() {
+        byter.Seek(0, 0)
+        bufr.Reset(byter)
+        r := newChunkedReader(bufr)
+        n, err := io.ReadFull(r, readBuf)
+        if n != len(readBuf)-1 {
+            t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
+        }
+        if err != io.ErrUnexpectedEOF {
+            t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
+        }
+    })
+    if mallocs > 1.5 {
+        t.Errorf("mallocs = %v; want 1", mallocs)
     }
 }
diff --git a/src/pkg/net/http/httputil/dump.go b/src/pkg/net/http/httputil/dump.go
index 265499fb0..2a7a413d0 100644
--- a/src/pkg/net/http/httputil/dump.go
+++ b/src/pkg/net/http/httputil/dump.go
@@ -7,6 +7,7 @@ package httputil
 import (
     "bufio"
     "bytes"
+    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -29,7 +30,7 @@ func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
     if err = b.Close(); err != nil {
         return nil, nil, err
     }
-    return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
+    return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewReader(buf.Bytes())), nil
 }
 
 // dumpConn is a net.Conn which writes to Writer and reads from Reader
@@ -106,6 +107,7 @@ func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
             return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
         },
     }
+    defer t.CloseIdleConnections()
 
     _, err := t.RoundTrip(reqSend)
 
@@ -230,14 +232,31 @@ func DumpRequest(req *http.Request, body bool) (dump []byte, err error) {
     return
 }
 
+// errNoBody is a sentinel error value used by failureToReadBody so we can detect
+// that the lack of body was intentional.
+var errNoBody = errors.New("sentinel error value")
+
+// failureToReadBody is a io.ReadCloser that just returns errNoBody on
+// Read. It's swapped in when we don't actually want to consume the
+// body, but need a non-nil one, and want to distinguish the error
+// from reading the dummy body.
+type failureToReadBody struct{}
+
+func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody }
+func (failureToReadBody) Close() error { return nil }
+
+var emptyBody = ioutil.NopCloser(strings.NewReader(""))
+
 // DumpResponse is like DumpRequest but dumps a response.
 func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
     var b bytes.Buffer
     save := resp.Body
     savecl := resp.ContentLength
-    if !body || resp.Body == nil {
-        resp.Body = nil
-        resp.ContentLength = 0
+
+    if !body {
+        resp.Body = failureToReadBody{}
+    } else if resp.Body == nil {
+        resp.Body = emptyBody
     } else {
         save, resp.Body, err = drainBody(resp.Body)
         if err != nil {
@@ -245,11 +264,13 @@
         }
     }
     err = resp.Write(&b)
+    if err == errNoBody {
+        err = nil
+    }
     resp.Body = save
     resp.ContentLength = savecl
     if err != nil {
-        return
+        return nil, err
     }
-    dump = b.Bytes()
-    return
+    return b.Bytes(), nil
 }
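The DumpResponse hunks above change what happens when body is false: the response is given the sentinel failureToReadBody, so the dump keeps the declared Content-Length instead of zeroing it. A small usage sketch, not part of this patch (the URL is an arbitrary placeholder):

package main

import (
    "fmt"
    "net/http"
    "net/http/httputil"
)

func main() {
    resp, err := http.Get("http://example.com/")
    if err != nil {
        fmt.Println("request error:", err)
        return
    }
    defer resp.Body.Close()

    // body=false: dump only the status line and headers; the body is left
    // unread and the Content-Length header is preserved in the dump.
    dump, err := httputil.DumpResponse(resp, false)
    if err != nil {
        fmt.Println("dump error:", err)
        return
    }
    fmt.Printf("%s", dump)
}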
diff --git a/src/pkg/net/http/httputil/dump_test.go b/src/pkg/net/http/httputil/dump_test.go
index 987a82048..e1ffb3935 100644
--- a/src/pkg/net/http/httputil/dump_test.go
+++ b/src/pkg/net/http/httputil/dump_test.go
@@ -11,6 +11,8 @@ import (
     "io/ioutil"
     "net/http"
     "net/url"
+    "runtime"
+    "strings"
     "testing"
 )
 
@@ -112,6 +114,7 @@
 }
 
 func TestDumpRequest(t *testing.T) {
+    numg0 := runtime.NumGoroutine()
     for i, tt := range dumpTests {
         setBody := func() {
             if tt.Body == nil {
@@ -119,7 +122,7 @@
             }
             switch b := tt.Body.(type) {
             case []byte:
-                tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+                tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b))
             case func() io.ReadCloser:
                 tt.Req.Body = b()
             }
@@ -155,6 +158,9 @@
             }
         }
     }
+    if dg := runtime.NumGoroutine() - numg0; dg > 4 {
+        t.Errorf("Unexpectedly large number of new goroutines: %d new", dg)
+    }
 }
 
 func chunk(s string) string {
@@ -176,3 +182,82 @@ func mustNewRequest(method, url string, body io.Reader) *http.Request {
     }
     return req
 }
+
+var dumpResTests = []struct {
+    res  *http.Response
+    body bool
+    want string
+}{
+    {
+        res: &http.Response{
+            Status:        "200 OK",
+            StatusCode:    200,
+            Proto:         "HTTP/1.1",
+            ProtoMajor:    1,
+            ProtoMinor:    1,
+            ContentLength: 50,
+            Header: http.Header{
+                "Foo": []string{"Bar"},
+            },
+            Body: ioutil.NopCloser(strings.NewReader("foo")), // shouldn't be used
+        },
+        body: false, // to verify we see 50, not empty or 3.
+        want: `HTTP/1.1 200 OK
+Content-Length: 50
+Foo: Bar`,
+    },
+
+    {
+        res: &http.Response{
+            Status:        "200 OK",
+            StatusCode:    200,
+            Proto:         "HTTP/1.1",
+            ProtoMajor:    1,
+            ProtoMinor:    1,
+            ContentLength: 3,
+            Body:          ioutil.NopCloser(strings.NewReader("foo")),
+        },
+        body: true,
+        want: `HTTP/1.1 200 OK
+Content-Length: 3
+
+foo`,
+    },
+
+    {
+        res: &http.Response{
+            Status:           "200 OK",
+            StatusCode:       200,
+            Proto:            "HTTP/1.1",
+            ProtoMajor:       1,
+            ProtoMinor:       1,
+            ContentLength:    -1,
+            Body:             ioutil.NopCloser(strings.NewReader("foo")),
+            TransferEncoding: []string{"chunked"},
+        },
+        body: true,
+        want: `HTTP/1.1 200 OK
+Transfer-Encoding: chunked
+
+3
+foo
+0`,
+    },
+}
+
+func TestDumpResponse(t *testing.T) {
+    for i, tt := range dumpResTests {
+        gotb, err := DumpResponse(tt.res, tt.body)
+        if err != nil {
+            t.Errorf("%d. DumpResponse = %v", i, err)
+            continue
+        }
+        got := string(gotb)
+        got = strings.TrimSpace(got)
+        got = strings.Replace(got, "\r", "", -1)
+
+        if got != tt.want {
+            t.Errorf("%d.\nDumpResponse got:\n%s\n\nWant:\n%s\n", i, got, tt.want)
+        }
+    }
+}
diff --git a/src/pkg/net/http/httputil/httputil.go b/src/pkg/net/http/httputil/httputil.go
new file mode 100644
index 000000000..74fb6c655
--- /dev/null
+++ b/src/pkg/net/http/httputil/httputil.go
@@ -0,0 +1,32 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httputil provides HTTP utility functions, complementing the
+// more common ones in the net/http package.
+package httputil
+
+import "io"
+
+// NewChunkedReader returns a new chunkedReader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The chunkedReader returns io.EOF when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewChunkedReader(r io.Reader) io.Reader {
+    return newChunkedReader(r)
+}
+
+// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned chunkedWriter
+// sends the final 0-length chunk that marks the end of the stream.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// length, both of which are wrong.
+func NewChunkedWriter(w io.Writer) io.WriteCloser {
+    return newChunkedWriter(w)
+}
diff --git a/src/pkg/net/http/httputil/persist.go b/src/pkg/net/http/httputil/persist.go
index 507938aca..987bcc96b 100644
--- a/src/pkg/net/http/httputil/persist.go
+++ b/src/pkg/net/http/httputil/persist.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package httputil provides HTTP utility functions, complementing the
-// more common ones in the net/http package.
 package httputil
 
 import (
@@ -33,8 +31,8 @@ var errClosed = errors.New("i/o operation on closed connection")
 // i.e. requests can be read out of sync (but in the same order) while the
 // respective responses are sent.
 //
-// ServerConn is low-level and should not be needed by most applications.
-// See Server.
+// ServerConn is low-level and old. Applications should instead use Server
+// in the net/http package.
 type ServerConn struct {
     lk sync.Mutex // read-write protects the following fields
     c  net.Conn
@@ -47,8 +45,11 @@
     pipe textproto.Pipeline
 }
 
-// NewServerConn returns a new ServerConn reading and writing c. If r is not
+// NewServerConn returns a new ServerConn reading and writing c. If r is not
 // nil, it is the buffer to use when reading c.
+//
+// ServerConn is low-level and old. Applications should instead use Server
+// in the net/http package.
 func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn {
     if r == nil {
         r = bufio.NewReader(c)
     }
@@ -223,8 +224,8 @@ func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
 // supports hijacking the connection calling Hijack to
 // regain control of the underlying net.Conn and deal with it as desired.
 //
-// ClientConn is low-level and should not be needed by most applications.
-// See Client.
+// ClientConn is low-level and old. Applications should instead use
+// Client or Transport in the net/http package.
 type ClientConn struct {
     lk sync.Mutex // read-write protects the following fields
     c  net.Conn
@@ -240,6 +241,9 @@
 // NewClientConn returns a new ClientConn reading and writing c. If r is not
 // nil, it is the buffer to use when reading c.
+//
+// ClientConn is low-level and old. Applications should use Client or
+// Transport in the net/http package.
 func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
     if r == nil {
         r = bufio.NewReader(c)
     }
@@ -254,6 +258,9 @@
 
 // NewProxyClientConn works like NewClientConn but writes Requests
 // using Request's WriteProxy method.
+//
+// New code should not use NewProxyClientConn. See Client or
+// Transport in the net/http package instead.
 func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
     cc := NewClientConn(c, r)
     cc.writeReq = (*http.Request).WriteProxy
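The persist.go doc comments above steer new code toward the regular client machinery in net/http rather than ClientConn/ServerConn. A sketch of that recommended alternative, not part of this patch (the URL is a placeholder):

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func main() {
    // Instead of driving a ClientConn by hand, let Transport manage
    // connection reuse and keep-alives.
    client := &http.Client{Transport: &http.Transport{}}
    resp, err := client.Get("http://example.com/")
    if err != nil {
        fmt.Println("request error:", err)
        return
    }
    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        fmt.Println("read error:", err)
        return
    }
    fmt.Println(len(body), "bytes")
}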
diff --git a/src/pkg/net/http/httputil/reverseproxy.go b/src/pkg/net/http/httputil/reverseproxy.go
index 1990f64db..48ada5f5f 100644
--- a/src/pkg/net/http/httputil/reverseproxy.go
+++ b/src/pkg/net/http/httputil/reverseproxy.go
@@ -144,6 +144,10 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
     }
     defer res.Body.Close()
 
+    for _, h := range hopHeaders {
+        res.Header.Del(h)
+    }
+
     copyHeader(rw.Header(), res.Header)
 
     rw.WriteHeader(res.StatusCode)
diff --git a/src/pkg/net/http/httputil/reverseproxy_test.go b/src/pkg/net/http/httputil/reverseproxy_test.go
index 1c0444ec4..e9539b44b 100644
--- a/src/pkg/net/http/httputil/reverseproxy_test.go
+++ b/src/pkg/net/http/httputil/reverseproxy_test.go
@@ -16,6 +16,12 @@
     "time"
 )
 
+const fakeHopHeader = "X-Fake-Hop-Header-For-Test"
+
+func init() {
+    hopHeaders = append(hopHeaders, fakeHopHeader)
+}
+
 func TestReverseProxy(t *testing.T) {
     const backendResponse = "I am the backend"
     const backendStatus = 404
@@ -36,6 +42,10 @@ func TestReverseProxy(t *testing.T) {
             t.Errorf("backend got Host header %q, want %q", g, e)
         }
         w.Header().Set("X-Foo", "bar")
+        w.Header().Set("Upgrade", "foo")
+        w.Header().Set(fakeHopHeader, "foo")
+        w.Header().Add("X-Multi-Value", "foo")
+        w.Header().Add("X-Multi-Value", "bar")
         http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"})
         w.WriteHeader(backendStatus)
         w.Write([]byte(backendResponse))
@@ -64,6 +74,12 @@ func TestReverseProxy(t *testing.T) {
     if g, e := res.Header.Get("X-Foo"), "bar"; g != e {
         t.Errorf("got X-Foo %q; expected %q", g, e)
     }
+    if c := res.Header.Get(fakeHopHeader); c != "" {
+        t.Errorf("got %s header value %q", fakeHopHeader, c)
+    }
+    if g, e := len(res.Header["X-Multi-Value"]), 2; g != e {
+        t.Errorf("got %d X-Multi-Value header values; expected %d", g, e)
+    }
     if g, e := len(res.Header["Set-Cookie"]), 1; g != e {
         t.Fatalf("got %d SetCookies, want %d", g, e)
     }
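The reverseproxy.go hunk above deletes hop-by-hop headers (the package's hopHeaders list: Connection, Upgrade, and so on) from the backend response before copying it to the client, which is what the new test assertions verify. A minimal reverse-proxy sketch, not part of this patch, with a placeholder backend address:

package main

import (
    "log"
    "net/http"
    "net/http/httputil"
    "net/url"
)

func main() {
    backend, err := url.Parse("http://127.0.0.1:8081") // illustrative backend
    if err != nil {
        log.Fatal(err)
    }
    proxy := httputil.NewSingleHostReverseProxy(backend)

    // ServeHTTP copies the backend's response headers to the client,
    // now with hop-by-hop headers such as Upgrade stripped first.
    log.Fatal(http.ListenAndServe(":8080", proxy))
}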