Diffstat (limited to 'src/pkg/http')
59 files changed, 0 insertions, 13711 deletions
diff --git a/src/pkg/http/Makefile b/src/pkg/http/Makefile deleted file mode 100644 index df4ab9510..000000000 --- a/src/pkg/http/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include ../../Make.inc - -TARG=http -GOFILES=\ - chunked.go\ - client.go\ - cookie.go\ - dump.go\ - fs.go\ - header.go\ - lex.go\ - persist.go\ - request.go\ - response.go\ - reverseproxy.go\ - server.go\ - sniff.go\ - status.go\ - transfer.go\ - transport.go\ - -include ../../Make.pkg diff --git a/src/pkg/http/cgi/Makefile b/src/pkg/http/cgi/Makefile deleted file mode 100644 index 19b1039c2..000000000 --- a/src/pkg/http/cgi/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright 2011 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include ../../../Make.inc - -TARG=http/cgi -GOFILES=\ - child.go\ - host.go\ - -include ../../../Make.pkg diff --git a/src/pkg/http/cgi/child.go b/src/pkg/http/cgi/child.go deleted file mode 100644 index 8d0eca8d5..000000000 --- a/src/pkg/http/cgi/child.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements CGI from the perspective of a child -// process. - -package cgi - -import ( - "bufio" - "crypto/tls" - "fmt" - "http" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "url" -) - -// Request returns the HTTP request as represented in the current -// environment. This assumes the current program is being run -// by a web server in a CGI environment. -// The returned Request's Body is populated, if applicable. -func Request() (*http.Request, os.Error) { - r, err := RequestFromMap(envMap(os.Environ())) - if err != nil { - return nil, err - } - if r.ContentLength > 0 { - r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength)) - } - return r, nil -} - -func envMap(env []string) map[string]string { - m := make(map[string]string) - for _, kv := range env { - if idx := strings.Index(kv, "="); idx != -1 { - m[kv[:idx]] = kv[idx+1:] - } - } - return m -} - -// RequestFromMap creates an http.Request from CGI variables. -// The returned Request's Body field is not populated. -func RequestFromMap(params map[string]string) (*http.Request, os.Error) { - r := new(http.Request) - r.Method = params["REQUEST_METHOD"] - if r.Method == "" { - return nil, os.NewError("cgi: no REQUEST_METHOD in environment") - } - - r.Proto = params["SERVER_PROTOCOL"] - var ok bool - r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto) - if !ok { - return nil, os.NewError("cgi: invalid SERVER_PROTOCOL version") - } - - r.Close = true - r.Trailer = http.Header{} - r.Header = http.Header{} - - r.Host = params["HTTP_HOST"] - - if lenstr := params["CONTENT_LENGTH"]; lenstr != "" { - clen, err := strconv.Atoi64(lenstr) - if err != nil { - return nil, os.NewError("cgi: bad CONTENT_LENGTH in environment: " + lenstr) - } - r.ContentLength = clen - } - - if ct := params["CONTENT_TYPE"]; ct != "" { - r.Header.Set("Content-Type", ct) - } - - // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers - for k, v := range params { - if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" { - continue - } - r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v) - } - - // TODO: cookies. 
parsing them isn't exported, though. - - if r.Host != "" { - // Hostname is provided, so we can reasonably construct a URL, - // even if we have to assume 'http' for the scheme. - r.RawURL = "http://" + r.Host + params["REQUEST_URI"] - url, err := url.Parse(r.RawURL) - if err != nil { - return nil, os.NewError("cgi: failed to parse host and REQUEST_URI into a URL: " + r.RawURL) - } - r.URL = url - } - // Fallback logic if we don't have a Host header or the URL - // failed to parse - if r.URL == nil { - r.RawURL = params["REQUEST_URI"] - url, err := url.Parse(r.RawURL) - if err != nil { - return nil, os.NewError("cgi: failed to parse REQUEST_URI into a URL: " + r.RawURL) - } - r.URL = url - } - - // There's apparently a de-facto standard for this. - // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636 - if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" { - r.TLS = &tls.ConnectionState{HandshakeComplete: true} - } - - // Request.RemoteAddr has its port set by Go's standard http - // server, so we do here too. We don't have one, though, so we - // use a dummy one. - r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0") - - return r, nil -} - -// Serve executes the provided Handler on the currently active CGI -// request, if any. If there's no current CGI environment -// an error is returned. The provided handler may be nil to use -// http.DefaultServeMux. -func Serve(handler http.Handler) os.Error { - req, err := Request() - if err != nil { - return err - } - if handler == nil { - handler = http.DefaultServeMux - } - rw := &response{ - req: req, - header: make(http.Header), - bufw: bufio.NewWriter(os.Stdout), - } - handler.ServeHTTP(rw, req) - if err = rw.bufw.Flush(); err != nil { - return err - } - return nil -} - -type response struct { - req *http.Request - header http.Header - bufw *bufio.Writer - headerSent bool -} - -func (r *response) Flush() { - r.bufw.Flush() -} - -func (r *response) Header() http.Header { - return r.header -} - -func (r *response) Write(p []byte) (n int, err os.Error) { - if !r.headerSent { - r.WriteHeader(http.StatusOK) - } - return r.bufw.Write(p) -} - -func (r *response) WriteHeader(code int) { - if r.headerSent { - // Note: explicitly using Stderr, as Stdout is our HTTP output. - fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL) - return - } - r.headerSent = true - fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code)) - - // Set a default Content-Type - if _, hasType := r.header["Content-Type"]; !hasType { - r.header.Add("Content-Type", "text/html; charset=utf-8") - } - - r.header.Write(r.bufw) - r.bufw.WriteString("\r\n") - r.bufw.Flush() -} diff --git a/src/pkg/http/cgi/child_test.go b/src/pkg/http/cgi/child_test.go deleted file mode 100644 index eee043bc9..000000000 --- a/src/pkg/http/cgi/child_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Tests for CGI (the child process perspective) - -package cgi - -import ( - "testing" -) - -func TestRequest(t *testing.T) { - env := map[string]string{ - "SERVER_PROTOCOL": "HTTP/1.1", - "REQUEST_METHOD": "GET", - "HTTP_HOST": "example.com", - "HTTP_REFERER": "elsewhere", - "HTTP_USER_AGENT": "goclient", - "HTTP_FOO_BAR": "baz", - "REQUEST_URI": "/path?a=b", - "CONTENT_LENGTH": "123", - "CONTENT_TYPE": "text/xml", - "HTTPS": "1", - "REMOTE_ADDR": "5.6.7.8", - } - req, err := RequestFromMap(env) - if err != nil { - t.Fatalf("RequestFromMap: %v", err) - } - if g, e := req.UserAgent(), "goclient"; e != g { - t.Errorf("expected UserAgent %q; got %q", e, g) - } - if g, e := req.Method, "GET"; e != g { - t.Errorf("expected Method %q; got %q", e, g) - } - if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g { - t.Errorf("expected Content-Type %q; got %q", e, g) - } - if g, e := req.ContentLength, int64(123); e != g { - t.Errorf("expected ContentLength %d; got %d", e, g) - } - if g, e := req.Referer(), "elsewhere"; e != g { - t.Errorf("expected Referer %q; got %q", e, g) - } - if req.Header == nil { - t.Fatalf("unexpected nil Header") - } - if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g { - t.Errorf("expected Foo-Bar %q; got %q", e, g) - } - if g, e := req.RawURL, "http://example.com/path?a=b"; e != g { - t.Errorf("expected RawURL %q; got %q", e, g) - } - if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g { - t.Errorf("expected URL %q; got %q", e, g) - } - if g, e := req.FormValue("a"), "b"; e != g { - t.Errorf("expected FormValue(a) %q; got %q", e, g) - } - if req.Trailer == nil { - t.Errorf("unexpected nil Trailer") - } - if req.TLS == nil { - t.Errorf("expected non-nil TLS") - } - if e, g := "5.6.7.8:0", req.RemoteAddr; e != g { - t.Errorf("RemoteAddr: got %q; want %q", g, e) - } -} - -func TestRequestWithoutHost(t *testing.T) { - env := map[string]string{ - "SERVER_PROTOCOL": "HTTP/1.1", - "HTTP_HOST": "", - "REQUEST_METHOD": "GET", - "REQUEST_URI": "/path?a=b", - "CONTENT_LENGTH": "123", - } - req, err := RequestFromMap(env) - if err != nil { - t.Fatalf("RequestFromMap: %v", err) - } - if g, e := req.RawURL, "/path?a=b"; e != g { - t.Errorf("expected RawURL %q; got %q", e, g) - } - if req.URL == nil { - t.Fatalf("unexpected nil URL") - } - if g, e := req.URL.String(), "/path?a=b"; e != g { - t.Errorf("expected URL %q; got %q", e, g) - } -} diff --git a/src/pkg/http/cgi/host.go b/src/pkg/http/cgi/host.go deleted file mode 100644 index f7de89f99..000000000 --- a/src/pkg/http/cgi/host.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements the host side of CGI (being the webserver -// parent process). - -// Package cgi implements CGI (Common Gateway Interface) as specified -// in RFC 3875. -// -// Note that using CGI means starting a new process to handle each -// request, which is typically less efficient than using a -// long-running server. This package is intended primarily for -// compatibility with existing systems. 
-package cgi - -import ( - "bufio" - "exec" - "fmt" - "http" - "io" - "log" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" -) - -var trailingPort = regexp.MustCompile(`:([0-9]+)$`) - -var osDefaultInheritEnv = map[string][]string{ - "darwin": []string{"DYLD_LIBRARY_PATH"}, - "freebsd": []string{"LD_LIBRARY_PATH"}, - "hpux": []string{"LD_LIBRARY_PATH", "SHLIB_PATH"}, - "irix": []string{"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"}, - "linux": []string{"LD_LIBRARY_PATH"}, - "solaris": []string{"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"}, - "windows": []string{"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"}, -} - -// Handler runs an executable in a subprocess with a CGI environment. -type Handler struct { - Path string // path to the CGI executable - Root string // root URI prefix of handler or empty for "/" - - // Dir specifies the CGI executable's working directory. - // If Dir is empty, the base directory of Path is used. - // If Path has no base directory, the current working - // directory is used. - Dir string - - Env []string // extra environment variables to set, if any, as "key=value" - InheritEnv []string // environment variables to inherit from host, as "key" - Logger *log.Logger // optional log for errors or nil to use log.Print - Args []string // optional arguments to pass to child process - - // PathLocationHandler specifies the root http Handler that - // should handle internal redirects when the CGI process - // returns a Location header value starting with a "/", as - // specified in RFC 3875 ยง 6.3.2. This will likely be - // http.DefaultServeMux. - // - // If nil, a CGI response with a local URI path is instead sent - // back to the client and not redirected internally. - PathLocationHandler http.Handler -} - -func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - root := h.Root - if root == "" { - root = "/" - } - - if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" { - rw.WriteHeader(http.StatusBadRequest) - rw.Write([]byte("Chunked request bodies are not supported by CGI.")) - return - } - - pathInfo := req.URL.Path - if root != "/" && strings.HasPrefix(pathInfo, root) { - pathInfo = pathInfo[len(root):] - } - - port := "80" - if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 { - port = matches[1] - } - - env := []string{ - "SERVER_SOFTWARE=go", - "SERVER_NAME=" + req.Host, - "SERVER_PROTOCOL=HTTP/1.1", - "HTTP_HOST=" + req.Host, - "GATEWAY_INTERFACE=CGI/1.1", - "REQUEST_METHOD=" + req.Method, - "QUERY_STRING=" + req.URL.RawQuery, - "REQUEST_URI=" + req.URL.RawPath, - "PATH_INFO=" + pathInfo, - "SCRIPT_NAME=" + root, - "SCRIPT_FILENAME=" + h.Path, - "REMOTE_ADDR=" + req.RemoteAddr, - "REMOTE_HOST=" + req.RemoteAddr, - "SERVER_PORT=" + port, - } - - if req.TLS != nil { - env = append(env, "HTTPS=on") - } - - for k, v := range req.Header { - k = strings.Map(upperCaseAndUnderscore, k) - joinStr := ", " - if k == "COOKIE" { - joinStr = "; " - } - env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr)) - } - - if req.ContentLength > 0 { - env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength)) - } - if ctype := req.Header.Get("Content-Type"); ctype != "" { - env = append(env, "CONTENT_TYPE="+ctype) - } - - if h.Env != nil { - env = append(env, h.Env...) 
- } - - envPath := os.Getenv("PATH") - if envPath == "" { - envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin" - } - env = append(env, "PATH="+envPath) - - for _, e := range h.InheritEnv { - if v := os.Getenv(e); v != "" { - env = append(env, e+"="+v) - } - } - - for _, e := range osDefaultInheritEnv[runtime.GOOS] { - if v := os.Getenv(e); v != "" { - env = append(env, e+"="+v) - } - } - - var cwd, path string - if h.Dir != "" { - path = h.Path - cwd = h.Dir - } else { - cwd, path = filepath.Split(h.Path) - } - if cwd == "" { - cwd = "." - } - - internalError := func(err os.Error) { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("CGI error: %v", err) - } - - cmd := &exec.Cmd{ - Path: path, - Args: append([]string{h.Path}, h.Args...), - Dir: cwd, - Env: env, - Stderr: os.Stderr, // for now - } - if req.ContentLength != 0 { - cmd.Stdin = req.Body - } - stdoutRead, err := cmd.StdoutPipe() - if err != nil { - internalError(err) - return - } - - err = cmd.Start() - if err != nil { - internalError(err) - return - } - defer cmd.Wait() - defer stdoutRead.Close() - - linebody, _ := bufio.NewReaderSize(stdoutRead, 1024) - headers := make(http.Header) - statusCode := 0 - for { - line, isPrefix, err := linebody.ReadLine() - if isPrefix { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: long header line from subprocess.") - return - } - if err == os.EOF { - break - } - if err != nil { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: error reading headers: %v", err) - return - } - if len(line) == 0 { - break - } - parts := strings.SplitN(string(line), ":", 2) - if len(parts) < 2 { - h.printf("cgi: bogus header line: %s", string(line)) - continue - } - header, val := parts[0], parts[1] - header = strings.TrimSpace(header) - val = strings.TrimSpace(val) - switch { - case header == "Status": - if len(val) < 3 { - h.printf("cgi: bogus status (short): %q", val) - return - } - code, err := strconv.Atoi(val[0:3]) - if err != nil { - h.printf("cgi: bogus status: %q", val) - h.printf("cgi: line was %q", line) - return - } - statusCode = code - default: - headers.Add(header, val) - } - } - - if loc := headers.Get("Location"); loc != "" { - if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil { - h.handleInternalRedirect(rw, req, loc) - return - } - if statusCode == 0 { - statusCode = http.StatusFound - } - } - - if statusCode == 0 { - statusCode = http.StatusOK - } - - // Copy headers to rw's headers, after we've decided not to - // go into handleInternalRedirect, which won't want its rw - // headers to have been touched. - for k, vv := range headers { - for _, v := range vv { - rw.Header().Add(k, v) - } - } - - rw.WriteHeader(statusCode) - - _, err = io.Copy(rw, linebody) - if err != nil { - h.printf("cgi: copy error: %v", err) - } -} - -func (h *Handler) printf(format string, v ...interface{}) { - if h.Logger != nil { - h.Logger.Printf(format, v...) - } else { - log.Printf(format, v...) - } -} - -func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) { - url, err := req.URL.Parse(path) - if err != nil { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: error resolving local URI path %q: %v", path, err) - return - } - // TODO: RFC 3875 isn't clear if only GET is supported, but it - // suggests so: "Note that any message-body attached to the - // request (such as for a POST request) may not be available - // to the resource that is the target of the redirect." 
We - // should do some tests against Apache to see how it handles - // POST, HEAD, etc. Does the internal redirect get the same - // method or just GET? What about incoming headers? - // (e.g. Cookies) Which headers, if any, are copied into the - // second request? - newReq := &http.Request{ - Method: "GET", - URL: url, - RawURL: path, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: url.Host, - RemoteAddr: req.RemoteAddr, - TLS: req.TLS, - } - h.PathLocationHandler.ServeHTTP(rw, newReq) -} - -func upperCaseAndUnderscore(rune int) int { - switch { - case rune >= 'a' && rune <= 'z': - return rune - ('a' - 'A') - case rune == '-': - return '_' - case rune == '=': - // Maybe not part of the CGI 'spec' but would mess up - // the environment in any case, as Go represents the - // environment as a slice of "key=value" strings. - return '_' - } - // TODO: other transformations in spec or practice? - return rune -} diff --git a/src/pkg/http/cgi/host_test.go b/src/pkg/http/cgi/host_test.go deleted file mode 100644 index 1dc3abdbb..000000000 --- a/src/pkg/http/cgi/host_test.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Tests for package cgi - -package cgi - -import ( - "bufio" - "exec" - "fmt" - "http" - "http/httptest" - "io" - "os" - "net" - "path/filepath" - "strconv" - "strings" - "testing" - "time" - "runtime" -) - -func newRequest(httpreq string) *http.Request { - buf := bufio.NewReader(strings.NewReader(httpreq)) - req, err := http.ReadRequest(buf) - if err != nil { - panic("cgi: bogus http request in test: " + httpreq) - } - req.RemoteAddr = "1.2.3.4" - return req -} - -func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder { - rw := httptest.NewRecorder() - req := newRequest(httpreq) - h.ServeHTTP(rw, req) - - // Make a map to hold the test map that the CGI returns. - m := make(map[string]string) - linesRead := 0 -readlines: - for { - line, err := rw.Body.ReadString('\n') - switch { - case err == os.EOF: - break readlines - case err != nil: - t.Fatalf("unexpected error reading from CGI: %v", err) - } - linesRead++ - trimmedLine := strings.TrimRight(line, "\r\n") - split := strings.SplitN(trimmedLine, "=", 2) - if len(split) != 2 { - t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v", - len(split), linesRead, line, m) - } - m[split[0]] = split[1] - } - - for key, expected := range expectedMap { - if got := m[key]; got != expected { - t.Errorf("for key %q got %q; expected %q", key, got, expected) - } - } - return rw -} - -var cgiTested = false -var cgiWorks bool - -func skipTest(t *testing.T) bool { - if !cgiTested { - cgiTested = true - cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil - } - if !cgiWorks { - // No Perl on Windows, needed by test.cgi - // TODO: make the child process be Go, not Perl. 
- t.Logf("Skipping test: test.cgi failed.") - return true - } - return false -} - -func TestCGIBasicGet(t *testing.T) { - if skipTest(t) { - return - } - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "test": "Hello CGI", - "param-a": "b", - "param-foo": "bar", - "env-GATEWAY_INTERFACE": "CGI/1.1", - "env-HTTP_HOST": "example.com", - "env-PATH_INFO": "", - "env-QUERY_STRING": "foo=bar&a=b", - "env-REMOTE_ADDR": "1.2.3.4", - "env-REMOTE_HOST": "1.2.3.4", - "env-REQUEST_METHOD": "GET", - "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": "/test.cgi", - "env-SERVER_NAME": "example.com", - "env-SERVER_PORT": "80", - "env-SERVER_SOFTWARE": "go", - } - replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) - - if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected { - t.Errorf("got a Content-Type of %q; expected %q", got, expected) - } - if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { - t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) - } -} - -func TestCGIBasicGetAbsPath(t *testing.T) { - if skipTest(t) { - return - } - pwd, err := os.Getwd() - if err != nil { - t.Fatalf("getwd error: %v", err) - } - h := &Handler{ - Path: pwd + "/testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", - "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi", - "env-SCRIPT_NAME": "/test.cgi", - } - runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestPathInfo(t *testing.T) { - if skipTest(t) { - return - } - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "param-a": "b", - "env-PATH_INFO": "/extrapath", - "env-QUERY_STRING": "a=b", - "env-REQUEST_URI": "/test.cgi/extrapath?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": "/test.cgi", - } - runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestPathInfoDirRoot(t *testing.T) { - if skipTest(t) { - return - } - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/myscript/", - } - expectedMap := map[string]string{ - "env-PATH_INFO": "bar", - "env-QUERY_STRING": "a=b", - "env-REQUEST_URI": "/myscript/bar?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": "/myscript/", - } - runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestDupHeaders(t *testing.T) { - if skipTest(t) { - return - } - h := &Handler{ - Path: "testdata/test.cgi", - } - expectedMap := map[string]string{ - "env-REQUEST_URI": "/myscript/bar?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-HTTP_COOKIE": "nom=NOM; yum=YUM", - "env-HTTP_X_FOO": "val1, val2", - } - runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+ - "Cookie: nom=NOM\n"+ - "Cookie: yum=YUM\n"+ - "X-Foo: val1\n"+ - "X-Foo: val2\n"+ - "Host: example.com\n\n", - expectedMap) -} - -func TestPathInfoNoRoot(t *testing.T) { - if skipTest(t) { - return - } - h := &Handler{ - Path: "testdata/test.cgi", - Root: "", - } - expectedMap := map[string]string{ - "env-PATH_INFO": "/bar", - "env-QUERY_STRING": "a=b", - "env-REQUEST_URI": "/bar?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": "/", - } - runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: 
example.com\n\n", expectedMap) -} - -func TestCGIBasicPost(t *testing.T) { - if skipTest(t) { - return - } - postReq := `POST /test.cgi?a=b HTTP/1.0 -Host: example.com -Content-Type: application/x-www-form-urlencoded -Content-Length: 15 - -postfoo=postbar` - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "test": "Hello CGI", - "param-postfoo": "postbar", - "env-REQUEST_METHOD": "POST", - "env-CONTENT_LENGTH": "15", - "env-REQUEST_URI": "/test.cgi?a=b", - } - runCgiTest(t, h, postReq, expectedMap) -} - -func chunk(s string) string { - return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) -} - -// The CGI spec doesn't allow chunked requests. -func TestCGIPostChunked(t *testing.T) { - if skipTest(t) { - return - } - postReq := `POST /test.cgi?a=b HTTP/1.1 -Host: example.com -Content-Type: application/x-www-form-urlencoded -Transfer-Encoding: chunked - -` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("") - - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{} - resp := runCgiTest(t, h, postReq, expectedMap) - if got, expected := resp.Code, http.StatusBadRequest; got != expected { - t.Fatalf("Expected %v response code from chunked request body; got %d", - expected, got) - } -} - -func TestRedirect(t *testing.T) { - if skipTest(t) { - return - } - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil) - if e, g := 302, rec.Code; e != g { - t.Errorf("expected status code %d; got %d", e, g) - } - if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g { - t.Errorf("expected Location header of %q; got %q", e, g) - } -} - -func TestInternalRedirect(t *testing.T) { - if skipTest(t) { - return - } - baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path) - fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr) - }) - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - PathLocationHandler: baseHandler, - } - expectedMap := map[string]string{ - "basepath": "/foo", - "remoteaddr": "1.2.3.4", - } - runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -// TestCopyError tests that we kill the process if there's an error copying -// its output. 
(for example, from the client having gone away) -func TestCopyError(t *testing.T) { - if skipTest(t) || runtime.GOOS == "windows" { - return - } - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - ts := httptest.NewServer(h) - defer ts.Close() - - conn, err := net.Dial("tcp", ts.Listener.Addr().String()) - if err != nil { - t.Fatal(err) - } - req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil) - err = req.Write(conn) - if err != nil { - t.Fatalf("Write: %v", err) - } - - res, err := http.ReadResponse(bufio.NewReader(conn), req) - if err != nil { - t.Fatalf("ReadResponse: %v", err) - } - - pidstr := res.Header.Get("X-CGI-Pid") - if pidstr == "" { - t.Fatalf("expected an X-CGI-Pid header in response") - } - pid, err := strconv.Atoi(pidstr) - if err != nil { - t.Fatalf("invalid X-CGI-Pid value") - } - - var buf [5000]byte - n, err := io.ReadFull(res.Body, buf[:]) - if err != nil { - t.Fatalf("ReadFull: %d bytes, %v", n, err) - } - - childRunning := func() bool { - p, err := os.FindProcess(pid) - if err != nil { - return false - } - return p.Signal(os.UnixSignal(0)) == nil - } - - if !childRunning() { - t.Fatalf("pre-conn.Close, expected child to be running") - } - conn.Close() - - if tries := 0; childRunning() { - for tries < 15 && childRunning() { - time.Sleep(50e6 * int64(tries)) - tries++ - } - if childRunning() { - t.Fatalf("post-conn.Close, expected child to be gone") - } - } -} - -func TestDirUnix(t *testing.T) { - if skipTest(t) || runtime.GOOS == "windows" { - return - } - - cwd, _ := os.Getwd() - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - Dir: cwd, - } - expectedMap := map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) - - cwd, _ = os.Getwd() - cwd = filepath.Join(cwd, "testdata") - h = &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap = map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestDirWindows(t *testing.T) { - if skipTest(t) || runtime.GOOS != "windows" { - return - } - - cgifile, _ := filepath.Abs("testdata/test.cgi") - - var perl string - var err os.Error - perl, err = exec.LookPath("perl") - if err != nil { - return - } - perl, _ = filepath.Abs(perl) - - cwd, _ := os.Getwd() - h := &Handler{ - Path: perl, - Root: "/test.cgi", - Dir: cwd, - Args: []string{cgifile}, - Env: []string{"SCRIPT_FILENAME=" + cgifile}, - } - expectedMap := map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) - - // If not specify Dir on windows, working directory should be - // base directory of perl. - cwd, _ = filepath.Split(perl) - if cwd != "" && cwd[len(cwd)-1] == filepath.Separator { - cwd = cwd[:len(cwd)-1] - } - h = &Handler{ - Path: perl, - Root: "/test.cgi", - Args: []string{cgifile}, - Env: []string{"SCRIPT_FILENAME=" + cgifile}, - } - expectedMap = map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) -} diff --git a/src/pkg/http/cgi/matryoshka_test.go b/src/pkg/http/cgi/matryoshka_test.go deleted file mode 100644 index 3e4a6addf..000000000 --- a/src/pkg/http/cgi/matryoshka_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Tests a Go CGI program running under a Go CGI host process. -// Further, the two programs are the same binary, just checking -// their environment to figure out what mode to run in. - -package cgi - -import ( - "fmt" - "http" - "os" - "testing" -) - -// This test is a CGI host (testing host.go) that runs its own binary -// as a child process testing the other half of CGI (child.go). -func TestHostingOurselves(t *testing.T) { - h := &Handler{ - Path: os.Args[0], - Root: "/test.go", - Args: []string{"-test.run=TestBeChildCGIProcess"}, - } - expectedMap := map[string]string{ - "test": "Hello CGI-in-CGI", - "param-a": "b", - "param-foo": "bar", - "env-GATEWAY_INTERFACE": "CGI/1.1", - "env-HTTP_HOST": "example.com", - "env-PATH_INFO": "", - "env-QUERY_STRING": "foo=bar&a=b", - "env-REMOTE_ADDR": "1.2.3.4", - "env-REMOTE_HOST": "1.2.3.4", - "env-REQUEST_METHOD": "GET", - "env-REQUEST_URI": "/test.go?foo=bar&a=b", - "env-SCRIPT_FILENAME": os.Args[0], - "env-SCRIPT_NAME": "/test.go", - "env-SERVER_NAME": "example.com", - "env-SERVER_PORT": "80", - "env-SERVER_SOFTWARE": "go", - } - replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) - - if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected { - t.Errorf("got a Content-Type of %q; expected %q", got, expected) - } - if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { - t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) - } -} - -// Note: not actually a test. -func TestBeChildCGIProcess(t *testing.T) { - if os.Getenv("REQUEST_METHOD") == "" { - // Not in a CGI environment; skipping test. - return - } - Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - rw.Header().Set("X-Test-Header", "X-Test-Value") - fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n") - req.ParseForm() - for k, vv := range req.Form { - for _, v := range vv { - fmt.Fprintf(rw, "param-%s=%s\n", k, v) - } - } - for _, kv := range os.Environ() { - fmt.Fprintf(rw, "env-%s\n", kv) - } - })) - os.Exit(0) -} diff --git a/src/pkg/http/cgi/testdata/test.cgi b/src/pkg/http/cgi/testdata/test.cgi deleted file mode 100755 index b46b1330f..000000000 --- a/src/pkg/http/cgi/testdata/test.cgi +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/perl -# Copyright 2011 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. -# -# Test script run as a child process under cgi_test.go - -use strict; -use Cwd; - -my $q = MiniCGI->new; -my $params = $q->Vars; - -if ($params->{"loc"}) { - print "Location: $params->{loc}\r\n\r\n"; - exit(0); -} - -my $NL = "\r\n"; -$NL = "\n" if $params->{mode} eq "NL"; - -my $p = sub { - print "$_[0]$NL"; -}; - -# With carriage returns -$p->("Content-Type: text/html"); -$p->("X-CGI-Pid: $$"); -$p->("X-Test-Header: X-Test-Value"); -$p->(""); - -if ($params->{"bigresponse"}) { - for (1..1024) { - print "A" x 1024, "\n"; - } - exit 0; -} - -print "test=Hello CGI\n"; - -foreach my $k (sort keys %$params) { - print "param-$k=$params->{$k}\n"; -} - -foreach my $k (sort keys %ENV) { - my $clean_env = $ENV{$k}; - $clean_env =~ s/[\n\r]//g; - print "env-$k=$clean_env\n"; -} - -# NOTE: don't call getcwd() for windows. -# msys return /c/go/src/... not C:\go\... 
-my $dir; -if ($^O eq 'MSWin32' || $^O eq 'msys') { - my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe'; - $cmd =~ s!\\!/!g; - $dir = `$cmd /c cd`; - chomp $dir; -} else { - $dir = getcwd(); -} -print "cwd=$dir\n"; - - -# A minimal version of CGI.pm, for people without the perl-modules -# package installed. (CGI.pm used to be part of the Perl core, but -# some distros now bundle perl-base and perl-modules separately...) -package MiniCGI; - -sub new { - my $class = shift; - return bless {}, $class; -} - -sub Vars { - my $self = shift; - my $pairs; - if ($ENV{CONTENT_LENGTH}) { - $pairs = do { local $/; <STDIN> }; - } else { - $pairs = $ENV{QUERY_STRING}; - } - my $vars = {}; - foreach my $kv (split(/&/, $pairs)) { - my ($k, $v) = split(/=/, $kv, 2); - $vars->{_urldecode($k)} = _urldecode($v); - } - return $vars; -} - -sub _urldecode { - my $v = shift; - $v =~ tr/+/ /; - $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg; - return $v; -} diff --git a/src/pkg/http/chunked.go b/src/pkg/http/chunked.go deleted file mode 100644 index 6c23e691f..000000000 --- a/src/pkg/http/chunked.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "io" - "log" - "os" - "strconv" - "bufio" -) - -// NewChunkedWriter returns a new writer that translates writes into HTTP -// "chunked" format before writing them to w. Closing the returned writer -// sends the final 0-length chunk that marks the end of the stream. -// -// NewChunkedWriter is not needed by normal applications. The http -// package adds chunking automatically if handlers don't set a -// Content-Length header. Using NewChunkedWriter inside a handler -// would result in double chunking or chunking with a Content-Length -// length, both of which are wrong. -func NewChunkedWriter(w io.Writer) io.WriteCloser { - if _, bad := w.(*response); bad { - log.Printf("warning: using NewChunkedWriter in an http.Handler; expect corrupt output") - } - return &chunkedWriter{w} -} - -// Writing to ChunkedWriter translates to writing in HTTP chunked Transfer -// Encoding wire format to the underlying Wire writer. -type chunkedWriter struct { - Wire io.Writer -} - -// Write the contents of data as one chunk to Wire. -// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has -// a bug since it does not check for success of io.WriteString -func (cw *chunkedWriter) Write(data []byte) (n int, err os.Error) { - - // Don't send 0-length data. It looks like EOF for chunked encoding. - if len(data) == 0 { - return 0, nil - } - - head := strconv.Itob(len(data), 16) + "\r\n" - - if _, err = io.WriteString(cw.Wire, head); err != nil { - return 0, err - } - if n, err = cw.Wire.Write(data); err != nil { - return - } - if n != len(data) { - err = io.ErrShortWrite - return - } - _, err = io.WriteString(cw.Wire, "\r\n") - - return -} - -func (cw *chunkedWriter) Close() os.Error { - _, err := io.WriteString(cw.Wire, "0\r\n") - return err -} - -// NewChunkedReader returns a new reader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The reader returns os.EOF when the final 0-length chunk is read. -// -// NewChunkedReader is not needed by normal applications. The http package -// automatically decodes chunking when reading response bodies. 
-func NewChunkedReader(r *bufio.Reader) io.Reader { - return &chunkedReader{r: r} -} diff --git a/src/pkg/http/client.go b/src/pkg/http/client.go deleted file mode 100644 index 44b3443fc..000000000 --- a/src/pkg/http/client.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Primitive HTTP client. See RFC 2616. - -package http - -import ( - "encoding/base64" - "fmt" - "io" - "os" - "strings" - "url" -) - -// A Client is an HTTP client. Its zero value (DefaultClient) is a usable client -// that uses DefaultTransport. -// -// The Client's Transport typically has internal state (cached -// TCP connections), so Clients should be reused instead of created as -// needed. Clients are safe for concurrent use by multiple goroutines. -// -// Client is not yet very configurable. -type Client struct { - Transport RoundTripper // if nil, DefaultTransport is used - - // If CheckRedirect is not nil, the client calls it before - // following an HTTP redirect. The arguments req and via - // are the upcoming request and the requests made already, - // oldest first. If CheckRedirect returns an error, the client - // returns that error instead of issue the Request req. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect func(req *Request, via []*Request) os.Error -} - -// DefaultClient is the default Client and is used by Get, Head, and Post. -var DefaultClient = &Client{} - -// RoundTripper is an interface representing the ability to execute a -// single HTTP transaction, obtaining the Response for a given Request. -// -// A RoundTripper must be safe for concurrent use by multiple -// goroutines. -type RoundTripper interface { - // RoundTrip executes a single HTTP transaction, returning - // the Response for the request req. RoundTrip should not - // attempt to interpret the response. In particular, - // RoundTrip must return err == nil if it obtained a response, - // regardless of the response's HTTP status code. A non-nil - // err should be reserved for failure to obtain a response. - // Similarly, RoundTrip should not attempt to handle - // higher-level protocol details such as redirects, - // authentication, or cookies. - // - // RoundTrip may modify the request. The request Headers field is - // guaranteed to be initialized. - RoundTrip(req *Request) (resp *Response, err os.Error) -} - -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. -func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// Used in Send to implement io.ReadCloser by bundling together the -// bufio.Reader through which we read the response, and the underlying -// network connection. -type readClose struct { - io.Reader - io.Closer -} - -// Do sends an HTTP request and returns an HTTP response, following -// policy (e.g. redirects, cookies, auth) as configured on the client. -// -// Callers should close resp.Body when done reading from it. -// -// Generally Get, Post, or PostForm will be used instead of Do. -func (c *Client) Do(req *Request) (resp *Response, err os.Error) { - if req.Method == "GET" || req.Method == "HEAD" { - return c.doFollowingRedirects(req) - } - return send(req, c.Transport) -} - -// send issues an HTTP request. 
Caller should close resp.Body when done reading from it. -func send(req *Request, t RoundTripper) (resp *Response, err os.Error) { - if t == nil { - t = DefaultTransport - if t == nil { - err = os.NewError("no http.Client.Transport or http.DefaultTransport") - return - } - } - - // Most the callers of send (Get, Post, et al) don't need - // Headers, leaving it uninitialized. We guarantee to the - // Transport that this has been initialized, though. - if req.Header == nil { - req.Header = make(Header) - } - - info := req.URL.RawUserinfo - if len(info) > 0 { - if req.Header == nil { - req.Header = make(Header) - } - req.Header.Set("Authorization", "Basic "+base64.URLEncoding.EncodeToString([]byte(info))) - } - return t.RoundTrip(req) -} - -// True if the specified HTTP status code is one for which the Get utility should -// automatically redirect. -func shouldRedirect(statusCode int) bool { - switch statusCode { - case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect: - return true - } - return false -} - -// Get issues a GET to the specified URL. If the response is one of the following -// redirect codes, Get follows the redirect, up to a maximum of 10 redirects: -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// Caller should close r.Body when done reading from it. -// -// Get is a convenience wrapper around DefaultClient.Get. -func Get(url string) (r *Response, err os.Error) { - return DefaultClient.Get(url) -} - -// Get issues a GET to the specified URL. If the response is one of the -// following redirect codes, Get follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// Caller should close r.Body when done reading from it. -func (c *Client) Get(url string) (r *Response, err os.Error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.doFollowingRedirects(req) -} - -func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err os.Error) { - // TODO: if/when we add cookie support, the redirected request shouldn't - // necessarily supply the same cookies as the original. - var base *url.URL - redirectChecker := c.CheckRedirect - if redirectChecker == nil { - redirectChecker = defaultCheckRedirect - } - var via []*Request - - req := ireq - urlStr := "" // next relative or absolute URL to fetch (after first request) - for redirect := 0; ; redirect++ { - if redirect != 0 { - req = new(Request) - req.Method = ireq.Method - req.Header = make(Header) - req.URL, err = base.Parse(urlStr) - if err != nil { - break - } - if len(via) > 0 { - // Add the Referer header. 
- lastReq := via[len(via)-1] - if lastReq.URL.Scheme != "https" { - req.Header.Set("Referer", lastReq.URL.String()) - } - - err = redirectChecker(req, via) - if err != nil { - break - } - } - } - - urlStr = req.URL.String() - if r, err = send(req, c.Transport); err != nil { - break - } - if shouldRedirect(r.StatusCode) { - r.Body.Close() - if urlStr = r.Header.Get("Location"); urlStr == "" { - err = os.NewError(fmt.Sprintf("%d response missing Location header", r.StatusCode)) - break - } - base = req.URL - via = append(via, req) - continue - } - return - } - - method := ireq.Method - err = &url.Error{method[0:1] + strings.ToLower(method[1:]), urlStr, err} - return -} - -func defaultCheckRedirect(req *Request, via []*Request) os.Error { - if len(via) >= 10 { - return os.NewError("stopped after 10 redirects") - } - return nil -} - -// Post issues a POST to the specified URL. -// -// Caller should close r.Body when done reading from it. -// -// Post is a wrapper around DefaultClient.Post -func Post(url string, bodyType string, body io.Reader) (r *Response, err os.Error) { - return DefaultClient.Post(url, bodyType, body) -} - -// Post issues a POST to the specified URL. -// -// Caller should close r.Body when done reading from it. -func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response, err os.Error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return send(req, c.Transport) -} - -// PostForm issues a POST to the specified URL, -// with data's keys and values urlencoded as the request body. -// -// Caller should close r.Body when done reading from it. -// -// PostForm is a wrapper around DefaultClient.PostForm -func PostForm(url string, data url.Values) (r *Response, err os.Error) { - return DefaultClient.PostForm(url, data) -} - -// PostForm issues a POST to the specified URL, -// with data's keys and values urlencoded as the request body. -// -// Caller should close r.Body when done reading from it. -func (c *Client) PostForm(url string, data url.Values) (r *Response, err os.Error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// Head issues a HEAD to the specified URL. If the response is one of the -// following redirect codes, Head follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// Head is a wrapper around DefaultClient.Head -func Head(url string) (r *Response, err os.Error) { - return DefaultClient.Head(url) -} - -// Head issues a HEAD to the specified URL. If the response is one of the -// following redirect codes, Head follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -func (c *Client) Head(url string) (r *Response, err os.Error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.doFollowingRedirects(req) -} diff --git a/src/pkg/http/client_test.go b/src/pkg/http/client_test.go deleted file mode 100644 index f22cce50b..000000000 --- a/src/pkg/http/client_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Tests for client.go - -package http_test - -import ( - "fmt" - . 
"http" - "http/httptest" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "testing" - "url" -) - -var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header().Set("Last-Modified", "sometime") - fmt.Fprintf(w, "User-agent: go\nDisallow: /something/") -}) - -func TestClient(t *testing.T) { - ts := httptest.NewServer(robotsTxtHandler) - defer ts.Close() - - r, err := Get(ts.URL) - var b []byte - if err == nil { - b, err = ioutil.ReadAll(r.Body) - r.Body.Close() - } - if err != nil { - t.Error(err) - } else if s := string(b); !strings.HasPrefix(s, "User-agent:") { - t.Errorf("Incorrect page body (did not begin with User-agent): %q", s) - } -} - -func TestClientHead(t *testing.T) { - ts := httptest.NewServer(robotsTxtHandler) - defer ts.Close() - - r, err := Head(ts.URL) - if err != nil { - t.Fatal(err) - } - if _, ok := r.Header["Last-Modified"]; !ok { - t.Error("Last-Modified header not found.") - } -} - -type recordingTransport struct { - req *Request -} - -func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err os.Error) { - t.req = req - return nil, os.NewError("dummy impl") -} - -func TestGetRequestFormat(t *testing.T) { - tr := &recordingTransport{} - client := &Client{Transport: tr} - url := "http://dummy.faketld/" - client.Get(url) // Note: doesn't hit network - if tr.req.Method != "GET" { - t.Errorf("expected method %q; got %q", "GET", tr.req.Method) - } - if tr.req.URL.String() != url { - t.Errorf("expected URL %q; got %q", url, tr.req.URL.String()) - } - if tr.req.Header == nil { - t.Errorf("expected non-nil request Header") - } -} - -func TestPostRequestFormat(t *testing.T) { - tr := &recordingTransport{} - client := &Client{Transport: tr} - - url := "http://dummy.faketld/" - json := `{"key":"value"}` - b := strings.NewReader(json) - client.Post(url, "application/json", b) // Note: doesn't hit network - - if tr.req.Method != "POST" { - t.Errorf("got method %q, want %q", tr.req.Method, "POST") - } - if tr.req.URL.String() != url { - t.Errorf("got URL %q, want %q", tr.req.URL.String(), url) - } - if tr.req.Header == nil { - t.Fatalf("expected non-nil request Header") - } - if tr.req.Close { - t.Error("got Close true, want false") - } - if g, e := tr.req.ContentLength, int64(len(json)); g != e { - t.Errorf("got ContentLength %d, want %d", g, e) - } -} - -func TestPostFormRequestFormat(t *testing.T) { - tr := &recordingTransport{} - client := &Client{Transport: tr} - - urlStr := "http://dummy.faketld/" - form := make(url.Values) - form.Set("foo", "bar") - form.Add("foo", "bar2") - form.Set("bar", "baz") - client.PostForm(urlStr, form) // Note: doesn't hit network - - if tr.req.Method != "POST" { - t.Errorf("got method %q, want %q", tr.req.Method, "POST") - } - if tr.req.URL.String() != urlStr { - t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr) - } - if tr.req.Header == nil { - t.Fatalf("expected non-nil request Header") - } - if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e { - t.Errorf("got Content-Type %q, want %q", g, e) - } - if tr.req.Close { - t.Error("got Close true, want false") - } - expectedBody := "foo=bar&foo=bar2&bar=baz" - if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e { - t.Errorf("got ContentLength %d, want %d", g, e) - } - bodyb, err := ioutil.ReadAll(tr.req.Body) - if err != nil { - t.Fatalf("ReadAll on req.Body: %v", err) - } - if g := string(bodyb); g != expectedBody { - t.Errorf("got body %q, want %q", g, expectedBody) - } -} - -func 
TestRedirects(t *testing.T) { - var ts *httptest.Server - ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - n, _ := strconv.Atoi(r.FormValue("n")) - // Test Referer header. (7 is arbitrary position to test at) - if n == 7 { - if g, e := r.Referer(), ts.URL+"/?n=6"; e != g { - t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g) - } - } - if n < 15 { - Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound) - return - } - fmt.Fprintf(w, "n=%d", n) - })) - defer ts.Close() - - c := &Client{} - _, err := c.Get(ts.URL) - if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { - t.Errorf("with default client Get, expected error %q, got %q", e, g) - } - - // HEAD request should also have the ability to follow redirects. - _, err = c.Head(ts.URL) - if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { - t.Errorf("with default client Head, expected error %q, got %q", e, g) - } - - // Do should also follow redirects. - greq, _ := NewRequest("GET", ts.URL, nil) - _, err = c.Do(greq) - if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { - t.Errorf("with default client Do, expected error %q, got %q", e, g) - } - - var checkErr os.Error - var lastVia []*Request - c = &Client{CheckRedirect: func(_ *Request, via []*Request) os.Error { - lastVia = via - return checkErr - }} - res, err := c.Get(ts.URL) - finalUrl := res.Request.URL.String() - if e, g := "<nil>", fmt.Sprintf("%v", err); e != g { - t.Errorf("with custom client, expected error %q, got %q", e, g) - } - if !strings.HasSuffix(finalUrl, "/?n=15") { - t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl) - } - if e, g := 15, len(lastVia); e != g { - t.Errorf("expected lastVia to have contained %d elements; got %d", e, g) - } - - checkErr = os.NewError("no redirects allowed") - res, err = c.Get(ts.URL) - finalUrl = res.Request.URL.String() - if e, g := "Get /?n=1: no redirects allowed", fmt.Sprintf("%v", err); e != g { - t.Errorf("with redirects forbidden, expected error %q, got %q", e, g) - } -} - -func TestStreamingGet(t *testing.T) { - say := make(chan string) - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - w.(Flusher).Flush() - for str := range say { - w.Write([]byte(str)) - w.(Flusher).Flush() - } - })) - defer ts.Close() - - c := &Client{} - res, err := c.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - var buf [10]byte - for _, str := range []string{"i", "am", "also", "known", "as", "comet"} { - say <- str - n, err := io.ReadFull(res.Body, buf[0:len(str)]) - if err != nil { - t.Fatalf("ReadFull on %q: %v", str, err) - } - if n != len(str) { - t.Fatalf("Receiving %q, only read %d bytes", str, n) - } - got := string(buf[0:n]) - if got != str { - t.Fatalf("Expected %q, got %q", str, got) - } - } - close(say) - _, err = io.ReadFull(res.Body, buf[0:1]) - if err != os.EOF { - t.Fatalf("at end expected EOF, got %v", err) - } -} - -type writeCountingConn struct { - net.Conn - count *int -} - -func (c *writeCountingConn) Write(p []byte) (int, os.Error) { - *c.count++ - return c.Conn.Write(p) -} - -// TestClientWrites verifies that client requests are buffered and we -// don't send a TCP packet per line of the http request + body. 
-func TestClientWrites(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - })) - defer ts.Close() - - writes := 0 - dialer := func(netz string, addr string) (net.Conn, os.Error) { - c, err := net.Dial(netz, addr) - if err == nil { - c = &writeCountingConn{c, &writes} - } - return c, err - } - c := &Client{Transport: &Transport{Dial: dialer}} - - _, err := c.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if writes != 1 { - t.Errorf("Get request did %d Write calls, want 1", writes) - } - - writes = 0 - _, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}}) - if err != nil { - t.Fatal(err) - } - if writes != 1 { - t.Errorf("Post request did %d Write calls, want 1", writes) - } -} diff --git a/src/pkg/http/cookie.go b/src/pkg/http/cookie.go deleted file mode 100644 index fe70431bb..000000000 --- a/src/pkg/http/cookie.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "fmt" - "strconv" - "strings" - "time" -) - -// This implementation is done according to RFC 6265: -// -// http://tools.ietf.org/html/rfc6265 - -// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an -// HTTP response or the Cookie header of an HTTP request. -type Cookie struct { - Name string - Value string - Path string - Domain string - Expires time.Time - RawExpires string - - // MaxAge=0 means no 'Max-Age' attribute specified. - // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' - // MaxAge>0 means Max-Age attribute present and given in seconds - MaxAge int - Secure bool - HttpOnly bool - Raw string - Unparsed []string // Raw text of unparsed attribute-value pairs -} - -// readSetCookies parses all "Set-Cookie" values from -// the header h and returns the successfully parsed Cookies. 
-func readSetCookies(h Header) []*Cookie { - cookies := []*Cookie{} - for _, line := range h["Set-Cookie"] { - parts := strings.Split(strings.TrimSpace(line), ";") - if len(parts) == 1 && parts[0] == "" { - continue - } - parts[0] = strings.TrimSpace(parts[0]) - j := strings.Index(parts[0], "=") - if j < 0 { - continue - } - name, value := parts[0][:j], parts[0][j+1:] - if !isCookieNameValid(name) { - continue - } - value, success := parseCookieValue(value) - if !success { - continue - } - c := &Cookie{ - Name: name, - Value: value, - Raw: line, - } - for i := 1; i < len(parts); i++ { - parts[i] = strings.TrimSpace(parts[i]) - if len(parts[i]) == 0 { - continue - } - - attr, val := parts[i], "" - if j := strings.Index(attr, "="); j >= 0 { - attr, val = attr[:j], attr[j+1:] - } - lowerAttr := strings.ToLower(attr) - parseCookieValueFn := parseCookieValue - if lowerAttr == "expires" { - parseCookieValueFn = parseCookieExpiresValue - } - val, success = parseCookieValueFn(val) - if !success { - c.Unparsed = append(c.Unparsed, parts[i]) - continue - } - switch lowerAttr { - case "secure": - c.Secure = true - continue - case "httponly": - c.HttpOnly = true - continue - case "domain": - c.Domain = val - // TODO: Add domain parsing - continue - case "max-age": - secs, err := strconv.Atoi(val) - if err != nil || secs < 0 || secs != 0 && val[0] == '0' { - break - } - if secs <= 0 { - c.MaxAge = -1 - } else { - c.MaxAge = secs - } - continue - case "expires": - c.RawExpires = val - exptime, err := time.Parse(time.RFC1123, val) - if err != nil { - exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val) - if err != nil { - c.Expires = time.Time{} - break - } - } - c.Expires = *exptime - continue - case "path": - c.Path = val - // TODO: Add path parsing - continue - } - c.Unparsed = append(c.Unparsed, parts[i]) - } - cookies = append(cookies, c) - } - return cookies -} - -// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. -func SetCookie(w ResponseWriter, cookie *Cookie) { - w.Header().Add("Set-Cookie", cookie.String()) -} - -// String returns the serialization of the cookie for use in a Cookie -// header (if only Name and Value are set) or a Set-Cookie response -// header (if other fields are set). -func (c *Cookie) String() string { - var b bytes.Buffer - fmt.Fprintf(&b, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value)) - if len(c.Path) > 0 { - fmt.Fprintf(&b, "; Path=%s", sanitizeValue(c.Path)) - } - if len(c.Domain) > 0 { - fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(c.Domain)) - } - if len(c.Expires.Zone) > 0 { - fmt.Fprintf(&b, "; Expires=%s", c.Expires.Format(time.RFC1123)) - } - if c.MaxAge > 0 { - fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge) - } else if c.MaxAge < 0 { - fmt.Fprintf(&b, "; Max-Age=0") - } - if c.HttpOnly { - fmt.Fprintf(&b, "; HttpOnly") - } - if c.Secure { - fmt.Fprintf(&b, "; Secure") - } - return b.String() -} - -// readCookies parses all "Cookie" values from the header h and -// returns the successfully parsed Cookies. 
-// -// if filter isn't empty, only cookies of that name are returned -func readCookies(h Header, filter string) []*Cookie { - cookies := []*Cookie{} - lines, ok := h["Cookie"] - if !ok { - return cookies - } - - for _, line := range lines { - parts := strings.Split(strings.TrimSpace(line), ";") - if len(parts) == 1 && parts[0] == "" { - continue - } - // Per-line attributes - parsedPairs := 0 - for i := 0; i < len(parts); i++ { - parts[i] = strings.TrimSpace(parts[i]) - if len(parts[i]) == 0 { - continue - } - name, val := parts[i], "" - if j := strings.Index(name, "="); j >= 0 { - name, val = name[:j], name[j+1:] - } - if !isCookieNameValid(name) { - continue - } - if filter != "" && filter != name { - continue - } - val, success := parseCookieValue(val) - if !success { - continue - } - cookies = append(cookies, &Cookie{Name: name, Value: val}) - parsedPairs++ - } - } - return cookies -} - -func sanitizeName(n string) string { - n = strings.Replace(n, "\n", "-", -1) - n = strings.Replace(n, "\r", "-", -1) - return n -} - -func sanitizeValue(v string) string { - v = strings.Replace(v, "\n", " ", -1) - v = strings.Replace(v, "\r", " ", -1) - v = strings.Replace(v, ";", " ", -1) - return v -} - -func unquoteCookieValue(v string) string { - if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' { - return v[1 : len(v)-1] - } - return v -} - -func isCookieByte(c byte) bool { - switch { - case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a, - 0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e: - return true - } - return false -} - -func isCookieExpiresByte(c byte) (ok bool) { - return isCookieByte(c) || c == ',' || c == ' ' -} - -func parseCookieValue(raw string) (string, bool) { - return parseCookieValueUsing(raw, isCookieByte) -} - -func parseCookieExpiresValue(raw string) (string, bool) { - return parseCookieValueUsing(raw, isCookieExpiresByte) -} - -func parseCookieValueUsing(raw string, validByte func(byte) bool) (string, bool) { - raw = unquoteCookieValue(raw) - for i := 0; i < len(raw); i++ { - if !validByte(raw[i]) { - return "", false - } - } - return raw, true -} - -func isCookieNameValid(raw string) bool { - for _, c := range raw { - if !isToken(byte(c)) { - return false - } - } - return true -} diff --git a/src/pkg/http/cookie_test.go b/src/pkg/http/cookie_test.go deleted file mode 100644 index d7aeda0be..000000000 --- a/src/pkg/http/cookie_test.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
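For reference, the cookie code deleted above has two public entry points: Cookie.String, which serializes a cookie in the attribute order shown (Name=Value, then Path, Domain, Expires, Max-Age, HttpOnly, Secure), and SetCookie, which appends that serialization to the ResponseWriter's headers. A minimal usage sketch, assuming the pre-Go 1 tree this patch removes (import path "http", with server helpers such as HandleFunc and ListenAndServe in the same package):

    package main

    import (
        "fmt"
        "http"
    )

    func setSession(w http.ResponseWriter, r *http.Request) {
        c := &http.Cookie{Name: "session", Value: "abc123", Path: "/", MaxAge: 3600}
        // SetCookie adds c.String() to the response headers:
        //   Set-Cookie: session=abc123; Path=/; Max-Age=3600
        http.SetCookie(w, c)
        fmt.Fprintln(w, "cookie set")
    }

    func main() {
        http.HandleFunc("/", setSession)
        http.ListenAndServe(":8080", nil)
    }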
- -package http - -import ( - "fmt" - "json" - "os" - "reflect" - "testing" - "time" -) - -var writeSetCookiesTests = []struct { - Cookie *Cookie - Raw string -}{ - { - &Cookie{Name: "cookie-1", Value: "v$1"}, - "cookie-1=v$1", - }, - { - &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600}, - "cookie-2=two; Max-Age=3600", - }, - { - &Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"}, - "cookie-3=three; Domain=.example.com", - }, - { - &Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"}, - "cookie-4=four; Path=/restricted/", - }, -} - -func TestWriteSetCookies(t *testing.T) { - for i, tt := range writeSetCookiesTests { - if g, e := tt.Cookie.String(), tt.Raw; g != e { - t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g) - continue - } - } -} - -type headerOnlyResponseWriter Header - -func (ho headerOnlyResponseWriter) Header() Header { - return Header(ho) -} - -func (ho headerOnlyResponseWriter) Write([]byte) (int, os.Error) { - panic("NOIMPL") -} - -func (ho headerOnlyResponseWriter) WriteHeader(int) { - panic("NOIMPL") -} - -func TestSetCookie(t *testing.T) { - m := make(Header) - SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"}) - SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600}) - if l := len(m["Set-Cookie"]); l != 2 { - t.Fatalf("expected %d cookies, got %d", 2, l) - } - if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e { - t.Errorf("cookie #1: want %q, got %q", e, g) - } - if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e { - t.Errorf("cookie #2: want %q, got %q", e, g) - } -} - -var addCookieTests = []struct { - Cookies []*Cookie - Raw string -}{ - { - []*Cookie{}, - "", - }, - { - []*Cookie{&Cookie{Name: "cookie-1", Value: "v$1"}}, - "cookie-1=v$1", - }, - { - []*Cookie{ - &Cookie{Name: "cookie-1", Value: "v$1"}, - &Cookie{Name: "cookie-2", Value: "v$2"}, - &Cookie{Name: "cookie-3", Value: "v$3"}, - }, - "cookie-1=v$1; cookie-2=v$2; cookie-3=v$3", - }, -} - -func TestAddCookie(t *testing.T) { - for i, tt := range addCookieTests { - req, _ := NewRequest("GET", "http://example.com/", nil) - for _, c := range tt.Cookies { - req.AddCookie(c) - } - if g := req.Header.Get("Cookie"); g != tt.Raw { - t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g) - continue - } - } -} - -var readSetCookiesTests = []struct { - Header Header - Cookies []*Cookie -}{ - { - Header{"Set-Cookie": {"Cookie-1=v$1"}}, - []*Cookie{&Cookie{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, - }, - { - Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, - []*Cookie{&Cookie{ - Name: "NID", - Value: "99=YsDT5i3E-CXax-", - Path: "/", - Domain: ".google.ch", - HttpOnly: true, - Expires: time.Time{Year: 2011, Month: 11, Day: 23, Hour: 1, Minute: 5, Second: 3, Weekday: 3, ZoneOffset: 0, Zone: "GMT"}, - RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT", - Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly", - }}, - }, -} - -func toJSON(v interface{}) string { - b, err := json.Marshal(v) - if err != nil { - return fmt.Sprintf("%#v", v) - } - return string(b) -} - -func TestReadSetCookies(t *testing.T) { - for i, tt := range readSetCookiesTests { - for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input - c := readSetCookies(tt.Header) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readSetCookies: 
have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) - continue - } - } - } -} - -var readCookiesTests = []struct { - Header Header - Filter string - Cookies []*Cookie -}{ - { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "", - []*Cookie{ - &Cookie{Name: "Cookie-1", Value: "v$1"}, - &Cookie{Name: "c2", Value: "v2"}, - }, - }, - { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "c2", - []*Cookie{ - &Cookie{Name: "c2", Value: "v2"}, - }, - }, - { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "", - []*Cookie{ - &Cookie{Name: "Cookie-1", Value: "v$1"}, - &Cookie{Name: "c2", Value: "v2"}, - }, - }, - { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "c2", - []*Cookie{ - &Cookie{Name: "c2", Value: "v2"}, - }, - }, -} - -func TestReadCookies(t *testing.T) { - for i, tt := range readCookiesTests { - for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input - c := readCookies(tt.Header, tt.Filter) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies)) - continue - } - } - } -} diff --git a/src/pkg/http/dump.go b/src/pkg/http/dump.go deleted file mode 100644 index 358980f7c..000000000 --- a/src/pkg/http/dump.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "io" - "io/ioutil" - "os" -) - -// One of the copies, say from b to r2, could be avoided by using a more -// elaborate trick where the other copy is made during Request/Response.Write. -// This would complicate things too much, given that these functions are for -// debugging only. -func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err os.Error) { - var buf bytes.Buffer - if _, err = buf.ReadFrom(b); err != nil { - return nil, nil, err - } - if err = b.Close(); err != nil { - return nil, nil, err - } - return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil -} - -// DumpRequest returns the wire representation of req, -// optionally including the request body, for debugging. -// DumpRequest is semantically a no-op, but in order to -// dump the body, it reads the body data into memory and -// changes req.Body to refer to the in-memory copy. -// The documentation for Request.Write details which fields -// of req are used. -func DumpRequest(req *Request, body bool) (dump []byte, err os.Error) { - var b bytes.Buffer - save := req.Body - if !body || req.Body == nil { - req.Body = nil - } else { - save, req.Body, err = drainBody(req.Body) - if err != nil { - return - } - } - err = req.Write(&b) - req.Body = save - if err != nil { - return - } - dump = b.Bytes() - return -} - -// DumpResponse is like DumpRequest but dumps a response. -func DumpResponse(resp *Response, body bool) (dump []byte, err os.Error) { - var b bytes.Buffer - save := resp.Body - savecl := resp.ContentLength - if !body || resp.Body == nil { - resp.Body = nil - resp.ContentLength = 0 - } else { - save, resp.Body, err = drainBody(resp.Body) - if err != nil { - return - } - } - err = resp.Write(&b) - resp.Body = save - resp.ContentLength = savecl - if err != nil { - return - } - dump = b.Bytes() - return -} diff --git a/src/pkg/http/export_test.go b/src/pkg/http/export_test.go deleted file mode 100644 index 3fe658641..000000000 --- a/src/pkg/http/export_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
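dump.go, deleted just above, exists purely for debugging: DumpRequest and DumpResponse return the wire representation of a message, and with body=true they drain the body into memory and re-attach it so the message can still be written afterwards. A small sketch against the old "http" import path (the reader-body form of NewRequest is assumed from that era):

    package main

    import (
        "bytes"
        "fmt"
        "http"
    )

    func main() {
        req, err := http.NewRequest("POST", "http://example.com/upload",
            bytes.NewBufferString("payload"))
        if err != nil {
            panic(err)
        }
        // body=true: req.Body is read into an in-memory copy, so the
        // request remains usable after dumping.
        dump, err := http.DumpRequest(req, true)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s", dump)
    }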
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Bridge package to expose http internals to tests in the http_test -// package. - -package http - -func (t *Transport) IdleConnKeysForTesting() (keys []string) { - keys = make([]string, 0) - t.lk.Lock() - defer t.lk.Unlock() - if t.idleConn == nil { - return - } - for key := range t.idleConn { - keys = append(keys, key) - } - return -} - -func (t *Transport) IdleConnCountForTesting(cacheKey string) int { - t.lk.Lock() - defer t.lk.Unlock() - if t.idleConn == nil { - return 0 - } - conns, ok := t.idleConn[cacheKey] - if !ok { - return 0 - } - return len(conns) -} - -func NewTestTimeoutHandler(handler Handler, ch <-chan int64) Handler { - f := func() <-chan int64 { - return ch - } - return &timeoutHandler{handler, f, ""} -} diff --git a/src/pkg/http/fcgi/Makefile b/src/pkg/http/fcgi/Makefile deleted file mode 100644 index bc01cdea9..000000000 --- a/src/pkg/http/fcgi/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright 2011 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include ../../../Make.inc - -TARG=http/fcgi -GOFILES=\ - child.go\ - fcgi.go\ - -include ../../../Make.pkg diff --git a/src/pkg/http/fcgi/child.go b/src/pkg/http/fcgi/child.go deleted file mode 100644 index 19718824c..000000000 --- a/src/pkg/http/fcgi/child.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fcgi - -// This file implements FastCGI from the perspective of a child process. - -import ( - "fmt" - "http" - "http/cgi" - "io" - "net" - "os" - "time" -) - -// request holds the state for an in-progress request. As soon as it's complete, -// it's converted to an http.Request. -type request struct { - pw *io.PipeWriter - reqId uint16 - params map[string]string - buf [1024]byte - rawParams []byte - keepConn bool -} - -func newRequest(reqId uint16, flags uint8) *request { - r := &request{ - reqId: reqId, - params: map[string]string{}, - keepConn: flags&flagKeepConn != 0, - } - r.rawParams = r.buf[:0] - return r -} - -// parseParams reads an encoded []byte into Params. -func (r *request) parseParams() { - text := r.rawParams - r.rawParams = nil - for len(text) > 0 { - keyLen, n := readSize(text) - if n == 0 { - return - } - text = text[n:] - valLen, n := readSize(text) - if n == 0 { - return - } - text = text[n:] - key := readString(text, keyLen) - text = text[keyLen:] - val := readString(text, valLen) - text = text[valLen:] - r.params[key] = val - } -} - -// response implements http.ResponseWriter. -type response struct { - req *request - header http.Header - w *bufWriter - wroteHeader bool -} - -func newResponse(c *child, req *request) *response { - return &response{ - req: req, - header: http.Header{}, - w: newWriter(c.conn, typeStdout, req.reqId), - } -} - -func (r *response) Header() http.Header { - return r.header -} - -func (r *response) Write(data []byte) (int, os.Error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - return r.w.Write(data) -} - -func (r *response) WriteHeader(code int) { - if r.wroteHeader { - return - } - r.wroteHeader = true - if code == http.StatusNotModified { - // Must not have body. 
- r.header.Del("Content-Type") - r.header.Del("Content-Length") - r.header.Del("Transfer-Encoding") - } else if r.header.Get("Content-Type") == "" { - r.header.Set("Content-Type", "text/html; charset=utf-8") - } - - if r.header.Get("Date") == "" { - r.header.Set("Date", time.UTC().Format(http.TimeFormat)) - } - - fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code)) - r.header.Write(r.w) - r.w.WriteString("\r\n") -} - -func (r *response) Flush() { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - r.w.Flush() -} - -func (r *response) Close() os.Error { - r.Flush() - return r.w.Close() -} - -type child struct { - conn *conn - handler http.Handler -} - -func newChild(rwc net.Conn, handler http.Handler) *child { - return &child{newConn(rwc), handler} -} - -func (c *child) serve() { - requests := map[uint16]*request{} - defer c.conn.Close() - var rec record - var br beginRequest - for { - if err := rec.read(c.conn.rwc); err != nil { - return - } - - req, ok := requests[rec.h.Id] - if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { - // The spec says to ignore unknown request IDs. - continue - } - if ok && rec.h.Type == typeBeginRequest { - // The server is trying to begin a request with the same ID - // as an in-progress request. This is an error. - return - } - - switch rec.h.Type { - case typeBeginRequest: - if err := br.read(rec.content()); err != nil { - return - } - if br.role != roleResponder { - c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) - break - } - requests[rec.h.Id] = newRequest(rec.h.Id, br.flags) - case typeParams: - // NOTE(eds): Technically a key-value pair can straddle the boundary - // between two packets. We buffer until we've received all parameters. - if len(rec.content()) > 0 { - req.rawParams = append(req.rawParams, rec.content()...) - break - } - req.parseParams() - case typeStdin: - content := rec.content() - if req.pw == nil { - var body io.ReadCloser - if len(content) > 0 { - // body could be an io.LimitReader, but it shouldn't matter - // as long as both sides are behaving. - body, req.pw = io.Pipe() - } - go c.serveRequest(req, body) - } - if len(content) > 0 { - // TODO(eds): This blocks until the handler reads from the pipe. - // If the handler takes a long time, it might be a problem. - req.pw.Write(content) - } else if req.pw != nil { - req.pw.Close() - } - case typeGetValues: - values := map[string]string{"FCGI_MPXS_CONNS": "1"} - c.conn.writePairs(0, typeGetValuesResult, values) - case typeData: - // If the filter role is implemented, read the data stream here. - case typeAbortRequest: - requests[rec.h.Id] = nil, false - c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) - if !req.keepConn { - // connection will close upon return - return - } - default: - b := make([]byte, 8) - b[0] = rec.h.Type - c.conn.writeRecord(typeUnknownType, 0, b) - } - } -} - -func (c *child) serveRequest(req *request, body io.ReadCloser) { - r := newResponse(c, req) - httpReq, err := cgi.RequestFromMap(req.params) - if err != nil { - // there was an error reading the request - r.WriteHeader(http.StatusInternalServerError) - c.conn.writeRecord(typeStderr, req.reqId, []byte(err.String())) - } else { - httpReq.Body = body - c.handler.ServeHTTP(r, httpReq) - } - if body != nil { - body.Close() - } - r.Close() - c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) - if !req.keepConn { - c.conn.Close() - } -} - -// Serve accepts incoming FastCGI connections on the listener l, creating a new -// service thread for each. 
The service threads read requests and then call handler -// to reply to them. -// If l is nil, Serve accepts connections on stdin. -// If handler is nil, http.DefaultServeMux is used. -func Serve(l net.Listener, handler http.Handler) os.Error { - if l == nil { - var err os.Error - l, err = net.FileListener(os.Stdin) - if err != nil { - return err - } - defer l.Close() - } - if handler == nil { - handler = http.DefaultServeMux - } - for { - rw, err := l.Accept() - if err != nil { - return err - } - c := newChild(rw, handler) - go c.serve() - } - panic("unreachable") -} diff --git a/src/pkg/http/fcgi/fcgi.go b/src/pkg/http/fcgi/fcgi.go deleted file mode 100644 index 8e2e1cd3c..000000000 --- a/src/pkg/http/fcgi/fcgi.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fcgi implements the FastCGI protocol. -// Currently only the responder role is supported. -// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 -package fcgi - -// This file defines the raw protocol and some utilities used by the child and -// the host. - -import ( - "bufio" - "bytes" - "encoding/binary" - "io" - "os" - "sync" -) - -const ( - // Packet Types - typeBeginRequest = iota + 1 - typeAbortRequest - typeEndRequest - typeParams - typeStdin - typeStdout - typeStderr - typeData - typeGetValues - typeGetValuesResult - typeUnknownType -) - -// keep the connection between web-server and responder open after request -const flagKeepConn = 1 - -const ( - maxWrite = 65535 // maximum record body - maxPad = 255 -) - -const ( - roleResponder = iota + 1 // only Responders are implemented. - roleAuthorizer - roleFilter -) - -const ( - statusRequestComplete = iota - statusCantMultiplex - statusOverloaded - statusUnknownRole -) - -const headerLen = 8 - -type header struct { - Version uint8 - Type uint8 - Id uint16 - ContentLength uint16 - PaddingLength uint8 - Reserved uint8 -} - -type beginRequest struct { - role uint16 - flags uint8 - reserved [5]uint8 -} - -func (br *beginRequest) read(content []byte) os.Error { - if len(content) != 8 { - return os.NewError("fcgi: invalid begin request record") - } - br.role = binary.BigEndian.Uint16(content) - br.flags = content[2] - return nil -} - -// for padding so we don't have to allocate all the time -// not synchronized because we don't care what the contents are -var pad [maxPad]byte - -func (h *header) init(recType uint8, reqId uint16, contentLength int) { - h.Version = 1 - h.Type = recType - h.Id = reqId - h.ContentLength = uint16(contentLength) - h.PaddingLength = uint8(-contentLength & 7) -} - -// conn sends records over rwc -type conn struct { - mutex sync.Mutex - rwc io.ReadWriteCloser - - // to avoid allocations - buf bytes.Buffer - h header -} - -func newConn(rwc io.ReadWriteCloser) *conn { - return &conn{rwc: rwc} -} - -func (c *conn) Close() os.Error { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.rwc.Close() -} - -type record struct { - h header - buf [maxWrite + maxPad]byte -} - -func (rec *record) read(r io.Reader) (err os.Error) { - if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { - return err - } - if rec.h.Version != 1 { - return os.NewError("fcgi: invalid header version") - } - n := int(rec.h.ContentLength) + int(rec.h.PaddingLength) - if _, err = io.ReadFull(r, rec.buf[:n]); err != nil { - return err - } - return nil -} - -func (r *record) content() []byte { - return 
r.buf[:r.h.ContentLength] -} - -// writeRecord writes and sends a single record. -func (c *conn) writeRecord(recType uint8, reqId uint16, b []byte) os.Error { - c.mutex.Lock() - defer c.mutex.Unlock() - c.buf.Reset() - c.h.init(recType, reqId, len(b)) - if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { - return err - } - if _, err := c.buf.Write(b); err != nil { - return err - } - if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { - return err - } - _, err := c.rwc.Write(c.buf.Bytes()) - return err -} - -func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) os.Error { - b := [8]byte{byte(role >> 8), byte(role), flags} - return c.writeRecord(typeBeginRequest, reqId, b[:]) -} - -func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) os.Error { - b := make([]byte, 8) - binary.BigEndian.PutUint32(b, uint32(appStatus)) - b[4] = protocolStatus - return c.writeRecord(typeEndRequest, reqId, b) -} - -func (c *conn) writePairs(recType uint8, reqId uint16, pairs map[string]string) os.Error { - w := newWriter(c, recType, reqId) - b := make([]byte, 8) - for k, v := range pairs { - n := encodeSize(b, uint32(len(k))) - n += encodeSize(b[n:], uint32(len(k))) - if _, err := w.Write(b[:n]); err != nil { - return err - } - if _, err := w.WriteString(k); err != nil { - return err - } - if _, err := w.WriteString(v); err != nil { - return err - } - } - w.Close() - return nil -} - -func readSize(s []byte) (uint32, int) { - if len(s) == 0 { - return 0, 0 - } - size, n := uint32(s[0]), 1 - if size&(1<<7) != 0 { - if len(s) < 4 { - return 0, 0 - } - n = 4 - size = binary.BigEndian.Uint32(s) - size &^= 1 << 31 - } - return size, n -} - -func readString(s []byte, size uint32) string { - if size > uint32(len(s)) { - return "" - } - return string(s[:size]) -} - -func encodeSize(b []byte, size uint32) int { - if size > 127 { - size |= 1 << 31 - binary.BigEndian.PutUint32(b, size) - return 4 - } - b[0] = byte(size) - return 1 -} - -// bufWriter encapsulates bufio.Writer but also closes the underlying stream when -// Closed. -type bufWriter struct { - closer io.Closer - *bufio.Writer -} - -func (w *bufWriter) Close() os.Error { - if err := w.Writer.Flush(); err != nil { - w.closer.Close() - return err - } - return w.closer.Close() -} - -func newWriter(c *conn, recType uint8, reqId uint16) *bufWriter { - s := &streamWriter{c: c, recType: recType, reqId: reqId} - w, _ := bufio.NewWriterSize(s, maxWrite) - return &bufWriter{s, w} -} - -// streamWriter abstracts out the separation of a stream into discrete records. -// It only writes maxWrite bytes at a time. -type streamWriter struct { - c *conn - recType uint8 - reqId uint16 -} - -func (w *streamWriter) Write(p []byte) (int, os.Error) { - nn := 0 - for len(p) > 0 { - n := len(p) - if n > maxWrite { - n = maxWrite - } - if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil { - return nn, err - } - nn += n - p = p[n:] - } - return nn, nil -} - -func (w *streamWriter) Close() os.Error { - // send empty record to close the stream - return w.c.writeRecord(w.recType, w.reqId, nil) -} diff --git a/src/pkg/http/fcgi/fcgi_test.go b/src/pkg/http/fcgi/fcgi_test.go deleted file mode 100644 index 16a624329..000000000 --- a/src/pkg/http/fcgi/fcgi_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
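The parameter encoding in fcgi.go above (encodeSize and readSize, used by writePairs) follows the FastCGI length rule: lengths up to 127 occupy one byte, while larger lengths occupy four big-endian bytes with the top bit set. A standalone illustration of that rule, restating the unexported helper rather than importing it:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // encodeSize mirrors the deleted fcgi helper: one byte for sizes <= 127,
    // otherwise four big-endian bytes with bit 31 set.
    func encodeSize(b []byte, size uint32) int {
        if size > 127 {
            size |= 1 << 31
            binary.BigEndian.PutUint32(b, size)
            return 4
        }
        b[0] = byte(size)
        return 1
    }

    func main() {
        b := make([]byte, 4)
        for _, n := range []uint32{0, 127, 128, 1000} {
            w := encodeSize(b, n)
            fmt.Printf("%4d -> % x\n", n, b[:w])
        }
        // Output matches the sizeTests table in fcgi_test.go below:
        //    0 -> 00
        //  127 -> 7f
        //  128 -> 80 00 00 80
        // 1000 -> 80 00 03 e8
    }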
- -package fcgi - -import ( - "bytes" - "io" - "os" - "testing" -) - -var sizeTests = []struct { - size uint32 - bytes []byte -}{ - {0, []byte{0x00}}, - {127, []byte{0x7F}}, - {128, []byte{0x80, 0x00, 0x00, 0x80}}, - {1000, []byte{0x80, 0x00, 0x03, 0xE8}}, - {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}}, -} - -func TestSize(t *testing.T) { - b := make([]byte, 4) - for i, test := range sizeTests { - n := encodeSize(b, test.size) - if !bytes.Equal(b[:n], test.bytes) { - t.Errorf("%d expected %x, encoded %x", i, test.bytes, b) - } - size, n := readSize(test.bytes) - if size != test.size { - t.Errorf("%d expected %d, read %d", i, test.size, size) - } - if len(test.bytes) != n { - t.Errorf("%d did not consume all the bytes", i) - } - } -} - -var streamTests = []struct { - desc string - recType uint8 - reqId uint16 - content []byte - raw []byte -}{ - {"single record", typeStdout, 1, nil, - []byte{1, typeStdout, 0, 1, 0, 0, 0, 0}, - }, - // this data will have to be split into two records - {"two records", typeStdin, 300, make([]byte, 66000), - bytes.Join([][]byte{ - // header for the first record - []byte{1, typeStdin, 0x01, 0x2C, 0xFF, 0xFF, 1, 0}, - make([]byte, 65536), - // header for the second - []byte{1, typeStdin, 0x01, 0x2C, 0x01, 0xD1, 7, 0}, - make([]byte, 472), - // header for the empty record - []byte{1, typeStdin, 0x01, 0x2C, 0, 0, 0, 0}, - }, - nil), - }, -} - -type nilCloser struct { - io.ReadWriter -} - -func (c *nilCloser) Close() os.Error { return nil } - -func TestStreams(t *testing.T) { - var rec record -outer: - for _, test := range streamTests { - buf := bytes.NewBuffer(test.raw) - var content []byte - for buf.Len() > 0 { - if err := rec.read(buf); err != nil { - t.Errorf("%s: error reading record: %v", test.desc, err) - continue outer - } - content = append(content, rec.content()...) - } - if rec.h.Type != test.recType { - t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) - continue - } - if rec.h.Id != test.reqId { - t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) - continue - } - if !bytes.Equal(content, test.content) { - t.Errorf("%s: read wrong content", test.desc) - continue - } - buf.Reset() - c := newConn(&nilCloser{buf}) - w := newWriter(c, test.recType, test.reqId) - if _, err := w.Write(test.content); err != nil { - t.Errorf("%s: error writing record: %v", test.desc, err) - continue - } - if err := w.Close(); err != nil { - t.Errorf("%s: error closing stream: %v", test.desc, err) - continue - } - if !bytes.Equal(buf.Bytes(), test.raw) { - t.Errorf("%s: wrote wrong content", test.desc) - } - } -} diff --git a/src/pkg/http/fs.go b/src/pkg/http/fs.go deleted file mode 100644 index 2c7c636fd..000000000 --- a/src/pkg/http/fs.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP file system request handler - -package http - -import ( - "fmt" - "io" - "mime" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "time" - "utf8" -) - -// A Dir implements http.FileSystem using the native file -// system restricted to a specific directory tree. 
-type Dir string - -func (d Dir) Open(name string) (File, os.Error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 { - return nil, os.NewError("http: invalid character in file path") - } - f, err := os.Open(filepath.Join(string(d), filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -// A FileSystem implements access to a collection of named files. -// The elements in a file path are separated by slash ('/', U+002F) -// characters, regardless of host operating system convention. -type FileSystem interface { - Open(name string) (File, os.Error) -} - -// A File is returned by a FileSystem's Open method and can be -// served by the FileServer implementation. -type File interface { - Close() os.Error - Stat() (*os.FileInfo, os.Error) - Readdir(count int) ([]os.FileInfo, os.Error) - Read([]byte) (int, os.Error) - Seek(offset int64, whence int) (int64, os.Error) -} - -// Heuristic: b is text if it is valid UTF-8 and doesn't -// contain any unprintable ASCII or Unicode characters. -func isText(b []byte) bool { - for len(b) > 0 && utf8.FullRune(b) { - rune, size := utf8.DecodeRune(b) - if size == 1 && rune == utf8.RuneError { - // decoding error - return false - } - if 0x7F <= rune && rune <= 0x9F { - return false - } - if rune < ' ' { - switch rune { - case '\n', '\r', '\t': - // okay - default: - // binary garbage - return false - } - } - b = b[size:] - } - return true -} - -func dirList(w ResponseWriter, f File) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - fmt.Fprintf(w, "<pre>\n") - for { - dirs, err := f.Readdir(100) - if err != nil || len(dirs) == 0 { - break - } - for _, d := range dirs { - name := d.Name - if d.IsDirectory() { - name += "/" - } - // TODO htmlescape - fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", name, name) - } - } - fmt.Fprintf(w, "</pre>\n") -} - -// name is '/'-separated, not filepath.Separator. -func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) { - const indexPage = "/index.html" - - // redirect .../index.html to .../ - // can't use Redirect() because that would make the path absolute, - // which would be a problem running under StripPrefix - if strings.HasSuffix(r.URL.Path, indexPage) { - localRedirect(w, r, "./") - return - } - - f, err := fs.Open(name) - if err != nil { - // TODO expose actual error? - NotFound(w, r) - return - } - defer f.Close() - - d, err1 := f.Stat() - if err1 != nil { - // TODO expose actual error? 
- NotFound(w, r) - return - } - - if redirect { - // redirect to canonical path: / at end of directory url - // r.URL.Path always begins with / - url := r.URL.Path - if d.IsDirectory() { - if url[len(url)-1] != '/' { - localRedirect(w, r, path.Base(url)+"/") - return - } - } else { - if url[len(url)-1] == '/' { - localRedirect(w, r, "../"+path.Base(url)) - return - } - } - } - - if t, _ := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); t != nil && d.Mtime_ns/1e9 <= t.Seconds() { - w.WriteHeader(StatusNotModified) - return - } - w.Header().Set("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat)) - - // use contents of index.html for directory, if present - if d.IsDirectory() { - index := name + indexPage - ff, err := fs.Open(index) - if err == nil { - defer ff.Close() - dd, err := ff.Stat() - if err == nil { - name = index - d = dd - f = ff - } - } - } - - if d.IsDirectory() { - dirList(w, f) - return - } - - // serve file - size := d.Size - code := StatusOK - - // If Content-Type isn't set, use the file's extension to find it. - if w.Header().Get("Content-Type") == "" { - ctype := mime.TypeByExtension(filepath.Ext(name)) - if ctype == "" { - // read a chunk to decide between utf-8 text and binary - var buf [1024]byte - n, _ := io.ReadFull(f, buf[:]) - b := buf[:n] - if isText(b) { - ctype = "text/plain; charset=utf-8" - } else { - // generic binary - ctype = "application/octet-stream" - } - f.Seek(0, os.SEEK_SET) // rewind to output whole file - } - w.Header().Set("Content-Type", ctype) - } - - // handle Content-Range header. - // TODO(adg): handle multiple ranges - ranges, err := parseRange(r.Header.Get("Range"), size) - if err == nil && len(ranges) > 1 { - err = os.NewError("multiple ranges not supported") - } - if err != nil { - Error(w, err.String(), StatusRequestedRangeNotSatisfiable) - return - } - if len(ranges) == 1 { - ra := ranges[0] - if _, err := f.Seek(ra.start, os.SEEK_SET); err != nil { - Error(w, err.String(), StatusRequestedRangeNotSatisfiable) - return - } - size = ra.length - code = StatusPartialContent - w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size)) - } - - w.Header().Set("Accept-Ranges", "bytes") - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.Itoa64(size)) - } - - w.WriteHeader(code) - - if r.Method != "HEAD" { - io.Copyn(w, f, size) - } -} - -// localRedirect gives a Moved Permanently response. -// It does not convert relative paths to absolute paths like Redirect does. -func localRedirect(w ResponseWriter, r *Request, newPath string) { - if q := r.URL.RawQuery; q != "" { - newPath += "?" + q - } - w.Header().Set("Location", newPath) - w.WriteHeader(StatusMovedPermanently) -} - -// ServeFile replies to the request with the contents of the named file or directory. -func ServeFile(w ResponseWriter, r *Request, name string) { - dir, file := filepath.Split(name) - serveFile(w, r, Dir(dir), file, false) -} - -type fileHandler struct { - root FileSystem -} - -// FileServer returns a handler that serves HTTP requests -// with the contents of the file system rooted at root. 
-// -// To use the operating system's file system implementation, -// use http.Dir: -// -// http.Handle("/", http.FileServer(http.Dir("/tmp"))) -func FileServer(root FileSystem) Handler { - return &fileHandler{root} -} - -func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) { - upath := r.URL.Path - if !strings.HasPrefix(upath, "/") { - upath = "/" + upath - r.URL.Path = upath - } - serveFile(w, r, f.root, path.Clean(upath), true) -} - -// httpRange specifies the byte range to be sent to the client. -type httpRange struct { - start, length int64 -} - -// parseRange parses a Range header string as per RFC 2616. -func parseRange(s string, size int64) ([]httpRange, os.Error) { - if s == "" { - return nil, nil // header not present - } - const b = "bytes=" - if !strings.HasPrefix(s, b) { - return nil, os.NewError("invalid range") - } - var ranges []httpRange - for _, ra := range strings.Split(s[len(b):], ",") { - i := strings.Index(ra, "-") - if i < 0 { - return nil, os.NewError("invalid range") - } - start, end := ra[:i], ra[i+1:] - var r httpRange - if start == "" { - // If no start is specified, end specifies the - // range start relative to the end of the file. - i, err := strconv.Atoi64(end) - if err != nil { - return nil, os.NewError("invalid range") - } - if i > size { - i = size - } - r.start = size - i - r.length = size - r.start - } else { - i, err := strconv.Atoi64(start) - if err != nil || i > size || i < 0 { - return nil, os.NewError("invalid range") - } - r.start = i - if end == "" { - // If no end is specified, range extends to end of the file. - r.length = size - r.start - } else { - i, err := strconv.Atoi64(end) - if err != nil || r.start > i { - return nil, os.NewError("invalid range") - } - if i >= size { - i = size - 1 - } - r.length = i - r.start + 1 - } - } - ranges = append(ranges, r) - } - return ranges, nil -} diff --git a/src/pkg/http/fs_test.go b/src/pkg/http/fs_test.go deleted file mode 100644 index bb6d0158b..000000000 --- a/src/pkg/http/fs_test.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http_test - -import ( - "fmt" - . 
"http" - "http/httptest" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - "url" -) - -const ( - testFile = "testdata/file" - testFileLength = 11 -) - -var ServeFileRangeTests = []struct { - start, end int - r string - code int -}{ - {0, testFileLength, "", StatusOK}, - {0, 5, "0-4", StatusPartialContent}, - {2, testFileLength, "2-", StatusPartialContent}, - {testFileLength - 5, testFileLength, "-5", StatusPartialContent}, - {3, 8, "3-7", StatusPartialContent}, - {0, 0, "20-", StatusRequestedRangeNotSatisfiable}, -} - -func TestServeFile(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - ServeFile(w, r, "testdata/file") - })) - defer ts.Close() - - var err os.Error - - file, err := ioutil.ReadFile(testFile) - if err != nil { - t.Fatal("reading file:", err) - } - - // set up the Request (re-used for all tests) - var req Request - req.Header = make(Header) - if req.URL, err = url.Parse(ts.URL); err != nil { - t.Fatal("ParseURL:", err) - } - req.Method = "GET" - - // straight GET - _, body := getBody(t, req) - if !equal(body, file) { - t.Fatalf("body mismatch: got %q, want %q", body, file) - } - - // Range tests - for _, rt := range ServeFileRangeTests { - req.Header.Set("Range", "bytes="+rt.r) - if rt.r == "" { - req.Header["Range"] = nil - } - r, body := getBody(t, req) - if r.StatusCode != rt.code { - t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, r.StatusCode, rt.code) - } - if rt.code == StatusRequestedRangeNotSatisfiable { - continue - } - h := fmt.Sprintf("bytes %d-%d/%d", rt.start, rt.end-1, testFileLength) - if rt.r == "" { - h = "" - } - cr := r.Header.Get("Content-Range") - if cr != h { - t.Errorf("header mismatch: range=%q: got %q, want %q", rt.r, cr, h) - } - if !equal(body, file[rt.start:rt.end]) { - t.Errorf("body mismatch: range=%q: got %q, want %q", rt.r, body, file[rt.start:rt.end]) - } - } -} - -var fsRedirectTestData = []struct { - original, redirect string -}{ - {"/test/index.html", "/test/"}, - {"/test/testdata", "/test/testdata/"}, - {"/test/testdata/file/", "/test/testdata/file"}, -} - -func TestFSRedirect(t *testing.T) { - ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir(".")))) - defer ts.Close() - - for _, data := range fsRedirectTestData { - res, err := Get(ts.URL + data.original) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - if g, e := res.Request.URL.Path, data.redirect; g != e { - t.Errorf("redirect from %s: got %s, want %s", data.original, g, e) - } - } -} - -type testFileSystem struct { - open func(name string) (File, os.Error) -} - -func (fs *testFileSystem) Open(name string) (File, os.Error) { - return fs.open(name) -} - -func TestFileServerCleans(t *testing.T) { - ch := make(chan string, 1) - fs := FileServer(&testFileSystem{func(name string) (File, os.Error) { - ch <- name - return nil, os.ENOENT - }}) - tests := []struct { - reqPath, openArg string - }{ - {"/foo.txt", "/foo.txt"}, - {"//foo.txt", "/foo.txt"}, - {"/../foo.txt", "/foo.txt"}, - } - req, _ := NewRequest("GET", "http://example.com", nil) - for n, test := range tests { - rec := httptest.NewRecorder() - req.URL.Path = test.reqPath - fs.ServeHTTP(rec, req) - if got := <-ch; got != test.openArg { - t.Errorf("test %d: got %q, want %q", n, got, test.openArg) - } - } -} - -func TestFileServerImplicitLeadingSlash(t *testing.T) { - tempDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("TempDir: %v", err) - } - defer os.RemoveAll(tempDir) - if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), 
[]byte("Hello world"), 0644); err != nil { - t.Fatalf("WriteFile: %v", err) - } - ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir)))) - defer ts.Close() - get := func(suffix string) string { - res, err := Get(ts.URL + suffix) - if err != nil { - t.Fatalf("Get %s: %v", suffix, err) - } - b, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("ReadAll %s: %v", suffix, err) - } - return string(b) - } - if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") { - t.Logf("expected a directory listing with foo.txt, got %q", s) - } - if s := get("/bar/foo.txt"); s != "Hello world" { - t.Logf("expected %q, got %q", "Hello world", s) - } -} - -func TestDirJoin(t *testing.T) { - wfi, err := os.Stat("/etc/hosts") - if err != nil { - t.Logf("skipping test; no /etc/hosts file") - return - } - test := func(d Dir, name string) { - f, err := d.Open(name) - if err != nil { - t.Fatalf("open of %s: %v", name, err) - } - defer f.Close() - gfi, err := f.Stat() - if err != nil { - t.Fatalf("stat of %s: %v", name, err) - } - if gfi.Ino != wfi.Ino { - t.Errorf("%s got different inode", name) - } - } - test(Dir("/etc/"), "/hosts") - test(Dir("/etc/"), "hosts") - test(Dir("/etc/"), "../../../../hosts") - test(Dir("/etc"), "/hosts") - test(Dir("/etc"), "hosts") - test(Dir("/etc"), "../../../../hosts") - - // Not really directories, but since we use this trick in - // ServeFile, test it: - test(Dir("/etc/hosts"), "") - test(Dir("/etc/hosts"), "/") - test(Dir("/etc/hosts"), "../") -} - -func TestServeFileContentType(t *testing.T) { - const ctype = "icecream/chocolate" - override := false - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - if override { - w.Header().Set("Content-Type", ctype) - } - ServeFile(w, r, "testdata/file") - })) - defer ts.Close() - get := func(want string) { - resp, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if h := resp.Header.Get("Content-Type"); h != want { - t.Errorf("Content-Type mismatch: got %q, want %q", h, want) - } - } - get("text/plain; charset=utf-8") - override = true - get(ctype) -} - -func TestServeFileMimeType(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - ServeFile(w, r, "testdata/style.css") - })) - defer ts.Close() - resp, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - want := "text/css; charset=utf-8" - if h := resp.Header.Get("Content-Type"); h != want { - t.Errorf("Content-Type mismatch: got %q, want %q", h, want) - } -} - -func TestServeFileWithContentEncoding(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header().Set("Content-Encoding", "foo") - ServeFile(w, r, "testdata/file") - })) - defer ts.Close() - resp, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if g, e := resp.ContentLength, int64(-1); g != e { - t.Errorf("Content-Length mismatch: got %d, want %d", g, e) - } -} - -func TestServeIndexHtml(t *testing.T) { - const want = "index.html says hello\n" - ts := httptest.NewServer(FileServer(Dir("."))) - defer ts.Close() - - for _, path := range []string{"/testdata/", "/testdata/index.html"} { - res, err := Get(ts.URL + path) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - b, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal("reading Body:", err) - } - if s := string(b); s != want { - t.Errorf("for path %q got %q, want %q", path, s, want) - } - } -} - -func getBody(t *testing.T, req Request) (*Response, []byte) { - r, err := DefaultClient.Do(&req) - if err 
!= nil { - t.Fatal(req.URL.String(), "send:", err) - } - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal("reading Body:", err) - } - return r, b -} - -func equal(a, b []byte) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/src/pkg/http/header.go b/src/pkg/http/header.go deleted file mode 100644 index 08b077130..000000000 --- a/src/pkg/http/header.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "fmt" - "io" - "net/textproto" - "os" - "sort" - "strings" -) - -// A Header represents the key-value pairs in an HTTP header. -type Header map[string][]string - -// Add adds the key, value pair to the header. -// It appends to any existing values associated with key. -func (h Header) Add(key, value string) { - textproto.MIMEHeader(h).Add(key, value) -} - -// Set sets the header entries associated with key to -// the single element value. It replaces any existing -// values associated with key. -func (h Header) Set(key, value string) { - textproto.MIMEHeader(h).Set(key, value) -} - -// Get gets the first value associated with the given key. -// If there are no values associated with the key, Get returns "". -// Get is a convenience method. For more complex queries, -// access the map directly. -func (h Header) Get(key string) string { - return textproto.MIMEHeader(h).Get(key) -} - -// Del deletes the values associated with key. -func (h Header) Del(key string) { - textproto.MIMEHeader(h).Del(key) -} - -// Write writes a header in wire format. -func (h Header) Write(w io.Writer) os.Error { - return h.WriteSubset(w, nil) -} - -// WriteSubset writes a header in wire format. -// If exclude is not nil, keys where exclude[key] == true are not written. -func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) os.Error { - keys := make([]string, 0, len(h)) - for k := range h { - if exclude == nil || !exclude[k] { - keys = append(keys, k) - } - } - sort.Strings(keys) - for _, k := range keys { - for _, v := range h[k] { - v = strings.Replace(v, "\n", " ", -1) - v = strings.Replace(v, "\r", " ", -1) - v = strings.TrimSpace(v) - if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, v); err != nil { - return err - } - } - } - return nil -} - -// CanonicalHeaderKey returns the canonical format of the -// header key s. The canonicalization converts the first -// letter and any letter following a hyphen to upper case; -// the rest are converted to lowercase. For example, the -// canonical key for "accept-encoding" is "Accept-Encoding". -func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) } diff --git a/src/pkg/http/header_test.go b/src/pkg/http/header_test.go deleted file mode 100644 index ccdee8a97..000000000 --- a/src/pkg/http/header_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
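header.go, removed just above, writes headers through WriteSubset: keys are emitted in sorted order, keys present in the exclude map are skipped, and CR and LF in values are replaced with spaces before trimming. A short sketch of that behaviour, again assuming the old "http" import path:

    package main

    import (
        "http"
        "os"
    )

    func main() {
        h := http.Header{}
        h.Add("Content-Type", "text/html; charset=UTF-8")
        h.Add("Content-Length", "0")
        h.Add("X-Debug", "a\r\nb") // CR and LF become spaces on output

        // Writes, in sorted key order and without Content-Length:
        //   Content-Type: text/html; charset=UTF-8
        //   X-Debug: a  b
        h.WriteSubset(os.Stdout, map[string]bool{"Content-Length": true})
    }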
- -package http - -import ( - "bytes" - "testing" -) - -var headerWriteTests = []struct { - h Header - exclude map[string]bool - expected string -}{ - {Header{}, nil, ""}, - { - Header{ - "Content-Type": {"text/html; charset=UTF-8"}, - "Content-Length": {"0"}, - }, - nil, - "Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n", - }, - { - Header{ - "Content-Length": {"0", "1", "2"}, - }, - nil, - "Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n", - }, - { - Header{ - "Expires": {"-1"}, - "Content-Length": {"0"}, - "Content-Encoding": {"gzip"}, - }, - map[string]bool{"Content-Length": true}, - "Content-Encoding: gzip\r\nExpires: -1\r\n", - }, - { - Header{ - "Expires": {"-1"}, - "Content-Length": {"0", "1", "2"}, - "Content-Encoding": {"gzip"}, - }, - map[string]bool{"Content-Length": true}, - "Content-Encoding: gzip\r\nExpires: -1\r\n", - }, - { - Header{ - "Expires": {"-1"}, - "Content-Length": {"0"}, - "Content-Encoding": {"gzip"}, - }, - map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true}, - "", - }, - { - Header{ - "Nil": nil, - "Empty": {}, - "Blank": {""}, - "Double-Blank": {"", ""}, - }, - nil, - "Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n", - }, -} - -func TestHeaderWrite(t *testing.T) { - var buf bytes.Buffer - for i, test := range headerWriteTests { - test.h.WriteSubset(&buf, test.exclude) - if buf.String() != test.expected { - t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected) - } - buf.Reset() - } -} diff --git a/src/pkg/http/httptest/Makefile b/src/pkg/http/httptest/Makefile deleted file mode 100644 index eb35d8aec..000000000 --- a/src/pkg/http/httptest/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright 2011 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include ../../../Make.inc - -TARG=http/httptest -GOFILES=\ - recorder.go\ - server.go\ - -include ../../../Make.pkg diff --git a/src/pkg/http/httptest/recorder.go b/src/pkg/http/httptest/recorder.go deleted file mode 100644 index f2fedefcf..000000000 --- a/src/pkg/http/httptest/recorder.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package httptest provides utilities for HTTP testing. -package httptest - -import ( - "bytes" - "http" - "os" -) - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. 
-func (rw *ResponseRecorder) Write(buf []byte) (int, os.Error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} diff --git a/src/pkg/http/httptest/server.go b/src/pkg/http/httptest/server.go deleted file mode 100644 index 2ec36d04c..000000000 --- a/src/pkg/http/httptest/server.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Implementation of Server - -package httptest - -import ( - "crypto/rand" - "crypto/tls" - "flag" - "fmt" - "http" - "net" - "os" - "time" -) - -// A Server is an HTTP server listening on a system-chosen port on the -// local loopback interface, for use in end-to-end HTTP tests. -type Server struct { - URL string // base URL of form http://ipaddr:port with no trailing slash - Listener net.Listener - TLS *tls.Config // nil if not using using TLS -} - -// historyListener keeps track of all connections that it's ever -// accepted. -type historyListener struct { - net.Listener - history []net.Conn -} - -func (hs *historyListener) Accept() (c net.Conn, err os.Error) { - c, err = hs.Listener.Accept() - if err == nil { - hs.history = append(hs.history, c) - } - return -} - -func newLocalListener() net.Listener { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { - panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) - } - } - return l -} - -// When debugging a particular http server-based test, -// this flag lets you run -// gotest -run=BrokenTest -httptest.serve=127.0.0.1:8000 -// to start the broken server so you can interact with it manually. -var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks") - -// NewServer starts and returns a new Server. -// The caller should call Close when finished, to shut it down. -func NewServer(handler http.Handler) *Server { - ts := new(Server) - var l net.Listener - if *serve != "" { - var err os.Error - l, err = net.Listen("tcp", *serve) - if err != nil { - panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err)) - } - } else { - l = newLocalListener() - } - ts.Listener = &historyListener{l, make([]net.Conn, 0)} - ts.URL = "http://" + l.Addr().String() - server := &http.Server{Handler: handler} - go server.Serve(ts.Listener) - if *serve != "" { - fmt.Println(os.Stderr, "httptest: serving on", ts.URL) - select {} - } - return ts -} - -// NewTLSServer starts and returns a new Server using TLS. -// The caller should call Close when finished, to shut it down. 
-func NewTLSServer(handler http.Handler) *Server { - l := newLocalListener() - ts := new(Server) - - cert, err := tls.X509KeyPair(localhostCert, localhostKey) - if err != nil { - panic(fmt.Sprintf("httptest: NewTLSServer: %v", err)) - } - - ts.TLS = &tls.Config{ - Rand: rand.Reader, - Time: time.Seconds, - NextProtos: []string{"http/1.1"}, - Certificates: []tls.Certificate{cert}, - } - tlsListener := tls.NewListener(l, ts.TLS) - - ts.Listener = &historyListener{tlsListener, make([]net.Conn, 0)} - ts.URL = "https://" + l.Addr().String() - server := &http.Server{Handler: handler} - go server.Serve(ts.Listener) - return ts -} - -// Close shuts down the server. -func (s *Server) Close() { - s.Listener.Close() -} - -// CloseClientConnections closes any currently open HTTP connections -// to the test Server. -func (s *Server) CloseClientConnections() { - hl, ok := s.Listener.(*historyListener) - if !ok { - return - } - for _, conn := range hl.history { - conn.Close() - } -} - -// localhostCert is a PEM-encoded TLS cert with SAN DNS names -// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end -// of ASN.1 time). -var localhostCert = []byte(`-----BEGIN CERTIFICATE----- -MIIBOTCB5qADAgECAgEAMAsGCSqGSIb3DQEBBTAAMB4XDTcwMDEwMTAwMDAwMFoX -DTQ5MTIzMTIzNTk1OVowADBaMAsGCSqGSIb3DQEBAQNLADBIAkEAsuA5mAFMj6Q7 -qoBzcvKzIq4kzuT5epSp2AkcQfyBHm7K13Ws7u+0b5Vb9gqTf5cAiIKcrtrXVqkL -8i1UQF6AzwIDAQABo08wTTAOBgNVHQ8BAf8EBAMCACQwDQYDVR0OBAYEBAECAwQw -DwYDVR0jBAgwBoAEAQIDBDAbBgNVHREEFDASggkxMjcuMC4wLjGCBVs6OjFdMAsG -CSqGSIb3DQEBBQNBAJH30zjLWRztrWpOCgJL8RQWLaKzhK79pVhAx6q/3NrF16C7 -+l1BRZstTwIGdoGId8BRpErK1TXkniFb95ZMynM= ------END CERTIFICATE----- -`) - -// localhostKey is the private key for localhostCert. -var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBPQIBAAJBALLgOZgBTI+kO6qAc3LysyKuJM7k+XqUqdgJHEH8gR5uytd1rO7v -tG+VW/YKk3+XAIiCnK7a11apC/ItVEBegM8CAwEAAQJBAI5sxq7naeR9ahyqRkJi -SIv2iMxLuPEHaezf5CYOPWjSjBPyVhyRevkhtqEjF/WkgL7C2nWpYHsUcBDBQVF0 -3KECIQDtEGB2ulnkZAahl3WuJziXGLB+p8Wgx7wzSM6bHu1c6QIhAMEp++CaS+SJ -/TrU0zwY/fW4SvQeb49BPZUF3oqR8Xz3AiEA1rAJHBzBgdOQKdE3ksMUPcnvNJSN -poCcELmz2clVXtkCIQCLytuLV38XHToTipR4yMl6O+6arzAjZ56uq7m7ZRV0TwIh -AM65XAOw8Dsg9Kq78aYXiOEDc5DL0sbFUu/SlmRcCg93 ------END RSA PRIVATE KEY----- -`) diff --git a/src/pkg/http/lex.go b/src/pkg/http/lex.go deleted file mode 100644 index 93b67e701..000000000 --- a/src/pkg/http/lex.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -// This file deals with lexical matters of HTTP - -func isSeparator(c byte) bool { - switch c { - case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': - return true - } - return false -} - -func isSpace(c byte) bool { - switch c { - case ' ', '\t', '\r', '\n': - return true - } - return false -} - -func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 } - -func isChar(c byte) bool { return 0 <= c && c <= 127 } - -func isAnyText(c byte) bool { return !isCtl(c) } - -func isQdText(c byte) bool { return isAnyText(c) && c != '"' } - -func isToken(c byte) bool { return isChar(c) && !isCtl(c) && !isSeparator(c) } - -// Valid escaped sequences are not specified in RFC 2616, so for now, we assume -// that they coincide with the common sense ones used by GO. 
Malformed -// characters should probably not be treated as errors by a robust (forgiving) -// parser, so we replace them with the '?' character. -func httpUnquotePair(b byte) byte { - // skip the first byte, which should always be '\' - switch b { - case 'a': - return '\a' - case 'b': - return '\b' - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - case 'v': - return '\v' - case '\\': - return '\\' - case '\'': - return '\'' - case '"': - return '"' - } - return '?' -} - -// raw must begin with a valid quoted string. Only the first quoted string is -// parsed and is unquoted in result. eaten is the number of bytes parsed, or -1 -// upon failure. -func httpUnquote(raw []byte) (eaten int, result string) { - buf := make([]byte, len(raw)) - if raw[0] != '"' { - return -1, "" - } - eaten = 1 - j := 0 // # of bytes written in buf - for i := 1; i < len(raw); i++ { - switch b := raw[i]; b { - case '"': - eaten++ - buf = buf[0:j] - return i + 1, string(buf) - case '\\': - if len(raw) < i+2 { - return -1, "" - } - buf[j] = httpUnquotePair(raw[i+1]) - eaten += 2 - j++ - i++ - default: - if isQdText(b) { - buf[j] = b - } else { - buf[j] = '?' - } - eaten++ - j++ - } - } - return -1, "" -} - -// This is a best effort parse, so errors are not returned, instead not all of -// the input string might be parsed. result is always non-nil. -func httpSplitFieldValue(fv string) (eaten int, result []string) { - result = make([]string, 0, len(fv)) - raw := []byte(fv) - i := 0 - chunk := "" - for i < len(raw) { - b := raw[i] - switch { - case b == '"': - eaten, unq := httpUnquote(raw[i:len(raw)]) - if eaten < 0 { - return i, result - } else { - i += eaten - chunk += unq - } - case isSeparator(b): - if chunk != "" { - result = result[0 : len(result)+1] - result[len(result)-1] = chunk - chunk = "" - } - i++ - case isToken(b): - chunk += string(b) - i++ - case b == '\n' || b == '\r': - i++ - default: - chunk += "?" - i++ - } - } - if chunk != "" { - result = result[0 : len(result)+1] - result[len(result)-1] = chunk - chunk = "" - } - return i, result -} diff --git a/src/pkg/http/lex_test.go b/src/pkg/http/lex_test.go deleted file mode 100644 index 5386f7534..000000000 --- a/src/pkg/http/lex_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "testing" -) - -type lexTest struct { - Raw string - Parsed int // # of parsed characters - Result []string -} - -var lexTests = []lexTest{ - { - Raw: `"abc"def,:ghi`, - Parsed: 13, - Result: []string{"abcdef", "ghi"}, - }, - // My understanding of the RFC is that escape sequences outside of - // quotes are not interpreted? 
- { - Raw: `"\t"\t"\t"`, - Parsed: 10, - Result: []string{"\t", "t\t"}, - }, - { - Raw: `"\yab"\r\n`, - Parsed: 10, - Result: []string{"?ab", "r", "n"}, - }, - { - Raw: "ab\f", - Parsed: 3, - Result: []string{"ab?"}, - }, - { - Raw: "\"ab \" c,de f, gh, ij\n\t\r", - Parsed: 23, - Result: []string{"ab ", "c", "de", "f", "gh", "ij"}, - }, -} - -func min(x, y int) int { - if x <= y { - return x - } - return y -} - -func TestSplitFieldValue(t *testing.T) { - for k, l := range lexTests { - parsed, result := httpSplitFieldValue(l.Raw) - if parsed != l.Parsed { - t.Errorf("#%d: Parsed %d, expected %d", k, parsed, l.Parsed) - } - if len(result) != len(l.Result) { - t.Errorf("#%d: Result len %d, expected %d", k, len(result), len(l.Result)) - } - for i := 0; i < min(len(result), len(l.Result)); i++ { - if result[i] != l.Result[i] { - t.Errorf("#%d: %d-th entry mismatch. Have {%s}, expect {%s}", - k, i, result[i], l.Result[i]) - } - } - } -} diff --git a/src/pkg/http/persist.go b/src/pkg/http/persist.go deleted file mode 100644 index 78bf9058f..000000000 --- a/src/pkg/http/persist.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bufio" - "io" - "net" - "net/textproto" - "os" - "sync" -) - -var ( - ErrPersistEOF = &ProtocolError{"persistent connection closed"} - ErrPipeline = &ProtocolError{"pipeline error"} -) - -// A ServerConn reads requests and sends responses over an underlying -// connection, until the HTTP keepalive logic commands an end. ServerConn -// also allows hijacking the underlying connection by calling Hijack -// to regain control over the connection. ServerConn supports pipe-lining, -// i.e. requests can be read out of sync (but in the same order) while the -// respective responses are sent. -// -// ServerConn is low-level and should not be needed by most applications. -// See Server. -type ServerConn struct { - lk sync.Mutex // read-write protects the following fields - c net.Conn - r *bufio.Reader - re, we os.Error // read/write errors - lastbody io.ReadCloser - nread, nwritten int - pipereq map[*Request]uint - - pipe textproto.Pipeline -} - -// NewServerConn returns a new ServerConn reading and writing c. If r is not -// nil, it is the buffer to use when reading c. -func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn { - if r == nil { - r = bufio.NewReader(c) - } - return &ServerConn{c: c, r: r, pipereq: make(map[*Request]uint)} -} - -// Hijack detaches the ServerConn and returns the underlying connection as well -// as the read-side bufio which may have some left over data. Hijack may be -// called before Read has signaled the end of the keep-alive logic. The user -// should not call Hijack while Read or Write is in progress. -func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) { - sc.lk.Lock() - defer sc.lk.Unlock() - c = sc.c - r = sc.r - sc.c = nil - sc.r = nil - return -} - -// Close calls Hijack and then also closes the underlying connection -func (sc *ServerConn) Close() os.Error { - c, _ := sc.Hijack() - if c != nil { - return c.Close() - } - return nil -} - -// Read returns the next request on the wire. An ErrPersistEOF is returned if -// it is gracefully determined that there are no more requests (e.g. after the -// first request on an HTTP/1.0 connection, or after a Connection:close on a -// HTTP/1.1 connection). 
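
As a usage sketch for the ServerConn API introduced just above (an editor's illustration against the pre-Go 1 signatures shown in the removed persist.go, i.e. the old "http" import path and os.Error), reading one request off a raw connection and then reclaiming it with Hijack might look like this:

    package sketch

    import (
        "http"
        "log"
        "net"
    )

    // inspectFirstRequest reads a single request with the low-level ServerConn
    // API and then detaches the underlying connection via Hijack.
    func inspectFirstRequest(conn net.Conn) {
        sc := http.NewServerConn(conn, nil) // nil: ServerConn allocates its own bufio.Reader
        req, err := sc.Read()               // ErrPersistEOF signals a graceful end of keep-alive
        if err != nil {
            sc.Close()
            return
        }
        log.Printf("%s %s", req.Method, req.RawURL)
        c, _ := sc.Hijack() // take back the net.Conn (discarding any buffered bytes for brevity)
        c.Close()
    }

As the comment above says, ServerConn is low-level; the Server type in the removed server.go handles this bookkeeping for typical applications.
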
-func (sc *ServerConn) Read() (req *Request, err os.Error) { - - // Ensure ordered execution of Reads and Writes - id := sc.pipe.Next() - sc.pipe.StartRequest(id) - defer func() { - sc.pipe.EndRequest(id) - if req == nil { - sc.pipe.StartResponse(id) - sc.pipe.EndResponse(id) - } else { - // Remember the pipeline id of this request - sc.lk.Lock() - sc.pipereq[req] = id - sc.lk.Unlock() - } - }() - - sc.lk.Lock() - if sc.we != nil { // no point receiving if write-side broken or closed - defer sc.lk.Unlock() - return nil, sc.we - } - if sc.re != nil { - defer sc.lk.Unlock() - return nil, sc.re - } - if sc.r == nil { // connection closed by user in the meantime - defer sc.lk.Unlock() - return nil, os.EBADF - } - r := sc.r - lastbody := sc.lastbody - sc.lastbody = nil - sc.lk.Unlock() - - // Make sure body is fully consumed, even if user does not call body.Close - if lastbody != nil { - // body.Close is assumed to be idempotent and multiple calls to - // it should return the error that its first invocation - // returned. - err = lastbody.Close() - if err != nil { - sc.lk.Lock() - defer sc.lk.Unlock() - sc.re = err - return nil, err - } - } - - req, err = ReadRequest(r) - sc.lk.Lock() - defer sc.lk.Unlock() - if err != nil { - if err == io.ErrUnexpectedEOF { - // A close from the opposing client is treated as a - // graceful close, even if there was some unparse-able - // data before the close. - sc.re = ErrPersistEOF - return nil, sc.re - } else { - sc.re = err - return req, err - } - } - sc.lastbody = req.Body - sc.nread++ - if req.Close { - sc.re = ErrPersistEOF - return req, sc.re - } - return req, err -} - -// Pending returns the number of unanswered requests -// that have been received on the connection. -func (sc *ServerConn) Pending() int { - sc.lk.Lock() - defer sc.lk.Unlock() - return sc.nread - sc.nwritten -} - -// Write writes resp in response to req. To close the connection gracefully, set the -// Response.Close field to true. Write should be considered operational until -// it returns an error, regardless of any errors returned on the Read side. -func (sc *ServerConn) Write(req *Request, resp *Response) os.Error { - - // Retrieve the pipeline ID of this request/response pair - sc.lk.Lock() - id, ok := sc.pipereq[req] - sc.pipereq[req] = 0, false - if !ok { - sc.lk.Unlock() - return ErrPipeline - } - sc.lk.Unlock() - - // Ensure pipeline order - sc.pipe.StartResponse(id) - defer sc.pipe.EndResponse(id) - - sc.lk.Lock() - if sc.we != nil { - defer sc.lk.Unlock() - return sc.we - } - if sc.c == nil { // connection closed by user in the meantime - defer sc.lk.Unlock() - return os.EBADF - } - c := sc.c - if sc.nread <= sc.nwritten { - defer sc.lk.Unlock() - return os.NewError("persist server pipe count") - } - if resp.Close { - // After signaling a keep-alive close, any pipelined unread - // requests will be lost. It is up to the user to drain them - // before signaling. - sc.re = ErrPersistEOF - } - sc.lk.Unlock() - - err := resp.Write(c) - sc.lk.Lock() - defer sc.lk.Unlock() - if err != nil { - sc.we = err - return err - } - sc.nwritten++ - - return nil -} - -// A ClientConn sends request and receives headers over an underlying -// connection, while respecting the HTTP keepalive logic. ClientConn -// supports hijacking the connection calling Hijack to -// regain control of the underlying net.Conn and deal with it as desired. -// -// ClientConn is low-level and should not be needed by most applications. -// See Client. 
-type ClientConn struct { - lk sync.Mutex // read-write protects the following fields - c net.Conn - r *bufio.Reader - re, we os.Error // read/write errors - lastbody io.ReadCloser - nread, nwritten int - pipereq map[*Request]uint - - pipe textproto.Pipeline - writeReq func(*Request, io.Writer) os.Error -} - -// NewClientConn returns a new ClientConn reading and writing c. If r is not -// nil, it is the buffer to use when reading c. -func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn { - if r == nil { - r = bufio.NewReader(c) - } - return &ClientConn{ - c: c, - r: r, - pipereq: make(map[*Request]uint), - writeReq: (*Request).Write, - } -} - -// NewProxyClientConn works like NewClientConn but writes Requests -// using Request's WriteProxy method. -func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn { - cc := NewClientConn(c, r) - cc.writeReq = (*Request).WriteProxy - return cc -} - -// Hijack detaches the ClientConn and returns the underlying connection as well -// as the read-side bufio which may have some left over data. Hijack may be -// called before the user or Read have signaled the end of the keep-alive -// logic. The user should not call Hijack while Read or Write is in progress. -func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) { - cc.lk.Lock() - defer cc.lk.Unlock() - c = cc.c - r = cc.r - cc.c = nil - cc.r = nil - return -} - -// Close calls Hijack and then also closes the underlying connection -func (cc *ClientConn) Close() os.Error { - c, _ := cc.Hijack() - if c != nil { - return c.Close() - } - return nil -} - -// Write writes a request. An ErrPersistEOF error is returned if the connection -// has been closed in an HTTP keepalive sense. If req.Close equals true, the -// keepalive connection is logically closed after this request and the opposing -// server is informed. An ErrUnexpectedEOF indicates the remote closed the -// underlying TCP connection, which is usually considered as graceful close. -func (cc *ClientConn) Write(req *Request) (err os.Error) { - - // Ensure ordered execution of Writes - id := cc.pipe.Next() - cc.pipe.StartRequest(id) - defer func() { - cc.pipe.EndRequest(id) - if err != nil { - cc.pipe.StartResponse(id) - cc.pipe.EndResponse(id) - } else { - // Remember the pipeline id of this request - cc.lk.Lock() - cc.pipereq[req] = id - cc.lk.Unlock() - } - }() - - cc.lk.Lock() - if cc.re != nil { // no point sending if read-side closed or broken - defer cc.lk.Unlock() - return cc.re - } - if cc.we != nil { - defer cc.lk.Unlock() - return cc.we - } - if cc.c == nil { // connection closed by user in the meantime - defer cc.lk.Unlock() - return os.EBADF - } - c := cc.c - if req.Close { - // We write the EOF to the write-side error, because there - // still might be some pipelined reads - cc.we = ErrPersistEOF - } - cc.lk.Unlock() - - err = cc.writeReq(req, c) - cc.lk.Lock() - defer cc.lk.Unlock() - if err != nil { - cc.we = err - return err - } - cc.nwritten++ - - return nil -} - -// Pending returns the number of unanswered requests -// that have been sent on the connection. -func (cc *ClientConn) Pending() int { - cc.lk.Lock() - defer cc.lk.Unlock() - return cc.nwritten - cc.nread -} - -// Read reads the next response from the wire. A valid response might be -// returned together with an ErrPersistEOF, which means that the remote -// requested that this be the last request serviced. Read can be called -// concurrently with Write, but not with another Read. 
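
The client side mirrors this. A hedged sketch of issuing a single request with the ClientConn API shown here, again using only the pre-Go 1 signatures from the removed persist.go:

    package sketch

    import (
        "http"
        "net"
        "os"
    )

    // fetchOnce writes one request and reads its response over an existing
    // connection. The same *Request value keys both Write and Read, which is
    // how ClientConn pairs pipelined requests with their responses.
    func fetchOnce(conn net.Conn, req *http.Request) (*http.Response, os.Error) {
        cc := http.NewClientConn(conn, nil)
        if err := cc.Write(req); err != nil {
            return nil, err
        }
        // The caller still owns conn: consume resp.Body before closing it.
        return cc.Read(req) // may return a response together with ErrPersistEOF
    }
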
-func (cc *ClientConn) Read(req *Request) (*Response, os.Error) { - return cc.readUsing(req, ReadResponse) -} - -// readUsing is the implementation of Read with a replaceable -// ReadResponse-like function, used by the Transport. -func (cc *ClientConn) readUsing(req *Request, readRes func(*bufio.Reader, *Request) (*Response, os.Error)) (resp *Response, err os.Error) { - // Retrieve the pipeline ID of this request/response pair - cc.lk.Lock() - id, ok := cc.pipereq[req] - cc.pipereq[req] = 0, false - if !ok { - cc.lk.Unlock() - return nil, ErrPipeline - } - cc.lk.Unlock() - - // Ensure pipeline order - cc.pipe.StartResponse(id) - defer cc.pipe.EndResponse(id) - - cc.lk.Lock() - if cc.re != nil { - defer cc.lk.Unlock() - return nil, cc.re - } - if cc.r == nil { // connection closed by user in the meantime - defer cc.lk.Unlock() - return nil, os.EBADF - } - r := cc.r - lastbody := cc.lastbody - cc.lastbody = nil - cc.lk.Unlock() - - // Make sure body is fully consumed, even if user does not call body.Close - if lastbody != nil { - // body.Close is assumed to be idempotent and multiple calls to - // it should return the error that its first invokation - // returned. - err = lastbody.Close() - if err != nil { - cc.lk.Lock() - defer cc.lk.Unlock() - cc.re = err - return nil, err - } - } - - resp, err = readRes(r, req) - cc.lk.Lock() - defer cc.lk.Unlock() - if err != nil { - cc.re = err - return resp, err - } - cc.lastbody = resp.Body - - cc.nread++ - - if resp.Close { - cc.re = ErrPersistEOF // don't send any more requests - return resp, cc.re - } - return resp, err -} - -// Do is convenience method that writes a request and reads a response. -func (cc *ClientConn) Do(req *Request) (resp *Response, err os.Error) { - err = cc.Write(req) - if err != nil { - return - } - return cc.Read(req) -} diff --git a/src/pkg/http/pprof/Makefile b/src/pkg/http/pprof/Makefile deleted file mode 100644 index 5858a0efa..000000000 --- a/src/pkg/http/pprof/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright 2010 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include ../../../Make.inc - -TARG=http/pprof -GOFILES=\ - pprof.go\ - -include ../../../Make.pkg diff --git a/src/pkg/http/pprof/pprof.go b/src/pkg/http/pprof/pprof.go deleted file mode 100644 index 917c7f877..000000000 --- a/src/pkg/http/pprof/pprof.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pprof serves via its HTTP server runtime profiling data -// in the format expected by the pprof visualization tool. -// For more information about pprof, see -// http://code.google.com/p/google-perftools/. -// -// The package is typically only imported for the side effect of -// registering its HTTP handlers. -// The handled paths all begin with /debug/pprof/. 
-// -// To use pprof, link this package into your program: -// import _ "http/pprof" -// -// Then use the pprof tool to look at the heap profile: -// -// pprof http://localhost:6060/debug/pprof/heap -// -// Or to look at a 30-second CPU profile: -// -// pprof http://localhost:6060/debug/pprof/profile -// -package pprof - -import ( - "bufio" - "bytes" - "fmt" - "http" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "time" -) - -func init() { - http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline)) - http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile)) - http.Handle("/debug/pprof/heap", http.HandlerFunc(Heap)) - http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol)) -} - -// Cmdline responds with the running program's -// command line, with arguments separated by NUL bytes. -// The package initialization registers it as /debug/pprof/cmdline. -func Cmdline(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, strings.Join(os.Args, "\x00")) -} - -// Heap responds with the pprof-formatted heap profile. -// The package initialization registers it as /debug/pprof/heap. -func Heap(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - pprof.WriteHeapProfile(w) -} - -// Profile responds with the pprof-formatted cpu profile. -// The package initialization registers it as /debug/pprof/profile. -func Profile(w http.ResponseWriter, r *http.Request) { - sec, _ := strconv.Atoi64(r.FormValue("seconds")) - if sec == 0 { - sec = 30 - } - - // Set Content Type assuming StartCPUProfile will work, - // because if it does it starts writing. - w.Header().Set("Content-Type", "application/octet-stream") - if err := pprof.StartCPUProfile(w); err != nil { - // StartCPUProfile failed, so no writes yet. - // Can change header back to text content - // and send error code. - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err) - return - } - time.Sleep(sec * 1e9) - pprof.StopCPUProfile() -} - -// Symbol looks up the program counters listed in the request, -// responding with a table mapping program counters to function names. -// The package initialization registers it as /debug/pprof/symbol. -func Symbol(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - - // We have to read the whole POST body before - // writing any output. Buffer the output here. - var buf bytes.Buffer - - // We don't know how many symbols we have, but we - // do have symbol information. Pprof only cares whether - // this number is 0 (no symbols available) or > 0. - fmt.Fprintf(&buf, "num_symbols: 1\n") - - var b *bufio.Reader - if r.Method == "POST" { - b = bufio.NewReader(r.Body) - } else { - b = bufio.NewReader(strings.NewReader(r.URL.RawQuery)) - } - - for { - word, err := b.ReadSlice('+') - if err == nil { - word = word[0 : len(word)-1] // trim + - } - pc, _ := strconv.Btoui64(string(word), 0) - if pc != 0 { - f := runtime.FuncForPC(uintptr(pc)) - if f != nil { - fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name()) - } - } - - // Wait until here to check for err; the last - // symbol will have an err because it doesn't end in +. 
- if err != nil { - if err != os.EOF { - fmt.Fprintf(&buf, "reading request: %v\n", err) - } - break - } - } - - w.Write(buf.Bytes()) -} diff --git a/src/pkg/http/proxy_test.go b/src/pkg/http/proxy_test.go deleted file mode 100644 index 9b320b3aa..000000000 --- a/src/pkg/http/proxy_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "os" - "testing" -) - -// TODO(mattn): -// test ProxyAuth - -var UseProxyTests = []struct { - host string - match bool -}{ - // Never proxy localhost: - {"localhost:80", false}, - {"127.0.0.1", false}, - {"127.0.0.2", false}, - {"[::1]", false}, - {"[::2]", true}, // not a loopback address - - {"barbaz.net", false}, // match as .barbaz.net - {"foobar.com", false}, // have a port but match - {"foofoobar.com", true}, // not match as a part of foobar.com - {"baz.com", true}, // not match as a part of barbaz.com - {"localhost.net", true}, // not match as suffix of address - {"local.localhost", true}, // not match as prefix as address - {"barbarbaz.net", true}, // not match because NO_PROXY have a '.' - {"www.foobar.com", true}, // not match because NO_PROXY is not .foobar.com -} - -func TestUseProxy(t *testing.T) { - oldenv := os.Getenv("NO_PROXY") - defer os.Setenv("NO_PROXY", oldenv) - - no_proxy := "foobar.com, .barbaz.net" - os.Setenv("NO_PROXY", no_proxy) - - for _, test := range UseProxyTests { - if useProxy(test.host+":80") != test.match { - t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match) - } - } -} diff --git a/src/pkg/http/range_test.go b/src/pkg/http/range_test.go deleted file mode 100644 index 5274a81fa..000000000 --- a/src/pkg/http/range_test.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
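
Stepping back to the pprof package removed above: its package comment says to import it only for its side effect of registering the /debug/pprof/ handlers on the default mux. A minimal sketch of exposing them over HTTP (the localhost:6060 address simply mirrors the package comment):

    package main

    import (
        "http"
        _ "http/pprof" // side effect: init registers the /debug/pprof/* handlers
        "log"
    )

    func main() {
        // Once a server is running on the default mux, the pprof tool can be
        // pointed at e.g. http://localhost:6060/debug/pprof/heap or /profile.
        log.Fatal(http.ListenAndServe("localhost:6060", nil))
    }
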
- -package http - -import ( - "testing" -) - -var ParseRangeTests = []struct { - s string - length int64 - r []httpRange -}{ - {"", 0, nil}, - {"foo", 0, nil}, - {"bytes=", 0, nil}, - {"bytes=5-4", 10, nil}, - {"bytes=0-2,5-4", 10, nil}, - {"bytes=0-9", 10, []httpRange{{0, 10}}}, - {"bytes=0-", 10, []httpRange{{0, 10}}}, - {"bytes=5-", 10, []httpRange{{5, 5}}}, - {"bytes=0-20", 10, []httpRange{{0, 10}}}, - {"bytes=15-,0-5", 10, nil}, - {"bytes=-5", 10, []httpRange{{5, 5}}}, - {"bytes=-15", 10, []httpRange{{0, 10}}}, - {"bytes=0-499", 10000, []httpRange{{0, 500}}}, - {"bytes=500-999", 10000, []httpRange{{500, 500}}}, - {"bytes=-500", 10000, []httpRange{{9500, 500}}}, - {"bytes=9500-", 10000, []httpRange{{9500, 500}}}, - {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}}, - {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}}, - {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}}, -} - -func TestParseRange(t *testing.T) { - for _, test := range ParseRangeTests { - r := test.r - ranges, err := parseRange(test.s, test.length) - if err != nil && r != nil { - t.Errorf("parseRange(%q) returned error %q", test.s, err) - } - if len(ranges) != len(r) { - t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r)) - continue - } - for i := range r { - if ranges[i].start != r[i].start { - t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start) - } - if ranges[i].length != r[i].length { - t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length) - } - } - } -} diff --git a/src/pkg/http/readrequest_test.go b/src/pkg/http/readrequest_test.go deleted file mode 100644 index f6dc99e2e..000000000 --- a/src/pkg/http/readrequest_test.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
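
The httpRange values in the table above follow a {start, length} convention. As a worked illustration of the suffix form ("bytes=-N"), here is an editor's sketch of the underlying arithmetic, not the removed parseRange itself:

    package sketch

    // suffixRange reproduces the arithmetic behind table entries such as
    // {"bytes=-500", 10000, []httpRange{{9500, 500}}}: a suffix range selects
    // the final n bytes, clamped to the resource size, so "bytes=-15" against
    // a 10-byte resource yields {0, 10}.
    func suffixRange(n, size int64) (start, length int64) {
        if n > size {
            n = size
        }
        return size - n, n
    }
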
- -package http - -import ( - "bufio" - "bytes" - "fmt" - "io" - "testing" - "url" -) - -type reqTest struct { - Raw string - Req *Request - Body string - Error string -} - -var noError = "" -var noBody = "" - -var reqTests = []reqTest{ - // Baseline test; All Request fields included for template use - { - "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + - "Host: www.techcrunch.com\r\n" + - "User-Agent: Fake\r\n" + - "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + - "Accept-Language: en-us,en;q=0.5\r\n" + - "Accept-Encoding: gzip,deflate\r\n" + - "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + - "Keep-Alive: 300\r\n" + - "Content-Length: 7\r\n" + - "Proxy-Connection: keep-alive\r\n\r\n" + - "abcdef\n???", - - &Request{ - Method: "GET", - RawURL: "http://www.techcrunch.com/", - URL: &url.URL{ - Raw: "http://www.techcrunch.com/", - Scheme: "http", - RawPath: "/", - RawAuthority: "www.techcrunch.com", - RawUserinfo: "", - Host: "www.techcrunch.com", - Path: "/", - RawQuery: "", - Fragment: "", - }, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: Header{ - "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"}, - "Accept-Language": {"en-us,en;q=0.5"}, - "Accept-Encoding": {"gzip,deflate"}, - "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"}, - "Keep-Alive": {"300"}, - "Proxy-Connection": {"keep-alive"}, - "Content-Length": {"7"}, - "User-Agent": {"Fake"}, - }, - Close: false, - ContentLength: 7, - Host: "www.techcrunch.com", - Form: url.Values{}, - }, - - "abcdef\n", - - noError, - }, - - // GET request with no body (the normal case) - { - "GET / HTTP/1.1\r\n" + - "Host: foo.com\r\n\r\n", - - &Request{ - Method: "GET", - RawURL: "/", - URL: &url.URL{ - Raw: "/", - Path: "/", - RawPath: "/", - }, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Close: false, - ContentLength: 0, - Host: "foo.com", - Form: url.Values{}, - }, - - noBody, - noError, - }, - - // Tests that we don't parse a path that looks like a - // scheme-relative URI as a scheme-relative URI. 
- { - "GET //user@host/is/actually/a/path/ HTTP/1.1\r\n" + - "Host: test\r\n\r\n", - - &Request{ - Method: "GET", - RawURL: "//user@host/is/actually/a/path/", - URL: &url.URL{ - Raw: "//user@host/is/actually/a/path/", - Scheme: "", - RawPath: "//user@host/is/actually/a/path/", - RawAuthority: "", - RawUserinfo: "", - Host: "", - Path: "//user@host/is/actually/a/path/", - RawQuery: "", - Fragment: "", - }, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: Header{}, - Close: false, - ContentLength: 0, - Host: "test", - Form: url.Values{}, - }, - - noBody, - noError, - }, - - // Tests a bogus abs_path on the Request-Line (RFC 2616 section 5.1.2) - { - "GET ../../../../etc/passwd HTTP/1.1\r\n" + - "Host: test\r\n\r\n", - nil, - noBody, - "parse ../../../../etc/passwd: invalid URI for request", - }, - - // Tests missing URL: - { - "GET HTTP/1.1\r\n" + - "Host: test\r\n\r\n", - nil, - noBody, - "parse : empty url", - }, -} - -func TestReadRequest(t *testing.T) { - for i := range reqTests { - tt := &reqTests[i] - var braw bytes.Buffer - braw.WriteString(tt.Raw) - req, err := ReadRequest(bufio.NewReader(&braw)) - if err != nil { - if err.String() != tt.Error { - t.Errorf("#%d: error %q, want error %q", i, err.String(), tt.Error) - } - continue - } - rbody := req.Body - req.Body = nil - diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req) - var bout bytes.Buffer - if rbody != nil { - io.Copy(&bout, rbody) - rbody.Close() - } - body := bout.String() - if body != tt.Body { - t.Errorf("#%d: Body = %q want %q", i, body, tt.Body) - } - } -} diff --git a/src/pkg/http/request.go b/src/pkg/http/request.go deleted file mode 100644 index ed41fa45c..000000000 --- a/src/pkg/http/request.go +++ /dev/null @@ -1,745 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP Request reading and parsing. - -// Package http implements parsing of HTTP requests, replies, and URLs and -// provides an extensible HTTP server and a basic HTTP client. -package http - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/textproto" - "os" - "strconv" - "strings" - "url" -) - -const ( - maxLineLength = 4096 // assumed <= bufio.defaultBufSize - maxValueLength = 4096 - maxHeaderLines = 1024 - chunkSize = 4 << 10 // 4 KB chunks - defaultMaxMemory = 32 << 20 // 32 MB -) - -// ErrMissingFile is returned by FormFile when the provided file field name -// is either not present in the request or not a file field. -var ErrMissingFile = os.NewError("http: no such file") - -// HTTP request parsing errors. 
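
For reference against the ReadRequest test cases above, a minimal sketch of driving the parser directly, the same pattern the test uses, wrapped as a helper:

    package sketch

    import (
        "bufio"
        "http"
        "os"
        "strings"
    )

    // parseRaw feeds a raw request, e.g. "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n",
    // through ReadRequest, which parses the request line, the headers, and the
    // body framing rules exercised by the tests above.
    func parseRaw(raw string) (*http.Request, os.Error) {
        return http.ReadRequest(bufio.NewReader(strings.NewReader(raw)))
    }
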
-type ProtocolError struct { - ErrorString string -} - -func (err *ProtocolError) String() string { return err.ErrorString } - -var ( - ErrLineTooLong = &ProtocolError{"header line too long"} - ErrHeaderTooLong = &ProtocolError{"header too long"} - ErrShortBody = &ProtocolError{"entity body too short"} - ErrNotSupported = &ProtocolError{"feature not supported"} - ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"} - ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"} - ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"} - ErrMissingBoundary = &ProtocolError{"no multipart boundary param Content-Type"} -) - -type badStringError struct { - what string - str string -} - -func (e *badStringError) String() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// Headers that Request.Write handles itself and should be skipped. -var reqWriteExcludeHeader = map[string]bool{ - "Host": true, - "User-Agent": true, - "Content-Length": true, - "Transfer-Encoding": true, - "Trailer": true, -} - -// A Request represents a parsed HTTP request header. -type Request struct { - Method string // GET, POST, PUT, etc. - RawURL string // The raw URL given in the request. - URL *url.URL // Parsed URL. - - // The protocol version for incoming requests. - // Outgoing requests always use HTTP/1.1. - Proto string // "HTTP/1.0" - ProtoMajor int // 1 - ProtoMinor int // 0 - - // A header maps request lines to their values. - // If the header says - // - // accept-encoding: gzip, deflate - // Accept-Language: en-us - // Connection: keep-alive - // - // then - // - // Header = map[string][]string{ - // "Accept-Encoding": {"gzip, deflate"}, - // "Accept-Language": {"en-us"}, - // "Connection": {"keep-alive"}, - // } - // - // HTTP defines that header names are case-insensitive. - // The request parser implements this by canonicalizing the - // name, making the first character and any characters - // following a hyphen uppercase and the rest lowercase. - Header Header - - // The message body. - Body io.ReadCloser - - // ContentLength records the length of the associated content. - // The value -1 indicates that the length is unknown. - // Values >= 0 indicate that the given number of bytes may be read from Body. - ContentLength int64 - - // TransferEncoding lists the transfer encodings from outermost to innermost. - // An empty list denotes the "identity" encoding. - TransferEncoding []string - - // Whether to close the connection after replying to this request. - Close bool - - // The host on which the URL is sought. - // Per RFC 2616, this is either the value of the Host: header - // or the host name given in the URL itself. - Host string - - // The parsed form. Only available after ParseForm is called. - Form url.Values - - // The parsed multipart form, including file uploads. - // Only available after ParseMultipartForm is called. - MultipartForm *multipart.Form - - // Trailer maps trailer keys to values. Like for Header, if the - // response has multiple trailer lines with the same key, they will be - // concatenated, delimited by commas. - Trailer Header - - // RemoteAddr allows HTTP servers and other software to record - // the network address that sent the request, usually for - // logging. This field is not filled in by ReadRequest and - // has no defined format. The HTTP server in this package - // sets RemoteAddr to an "IP:port" address before invoking a - // handler. 
- RemoteAddr string - - // TLS allows HTTP servers and other software to record - // information about the TLS connection on which the request - // was received. This field is not filled in by ReadRequest. - // The HTTP server in this package sets the field for - // TLS-enabled connections before invoking a handler; - // otherwise it leaves the field nil. - TLS *tls.ConnectionState -} - -// ProtoAtLeast returns whether the HTTP protocol used -// in the request is at least major.minor. -func (r *Request) ProtoAtLeast(major, minor int) bool { - return r.ProtoMajor > major || - r.ProtoMajor == major && r.ProtoMinor >= minor -} - -// UserAgent returns the client's User-Agent, if sent in the request. -func (r *Request) UserAgent() string { - return r.Header.Get("User-Agent") -} - -// Cookies parses and returns the HTTP cookies sent with the request. -func (r *Request) Cookies() []*Cookie { - return readCookies(r.Header, "") -} - -var ErrNoCookie = os.NewError("http: named cookied not present") - -// Cookie returns the named cookie provided in the request or -// ErrNoCookie if not found. -func (r *Request) Cookie(name string) (*Cookie, os.Error) { - for _, c := range readCookies(r.Header, name) { - return c, nil - } - return nil, ErrNoCookie -} - -// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, -// AddCookie does not attach more than one Cookie header field. That -// means all cookies, if any, are written into the same line, -// separated by semicolon. -func (r *Request) AddCookie(c *Cookie) { - s := fmt.Sprintf("%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value)) - if c := r.Header.Get("Cookie"); c != "" { - r.Header.Set("Cookie", c+"; "+s) - } else { - r.Header.Set("Cookie", s) - } -} - -// Referer returns the referring URL, if sent in the request. -// -// Referer is misspelled as in the request itself, a mistake from the -// earliest days of HTTP. This value can also be fetched from the -// Header map as Header["Referer"]; the benefit of making it available -// as a method is that the compiler can diagnose programs that use the -// alternate (correct English) spelling req.Referrer() but cannot -// diagnose programs that use Header["Referrer"]. -func (r *Request) Referer() string { - return r.Header.Get("Referer") -} - -// multipartByReader is a sentinel value. -// Its presence in Request.MultipartForm indicates that parsing of the request -// body has been handed off to a MultipartReader instead of ParseMultipartFrom. -var multipartByReader = &multipart.Form{ - Value: make(map[string][]string), - File: make(map[string][]*multipart.FileHeader), -} - -// MultipartReader returns a MIME multipart reader if this is a -// multipart/form-data POST request, else returns nil and an error. -// Use this function instead of ParseMultipartForm to -// process the request body as a stream. 
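
The MultipartReader comment above recommends streaming over ParseMultipartForm when the body should not be buffered. A hedged sketch of that pattern follows; the NextPart loop and the os.EOF sentinel assume the mime/multipart API of the same vintage and are illustrative only:

    package sketch

    import (
        "http"
        "io"
        "io/ioutil"
        "os"
    )

    // streamParts consumes a multipart/form-data body part by part instead of
    // buffering it in memory or on disk.
    func streamParts(r *http.Request) os.Error {
        mr, err := r.MultipartReader() // ErrNotMultipart or ErrMissingBoundary on bad input
        if err != nil {
            return err
        }
        for {
            part, err := mr.NextPart() // assumed era API: returns os.EOF when no parts remain
            if err == os.EOF {
                break
            }
            if err != nil {
                return err
            }
            // Discard the payload; a real handler would copy it somewhere useful.
            if _, err := io.Copy(ioutil.Discard, part); err != nil {
                return err
            }
        }
        return nil
    }
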
-func (r *Request) MultipartReader() (*multipart.Reader, os.Error) { - if r.MultipartForm == multipartByReader { - return nil, os.NewError("http: MultipartReader called twice") - } - if r.MultipartForm != nil { - return nil, os.NewError("http: multipart handled by ParseMultipartForm") - } - r.MultipartForm = multipartByReader - return r.multipartReader() -} - -func (r *Request) multipartReader() (*multipart.Reader, os.Error) { - v := r.Header.Get("Content-Type") - if v == "" { - return nil, ErrNotMultipart - } - d, params := mime.ParseMediaType(v) - if d != "multipart/form-data" { - return nil, ErrNotMultipart - } - boundary, ok := params["boundary"] - if !ok { - return nil, ErrMissingBoundary - } - return multipart.NewReader(r.Body, boundary), nil -} - -// Return value if nonempty, def otherwise. -func valueOrDefault(value, def string) string { - if value != "" { - return value - } - return def -} - -const defaultUserAgent = "Go http package" - -// Write writes an HTTP/1.1 request -- header and body -- in wire format. -// This method consults the following fields of req: -// Host -// RawURL, if non-empty, or else URL -// Method (defaults to "GET") -// Header -// ContentLength -// TransferEncoding -// Body -// -// If Body is present, Content-Length is <= 0 and TransferEncoding -// hasn't been set to "identity", Write adds "Transfer-Encoding: -// chunked" to the header. Body is closed after it is sent. -func (req *Request) Write(w io.Writer) os.Error { - return req.write(w, false) -} - -// WriteProxy is like Write but writes the request in the form -// expected by an HTTP proxy. It includes the scheme and host -// name in the URI instead of using a separate Host: header line. -// If req.RawURL is non-empty, WriteProxy uses it unchanged -// instead of URL but still omits the Host: header. -func (req *Request) WriteProxy(w io.Writer) os.Error { - return req.write(w, true) -} - -func (req *Request) write(w io.Writer, usingProxy bool) os.Error { - host := req.Host - if host == "" { - if req.URL == nil { - return os.NewError("http: Request.Write on Request with no Host or URL set") - } - host = req.URL.Host - } - - urlStr := req.RawURL - if urlStr == "" { - urlStr = valueOrDefault(req.URL.EncodedPath(), "/") - if req.URL.RawQuery != "" { - urlStr += "?" + req.URL.RawQuery - } - if usingProxy { - if urlStr == "" || urlStr[0] != '/' { - urlStr = "/" + urlStr - } - urlStr = req.URL.Scheme + "://" + host + urlStr - } - } - - bw := bufio.NewWriter(w) - fmt.Fprintf(bw, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), urlStr) - - // Header lines - fmt.Fprintf(bw, "Host: %s\r\n", host) - - // Use the defaultUserAgent unless the Header contains one, which - // may be blank to not send the header. - userAgent := defaultUserAgent - if req.Header != nil { - if ua := req.Header["User-Agent"]; len(ua) > 0 { - userAgent = ua[0] - } - } - if userAgent != "" { - fmt.Fprintf(bw, "User-Agent: %s\r\n", userAgent) - } - - // Process Body,ContentLength,Close,Trailer - tw, err := newTransferWriter(req) - if err != nil { - return err - } - err = tw.WriteHeader(bw) - if err != nil { - return err - } - - // TODO: split long values? (If so, should share code with Conn.Write) - err = req.Header.WriteSubset(bw, reqWriteExcludeHeader) - if err != nil { - return err - } - - io.WriteString(bw, "\r\n") - - // Write body and trailer - err = tw.WriteBody(bw) - if err != nil { - return err - } - bw.Flush() - return nil -} - -// Read a line of bytes (up to \n) from b. -// Give up if the line exceeds maxLineLength. 
-// The returned bytes are a pointer into storage in -// the bufio, so they are only valid until the next bufio read. -func readLineBytes(b *bufio.Reader) (p []byte, err os.Error) { - if p, err = b.ReadSlice('\n'); err != nil { - // We always know when EOF is coming. - // If the caller asked for a line, there should be a line. - if err == os.EOF { - err = io.ErrUnexpectedEOF - } else if err == bufio.ErrBufferFull { - err = ErrLineTooLong - } - return nil, err - } - if len(p) >= maxLineLength { - return nil, ErrLineTooLong - } - - // Chop off trailing white space. - var i int - for i = len(p); i > 0; i-- { - if c := p[i-1]; c != ' ' && c != '\r' && c != '\t' && c != '\n' { - break - } - } - return p[0:i], nil -} - -// readLineBytes, but convert the bytes into a string. -func readLine(b *bufio.Reader) (s string, err os.Error) { - p, e := readLineBytes(b) - if e != nil { - return "", e - } - return string(p), nil -} - -// Convert decimal at s[i:len(s)] to integer, -// returning value, string position where the digits stopped, -// and whether there was a valid number (digits, not too big). -func atoi(s string, i int) (n, i1 int, ok bool) { - const Big = 1000000 - if i >= len(s) || s[i] < '0' || s[i] > '9' { - return 0, 0, false - } - n = 0 - for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { - n = n*10 + int(s[i]-'0') - if n > Big { - return 0, 0, false - } - } - return n, i, true -} - -// ParseHTTPVersion parses a HTTP version string. -// "HTTP/1.0" returns (1, 0, true). -func ParseHTTPVersion(vers string) (major, minor int, ok bool) { - if len(vers) < 5 || vers[0:5] != "HTTP/" { - return 0, 0, false - } - major, i, ok := atoi(vers, 5) - if !ok || i >= len(vers) || vers[i] != '.' { - return 0, 0, false - } - minor, i, ok = atoi(vers, i+1) - if !ok || i != len(vers) { - return 0, 0, false - } - return major, minor, true -} - -type chunkedReader struct { - r *bufio.Reader - n uint64 // unread bytes in chunk - err os.Error -} - -func (cr *chunkedReader) beginChunk() { - // chunk-size CRLF - var line string - line, cr.err = readLine(cr.r) - if cr.err != nil { - return - } - cr.n, cr.err = strconv.Btoui64(line, 16) - if cr.err != nil { - return - } - if cr.n == 0 { - // trailer CRLF - for { - line, cr.err = readLine(cr.r) - if cr.err != nil { - return - } - if line == "" { - break - } - } - cr.err = os.EOF - } -} - -func (cr *chunkedReader) Read(b []uint8) (n int, err os.Error) { - if cr.err != nil { - return 0, cr.err - } - if cr.n == 0 { - cr.beginChunk() - if cr.err != nil { - return 0, cr.err - } - } - if uint64(len(b)) > cr.n { - b = b[0:cr.n] - } - n, cr.err = cr.r.Read(b) - cr.n -= uint64(n) - if cr.n == 0 && cr.err == nil { - // end of chunk (CRLF) - b := make([]byte, 2) - if _, cr.err = io.ReadFull(cr.r, b); cr.err == nil { - if b[0] != '\r' || b[1] != '\n' { - cr.err = os.NewError("malformed chunked encoding") - } - } - } - return n, cr.err -} - -// NewRequest returns a new Request given a method, URL, and optional body. 
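
Tying NewRequest (documented just above) to the Write method described earlier, here is a small sketch that builds a request and serializes it to wire format, which is what the requestwrite tests later in this diff assert against:

    package sketch

    import (
        "bytes"
        "http"
        "os"
        "strings"
    )

    // dumpRequest builds a POST with NewRequest and renders it with Write.
    // Passing a *strings.Reader lets NewRequest fill in ContentLength, so the
    // output carries a Content-Length header rather than chunked framing.
    func dumpRequest() (string, os.Error) {
        req, err := http.NewRequest("POST", "http://example.com/search", strings.NewReader("q=golang"))
        if err != nil {
            return "", err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        var buf bytes.Buffer
        if err := req.Write(&buf); err != nil {
            return "", err
        }
        return buf.String(), nil
    }
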
-func NewRequest(method, urlStr string, body io.Reader) (*Request, os.Error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - rc, ok := body.(io.ReadCloser) - if !ok && body != nil { - rc = ioutil.NopCloser(body) - } - req := &Request{ - Method: method, - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(Header), - Body: rc, - Host: u.Host, - } - if body != nil { - switch v := body.(type) { - case *strings.Reader: - req.ContentLength = int64(v.Len()) - case *bytes.Buffer: - req.ContentLength = int64(v.Len()) - } - } - - return req, nil -} - -// SetBasicAuth sets the request's Authorization header to use HTTP -// Basic Authentication with the provided username and password. -// -// With HTTP Basic Authentication the provided username and password -// are not encrypted. -func (r *Request) SetBasicAuth(username, password string) { - s := username + ":" + password - r.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(s))) -} - -// ReadRequest reads and parses a request from b. -func ReadRequest(b *bufio.Reader) (req *Request, err os.Error) { - - tp := textproto.NewReader(b) - req = new(Request) - - // First line: GET /index.html HTTP/1.0 - var s string - if s, err = tp.ReadLine(); err != nil { - if err == os.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - - var f []string - if f = strings.SplitN(s, " ", 3); len(f) < 3 { - return nil, &badStringError{"malformed HTTP request", s} - } - req.Method, req.RawURL, req.Proto = f[0], f[1], f[2] - var ok bool - if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok { - return nil, &badStringError{"malformed HTTP version", req.Proto} - } - - if req.URL, err = url.ParseRequest(req.RawURL); err != nil { - return nil, err - } - - // Subsequent lines: Key: value. - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil { - return nil, err - } - req.Header = Header(mimeHeader) - - // RFC2616: Must treat - // GET /index.html HTTP/1.1 - // Host: www.google.com - // and - // GET http://www.google.com/index.html HTTP/1.1 - // Host: doesntmatter - // the same. In the second case, any Host line is ignored. - req.Host = req.URL.Host - if req.Host == "" { - req.Host = req.Header.Get("Host") - } - req.Header.Del("Host") - - fixPragmaCacheControl(req.Header) - - // TODO: Parse specific header values: - // Accept - // Accept-Encoding - // Accept-Language - // Authorization - // Cache-Control - // Connection - // Date - // Expect - // From - // If-Match - // If-Modified-Since - // If-None-Match - // If-Range - // If-Unmodified-Since - // Max-Forwards - // Proxy-Authorization - // Referer [sic] - // TE (transfer-codings) - // Trailer - // Transfer-Encoding - // Upgrade - // User-Agent - // Via - // Warning - - err = readTransfer(req, b) - if err != nil { - return nil, err - } - - return req, nil -} - -// ParseForm parses the raw query. -// For POST requests, it also parses the request body as a form. -// ParseMultipartForm calls ParseForm automatically. -// It is idempotent. -func (r *Request) ParseForm() (err os.Error) { - if r.Form != nil { - return - } - - if r.URL != nil { - r.Form, err = url.ParseQuery(r.URL.RawQuery) - } - if r.Method == "POST" { - if r.Body == nil { - return os.NewError("missing form body") - } - ct := r.Header.Get("Content-Type") - switch strings.SplitN(ct, ";", 2)[0] { - case "text/plain", "application/x-www-form-urlencoded", "": - const maxFormSize = int64(10 << 20) // 10 MB is a lot of text. 
- b, e := ioutil.ReadAll(io.LimitReader(r.Body, maxFormSize+1)) - if e != nil { - if err == nil { - err = e - } - break - } - if int64(len(b)) > maxFormSize { - return os.NewError("http: POST too large") - } - var newValues url.Values - newValues, e = url.ParseQuery(string(b)) - if err == nil { - err = e - } - if r.Form == nil { - r.Form = make(url.Values) - } - // Copy values into r.Form. TODO: make this smoother. - for k, vs := range newValues { - for _, value := range vs { - r.Form.Add(k, value) - } - } - case "multipart/form-data": - // handled by ParseMultipartForm - default: - return &badStringError{"unknown Content-Type", ct} - } - } - return err -} - -// ParseMultipartForm parses a request body as multipart/form-data. -// The whole request body is parsed and up to a total of maxMemory bytes of -// its file parts are stored in memory, with the remainder stored on -// disk in temporary files. -// ParseMultipartForm calls ParseForm if necessary. -// After one call to ParseMultipartForm, subsequent calls have no effect. -func (r *Request) ParseMultipartForm(maxMemory int64) os.Error { - if r.MultipartForm == multipartByReader { - return os.NewError("http: multipart handled by MultipartReader") - } - if r.Form == nil { - err := r.ParseForm() - if err != nil { - return err - } - } - if r.MultipartForm != nil { - return nil - } - - mr, err := r.multipartReader() - if err == ErrNotMultipart { - return nil - } else if err != nil { - return err - } - - f, err := mr.ReadForm(maxMemory) - if err != nil { - return err - } - for k, v := range f.Value { - r.Form[k] = append(r.Form[k], v...) - } - r.MultipartForm = f - - return nil -} - -// FormValue returns the first value for the named component of the query. -// FormValue calls ParseMultipartForm and ParseForm if necessary. -func (r *Request) FormValue(key string) string { - if r.Form == nil { - r.ParseMultipartForm(defaultMaxMemory) - } - if vs := r.Form[key]; len(vs) > 0 { - return vs[0] - } - return "" -} - -// FormFile returns the first file for the provided form key. -// FormFile calls ParseMultipartForm and ParseForm if necessary. -func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, os.Error) { - if r.MultipartForm == multipartByReader { - return nil, nil, os.NewError("http: multipart handled by MultipartReader") - } - if r.MultipartForm == nil { - err := r.ParseMultipartForm(defaultMaxMemory) - if err != nil { - return nil, nil, err - } - } - if r.MultipartForm != nil && r.MultipartForm.File != nil { - if fhs := r.MultipartForm.File[key]; len(fhs) > 0 { - f, err := fhs[0].Open() - return f, fhs[0], err - } - } - return nil, nil, ErrMissingFile -} - -func (r *Request) expectsContinue() bool { - return strings.ToLower(r.Header.Get("Expect")) == "100-continue" -} - -func (r *Request) wantsHttp10KeepAlive() bool { - if r.ProtoMajor != 1 || r.ProtoMinor != 0 { - return false - } - return strings.Contains(strings.ToLower(r.Header.Get("Connection")), "keep-alive") -} diff --git a/src/pkg/http/request_test.go b/src/pkg/http/request_test.go deleted file mode 100644 index 869cd57b6..000000000 --- a/src/pkg/http/request_test.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http_test - -import ( - "bytes" - "fmt" - . 
"http" - "http/httptest" - "io" - "io/ioutil" - "mime/multipart" - "os" - "reflect" - "regexp" - "strings" - "testing" - "url" -) - -type stringMultimap map[string][]string - -type parseTest struct { - query string - out stringMultimap -} - -var parseTests = []parseTest{ - { - query: "a=1&b=2", - out: stringMultimap{"a": []string{"1"}, "b": []string{"2"}}, - }, - { - query: "a=1&a=2&a=banana", - out: stringMultimap{"a": []string{"1", "2", "banana"}}, - }, - { - query: "ascii=%3Ckey%3A+0x90%3E", - out: stringMultimap{"ascii": []string{"<key: 0x90>"}}, - }, -} - -func TestParseForm(t *testing.T) { - for i, test := range parseTests { - form, err := url.ParseQuery(test.query) - if err != nil { - t.Errorf("test %d: Unexpected error: %v", i, err) - continue - } - if len(form) != len(test.out) { - t.Errorf("test %d: len(form) = %d, want %d", i, len(form), len(test.out)) - } - for k, evs := range test.out { - vs, ok := form[k] - if !ok { - t.Errorf("test %d: Missing key %q", i, k) - continue - } - if len(vs) != len(evs) { - t.Errorf("test %d: len(form[%q]) = %d, want %d", i, k, len(vs), len(evs)) - continue - } - for j, ev := range evs { - if v := vs[j]; v != ev { - t.Errorf("test %d: form[%q][%d] = %q, want %q", i, k, j, v, ev) - } - } - } - } -} - -func TestQuery(t *testing.T) { - req := &Request{Method: "GET"} - req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar") - if q := req.FormValue("q"); q != "foo" { - t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) - } -} - -func TestPostQuery(t *testing.T) { - req := &Request{Method: "POST"} - req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar&both=x") - req.Header = Header{ - "Content-Type": {"application/x-www-form-urlencoded; boo!"}, - } - req.Body = ioutil.NopCloser(strings.NewReader("z=post&both=y")) - if q := req.FormValue("q"); q != "foo" { - t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) - } - if z := req.FormValue("z"); z != "post" { - t.Errorf(`req.FormValue("z") = %q, want "post"`, z) - } - if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"x", "y"}) { - t.Errorf(`req.FormValue("both") = %q, want ["x", "y"]`, both) - } -} - -type stringMap map[string][]string -type parseContentTypeTest struct { - contentType stringMap - error bool -} - -var parseContentTypeTests = []parseContentTypeTest{ - {contentType: stringMap{"Content-Type": {"text/plain"}}}, - {contentType: stringMap{}}, // Non-existent keys are not placed. The value nil is illegal. 
- {contentType: stringMap{"Content-Type": {"text/plain; boundary="}}}, - { - contentType: stringMap{"Content-Type": {"application/unknown"}}, - error: true, - }, -} - -func TestPostContentTypeParsing(t *testing.T) { - for i, test := range parseContentTypeTests { - req := &Request{ - Method: "POST", - Header: Header(test.contentType), - Body: ioutil.NopCloser(bytes.NewBufferString("body")), - } - err := req.ParseForm() - if !test.error && err != nil { - t.Errorf("test %d: Unexpected error: %v", i, err) - } - if test.error && err == nil { - t.Errorf("test %d should have returned error", i) - } - } -} - -func TestMultipartReader(t *testing.T) { - req := &Request{ - Method: "POST", - Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}}, - Body: ioutil.NopCloser(new(bytes.Buffer)), - } - multipart, err := req.MultipartReader() - if multipart == nil { - t.Errorf("expected multipart; error: %v", err) - } - - req.Header = Header{"Content-Type": {"text/plain"}} - multipart, err = req.MultipartReader() - if multipart != nil { - t.Errorf("unexpected multipart for text/plain") - } -} - -func TestRedirect(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - switch r.URL.Path { - case "/": - w.Header().Set("Location", "/foo/") - w.WriteHeader(StatusSeeOther) - case "/foo/": - fmt.Fprintf(w, "foo") - default: - w.WriteHeader(StatusBadRequest) - } - })) - defer ts.Close() - - var end = regexp.MustCompile("/foo/$") - r, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - r.Body.Close() - url := r.Request.URL.String() - if r.StatusCode != 200 || !end.MatchString(url) { - t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url) - } -} - -func TestSetBasicAuth(t *testing.T) { - r, _ := NewRequest("GET", "http://example.com/", nil) - r.SetBasicAuth("Aladdin", "open sesame") - if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e { - t.Errorf("got header %q, want %q", g, e) - } -} - -func TestMultipartRequest(t *testing.T) { - // Test that we can read the values and files of a - // multipart request with FormValue and FormFile, - // and that ParseMultipartForm can be called multiple times. - req := newTestMultipartRequest(t) - if err := req.ParseMultipartForm(25); err != nil { - t.Fatal("ParseMultipartForm first call:", err) - } - defer req.MultipartForm.RemoveAll() - validateTestMultipartContents(t, req, false) - if err := req.ParseMultipartForm(25); err != nil { - t.Fatal("ParseMultipartForm second call:", err) - } - validateTestMultipartContents(t, req, false) -} - -func TestMultipartRequestAuto(t *testing.T) { - // Test that FormValue and FormFile automatically invoke - // ParseMultipartForm and return the right values. - req := newTestMultipartRequest(t) - defer func() { - if req.MultipartForm != nil { - req.MultipartForm.RemoveAll() - } - }() - validateTestMultipartContents(t, req, true) -} - -func TestEmptyMultipartRequest(t *testing.T) { - // Test that FormValue and FormFile automatically invoke - // ParseMultipartForm and return the right values. 
- req, err := NewRequest("GET", "/", nil) - if err != nil { - t.Errorf("NewRequest err = %q", err) - } - testMissingFile(t, req) -} - -func TestRequestMultipartCallOrder(t *testing.T) { - req := newTestMultipartRequest(t) - _, err := req.MultipartReader() - if err != nil { - t.Fatalf("MultipartReader: %v", err) - } - err = req.ParseMultipartForm(1024) - if err == nil { - t.Errorf("expected an error from ParseMultipartForm after call to MultipartReader") - } -} - -func testMissingFile(t *testing.T, req *Request) { - f, fh, err := req.FormFile("missing") - if f != nil { - t.Errorf("FormFile file = %q, want nil", f) - } - if fh != nil { - t.Errorf("FormFile file header = %q, want nil", fh) - } - if err != ErrMissingFile { - t.Errorf("FormFile err = %q, want ErrMissingFile", err) - } -} - -func newTestMultipartRequest(t *testing.T) *Request { - b := bytes.NewBufferString(strings.Replace(message, "\n", "\r\n", -1)) - req, err := NewRequest("POST", "/", b) - if err != nil { - t.Fatal("NewRequest:", err) - } - ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary) - req.Header.Set("Content-type", ctype) - return req -} - -func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) { - if g, e := req.FormValue("texta"), textaValue; g != e { - t.Errorf("texta value = %q, want %q", g, e) - } - if g, e := req.FormValue("texta"), textaValue; g != e { - t.Errorf("texta value = %q, want %q", g, e) - } - if g := req.FormValue("missing"); g != "" { - t.Errorf("missing value = %q, want empty string", g) - } - - assertMem := func(n string, fd multipart.File) { - if _, ok := fd.(*os.File); ok { - t.Error(n, " is *os.File, should not be") - } - } - fd := testMultipartFile(t, req, "filea", "filea.txt", fileaContents) - assertMem("filea", fd) - fd = testMultipartFile(t, req, "fileb", "fileb.txt", filebContents) - if allMem { - assertMem("fileb", fd) - } else { - if _, ok := fd.(*os.File); !ok { - t.Errorf("fileb has unexpected underlying type %T", fd) - } - } - - testMissingFile(t, req) -} - -func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File { - f, fh, err := req.FormFile(key) - if err != nil { - t.Fatalf("FormFile(%q): %q", key, err) - } - if fh.Filename != expectFilename { - t.Errorf("filename = %q, want %q", fh.Filename, expectFilename) - } - var b bytes.Buffer - _, err = io.Copy(&b, f) - if err != nil { - t.Fatal("copying contents:", err) - } - if g := b.String(); g != expectContent { - t.Errorf("contents = %q, want %q", g, expectContent) - } - return f -} - -const ( - fileaContents = "This is a test file." - filebContents = "Another test file." - textaValue = "foo" - textbValue = "bar" - boundary = `MyBoundary` -) - -const message = ` ---MyBoundary -Content-Disposition: form-data; name="filea"; filename="filea.txt" -Content-Type: text/plain - -` + fileaContents + ` ---MyBoundary -Content-Disposition: form-data; name="fileb"; filename="fileb.txt" -Content-Type: text/plain - -` + filebContents + ` ---MyBoundary -Content-Disposition: form-data; name="texta" - -` + textaValue + ` ---MyBoundary -Content-Disposition: form-data; name="textb" - -` + textbValue + ` ---MyBoundary-- -` diff --git a/src/pkg/http/requestwrite_test.go b/src/pkg/http/requestwrite_test.go deleted file mode 100644 index 458f0bd7f..000000000 --- a/src/pkg/http/requestwrite_test.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" - "url" -) - -type reqWriteTest struct { - Req Request - Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body - Raw string - RawProxy string -} - -var reqWriteTests = []reqWriteTest{ - // HTTP/1.1 => chunked coding; no body; no trailer - { - Request{ - Method: "GET", - RawURL: "http://www.techcrunch.com/", - URL: &url.URL{ - Raw: "http://www.techcrunch.com/", - Scheme: "http", - RawPath: "http://www.techcrunch.com/", - RawAuthority: "www.techcrunch.com", - RawUserinfo: "", - Host: "www.techcrunch.com", - Path: "/", - RawQuery: "", - Fragment: "", - }, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: Header{ - "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"}, - "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"}, - "Accept-Encoding": {"gzip,deflate"}, - "Accept-Language": {"en-us,en;q=0.5"}, - "Keep-Alive": {"300"}, - "Proxy-Connection": {"keep-alive"}, - "User-Agent": {"Fake"}, - }, - Body: nil, - Close: false, - Host: "www.techcrunch.com", - Form: map[string][]string{}, - }, - - nil, - - "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + - "Host: www.techcrunch.com\r\n" + - "User-Agent: Fake\r\n" + - "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + - "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + - "Accept-Encoding: gzip,deflate\r\n" + - "Accept-Language: en-us,en;q=0.5\r\n" + - "Keep-Alive: 300\r\n" + - "Proxy-Connection: keep-alive\r\n\r\n", - - "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + - "Host: www.techcrunch.com\r\n" + - "User-Agent: Fake\r\n" + - "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + - "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + - "Accept-Encoding: gzip,deflate\r\n" + - "Accept-Language: en-us,en;q=0.5\r\n" + - "Keep-Alive: 300\r\n" + - "Proxy-Connection: keep-alive\r\n\r\n", - }, - // HTTP/1.1 => chunked coding; body; empty trailer - { - Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/search", - }, - ProtoMajor: 1, - ProtoMinor: 1, - Header: Header{}, - TransferEncoding: []string{"chunked"}, - }, - - []byte("abcdef"), - - "GET /search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - chunk("abcdef") + chunk(""), - - "GET http://www.google.com/search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - chunk("abcdef") + chunk(""), - }, - // HTTP/1.1 POST => chunked coding; body; empty trailer - { - Request{ - Method: "POST", - URL: &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/search", - }, - ProtoMajor: 1, - ProtoMinor: 1, - Header: Header{}, - Close: true, - TransferEncoding: []string{"chunked"}, - }, - - []byte("abcdef"), - - "POST /search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "Connection: close\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - chunk("abcdef") + chunk(""), - - "POST http://www.google.com/search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "Connection: close\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - chunk("abcdef") + chunk(""), - }, - - // HTTP/1.1 POST with Content-Length, no chunking 
- { - Request{ - Method: "POST", - URL: &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/search", - }, - ProtoMajor: 1, - ProtoMinor: 1, - Header: Header{}, - Close: true, - ContentLength: 6, - }, - - []byte("abcdef"), - - "POST /search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "Connection: close\r\n" + - "Content-Length: 6\r\n" + - "\r\n" + - "abcdef", - - "POST http://www.google.com/search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "Connection: close\r\n" + - "Content-Length: 6\r\n" + - "\r\n" + - "abcdef", - }, - - // HTTP/1.1 POST with Content-Length in headers - { - Request{ - Method: "POST", - RawURL: "http://example.com/", - Host: "example.com", - Header: Header{ - "Content-Length": []string{"10"}, // ignored - }, - ContentLength: 6, - }, - - []byte("abcdef"), - - "POST http://example.com/ HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go http package\r\n" + - "Content-Length: 6\r\n" + - "\r\n" + - "abcdef", - - "POST http://example.com/ HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go http package\r\n" + - "Content-Length: 6\r\n" + - "\r\n" + - "abcdef", - }, - - // default to HTTP/1.1 - { - Request{ - Method: "GET", - RawURL: "/search", - Host: "www.google.com", - }, - - nil, - - "GET /search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "\r\n", - - // Looks weird but RawURL overrides what WriteProxy would choose. - "GET /search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "User-Agent: Go http package\r\n" + - "\r\n", - }, - - // Request with a 0 ContentLength and a 0 byte body. - { - Request{ - Method: "POST", - RawURL: "/", - Host: "example.com", - ProtoMajor: 1, - ProtoMinor: 1, - ContentLength: 0, // as if unset by user - }, - - func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 0)) }, - - "POST / HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go http package\r\n" + - "\r\n", - - "POST / HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go http package\r\n" + - "\r\n", - }, - - // Request with a 0 ContentLength and a 1 byte body. 
- { - Request{ - Method: "POST", - RawURL: "/", - Host: "example.com", - ProtoMajor: 1, - ProtoMinor: 1, - ContentLength: 0, // as if unset by user - }, - - func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 1)) }, - - "POST / HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go http package\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - chunk("x") + chunk(""), - - "POST / HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go http package\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - chunk("x") + chunk(""), - }, -} - -func TestRequestWrite(t *testing.T) { - for i := range reqWriteTests { - tt := &reqWriteTests[i] - - setBody := func() { - switch b := tt.Body.(type) { - case []byte: - tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - case func() io.ReadCloser: - tt.Req.Body = b() - } - } - if tt.Body != nil { - setBody() - } - if tt.Req.Header == nil { - tt.Req.Header = make(Header) - } - var braw bytes.Buffer - err := tt.Req.Write(&braw) - if err != nil { - t.Errorf("error writing #%d: %s", i, err) - continue - } - sraw := braw.String() - if sraw != tt.Raw { - t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.Raw, sraw) - continue - } - - if tt.Body != nil { - setBody() - } - var praw bytes.Buffer - err = tt.Req.WriteProxy(&praw) - if err != nil { - t.Errorf("error writing #%d: %s", i, err) - continue - } - sraw = praw.String() - if sraw != tt.RawProxy { - t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.RawProxy, sraw) - continue - } - } -} - -type closeChecker struct { - io.Reader - closed bool -} - -func (rc *closeChecker) Close() os.Error { - rc.closed = true - return nil -} - -// TestRequestWriteClosesBody tests that Request.Write does close its request.Body. -// It also indirectly tests NewRequest and that it doesn't wrap an existing Closer -// inside a NopCloser, and that it serializes it correctly. -func TestRequestWriteClosesBody(t *testing.T) { - rc := &closeChecker{Reader: strings.NewReader("my body")} - req, _ := NewRequest("POST", "http://foo.com/", rc) - if req.ContentLength != 0 { - t.Errorf("got req.ContentLength %d, want 0", req.ContentLength) - } - buf := new(bytes.Buffer) - req.Write(buf) - if !rc.closed { - t.Error("body not closed after write") - } - expected := "POST / HTTP/1.1\r\n" + - "Host: foo.com\r\n" + - "User-Agent: Go http package\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - // TODO: currently we don't buffer before chunking, so we get a - // single "m" chunk before the other chunks, as this was the 1-byte - // read from our MultiReader where we stiched the Body back together - // after sniffing whether the Body was 0 bytes or not. - chunk("m") + - chunk("y body") + - chunk("") - if buf.String() != expected { - t.Errorf("write:\n got: %s\nwant: %s", buf.String(), expected) - } -} - -func chunk(s string) string { - return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) -} diff --git a/src/pkg/http/response.go b/src/pkg/http/response.go deleted file mode 100644 index 915327a69..000000000 --- a/src/pkg/http/response.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP Response reading and parsing. 
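The chunk() helper in the request-write tests above spells out the wire framing that the "Transfer-Encoding: chunked" expectations rely on: each chunk is its length in hex, CRLF, the payload, CRLF, and a zero-length chunk terminates the body. A standalone sketch of that framing (not part of the deleted sources; standard library only):

package main

import (
	"bytes"
	"fmt"
)

// chunk mirrors the test helper above: hex length, CRLF, data, CRLF.
func chunk(s string) string {
	return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
}

func main() {
	var body bytes.Buffer
	body.WriteString(chunk("abcdef")) // "6\r\nabcdef\r\n"
	body.WriteString(chunk(""))       // "0\r\n\r\n" ends a chunked body
	fmt.Printf("%q\n", body.String()) // matches the chunked expectations in the tests above
}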
- -package http - -import ( - "bufio" - "io" - "net/textproto" - "os" - "strconv" - "strings" -) - -var respExcludeHeader = map[string]bool{ - "Content-Length": true, - "Transfer-Encoding": true, - "Trailer": true, -} - -// Response represents the response from an HTTP request. -// -type Response struct { - Status string // e.g. "200 OK" - StatusCode int // e.g. 200 - Proto string // e.g. "HTTP/1.0" - ProtoMajor int // e.g. 1 - ProtoMinor int // e.g. 0 - - // Header maps header keys to values. If the response had multiple - // headers with the same key, they will be concatenated, with comma - // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers - // be semantically equivalent to a comma-delimited sequence.) Values - // duplicated by other fields in this struct (e.g., ContentLength) are - // omitted from Header. - // - // Keys in the map are canonicalized (see CanonicalHeaderKey). - Header Header - - // Body represents the response body. - Body io.ReadCloser - - // ContentLength records the length of the associated content. The - // value -1 indicates that the length is unknown. Unless RequestMethod - // is "HEAD", values >= 0 indicate that the given number of bytes may - // be read from Body. - ContentLength int64 - - // Contains transfer encodings from outer-most to inner-most. Value is - // nil, means that "identity" encoding is used. - TransferEncoding []string - - // Close records whether the header directed that the connection be - // closed after reading Body. The value is advice for clients: neither - // ReadResponse nor Response.Write ever closes a connection. - Close bool - - // Trailer maps trailer keys to values, in the same - // format as the header. - Trailer Header - - // The Request that was sent to obtain this Response. - // Request's Body is nil (having already been consumed). - // This is only populated for Client requests. - Request *Request -} - -// Cookies parses and returns the cookies set in the Set-Cookie headers. -func (r *Response) Cookies() []*Cookie { - return readSetCookies(r.Header) -} - -// ReadResponse reads and returns an HTTP response from r. The -// req parameter specifies the Request that corresponds to -// this Response. Clients must call resp.Body.Close when finished -// reading resp.Body. After that call, clients can inspect -// resp.Trailer to find key/value pairs included in the response -// trailer. -func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err os.Error) { - - tp := textproto.NewReader(r) - resp = new(Response) - - resp.Request = req - resp.Request.Method = strings.ToUpper(resp.Request.Method) - - // Parse the first line of the response. - line, err := tp.ReadLine() - if err != nil { - if err == os.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - f := strings.SplitN(line, " ", 3) - if len(f) < 2 { - return nil, &badStringError{"malformed HTTP response", line} - } - reasonPhrase := "" - if len(f) > 2 { - reasonPhrase = f[2] - } - resp.Status = f[1] + " " + reasonPhrase - resp.StatusCode, err = strconv.Atoi(f[1]) - if err != nil { - return nil, &badStringError{"malformed HTTP status code", f[1]} - } - - resp.Proto = f[0] - var ok bool - if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok { - return nil, &badStringError{"malformed HTTP version", resp.Proto} - } - - // Parse the response headers. 
- mimeHeader, err := tp.ReadMIMEHeader() - if err != nil { - return nil, err - } - resp.Header = Header(mimeHeader) - - fixPragmaCacheControl(resp.Header) - - err = readTransfer(resp, r) - if err != nil { - return nil, err - } - - return resp, nil -} - -// RFC2616: Should treat -// Pragma: no-cache -// like -// Cache-Control: no-cache -func fixPragmaCacheControl(header Header) { - if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { - if _, presentcc := header["Cache-Control"]; !presentcc { - header["Cache-Control"] = []string{"no-cache"} - } - } -} - -// ProtoAtLeast returns whether the HTTP protocol used -// in the response is at least major.minor. -func (r *Response) ProtoAtLeast(major, minor int) bool { - return r.ProtoMajor > major || - r.ProtoMajor == major && r.ProtoMinor >= minor -} - -// Writes the response (header, body and trailer) in wire format. This method -// consults the following fields of resp: -// -// StatusCode -// ProtoMajor -// ProtoMinor -// RequestMethod -// TransferEncoding -// Trailer -// Body -// ContentLength -// Header, values for non-canonical keys will have unpredictable behavior -// -func (resp *Response) Write(w io.Writer) os.Error { - - // RequestMethod should be upper-case - if resp.Request != nil { - resp.Request.Method = strings.ToUpper(resp.Request.Method) - } - - // Status line - text := resp.Status - if text == "" { - var ok bool - text, ok = statusText[resp.StatusCode] - if !ok { - text = "status code " + strconv.Itoa(resp.StatusCode) - } - } - io.WriteString(w, "HTTP/"+strconv.Itoa(resp.ProtoMajor)+".") - io.WriteString(w, strconv.Itoa(resp.ProtoMinor)+" ") - io.WriteString(w, strconv.Itoa(resp.StatusCode)+" "+text+"\r\n") - - // Process Body,ContentLength,Close,Trailer - tw, err := newTransferWriter(resp) - if err != nil { - return err - } - err = tw.WriteHeader(w) - if err != nil { - return err - } - - // Rest of header - err = resp.Header.WriteSubset(w, respExcludeHeader) - if err != nil { - return err - } - - // End-of-header - io.WriteString(w, "\r\n") - - // Write body and trailer - err = tw.WriteBody(w) - if err != nil { - return err - } - - // Success - return nil -} diff --git a/src/pkg/http/response_test.go b/src/pkg/http/response_test.go deleted file mode 100644 index 1d4a23423..000000000 --- a/src/pkg/http/response_test.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/rand" - "fmt" - "os" - "io" - "io/ioutil" - "reflect" - "testing" -) - -type respTest struct { - Raw string - Resp Response - Body string -} - -func dummyReq(method string) *Request { - return &Request{Method: method} -} - -var respTests = []respTest{ - // Unchunked response without Content-Length. - { - "HTTP/1.0 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n" + - "Body here\n", - - Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{ - "Connection": {"close"}, // TODO(rsc): Delete? - }, - Close: true, - ContentLength: -1, - }, - - "Body here\n", - }, - - // Unchunked HTTP/1.1 response without Content-Length or - // Connection headers. 
- { - "HTTP/1.1 200 OK\r\n" + - "\r\n" + - "Body here\n", - - Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Request: dummyReq("GET"), - Close: true, - ContentLength: -1, - }, - - "Body here\n", - }, - - // Unchunked HTTP/1.1 204 response without Content-Length. - { - "HTTP/1.1 204 No Content\r\n" + - "\r\n" + - "Body should not be read!\n", - - Response{ - Status: "204 No Content", - StatusCode: 204, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Request: dummyReq("GET"), - Close: false, - ContentLength: 0, - }, - - "", - }, - - // Unchunked response with Content-Length. - { - "HTTP/1.0 200 OK\r\n" + - "Content-Length: 10\r\n" + - "Connection: close\r\n" + - "\r\n" + - "Body here\n", - - Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{ - "Connection": {"close"}, // TODO(rsc): Delete? - "Content-Length": {"10"}, // TODO(rsc): Delete? - }, - Close: true, - ContentLength: 10, - }, - - "Body here\n", - }, - - // Chunked response without Content-Length. - { - "HTTP/1.0 200 OK\r\n" + - "Transfer-Encoding: chunked\r\n" + - "\r\n" + - "0a\r\n" + - "Body here\n\r\n" + - "09\r\n" + - "continued\r\n" + - "0\r\n" + - "\r\n", - - Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{}, - Close: true, - ContentLength: -1, - TransferEncoding: []string{"chunked"}, - }, - - "Body here\ncontinued", - }, - - // Chunked response with Content-Length. - { - "HTTP/1.0 200 OK\r\n" + - "Transfer-Encoding: chunked\r\n" + - "Content-Length: 10\r\n" + - "\r\n" + - "0a\r\n" + - "Body here\n" + - "0\r\n" + - "\r\n", - - Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{}, - Close: true, - ContentLength: -1, // TODO(rsc): Fix? - TransferEncoding: []string{"chunked"}, - }, - - "Body here\n", - }, - - // Chunked response in response to a HEAD request (the "chunked" should - // be ignored, as HEAD responses never have bodies) - { - "HTTP/1.0 200 OK\r\n" + - "Transfer-Encoding: chunked\r\n" + - "\r\n", - - Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("HEAD"), - Header: Header{}, - Close: true, - ContentLength: 0, - }, - - "", - }, - - // explicit Content-Length of 0. - { - "HTTP/1.1 200 OK\r\n" + - "Content-Length: 0\r\n" + - "\r\n", - - Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Request: dummyReq("GET"), - Header: Header{ - "Content-Length": {"0"}, - }, - Close: false, - ContentLength: 0, - }, - - "", - }, - - // Status line without a Reason-Phrase, but trailing space. - // (permitted by RFC 2616) - { - "HTTP/1.0 303 \r\n\r\n", - Response{ - Status: "303 ", - StatusCode: 303, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{}, - Close: true, - ContentLength: -1, - }, - - "", - }, - - // Status line without a Reason-Phrase, and no trailing space. 
- // (not permitted by RFC 2616, but we'll accept it anyway) - { - "HTTP/1.0 303\r\n\r\n", - Response{ - Status: "303 ", - StatusCode: 303, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{}, - Close: true, - ContentLength: -1, - }, - - "", - }, -} - -func TestReadResponse(t *testing.T) { - for i := range respTests { - tt := &respTests[i] - var braw bytes.Buffer - braw.WriteString(tt.Raw) - resp, err := ReadResponse(bufio.NewReader(&braw), tt.Resp.Request) - if err != nil { - t.Errorf("#%d: %s", i, err) - continue - } - rbody := resp.Body - resp.Body = nil - diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp) - var bout bytes.Buffer - if rbody != nil { - io.Copy(&bout, rbody) - rbody.Close() - } - body := bout.String() - if body != tt.Body { - t.Errorf("#%d: Body = %q want %q", i, body, tt.Body) - } - } -} - -var readResponseCloseInMiddleTests = []struct { - chunked, compressed bool -}{ - {false, false}, - {true, false}, - {true, true}, -} - -// TestReadResponseCloseInMiddle tests that closing a body after -// reading only part of its contents advances the read to the end of -// the request, right up until the next request. -func TestReadResponseCloseInMiddle(t *testing.T) { - for _, test := range readResponseCloseInMiddleTests { - fatalf := func(format string, args ...interface{}) { - args = append([]interface{}{test.chunked, test.compressed}, args...) - t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...) - } - checkErr := func(err os.Error, msg string) { - if err == nil { - return - } - fatalf(msg+": %v", err) - } - var buf bytes.Buffer - buf.WriteString("HTTP/1.1 200 OK\r\n") - if test.chunked { - buf.WriteString("Transfer-Encoding: chunked\r\n") - } else { - buf.WriteString("Content-Length: 1000000\r\n") - } - var wr io.Writer = &buf - if test.chunked { - wr = &chunkedWriter{wr} - } - if test.compressed { - buf.WriteString("Content-Encoding: gzip\r\n") - var err os.Error - wr, err = gzip.NewWriter(wr) - checkErr(err, "gzip.NewWriter") - } - buf.WriteString("\r\n") - - chunk := bytes.Repeat([]byte{'x'}, 1000) - for i := 0; i < 1000; i++ { - if test.compressed { - // Otherwise this compresses too well. 
- _, err := io.ReadFull(rand.Reader, chunk) - checkErr(err, "rand.Reader ReadFull") - } - wr.Write(chunk) - } - if test.compressed { - err := wr.(*gzip.Compressor).Close() - checkErr(err, "compressor close") - } - if test.chunked { - buf.WriteString("0\r\n\r\n") - } - buf.WriteString("Next Request Here") - - bufr := bufio.NewReader(&buf) - resp, err := ReadResponse(bufr, dummyReq("GET")) - checkErr(err, "ReadResponse") - expectedLength := int64(-1) - if !test.chunked { - expectedLength = 1000000 - } - if resp.ContentLength != expectedLength { - fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength) - } - if resp.Body == nil { - fatalf("nil body") - } - if test.compressed { - gzReader, err := gzip.NewReader(resp.Body) - checkErr(err, "gzip.NewReader") - resp.Body = &readFirstCloseBoth{gzReader, resp.Body} - } - - rbuf := make([]byte, 2500) - n, err := io.ReadFull(resp.Body, rbuf) - checkErr(err, "2500 byte ReadFull") - if n != 2500 { - fatalf("ReadFull only read %d bytes", n) - } - if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) { - fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf)) - } - resp.Body.Close() - - rest, err := ioutil.ReadAll(bufr) - checkErr(err, "ReadAll on remainder") - if e, g := "Next Request Here", string(rest); e != g { - fatalf("remainder = %q, expected %q", g, e) - } - } -} - -func diff(t *testing.T, prefix string, have, want interface{}) { - hv := reflect.ValueOf(have).Elem() - wv := reflect.ValueOf(want).Elem() - if hv.Type() != wv.Type() { - t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type()) - } - for i := 0; i < hv.NumField(); i++ { - hf := hv.Field(i).Interface() - wf := wv.Field(i).Interface() - if !reflect.DeepEqual(hf, wf) { - t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf) - } - } -} diff --git a/src/pkg/http/responsewrite_test.go b/src/pkg/http/responsewrite_test.go deleted file mode 100644 index f8e63acf4..000000000 --- a/src/pkg/http/responsewrite_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "io/ioutil" - "testing" -) - -type respWriteTest struct { - Resp Response - Raw string -} - -var respWriteTests = []respWriteTest{ - // HTTP/1.0, identity coding; no trailer - { - Response{ - StatusCode: 503, - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{}, - Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")), - ContentLength: 6, - }, - - "HTTP/1.0 503 Service Unavailable\r\n" + - "Content-Length: 6\r\n\r\n" + - "abcdef", - }, - // Unchunked response without Content-Length. - { - Response{ - StatusCode: 200, - ProtoMajor: 1, - ProtoMinor: 0, - Request: dummyReq("GET"), - Header: Header{}, - Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")), - ContentLength: -1, - }, - "HTTP/1.0 200 OK\r\n" + - "\r\n" + - "abcdef", - }, - // HTTP/1.1, chunked coding; empty trailer; close - { - Response{ - StatusCode: 200, - ProtoMajor: 1, - ProtoMinor: 1, - Request: dummyReq("GET"), - Header: Header{}, - Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")), - ContentLength: 6, - TransferEncoding: []string{"chunked"}, - Close: true, - }, - - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - "6\r\nabcdef\r\n0\r\n\r\n", - }, - - // Header value with a newline character (Issue 914). 
- // Also tests removal of leading and trailing whitespace. - { - Response{ - StatusCode: 204, - ProtoMajor: 1, - ProtoMinor: 1, - Request: dummyReq("GET"), - Header: Header{ - "Foo": []string{" Bar\nBaz "}, - }, - Body: nil, - ContentLength: 0, - TransferEncoding: []string{"chunked"}, - Close: true, - }, - - "HTTP/1.1 204 No Content\r\n" + - "Connection: close\r\n" + - "Foo: Bar Baz\r\n" + - "\r\n", - }, -} - -func TestResponseWrite(t *testing.T) { - for i := range respWriteTests { - tt := &respWriteTests[i] - var braw bytes.Buffer - err := tt.Resp.Write(&braw) - if err != nil { - t.Errorf("error writing #%d: %s", i, err) - continue - } - sraw := braw.String() - if sraw != tt.Raw { - t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw) - continue - } - } -} diff --git a/src/pkg/http/reverseproxy.go b/src/pkg/http/reverseproxy.go deleted file mode 100644 index 3f8bfdc80..000000000 --- a/src/pkg/http/reverseproxy.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP reverse proxy handler - -package http - -import ( - "io" - "log" - "net" - "os" - "strings" - "sync" - "time" - "url" -) - -// ReverseProxy is an HTTP Handler that takes an incoming request and -// sends it to another server, proxying the response back to the -// client. -type ReverseProxy struct { - // Director must be a function which modifies - // the request into a new request to be sent - // using Transport. Its response is then copied - // back to the original client unmodified. - Director func(*Request) - - // The Transport used to perform proxy requests. - // If nil, DefaultTransport is used. - Transport RoundTripper - - // FlushInterval specifies the flush interval, in - // nanoseconds, to flush to the client while - // coping the response body. - // If zero, no periodic flushing is done. - FlushInterval int64 -} - -func singleJoiningSlash(a, b string) string { - aslash := strings.HasSuffix(a, "/") - bslash := strings.HasPrefix(b, "/") - switch { - case aslash && bslash: - return a + b[1:] - case !aslash && !bslash: - return a + "/" + b - } - return a + b -} - -// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites -// URLs to the scheme, host, and base path provided in target. If the -// target's path is "/base" and the incoming request was for "/dir", -// the target request will be for /base/dir. -func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy { - director := func(req *Request) { - req.URL.Scheme = target.Scheme - req.URL.Host = target.Host - req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) - if q := req.URL.RawQuery; q != "" { - req.URL.RawPath = req.URL.Path + "?" 
+ q - } else { - req.URL.RawPath = req.URL.Path - } - req.URL.RawQuery = target.RawQuery - } - return &ReverseProxy{Director: director} -} - -func (p *ReverseProxy) ServeHTTP(rw ResponseWriter, req *Request) { - transport := p.Transport - if transport == nil { - transport = DefaultTransport - } - - outreq := new(Request) - *outreq = *req // includes shallow copies of maps, but okay - - p.Director(outreq) - outreq.Proto = "HTTP/1.1" - outreq.ProtoMajor = 1 - outreq.ProtoMinor = 1 - outreq.Close = false - - if clientIp, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { - outreq.Header.Set("X-Forwarded-For", clientIp) - } - - res, err := transport.RoundTrip(outreq) - if err != nil { - log.Printf("http: proxy error: %v", err) - rw.WriteHeader(StatusInternalServerError) - return - } - - hdr := rw.Header() - for k, vv := range res.Header { - for _, v := range vv { - hdr.Add(k, v) - } - } - - rw.WriteHeader(res.StatusCode) - - if res.Body != nil { - var dst io.Writer = rw - if p.FlushInterval != 0 { - if wf, ok := rw.(writeFlusher); ok { - dst = &maxLatencyWriter{dst: wf, latency: p.FlushInterval} - } - } - io.Copy(dst, res.Body) - } -} - -type writeFlusher interface { - io.Writer - Flusher -} - -type maxLatencyWriter struct { - dst writeFlusher - latency int64 // nanos - - lk sync.Mutex // protects init of done, as well Write + Flush - done chan bool -} - -func (m *maxLatencyWriter) Write(p []byte) (n int, err os.Error) { - m.lk.Lock() - defer m.lk.Unlock() - if m.done == nil { - m.done = make(chan bool) - go m.flushLoop() - } - n, err = m.dst.Write(p) - if err != nil { - m.done <- true - } - return -} - -func (m *maxLatencyWriter) flushLoop() { - t := time.NewTicker(m.latency) - defer t.Stop() - for { - select { - case <-t.C: - m.lk.Lock() - m.dst.Flush() - m.lk.Unlock() - case <-m.done: - return - } - } - panic("unreached") -} diff --git a/src/pkg/http/reverseproxy_test.go b/src/pkg/http/reverseproxy_test.go deleted file mode 100644 index 8078c8d10..000000000 --- a/src/pkg/http/reverseproxy_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Reverse proxy tests. - -package http_test - -import ( - . 
"http" - "http/httptest" - "io/ioutil" - "testing" - "url" -) - -func TestReverseProxy(t *testing.T) { - const backendResponse = "I am the backend" - const backendStatus = 404 - backend := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - if len(r.TransferEncoding) > 0 { - t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding) - } - if r.Header.Get("X-Forwarded-For") == "" { - t.Errorf("didn't get X-Forwarded-For header") - } - if g, e := r.Host, "some-name"; g != e { - t.Errorf("backend got Host header %q, want %q", g, e) - } - w.Header().Set("X-Foo", "bar") - SetCookie(w, &Cookie{Name: "flavor", Value: "chocolateChip"}) - w.WriteHeader(backendStatus) - w.Write([]byte(backendResponse)) - })) - defer backend.Close() - backendURL, err := url.Parse(backend.URL) - if err != nil { - t.Fatal(err) - } - proxyHandler := NewSingleHostReverseProxy(backendURL) - frontend := httptest.NewServer(proxyHandler) - defer frontend.Close() - - getReq, _ := NewRequest("GET", frontend.URL, nil) - getReq.Host = "some-name" - res, err := DefaultClient.Do(getReq) - if err != nil { - t.Fatalf("Get: %v", err) - } - if g, e := res.StatusCode, backendStatus; g != e { - t.Errorf("got res.StatusCode %d; expected %d", g, e) - } - if g, e := res.Header.Get("X-Foo"), "bar"; g != e { - t.Errorf("got X-Foo %q; expected %q", g, e) - } - if g, e := len(res.Header["Set-Cookie"]), 1; g != e { - t.Fatalf("got %d SetCookies, want %d", g, e) - } - if cookie := res.Cookies()[0]; cookie.Name != "flavor" { - t.Errorf("unexpected cookie %q", cookie.Name) - } - bodyBytes, _ := ioutil.ReadAll(res.Body) - if g, e := string(bodyBytes), backendResponse; g != e { - t.Errorf("got body %q; expected %q", g, e) - } -} diff --git a/src/pkg/http/serve_test.go b/src/pkg/http/serve_test.go deleted file mode 100644 index ac0403345..000000000 --- a/src/pkg/http/serve_test.go +++ /dev/null @@ -1,959 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// End-to-end serving tests - -package http_test - -import ( - "bufio" - "bytes" - "fmt" - . 
"http" - "http/httptest" - "io" - "io/ioutil" - "log" - "os" - "net" - "reflect" - "strings" - "syscall" - "testing" - "time" - "url" -) - -type dummyAddr string -type oneConnListener struct { - conn net.Conn -} - -func (l *oneConnListener) Accept() (c net.Conn, err os.Error) { - c = l.conn - if c == nil { - err = os.EOF - return - } - err = nil - l.conn = nil - return -} - -func (l *oneConnListener) Close() os.Error { - return nil -} - -func (l *oneConnListener) Addr() net.Addr { - return dummyAddr("test-address") -} - -func (a dummyAddr) Network() string { - return string(a) -} - -func (a dummyAddr) String() string { - return string(a) -} - -type testConn struct { - readBuf bytes.Buffer - writeBuf bytes.Buffer -} - -func (c *testConn) Read(b []byte) (int, os.Error) { - return c.readBuf.Read(b) -} - -func (c *testConn) Write(b []byte) (int, os.Error) { - return c.writeBuf.Write(b) -} - -func (c *testConn) Close() os.Error { - return nil -} - -func (c *testConn) LocalAddr() net.Addr { - return dummyAddr("local-addr") -} - -func (c *testConn) RemoteAddr() net.Addr { - return dummyAddr("remote-addr") -} - -func (c *testConn) SetTimeout(nsec int64) os.Error { - return nil -} - -func (c *testConn) SetReadTimeout(nsec int64) os.Error { - return nil -} - -func (c *testConn) SetWriteTimeout(nsec int64) os.Error { - return nil -} - -func TestConsumingBodyOnNextConn(t *testing.T) { - conn := new(testConn) - for i := 0; i < 2; i++ { - conn.readBuf.Write([]byte( - "POST / HTTP/1.1\r\n" + - "Host: test\r\n" + - "Content-Length: 11\r\n" + - "\r\n" + - "foo=1&bar=1")) - } - - reqNum := 0 - ch := make(chan *Request) - servech := make(chan os.Error) - listener := &oneConnListener{conn} - handler := func(res ResponseWriter, req *Request) { - reqNum++ - ch <- req - } - - go func() { - servech <- Serve(listener, HandlerFunc(handler)) - }() - - var req *Request - req = <-ch - if req == nil { - t.Fatal("Got nil first request.") - } - if req.Method != "POST" { - t.Errorf("For request #1's method, got %q; expected %q", - req.Method, "POST") - } - - req = <-ch - if req == nil { - t.Fatal("Got nil first request.") - } - if req.Method != "POST" { - t.Errorf("For request #2's method, got %q; expected %q", - req.Method, "POST") - } - - if serveerr := <-servech; serveerr != os.EOF { - t.Errorf("Serve returned %q; expected EOF", serveerr) - } -} - -type stringHandler string - -func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) { - w.Header().Set("Result", string(s)) -} - -var handlers = []struct { - pattern string - msg string -}{ - {"/", "Default"}, - {"/someDir/", "someDir"}, - {"someHost.com/someDir/", "someHost.com/someDir"}, -} - -var vtests = []struct { - url string - expected string -}{ - {"http://localhost/someDir/apage", "someDir"}, - {"http://localhost/otherDir/apage", "Default"}, - {"http://someHost.com/someDir/apage", "someHost.com/someDir"}, - {"http://otherHost.com/someDir/apage", "someDir"}, - {"http://otherHost.com/aDir/apage", "Default"}, -} - -func TestHostHandlers(t *testing.T) { - for _, h := range handlers { - Handle(h.pattern, stringHandler(h.msg)) - } - ts := httptest.NewServer(nil) - defer ts.Close() - - conn, err := net.Dial("tcp", ts.Listener.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - cc := NewClientConn(conn, nil) - for _, vt := range vtests { - var r *Response - var req Request - if req.URL, err = url.Parse(vt.url); err != nil { - t.Errorf("cannot parse url: %v", err) - continue - } - if err := cc.Write(&req); err != nil { - t.Errorf("writing 
request: %v", err) - continue - } - r, err := cc.Read(&req) - if err != nil { - t.Errorf("reading response: %v", err) - continue - } - s := r.Header.Get("Result") - if s != vt.expected { - t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected) - } - } -} - -// Tests for http://code.google.com/p/go/issues/detail?id=900 -func TestMuxRedirectLeadingSlashes(t *testing.T) { - paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"} - for _, path := range paths { - req, err := ReadRequest(bufio.NewReader(bytes.NewBufferString("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n"))) - if err != nil { - t.Errorf("%s", err) - } - mux := NewServeMux() - resp := httptest.NewRecorder() - - mux.ServeHTTP(resp, req) - - if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected { - t.Errorf("Expected Location header set to %q; got %q", expected, loc) - return - } - - if code, expected := resp.Code, StatusMovedPermanently; code != expected { - t.Errorf("Expected response code of StatusMovedPermanently; got %d", code) - return - } - } -} - -func TestServerTimeouts(t *testing.T) { - // TODO(bradfitz): convert this to use httptest.Server - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("listen error: %v", err) - } - addr, _ := l.Addr().(*net.TCPAddr) - - reqNum := 0 - handler := HandlerFunc(func(res ResponseWriter, req *Request) { - reqNum++ - fmt.Fprintf(res, "req=%d", reqNum) - }) - - const second = 1000000000 /* nanos */ - server := &Server{Handler: handler, ReadTimeout: 0.25 * second, WriteTimeout: 0.25 * second} - go server.Serve(l) - - url := fmt.Sprintf("http://%s/", addr) - - // Hit the HTTP server successfully. - tr := &Transport{DisableKeepAlives: true} // they interfere with this test - c := &Client{Transport: tr} - r, err := c.Get(url) - if err != nil { - t.Fatalf("http Get #1: %v", err) - } - got, _ := ioutil.ReadAll(r.Body) - expected := "req=1" - if string(got) != expected { - t.Errorf("Unexpected response for request #1; got %q; expected %q", - string(got), expected) - } - - // Slow client that should timeout. - t1 := time.Nanoseconds() - conn, err := net.Dial("tcp", addr.String()) - if err != nil { - t.Fatalf("Dial: %v", err) - } - buf := make([]byte, 1) - n, err := conn.Read(buf) - latency := time.Nanoseconds() - t1 - if n != 0 || err != os.EOF { - t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, os.EOF) - } - if latency < second*0.20 /* fudge from 0.25 above */ { - t.Errorf("got EOF after %d ns, want >= %d", latency, second*0.20) - } - - // Hit the HTTP server successfully again, verifying that the - // previous slow connection didn't run our handler. 
(that we - // get "req=2", not "req=3") - r, err = Get(url) - if err != nil { - t.Fatalf("http Get #2: %v", err) - } - got, _ = ioutil.ReadAll(r.Body) - expected = "req=2" - if string(got) != expected { - t.Errorf("Get #2 got %q, want %q", string(got), expected) - } - - l.Close() -} - -// TestIdentityResponse verifies that a handler can unset -func TestIdentityResponse(t *testing.T) { - handler := HandlerFunc(func(rw ResponseWriter, req *Request) { - rw.Header().Set("Content-Length", "3") - rw.Header().Set("Transfer-Encoding", req.FormValue("te")) - switch { - case req.FormValue("overwrite") == "1": - _, err := rw.Write([]byte("foo TOO LONG")) - if err != ErrContentLength { - t.Errorf("expected ErrContentLength; got %v", err) - } - case req.FormValue("underwrite") == "1": - rw.Header().Set("Content-Length", "500") - rw.Write([]byte("too short")) - default: - rw.Write([]byte("foo")) - } - }) - - ts := httptest.NewServer(handler) - defer ts.Close() - - // Note: this relies on the assumption (which is true) that - // Get sends HTTP/1.1 or greater requests. Otherwise the - // server wouldn't have the choice to send back chunked - // responses. - for _, te := range []string{"", "identity"} { - url := ts.URL + "/?te=" + te - res, err := Get(url) - if err != nil { - t.Fatalf("error with Get of %s: %v", url, err) - } - if cl, expected := res.ContentLength, int64(3); cl != expected { - t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl) - } - if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected { - t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl) - } - if tl, expected := len(res.TransferEncoding), 0; tl != expected { - t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)", - url, expected, tl, res.TransferEncoding) - } - res.Body.Close() - } - - // Verify that ErrContentLength is returned - url := ts.URL + "/?overwrite=1" - _, err := Get(url) - if err != nil { - t.Fatalf("error with Get of %s: %v", url, err) - } - // Verify that the connection is closed when the declared Content-Length - // is larger than what the handler wrote. 
- conn, err := net.Dial("tcp", ts.Listener.Addr().String()) - if err != nil { - t.Fatalf("error dialing: %v", err) - } - _, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n")) - if err != nil { - t.Fatalf("error writing: %v", err) - } - // The next ReadAll will hang for a failing test, so use a Timer instead - // to fail more traditionally - timer := time.AfterFunc(2e9, func() { - t.Fatalf("Timeout expired in ReadAll.") - }) - defer timer.Stop() - got, _ := ioutil.ReadAll(conn) - expectedSuffix := "\r\n\r\ntoo short" - if !strings.HasSuffix(string(got), expectedSuffix) { - t.Fatalf("Expected output to end with %q; got response body %q", - expectedSuffix, string(got)) - } -} - -func testTcpConnectionCloses(t *testing.T, req string, h Handler) { - s := httptest.NewServer(h) - defer s.Close() - - conn, err := net.Dial("tcp", s.Listener.Addr().String()) - if err != nil { - t.Fatal("dial error:", err) - } - defer conn.Close() - - _, err = fmt.Fprint(conn, req) - if err != nil { - t.Fatal("print error:", err) - } - - r := bufio.NewReader(conn) - _, err = ReadResponse(r, &Request{Method: "GET"}) - if err != nil { - t.Fatal("ReadResponse error:", err) - } - - success := make(chan bool) - go func() { - select { - case <-time.After(5e9): - t.Fatal("body not closed after 5s") - case <-success: - } - }() - - _, err = ioutil.ReadAll(r) - if err != nil { - t.Fatal("read error:", err) - } - - success <- true -} - -// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive. -func TestServeHTTP10Close(t *testing.T) { - testTcpConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { - ServeFile(w, r, "testdata/file") - })) -} - -// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close, -// even for HTTP/1.1 requests. 
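The two tests below drive a handler that forces the connection closed by setting an explicit Connection header. A minimal sketch of such a handler, assuming it is wired up through the same pre-Go 1 "http" package this diff removes (handler name and listen address are illustrative):

package main

import (
	"fmt"
	"http" // pre-Go 1 import path used throughout this diff; "net/http" in later Go
)

// closeHandler asks the server to close the connection after the reply,
// even for HTTP/1.1 requests, by setting an explicit Connection header.
func closeHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Connection", "close")
	fmt.Fprintln(w, "goodbye")
}

func main() {
	http.Handle("/", http.HandlerFunc(closeHandler))
	http.ListenAndServe("127.0.0.1:8080", nil)
}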
-func TestHandlersCanSetConnectionClose11(t *testing.T) { - testTcpConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header().Set("Connection", "close") - })) -} - -func TestHandlersCanSetConnectionClose10(t *testing.T) { - testTcpConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header().Set("Connection", "close") - })) -} - -func TestSetsRemoteAddr(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - fmt.Fprintf(w, "%s", r.RemoteAddr) - })) - defer ts.Close() - - res, err := Get(ts.URL) - if err != nil { - t.Fatalf("Get error: %v", err) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("ReadAll error: %v", err) - } - ip := string(body) - if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") { - t.Fatalf("Expected local addr; got %q", ip) - } -} - -func TestChunkedResponseHeaders(t *testing.T) { - log.SetOutput(ioutil.Discard) // is noisy otherwise - defer log.SetOutput(os.Stderr) - - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header().Set("Content-Length", "intentional gibberish") // we check that this is deleted - fmt.Fprintf(w, "I am a chunked response.") - })) - defer ts.Close() - - res, err := Get(ts.URL) - if err != nil { - t.Fatalf("Get error: %v", err) - } - if g, e := res.ContentLength, int64(-1); g != e { - t.Errorf("expected ContentLength of %d; got %d", e, g) - } - if g, e := res.TransferEncoding, []string{"chunked"}; !reflect.DeepEqual(g, e) { - t.Errorf("expected TransferEncoding of %v; got %v", e, g) - } - if _, haveCL := res.Header["Content-Length"]; haveCL { - t.Errorf("Unexpected Content-Length") - } -} - -// Test304Responses verifies that 304s don't declare that they're -// chunking in their response headers and aren't allowed to produce -// output. -func Test304Responses(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - w.WriteHeader(StatusNotModified) - _, err := w.Write([]byte("illegal body")) - if err != ErrBodyNotAllowed { - t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err) - } - })) - defer ts.Close() - res, err := Get(ts.URL) - if err != nil { - t.Error(err) - } - if len(res.TransferEncoding) > 0 { - t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Error(err) - } - if len(body) > 0 { - t.Errorf("got unexpected body %q", string(body)) - } -} - -// TestHeadResponses verifies that responses to HEAD requests don't -// declare that they're chunking in their response headers and aren't -// allowed to produce output. 
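TestHeadResponses below checks the server side of HEAD; on the client side the same property shows up as a response with headers but no body bytes. A small sketch using the package's own Head helper (the URL is a placeholder, and the nil-Body guard is defensive rather than taken from the deleted sources):

package main

import (
	"fmt"
	"http"      // pre-Go 1 import path; "net/http" in later Go
	"io/ioutil" // old location of ReadAll
)

func main() {
	res, err := http.Head("http://example.com/")
	if err != nil {
		fmt.Println("head error:", err)
		return
	}
	n := 0
	if res.Body != nil {
		b, _ := ioutil.ReadAll(res.Body) // expected to be empty for HEAD
		res.Body.Close()
		n = len(b)
	}
	fmt.Printf("status=%q, body bytes=%d\n", res.Status, n)
}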
-func TestHeadResponses(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - _, err := w.Write([]byte("Ignored body")) - if err != ErrBodyNotAllowed { - t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err) - } - - // Also exercise the ReaderFrom path - _, err = io.Copy(w, strings.NewReader("Ignored body")) - if err != ErrBodyNotAllowed { - t.Errorf("on Copy, expected ErrBodyNotAllowed, got %v", err) - } - })) - defer ts.Close() - res, err := Head(ts.URL) - if err != nil { - t.Error(err) - } - if len(res.TransferEncoding) > 0 { - t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Error(err) - } - if len(body) > 0 { - t.Errorf("got unexpected body %q", string(body)) - } -} - -func TestTLSServer(t *testing.T) { - ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { - if r.TLS != nil { - w.Header().Set("X-TLS-Set", "true") - if r.TLS.HandshakeComplete { - w.Header().Set("X-TLS-HandshakeComplete", "true") - } - } - })) - defer ts.Close() - if !strings.HasPrefix(ts.URL, "https://") { - t.Fatalf("expected test TLS server to start with https://, got %q", ts.URL) - } - res, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if res == nil { - t.Fatalf("got nil Response") - } - defer res.Body.Close() - if res.Header.Get("X-TLS-Set") != "true" { - t.Errorf("expected X-TLS-Set response header") - } - if res.Header.Get("X-TLS-HandshakeComplete") != "true" { - t.Errorf("expected X-TLS-HandshakeComplete header") - } -} - -type serverExpectTest struct { - contentLength int // of request body - expectation string // e.g. "100-continue" - readBody bool // whether handler should read the body (if false, sends StatusUnauthorized) - expectedResponse string // expected substring in first line of http response -} - -var serverExpectTests = []serverExpectTest{ - // Normal 100-continues, case-insensitive. - {100, "100-continue", true, "100 Continue"}, - {100, "100-cOntInUE", true, "100 Continue"}, - - // No 100-continue. - {100, "", true, "200 OK"}, - - // 100-continue but requesting client to deny us, - // so it never reads the body. - {100, "100-continue", false, "401 Unauthorized"}, - // Likewise without 100-continue: - {100, "", false, "401 Unauthorized"}, - - // Non-standard expectations are failures - {0, "a-pony", false, "417 Expectation Failed"}, - - // Expect-100 requested but no body - {0, "100-continue", true, "400 Bad Request"}, -} - -// Tests that the server responds to the "Expect" request header -// correctly. -func TestServerExpect(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - // Note using r.FormValue("readbody") because for POST - // requests that would read from r.Body, which we only - // conditionally want to do. - if strings.Contains(r.URL.RawPath, "readbody=true") { - ioutil.ReadAll(r.Body) - w.Write([]byte("Hi")) - } else { - w.WriteHeader(StatusUnauthorized) - } - })) - defer ts.Close() - - runTest := func(test serverExpectTest) { - conn, err := net.Dial("tcp", ts.Listener.Addr().String()) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer conn.Close() - sendf := func(format string, args ...interface{}) { - _, err := fmt.Fprintf(conn, format, args...) 
- if err != nil { - t.Fatalf("On test %#v, error writing %q: %v", test, format, err) - } - } - go func() { - sendf("POST /?readbody=%v HTTP/1.1\r\n"+ - "Connection: close\r\n"+ - "Content-Length: %d\r\n"+ - "Expect: %s\r\nHost: foo\r\n\r\n", - test.readBody, test.contentLength, test.expectation) - if test.contentLength > 0 && strings.ToLower(test.expectation) != "100-continue" { - body := strings.Repeat("A", test.contentLength) - sendf(body) - } - }() - bufr := bufio.NewReader(conn) - line, err := bufr.ReadString('\n') - if err != nil { - t.Fatalf("ReadString: %v", err) - } - if !strings.Contains(line, test.expectedResponse) { - t.Errorf("for test %#v got first line=%q", test, line) - } - } - - for _, test := range serverExpectTests { - runTest(test) - } -} - -func TestServerConsumesRequestBody(t *testing.T) { - conn := new(testConn) - body := strings.Repeat("x", 1<<20) - conn.readBuf.Write([]byte(fmt.Sprintf( - "POST / HTTP/1.1\r\n"+ - "Host: test\r\n"+ - "Content-Length: %d\r\n"+ - "\r\n", len(body)))) - conn.readBuf.Write([]byte(body)) - - done := make(chan bool) - - ls := &oneConnListener{conn} - go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) { - if conn.readBuf.Len() < len(body)/2 { - t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len()) - } - rw.WriteHeader(200) - if g, e := conn.readBuf.Len(), 0; g != e { - t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e) - } - done <- true - })) - <-done -} - -func TestTimeoutHandler(t *testing.T) { - sendHi := make(chan bool, 1) - writeErrors := make(chan os.Error, 1) - sayHi := HandlerFunc(func(w ResponseWriter, r *Request) { - <-sendHi - _, werr := w.Write([]byte("hi")) - writeErrors <- werr - }) - timeout := make(chan int64, 1) // write to this to force timeouts - ts := httptest.NewServer(NewTestTimeoutHandler(sayHi, timeout)) - defer ts.Close() - - // Succeed without timing out: - sendHi <- true - res, err := Get(ts.URL) - if err != nil { - t.Error(err) - } - if g, e := res.StatusCode, StatusOK; g != e { - t.Errorf("got res.StatusCode %d; expected %d", g, e) - } - body, _ := ioutil.ReadAll(res.Body) - if g, e := string(body), "hi"; g != e { - t.Errorf("got body %q; expected %q", g, e) - } - if g := <-writeErrors; g != nil { - t.Errorf("got unexpected Write error on first request: %v", g) - } - - // Times out: - timeout <- 1 - res, err = Get(ts.URL) - if err != nil { - t.Error(err) - } - if g, e := res.StatusCode, StatusServiceUnavailable; g != e { - t.Errorf("got res.StatusCode %d; expected %d", g, e) - } - body, _ = ioutil.ReadAll(res.Body) - if !strings.Contains(string(body), "<title>Timeout</title>") { - t.Errorf("expected timeout body; got %q", string(body)) - } - - // Now make the previously-timed out handler speak again, - // which verifies the panic is handled: - sendHi <- true - if g, e := <-writeErrors, ErrHandlerTimeout; g != e { - t.Errorf("expected Write error of %v; got %v", e, g) - } -} - -// Verifies we don't path.Clean() on the wrong parts in redirects. 
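The redirect test below relies on Redirect leaving a query string such as ?next=http://bar.com/ untouched while only the path portion is subject to cleaning. A minimal handler sketch of that call (path, handler name, and address are illustrative):

package main

import "http" // pre-Go 1 import path; "net/http" in later Go

// loginRedirect sends a 302 whose Location keeps the query string intact.
func loginRedirect(w http.ResponseWriter, r *http.Request) {
	http.Redirect(w, r, "/foo?next=http://bar.com/", http.StatusFound)
}

func main() {
	http.Handle("/", http.HandlerFunc(loginRedirect))
	http.ListenAndServe("127.0.0.1:8080", nil)
}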
-func TestRedirectMunging(t *testing.T) { - req, _ := NewRequest("GET", "http://example.com/", nil) - - resp := httptest.NewRecorder() - Redirect(resp, req, "/foo?next=http://bar.com/", 302) - if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e { - t.Errorf("Location header was %q; want %q", g, e) - } - - resp = httptest.NewRecorder() - Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302) - if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e { - t.Errorf("Location header was %q; want %q", g, e) - } -} - -// TestZeroLengthPostAndResponse exercises an optimization done by the Transport: -// when there is no body (either because the method doesn't permit a body, or an -// explicit Content-Length of zero is present), then the transport can re-use the -// connection immediately. But when it re-uses the connection, it typically closes -// the previous request's body, which is not optimal for zero-lengthed bodies, -// as the client would then see http.ErrBodyReadAfterClose and not 0, os.EOF. -func TestZeroLengthPostAndResponse(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { - all, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatalf("handler ReadAll: %v", err) - } - if len(all) != 0 { - t.Errorf("handler got %d bytes; expected 0", len(all)) - } - rw.Header().Set("Content-Length", "0") - })) - defer ts.Close() - - req, err := NewRequest("POST", ts.URL, strings.NewReader("")) - if err != nil { - t.Fatal(err) - } - req.ContentLength = 0 - - var resp [5]*Response - for i := range resp { - resp[i], err = DefaultClient.Do(req) - if err != nil { - t.Fatalf("client post #%d: %v", i, err) - } - } - - for i := range resp { - all, err := ioutil.ReadAll(resp[i].Body) - if err != nil { - t.Fatalf("req #%d: client ReadAll: %v", i, err) - } - if len(all) != 0 { - t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all)) - } - } -} - -func TestHandlerPanic(t *testing.T) { - // Unlike the other tests that set the log output to ioutil.Discard - // to quiet the output, this test uses a pipe. The pipe serves three - // purposes: - // - // 1) The log.Print from the http server (generated by the caught - // panic) will go to the pipe instead of stderr, making the - // output quiet. - // - // 2) We read from the pipe to verify that the handler - // actually caught the panic and logged something. - // - // 3) The blocking Read call prevents this TestHandlerPanic - // function from exiting before the HTTP server handler - // finishes crashing. If this text function exited too - // early (and its defer log.SetOutput(os.Stderr) ran), - // then the crash output could spill into the next test. - pr, pw := io.Pipe() - log.SetOutput(pw) - defer log.SetOutput(os.Stderr) - - ts := httptest.NewServer(HandlerFunc(func(ResponseWriter, *Request) { - panic("intentional death for testing") - })) - defer ts.Close() - _, err := Get(ts.URL) - if err == nil { - t.Logf("expected an error") - } - - // Do a blocking read on the log output pipe so its logging - // doesn't bleed into the next test. But wait only 5 seconds - // for it. 
- done := make(chan bool) - go func() { - buf := make([]byte, 1024) - _, err := pr.Read(buf) - pr.Close() - if err != nil { - t.Fatal(err) - } - done <- true - }() - select { - case <-done: - return - case <-time.After(5e9): - t.Fatal("expected server handler to log an error") - } -} - -func TestNoDate(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header()["Date"] = nil - })) - defer ts.Close() - res, err := Get(ts.URL) - if err != nil { - t.Fatal(err) - } - _, present := res.Header["Date"] - if present { - t.Fatalf("Expected no Date header; got %v", res.Header["Date"]) - } -} - -func TestStripPrefix(t *testing.T) { - h := HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header().Set("X-Path", r.URL.Path) - }) - ts := httptest.NewServer(StripPrefix("/foo", h)) - defer ts.Close() - - res, err := Get(ts.URL + "/foo/bar") - if err != nil { - t.Fatal(err) - } - if g, e := res.Header.Get("X-Path"), "/bar"; g != e { - t.Errorf("test 1: got %s, want %s", g, e) - } - - res, err = Get(ts.URL + "/bar") - if err != nil { - t.Fatal(err) - } - if g, e := res.StatusCode, 404; g != e { - t.Errorf("test 2: got status %v, want %v", g, e) - } -} - -func TestRequestLimit(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - t.Fatalf("didn't expect to get request in Handler") - })) - defer ts.Close() - req, _ := NewRequest("GET", ts.URL, nil) - var bytesPerHeader = len("header12345: val12345\r\n") - for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ { - req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i)) - } - res, err := DefaultClient.Do(req) - if err != nil { - // Some HTTP clients may fail on this undefined behavior (server replying and - // closing the connection while the request is still being written), but - // we do support it (at least currently), so we expect a response below. 
- t.Fatalf("Do: %v", err) - } - if res.StatusCode != 400 { - t.Fatalf("expected 400 response status; got: %d %s", res.StatusCode, res.Status) - } -} - -type errorListener struct { - errs []os.Error -} - -func (l *errorListener) Accept() (c net.Conn, err os.Error) { - if len(l.errs) == 0 { - return nil, os.EOF - } - err = l.errs[0] - l.errs = l.errs[1:] - return -} - -func (l *errorListener) Close() os.Error { - return nil -} - -func (l *errorListener) Addr() net.Addr { - return dummyAddr("test-address") -} - -func TestAcceptMaxFds(t *testing.T) { - log.SetOutput(ioutil.Discard) // is noisy otherwise - defer log.SetOutput(os.Stderr) - - ln := &errorListener{[]os.Error{ - &net.OpError{ - Op: "accept", - Error: os.Errno(syscall.EMFILE), - }}} - err := Serve(ln, HandlerFunc(HandlerFunc(func(ResponseWriter, *Request) {}))) - if err != os.EOF { - t.Errorf("got error %v, want EOF", err) - } -} - -func BenchmarkClientServer(b *testing.B) { - b.StopTimer() - ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { - fmt.Fprintf(rw, "Hello world.\n") - })) - defer ts.Close() - b.StartTimer() - - for i := 0; i < b.N; i++ { - res, err := Get(ts.URL) - if err != nil { - panic("Get: " + err.String()) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - panic("ReadAll: " + err.String()) - } - body := string(all) - if body != "Hello world.\n" { - panic("Got body: " + body) - } - } - - b.StopTimer() -} diff --git a/src/pkg/http/server.go b/src/pkg/http/server.go deleted file mode 100644 index b634e27d6..000000000 --- a/src/pkg/http/server.go +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP server. See RFC 2616. - -// TODO(rsc): -// logging - -package http - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/tls" - "fmt" - "io" - "log" - "net" - "os" - "path" - "runtime/debug" - "strconv" - "strings" - "sync" - "time" - "url" -) - -// Errors introduced by the HTTP server. -var ( - ErrWriteAfterFlush = os.NewError("Conn.Write called after Flush") - ErrBodyNotAllowed = os.NewError("http: response status code does not allow body") - ErrHijacked = os.NewError("Conn has been hijacked") - ErrContentLength = os.NewError("Conn.Write wrote more than the declared Content-Length") -) - -// Objects implementing the Handler interface can be -// registered to serve a particular path or subtree -// in the HTTP server. -// -// ServeHTTP should write reply headers and data to the ResponseWriter -// and then return. Returning signals that the request is finished -// and that the HTTP server can move on to the next request on -// the connection. -type Handler interface { - ServeHTTP(ResponseWriter, *Request) -} - -// A ResponseWriter interface is used by an HTTP handler to -// construct an HTTP response. -type ResponseWriter interface { - // Header returns the header map that will be sent by WriteHeader. - // Changing the header after a call to WriteHeader (or Write) has - // no effect. - Header() Header - - // Write writes the data to the connection as part of an HTTP reply. - // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) - // before writing the data. - Write([]byte) (int, os.Error) - - // WriteHeader sends an HTTP response header with status code. - // If WriteHeader is not called explicitly, the first call to Write - // will trigger an implicit WriteHeader(http.StatusOK). 
- // Thus explicit calls to WriteHeader are mainly used to - // send error codes. - WriteHeader(int) -} - -// The Flusher interface is implemented by ResponseWriters that allow -// an HTTP handler to flush buffered data to the client. -// -// Note that even for ResponseWriters that support Flush, -// if the client is connected through an HTTP proxy, -// the buffered data may not reach the client until the response -// completes. -type Flusher interface { - // Flush sends any buffered data to the client. - Flush() -} - -// The Hijacker interface is implemented by ResponseWriters that allow -// an HTTP handler to take over the connection. -type Hijacker interface { - // Hijack lets the caller take over the connection. - // After a call to Hijack(), the HTTP server library - // will not do anything else with the connection. - // It becomes the caller's responsibility to manage - // and close the connection. - Hijack() (net.Conn, *bufio.ReadWriter, os.Error) -} - -// A conn represents the server side of an HTTP connection. -type conn struct { - remoteAddr string // network address of remote side - server *Server // the Server on which the connection arrived - rwc net.Conn // i/o connection - lr *io.LimitedReader // io.LimitReader(rwc) - buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->rwc - hijacked bool // connection has been hijacked by handler - tlsState *tls.ConnectionState // or nil when not using TLS - body []byte -} - -// A response represents the server side of an HTTP response. -type response struct { - conn *conn - req *Request // request for this response - chunking bool // using chunked transfer encoding for reply body - wroteHeader bool // reply header has been written - wroteContinue bool // 100 Continue response was written - header Header // reply header parameters - written int64 // number of bytes written in body - contentLength int64 // explicitly-declared Content-Length; or -1 - status int // status code passed to WriteHeader - needSniff bool // need to sniff to find Content-Type - - // close connection after this reply. set on request and - // updated after response from handler if there's a - // "Connection: keep-alive" response header and a - // Content-Length. - closeAfterReply bool -} - -type writerOnly struct { - io.Writer -} - -func (r *response) ReadFrom(src io.Reader) (n int64, err os.Error) { - // Flush before checking r.chunking, as Flush will call - // WriteHeader if it hasn't been called yet, and WriteHeader - // is what sets r.chunking. - r.Flush() - if !r.chunking && r.bodyAllowed() && !r.needSniff { - if rf, ok := r.conn.rwc.(io.ReaderFrom); ok { - n, err = rf.ReadFrom(src) - r.written += n - return - } - } - // Fall back to default io.Copy implementation. - // Use wrapper to hide r.ReadFrom from io.Copy. - return io.Copy(writerOnly{r}, src) -} - -// noLimit is an effective infinite upper bound for io.LimitedReader -const noLimit int64 = (1 << 63) - 1 - -// Create new connection from rwc. 
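The Flusher and Hijacker interfaces defined above are obtained from a ResponseWriter by type assertion; not every ResponseWriter implements them, so the assertion's ok result must be checked. A short handler sketch showing the Flusher side (names and address are illustrative):

package main

import (
	"fmt"
	"http" // pre-Go 1 import path; "net/http" in later Go
)

// progressHandler streams a few lines, flushing after each one when the
// underlying ResponseWriter supports it.
func progressHandler(w http.ResponseWriter, r *http.Request) {
	f, canFlush := w.(http.Flusher)
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "step %d\n", i)
		if canFlush {
			f.Flush() // push buffered data to the client now
		}
	}
}

func main() {
	http.Handle("/", http.HandlerFunc(progressHandler))
	http.ListenAndServe("127.0.0.1:8080", nil)
}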
-func (srv *Server) newConn(rwc net.Conn) (c *conn, err os.Error) { - c = new(conn) - c.remoteAddr = rwc.RemoteAddr().String() - c.server = srv - c.rwc = rwc - c.body = make([]byte, sniffLen) - c.lr = io.LimitReader(rwc, noLimit).(*io.LimitedReader) - br := bufio.NewReader(c.lr) - bw := bufio.NewWriter(rwc) - c.buf = bufio.NewReadWriter(br, bw) - - if tlsConn, ok := rwc.(*tls.Conn); ok { - tlsConn.Handshake() - c.tlsState = new(tls.ConnectionState) - *c.tlsState = tlsConn.ConnectionState() - } - - return c, nil -} - -// DefaultMaxHeaderBytes is the maximum permitted size of the headers -// in an HTTP request. -// This can be overridden by setting Server.MaxHeaderBytes. -const DefaultMaxHeaderBytes = 1 << 20 // 1 MB - -func (srv *Server) maxHeaderBytes() int { - if srv.MaxHeaderBytes > 0 { - return srv.MaxHeaderBytes - } - return DefaultMaxHeaderBytes -} - -// wrapper around io.ReaderCloser which on first read, sends an -// HTTP/1.1 100 Continue header -type expectContinueReader struct { - resp *response - readCloser io.ReadCloser - closed bool -} - -func (ecr *expectContinueReader) Read(p []byte) (n int, err os.Error) { - if ecr.closed { - return 0, os.NewError("http: Read after Close on request Body") - } - if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked { - ecr.resp.wroteContinue = true - io.WriteString(ecr.resp.conn.buf, "HTTP/1.1 100 Continue\r\n\r\n") - ecr.resp.conn.buf.Flush() - } - return ecr.readCloser.Read(p) -} - -func (ecr *expectContinueReader) Close() os.Error { - ecr.closed = true - return ecr.readCloser.Close() -} - -// TimeFormat is the time format to use with -// time.Parse and time.Time.Format when parsing -// or generating times in HTTP headers. -// It is like time.RFC1123 but hard codes GMT as the time zone. -const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - -var errTooLarge = os.NewError("http: request too large") - -// Read next request from connection. -func (c *conn) readRequest() (w *response, err os.Error) { - if c.hijacked { - return nil, ErrHijacked - } - c.lr.N = int64(c.server.maxHeaderBytes()) + 4096 /* bufio slop */ - var req *Request - if req, err = ReadRequest(c.buf.Reader); err != nil { - if c.lr.N == 0 { - return nil, errTooLarge - } - return nil, err - } - c.lr.N = noLimit - - req.RemoteAddr = c.remoteAddr - req.TLS = c.tlsState - - w = new(response) - w.conn = c - w.req = req - w.header = make(Header) - w.contentLength = -1 - c.body = c.body[:0] - return w, nil -} - -func (w *response) Header() Header { - return w.header -} - -func (w *response) WriteHeader(code int) { - if w.conn.hijacked { - log.Print("http: response.WriteHeader on hijacked connection") - return - } - if w.wroteHeader { - log.Print("http: multiple response.WriteHeader calls") - return - } - - // Per RFC 2616, we should consume the request body before - // replying, if the handler hasn't already done so. - if w.req.ContentLength != 0 { - ecr, isExpecter := w.req.Body.(*expectContinueReader) - if !isExpecter || ecr.resp.wroteContinue { - w.req.Body.Close() - } - } - - w.wroteHeader = true - w.status = code - if code == StatusNotModified { - // Must not have body. - for _, header := range []string{"Content-Type", "Content-Length", "Transfer-Encoding"} { - if w.header.Get(header) != "" { - // TODO: return an error if WriteHeader gets a return parameter - // or set a flag on w to make future Writes() write an error page? - // for now just log and drop the header. 
- log.Printf("http: StatusNotModified response with header %q defined", header) - w.header.Del(header) - } - } - } else { - // If no content type, apply sniffing algorithm to body. - if w.header.Get("Content-Type") == "" { - w.needSniff = true - } - } - - if _, ok := w.header["Date"]; !ok { - w.Header().Set("Date", time.UTC().Format(TimeFormat)) - } - - // Check for a explicit (and valid) Content-Length header. - var hasCL bool - var contentLength int64 - if clenStr := w.header.Get("Content-Length"); clenStr != "" { - var err os.Error - contentLength, err = strconv.Atoi64(clenStr) - if err == nil { - hasCL = true - } else { - log.Printf("http: invalid Content-Length of %q sent", clenStr) - w.header.Del("Content-Length") - } - } - - te := w.header.Get("Transfer-Encoding") - hasTE := te != "" - if hasCL && hasTE && te != "identity" { - // TODO: return an error if WriteHeader gets a return parameter - // For now just ignore the Content-Length. - log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", - te, contentLength) - w.header.Del("Content-Length") - hasCL = false - } - - if w.req.Method == "HEAD" || code == StatusNotModified { - // do nothing - } else if hasCL { - w.contentLength = contentLength - w.header.Del("Transfer-Encoding") - } else if w.req.ProtoAtLeast(1, 1) { - // HTTP/1.1 or greater: use chunked transfer encoding - // to avoid closing the connection at EOF. - // TODO: this blows away any custom or stacked Transfer-Encoding they - // might have set. Deal with that as need arises once we have a valid - // use case. - w.chunking = true - w.header.Set("Transfer-Encoding", "chunked") - } else { - // HTTP version < 1.1: cannot do chunked transfer - // encoding and we don't know the Content-Length so - // signal EOF by closing connection. - w.closeAfterReply = true - w.header.Del("Transfer-Encoding") // in case already set - } - - if w.req.wantsHttp10KeepAlive() && (w.req.Method == "HEAD" || hasCL) { - _, connectionHeaderSet := w.header["Connection"] - if !connectionHeaderSet { - w.header.Set("Connection", "keep-alive") - } - } else if !w.req.ProtoAtLeast(1, 1) { - // Client did not ask to keep connection alive. - w.closeAfterReply = true - } - - if w.header.Get("Connection") == "close" { - w.closeAfterReply = true - } - - // Cannot use Content-Length with non-identity Transfer-Encoding. - if w.chunking { - w.header.Del("Content-Length") - } - if !w.req.ProtoAtLeast(1, 0) { - return - } - proto := "HTTP/1.0" - if w.req.ProtoAtLeast(1, 1) { - proto = "HTTP/1.1" - } - codestring := strconv.Itoa(code) - text, ok := statusText[code] - if !ok { - text = "status code " + codestring - } - io.WriteString(w.conn.buf, proto+" "+codestring+" "+text+"\r\n") - w.header.Write(w.conn.buf) - - // If we need to sniff the body, leave the header open. - // Otherwise, end it here. - if !w.needSniff { - io.WriteString(w.conn.buf, "\r\n") - } -} - -// sniff uses the first block of written data, -// stored in w.conn.body, to decide the Content-Type -// for the HTTP body. -func (w *response) sniff() { - if !w.needSniff { - return - } - w.needSniff = false - - data := w.conn.body - fmt.Fprintf(w.conn.buf, "Content-Type: %s\r\n\r\n", DetectContentType(data)) - - if len(data) == 0 { - return - } - if w.chunking { - fmt.Fprintf(w.conn.buf, "%x\r\n", len(data)) - } - _, err := w.conn.buf.Write(data) - if w.chunking && err == nil { - io.WriteString(w.conn.buf, "\r\n") - } -} - -// bodyAllowed returns true if a Write is allowed for this response type. 
-// It's illegal to call this before the header has been flushed. -func (w *response) bodyAllowed() bool { - if !w.wroteHeader { - panic("") - } - return w.status != StatusNotModified && w.req.Method != "HEAD" -} - -func (w *response) Write(data []byte) (n int, err os.Error) { - if w.conn.hijacked { - log.Print("http: response.Write on hijacked connection") - return 0, ErrHijacked - } - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - if len(data) == 0 { - return 0, nil - } - if !w.bodyAllowed() { - return 0, ErrBodyNotAllowed - } - - w.written += int64(len(data)) // ignoring errors, for errorKludge - if w.contentLength != -1 && w.written > w.contentLength { - return 0, ErrContentLength - } - - var m int - if w.needSniff { - // We need to sniff the beginning of the output to - // determine the content type. Accumulate the - // initial writes in w.conn.body. - // Cap m so that append won't allocate. - m := cap(w.conn.body) - len(w.conn.body) - if m > len(data) { - m = len(data) - } - w.conn.body = append(w.conn.body, data[:m]...) - data = data[m:] - if len(data) == 0 { - // Copied everything into the buffer. - // Wait for next write. - return m, nil - } - - // Filled the buffer; more data remains. - // Sniff the content (flushes the buffer) - // and then proceed with the remainder - // of the data as a normal Write. - // Calling sniff clears needSniff. - w.sniff() - } - - // TODO(rsc): if chunking happened after the buffering, - // then there would be fewer chunk headers. - // On the other hand, it would make hijacking more difficult. - if w.chunking { - fmt.Fprintf(w.conn.buf, "%x\r\n", len(data)) // TODO(rsc): use strconv not fmt - } - n, err = w.conn.buf.Write(data) - if err == nil && w.chunking { - if n != len(data) { - err = io.ErrShortWrite - } - if err == nil { - io.WriteString(w.conn.buf, "\r\n") - } - } - - return m + n, err -} - -// If this is an error reply (4xx or 5xx) -// and the handler wrote some data explaining the error, -// some browsers (i.e., Chrome, Internet Explorer) -// will show their own error instead unless the error is -// long enough. The minimum lengths used in those -// browsers are in the 256-512 range. -// Pad to 1024 bytes. -func errorKludge(w *response) { - const min = 1024 - - // Is this an error? - if kind := w.status / 100; kind != 4 && kind != 5 { - return - } - - // Did the handler supply any info? Enough? - if w.written == 0 || w.written >= min { - return - } - - // Is it a broken browser? - var msg string - switch agent := w.req.UserAgent(); { - case strings.Contains(agent, "MSIE"): - msg = "Internet Explorer" - case strings.Contains(agent, "Chrome/"): - msg = "Chrome" - default: - return - } - msg += " would ignore this error page if this text weren't here.\n" - - // Is it text? ("Content-Type" is always in the map) - baseType := strings.SplitN(w.header.Get("Content-Type"), ";", 2)[0] - switch baseType { - case "text/html": - io.WriteString(w, "<!-- ") - for w.written < min { - io.WriteString(w, msg) - } - io.WriteString(w, " -->") - case "text/plain": - io.WriteString(w, "\n") - for w.written < min { - io.WriteString(w, msg) - } - } -} - -func (w *response) finishRequest() { - // If this was an HTTP/1.0 request with keep-alive and we sent a Content-Length - // back, we can make this a keep-alive response ... 
- if w.req.wantsHttp10KeepAlive() { - sentLength := w.header.Get("Content-Length") != "" - if sentLength && w.header.Get("Connection") == "keep-alive" { - w.closeAfterReply = false - } - } - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - if w.needSniff { - w.sniff() - } - errorKludge(w) - if w.chunking { - io.WriteString(w.conn.buf, "0\r\n") - // trailer key/value pairs, followed by blank line - io.WriteString(w.conn.buf, "\r\n") - } - w.conn.buf.Flush() - w.req.Body.Close() - if w.req.MultipartForm != nil { - w.req.MultipartForm.RemoveAll() - } - - if w.contentLength != -1 && w.contentLength != w.written { - // Did not write enough. Avoid getting out of sync. - w.closeAfterReply = true - } -} - -func (w *response) Flush() { - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - w.sniff() - w.conn.buf.Flush() -} - -// Close the connection. -func (c *conn) close() { - if c.buf != nil { - c.buf.Flush() - c.buf = nil - } - if c.rwc != nil { - c.rwc.Close() - c.rwc = nil - } -} - -// Serve a new connection. -func (c *conn) serve() { - defer func() { - err := recover() - if err == nil { - return - } - c.rwc.Close() - - var buf bytes.Buffer - fmt.Fprintf(&buf, "http: panic serving %v: %v\n", c.remoteAddr, err) - buf.Write(debug.Stack()) - log.Print(buf.String()) - }() - - for { - w, err := c.readRequest() - if err != nil { - if err == errTooLarge { - // Their HTTP client may or may not be - // able to read this if we're - // responding to them and hanging up - // while they're still writing their - // request. Undefined behavior. - fmt.Fprintf(c.rwc, "HTTP/1.1 400 Request Too Large\r\n\r\n") - } - break - } - - // Expect 100 Continue support - req := w.req - if req.expectsContinue() { - if req.ProtoAtLeast(1, 1) { - // Wrap the Body reader with one that replies on the connection - req.Body = &expectContinueReader{readCloser: req.Body, resp: w} - } - if req.ContentLength == 0 { - w.Header().Set("Connection", "close") - w.WriteHeader(StatusBadRequest) - w.finishRequest() - break - } - req.Header.Del("Expect") - } else if req.Header.Get("Expect") != "" { - // TODO(bradfitz): let ServeHTTP handlers handle - // requests with non-standard expectation[s]? Seems - // theoretical at best, and doesn't fit into the - // current ServeHTTP model anyway. We'd need to - // make the ResponseWriter an optional - // "ExpectReplier" interface or something. - // - // For now we'll just obey RFC 2616 14.20 which says - // "If a server receives a request containing an - // Expect field that includes an expectation- - // extension that it does not support, it MUST - // respond with a 417 (Expectation Failed) status." - w.Header().Set("Connection", "close") - w.WriteHeader(StatusExpectationFailed) - w.finishRequest() - break - } - - handler := c.server.Handler - if handler == nil { - handler = DefaultServeMux - } - - // HTTP cannot have multiple simultaneous active requests.[*] - // Until the server replies to this request, it can't read another, - // so we might as well run the handler in this goroutine. - // [*] Not strictly true: HTTP pipelining. We could let them all process - // in parallel even if their responses need to be serialized. - handler.ServeHTTP(w, w.req) - if c.hijacked { - return - } - w.finishRequest() - if w.closeAfterReply { - break - } - } - c.close() -} - -// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter -// and a Hijacker. 
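From inside a handler, the Hijack method documented above (and defined just below) is used roughly as follows. This is a sketch, not part of the deleted sources; the raw reply written on the hijacked connection is arbitrary.

    package main

    import (
        "http"
        "io"
    )

    // takeOver switches from the HTTP server to a raw TCP conversation.
    func takeOver(w http.ResponseWriter, r *http.Request) {
        hj, ok := w.(http.Hijacker)
        if !ok {
            http.Error(w, "hijacking not supported", http.StatusInternalServerError)
            return
        }
        conn, buf, err := hj.Hijack()
        if err != nil {
            return
        }
        defer conn.Close()
        // From here on the server does nothing further with the connection;
        // we must speak the wire protocol (and close) ourselves.
        io.WriteString(buf, "HTTP/1.0 200 OK\r\nContent-Length: 3\r\n\r\nhi\n")
        buf.Flush()
    }

    func main() {
        http.HandleFunc("/raw", takeOver)
        http.ListenAndServe(":8080", nil)
    }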
-func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err os.Error) { - if w.conn.hijacked { - return nil, nil, ErrHijacked - } - w.conn.hijacked = true - rwc = w.conn.rwc - buf = w.conn.buf - w.conn.rwc = nil - w.conn.buf = nil - return -} - -// The HandlerFunc type is an adapter to allow the use of -// ordinary functions as HTTP handlers. If f is a function -// with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(ResponseWriter, *Request) - -// ServeHTTP calls f(w, r). -func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { - f(w, r) -} - -// Helper handlers - -// Error replies to the request with the specified error message and HTTP code. -func Error(w ResponseWriter, error string, code int) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(code) - fmt.Fprintln(w, error) -} - -// NotFound replies to the request with an HTTP 404 not found error. -func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } - -// NotFoundHandler returns a simple request handler -// that replies to each request with a ``404 page not found'' reply. -func NotFoundHandler() Handler { return HandlerFunc(NotFound) } - -// StripPrefix returns a handler that serves HTTP requests -// by removing the given prefix from the request URL's Path -// and invoking the handler h. StripPrefix handles a -// request for a path that doesn't begin with prefix by -// replying with an HTTP 404 not found error. -func StripPrefix(prefix string, h Handler) Handler { - return HandlerFunc(func(w ResponseWriter, r *Request) { - if !strings.HasPrefix(r.URL.Path, prefix) { - NotFound(w, r) - return - } - r.URL.Path = r.URL.Path[len(prefix):] - h.ServeHTTP(w, r) - }) -} - -// Redirect replies to the request with a redirect to url, -// which may be a path relative to the request path. -func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { - if u, err := url.Parse(urlStr); err == nil { - // If url was relative, make absolute by - // combining with request path. - // The browser would probably do this for us, - // but doing it ourselves is more reliable. - - // NOTE(rsc): RFC 2616 says that the Location - // line must be an absolute URI, like - // "http://www.google.com/redirect/", - // not a path like "/redirect/". - // Unfortunately, we don't know what to - // put in the host name section to get the - // client to connect to us again, so we can't - // know the right absolute URI to send back. - // Because of this problem, no one pays attention - // to the RFC; they all send back just a new path. - // So do we. - oldpath := r.URL.Path - if oldpath == "" { // should not happen, but avoid a crash if it does - oldpath = "/" - } - if u.Scheme == "" { - // no leading http://server - if urlStr == "" || urlStr[0] != '/' { - // make relative path absolute - olddir, _ := path.Split(oldpath) - urlStr = olddir + urlStr - } - - var query string - if i := strings.Index(urlStr, "?"); i != -1 { - urlStr, query = urlStr[:i], urlStr[i:] - } - - // clean up but preserve trailing slash - trailing := urlStr[len(urlStr)-1] == '/' - urlStr = path.Clean(urlStr) - if trailing && urlStr[len(urlStr)-1] != '/' { - urlStr += "/" - } - urlStr += query - } - } - - w.Header().Set("Location", urlStr) - w.WriteHeader(code) - - // RFC2616 recommends that a short note "SHOULD" be included in the - // response because older user agents may not understand 301/307. 
- // Shouldn't send the response for POST or HEAD; that leaves GET. - if r.Method == "GET" { - note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n" - fmt.Fprintln(w, note) - } -} - -func htmlEscape(s string) string { - s = strings.Replace(s, "&", "&amp;", -1) - s = strings.Replace(s, "<", "&lt;", -1) - s = strings.Replace(s, ">", "&gt;", -1) - s = strings.Replace(s, "\"", "&quot;", -1) - s = strings.Replace(s, "'", "&#39;", -1) - return s -} - -// Redirect to a fixed URL -type redirectHandler struct { - url string - code int -} - -func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { - Redirect(w, r, rh.url, rh.code) -} - -// RedirectHandler returns a request handler that redirects -// each request it receives to the given url using the given -// status code. -func RedirectHandler(url string, code int) Handler { - return &redirectHandler{url, code} -} - -// ServeMux is an HTTP request multiplexer. -// It matches the URL of each incoming request against a list of registered -// patterns and calls the handler for the pattern that -// most closely matches the URL. -// -// Patterns name fixed, rooted paths, like "/favicon.ico", -// or rooted subtrees, like "/images/" (note the trailing slash). -// Longer patterns take precedence over shorter ones, so that -// if there are handlers registered for both "/images/" -// and "/images/thumbnails/", the latter handler will be -// called for paths beginning "/images/thumbnails/" and the -// former will receive requests for any other paths in the -// "/images/" subtree. -// -// Patterns may optionally begin with a host name, restricting matches to -// URLs on that host only. Host-specific patterns take precedence over -// general patterns, so that a handler might register for the two patterns -// "/codesearch" and "codesearch.google.com/" without also taking over -// requests for "http://www.google.com/". -// -// ServeMux also takes care of sanitizing the URL request path, -// redirecting any request containing . or .. elements to an -// equivalent .- and ..-free URL. -type ServeMux struct { - m map[string]Handler -} - -// NewServeMux allocates and returns a new ServeMux. -func NewServeMux() *ServeMux { return &ServeMux{make(map[string]Handler)} } - -// DefaultServeMux is the default ServeMux used by Serve. -var DefaultServeMux = NewServeMux() - -// Does path match pattern? -func pathMatch(pattern, path string) bool { - if len(pattern) == 0 { - // should not happen - return false - } - n := len(pattern) - if pattern[n-1] != '/' { - return pattern == path - } - return len(path) >= n && path[0:n] == pattern -} - -// Return the canonical path for p, eliminating . and .. elements. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// Find a handler on a handler map given a path string -// Most-specific (longest) pattern wins -func (mux *ServeMux) match(path string) Handler { - var h Handler - var n = 0 - for k, v := range mux.m { - if !pathMatch(k, path) { - continue - } - if h == nil || len(k) > n { - n = len(k) - h = v - } - } - return h -} - -// ServeHTTP dispatches the request to the handler whose -// pattern most closely matches the request URL. -func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { - // Clean path to canonical form and redirect.
- if p := cleanPath(r.URL.Path); p != r.URL.Path { - w.Header().Set("Location", p) - w.WriteHeader(StatusMovedPermanently) - return - } - // Host-specific pattern takes precedence over generic ones - h := mux.match(r.Host + r.URL.Path) - if h == nil { - h = mux.match(r.URL.Path) - } - if h == nil { - h = NotFoundHandler() - } - h.ServeHTTP(w, r) -} - -// Handle registers the handler for the given pattern. -func (mux *ServeMux) Handle(pattern string, handler Handler) { - if pattern == "" { - panic("http: invalid pattern " + pattern) - } - - mux.m[pattern] = handler - - // Helpful behavior: - // If pattern is /tree/, insert permanent redirect for /tree. - n := len(pattern) - if n > 0 && pattern[n-1] == '/' { - mux.m[pattern[0:n-1]] = RedirectHandler(pattern, StatusMovedPermanently) - } -} - -// HandleFunc registers the handler function for the given pattern. -func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { - mux.Handle(pattern, HandlerFunc(handler)) -} - -// Handle registers the handler for the given pattern -// in the DefaultServeMux. -// The documentation for ServeMux explains how patterns are matched. -func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } - -// HandleFunc registers the handler function for the given pattern -// in the DefaultServeMux. -// The documentation for ServeMux explains how patterns are matched. -func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { - DefaultServeMux.HandleFunc(pattern, handler) -} - -// Serve accepts incoming HTTP connections on the listener l, -// creating a new service thread for each. The service threads -// read requests and then call handler to reply to them. -// Handler is typically nil, in which case the DefaultServeMux is used. -func Serve(l net.Listener, handler Handler) os.Error { - srv := &Server{Handler: handler} - return srv.Serve(l) -} - -// A Server defines parameters for running an HTTP server. -type Server struct { - Addr string // TCP address to listen on, ":http" if empty - Handler Handler // handler to invoke, http.DefaultServeMux if nil - ReadTimeout int64 // the net.Conn.SetReadTimeout value for new connections - WriteTimeout int64 // the net.Conn.SetWriteTimeout value for new connections - MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 -} - -// ListenAndServe listens on the TCP network address srv.Addr and then -// calls Serve to handle requests on incoming connections. If -// srv.Addr is blank, ":http" is used. -func (srv *Server) ListenAndServe() os.Error { - addr := srv.Addr - if addr == "" { - addr = ":http" - } - l, e := net.Listen("tcp", addr) - if e != nil { - return e - } - return srv.Serve(l) -} - -// Serve accepts incoming connections on the Listener l, creating a -// new service thread for each. The service threads read requests and -// then call srv.Handler to reply to them. 
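Tying the ServeMux and Server pieces above together, here is a hand-written sketch (not from the deleted sources; paths, port, and timeout values are arbitrary) of the longest-pattern-wins behaviour and of the Server knobs, with timeouts given in nanoseconds as the int64 fields above require.

    package main

    import (
        "fmt"
        "http"
    )

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "generic image handler")
        })
        mux.HandleFunc("/images/thumbnails/", func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "thumbnail handler") // longer pattern wins for /images/thumbnails/...
        })

        srv := &http.Server{
            Addr:           ":8080",
            Handler:        mux,
            ReadTimeout:    10e9, // 10s, expressed in nanoseconds
            WriteTimeout:   10e9,
            MaxHeaderBytes: 1 << 16,
        }
        err := srv.ListenAndServe()
        if err != nil {
            panic(err.String())
        }
    }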
-func (srv *Server) Serve(l net.Listener) os.Error { - defer l.Close() - for { - rw, e := l.Accept() - if e != nil { - if ne, ok := e.(net.Error); ok && ne.Temporary() { - log.Printf("http: Accept error: %v", e) - continue - } - return e - } - if srv.ReadTimeout != 0 { - rw.SetReadTimeout(srv.ReadTimeout) - } - if srv.WriteTimeout != 0 { - rw.SetWriteTimeout(srv.WriteTimeout) - } - c, err := srv.newConn(rw) - if err != nil { - continue - } - go c.serve() - } - panic("not reached") -} - -// ListenAndServe listens on the TCP network address addr -// and then calls Serve with handler to handle requests -// on incoming connections. Handler is typically nil, -// in which case the DefaultServeMux is used. -// -// A trivial example server is: -// -// package main -// -// import ( -// "http" -// "io" -// "log" -// ) -// -// // hello world, the web server -// func HelloServer(w http.ResponseWriter, req *http.Request) { -// io.WriteString(w, "hello, world!\n") -// } -// -// func main() { -// http.HandleFunc("/hello", HelloServer) -// err := http.ListenAndServe(":12345", nil) -// if err != nil { -// log.Fatal("ListenAndServe: ", err.String()) -// } -// } -func ListenAndServe(addr string, handler Handler) os.Error { - server := &Server{Addr: addr, Handler: handler} - return server.ListenAndServe() -} - -// ListenAndServeTLS acts identically to ListenAndServe, except that it -// expects HTTPS connections. Additionally, files containing a certificate and -// matching private key for the server must be provided. If the certificate -// is signed by a certificate authority, the certFile should be the concatenation -// of the server's certificate followed by the CA's certificate. -// -// A trivial example server is: -// -// import ( -// "http" -// "log" -// ) -// -// func handler(w http.ResponseWriter, req *http.Request) { -// w.Header().Set("Content-Type", "text/plain") -// w.Write([]byte("This is an example server.\n")) -// } -// -// func main() { -// http.HandleFunc("/", handler) -// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") -// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) -// if err != nil { -// log.Fatal(err) -// } -// } -// -// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. -func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) os.Error { - server := &Server{Addr: addr, Handler: handler} - return server.ListenAndServeTLS(certFile, keyFile) -} - -// ListenAndServeTLS listens on the TCP network address srv.Addr and -// then calls Serve to handle requests on incoming TLS connections. -// -// Filenames containing a certificate and matching private key for -// the server must be provided. If the certificate is signed by a -// certificate authority, the certFile should be the concatenation -// of the server's certificate followed by the CA's certificate. -// -// If srv.Addr is blank, ":https" is used. 
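The package-level helpers shown above have per-Server equivalents. A sketch (not from the deleted sources; the certificate file names and address are placeholders) of serving TLS from an explicitly constructed Server:

    package main

    import (
        "http"
        "log"
    )

    func main() {
        srv := &http.Server{
            Addr:    ":10443",
            Handler: http.DefaultServeMux, // or any Handler
        }
        // cert.pem and key.pem are placeholder paths; see the note above about
        // generate_cert.go in crypto/tls.
        err := srv.ListenAndServeTLS("cert.pem", "key.pem")
        if err != nil {
            log.Fatal("ListenAndServeTLS: ", err.String())
        }
    }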
-func (s *Server) ListenAndServeTLS(certFile, keyFile string) os.Error { - addr := s.Addr - if addr == "" { - addr = ":https" - } - config := &tls.Config{ - Rand: rand.Reader, - Time: time.Seconds, - NextProtos: []string{"http/1.1"}, - } - - var err os.Error - config.Certificates = make([]tls.Certificate, 1) - config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - - conn, err := net.Listen("tcp", addr) - if err != nil { - return err - } - - tlsListener := tls.NewListener(conn, config) - return s.Serve(tlsListener) -} - -// TimeoutHandler returns a Handler that runs h with the given time limit. -// -// The new Handler calls h.ServeHTTP to handle each request, but if a -// call runs for more than ns nanoseconds, the handler responds with -// a 503 Service Unavailable error and the given message in its body. -// (If msg is empty, a suitable default message will be sent.) -// After such a timeout, writes by h to its ResponseWriter will return -// ErrHandlerTimeout. -func TimeoutHandler(h Handler, ns int64, msg string) Handler { - f := func() <-chan int64 { - return time.After(ns) - } - return &timeoutHandler{h, f, msg} -} - -// ErrHandlerTimeout is returned on ResponseWriter Write calls -// in handlers which have timed out. -var ErrHandlerTimeout = os.NewError("http: Handler timeout") - -type timeoutHandler struct { - handler Handler - timeout func() <-chan int64 // returns channel producing a timeout - body string -} - -func (h *timeoutHandler) errorBody() string { - if h.body != "" { - return h.body - } - return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" -} - -func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { - done := make(chan bool) - tw := &timeoutWriter{w: w} - go func() { - h.handler.ServeHTTP(tw, r) - done <- true - }() - select { - case <-done: - return - case <-h.timeout(): - tw.mu.Lock() - defer tw.mu.Unlock() - if !tw.wroteHeader { - tw.w.WriteHeader(StatusServiceUnavailable) - tw.w.Write([]byte(h.errorBody())) - } - tw.timedOut = true - } -} - -type timeoutWriter struct { - w ResponseWriter - - mu sync.Mutex - timedOut bool - wroteHeader bool -} - -func (tw *timeoutWriter) Header() Header { - return tw.w.Header() -} - -func (tw *timeoutWriter) Write(p []byte) (int, os.Error) { - tw.mu.Lock() - timedOut := tw.timedOut - tw.mu.Unlock() - if timedOut { - return 0, ErrHandlerTimeout - } - return tw.w.Write(p) -} - -func (tw *timeoutWriter) WriteHeader(code int) { - tw.mu.Lock() - if tw.timedOut || tw.wroteHeader { - tw.mu.Unlock() - return - } - tw.wroteHeader = true - tw.mu.Unlock() - tw.w.WriteHeader(code) -} diff --git a/src/pkg/http/sniff.go b/src/pkg/http/sniff.go deleted file mode 100644 index d60868750..000000000 --- a/src/pkg/http/sniff.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "encoding/binary" -) - -// Content-type sniffing algorithm. -// References in this file refer to this draft specification: -// http://tools.ietf.org/html/draft-ietf-websec-mime-sniff-03 - -// The algorithm prefers to use sniffLen bytes to make its decision. -const sniffLen = 512 - -// DetectContentType returns the sniffed Content-Type string -// for the given data. This function always returns a valid MIME type. 
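To make the sniffing entry point described above concrete, a quick sketch (not from the deleted sources): DetectContentType can be called directly, and the server's Write path shown earlier calls it automatically whenever a handler omits Content-Type.

    package main

    import (
        "fmt"
        "http"
    )

    func main() {
        // Direct use: a GIF signature from the table that follows is recognized.
        fmt.Println(http.DetectContentType([]byte("GIF89a..."))) // image/gif

        // Indirect use: no Content-Type is set, so the server buffers the first
        // write and sniffs it before sending the header.
        http.HandleFunc("/logo", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("GIF89a...")) // reply goes out as image/gif
        })
        http.ListenAndServe(":8080", nil)
    }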
-func DetectContentType(data []byte) string { - if len(data) > sniffLen { - data = data[:sniffLen] - } - - // Index of the first non-whitespace byte in data. - firstNonWS := 0 - for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ { - } - - for _, sig := range sniffSignatures { - if ct := sig.match(data, firstNonWS); ct != "" { - return ct - } - } - - return "application/octet-stream" // fallback -} - -func isWS(b byte) bool { - return bytes.IndexByte([]byte("\t\n\x0C\n "), b) != -1 -} - -type sniffSig interface { - // match returns the MIME type of the data, or "" if unknown. - match(data []byte, firstNonWS int) string -} - -// Data matching the table in section 6. -var sniffSignatures = []sniffSig{ - htmlSig([]byte("<!DOCTYPE HTML")), - htmlSig([]byte("<HTML")), - htmlSig([]byte("<HEAD")), - htmlSig([]byte("<SCRIPT")), - htmlSig([]byte("<IFRAME")), - htmlSig([]byte("<H1")), - htmlSig([]byte("<DIV")), - htmlSig([]byte("<FONT")), - htmlSig([]byte("<TABLE")), - htmlSig([]byte("<A")), - htmlSig([]byte("<STYLE")), - htmlSig([]byte("<TITLE")), - htmlSig([]byte("<B")), - htmlSig([]byte("<BODY")), - htmlSig([]byte("<BR")), - htmlSig([]byte("<P")), - htmlSig([]byte("<!--")), - - &maskedSig{mask: []byte("\xFF\xFF\xFF\xFF\xFF"), pat: []byte("<?xml"), skipWS: true, ct: "text/xml; charset=utf-8"}, - - &exactSig{[]byte("%PDF-"), "application/pdf"}, - &exactSig{[]byte("%!PS-Adobe-"), "application/postscript"}, - - // UTF BOMs. - &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFE\xFF\x00\x00"), ct: "text/plain; charset=utf-16be"}, - &maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFF\xFE\x00\x00"), ct: "text/plain; charset=utf-16le"}, - &maskedSig{mask: []byte("\xFF\xFF\xFF\x00"), pat: []byte("\xEF\xBB\xBF\x00"), ct: "text/plain; charset=utf-8"}, - - &exactSig{[]byte("GIF87a"), "image/gif"}, - &exactSig{[]byte("GIF89a"), "image/gif"}, - &exactSig{[]byte("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), "image/png"}, - &exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"}, - &exactSig{[]byte("BM"), "image/bmp"}, - &maskedSig{ - mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"), - pat: []byte("RIFF\x00\x00\x00\x00WEBPVP"), - ct: "image/webp", - }, - &exactSig{[]byte("\x00\x00\x01\x00"), "image/vnd.microsoft.icon"}, - &exactSig{[]byte("\x4F\x67\x67\x53\x00"), "application/ogg"}, - &maskedSig{ - mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"), - pat: []byte("RIFF\x00\x00\x00\x00WAVE"), - ct: "audio/wave", - }, - &exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"}, - &exactSig{[]byte("\x52\x61\x72\x20\x1A\x07\x00"), "application/x-rar-compressed"}, - &exactSig{[]byte("\x50\x4B\x03\x04"), "application/zip"}, - &exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"}, - - // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4. 
- //mp4Sig(0), - - textSig(0), // should be last -} - -type exactSig struct { - sig []byte - ct string -} - -func (e *exactSig) match(data []byte, firstNonWS int) string { - if bytes.HasPrefix(data, e.sig) { - return e.ct - } - return "" -} - -type maskedSig struct { - mask, pat []byte - skipWS bool - ct string -} - -func (m *maskedSig) match(data []byte, firstNonWS int) string { - if m.skipWS { - data = data[firstNonWS:] - } - if len(data) < len(m.mask) { - return "" - } - for i, mask := range m.mask { - db := data[i] & mask - if db != m.pat[i] { - return "" - } - } - return m.ct -} - -type htmlSig []byte - -func (h htmlSig) match(data []byte, firstNonWS int) string { - data = data[firstNonWS:] - if len(data) < len(h)+1 { - return "" - } - for i, b := range h { - db := data[i] - if 'A' <= b && b <= 'Z' { - db &= 0xDF - } - if b != db { - return "" - } - } - // Next byte must be space or right angle bracket. - if db := data[len(h)]; db != ' ' && db != '>' { - return "" - } - return "text/html; charset=utf-8" -} - -type mp4Sig int - -func (mp4Sig) match(data []byte, firstNonWS int) string { - // c.f. section 6.1. - if len(data) < 8 { - return "" - } - boxSize := int(binary.BigEndian.Uint32(data[:4])) - if boxSize%4 != 0 || len(data) < boxSize { - return "" - } - if !bytes.Equal(data[4:8], []byte("ftyp")) { - return "" - } - for st := 8; st < boxSize; st += 4 { - if st == 12 { - // minor version number - continue - } - seg := string(data[st : st+3]) - switch seg { - case "mp4", "iso", "M4V", "M4P", "M4B": - return "video/mp4" - /* The remainder are not in the spec. - case "M4A": - return "audio/mp4" - case "3gp": - return "video/3gpp" - case "jp2": - return "image/jp2" // JPEG 2000 - */ - } - } - return "" -} - -type textSig int - -func (textSig) match(data []byte, firstNonWS int) string { - // c.f. section 5, step 4. - for _, b := range data[firstNonWS:] { - switch { - case 0x00 <= b && b <= 0x08, - b == 0x0B, - 0x0E <= b && b <= 0x1A, - 0x1C <= b && b <= 0x1F: - return "" - } - } - return "text/plain; charset=utf-8" -} diff --git a/src/pkg/http/sniff_test.go b/src/pkg/http/sniff_test.go deleted file mode 100644 index faf05e405..000000000 --- a/src/pkg/http/sniff_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http_test - -import ( - "bytes" - . "http" - "http/httptest" - "io/ioutil" - "log" - "strconv" - "testing" -) - -var sniffTests = []struct { - desc string - data []byte - contentType string -}{ - // Some nonsense. - {"Empty", []byte{}, "text/plain; charset=utf-8"}, - {"Binary", []byte{1, 2, 3}, "application/octet-stream"}, - - {"HTML document #1", []byte(`<HtMl><bOdY>blah blah blah</body></html>`), "text/html; charset=utf-8"}, - {"HTML document #2", []byte(`<HTML></HTML>`), "text/html; charset=utf-8"}, - {"HTML document #3 (leading whitespace)", []byte(` <!DOCTYPE HTML>...`), "text/html; charset=utf-8"}, - - {"Plain text", []byte(`This is not HTML. It has โ though.`), "text/plain; charset=utf-8"}, - - {"XML", []byte("\n<?xml!"), "text/xml; charset=utf-8"}, - - // Image types. - {"GIF 87a", []byte(`GIF87a`), "image/gif"}, - {"GIF 89a", []byte(`GIF89a...`), "image/gif"}, - - // TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4. 
- //{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"}, - //{"MP4 audio", []byte("\x00\x00\x00\x20ftypM4A \x00\x00\x00\x00M4A mp42isom\x00\x00\x00\x00"), "audio/mp4"}, -} - -func TestDetectContentType(t *testing.T) { - for _, tt := range sniffTests { - ct := DetectContentType(tt.data) - if ct != tt.contentType { - t.Errorf("%v: DetectContentType = %q, want %q", tt.desc, ct, tt.contentType) - } - } -} - -func TestServerContentType(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - i, _ := strconv.Atoi(r.FormValue("i")) - tt := sniffTests[i] - n, err := w.Write(tt.data) - if n != len(tt.data) || err != nil { - log.Fatalf("%v: Write(%q) = %v, %v want %d, nil", tt.desc, tt.data, n, err, len(tt.data)) - } - })) - defer ts.Close() - - for i, tt := range sniffTests { - resp, err := Get(ts.URL + "/?i=" + strconv.Itoa(i)) - if err != nil { - t.Errorf("%v: %v", tt.desc, err) - continue - } - if ct := resp.Header.Get("Content-Type"); ct != tt.contentType { - t.Errorf("%v: Content-Type = %q, want %q", tt.desc, ct, tt.contentType) - } - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Errorf("%v: reading body: %v", tt.desc, err) - } else if !bytes.Equal(data, tt.data) { - t.Errorf("%v: data is %q, want %q", tt.desc, data, tt.data) - } - resp.Body.Close() - } -} diff --git a/src/pkg/http/spdy/Makefile b/src/pkg/http/spdy/Makefile deleted file mode 100644 index 3bec220c4..000000000 --- a/src/pkg/http/spdy/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2011 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -include ../../../Make.inc - -TARG=http/spdy -GOFILES=\ - read.go\ - types.go\ - write.go\ - -include ../../../Make.pkg diff --git a/src/pkg/http/spdy/read.go b/src/pkg/http/spdy/read.go deleted file mode 100644 index c6b6ab3af..000000000 --- a/src/pkg/http/spdy/read.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package spdy - -import ( - "compress/zlib" - "encoding/binary" - "http" - "io" - "os" - "strings" -) - -func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) os.Error { - return f.readSynStreamFrame(h, frame) -} - -func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) os.Error { - return f.readSynReplyFrame(h, frame) -} - -func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) os.Error { - frame.CFHeader = h - if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { - return err - } - return nil -} - -func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) os.Error { - frame.CFHeader = h - var numSettings uint32 - if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { - return err - } - frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) - for i := uint32(0); i < numSettings; i++ { - if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { - return err - } - frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) - frame.FlagIdValues[i].Id &= 0xffffff - if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { - return err - } - } - return nil -} - -func (frame *NoopFrame) read(h ControlFrameHeader, f *Framer) os.Error { - frame.CFHeader = h - return nil -} - -func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) os.Error { - frame.CFHeader = h - if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { - return err - } - return nil -} - -func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) os.Error { - frame.CFHeader = h - if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { - return err - } - return nil -} - -func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) os.Error { - return f.readHeadersFrame(h, frame) -} - -func newControlFrame(frameType ControlFrameType) (controlFrame, os.Error) { - ctor, ok := cframeCtor[frameType] - if !ok { - return nil, &Error{Err: InvalidControlFrame} - } - return ctor(), nil -} - -var cframeCtor = map[ControlFrameType]func() controlFrame{ - TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, - TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, - TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, - TypeSettings: func() controlFrame { return new(SettingsFrame) }, - TypeNoop: func() controlFrame { return new(NoopFrame) }, - TypePing: func() controlFrame { return new(PingFrame) }, - TypeGoAway: func() controlFrame { return new(GoAwayFrame) }, - TypeHeaders: func() controlFrame { return new(HeadersFrame) }, - // TODO(willchan): Add TypeWindowUpdate -} - -func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) os.Error { - if f.headerDecompressor != nil { - f.headerReader.N = payloadSize - return nil - } - f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} - decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(HeaderDictionary)) - if err != nil { - return err - } - f.headerDecompressor = decompressor - return nil -} - -// ReadFrame reads SPDY encoded data and returns a decompressed Frame. 
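As a usage sketch for ReadFrame (hand-written, not from the deleted sources; the package name, the connection argument, and the per-frame handling are placeholders), a caller typically loops over frames and switches on the concrete type:

    package spdyexample // hypothetical package name

    import (
        "http/spdy" // pre-Go 1 import path for the package in this diff
        "net"
        "os"
    )

    // readLoop drains SPDY frames from a single connection.
    func readLoop(c net.Conn) os.Error {
        // The same Framer writes to and reads from the connection.
        framer, err := spdy.NewFramer(c, c)
        if err != nil {
            return err
        }
        for {
            frame, err := framer.ReadFrame()
            if err != nil {
                return err
            }
            switch f := frame.(type) {
            case *spdy.SynStreamFrame:
                _ = f.Headers // a new stream with its header block
            case *spdy.DataFrame:
                _ = f.Data // payload bytes for f.StreamId
            case *spdy.PingFrame:
                _ = f.Id // peer liveness check
            }
        }
        panic("not reached")
    }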
-func (f *Framer) ReadFrame() (Frame, os.Error) { - var firstWord uint32 - if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { - return nil, err - } - if (firstWord & 0x80000000) != 0 { - frameType := ControlFrameType(firstWord & 0xffff) - version := uint16(0x7fff & (firstWord >> 16)) - return f.parseControlFrame(version, frameType) - } - return f.parseDataFrame(firstWord & 0x7fffffff) -} - -func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, os.Error) { - var length uint32 - if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { - return nil, err - } - flags := ControlFlags((length & 0xff000000) >> 24) - length &= 0xffffff - header := ControlFrameHeader{version, frameType, flags, length} - cframe, err := newControlFrame(frameType) - if err != nil { - return nil, err - } - if err = cframe.read(header, f); err != nil { - return nil, err - } - return cframe, nil -} - -func parseHeaderValueBlock(r io.Reader, streamId uint32) (http.Header, os.Error) { - var numHeaders uint16 - if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { - return nil, err - } - var e os.Error - h := make(http.Header, int(numHeaders)) - for i := 0; i < int(numHeaders); i++ { - var length uint16 - if err := binary.Read(r, binary.BigEndian, &length); err != nil { - return nil, err - } - nameBytes := make([]byte, length) - if _, err := io.ReadFull(r, nameBytes); err != nil { - return nil, err - } - name := string(nameBytes) - if name != strings.ToLower(name) { - e = &Error{UnlowercasedHeaderName, streamId} - name = strings.ToLower(name) - } - if h[name] != nil { - e = &Error{DuplicateHeaders, streamId} - } - if err := binary.Read(r, binary.BigEndian, &length); err != nil { - return nil, err - } - value := make([]byte, length) - if _, err := io.ReadFull(r, value); err != nil { - return nil, err - } - valueList := strings.Split(string(value), "\x00") - for _, v := range valueList { - h.Add(name, v) - } - } - if e != nil { - return h, e - } - return h, nil -} - -func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) os.Error { - frame.CFHeader = h - var err os.Error - if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { - return err - } - if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { - return err - } - frame.Priority >>= 14 - - reader := f.r - if !f.headerCompressionDisabled { - f.uncorkHeaderDecompressor(int64(h.length - 10)) - reader = f.headerDecompressor - } - - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) - if !f.headerCompressionDisabled && ((err == os.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) { - err = &Error{WrongCompressedPayloadSize, 0} - } - if err != nil { - return err - } - // Remove this condition when we bump Version to 3. 
- if Version >= 3 { - for h, _ := range frame.Headers { - if invalidReqHeaders[h] { - return &Error{InvalidHeaderPresent, frame.StreamId} - } - } - } - return nil -} - -func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) os.Error { - frame.CFHeader = h - var err os.Error - if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - var unused uint16 - if err = binary.Read(f.r, binary.BigEndian, &unused); err != nil { - return err - } - reader := f.r - if !f.headerCompressionDisabled { - f.uncorkHeaderDecompressor(int64(h.length - 6)) - reader = f.headerDecompressor - } - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) - if !f.headerCompressionDisabled && ((err == os.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) { - err = &Error{WrongCompressedPayloadSize, 0} - } - if err != nil { - return err - } - // Remove this condition when we bump Version to 3. - if Version >= 3 { - for h, _ := range frame.Headers { - if invalidRespHeaders[h] { - return &Error{InvalidHeaderPresent, frame.StreamId} - } - } - } - return nil -} - -func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) os.Error { - frame.CFHeader = h - var err os.Error - if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { - return err - } - var unused uint16 - if err = binary.Read(f.r, binary.BigEndian, &unused); err != nil { - return err - } - reader := f.r - if !f.headerCompressionDisabled { - f.uncorkHeaderDecompressor(int64(h.length - 6)) - reader = f.headerDecompressor - } - frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) - if !f.headerCompressionDisabled && ((err == os.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) { - err = &Error{WrongCompressedPayloadSize, 0} - } - if err != nil { - return err - } - - // Remove this condition when we bump Version to 3. - if Version >= 3 { - var invalidHeaders map[string]bool - if frame.StreamId%2 == 0 { - invalidHeaders = invalidReqHeaders - } else { - invalidHeaders = invalidRespHeaders - } - for h, _ := range frame.Headers { - if invalidHeaders[h] { - return &Error{InvalidHeaderPresent, frame.StreamId} - } - } - } - return nil -} - -func (f *Framer) parseDataFrame(streamId uint32) (*DataFrame, os.Error) { - var length uint32 - if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { - return nil, err - } - var frame DataFrame - frame.StreamId = streamId - frame.Flags = DataFlags(length >> 24) - length &= 0xffffff - frame.Data = make([]byte, length) - if _, err := io.ReadFull(f.r, frame.Data); err != nil { - return nil, err - } - return &frame, nil -} diff --git a/src/pkg/http/spdy/spdy_test.go b/src/pkg/http/spdy/spdy_test.go deleted file mode 100644 index cb91e0286..000000000 --- a/src/pkg/http/spdy/spdy_test.go +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package spdy - -import ( - "bytes" - "http" - "io" - "reflect" - "testing" -) - -func TestHeaderParsing(t *testing.T) { - headers := http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - } - var headerValueBlockBuf bytes.Buffer - writeHeaderValueBlock(&headerValueBlockBuf, headers) - - const bogusStreamId = 1 - newHeaders, err := parseHeaderValueBlock(&headerValueBlockBuf, bogusStreamId) - if err != nil { - t.Fatal("parseHeaderValueBlock:", err) - } - - if !reflect.DeepEqual(headers, newHeaders) { - t.Fatal("got: ", newHeaders, "\nwant: ", headers) - } -} - -func TestCreateParseSynStreamFrame(t *testing.T) { - buffer := new(bytes.Buffer) - framer := &Framer{ - headerCompressionDisabled: true, - w: buffer, - headerBuf: new(bytes.Buffer), - r: buffer, - } - synStreamFrame := SynStreamFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynStream, - }, - Headers: http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - }, - } - if err := framer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame without compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame without compression:", err) - } - parsedSynStreamFrame, ok := frame.(*SynStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } - - // Test again with compression - buffer.Reset() - framer, err = NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - if err := framer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame with compression:", err) - } - frame, err = framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame with compression:", err) - } - parsedSynStreamFrame, ok = frame.(*SynStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } -} - -func TestCreateParseSynReplyFrame(t *testing.T) { - buffer := new(bytes.Buffer) - framer := &Framer{ - headerCompressionDisabled: true, - w: buffer, - headerBuf: new(bytes.Buffer), - r: buffer, - } - synReplyFrame := SynReplyFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynReply, - }, - Headers: http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - }, - } - if err := framer.WriteFrame(&synReplyFrame); err != nil { - t.Fatal("WriteFrame without compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame without compression:", err) - } - parsedSynReplyFrame, ok := frame.(*SynReplyFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { - t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) - } - - // Test again with compression - buffer.Reset() - framer, err = NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - if err := framer.WriteFrame(&synReplyFrame); err != nil { - t.Fatal("WriteFrame with compression:", err) - } - frame, err = framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame with compression:", err) - } - parsedSynReplyFrame, ok = 
frame.(*SynReplyFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { - t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) - } -} - -func TestCreateParseRstStream(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - rstStreamFrame := RstStreamFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeRstStream, - }, - StreamId: 1, - Status: InvalidStream, - } - if err := framer.WriteFrame(&rstStreamFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedRstStreamFrame, ok := frame.(*RstStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(rstStreamFrame, *parsedRstStreamFrame) { - t.Fatal("got: ", *parsedRstStreamFrame, "\nwant: ", rstStreamFrame) - } -} - -func TestCreateParseSettings(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - settingsFrame := SettingsFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSettings, - }, - FlagIdValues: []SettingsFlagIdValue{ - {FlagSettingsPersistValue, SettingsCurrentCwnd, 10}, - {FlagSettingsPersisted, SettingsUploadBandwidth, 1}, - }, - } - if err := framer.WriteFrame(&settingsFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedSettingsFrame, ok := frame.(*SettingsFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(settingsFrame, *parsedSettingsFrame) { - t.Fatal("got: ", *parsedSettingsFrame, "\nwant: ", settingsFrame) - } -} - -func TestCreateParseNoop(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - noopFrame := NoopFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeNoop, - }, - } - if err := framer.WriteFrame(&noopFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedNoopFrame, ok := frame.(*NoopFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(noopFrame, *parsedNoopFrame) { - t.Fatal("got: ", *parsedNoopFrame, "\nwant: ", noopFrame) - } -} - -func TestCreateParsePing(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - pingFrame := PingFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypePing, - }, - Id: 31337, - } - if err := framer.WriteFrame(&pingFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedPingFrame, ok := frame.(*PingFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(pingFrame, *parsedPingFrame) { - t.Fatal("got: ", *parsedPingFrame, "\nwant: ", pingFrame) - } -} - -func TestCreateParseGoAway(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - goAwayFrame := 
GoAwayFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeGoAway, - }, - LastGoodStreamId: 31337, - } - if err := framer.WriteFrame(&goAwayFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedGoAwayFrame, ok := frame.(*GoAwayFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(goAwayFrame, *parsedGoAwayFrame) { - t.Fatal("got: ", *parsedGoAwayFrame, "\nwant: ", goAwayFrame) - } -} - -func TestCreateParseHeadersFrame(t *testing.T) { - buffer := new(bytes.Buffer) - framer := &Framer{ - headerCompressionDisabled: true, - w: buffer, - headerBuf: new(bytes.Buffer), - r: buffer, - } - headersFrame := HeadersFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeHeaders, - }, - } - headersFrame.Headers = http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - } - if err := framer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame without compression:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame without compression:", err) - } - parsedHeadersFrame, ok := frame.(*HeadersFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } - - // Test again with compression - buffer.Reset() - framer, err = NewFramer(buffer, buffer) - if err := framer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame with compression:", err) - } - frame, err = framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame with compression:", err) - } - parsedHeadersFrame, ok = frame.(*HeadersFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } -} - -func TestCreateParseDataFrame(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - dataFrame := DataFrame{ - StreamId: 1, - Data: []byte{'h', 'e', 'l', 'l', 'o'}, - } - if err := framer.WriteFrame(&dataFrame); err != nil { - t.Fatal("WriteFrame:", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame:", err) - } - parsedDataFrame, ok := frame.(*DataFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(dataFrame, *parsedDataFrame) { - t.Fatal("got: ", *parsedDataFrame, "\nwant: ", dataFrame) - } -} - -func TestCompressionContextAcrossFrames(t *testing.T) { - buffer := new(bytes.Buffer) - framer, err := NewFramer(buffer, buffer) - if err != nil { - t.Fatal("Failed to create new framer:", err) - } - headersFrame := HeadersFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeHeaders, - }, - Headers: http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - }, - } - if err := framer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame (HEADERS):", err) - } - synStreamFrame := SynStreamFrame{ControlFrameHeader{Version, TypeSynStream, 0, 0}, 0, 0, 0, nil} - synStreamFrame.Headers = http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - } - if err := 
framer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame (SYN_STREAM):", err) - } - frame, err := framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (HEADERS):", err, buffer.Bytes()) - } - parsedHeadersFrame, ok := frame.(*HeadersFrame) - if !ok { - t.Fatalf("expected HeadersFrame; got %T %v", frame, frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } - frame, err = framer.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (SYN_STREAM):", err, buffer.Bytes()) - } - parsedSynStreamFrame, ok := frame.(*SynStreamFrame) - if !ok { - t.Fatalf("expected SynStreamFrame; got %T %v", frame, frame) - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } -} - -func TestMultipleSPDYFrames(t *testing.T) { - // Initialize the framers. - pr1, pw1 := io.Pipe() - pr2, pw2 := io.Pipe() - writer, err := NewFramer(pw1, pr2) - if err != nil { - t.Fatal("Failed to create writer:", err) - } - reader, err := NewFramer(pw2, pr1) - if err != nil { - t.Fatal("Failed to create reader:", err) - } - - // Set up the frames we're actually transferring. - headersFrame := HeadersFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeHeaders, - }, - Headers: http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - }, - } - synStreamFrame := SynStreamFrame{ - CFHeader: ControlFrameHeader{ - version: Version, - frameType: TypeSynStream, - }, - Headers: http.Header{ - "Url": []string{"http://www.google.com/"}, - "Method": []string{"get"}, - "Version": []string{"http/1.1"}, - }, - } - - // Start the goroutines to write the frames. - go func() { - if err := writer.WriteFrame(&headersFrame); err != nil { - t.Fatal("WriteFrame (HEADERS): ", err) - } - if err := writer.WriteFrame(&synStreamFrame); err != nil { - t.Fatal("WriteFrame (SYN_STREAM): ", err) - } - }() - - // Read the frames and verify they look as expected. - frame, err := reader.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (HEADERS): ", err) - } - parsedHeadersFrame, ok := frame.(*HeadersFrame) - if !ok { - t.Fatal("Parsed incorrect frame type:", frame) - } - if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { - t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) - } - frame, err = reader.ReadFrame() - if err != nil { - t.Fatal("ReadFrame (SYN_STREAM):", err) - } - parsedSynStreamFrame, ok := frame.(*SynStreamFrame) - if !ok { - t.Fatal("Parsed incorrect frame type.") - } - if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { - t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) - } -} diff --git a/src/pkg/http/spdy/types.go b/src/pkg/http/spdy/types.go deleted file mode 100644 index 41cafb174..000000000 --- a/src/pkg/http/spdy/types.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
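Each framer test above follows the same round trip: serialize a frame into an in-memory buffer with WriteFrame, read it back with ReadFrame, and compare the parsed value against the original. A minimal sketch of that usage outside a test, written against the API as it exists in this tree (pre-Go1 Go, so errors are os.Error values; the import path http/spdy is assumed from the file locations in this diff):

    package main

    import (
        "bytes"
        "fmt"
        "http/spdy" // this tree's package, not a modern import path
    )

    func main() {
        buf := new(bytes.Buffer)
        // The same buffer serves as both the write side and the read side.
        framer, err := spdy.NewFramer(buf, buf)
        if err != nil {
            panic(err)
        }
        ping := spdy.PingFrame{Id: 31337}
        // WriteFrame fills in the control frame header (version, type, length).
        if err := framer.WriteFrame(&ping); err != nil {
            panic(err)
        }
        frame, err := framer.ReadFrame()
        if err != nil {
            panic(err)
        }
        fmt.Printf("round-tripped: %#v\n", frame.(*spdy.PingFrame))
    }

The tests compare with reflect.DeepEqual because the parsed frame also carries the header fields that WriteFrame filled in on the original.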
- -package spdy - -import ( - "bytes" - "compress/zlib" - "http" - "io" - "os" -) - -// Data Frame Format -// +----------------------------------+ -// |0| Stream-ID (31bits) | -// +----------------------------------+ -// | flags (8) | Length (24 bits) | -// +----------------------------------+ -// | Data | -// +----------------------------------+ -// -// Control Frame Format -// +----------------------------------+ -// |1| Version(15bits) | Type(16bits) | -// +----------------------------------+ -// | flags (8) | Length (24 bits) | -// +----------------------------------+ -// | Data | -// +----------------------------------+ -// -// Control Frame: SYN_STREAM -// +----------------------------------+ -// |1|000000000000001|0000000000000001| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | >= 12 -// +----------------------------------+ -// |X| Stream-ID(31bits) | -// +----------------------------------+ -// |X|Associated-To-Stream-ID (31bits)| -// +----------------------------------+ -// |Pri| unused | Length (16bits)| -// +----------------------------------+ -// -// Control Frame: SYN_REPLY -// +----------------------------------+ -// |1|000000000000001|0000000000000010| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | >= 8 -// +----------------------------------+ -// |X| Stream-ID(31bits) | -// +----------------------------------+ -// | unused (16 bits)| Length (16bits)| -// +----------------------------------+ -// -// Control Frame: RST_STREAM -// +----------------------------------+ -// |1|000000000000001|0000000000000011| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | >= 4 -// +----------------------------------+ -// |X| Stream-ID(31bits) | -// +----------------------------------+ -// | Status code (32 bits) | -// +----------------------------------+ -// -// Control Frame: SETTINGS -// +----------------------------------+ -// |1|000000000000001|0000000000000100| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | -// +----------------------------------+ -// | # of entries (32) | -// +----------------------------------+ -// -// Control Frame: NOOP -// +----------------------------------+ -// |1|000000000000001|0000000000000101| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | = 0 -// +----------------------------------+ -// -// Control Frame: PING -// +----------------------------------+ -// |1|000000000000001|0000000000000110| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | = 4 -// +----------------------------------+ -// | Unique id (32 bits) | -// +----------------------------------+ -// -// Control Frame: GOAWAY -// +----------------------------------+ -// |1|000000000000001|0000000000000111| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | = 4 -// +----------------------------------+ -// |X| Last-accepted-stream-id | -// +----------------------------------+ -// -// Control Frame: HEADERS -// +----------------------------------+ -// |1|000000000000001|0000000000001000| -// +----------------------------------+ -// | flags (8) | Length (24 bits) | >= 8 -// +----------------------------------+ -// |X| Stream-ID (31 bits) | -// +----------------------------------+ -// | unused (16 bits)| Length (16bits)| -// +----------------------------------+ -// -// Control Frame: WINDOW_UPDATE -// +----------------------------------+ -// |1|000000000000001|0000000000001001| -// 
+----------------------------------+ -// | flags (8) | Length (24 bits) | = 8 -// +----------------------------------+ -// |X| Stream-ID (31 bits) | -// +----------------------------------+ -// | Delta-Window-Size (32 bits) | -// +----------------------------------+ - -// Version is the protocol version number that this package implements. -const Version = 2 - -// ControlFrameType stores the type field in a control frame header. -type ControlFrameType uint16 - -// Control frame type constants -const ( - TypeSynStream ControlFrameType = 0x0001 - TypeSynReply = 0x0002 - TypeRstStream = 0x0003 - TypeSettings = 0x0004 - TypeNoop = 0x0005 - TypePing = 0x0006 - TypeGoAway = 0x0007 - TypeHeaders = 0x0008 - TypeWindowUpdate = 0x0009 -) - -// ControlFlags are the flags that can be set on a control frame. -type ControlFlags uint8 - -const ( - ControlFlagFin ControlFlags = 0x01 -) - -// DataFlags are the flags that can be set on a data frame. -type DataFlags uint8 - -const ( - DataFlagFin DataFlags = 0x01 - DataFlagCompressed = 0x02 -) - -// MaxDataLength is the maximum number of bytes that can be stored in one frame. -const MaxDataLength = 1<<24 - 1 - -// Frame is a single SPDY frame in its unpacked in-memory representation. Use -// Framer to read and write it. -type Frame interface { - write(f *Framer) os.Error -} - -// ControlFrameHeader contains all the fields in a control frame header, -// in its unpacked in-memory representation. -type ControlFrameHeader struct { - // Note, high bit is the "Control" bit. - version uint16 - frameType ControlFrameType - Flags ControlFlags - length uint32 -} - -type controlFrame interface { - Frame - read(h ControlFrameHeader, f *Framer) os.Error -} - -// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM -// frame. -type SynStreamFrame struct { - CFHeader ControlFrameHeader - StreamId uint32 - AssociatedToStreamId uint32 - // Note, only 2 highest bits currently used - // Rest of Priority is unused. - Priority uint16 - Headers http.Header -} - -// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. -type SynReplyFrame struct { - CFHeader ControlFrameHeader - StreamId uint32 - Headers http.Header -} - -// StatusCode represents the status that led to a RST_STREAM -type StatusCode uint32 - -const ( - ProtocolError StatusCode = 1 - InvalidStream = 2 - RefusedStream = 3 - UnsupportedVersion = 4 - Cancel = 5 - InternalError = 6 - FlowControlError = 7 -) - -// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM -// frame. -type RstStreamFrame struct { - CFHeader ControlFrameHeader - StreamId uint32 - Status StatusCode -} - -// SettingsFlag represents a flag in a SETTINGS frame. -type SettingsFlag uint8 - -const ( - FlagSettingsPersistValue SettingsFlag = 0x1 - FlagSettingsPersisted = 0x2 -) - -// SettingsFlag represents the id of an id/value pair in a SETTINGS frame. -type SettingsId uint32 - -const ( - SettingsUploadBandwidth SettingsId = 1 - SettingsDownloadBandwidth = 2 - SettingsRoundTripTime = 3 - SettingsMaxConcurrentStreams = 4 - SettingsCurrentCwnd = 5 -) - -// SettingsFlagIdValue is the unpacked, in-memory representation of the -// combined flag/id/value for a setting in a SETTINGS frame. -type SettingsFlagIdValue struct { - Flag SettingsFlag - Id SettingsId - Value uint32 -} - -// SettingsFrame is the unpacked, in-memory representation of a SPDY -// SETTINGS frame. 
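The control frame header drawn in the diagrams above is two big-endian 16-bit words (the control bit plus a 15-bit version, then the 16-bit type) followed by one 32-bit word carrying the 8-bit flags in its top byte and a 24-bit length beneath; writeControlFrameHeader in write.go later in this diff packs it exactly that way. A stand-alone sketch of the packing, standard library only:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func main() {
        const (
            version   uint16 = 2      // Version in this package
            frameType uint16 = 0x0006 // TypePing
            flags     uint32 = 0
            length    uint32 = 4 // a PING payload is its 4-byte id
        )
        buf := new(bytes.Buffer)
        binary.Write(buf, binary.BigEndian, 0x8000|version)    // control bit set
        binary.Write(buf, binary.BigEndian, frameType)         // 16-bit frame type
        binary.Write(buf, binary.BigEndian, flags<<24|length)  // flags in the top byte
        fmt.Printf("% x\n", buf.Bytes()) // 80 02 00 06 00 00 00 04
    }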
-type SettingsFrame struct { - CFHeader ControlFrameHeader - FlagIdValues []SettingsFlagIdValue -} - -// NoopFrame is the unpacked, in-memory representation of a NOOP frame. -type NoopFrame struct { - CFHeader ControlFrameHeader -} - -// PingFrame is the unpacked, in-memory representation of a PING frame. -type PingFrame struct { - CFHeader ControlFrameHeader - Id uint32 -} - -// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. -type GoAwayFrame struct { - CFHeader ControlFrameHeader - LastGoodStreamId uint32 -} - -// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. -type HeadersFrame struct { - CFHeader ControlFrameHeader - StreamId uint32 - Headers http.Header -} - -// DataFrame is the unpacked, in-memory representation of a DATA frame. -type DataFrame struct { - // Note, high bit is the "Control" bit. Should be 0 for data frames. - StreamId uint32 - Flags DataFlags - Data []byte -} - -// HeaderDictionary is the dictionary sent to the zlib compressor/decompressor. -// Even though the specification states there is no null byte at the end, Chrome sends it. -const HeaderDictionary = "optionsgetheadpostputdeletetrace" + - "acceptaccept-charsetaccept-encodingaccept-languageauthorizationexpectfromhost" + - "if-modified-sinceif-matchif-none-matchif-rangeif-unmodifiedsince" + - "max-forwardsproxy-authorizationrangerefererteuser-agent" + - "100101200201202203204205206300301302303304305306307400401402403404405406407408409410411412413414415416417500501502503504505" + - "accept-rangesageetaglocationproxy-authenticatepublicretry-after" + - "servervarywarningwww-authenticateallowcontent-basecontent-encodingcache-control" + - "connectiondatetrailertransfer-encodingupgradeviawarning" + - "content-languagecontent-lengthcontent-locationcontent-md5content-rangecontent-typeetagexpireslast-modifiedset-cookie" + - "MondayTuesdayWednesdayThursdayFridaySaturdaySunday" + - "JanFebMarAprMayJunJulAugSepOctNovDec" + - "chunkedtext/htmlimage/pngimage/jpgimage/gifapplication/xmlapplication/xhtmltext/plainpublicmax-age" + - "charset=iso-8859-1utf-8gzipdeflateHTTP/1.1statusversionurl\x00" - -// A SPDY specific error. -type ErrorCode string - -const ( - UnlowercasedHeaderName ErrorCode = "header was not lowercased" - DuplicateHeaders ErrorCode = "multiple headers with same name" - WrongCompressedPayloadSize ErrorCode = "compressed payload size was incorrect" - UnknownFrameType ErrorCode = "unknown frame type" - InvalidControlFrame ErrorCode = "invalid control frame" - InvalidDataFrame ErrorCode = "invalid data frame" - InvalidHeaderPresent ErrorCode = "frame contained invalid header" -) - -// Error contains both the type of error and additional values. StreamId is 0 -// if Error is not associated with a stream. -type Error struct { - Err ErrorCode - StreamId uint32 -} - -func (e *Error) String() string { - return string(e.Err) -} - -var invalidReqHeaders = map[string]bool{ - "Connection": true, - "Keep-Alive": true, - "Proxy-Connection": true, - "Transfer-Encoding": true, -} - -var invalidRespHeaders = map[string]bool{ - "Connection": true, - "Keep-Alive": true, - "Transfer-Encoding": true, -} - -// Framer handles serializing/deserializing SPDY frames, including compressing/ -// decompressing payloads. 
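HeaderDictionary above exists so that even the very first compressed header block benefits from zlib's dictionary matching; NewFramer just below primes its compressor with it, and the reading side must be primed with the same bytes. A stand-alone sketch of that pairing using the modern compress/zlib API (the short dict here is only an illustrative stand-in for HeaderDictionary):

    package main

    import (
        "bytes"
        "compress/zlib"
        "fmt"
        "io"
    )

    func main() {
        // Both ends must share the same preset dictionary.
        dict := []byte("optionsgetheadpostputdeletetrace")
        payload := []byte("get http://www.google.com/ http/1.1")

        var compressed bytes.Buffer
        zw, err := zlib.NewWriterLevelDict(&compressed, zlib.BestCompression, dict)
        if err != nil {
            panic(err)
        }
        zw.Write(payload)
        zw.Close()

        zr, err := zlib.NewReaderDict(bytes.NewReader(compressed.Bytes()), dict)
        if err != nil {
            panic(err)
        }
        out, _ := io.ReadAll(zr)
        fmt.Printf("%d plain bytes, %d compressed, round-tripped %q\n",
            len(payload), compressed.Len(), out)
    }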
-type Framer struct { - headerCompressionDisabled bool - w io.Writer - headerBuf *bytes.Buffer - headerCompressor *zlib.Writer - r io.Reader - headerReader io.LimitedReader - headerDecompressor io.ReadCloser -} - -// NewFramer allocates a new Framer for a given SPDY connection, repesented by -// a io.Writer and io.Reader. Note that Framer will read and write individual fields -// from/to the Reader and Writer, so the caller should pass in an appropriately -// buffered implementation to optimize performance. -func NewFramer(w io.Writer, r io.Reader) (*Framer, os.Error) { - compressBuf := new(bytes.Buffer) - compressor, err := zlib.NewWriterDict(compressBuf, zlib.BestCompression, []byte(HeaderDictionary)) - if err != nil { - return nil, err - } - framer := &Framer{ - w: w, - headerBuf: compressBuf, - headerCompressor: compressor, - r: r, - } - return framer, nil -} diff --git a/src/pkg/http/spdy/write.go b/src/pkg/http/spdy/write.go deleted file mode 100644 index 7d40bbe9f..000000000 --- a/src/pkg/http/spdy/write.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package spdy - -import ( - "encoding/binary" - "http" - "io" - "os" - "strings" -) - -func (frame *SynStreamFrame) write(f *Framer) os.Error { - return f.writeSynStreamFrame(frame) -} - -func (frame *SynReplyFrame) write(f *Framer) os.Error { - return f.writeSynReplyFrame(frame) -} - -func (frame *RstStreamFrame) write(f *Framer) (err os.Error) { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeRstStream - frame.CFHeader.length = 8 - - // Serialize frame to Writer - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { - return - } - return -} - -func (frame *SettingsFrame) write(f *Framer) (err os.Error) { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeSettings - frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) - - // Serialize frame to Writer - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { - return - } - for _, flagIdValue := range frame.FlagIdValues { - flagId := (uint32(flagIdValue.Flag) << 24) | uint32(flagIdValue.Id) - if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { - return - } - } - return -} - -func (frame *NoopFrame) write(f *Framer) os.Error { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeNoop - - // Serialize frame to Writer - return writeControlFrameHeader(f.w, frame.CFHeader) -} - -func (frame *PingFrame) write(f *Framer) (err os.Error) { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypePing - frame.CFHeader.length = 4 - - // Serialize frame to Writer - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { - return - } - return -} - -func (frame *GoAwayFrame) write(f *Framer) (err os.Error) { - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeGoAway - frame.CFHeader.length = 4 - - // Serialize frame to Writer - if err = 
writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { - return - } - return nil -} - -func (frame *HeadersFrame) write(f *Framer) os.Error { - return f.writeHeadersFrame(frame) -} - -func (frame *DataFrame) write(f *Framer) os.Error { - return f.writeDataFrame(frame) -} - -// WriteFrame writes a frame. -func (f *Framer) WriteFrame(frame Frame) os.Error { - return frame.write(f) -} - -func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) os.Error { - if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { - return err - } - if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { - return err - } - flagsAndLength := (uint32(h.Flags) << 24) | h.length - if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { - return err - } - return nil -} - -func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err os.Error) { - n = 0 - if err = binary.Write(w, binary.BigEndian, uint16(len(h))); err != nil { - return - } - n += 2 - for name, values := range h { - if err = binary.Write(w, binary.BigEndian, uint16(len(name))); err != nil { - return - } - n += 2 - name = strings.ToLower(name) - if _, err = io.WriteString(w, name); err != nil { - return - } - n += len(name) - v := strings.Join(values, "\x00") - if err = binary.Write(w, binary.BigEndian, uint16(len(v))); err != nil { - return - } - n += 2 - if _, err = io.WriteString(w, v); err != nil { - return - } - n += len(v) - } - return -} - -func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err os.Error) { - // Marshal the headers. - var writer io.Writer = f.headerBuf - if !f.headerCompressionDisabled { - writer = f.headerCompressor - } - if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { - return - } - if !f.headerCompressionDisabled { - f.headerCompressor.Flush() - } - - // Set ControlFrameHeader - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeSynStream - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) - - // Serialize frame to Writer - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return err - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return err - } - if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { - return err - } - if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<14); err != nil { - return err - } - if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { - return err - } - f.headerBuf.Reset() - return nil -} - -func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err os.Error) { - // Marshal the headers. 
- var writer io.Writer = f.headerBuf - if !f.headerCompressionDisabled { - writer = f.headerCompressor - } - if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { - return - } - if !f.headerCompressionDisabled { - f.headerCompressor.Flush() - } - - // Set ControlFrameHeader - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeSynReply - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 6) - - // Serialize frame to Writer - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, uint16(0)); err != nil { - return - } - if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { - return - } - f.headerBuf.Reset() - return -} - -func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err os.Error) { - // Marshal the headers. - var writer io.Writer = f.headerBuf - if !f.headerCompressionDisabled { - writer = f.headerCompressor - } - if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { - return - } - if !f.headerCompressionDisabled { - f.headerCompressor.Flush() - } - - // Set ControlFrameHeader - frame.CFHeader.version = Version - frame.CFHeader.frameType = TypeHeaders - frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 6) - - // Serialize frame to Writer - if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - if err = binary.Write(f.w, binary.BigEndian, uint16(0)); err != nil { - return - } - if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { - return - } - f.headerBuf.Reset() - return -} - -func (f *Framer) writeDataFrame(frame *DataFrame) (err os.Error) { - // Validate DataFrame - if frame.StreamId&0x80000000 != 0 || len(frame.Data) >= 0x0f000000 { - return &Error{InvalidDataFrame, frame.StreamId} - } - - // Serialize frame to Writer - if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { - return - } - flagsAndLength := (uint32(frame.Flags) << 24) | uint32(len(frame.Data)) - if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { - return - } - if _, err = f.w.Write(frame.Data); err != nil { - return - } - - return nil -} diff --git a/src/pkg/http/status.go b/src/pkg/http/status.go deleted file mode 100644 index b6e2d65c6..000000000 --- a/src/pkg/http/status.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -// HTTP status codes, defined in RFC 2616. 
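writeHeaderValueBlock above lays headers out as a big-endian pair count, then for each header a length-prefixed lowercased name and a length-prefixed value string in which multiple values are joined by NUL bytes. A stand-alone sketch of that layout, with the modern net/http import path standing in for this tree's http package:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "net/http"
        "strings"
    )

    func encodeHeaderBlock(h http.Header) []byte {
        var buf bytes.Buffer
        binary.Write(&buf, binary.BigEndian, uint16(len(h))) // number of name/value pairs
        for name, values := range h {
            name = strings.ToLower(name)
            binary.Write(&buf, binary.BigEndian, uint16(len(name)))
            buf.WriteString(name)
            v := strings.Join(values, "\x00") // multiple values share one entry
            binary.Write(&buf, binary.BigEndian, uint16(len(v)))
            buf.WriteString(v)
        }
        return buf.Bytes()
    }

    func main() {
        h := http.Header{"Method": {"get"}, "Version": {"http/1.1"}}
        fmt.Printf("% x\n", encodeHeaderBlock(h)) // byte order of pairs varies with map iteration
    }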
-const ( - StatusContinue = 100 - StatusSwitchingProtocols = 101 - - StatusOK = 200 - StatusCreated = 201 - StatusAccepted = 202 - StatusNonAuthoritativeInfo = 203 - StatusNoContent = 204 - StatusResetContent = 205 - StatusPartialContent = 206 - - StatusMultipleChoices = 300 - StatusMovedPermanently = 301 - StatusFound = 302 - StatusSeeOther = 303 - StatusNotModified = 304 - StatusUseProxy = 305 - StatusTemporaryRedirect = 307 - - StatusBadRequest = 400 - StatusUnauthorized = 401 - StatusPaymentRequired = 402 - StatusForbidden = 403 - StatusNotFound = 404 - StatusMethodNotAllowed = 405 - StatusNotAcceptable = 406 - StatusProxyAuthRequired = 407 - StatusRequestTimeout = 408 - StatusConflict = 409 - StatusGone = 410 - StatusLengthRequired = 411 - StatusPreconditionFailed = 412 - StatusRequestEntityTooLarge = 413 - StatusRequestURITooLong = 414 - StatusUnsupportedMediaType = 415 - StatusRequestedRangeNotSatisfiable = 416 - StatusExpectationFailed = 417 - - StatusInternalServerError = 500 - StatusNotImplemented = 501 - StatusBadGateway = 502 - StatusServiceUnavailable = 503 - StatusGatewayTimeout = 504 - StatusHTTPVersionNotSupported = 505 -) - -var statusText = map[int]string{ - StatusContinue: "Continue", - StatusSwitchingProtocols: "Switching Protocols", - - StatusOK: "OK", - StatusCreated: "Created", - StatusAccepted: "Accepted", - StatusNonAuthoritativeInfo: "Non-Authoritative Information", - StatusNoContent: "No Content", - StatusResetContent: "Reset Content", - StatusPartialContent: "Partial Content", - - StatusMultipleChoices: "Multiple Choices", - StatusMovedPermanently: "Moved Permanently", - StatusFound: "Found", - StatusSeeOther: "See Other", - StatusNotModified: "Not Modified", - StatusUseProxy: "Use Proxy", - StatusTemporaryRedirect: "Temporary Redirect", - - StatusBadRequest: "Bad Request", - StatusUnauthorized: "Unauthorized", - StatusPaymentRequired: "Payment Required", - StatusForbidden: "Forbidden", - StatusNotFound: "Not Found", - StatusMethodNotAllowed: "Method Not Allowed", - StatusNotAcceptable: "Not Acceptable", - StatusProxyAuthRequired: "Proxy Authentication Required", - StatusRequestTimeout: "Request Timeout", - StatusConflict: "Conflict", - StatusGone: "Gone", - StatusLengthRequired: "Length Required", - StatusPreconditionFailed: "Precondition Failed", - StatusRequestEntityTooLarge: "Request Entity Too Large", - StatusRequestURITooLong: "Request URI Too Long", - StatusUnsupportedMediaType: "Unsupported Media Type", - StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", - StatusExpectationFailed: "Expectation Failed", - - StatusInternalServerError: "Internal Server Error", - StatusNotImplemented: "Not Implemented", - StatusBadGateway: "Bad Gateway", - StatusServiceUnavailable: "Service Unavailable", - StatusGatewayTimeout: "Gateway Timeout", - StatusHTTPVersionNotSupported: "HTTP Version Not Supported", -} - -// StatusText returns a text for the HTTP status code. It returns the empty -// string if the code is unknown. 
-func StatusText(code int) string { - return statusText[code] -} diff --git a/src/pkg/http/testdata/file b/src/pkg/http/testdata/file deleted file mode 100644 index 11f11f9be..000000000 --- a/src/pkg/http/testdata/file +++ /dev/null @@ -1 +0,0 @@ -0123456789 diff --git a/src/pkg/http/testdata/index.html b/src/pkg/http/testdata/index.html deleted file mode 100644 index da8e1e93d..000000000 --- a/src/pkg/http/testdata/index.html +++ /dev/null @@ -1 +0,0 @@ -index.html says hello diff --git a/src/pkg/http/testdata/style.css b/src/pkg/http/testdata/style.css deleted file mode 100644 index 208d16d42..000000000 --- a/src/pkg/http/testdata/style.css +++ /dev/null @@ -1 +0,0 @@ -body {} diff --git a/src/pkg/http/transfer.go b/src/pkg/http/transfer.go deleted file mode 100644 index b65d99a6f..000000000 --- a/src/pkg/http/transfer.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "bufio" - "io" - "io/ioutil" - "os" - "strconv" - "strings" -) - -// transferWriter inspects the fields of a user-supplied Request or Response, -// sanitizes them without changing the user object and provides methods for -// writing the respective header, body and trailer in wire format. -type transferWriter struct { - Body io.Reader - BodyCloser io.Closer - ResponseToHEAD bool - ContentLength int64 - Close bool - TransferEncoding []string - Trailer Header -} - -func newTransferWriter(r interface{}) (t *transferWriter, err os.Error) { - t = &transferWriter{} - - // Extract relevant fields - atLeastHTTP11 := false - switch rr := r.(type) { - case *Request: - t.Body = rr.Body - t.BodyCloser = rr.Body - t.ContentLength = rr.ContentLength - t.Close = rr.Close - t.TransferEncoding = rr.TransferEncoding - t.Trailer = rr.Trailer - atLeastHTTP11 = rr.ProtoAtLeast(1, 1) - if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 { - if t.ContentLength == 0 { - // Test to see if it's actually zero or just unset. - var buf [1]byte - n, _ := io.ReadFull(t.Body, buf[:]) - if n == 1 { - // Oh, guess there is data in this Body Reader after all. - // The ContentLength field just wasn't set. - // Stich the Body back together again, re-attaching our - // consumed byte. - t.ContentLength = -1 - t.Body = io.MultiReader(bytes.NewBuffer(buf[:]), t.Body) - } else { - // Body is actually empty. 
- t.Body = nil - t.BodyCloser = nil - } - } - if t.ContentLength < 0 { - t.TransferEncoding = []string{"chunked"} - } - } - case *Response: - t.Body = rr.Body - t.BodyCloser = rr.Body - t.ContentLength = rr.ContentLength - t.Close = rr.Close - t.TransferEncoding = rr.TransferEncoding - t.Trailer = rr.Trailer - atLeastHTTP11 = rr.ProtoAtLeast(1, 1) - t.ResponseToHEAD = noBodyExpected(rr.Request.Method) - } - - // Sanitize Body,ContentLength,TransferEncoding - if t.ResponseToHEAD { - t.Body = nil - t.TransferEncoding = nil - // ContentLength is expected to hold Content-Length - if t.ContentLength < 0 { - return nil, ErrMissingContentLength - } - } else { - if !atLeastHTTP11 || t.Body == nil { - t.TransferEncoding = nil - } - if chunked(t.TransferEncoding) { - t.ContentLength = -1 - } else if t.Body == nil { // no chunking, no body - t.ContentLength = 0 - } - } - - // Sanitize Trailer - if !chunked(t.TransferEncoding) { - t.Trailer = nil - } - - return t, nil -} - -func noBodyExpected(requestMethod string) bool { - return requestMethod == "HEAD" -} - -func (t *transferWriter) WriteHeader(w io.Writer) (err os.Error) { - if t.Close { - _, err = io.WriteString(w, "Connection: close\r\n") - if err != nil { - return - } - } - - // Write Content-Length and/or Transfer-Encoding whose values are a - // function of the sanitized field triple (Body, ContentLength, - // TransferEncoding) - if chunked(t.TransferEncoding) { - _, err = io.WriteString(w, "Transfer-Encoding: chunked\r\n") - if err != nil { - return - } - } else if t.ContentLength > 0 || t.ResponseToHEAD || (t.ContentLength == 0 && isIdentity(t.TransferEncoding)) { - io.WriteString(w, "Content-Length: ") - _, err = io.WriteString(w, strconv.Itoa64(t.ContentLength)+"\r\n") - if err != nil { - return - } - } - - // Write Trailer header - if t.Trailer != nil { - // TODO: At some point, there should be a generic mechanism for - // writing long headers, using HTTP line splitting - io.WriteString(w, "Trailer: ") - needComma := false - for k := range t.Trailer { - k = CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return &badStringError{"invalid Trailer key", k} - } - if needComma { - io.WriteString(w, ",") - } - io.WriteString(w, k) - needComma = true - } - _, err = io.WriteString(w, "\r\n") - } - - return -} - -func (t *transferWriter) WriteBody(w io.Writer) (err os.Error) { - // Write body - if t.Body != nil { - if chunked(t.TransferEncoding) { - cw := NewChunkedWriter(w) - _, err = io.Copy(cw, t.Body) - if err == nil { - err = cw.Close() - } - } else if t.ContentLength == -1 { - _, err = io.Copy(w, t.Body) - } else { - _, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength)) - } - if err != nil { - return err - } - if err = t.BodyCloser.Close(); err != nil { - return err - } - } - - // TODO(petar): Place trailer writer code here. - if chunked(t.TransferEncoding) { - // Last chunk, empty trailer - _, err = io.WriteString(w, "\r\n") - } - - return -} - -type transferReader struct { - // Input - Header Header - StatusCode int - RequestMethod string - ProtoMajor int - ProtoMinor int - // Output - Body io.ReadCloser - ContentLength int64 - TransferEncoding []string - Close bool - Trailer Header -} - -// bodyAllowedForStatus returns whether a given response status code -// permits a body. See RFC2616, section 4.4. 
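The ContentLength == 0 branch in newTransferWriter above cannot tell "zero-length body" from "length never set", so it reads a single probe byte and, if one arrives, stitches it back onto the body with io.MultiReader. A stand-alone sketch of that probe:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "strings"
    )

    // probeBody reports whether body is empty; if it is not, the returned
    // reader has the consumed probe byte re-attached so nothing is lost.
    func probeBody(body io.Reader) (io.Reader, bool) {
        var buf [1]byte
        if n, _ := io.ReadFull(body, buf[:]); n == 0 {
            return nil, true
        }
        return io.MultiReader(bytes.NewReader(buf[:]), body), false
    }

    func main() {
        r, empty := probeBody(strings.NewReader("hello"))
        if !empty {
            b, _ := io.ReadAll(r)
            fmt.Printf("non-empty: %q\n", b) // "hello"
        }
        if _, empty := probeBody(strings.NewReader("")); empty {
            fmt.Println("empty body, no Content-Length needed")
        }
    }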
-func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -// msg is *Request or *Response. -func readTransfer(msg interface{}, r *bufio.Reader) (err os.Error) { - t := &transferReader{} - - // Unify input - isResponse := false - switch rr := msg.(type) { - case *Response: - t.Header = rr.Header - t.StatusCode = rr.StatusCode - t.RequestMethod = rr.Request.Method - t.ProtoMajor = rr.ProtoMajor - t.ProtoMinor = rr.ProtoMinor - t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header) - isResponse = true - case *Request: - t.Header = rr.Header - t.ProtoMajor = rr.ProtoMajor - t.ProtoMinor = rr.ProtoMinor - // Transfer semantics for Requests are exactly like those for - // Responses with status code 200, responding to a GET method - t.StatusCode = 200 - t.RequestMethod = "GET" - default: - panic("unexpected type") - } - - // Default to HTTP/1.1 - if t.ProtoMajor == 0 && t.ProtoMinor == 0 { - t.ProtoMajor, t.ProtoMinor = 1, 1 - } - - // Transfer encoding, content length - t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header) - if err != nil { - return err - } - - t.ContentLength, err = fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding) - if err != nil { - return err - } - - // Trailer - t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding) - if err != nil { - return err - } - - // If there is no Content-Length or chunked Transfer-Encoding on a *Response - // and the status is not 1xx, 204 or 304, then the body is unbounded. - // See RFC2616, section 4.4. - switch msg.(type) { - case *Response: - if t.ContentLength == -1 && - !chunked(t.TransferEncoding) && - bodyAllowedForStatus(t.StatusCode) { - // Unbounded body. - t.Close = true - } - } - - // Prepare body reader. ContentLength < 0 means chunked encoding - // or close connection when finished, since multipart is not supported yet - switch { - case chunked(t.TransferEncoding): - t.Body = &body{Reader: NewChunkedReader(r), hdr: msg, r: r, closing: t.Close} - case t.ContentLength >= 0: - // TODO: limit the Content-Length. This is an easy DoS vector. - t.Body = &body{Reader: io.LimitReader(r, t.ContentLength), closing: t.Close} - default: - // t.ContentLength < 0, i.e. "Content-Length" not mentioned in header - if t.Close { - // Close semantics (i.e. HTTP/1.0) - t.Body = &body{Reader: r, closing: t.Close} - } else { - // Persistent connection (i.e. HTTP/1.1) - t.Body = &body{Reader: io.LimitReader(r, 0), closing: t.Close} - } - } - - // Unify output - switch rr := msg.(type) { - case *Request: - rr.Body = t.Body - rr.ContentLength = t.ContentLength - rr.TransferEncoding = t.TransferEncoding - rr.Close = t.Close - rr.Trailer = t.Trailer - case *Response: - rr.Body = t.Body - rr.ContentLength = t.ContentLength - rr.TransferEncoding = t.TransferEncoding - rr.Close = t.Close - rr.Trailer = t.Trailer - } - - return nil -} - -// Checks whether chunked is part of the encodings stack -func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } - -// Checks whether the encoding is explicitly "identity". 
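readTransfer above chooses the body reader from the sanitized transfer parameters: chunked encoding wins, then an explicit Content-Length, and otherwise the body is either delimited by connection close or known to be empty. A stand-alone sketch of that selection; httputil.NewChunkedReader from today's standard library plays the role of this package's NewChunkedReader:

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "net/http/httputil"
        "strings"
    )

    func bodyReader(r *bufio.Reader, chunked bool, contentLength int64, closeDelimited bool) io.Reader {
        switch {
        case chunked:
            return httputil.NewChunkedReader(r)
        case contentLength >= 0:
            return io.LimitReader(r, contentLength)
        case closeDelimited:
            return r // HTTP/1.0 style: read until the peer closes
        default:
            return io.LimitReader(r, 0) // keep-alive and no length: no body
        }
    }

    func main() {
        wire := "5\r\nhello\r\n0\r\n\r\n" // one chunked body on the wire
        br := bufio.NewReader(strings.NewReader(wire))
        b, _ := io.ReadAll(bodyReader(br, true, -1, false))
        fmt.Printf("%q\n", b) // "hello"
    }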
-func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" } - -// Sanitize transfer encoding -func fixTransferEncoding(requestMethod string, header Header) ([]string, os.Error) { - raw, present := header["Transfer-Encoding"] - if !present { - return nil, nil - } - - header["Transfer-Encoding"] = nil, false - - // Head responses have no bodies, so the transfer encoding - // should be ignored. - if requestMethod == "HEAD" { - return nil, nil - } - - encodings := strings.Split(raw[0], ",") - te := make([]string, 0, len(encodings)) - // TODO: Even though we only support "identity" and "chunked" - // encodings, the loop below is designed with foresight. One - // invariant that must be maintained is that, if present, - // chunked encoding must always come first. - for _, encoding := range encodings { - encoding = strings.ToLower(strings.TrimSpace(encoding)) - // "identity" encoding is not recored - if encoding == "identity" { - break - } - if encoding != "chunked" { - return nil, &badStringError{"unsupported transfer encoding", encoding} - } - te = te[0 : len(te)+1] - te[len(te)-1] = encoding - } - if len(te) > 1 { - return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")} - } - if len(te) > 0 { - // Chunked encoding trumps Content-Length. See RFC 2616 - // Section 4.4. Currently len(te) > 0 implies chunked - // encoding. - header["Content-Length"] = nil, false - return te, nil - } - - return nil, nil -} - -// Determine the expected body length, using RFC 2616 Section 4.4. This -// function is not a method, because ultimately it should be shared by -// ReadResponse and ReadRequest. -func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, os.Error) { - - // Logic based on response type or status - if noBodyExpected(requestMethod) { - return 0, nil - } - if status/100 == 1 { - return 0, nil - } - switch status { - case 204, 304: - return 0, nil - } - - // Logic based on Transfer-Encoding - if chunked(te) { - return -1, nil - } - - // Logic based on Content-Length - cl := strings.TrimSpace(header.Get("Content-Length")) - if cl != "" { - n, err := strconv.Atoi64(cl) - if err != nil || n < 0 { - return -1, &badStringError{"bad Content-Length", cl} - } - return n, nil - } else { - header.Del("Content-Length") - } - - if !isResponse && requestMethod == "GET" { - // RFC 2616 doesn't explicitly permit nor forbid an - // entity-body on a GET request so we permit one if - // declared, but we default to 0 here (not -1 below) - // if there's no mention of a body. - return 0, nil - } - - // Logic based on media type. The purpose of the following code is just - // to detect whether the unsupported "multipart/byteranges" is being - // used. A proper Content-Type parser is needed in the future. - if strings.Contains(strings.ToLower(header.Get("Content-Type")), "multipart/byteranges") { - return -1, ErrNotSupported - } - - // Body-EOF logic based on other methods (like closing, or chunked coding) - return -1, nil -} - -// Determine whether to hang up after sending a request and body, or -// receiving a response and body -// 'header' is the request headers -func shouldClose(major, minor int, header Header) bool { - if major < 1 { - return true - } else if major == 1 && minor == 0 { - if !strings.Contains(strings.ToLower(header.Get("Connection")), "keep-alive") { - return true - } - return false - } else { - // TODO: Should split on commas, toss surrounding white space, - // and check each field. 
- if strings.ToLower(header.Get("Connection")) == "close" { - header.Del("Connection") - return true - } - } - return false -} - -// Parse the trailer header -func fixTrailer(header Header, te []string) (Header, os.Error) { - raw := header.Get("Trailer") - if raw == "" { - return nil, nil - } - - header.Del("Trailer") - trailer := make(Header) - keys := strings.Split(raw, ",") - for _, key := range keys { - key = CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - return nil, &badStringError{"bad trailer key", key} - } - trailer.Del(key) - } - if len(trailer) == 0 { - return nil, nil - } - if !chunked(te) { - // Trailer and no chunking - return nil, ErrUnexpectedTrailer - } - return trailer, nil -} - -// body turns a Reader into a ReadCloser. -// Close ensures that the body has been fully read -// and then reads the trailer if necessary. -type body struct { - io.Reader - hdr interface{} // non-nil (Response or Request) value means read trailer - r *bufio.Reader // underlying wire-format reader for the trailer - closing bool // is the connection to be closed after reading body? - closed bool -} - -// ErrBodyReadAfterClose is returned when reading a Request Body after -// the body has been closed. This typically happens when the body is -// read after an HTTP Handler calls WriteHeader or Write on its -// ResponseWriter. -var ErrBodyReadAfterClose = os.NewError("http: invalid Read on closed request Body") - -func (b *body) Read(p []byte) (n int, err os.Error) { - if b.closed { - return 0, ErrBodyReadAfterClose - } - return b.Reader.Read(p) -} - -func (b *body) Close() os.Error { - if b.closed { - return nil - } - defer func() { - b.closed = true - }() - if b.hdr == nil && b.closing { - // no trailer and closing the connection next. - // no point in reading to EOF. - return nil - } - - if _, err := io.Copy(ioutil.Discard, b); err != nil { - return err - } - - if b.hdr == nil { // not reading trailer - return nil - } - - // TODO(petar): Put trailer reader code here - - return nil -} diff --git a/src/pkg/http/transport.go b/src/pkg/http/transport.go deleted file mode 100644 index 4302ffab1..000000000 --- a/src/pkg/http/transport.go +++ /dev/null @@ -1,717 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bufio" - "compress/gzip" - "crypto/tls" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "strings" - "sync" - "url" -) - -// DefaultTransport is the default implementation of Transport and is -// used by DefaultClient. It establishes a new network connection for -// each call to Do and uses HTTP proxies as directed by the -// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy) -// environment variables. -var DefaultTransport RoundTripper = &Transport{Proxy: ProxyFromEnvironment} - -// DefaultMaxIdleConnsPerHost is the default value of Transport's -// MaxIdleConnsPerHost. -const DefaultMaxIdleConnsPerHost = 2 - -// Transport is an implementation of RoundTripper that supports http, -// https, and http proxies (for either http or https with CONNECT). -// Transport can also cache connections for future re-use. 
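body.Close above drains any unread bytes before returning so that a keep-alive connection is left positioned at the start of the next message. The same drain-on-close idiom, sketched with the modern io.Discard in place of ioutil.Discard:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    type drainCloser struct {
        io.Reader
        closed bool
    }

    func (d *drainCloser) Close() error {
        if d.closed {
            return nil
        }
        d.closed = true
        // Consume whatever the caller did not read so the underlying
        // connection could be reused for the next response.
        _, err := io.Copy(io.Discard, d.Reader)
        return err
    }

    func main() {
        d := &drainCloser{Reader: strings.NewReader("unread remainder of a body")}
        fmt.Println(d.Close()) // <nil>; the reader is now at EOF
    }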
-type Transport struct { - lk sync.Mutex - idleConn map[string][]*persistConn - altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper - - // TODO: tunable on global max cached connections - // TODO: tunable on timeout on cached connections - // TODO: optional pipelining - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*Request) (*url.URL, os.Error) - - // Dial specifies the dial function for creating TCP - // connections. - // If Dial is nil, net.Dial is used. - Dial func(net, addr string) (c net.Conn, err os.Error) - - DisableKeepAlives bool - DisableCompression bool - - // MaxIdleConnsPerHost, if non-zero, controls the maximum idle - // (keep-alive) to keep to keep per-host. If zero, - // DefaultMaxIdleConnsPerHost is used. - MaxIdleConnsPerHost int -} - -// ProxyFromEnvironment returns the URL of the proxy to use for a -// given request, as indicated by the environment variables -// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy). -// Either URL or an error is returned. -func ProxyFromEnvironment(req *Request) (*url.URL, os.Error) { - proxy := getenvEitherCase("HTTP_PROXY") - if proxy == "" { - return nil, nil - } - if !useProxy(canonicalAddr(req.URL)) { - return nil, nil - } - proxyURL, err := url.ParseRequest(proxy) - if err != nil { - return nil, os.NewError("invalid proxy address") - } - if proxyURL.Host == "" { - proxyURL, err = url.ParseRequest("http://" + proxy) - if err != nil { - return nil, os.NewError("invalid proxy address") - } - } - return proxyURL, nil -} - -// ProxyURL returns a proxy function (for use in a Transport) -// that always returns the same URL. -func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, os.Error) { - return func(*Request) (*url.URL, os.Error) { - return fixedURL, nil - } -} - -// RoundTrip implements the RoundTripper interface. -func (t *Transport) RoundTrip(req *Request) (resp *Response, err os.Error) { - if req.URL == nil { - if req.URL, err = url.Parse(req.RawURL); err != nil { - return - } - } - if req.URL.Scheme != "http" && req.URL.Scheme != "https" { - t.lk.Lock() - var rt RoundTripper - if t.altProto != nil { - rt = t.altProto[req.URL.Scheme] - } - t.lk.Unlock() - if rt == nil { - return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme} - } - return rt.RoundTrip(req) - } - - cm, err := t.connectMethodForRequest(req) - if err != nil { - return nil, err - } - - // Get the cached or newly-created connection to either the - // host (for http or https), the http proxy, or the http proxy - // pre-CONNECTed to https server. In any case, we'll be ready - // to send it requests. - pconn, err := t.getConn(cm) - if err != nil { - return nil, err - } - - return pconn.roundTrip(req) -} - -// RegisterProtocol registers a new protocol with scheme. -// The Transport will pass requests using the given scheme to rt. -// It is rt's responsibility to simulate HTTP request semantics. -// -// RegisterProtocol can be used by other packages to provide -// implementations of protocol schemes like "ftp" or "file". 
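The Proxy hook above is just a function from request to proxy URL, and ProxyURL wraps a fixed URL in that shape. The pattern survives unchanged in today's net/http, which this sketch uses; the proxy address is made up:

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
    )

    func main() {
        proxy, err := url.Parse("http://proxy.example.com:3128") // hypothetical proxy
        if err != nil {
            panic(err)
        }
        tr := &http.Transport{Proxy: http.ProxyURL(proxy)}
        client := &http.Client{Transport: tr}
        fmt.Printf("requests through %T will be routed via %s\n", client.Transport, proxy)
    }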
-func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) { - if scheme == "http" || scheme == "https" { - panic("protocol " + scheme + " already registered") - } - t.lk.Lock() - defer t.lk.Unlock() - if t.altProto == nil { - t.altProto = make(map[string]RoundTripper) - } - if _, exists := t.altProto[scheme]; exists { - panic("protocol " + scheme + " already registered") - } - t.altProto[scheme] = rt -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle in -// a "keep-alive" state. It does not interrupt any connections currently -// in use. -func (t *Transport) CloseIdleConnections() { - t.lk.Lock() - defer t.lk.Unlock() - if t.idleConn == nil { - return - } - for _, conns := range t.idleConn { - for _, pconn := range conns { - pconn.close() - } - } - t.idleConn = nil -} - -// -// Private implementation past this point. -// - -func getenvEitherCase(k string) string { - if v := os.Getenv(strings.ToUpper(k)); v != "" { - return v - } - return os.Getenv(strings.ToLower(k)) -} - -func (t *Transport) connectMethodForRequest(req *Request) (*connectMethod, os.Error) { - cm := &connectMethod{ - targetScheme: req.URL.Scheme, - targetAddr: canonicalAddr(req.URL), - } - if t.Proxy != nil { - var err os.Error - cm.proxyURL, err = t.Proxy(req) - if err != nil { - return nil, err - } - } - return cm, nil -} - -// proxyAuth returns the Proxy-Authorization header to set -// on requests, if applicable. -func (cm *connectMethod) proxyAuth() string { - if cm.proxyURL == nil { - return "" - } - proxyInfo := cm.proxyURL.RawUserinfo - if proxyInfo != "" { - return "Basic " + base64.URLEncoding.EncodeToString([]byte(proxyInfo)) - } - return "" -} - -func (t *Transport) putIdleConn(pconn *persistConn) { - t.lk.Lock() - defer t.lk.Unlock() - if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 { - pconn.close() - return - } - if pconn.isBroken() { - return - } - key := pconn.cacheKey - max := t.MaxIdleConnsPerHost - if max == 0 { - max = DefaultMaxIdleConnsPerHost - } - if len(t.idleConn[key]) >= max { - pconn.close() - return - } - t.idleConn[key] = append(t.idleConn[key], pconn) -} - -func (t *Transport) getIdleConn(cm *connectMethod) (pconn *persistConn) { - t.lk.Lock() - defer t.lk.Unlock() - if t.idleConn == nil { - t.idleConn = make(map[string][]*persistConn) - } - key := cm.String() - for { - pconns, ok := t.idleConn[key] - if !ok { - return nil - } - if len(pconns) == 1 { - pconn = pconns[0] - t.idleConn[key] = nil, false - } else { - // 2 or more cached connections; pop last - // TODO: queue? - pconn = pconns[len(pconns)-1] - t.idleConn[key] = pconns[0 : len(pconns)-1] - } - if !pconn.isBroken() { - return - } - } - return -} - -func (t *Transport) dial(network, addr string) (c net.Conn, err os.Error) { - if t.Dial != nil { - return t.Dial(network, addr) - } - return net.Dial(network, addr) -} - -// getConn dials and creates a new persistConn to the target as -// specified in the connectMethod. This includes doing a proxy CONNECT -// and/or setting up TLS. If this doesn't return an error, the persistConn -// is ready to write requests to. 
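RegisterProtocol above lets another package claim a whole URL scheme by supplying its own RoundTripper, and the method still exists on today's net/http Transport. A sketch with a made-up "echo" scheme whose RoundTripper fabricates a response entirely in memory:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "strings"
    )

    type echoRT struct{}

    func (echoRT) RoundTrip(req *http.Request) (*http.Response, error) {
        body := "echo: " + req.URL.String()
        return &http.Response{
            StatusCode:    http.StatusOK,
            Proto:         "HTTP/1.1",
            ProtoMajor:    1,
            ProtoMinor:    1,
            Header:        make(http.Header),
            Body:          io.NopCloser(strings.NewReader(body)),
            ContentLength: int64(len(body)),
            Request:       req,
        }, nil
    }

    func main() {
        tr := &http.Transport{}
        tr.RegisterProtocol("echo", echoRT{})
        client := &http.Client{Transport: tr}
        resp, err := client.Get("echo://ping")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        b, _ := io.ReadAll(resp.Body)
        fmt.Println(string(b)) // echo: echo://ping
    }

This is the same shape the standard library documents for NewFileTransport, which registers a "file" scheme on a Transport and then drives it through an ordinary Client.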
-func (t *Transport) getConn(cm *connectMethod) (*persistConn, os.Error) { - if pc := t.getIdleConn(cm); pc != nil { - return pc, nil - } - - conn, err := t.dial("tcp", cm.addr()) - if err != nil { - if cm.proxyURL != nil { - err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) - } - return nil, err - } - - pa := cm.proxyAuth() - - pconn := &persistConn{ - t: t, - cacheKey: cm.String(), - conn: conn, - reqch: make(chan requestAndChan, 50), - } - newClientConnFunc := NewClientConn - - switch { - case cm.proxyURL == nil: - // Do nothing. - case cm.targetScheme == "http": - newClientConnFunc = NewProxyClientConn - if pa != "" { - pconn.mutateRequestFunc = func(req *Request) { - if req.Header == nil { - req.Header = make(Header) - } - req.Header.Set("Proxy-Authorization", pa) - } - } - case cm.targetScheme == "https": - connectReq := &Request{ - Method: "CONNECT", - RawURL: cm.targetAddr, - Host: cm.targetAddr, - Header: make(Header), - } - if pa != "" { - connectReq.Header.Set("Proxy-Authorization", pa) - } - connectReq.Write(conn) - - // Read response. - // Okay to use and discard buffered reader here, because - // TLS server will not speak until spoken to. - br := bufio.NewReader(conn) - resp, err := ReadResponse(br, connectReq) - if err != nil { - conn.Close() - return nil, err - } - if resp.StatusCode != 200 { - f := strings.SplitN(resp.Status, " ", 2) - conn.Close() - return nil, os.NewError(f[1]) - } - } - - if cm.targetScheme == "https" { - // Initiate TLS and check remote host name against certificate. - conn = tls.Client(conn, nil) - if err = conn.(*tls.Conn).Handshake(); err != nil { - return nil, err - } - if err = conn.(*tls.Conn).VerifyHostname(cm.tlsHost()); err != nil { - return nil, err - } - pconn.conn = conn - } - - pconn.br = bufio.NewReader(pconn.conn) - pconn.cc = newClientConnFunc(conn, pconn.br) - go pconn.readLoop() - return pconn, nil -} - -// useProxy returns true if requests to addr should use a proxy, -// according to the NO_PROXY or no_proxy environment variable. -// addr is always a canonicalAddr with a host and port. -func useProxy(addr string) bool { - if len(addr) == 0 { - return true - } - host, _, err := net.SplitHostPort(addr) - if err != nil { - return false - } - if host == "localhost" { - return false - } - if ip := net.ParseIP(host); ip != nil { - if ip.IsLoopback() { - return false - } - } - - no_proxy := getenvEitherCase("NO_PROXY") - if no_proxy == "*" { - return false - } - - addr = strings.ToLower(strings.TrimSpace(addr)) - if hasPort(addr) { - addr = addr[:strings.LastIndex(addr, ":")] - } - - for _, p := range strings.Split(no_proxy, ",") { - p = strings.ToLower(strings.TrimSpace(p)) - if len(p) == 0 { - continue - } - if hasPort(p) { - p = p[:strings.LastIndex(p, ":")] - } - if addr == p || (p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:])) { - return false - } - } - return true -} - -// connectMethod is the map key (in its String form) for keeping persistent -// TCP connections alive for subsequent HTTP requests. -// -// A connect method may be of the following types: -// -// Cache key form Description -// ----------------- ------------------------- -// ||http|foo.com http directly to server, no proxy -// ||https|foo.com https directly to server, no proxy -// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com -// http://proxy.com|http http to proxy, http to anywhere after that -// -// Note: no support to https to the proxy yet. 
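useProxy above implements the NO_PROXY rules: localhost and loopback addresses always bypass the proxy, "*" disables proxying entirely, and an entry with a leading dot matches a whole domain suffix. A stand-alone sketch of the same matching, inverted here into a "should bypass" predicate (host is assumed to already have its port stripped):

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    func bypassProxy(host, noProxy string) bool {
        if host == "localhost" {
            return true
        }
        if ip := net.ParseIP(host); ip != nil && ip.IsLoopback() {
            return true
        }
        if noProxy == "*" {
            return true
        }
        host = strings.ToLower(host)
        for _, p := range strings.Split(noProxy, ",") {
            p = strings.ToLower(strings.TrimSpace(p))
            if p == "" {
                continue
            }
            // Exact match, or a leading dot matching a domain suffix.
            if host == p || (strings.HasPrefix(p, ".") && (strings.HasSuffix(host, p) || host == p[1:])) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(bypassProxy("intranet.corp.example", ".corp.example")) // true
        fmt.Println(bypassProxy("www.google.com", ".corp.example"))        // false
    }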
-// -type connectMethod struct { - proxyURL *url.URL // nil for no proxy, else full proxy URL - targetScheme string // "http" or "https" - targetAddr string // Not used if proxy + http targetScheme (4th example in table) -} - -func (ck *connectMethod) String() string { - proxyStr := "" - if ck.proxyURL != nil { - proxyStr = ck.proxyURL.String() - } - return strings.Join([]string{proxyStr, ck.targetScheme, ck.targetAddr}, "|") -} - -// addr returns the first hop "host:port" to which we need to TCP connect. -func (cm *connectMethod) addr() string { - if cm.proxyURL != nil { - return canonicalAddr(cm.proxyURL) - } - return cm.targetAddr -} - -// tlsHost returns the host name to match against the peer's -// TLS certificate. -func (cm *connectMethod) tlsHost() string { - h := cm.targetAddr - if hasPort(h) { - h = h[:strings.LastIndex(h, ":")] - } - return h -} - -type readResult struct { - res *Response // either res or err will be set - err os.Error -} - -type writeRequest struct { - // Set by client (in pc.roundTrip) - req *Request - resch chan *readResult - - // Set by writeLoop if an error writing headers. - writeErr os.Error -} - -// persistConn wraps a connection, usually a persistent one -// (but may be used for non-keep-alive requests as well) -type persistConn struct { - t *Transport - cacheKey string // its connectMethod.String() - conn net.Conn - cc *ClientConn - br *bufio.Reader - reqch chan requestAndChan // written by roundTrip(); read by readLoop() - mutateRequestFunc func(*Request) // nil or func to modify each outbound request - - lk sync.Mutex // guards numExpectedResponses and broken - numExpectedResponses int - broken bool // an error has happened on this connection; marked broken so it's not reused. -} - -func (pc *persistConn) isBroken() bool { - pc.lk.Lock() - defer pc.lk.Unlock() - return pc.broken -} - -func (pc *persistConn) expectingResponse() bool { - pc.lk.Lock() - defer pc.lk.Unlock() - return pc.numExpectedResponses > 0 -} - -func (pc *persistConn) readLoop() { - alive := true - for alive { - pb, err := pc.br.Peek(1) - if err != nil { - if (err == os.EOF || err == os.EINVAL) && !pc.expectingResponse() { - // Remote side closed on us. (We probably hit their - // max idle timeout) - pc.close() - return - } - } - if !pc.expectingResponse() { - log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", - string(pb), err) - pc.close() - return - } - - rc := <-pc.reqch - resp, err := pc.cc.readUsing(rc.req, func(buf *bufio.Reader, forReq *Request) (*Response, os.Error) { - resp, err := ReadResponse(buf, forReq) - if err != nil || resp.ContentLength == 0 { - return resp, err - } - if rc.addedGzip { - forReq.Header.Del("Accept-Encoding") - } - if rc.addedGzip && resp.Header.Get("Content-Encoding") == "gzip" { - resp.Header.Del("Content-Encoding") - resp.Header.Del("Content-Length") - resp.ContentLength = -1 - gzReader, err := gzip.NewReader(resp.Body) - if err != nil { - pc.close() - return nil, err - } - resp.Body = &readFirstCloseBoth{&discardOnCloseReadCloser{gzReader}, resp.Body} - } - resp.Body = &bodyEOFSignal{body: resp.Body} - return resp, err - }) - - if err == ErrPersistEOF { - // Succeeded, but we can't send any more - // persistent connections on this again. We - // hide this error to upstream callers. 
- alive = false - err = nil - } else if err != nil || rc.req.Close { - alive = false - } - - hasBody := resp != nil && resp.ContentLength != 0 - var waitForBodyRead chan bool - if alive { - if hasBody { - waitForBodyRead = make(chan bool) - resp.Body.(*bodyEOFSignal).fn = func() { - pc.t.putIdleConn(pc) - waitForBodyRead <- true - } - } else { - // When there's no response body, we immediately - // reuse the TCP connection (putIdleConn), but - // we need to prevent ClientConn.Read from - // closing the Response.Body on the next - // loop, otherwise it might close the body - // before the client code has had a chance to - // read it (even though it'll just be 0, EOF). - pc.cc.lk.Lock() - pc.cc.lastbody = nil - pc.cc.lk.Unlock() - - pc.t.putIdleConn(pc) - } - } - - rc.ch <- responseAndError{resp, err} - - // Wait for the just-returned response body to be fully consumed - // before we race and peek on the underlying bufio reader. - if waitForBodyRead != nil { - <-waitForBodyRead - } - } -} - -type responseAndError struct { - res *Response - err os.Error -} - -type requestAndChan struct { - req *Request - ch chan responseAndError - - // did the Transport (as opposed to the client code) add an - // Accept-Encoding gzip header? only if it we set it do - // we transparently decode the gzip. - addedGzip bool -} - -func (pc *persistConn) roundTrip(req *Request) (resp *Response, err os.Error) { - if pc.mutateRequestFunc != nil { - pc.mutateRequestFunc(req) - } - - // Ask for a compressed version if the caller didn't set their - // own value for Accept-Encoding. We only attempted to - // uncompress the gzip stream if we were the layer that - // requested it. - requestedGzip := false - if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" { - // Request gzip only, not deflate. Deflate is ambiguous and - // as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - requestedGzip = true - req.Header.Set("Accept-Encoding", "gzip") - } - - pc.lk.Lock() - pc.numExpectedResponses++ - pc.lk.Unlock() - - err = pc.cc.Write(req) - if err != nil { - pc.close() - return - } - - ch := make(chan responseAndError, 1) - pc.reqch <- requestAndChan{req, ch, requestedGzip} - re := <-ch - pc.lk.Lock() - pc.numExpectedResponses-- - pc.lk.Unlock() - - return re.res, re.err -} - -func (pc *persistConn) close() { - pc.lk.Lock() - defer pc.lk.Unlock() - pc.broken = true - pc.cc.Close() - pc.conn.Close() - pc.mutateRequestFunc = nil -} - -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} - -func responseIsKeepAlive(res *Response) bool { - // TODO: implement. for now just always shutting down the connection. - return false -} - -// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most -// once, right before the final Read() or Close() call returns, but after -// EOF has been seen. 
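The gzip handling above is deliberately asymmetric: the transport only transparently decompresses when it was the one that added Accept-Encoding: gzip, and in that case it strips Content-Encoding and Content-Length and swaps the body for a gzip reader. A minimal sketch of the body swap itself:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"
    )

    func main() {
        // Pretend this is a response body the server compressed.
        var wire bytes.Buffer
        zw := gzip.NewWriter(&wire)
        zw.Write([]byte("hello, gzip"))
        zw.Close()

        var body io.ReadCloser = io.NopCloser(&wire)
        // What readLoop does when the transport itself asked for gzip and the
        // response arrives with Content-Encoding: gzip:
        gz, err := gzip.NewReader(body)
        if err != nil {
            panic(err)
        }
        body = io.NopCloser(gz) // ContentLength would be reset to -1 here
        b, _ := io.ReadAll(body)
        fmt.Printf("%q\n", b) // "hello, gzip"
    }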
-type bodyEOFSignal struct { - body io.ReadCloser - fn func() - isClosed bool -} - -func (es *bodyEOFSignal) Read(p []byte) (n int, err os.Error) { - n, err = es.body.Read(p) - if es.isClosed && n > 0 { - panic("http: unexpected bodyEOFSignal Read after Close; see issue 1725") - } - if err == os.EOF && es.fn != nil { - es.fn() - es.fn = nil - } - return -} - -func (es *bodyEOFSignal) Close() (err os.Error) { - if es.isClosed { - return nil - } - es.isClosed = true - err = es.body.Close() - if err == nil && es.fn != nil { - es.fn() - es.fn = nil - } - return -} - -type readFirstCloseBoth struct { - io.ReadCloser - io.Closer -} - -func (r *readFirstCloseBoth) Close() os.Error { - if err := r.ReadCloser.Close(); err != nil { - r.Closer.Close() - return err - } - if err := r.Closer.Close(); err != nil { - return err - } - return nil -} - -// discardOnCloseReadCloser consumes all its input on Close. -type discardOnCloseReadCloser struct { - io.ReadCloser -} - -func (d *discardOnCloseReadCloser) Close() os.Error { - io.Copy(ioutil.Discard, d.ReadCloser) // ignore errors; likely invalid or already closed - return d.ReadCloser.Close() -} diff --git a/src/pkg/http/transport_test.go b/src/pkg/http/transport_test.go deleted file mode 100644 index eafde7f89..000000000 --- a/src/pkg/http/transport_test.go +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Tests for transport.go - -package http_test - -import ( - "bytes" - "compress/gzip" - "crypto/rand" - "fmt" - . "http" - "http/httptest" - "io" - "io/ioutil" - "os" - "strconv" - "strings" - "testing" - "time" - "url" -) - -// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close -// and then verify that the final 2 responses get errors back. - -// hostPortHandler writes back the client's "host:port". -var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) { - if r.FormValue("close") == "true" { - w.Header().Set("Connection", "close") - } - w.Write([]byte(r.RemoteAddr)) -}) - -// Two subsequent requests and verify their response is the same. -// The response from the server is our own IP:port -func TestTransportKeepAlives(t *testing.T) { - ts := httptest.NewServer(hostPortHandler) - defer ts.Close() - - for _, disableKeepAlive := range []bool{false, true} { - tr := &Transport{DisableKeepAlives: disableKeepAlive} - c := &Client{Transport: tr} - - fetch := func(n int) string { - res, err := c.Get(ts.URL) - if err != nil { - t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err) - } - return string(body) - } - - body1 := fetch(1) - body2 := fetch(2) - - bodiesDiffer := body1 != body2 - if bodiesDiffer != disableKeepAlive { - t.Errorf("error in disableKeepAlive=%v. 
unexpected bodiesDiffer=%v; body1=%q; body2=%q", - disableKeepAlive, bodiesDiffer, body1, body2) - } - } -} - -func TestTransportConnectionCloseOnResponse(t *testing.T) { - ts := httptest.NewServer(hostPortHandler) - defer ts.Close() - - for _, connectionClose := range []bool{false, true} { - tr := &Transport{} - c := &Client{Transport: tr} - - fetch := func(n int) string { - req := new(Request) - var err os.Error - req.URL, err = url.Parse(ts.URL + fmt.Sprintf("?close=%v", connectionClose)) - if err != nil { - t.Fatalf("URL parse error: %v", err) - } - req.Method = "GET" - req.Proto = "HTTP/1.1" - req.ProtoMajor = 1 - req.ProtoMinor = 1 - - res, err := c.Do(req) - if err != nil { - t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err) - } - body, err := ioutil.ReadAll(res.Body) - defer res.Body.Close() - if err != nil { - t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err) - } - return string(body) - } - - body1 := fetch(1) - body2 := fetch(2) - bodiesDiffer := body1 != body2 - if bodiesDiffer != connectionClose { - t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q", - connectionClose, bodiesDiffer, body1, body2) - } - } -} - -func TestTransportConnectionCloseOnRequest(t *testing.T) { - ts := httptest.NewServer(hostPortHandler) - defer ts.Close() - - for _, connectionClose := range []bool{false, true} { - tr := &Transport{} - c := &Client{Transport: tr} - - fetch := func(n int) string { - req := new(Request) - var err os.Error - req.URL, err = url.Parse(ts.URL) - if err != nil { - t.Fatalf("URL parse error: %v", err) - } - req.Method = "GET" - req.Proto = "HTTP/1.1" - req.ProtoMajor = 1 - req.ProtoMinor = 1 - req.Close = connectionClose - - res, err := c.Do(req) - if err != nil { - t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err) - } - return string(body) - } - - body1 := fetch(1) - body2 := fetch(2) - bodiesDiffer := body1 != body2 - if bodiesDiffer != connectionClose { - t.Errorf("error in connectionClose=%v. 
unexpected bodiesDiffer=%v; body1=%q; body2=%q", - connectionClose, bodiesDiffer, body1, body2) - } - } -} - -func TestTransportIdleCacheKeys(t *testing.T) { - ts := httptest.NewServer(hostPortHandler) - defer ts.Close() - - tr := &Transport{DisableKeepAlives: false} - c := &Client{Transport: tr} - - if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { - t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g) - } - - resp, err := c.Get(ts.URL) - if err != nil { - t.Error(err) - } - ioutil.ReadAll(resp.Body) - - keys := tr.IdleConnKeysForTesting() - if e, g := 1, len(keys); e != g { - t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g) - } - - if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e { - t.Errorf("Expected idle cache key %q; got %q", e, keys[0]) - } - - tr.CloseIdleConnections() - if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { - t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g) - } -} - -func TestTransportMaxPerHostIdleConns(t *testing.T) { - resch := make(chan string) - gotReq := make(chan bool) - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - gotReq <- true - msg := <-resch - _, err := w.Write([]byte(msg)) - if err != nil { - t.Fatalf("Write: %v", err) - } - })) - defer ts.Close() - maxIdleConns := 2 - tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns} - c := &Client{Transport: tr} - - // Start 3 outstanding requests and wait for the server to get them. - // Their responses will hang until we we write to resch, though. - donech := make(chan bool) - doReq := func() { - resp, err := c.Get(ts.URL) - if err != nil { - t.Error(err) - } - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("ReadAll: %v", err) - } - donech <- true - } - go doReq() - <-gotReq - go doReq() - <-gotReq - go doReq() - <-gotReq - - if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { - t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g) - } - - resch <- "res1" - <-donech - keys := tr.IdleConnKeysForTesting() - if e, g := 1, len(keys); e != g { - t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g) - } - cacheKey := "|http|" + ts.Listener.Addr().String() - if keys[0] != cacheKey { - t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0]) - } - if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g { - t.Errorf("after first response, expected %d idle conns; got %d", e, g) - } - - resch <- "res2" - <-donech - if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g { - t.Errorf("after second response, expected %d idle conns; got %d", e, g) - } - - resch <- "res3" - <-donech - if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g { - t.Errorf("after third response, still expected %d idle conns; got %d", e, g) - } -} - -func TestTransportServerClosingUnexpectedly(t *testing.T) { - ts := httptest.NewServer(hostPortHandler) - defer ts.Close() - - tr := &Transport{} - c := &Client{Transport: tr} - - fetch := func(n, retries int) string { - condFatalf := func(format string, arg ...interface{}) { - if retries <= 0 { - t.Fatalf(format, arg...) - } - t.Logf("retrying shortly after expected error: "+format, arg...) 
- time.Sleep(1e9 / int64(retries)) - } - for retries >= 0 { - retries-- - res, err := c.Get(ts.URL) - if err != nil { - condFatalf("error in req #%d, GET: %v", n, err) - continue - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - condFatalf("error in req #%d, ReadAll: %v", n, err) - continue - } - res.Body.Close() - return string(body) - } - panic("unreachable") - } - - body1 := fetch(1, 0) - body2 := fetch(2, 0) - - ts.CloseClientConnections() // surprise! - - // This test has an expected race. Sleeping for 25 ms prevents - // it on most fast machines, causing the next fetch() call to - // succeed quickly. But if we do get errors, fetch() will retry 5 - // times with some delays between. - time.Sleep(25e6) - - body3 := fetch(3, 5) - - if body1 != body2 { - t.Errorf("expected body1 and body2 to be equal") - } - if body2 == body3 { - t.Errorf("expected body2 and body3 to be different") - } -} - -// TestTransportHeadResponses verifies that we deal with Content-Lengths -// with no bodies properly -func TestTransportHeadResponses(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - if r.Method != "HEAD" { - panic("expected HEAD; got " + r.Method) - } - w.Header().Set("Content-Length", "123") - w.WriteHeader(200) - })) - defer ts.Close() - - tr := &Transport{DisableKeepAlives: false} - c := &Client{Transport: tr} - for i := 0; i < 2; i++ { - res, err := c.Head(ts.URL) - if err != nil { - t.Errorf("error on loop %d: %v", i, err) - } - if e, g := "123", res.Header.Get("Content-Length"); e != g { - t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g) - } - if e, g := int64(0), res.ContentLength; e != g { - t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g) - } - } -} - -// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding -// on responses to HEAD requests. 
-func TestTransportHeadChunkedResponse(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - if r.Method != "HEAD" { - panic("expected HEAD; got " + r.Method) - } - w.Header().Set("Transfer-Encoding", "chunked") // client should ignore - w.Header().Set("x-client-ipport", r.RemoteAddr) - w.WriteHeader(200) - })) - defer ts.Close() - - tr := &Transport{DisableKeepAlives: false} - c := &Client{Transport: tr} - - res1, err := c.Head(ts.URL) - if err != nil { - t.Fatalf("request 1 error: %v", err) - } - res2, err := c.Head(ts.URL) - if err != nil { - t.Fatalf("request 2 error: %v", err) - } - if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 { - t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2) - } -} - -func TestTransportNilURL(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - fmt.Fprintf(w, "Hi") - })) - defer ts.Close() - - req := new(Request) - req.URL = nil // what we're actually testing - req.Method = "GET" - req.RawURL = ts.URL - req.Proto = "HTTP/1.1" - req.ProtoMajor = 1 - req.ProtoMinor = 1 - req.Header = make(Header) - - tr := &Transport{} - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatalf("unexpected RoundTrip error: %v", err) - } - body, err := ioutil.ReadAll(res.Body) - if g, e := string(body), "Hi"; g != e { - t.Fatalf("Expected response body of %q; got %q", e, g) - } -} - -var roundTripTests = []struct { - accept string - expectAccept string - compressed bool -}{ - // Requests with no accept-encoding header use transparent compression - {"", "gzip", false}, - // Requests with other accept-encoding should pass through unmodified - {"foo", "foo", false}, - // Requests with accept-encoding == gzip should be passed through - {"gzip", "gzip", true}} - -// Test that the modification made to the Request by the RoundTripper is cleaned up -func TestRoundTripGzip(t *testing.T) { - const responseBody = "test response body" - ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { - accept := req.Header.Get("Accept-Encoding") - if expect := req.FormValue("expect_accept"); accept != expect { - t.Errorf("Accept-Encoding = %q, want %q", accept, expect) - } - if accept == "gzip" { - rw.Header().Set("Content-Encoding", "gzip") - gz, _ := gzip.NewWriter(rw) - gz.Write([]byte(responseBody)) - gz.Close() - } else { - rw.Header().Set("Content-Encoding", accept) - rw.Write([]byte(responseBody)) - } - })) - defer ts.Close() - - for i, test := range roundTripTests { - // Test basic request (no accept-encoding) - req, _ := NewRequest("GET", ts.URL+"?expect_accept="+test.expectAccept, nil) - req.Header.Set("Accept-Encoding", test.accept) - res, err := DefaultTransport.RoundTrip(req) - var body []byte - if test.compressed { - gzip, _ := gzip.NewReader(res.Body) - body, err = ioutil.ReadAll(gzip) - res.Body.Close() - } else { - body, err = ioutil.ReadAll(res.Body) - } - if err != nil { - t.Errorf("%d. Error: %q", i, err) - } else { - if g, e := string(body), responseBody; g != e { - t.Errorf("%d. body = %q; want %q", i, g, e) - } - if g, e := req.Header.Get("Accept-Encoding"), test.accept; g != e { - t.Errorf("%d. Accept-Encoding = %q; want %q", i, g, e) - } - if g, e := res.Header.Get("Content-Encoding"), test.accept; g != e { - t.Errorf("%d. 
Content-Encoding = %q; want %q", i, g, e) - } - } - } - -} - -func TestTransportGzip(t *testing.T) { - const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - const nRandBytes = 1024 * 1024 - ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { - if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e { - t.Errorf("Accept-Encoding = %q, want %q", g, e) - } - rw.Header().Set("Content-Encoding", "gzip") - if req.Method == "HEAD" { - return - } - - var w io.Writer = rw - var buf bytes.Buffer - if req.FormValue("chunked") == "0" { - w = &buf - defer io.Copy(rw, &buf) - defer func() { - rw.Header().Set("Content-Length", strconv.Itoa(buf.Len())) - }() - } - gz, _ := gzip.NewWriter(w) - gz.Write([]byte(testString)) - if req.FormValue("body") == "large" { - io.Copyn(gz, rand.Reader, nRandBytes) - } - gz.Close() - })) - defer ts.Close() - - for _, chunked := range []string{"1", "0"} { - c := &Client{Transport: &Transport{}} - - // First fetch something large, but only read some of it. - res, err := c.Get(ts.URL + "?body=large&chunked=" + chunked) - if err != nil { - t.Fatalf("large get: %v", err) - } - buf := make([]byte, len(testString)) - n, err := io.ReadFull(res.Body, buf) - if err != nil { - t.Fatalf("partial read of large response: size=%d, %v", n, err) - } - if e, g := testString, string(buf); e != g { - t.Errorf("partial read got %q, expected %q", g, e) - } - res.Body.Close() - // Read on the body, even though it's closed - n, err = res.Body.Read(buf) - if n != 0 || err == nil { - t.Errorf("expected error post-closed large Read; got = %d, %v", n, err) - } - - // Then something small. - res, err = c.Get(ts.URL + "?chunked=" + chunked) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if g, e := string(body), testString; g != e { - t.Fatalf("body = %q; want %q", g, e) - } - if g, e := res.Header.Get("Content-Encoding"), ""; g != e { - t.Fatalf("Content-Encoding = %q; want %q", g, e) - } - - // Read on the body after it's been fully read: - n, err = res.Body.Read(buf) - if n != 0 || err == nil { - t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err) - } - res.Body.Close() - n, err = res.Body.Read(buf) - if n != 0 || err == nil { - t.Errorf("expected Read error after Close; got %d, %v", n, err) - } - } - - // And a HEAD request too, because they're always weird. - c := &Client{Transport: &Transport{}} - res, err := c.Head(ts.URL) - if err != nil { - t.Fatalf("Head: %v", err) - } - if res.StatusCode != 200 { - t.Errorf("Head status=%d; want=200", res.StatusCode) - } -} - -func TestTransportProxy(t *testing.T) { - ch := make(chan string, 1) - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - ch <- "real server" - })) - defer ts.Close() - proxy := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - ch <- "proxy for " + r.URL.String() - })) - defer proxy.Close() - - pu, err := url.Parse(proxy.URL) - if err != nil { - t.Fatal(err) - } - c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}} - c.Head(ts.URL) - got := <-ch - want := "proxy for " + ts.URL + "/" - if got != want { - t.Errorf("want %q, got %q", want, got) - } -} - -// TestTransportGzipRecursive sends a gzip quine and checks that the -// client gets the same value back. This is more cute than anything, -// but checks that we don't recurse forever, and checks that -// Content-Encoding is removed. 
-func TestTransportGzipRecursive(t *testing.T) { - ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { - w.Header().Set("Content-Encoding", "gzip") - w.Write(rgz) - })) - defer ts.Close() - - c := &Client{Transport: &Transport{}} - res, err := c.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(body, rgz) { - t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x", - body, rgz) - } - if g, e := res.Header.Get("Content-Encoding"), ""; g != e { - t.Fatalf("Content-Encoding = %q; want %q", g, e) - } -} - -type fooProto struct{} - -func (fooProto) RoundTrip(req *Request) (*Response, os.Error) { - res := &Response{ - Status: "200 OK", - StatusCode: 200, - Header: make(Header), - Body: ioutil.NopCloser(strings.NewReader("You wanted " + req.URL.String())), - } - return res, nil -} - -func TestTransportAltProto(t *testing.T) { - tr := &Transport{} - c := &Client{Transport: tr} - tr.RegisterProtocol("foo", fooProto{}) - res, err := c.Get("foo://bar.com/path") - if err != nil { - t.Fatal(err) - } - bodyb, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - body := string(bodyb) - if e := "You wanted foo://bar.com/path"; body != e { - t.Errorf("got response %q, want %q", body, e) - } -} - -// rgz is a gzip quine that uncompresses to itself. -var rgz = []byte{ - 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, - 0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0, - 0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, - 0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, - 0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60, - 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2, - 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00, - 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, - 0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16, - 0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05, - 0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff, - 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00, - 0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, - 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, - 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, - 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, - 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, - 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, - 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff, - 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, - 0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, - 0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, - 0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, - 0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00, - 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00, - 0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00, - 0x00, 0x00, -} diff --git a/src/pkg/http/triv.go b/src/pkg/http/triv.go deleted file mode 100644 index a8fd99aa4..000000000 --- a/src/pkg/http/triv.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "bytes" - "expvar" - "flag" - "fmt" - "http" - "io" - "log" - "os" - "strconv" -) - -// hello world, the web server -var helloRequests = expvar.NewInt("hello-requests") - -func HelloServer(w http.ResponseWriter, req *http.Request) { - helloRequests.Add(1) - io.WriteString(w, "hello, world!\n") -} - -// Simple counter server. POSTing to it will set the value. -type Counter struct { - n int -} - -// This makes Counter satisfy the expvar.Var interface, so we can export -// it directly. -func (ctr *Counter) String() string { return fmt.Sprintf("%d", ctr.n) } - -func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case "GET": - ctr.n++ - case "POST": - buf := new(bytes.Buffer) - io.Copy(buf, req.Body) - body := buf.String() - if n, err := strconv.Atoi(body); err != nil { - fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body) - } else { - ctr.n = n - fmt.Fprint(w, "counter reset\n") - } - } - fmt.Fprintf(w, "counter = %d\n", ctr.n) -} - -// simple flag server -var booleanflag = flag.Bool("boolean", true, "another flag for testing") - -func FlagServer(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprint(w, "Flags:\n") - flag.VisitAll(func(f *flag.Flag) { - if f.Value.String() != f.DefValue { - fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue) - } else { - fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String()) - } - }) -} - -// simple argument server -func ArgServer(w http.ResponseWriter, req *http.Request) { - for _, s := range os.Args { - fmt.Fprint(w, s, " ") - } -} - -// a channel (just for the fun of it) -type Chan chan int - -func ChanCreate() Chan { - c := make(Chan) - go func(c Chan) { - for x := 0; ; x++ { - c <- x - } - }(c) - return c -} - -func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) { - io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch)) -} - -// exec a program, redirecting output -func DateServer(rw http.ResponseWriter, req *http.Request) { - rw.Header().Set("Content-Type", "text/plain; charset=utf-8") - r, w, err := os.Pipe() - if err != nil { - fmt.Fprintf(rw, "pipe: %s\n", err) - return - } - - p, err := os.StartProcess("/bin/date", []string{"date"}, &os.ProcAttr{Files: []*os.File{nil, w, w}}) - defer r.Close() - w.Close() - if err != nil { - fmt.Fprintf(rw, "fork/exec: %s\n", err) - return - } - defer p.Release() - io.Copy(rw, r) - wait, err := p.Wait(0) - if err != nil { - fmt.Fprintf(rw, "wait: %s\n", err) - return - } - if !wait.Exited() || wait.ExitStatus() != 0 { - fmt.Fprintf(rw, "date: %v\n", wait) - return - } -} - -func Logger(w http.ResponseWriter, req *http.Request) { - log.Print(req.URL.Raw) - w.WriteHeader(404) - w.Write([]byte("oops")) -} - -var webroot = flag.String("root", "/home/rsc", "web root directory") - -func main() { - flag.Parse() - - // The counter is published as a variable directly. - ctr := new(Counter) - http.Handle("/counter", ctr) - expvar.Publish("counter", ctr) - - http.Handle("/", http.HandlerFunc(Logger)) - http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot)))) - http.Handle("/flags", http.HandlerFunc(FlagServer)) - http.Handle("/args", http.HandlerFunc(ArgServer)) - http.Handle("/go/hello", http.HandlerFunc(HelloServer)) - http.Handle("/chan", ChanCreate()) - http.Handle("/date", http.HandlerFunc(DateServer)) - err := http.ListenAndServe(":12345", nil) - if err != nil { - log.Panicln("ListenAndServe:", err) - } -} |
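The deleted transport.go keys its idle-connection cache on connectMethod.String(), the "proxy|scheme|addr" string that TestTransportIdleCacheKeys and TestTransportMaxPerHostIdleConns match against ("|http|host:port" when no proxy is configured). The following is a rough present-day sketch of that key format only, not the original package's code: it uses net/url (the modern path for the pre-Go 1 "url" package shown above), and the helper name connectMethodKey is invented here for illustration.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// connectMethodKey mirrors the cache-key shape built by the deleted
// connectMethod.String(): "proxyURL|targetScheme|targetAddr", with an
// empty proxy component when no proxy is set. The function name is
// illustrative and does not exist in the original package.
func connectMethodKey(proxy *url.URL, targetScheme, targetAddr string) string {
	proxyStr := ""
	if proxy != nil {
		proxyStr = proxy.String()
	}
	return strings.Join([]string{proxyStr, targetScheme, targetAddr}, "|")
}

func main() {
	// No proxy: matches the key shape the deleted tests expect,
	// e.g. "|http|127.0.0.1:8080" in TestTransportIdleCacheKeys.
	fmt.Println(connectMethodKey(nil, "http", "127.0.0.1:8080"))

	// With a proxy, the full proxy URL leads the key.
	proxy, _ := url.Parse("http://proxy.example:3128")
	fmt.Println(connectMethodKey(proxy, "http", "example.com:80"))
}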