Diffstat (limited to 'src/pkg/net/http/header.go')
-rw-r--r--  src/pkg/net/http/header.go  135
1 file changed, 123 insertions, 12 deletions
diff --git a/src/pkg/net/http/header.go b/src/pkg/net/http/header.go
index b107c312d..f479b7b4e 100644
--- a/src/pkg/net/http/header.go
+++ b/src/pkg/net/http/header.go
@@ -5,11 +5,11 @@
package http
import (
- "fmt"
"io"
"net/textproto"
"sort"
"strings"
+ "time"
)
// A Header represents the key-value pairs in an HTTP header.
@@ -36,6 +36,14 @@ func (h Header) Get(key string) string {
return textproto.MIMEHeader(h).Get(key)
}
+// get is like Get, but key must already be in CanonicalHeaderKey form.
+func (h Header) get(key string) string {
+ if v := h[key]; len(v) > 0 {
+ return v[0]
+ }
+ return ""
+}
+
// Del deletes the values associated with key.
func (h Header) Del(key string) {
textproto.MIMEHeader(h).Del(key)
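
A quick illustration of the exported behavior around this hunk (a sketch, not part of the diff): Get canonicalizes its argument via CanonicalHeaderKey before the lookup, while the new unexported get assumes the caller already passes a canonical key and skips that work.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("content-type", "text/html") // stored under the canonical key "Content-Type"
	fmt.Println(h.Get("CONTENT-TYPE")) // "text/html": Get canonicalizes the key too
	fmt.Println(h["Content-Type"])     // [text/html]: direct map access needs the canonical form
	h.Del("content-type")
	fmt.Println(h.Get("Content-Type")) // ""
}
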
@@ -46,24 +54,87 @@ func (h Header) Write(w io.Writer) error {
return h.WriteSubset(w, nil)
}
+func (h Header) clone() Header {
+ h2 := make(Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+var timeFormats = []string{
+ TimeFormat,
+ time.RFC850,
+ time.ANSIC,
+}
+
+// ParseTime parses a time header (such as the Date: header),
+// trying each of the three formats allowed by HTTP/1.1:
+// TimeFormat, time.RFC850, and time.ANSIC.
+func ParseTime(text string) (t time.Time, err error) {
+ for _, layout := range timeFormats {
+ t, err = time.Parse(layout, text)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
+type writeStringer interface {
+ WriteString(string) (int, error)
+}
+
+// stringWriter implements WriteString on a Writer.
+type stringWriter struct {
+ w io.Writer
+}
+
+func (w stringWriter) WriteString(s string) (n int, err error) {
+ return w.w.Write([]byte(s))
+}
+
+type keyValues struct {
+ key string
+ values []string
+}
+
+type byKey []keyValues
+
+func (s byKey) Len() int { return len(s) }
+func (s byKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byKey) Less(i, j int) bool { return s[i].key < s[j].key }
+
+func (h Header) sortedKeyValues(exclude map[string]bool) []keyValues {
+ kvs := make([]keyValues, 0, len(h))
+ for k, vv := range h {
+ if !exclude[k] {
+ kvs = append(kvs, keyValues{k, vv})
+ }
+ }
+ sort.Sort(byKey(kvs))
+ return kvs
+}
+
// WriteSubset writes a header in wire format.
// If exclude is not nil, keys where exclude[key] == true are not written.
func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
- keys := make([]string, 0, len(h))
- for k := range h {
- if exclude == nil || !exclude[k] {
- keys = append(keys, k)
- }
+ ws, ok := w.(writeStringer)
+ if !ok {
+ ws = stringWriter{w}
}
- sort.Strings(keys)
- for _, k := range keys {
- for _, v := range h[k] {
+ for _, kv := range h.sortedKeyValues(exclude) {
+ for _, v := range kv.values {
v = headerNewlineToSpace.Replace(v)
- v = strings.TrimSpace(v)
- if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, v); err != nil {
- return err
+ v = textproto.TrimString(v)
+ for _, s := range []string{kv.key, ": ", v, "\r\n"} {
+ if _, err := ws.WriteString(s); err != nil {
+ return err
+ }
}
}
}
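
Taken together, the hunk above replaces the per-header fmt.Fprintf call with presorted key/value pairs written piece by piece through WriteString, and exports ParseTime for the three date formats HTTP/1.1 allows. A short usage sketch (illustrative only, using the exported names shown in the diff):

package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// ParseTime tries TimeFormat, time.RFC850 and time.ANSIC in order.
	t, err := http.ParseTime("Sun, 06 Nov 1994 08:49:37 GMT")
	fmt.Println(t, err) // 1994-11-06 08:49:37 +0000 UTC <nil>

	h := http.Header{
		"Content-Type":   {"text/plain"},
		"Content-Length": {"42"},
		"Set-Cookie":     {"a=1", "b=2"},
	}
	// Excluded keys are skipped; the rest are written sorted,
	// one "Key: value\r\n" line per value.
	h.WriteSubset(os.Stdout, map[string]bool{"Content-Length": true})
}
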
@@ -76,3 +147,43 @@ func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
// the rest are converted to lowercase. For example, the
// canonical key for "accept-encoding" is "Accept-Encoding".
func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
+
+// hasToken returns whether token appears within v, ASCII
+// case-insensitive, with space or comma boundaries.
+// token must be all lowercase.
+// v may contain mixed case.
+func hasToken(v, token string) bool {
+ if len(token) > len(v) || token == "" {
+ return false
+ }
+ if v == token {
+ return true
+ }
+ for sp := 0; sp <= len(v)-len(token); sp++ {
+ // Check that first character is good.
+ // The token is ASCII, so checking only a single byte
+ // is sufficient. We skip this potential starting
+ // position if neither the first byte nor its ASCII
+ // lowercase form (b|0x20) matches token's first byte.
+ // False positives ('^' => '~') are caught by EqualFold.
+ if b := v[sp]; b != token[0] && b|0x20 != token[0] {
+ continue
+ }
+ // Check that start pos is on a valid token boundary.
+ if sp > 0 && !isTokenBoundary(v[sp-1]) {
+ continue
+ }
+ // Check that end pos is on a valid token boundary.
+ if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) {
+ continue
+ }
+ if strings.EqualFold(v[sp:sp+len(token)], token) {
+ return true
+ }
+ }
+ return false
+}
+
+func isTokenBoundary(b byte) bool {
+ return b == ' ' || b == ',' || b == '\t'
+}
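
hasToken itself is unexported, but its contract is simple: report whether a lowercase ASCII token (such as "gzip" or "close") appears in a header value, matched case-insensitively and only at space, comma, or tab boundaries. A naive sketch with the same effect, shown only to illustrate those semantics (hasTokenSimple is a made-up name, not part of the package):

package main

import (
	"fmt"
	"strings"
)

// hasTokenSimple mimics hasToken's semantics without the byte-level
// optimizations: split the value at the boundary characters and compare
// each field against the lowercase token, ASCII case-insensitively.
func hasTokenSimple(v, token string) bool {
	isBoundary := func(r rune) bool { return r == ' ' || r == ',' || r == '\t' }
	for _, field := range strings.FieldsFunc(v, isBoundary) {
		if strings.EqualFold(field, token) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasTokenSimple("Gzip, Deflate", "gzip")) // true: case-insensitive, comma boundary
	fmt.Println(hasTokenSimple("gzipped", "gzip"))       // false: "gzip" is not on a token boundary
}
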