Diffstat (limited to 'src')
-rw-r--r--  src/Make.ccmd | 3
-rw-r--r--  src/Make.clib | 2
-rw-r--r--  src/Make.cmd | 4
-rw-r--r--  src/cmd/5a/lex.c | 53
-rw-r--r--  src/cmd/5c/gc.h | 4
-rw-r--r--  src/cmd/5g/gg.h | 4
-rw-r--r--  src/cmd/5l/asm.c | 216
-rw-r--r--  src/cmd/5l/obj.c | 5
-rw-r--r--  src/cmd/5l/optab.c | 35
-rw-r--r--  src/cmd/5l/span.c | 4
-rw-r--r--  src/cmd/6a/lex.c | 53
-rw-r--r--  src/cmd/6g/cgen.c | 38
-rw-r--r--  src/cmd/6g/ggen.c | 34
-rw-r--r--  src/cmd/6g/gsubr.c | 41
-rw-r--r--  src/cmd/6g/reg.c | 5
-rw-r--r--  src/cmd/6l/asm.c | 74
-rw-r--r--  src/cmd/6l/l.h | 3
-rw-r--r--  src/cmd/8a/lex.c | 53
-rw-r--r--  src/cmd/8g/cgen.c | 40
-rw-r--r--  src/cmd/8g/ggen.c | 29
-rw-r--r--  src/cmd/8g/gsubr.c | 68
-rw-r--r--  src/cmd/8l/asm.c | 75
-rw-r--r--  src/cmd/8l/obj.c | 2
-rw-r--r--  src/cmd/8l/pass.c | 15
-rw-r--r--  src/cmd/cc/lex.c | 58
-rw-r--r--  src/cmd/cgo/gcc.go | 14
-rw-r--r--  src/cmd/cgo/out.go | 12
-rw-r--r--  src/cmd/gc/builtin.c.boot | 4
-rw-r--r--  src/cmd/gc/go.h | 1
-rw-r--r--  src/cmd/gc/runtime.go | 1
-rw-r--r--  src/cmd/gc/subr.c | 14
-rw-r--r--  src/cmd/gc/swt.c | 5
-rw-r--r--  src/cmd/gc/typecheck.c | 21
-rw-r--r--  src/cmd/gc/unsafe.c | 1
-rw-r--r--  src/cmd/gc/walk.c | 146
-rw-r--r--  src/cmd/godoc/doc.go | 3
-rw-r--r--  src/cmd/godoc/godoc.go | 35
-rw-r--r--  src/cmd/godoc/main.go | 15
-rw-r--r--  src/cmd/gofix/Makefile | 1
-rw-r--r--  src/cmd/gofix/httpfinalurl.go | 56
-rw-r--r--  src/cmd/gofix/httpfinalurl_test.go | 37
-rwxr-xr-x  src/cmd/gofmt/test.sh | 3
-rw-r--r--  src/cmd/gotest/gotest.go | 5
-rw-r--r--  src/cmd/gotype/gotype.go | 6
-rw-r--r--  src/cmd/ld/data.c | 12
-rw-r--r--  src/cmd/ld/dwarf.c | 1
-rw-r--r--  src/cmd/ld/elf.h | 4
-rw-r--r--  src/cmd/ld/go.c | 2
-rw-r--r--  src/cmd/ld/lib.c | 12
-rw-r--r--  src/cmd/ld/lib.h | 2
-rw-r--r--  src/cmd/ld/pe.c | 6
-rwxr-xr-x  src/cmd/prof/gopprof | 9
-rwxr-xr-x  src/make.bash | 3
-rw-r--r--  src/pkg/Makefile | 7
-rw-r--r--  src/pkg/asn1/asn1.go | 35
-rw-r--r--  src/pkg/asn1/asn1_test.go | 27
-rw-r--r--  src/pkg/asn1/common.go | 4
-rw-r--r--  src/pkg/asn1/marshal.go | 47
-rwxr-xr-x  src/pkg/big/int.go | 94
-rwxr-xr-x  src/pkg/big/int_test.go | 197
-rwxr-xr-x  src/pkg/big/nat.go | 75
-rwxr-xr-x  src/pkg/big/nat_test.go | 32
-rw-r--r--  src/pkg/big/rat.go | 8
-rw-r--r--  src/pkg/compress/bzip2/bzip2.go | 4
-rw-r--r--  src/pkg/compress/bzip2/huffman.go | 2
-rw-r--r--  src/pkg/compress/flate/deflate.go | 29
-rw-r--r--  src/pkg/compress/flate/huffman_bit_writer.go | 4
-rw-r--r--  src/pkg/compress/gzip/gzip_test.go | 2
-rw-r--r--  src/pkg/compress/lzw/reader.go | 13
-rw-r--r--  src/pkg/compress/zlib/writer.go | 2
-rw-r--r--  src/pkg/compress/zlib/writer_test.go | 18
-rw-r--r--  src/pkg/container/heap/heap.go | 2
-rw-r--r--  src/pkg/crypto/elliptic/elliptic.go | 2
-rw-r--r--  src/pkg/crypto/elliptic/elliptic_test.go | 4
-rw-r--r--  src/pkg/crypto/hmac/hmac_test.go | 2
-rw-r--r--  src/pkg/crypto/openpgp/armor/armor.go | 2
-rw-r--r--  src/pkg/crypto/openpgp/keys.go | 103
-rw-r--r--  src/pkg/crypto/openpgp/packet/packet.go | 22
-rw-r--r--  src/pkg/crypto/openpgp/packet/private_key.go | 84
-rw-r--r--  src/pkg/crypto/openpgp/packet/public_key.go | 103
-rw-r--r--  src/pkg/crypto/openpgp/packet/public_key_test.go | 36
-rw-r--r--  src/pkg/crypto/openpgp/packet/signature.go | 48
-rw-r--r--  src/pkg/crypto/openpgp/packet/userid.go | 56
-rw-r--r--  src/pkg/crypto/openpgp/packet/userid_test.go | 45
-rw-r--r--  src/pkg/crypto/openpgp/read.go | 10
-rw-r--r--  src/pkg/crypto/openpgp/read_test.go | 6
-rw-r--r--  src/pkg/crypto/openpgp/s2k/s2k.go | 2
-rw-r--r--  src/pkg/crypto/openpgp/write.go | 15
-rw-r--r--  src/pkg/crypto/openpgp/write_test.go | 41
-rw-r--r--  src/pkg/crypto/rand/Makefile | 1
-rw-r--r--  src/pkg/crypto/rand/util.go | 80
-rw-r--r--  src/pkg/crypto/rsa/rsa.go | 90
-rw-r--r--  src/pkg/crypto/subtle/constant_time_test.go | 4
-rw-r--r--  src/pkg/crypto/tls/common.go | 4
-rw-r--r--  src/pkg/crypto/tls/conn.go | 7
-rw-r--r--  src/pkg/crypto/tls/handshake_server.go | 4
-rw-r--r--  src/pkg/crypto/tls/key_agreement.go | 8
-rw-r--r--  src/pkg/crypto/x509/crl/Makefile | 11
-rw-r--r--  src/pkg/crypto/x509/crl/crl.go | 96
-rw-r--r--  src/pkg/crypto/x509/crl/crl_test.go | 63
-rw-r--r--  src/pkg/crypto/x509/x509.go | 242
-rw-r--r--  src/pkg/crypto/x509/x509_test.go | 86
-rw-r--r--  src/pkg/crypto/xtea/block.go | 2
-rw-r--r--  src/pkg/crypto/xtea/xtea_test.go | 10
-rw-r--r--  src/pkg/debug/dwarf/type.go | 2
-rw-r--r--  src/pkg/debug/macho/file.go | 2
-rw-r--r--  src/pkg/debug/pe/file.go | 2
-rw-r--r--  src/pkg/debug/proc/proc_linux.go | 6
-rw-r--r--  src/pkg/ebnf/ebnf.go | 2
-rw-r--r--  src/pkg/encoding/git85/git.go | 2
-rw-r--r--  src/pkg/encoding/pem/pem.go | 2
-rw-r--r--  src/pkg/exp/datafmt/datafmt.go | 2
-rw-r--r--  src/pkg/exp/draw/draw.go | 129
-rw-r--r--  src/pkg/exp/draw/draw_test.go | 50
-rw-r--r--  src/pkg/exp/draw/x11/conn.go | 4
-rw-r--r--  src/pkg/exp/eval/expr.go | 2
-rw-r--r--  src/pkg/exp/eval/stmt.go | 6
-rw-r--r--  src/pkg/exp/eval/typec.go | 2
-rw-r--r--  src/pkg/exp/wingui/Makefile | 2
-rw-r--r--  src/pkg/exp/wingui/winapi.go | 6
-rw-r--r--  src/pkg/exp/wingui/zwinapi.go | 2
-rw-r--r--  src/pkg/expvar/expvar_test.go | 30
-rw-r--r--  src/pkg/flag/export_test.go | 22
-rw-r--r--  src/pkg/flag/flag.go | 400
-rw-r--r--  src/pkg/flag/flag_test.go | 59
-rw-r--r--  src/pkg/fmt/doc.go | 10
-rw-r--r--  src/pkg/fmt/print.go | 2
-rw-r--r--  src/pkg/go/ast/ast.go | 24
-rw-r--r--  src/pkg/go/ast/filter.go | 116
-rw-r--r--  src/pkg/go/ast/resolve.go | 83
-rw-r--r--  src/pkg/go/ast/scope.go | 11
-rw-r--r--  src/pkg/go/doc/doc.go | 24
-rw-r--r--  src/pkg/go/parser/parser.go | 54
-rw-r--r--  src/pkg/go/parser/parser_test.go | 1
-rw-r--r--  src/pkg/go/printer/nodes.go | 183
-rw-r--r--  src/pkg/go/printer/performance_test.go | 62
-rw-r--r--  src/pkg/go/printer/printer.go | 2
-rw-r--r--  src/pkg/go/printer/testdata/comments.golden | 10
-rw-r--r--  src/pkg/go/printer/testdata/comments.input | 2
-rw-r--r--  src/pkg/go/printer/testdata/comments.x | 10
-rw-r--r--  src/pkg/go/printer/testdata/declarations.golden | 41
-rw-r--r--  src/pkg/go/printer/testdata/declarations.input | 29
-rw-r--r--  src/pkg/go/printer/testdata/expressions.golden | 99
-rw-r--r--  src/pkg/go/printer/testdata/expressions.input | 97
-rw-r--r--  src/pkg/go/printer/testdata/expressions.raw | 99
-rw-r--r--  src/pkg/go/printer/testdata/parser.go | 2252
-rw-r--r--  src/pkg/go/scanner/scanner_test.go | 2
-rw-r--r--  src/pkg/go/token/position.go | 54
-rw-r--r--  src/pkg/go/types/Makefile | 1
-rw-r--r--  src/pkg/go/types/check.go | 233
-rw-r--r--  src/pkg/go/types/check_test.go | 224
-rw-r--r--  src/pkg/go/types/gcimporter.go | 223
-rw-r--r--  src/pkg/go/types/gcimporter_test.go | 7
-rw-r--r--  src/pkg/go/types/testdata/exports.go | 16
-rw-r--r--  src/pkg/go/types/testdata/test0.src | 154
-rw-r--r--  src/pkg/go/types/types.go | 169
-rw-r--r--  src/pkg/go/types/universe.go | 16
-rw-r--r--  src/pkg/gob/decode.go | 2
-rw-r--r--  src/pkg/gob/doc.go | 2
-rw-r--r--  src/pkg/gob/gobencdec_test.go | 2
-rw-r--r--  src/pkg/gob/type.go | 2
-rw-r--r--  src/pkg/html/token.go | 39
-rw-r--r--  src/pkg/html/token_test.go | 40
-rw-r--r--  src/pkg/http/cgi/child.go | 89
-rw-r--r--  src/pkg/http/cgi/child_test.go | 30
-rw-r--r--  src/pkg/http/cgi/host.go | 3
-rw-r--r--  src/pkg/http/chunked.go | 14
-rw-r--r--  src/pkg/http/client.go | 92
-rw-r--r--  src/pkg/http/client_test.go | 25
-rw-r--r--  src/pkg/http/cookie.go | 85
-rw-r--r--  src/pkg/http/cookie_test.go | 42
-rw-r--r--  src/pkg/http/fcgi/child.go | 78
-rw-r--r--  src/pkg/http/fs_test.go | 2
-rw-r--r--  src/pkg/http/header.go | 40
-rw-r--r--  src/pkg/http/header_test.go | 71
-rw-r--r--  src/pkg/http/persist.go | 13
-rw-r--r--  src/pkg/http/pprof/pprof.go | 14
-rw-r--r--  src/pkg/http/proxy_test.go | 4
-rw-r--r--  src/pkg/http/request.go | 27
-rw-r--r--  src/pkg/http/request_test.go | 43
-rw-r--r--  src/pkg/http/requestwrite_test.go | 5
-rw-r--r--  src/pkg/http/response.go | 57
-rw-r--r--  src/pkg/http/response_test.go | 62
-rw-r--r--  src/pkg/http/responsewrite_test.go | 14
-rw-r--r--  src/pkg/http/reverseproxy_test.go | 2
-rw-r--r--  src/pkg/http/serve_test.go | 147
-rw-r--r--  src/pkg/http/server.go | 8
-rw-r--r--  src/pkg/http/spdy/Makefile | 11
-rw-r--r--  src/pkg/http/spdy/protocol.go | 367
-rw-r--r--  src/pkg/http/spdy/protocol_test.go | 259
-rw-r--r--  src/pkg/http/transfer.go | 24
-rw-r--r--  src/pkg/http/transport.go | 175
-rw-r--r--  src/pkg/http/transport_test.go | 93
-rw-r--r--  src/pkg/http/url.go | 2
-rw-r--r--  src/pkg/image/bmp/Makefile | 11
-rw-r--r--  src/pkg/image/bmp/reader.go | 148
-rw-r--r--  src/pkg/image/decode_test.go | 70
-rw-r--r--  src/pkg/image/gif/Makefile | 11
-rw-r--r--  src/pkg/image/gif/reader.go | 423
-rw-r--r--  src/pkg/image/image.go | 56
-rw-r--r--  src/pkg/image/jpeg/idct.go | 124
-rw-r--r--  src/pkg/image/jpeg/reader.go | 246
-rw-r--r--  src/pkg/image/jpeg/writer.go | 62
-rw-r--r--  src/pkg/image/jpeg/writer_test.go | 28
-rw-r--r--  src/pkg/image/png/reader.go | 22
-rw-r--r--  src/pkg/image/png/reader_test.go | 20
-rw-r--r--  src/pkg/image/png/testdata/pngsuite/README | 3
-rw-r--r--  src/pkg/image/png/testdata/pngsuite/basn3p08-trns.png | bin 0 -> 1538 bytes
-rw-r--r--  src/pkg/image/png/testdata/pngsuite/basn3p08-trns.sng | 301
-rw-r--r--  src/pkg/image/png/writer.go | 53
-rw-r--r--  src/pkg/image/png/writer_test.go | 40
-rw-r--r--  src/pkg/image/testdata/video-001.5bpp.gif | bin 0 -> 6214 bytes
-rw-r--r--  src/pkg/image/testdata/video-001.interlaced.gif | bin 0 -> 14142 bytes
-rw-r--r--  src/pkg/image/testdata/video-005.gray.jpeg | bin 0 -> 5618 bytes
-rw-r--r--  src/pkg/image/testdata/video-005.gray.png | bin 0 -> 14974 bytes
-rw-r--r--  src/pkg/image/tiff/Makefile | 13
-rw-r--r--  src/pkg/image/tiff/buffer.go | 57
-rw-r--r--  src/pkg/image/tiff/buffer_test.go | 36
-rw-r--r--  src/pkg/image/tiff/consts.go | 103
-rw-r--r--  src/pkg/image/tiff/reader.go | 399
-rw-r--r--  src/pkg/io/multi_test.go | 5
-rw-r--r--  src/pkg/mime/multipart/Makefile | 1
-rw-r--r--  src/pkg/mime/multipart/formdata.go | 11
-rw-r--r--  src/pkg/mime/multipart/formdata_test.go | 2
-rw-r--r--  src/pkg/mime/multipart/multipart.go | 239
-rw-r--r--  src/pkg/mime/multipart/multipart_test.go | 187
-rw-r--r--  src/pkg/mime/multipart/writer.go | 160
-rw-r--r--  src/pkg/mime/multipart/writer_test.go | 71
-rw-r--r--  src/pkg/net/Makefile | 4
-rw-r--r--  src/pkg/net/dial.go | 9
-rw-r--r--  src/pkg/net/dialgoogle_test.go | 102
-rw-r--r--  src/pkg/net/dnsclient.go | 78
-rw-r--r--  src/pkg/net/dnsconfig.go | 1
-rw-r--r--  src/pkg/net/dnsmsg.go | 36
-rw-r--r--  src/pkg/net/dnsmsg_test.go | 107
-rw-r--r--  src/pkg/net/file_test.go | 6
-rw-r--r--  src/pkg/net/ip.go | 65
-rw-r--r--  src/pkg/net/ip_test.go | 112
-rw-r--r--  src/pkg/net/ipraw_test.go | 11
-rw-r--r--  src/pkg/net/iprawsock.go | 31
-rw-r--r--  src/pkg/net/ipsock.go | 191
-rw-r--r--  src/pkg/net/resolv_windows.go | 4
-rw-r--r--  src/pkg/net/server_test.go | 37
-rw-r--r--  src/pkg/net/sock.go | 25
-rw-r--r--  src/pkg/net/sock_bsd.go | 31
-rw-r--r--  src/pkg/net/sock_linux.go | 25
-rw-r--r--  src/pkg/net/sock_windows.go | 25
-rw-r--r--  src/pkg/net/tcpsock.go | 9
-rw-r--r--  src/pkg/net/udpsock.go | 9
-rw-r--r--  src/pkg/os/dir_unix.go | 43
-rw-r--r--  src/pkg/os/dir_windows.go | 12
-rw-r--r--  src/pkg/os/env.go | 2
-rw-r--r--  src/pkg/os/env_unix.go | 30
-rw-r--r--  src/pkg/os/file_unix.go | 27
-rw-r--r--  src/pkg/os/file_windows.go | 33
-rw-r--r--  src/pkg/os/os_test.go | 7
-rw-r--r--  src/pkg/os/path.go | 3
-rw-r--r--  src/pkg/os/user/user_test.go | 4
-rw-r--r--  src/pkg/path/filepath/path.go | 9
-rw-r--r--  src/pkg/path/filepath/path_test.go | 56
-rw-r--r--  src/pkg/rand/rand_test.go | 4
-rw-r--r--  src/pkg/reflect/all_test.go | 30
-rw-r--r--  src/pkg/reflect/value.go | 9
-rw-r--r--  src/pkg/rpc/client.go | 2
-rw-r--r--  src/pkg/runtime/append_test.go | 51
-rw-r--r--  src/pkg/runtime/arm/closure.c | 2
-rw-r--r--  src/pkg/runtime/cgo/Makefile | 14
-rw-r--r--  src/pkg/runtime/cgo/setenv.c | 16
-rw-r--r--  src/pkg/runtime/linux/arm/sys.s | 15
-rw-r--r--  src/pkg/runtime/linux/thread.c | 8
-rw-r--r--  src/pkg/runtime/malloc.goc | 2
-rw-r--r--  src/pkg/runtime/proc.c | 35
-rw-r--r--  src/pkg/runtime/runtime.h | 14
-rw-r--r--  src/pkg/runtime/slice.c | 91
-rw-r--r--  src/pkg/runtime/softfloat64.go | 4
-rw-r--r--  src/pkg/runtime/stack.h | 14
-rw-r--r--  src/pkg/strconv/atob.go | 4
-rw-r--r--  src/pkg/strconv/atob_test.go | 2
-rw-r--r--  src/pkg/strings/reader.go | 5
-rw-r--r--  src/pkg/sync/atomic/asm_linux_arm.s | 21
-rwxr-xr-x  src/pkg/syscall/mkall.sh | 4
-rwxr-xr-x  src/pkg/syscall/mkerrors.sh | 12
-rwxr-xr-x  src/pkg/syscall/mksyscall_windows.pl | 1
-rw-r--r--  src/pkg/syscall/syscall_bsd.go | 9
-rw-r--r--  src/pkg/syscall/syscall_darwin.go | 5
-rw-r--r--  src/pkg/syscall/syscall_freebsd.go | 5
-rw-r--r--  src/pkg/syscall/syscall_linux.go | 37
-rw-r--r--  src/pkg/syscall/syscall_linux_386.go | 1
-rw-r--r--  src/pkg/syscall/syscall_linux_amd64.go | 1
-rw-r--r--  src/pkg/syscall/syscall_linux_arm.go | 1
-rw-r--r--  src/pkg/syscall/syscall_plan9.go | 5
-rw-r--r--  src/pkg/syscall/syscall_windows.go | 16
-rw-r--r--  src/pkg/syscall/types_linux.c | 58
-rw-r--r--  src/pkg/syscall/zerrors_linux_386.go | 126
-rw-r--r--  src/pkg/syscall/zerrors_linux_amd64.go | 126
-rw-r--r--  src/pkg/syscall/zerrors_linux_arm.go | 532
-rw-r--r--  src/pkg/syscall/zsyscall_linux_386.go | 9
-rw-r--r--  src/pkg/syscall/zsyscall_linux_amd64.go | 9
-rw-r--r--  src/pkg/syscall/zsyscall_linux_arm.go | 9
-rw-r--r--  src/pkg/syscall/zsyscall_windows_386.go | 92
-rw-r--r--  src/pkg/syscall/ztypes_darwin_386.go | 4
-rw-r--r--  src/pkg/syscall/ztypes_darwin_amd64.go | 4
-rw-r--r--  src/pkg/syscall/ztypes_linux_386.go | 102
-rw-r--r--  src/pkg/syscall/ztypes_linux_amd64.go | 102
-rw-r--r--  src/pkg/syscall/ztypes_linux_arm.go | 240
-rw-r--r--  src/pkg/syscall/ztypes_windows_386.go | 14
-rw-r--r--  src/pkg/syslog/syslog_test.go | 20
-rw-r--r--  src/pkg/template/template.go | 138
-rw-r--r--  src/pkg/template/template_test.go | 39
-rw-r--r--  src/pkg/unicode/Makefile | 1
-rw-r--r--  src/pkg/unicode/maketables.go | 4
-rw-r--r--  src/pkg/websocket/client.go | 4
-rw-r--r--  src/pkg/websocket/websocket_test.go | 4
-rw-r--r--  src/pkg/xml/read.go | 9
-rw-r--r--  src/pkg/xml/xml_test.go | 81
315 files changed, 13441 insertions(+), 3041 deletions(-)
diff --git a/src/Make.ccmd b/src/Make.ccmd
index cb2b25512..88f647152 100644
--- a/src/Make.ccmd
+++ b/src/Make.ccmd
@@ -18,6 +18,9 @@ CLEANFILES+=y.tab.[ch]
clean:
rm -f *.$(HOST_O) $(TARG) $(CLEANFILES)
+nuke: clean
+ rm -f "$(GOBIN)/$(TARG)"
+
ifneq ($(NOINSTALL),1)
install: $(QUOTED_GOBIN)/$(TARG)
endif
diff --git a/src/Make.clib b/src/Make.clib
index ebe4f84b9..25fe88463 100644
--- a/src/Make.clib
+++ b/src/Make.clib
@@ -26,6 +26,8 @@ CLEANFILES+=y.tab.[ch] y.output a.out $(LIB)
clean:
rm -f *.$(HOST_O) $(CLEANFILES)
+nuke: clean
+ rm -f "$(GOROOT)/lib/$(LIB)"
y.tab.h: $(YFILES)
LANG=C LANGUAGE="en_US.UTF8" bison -v -y $(HOST_YFLAGS) $(YFILES)
diff --git a/src/Make.cmd b/src/Make.cmd
index e769e3072..27c6a2e13 100644
--- a/src/Make.cmd
+++ b/src/Make.cmd
@@ -25,9 +25,9 @@ _go_.$O: $(GOFILES) $(PREREQ)
install: $(TARGDIR)/$(TARG)
$(TARGDIR)/$(TARG): $(TARG)
- cp -f $(TARG) $(TARGDIR)
+ mkdir -p $(TARGDIR) && cp -f $(TARG) $(TARGDIR)
-CLEANFILES+=$(TARG) _test _testmain.go
+CLEANFILES+=$(TARG) _test _testmain.go test.out build.out
nuke: clean
rm -f $(TARGDIR)/$(TARG)
diff --git a/src/cmd/5a/lex.c b/src/cmd/5a/lex.c
index dbee3657f..a04cda220 100644
--- a/src/cmd/5a/lex.c
+++ b/src/cmd/5a/lex.c
@@ -50,7 +50,7 @@ void
main(int argc, char *argv[])
{
char *p;
- int nout, nproc, i, c;
+ int c;
thechar = '5';
thestring = "arm";
@@ -94,46 +94,10 @@ main(int argc, char *argv[])
print("usage: %ca [-options] file.s\n", thechar);
errorexit();
}
- if(argc > 1 && systemtype(Windows)){
- print("can't assemble multiple files on windows\n");
+ if(argc > 1){
+ print("can't assemble multiple files\n");
errorexit();
}
- if(argc > 1 && !systemtype(Windows)) {
- nproc = 1;
- if(p = getenv("NPROC"))
- nproc = atol(p); /* */
- c = 0;
- nout = 0;
- for(;;) {
- Waitmsg *w;
-
- while(nout < nproc && argc > 0) {
- i = fork();
- if(i < 0) {
- fprint(2, "fork: %r\n");
- errorexit();
- }
- if(i == 0) {
- print("%s:\n", *argv);
- if(assemble(*argv))
- errorexit();
- exits(0);
- }
- nout++;
- argc--;
- argv++;
- }
- w = wait();
- if(w == nil) {
- if(c)
- errorexit();
- exits(0);
- }
- if(w->msg[0])
- c++;
- nout--;
- }
- }
if(assemble(argv[0]))
errorexit();
exits(0);
@@ -142,7 +106,7 @@ main(int argc, char *argv[])
int
assemble(char *file)
{
- char *ofile, incfile[20], *p;
+ char *ofile, *p;
int i, of;
ofile = alloc(strlen(file)+3); // +3 for .x\0 (x=thechar)
@@ -167,15 +131,6 @@ assemble(char *file)
} else
outfile = "/dev/null";
}
- p = getenv("INCLUDE");
- if(p) {
- setinclude(p);
- } else {
- if(systemtype(Plan9)) {
- sprint(incfile,"/%s/include", thestring);
- setinclude(strdup(incfile));
- }
- }
of = create(outfile, OWRITE, 0664);
if(of < 0) {
diff --git a/src/cmd/5c/gc.h b/src/cmd/5c/gc.h
index 9e9d1bd7d..549e0c88a 100644
--- a/src/cmd/5c/gc.h
+++ b/src/cmd/5c/gc.h
@@ -69,7 +69,7 @@ struct Adr
Sym* sym;
char type;
- char reg;
+ uchar reg;
char name;
char etype;
};
@@ -83,7 +83,7 @@ struct Prog
Prog* link;
int32 lineno;
char as;
- char reg;
+ uchar reg;
uchar scond;
};
#define P ((Prog*)0)
diff --git a/src/cmd/5g/gg.h b/src/cmd/5g/gg.h
index 78e6833b2..fe404ed79 100644
--- a/src/cmd/5g/gg.h
+++ b/src/cmd/5g/gg.h
@@ -26,7 +26,7 @@ struct Addr
int width;
uchar type;
char name;
- char reg;
+ uchar reg;
char pun;
uchar etype;
};
@@ -41,7 +41,7 @@ struct Prog
Addr to; // dst address
Prog* link; // next instruction in this func
void* regp; // points to enclosing Reg struct
- char reg; // doubles as width in DATA op
+ uchar reg; // doubles as width in DATA op
uchar scond;
};
diff --git a/src/cmd/5l/asm.c b/src/cmd/5l/asm.c
index e2583e7c3..9e9c2c1eb 100644
--- a/src/cmd/5l/asm.c
+++ b/src/cmd/5l/asm.c
@@ -73,6 +73,8 @@ enum {
ElfStrGosymcounts,
ElfStrGosymtab,
ElfStrGopclntab,
+ ElfStrSymtab,
+ ElfStrStrtab,
ElfStrShstrtab,
ElfStrRelPlt,
ElfStrPlt,
@@ -87,6 +89,9 @@ needlib(char *name)
char *p;
Sym *s;
+ if(*name == '\0')
+ return 0;
+
/* reuse hash code in symbol table */
p = smprint(".dynlib.%s", name);
s = lookup(p, 0);
@@ -163,6 +168,8 @@ doelf(void)
elfstr[ElfStrGosymcounts] = addstring(shstrtab, ".gosymcounts");
elfstr[ElfStrGosymtab] = addstring(shstrtab, ".gosymtab");
elfstr[ElfStrGopclntab] = addstring(shstrtab, ".gopclntab");
+ elfstr[ElfStrSymtab] = addstring(shstrtab, ".symtab");
+ elfstr[ElfStrStrtab] = addstring(shstrtab, ".strtab");
}
elfstr[ElfStrShstrtab] = addstring(shstrtab, ".shstrtab");
@@ -288,19 +295,20 @@ asmb(void)
{
int32 t;
int a, dynsym;
- uint32 va, fo, w, startva;
- int strtabsize;
+ uint32 fo, symo, startva, elfsymo, elfstro, elfsymsize;
ElfEhdr *eh;
ElfPhdr *ph, *pph;
ElfShdr *sh;
Section *sect;
- strtabsize = 0;
-
if(debug['v'])
Bprint(&bso, "%5.2f asmb\n", cputime());
Bflush(&bso);
+ elfsymsize = 0;
+ elfstro = 0;
+ elfsymo = 0;
+
sect = segtext.sect;
seek(cout, sect->vaddr - segtext.vaddr + segtext.fileoff, 0);
codeblk(sect->vaddr, sect->len);
@@ -322,15 +330,30 @@ asmb(void)
seek(cout, sect->vaddr - segtext.vaddr + segtext.fileoff, 0);
datblk(sect->vaddr, sect->len);
+ if(iself) {
+ /* index of elf text section; needed by asmelfsym, double-checked below */
+ /* !debug['d'] causes extra sections before the .text section */
+ elftextsh = 1;
+ if(!debug['d']) {
+ elftextsh += 10;
+ if(elfverneed)
+ elftextsh += 2;
+ }
+ }
+
/* output symbol table */
symsize = 0;
lcsize = 0;
+ symo = 0;
if(!debug['s']) {
// TODO: rationalize
if(debug['v'])
Bprint(&bso, "%5.2f sym\n", cputime());
Bflush(&bso);
switch(HEADTYPE) {
+ default:
+ if(iself)
+ goto ElfSym;
case Hnoheader:
case Hrisc:
case Hixp1200:
@@ -345,14 +368,29 @@ asmb(void)
OFFSET += rnd(segdata.filelen, 4096);
seek(cout, OFFSET, 0);
break;
- case Hlinux:
- OFFSET += segdata.filelen;
- seek(cout, rnd(OFFSET, INITRND), 0);
+ ElfSym:
+ symo = rnd(HEADR+segtext.filelen, INITRND)+segdata.filelen;
+ symo = rnd(symo, INITRND);
break;
}
- if(!debug['s'])
- asmthumbmap();
+ if(iself) {
+ if(debug['v'])
+ Bprint(&bso, "%5.2f elfsym\n", cputime());
+ elfsymo = symo+8+symsize+lcsize;
+ seek(cout, elfsymo, 0);
+ asmelfsym32();
+ cflush();
+ elfstro = seek(cout, 0, 1);
+ elfsymsize = elfstro - elfsymo;
+ ewrite(cout, elfstrdat, elfstrsize);
+
+ // if(debug['v'])
+ // Bprint(&bso, "%5.2f dwarf\n", cputime());
+ // dwarfemitdebugsections();
+ }
+ asmthumbmap();
cflush();
+
}
cursym = nil;
@@ -426,9 +464,7 @@ asmb(void)
/* elf arm */
eh = getElfEhdr();
fo = HEADR;
- va = INITTEXT;
startva = INITTEXT - fo; /* va of byte 0 of file */
- w = textsize;
/* This null SHdr must appear before all others */
sh = newElfShdr(elfstr[ElfStrEmpty]);
@@ -541,6 +577,8 @@ asmb(void)
ph->flags = PF_W+PF_R;
ph->align = 4;
+ if(elftextsh != eh->shnum)
+ diag("elftextsh = %d, want %d", elftextsh, eh->shnum);
for(sect=segtext.sect; sect!=nil; sect=sect->next)
elfshbits(sect);
for(sect=segdata.sect; sect!=nil; sect=sect->next)
@@ -558,6 +596,22 @@ asmb(void)
sh->flags = SHF_ALLOC;
sh->addralign = 1;
shsym(sh, lookup("pclntab", 0));
+
+ sh = newElfShdr(elfstr[ElfStrSymtab]);
+ sh->type = SHT_SYMTAB;
+ sh->off = elfsymo;
+ sh->size = elfsymsize;
+ sh->addralign = 4;
+ sh->entsize = 16;
+ sh->link = eh->shnum; // link to strtab
+
+ sh = newElfShdr(elfstr[ElfStrStrtab]);
+ sh->type = SHT_STRTAB;
+ sh->off = elfstro;
+ sh->size = elfstrsize;
+ sh->addralign = 1;
+
+ // dwarfaddelfheaders();
}
sh = newElfShstrtab(elfstr[ElfStrShstrtab]);
@@ -990,40 +1044,6 @@ if(debug['G']) print("%ux: %s: arm %d %d %d\n", (uint32)(p->pc), p->from.sym->na
o1 |= 1<<22;
break;
- case 22: /* movb/movh/movhu O(R),R -> lr,shl,shr */
- aclass(&p->from);
- r = p->from.reg;
- if(r == NREG)
- r = o->param;
- o1 = olr(instoffset, r, p->to.reg, p->scond);
-
- o2 = oprrr(ASLL, p->scond);
- o3 = oprrr(ASRA, p->scond);
- r = p->to.reg;
- if(p->as == AMOVB) {
- o2 |= (24<<7)|(r)|(r<<12);
- o3 |= (24<<7)|(r)|(r<<12);
- } else {
- o2 |= (16<<7)|(r)|(r<<12);
- if(p->as == AMOVHU)
- o3 = oprrr(ASRL, p->scond);
- o3 |= (16<<7)|(r)|(r<<12);
- }
- break;
-
- case 23: /* movh/movhu R,O(R) -> sb,sb */
- aclass(&p->to);
- r = p->to.reg;
- if(r == NREG)
- r = o->param;
- o1 = osr(AMOVH, p->from.reg, instoffset, r, p->scond);
-
- o2 = oprrr(ASRL, p->scond);
- o2 |= (8<<7)|(p->from.reg)|(REGTMP<<12);
-
- o3 = osr(AMOVH, REGTMP, instoffset+1, r, p->scond);
- break;
-
case 30: /* mov/movb/movbu R,L(R) */
o1 = omvl(p, &p->to, REGTMP);
if(!o1)
@@ -1037,7 +1057,6 @@ if(debug['G']) print("%ux: %s: arm %d %d %d\n", (uint32)(p->pc), p->from.sym->na
break;
case 31: /* mov/movbu L(R),R -> lr[b] */
- case 32: /* movh/movb L(R),R -> lr[b] */
o1 = omvl(p, &p->from, REGTMP);
if(!o1)
break;
@@ -1047,53 +1066,6 @@ if(debug['G']) print("%ux: %s: arm %d %d %d\n", (uint32)(p->pc), p->from.sym->na
o2 = olrr(REGTMP,r, p->to.reg, p->scond);
if(p->as == AMOVBU || p->as == AMOVB)
o2 |= 1<<22;
- if(o->type == 31)
- break;
-
- o3 = oprrr(ASLL, p->scond);
-
- if(p->as == AMOVBU || p->as == AMOVHU)
- o4 = oprrr(ASRL, p->scond);
- else
- o4 = oprrr(ASRA, p->scond);
-
- r = p->to.reg;
- o3 |= (r)|(r<<12);
- o4 |= (r)|(r<<12);
- if(p->as == AMOVB || p->as == AMOVBU) {
- o3 |= (24<<7);
- o4 |= (24<<7);
- } else {
- o3 |= (16<<7);
- o4 |= (16<<7);
- }
- break;
-
- case 33: /* movh/movhu R,L(R) -> sb, sb */
- o1 = omvl(p, &p->to, REGTMP);
- if(!o1)
- break;
- r = p->to.reg;
- if(r == NREG)
- r = o->param;
- o2 = osrr(p->from.reg, REGTMP, r, p->scond);
- o2 |= (1<<22) ;
-
- o3 = oprrr(ASRL, p->scond);
- o3 |= (8<<7)|(p->from.reg)|(p->from.reg<<12);
- o3 |= (1<<6); /* ROR 8 */
-
- o4 = oprrr(AADD, p->scond);
- o4 |= (REGTMP << 12) | (REGTMP << 16);
- o4 |= immrot(1);
-
- o5 = osrr(p->from.reg, REGTMP,r,p->scond);
- o5 |= (1<<22);
-
- o6 = oprrr(ASRL, p->scond);
- o6 |= (24<<7)|(p->from.reg)|(p->from.reg<<12);
- o6 |= (1<<6); /* ROL 8 */
-
break;
case 34: /* mov $lacon,R */
@@ -1304,54 +1276,12 @@ if(debug['G']) print("%ux: %s: arm %d %d %d\n", (uint32)(p->pc), p->from.sym->na
break;
case 65: /* mov/movbu addr,R */
- case 66: /* movh/movhu/movb addr,R */
o1 = omvl(p, &p->from, REGTMP);
if(!o1)
break;
o2 = olr(0, REGTMP, p->to.reg, p->scond);
if(p->as == AMOVBU || p->as == AMOVB)
o2 |= 1<<22;
- if(o->type == 65)
- break;
-
- o3 = oprrr(ASLL, p->scond);
-
- if(p->as == AMOVBU || p->as == AMOVHU)
- o4 = oprrr(ASRL, p->scond);
- else
- o4 = oprrr(ASRA, p->scond);
-
- r = p->to.reg;
- o3 |= (r)|(r<<12);
- o4 |= (r)|(r<<12);
- if(p->as == AMOVB || p->as == AMOVBU) {
- o3 |= (24<<7);
- o4 |= (24<<7);
- } else {
- o3 |= (16<<7);
- o4 |= (16<<7);
- }
- break;
-
- case 67: /* movh/movhu R,addr -> sb, sb */
- o1 = omvl(p, &p->to, REGTMP);
- if(!o1)
- break;
- o2 = osr(p->as, p->from.reg, 0, REGTMP, p->scond);
-
- o3 = oprrr(ASRL, p->scond);
- o3 |= (8<<7)|(p->from.reg)|(p->from.reg<<12);
- o3 |= (1<<6); /* ROR 8 */
-
- o4 = oprrr(AADD, p->scond);
- o4 |= (REGTMP << 12) | (REGTMP << 16);
- o4 |= immrot(1);
-
- o5 = osr(p->as, p->from.reg, 0, REGTMP, p->scond);
-
- o6 = oprrr(ASRL, p->scond);
- o6 |= (24<<7)|(p->from.reg)|(p->from.reg<<12);
- o6 |= (1<<6); /* ROL 8 */
break;
case 68: /* floating point store -> ADDR */
@@ -1572,6 +1502,22 @@ if(debug['G']) print("%ux: %s: arm %d %d %d\n", (uint32)(p->pc), p->from.sym->na
o1 |= p->to.reg << 12;
o1 |= (p->scond & C_SCOND) << 28;
break;
+ case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
+ o1 = omvl(p, &p->from, REGTMP);
+ if(!o1)
+ break;
+ o2 = olhr(0, REGTMP, p->to.reg, p->scond);
+ if(p->as == AMOVB)
+ o2 ^= (1<<5)|(1<<6);
+ else if(p->as == AMOVH)
+ o2 ^= (1<<6);
+ break;
+ case 94: /* movh/movhu R,addr -> strh */
+ o1 = omvl(p, &p->to, REGTMP);
+ if(!o1)
+ break;
+ o2 = oshr(p->from.reg, 0, REGTMP, p->scond);
+ break;
}
out[0] = o1;
diff --git a/src/cmd/5l/obj.c b/src/cmd/5l/obj.c
index c4a2bfc3f..96ba0010f 100644
--- a/src/cmd/5l/obj.c
+++ b/src/cmd/5l/obj.c
@@ -219,7 +219,7 @@ main(int argc, char *argv[])
elfinit();
HEADR = ELFRESERVE;
if(INITTEXT == -1)
- INITTEXT = 0x8000 + HEADR;
+ INITTEXT = 0x10000 + HEADR;
if(INITDAT == -1)
INITDAT = 0;
if(INITRND == -1)
@@ -412,7 +412,7 @@ ldobj1(Biobuf *f, char *pkg, int64 len, char *pn)
{
int32 ipc;
Prog *p;
- Sym *h[NSYM], *s, *di;
+ Sym *h[NSYM], *s;
int v, o, r, skip;
uint32 sig;
char *name;
@@ -424,7 +424,6 @@ ldobj1(Biobuf *f, char *pkg, int64 len, char *pn)
lastp = nil;
ntext = 0;
eof = Boffset(f) + len;
- di = S;
src[0] = 0;
newloop:
diff --git a/src/cmd/5l/optab.c b/src/cmd/5l/optab.c
index 625b66812..4816aa40f 100644
--- a/src/cmd/5l/optab.c
+++ b/src/cmd/5l/optab.c
@@ -117,18 +117,6 @@ Optab optab[] =
{ AMOVBU, C_SAUTO,C_NONE, C_REG, 21, 4, REGSP },
{ AMOVBU, C_SOREG,C_NONE, C_REG, 21, 4, 0 },
- { AMOVB, C_SAUTO,C_NONE, C_REG, 22, 12, REGSP },
- { AMOVB, C_SOREG,C_NONE, C_REG, 22, 12, 0 },
- { AMOVH, C_SAUTO,C_NONE, C_REG, 22, 12, REGSP },
- { AMOVH, C_SOREG,C_NONE, C_REG, 22, 12, 0 },
- { AMOVHU, C_SAUTO,C_NONE, C_REG, 22, 12, REGSP },
- { AMOVHU, C_SOREG,C_NONE, C_REG, 22, 12, 0 },
-
- { AMOVH, C_REG, C_NONE, C_SAUTO, 23, 12, REGSP },
- { AMOVH, C_REG, C_NONE, C_SOREG, 23, 12, 0 },
- { AMOVHU, C_REG, C_NONE, C_SAUTO, 23, 12, REGSP },
- { AMOVHU, C_REG, C_NONE, C_SOREG, 23, 12, 0 },
-
{ AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO },
{ AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO },
{ AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO },
@@ -146,23 +134,6 @@ Optab optab[] =
{ AMOVBU, C_LOREG,C_NONE, C_REG, 31, 8, 0, LFROM },
{ AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM },
- { AMOVB, C_LAUTO,C_NONE, C_REG, 32, 16, REGSP, LFROM },
- { AMOVB, C_LOREG,C_NONE, C_REG, 32, 16, 0, LFROM },
- { AMOVB, C_ADDR, C_NONE, C_REG, 66, 16, 0, LFROM },
- { AMOVH, C_LAUTO,C_NONE, C_REG, 32, 16, REGSP, LFROM },
- { AMOVH, C_LOREG,C_NONE, C_REG, 32, 16, 0, LFROM },
- { AMOVH, C_ADDR, C_NONE, C_REG, 66, 16, 0, LFROM },
- { AMOVHU, C_LAUTO,C_NONE, C_REG, 32, 16, REGSP, LFROM },
- { AMOVHU, C_LOREG,C_NONE, C_REG, 32, 16, 0, LFROM },
- { AMOVHU, C_ADDR, C_NONE, C_REG, 66, 16, 0, LFROM },
-
- { AMOVH, C_REG, C_NONE, C_LAUTO, 33, 24, REGSP, LTO },
- { AMOVH, C_REG, C_NONE, C_LOREG, 33, 24, 0, LTO },
- { AMOVH, C_REG, C_NONE, C_ADDR, 67, 24, 0, LTO },
- { AMOVHU, C_REG, C_NONE, C_LAUTO, 33, 24, REGSP, LTO },
- { AMOVHU, C_REG, C_NONE, C_LOREG, 33, 24, 0, LTO },
- { AMOVHU, C_REG, C_NONE, C_ADDR, 67, 24, 0, LTO },
-
{ AMOVW, C_LACON,C_NONE, C_REG, 34, 8, REGSP, LFROM },
{ AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0 },
@@ -224,15 +195,21 @@ Optab optab[] =
{ AMOVH, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO|V4 },
{ AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO|V4 },
+ { AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO|V4 },
{ AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO|V4 },
{ AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO|V4 },
+ { AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO|V4 },
{ AMOVB, C_LAUTO,C_NONE, C_REG, 73, 8, REGSP, LFROM|V4 },
{ AMOVB, C_LOREG,C_NONE, C_REG, 73, 8, 0, LFROM|V4 },
+ { AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM|V4 },
{ AMOVH, C_LAUTO,C_NONE, C_REG, 73, 8, REGSP, LFROM|V4 },
{ AMOVH, C_LOREG,C_NONE, C_REG, 73, 8, 0, LFROM|V4 },
+ { AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM|V4 },
{ AMOVHU, C_LAUTO,C_NONE, C_REG, 73, 8, REGSP, LFROM|V4 },
{ AMOVHU, C_LOREG,C_NONE, C_REG, 73, 8, 0, LFROM|V4 },
+ { AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM|V4 },
+
{ ALDREX, C_SOREG,C_NONE, C_REG, 77, 4, 0 },
{ ASTREX, C_SOREG,C_REG, C_REG, 78, 4, 0 },
diff --git a/src/cmd/5l/span.c b/src/cmd/5l/span.c
index 482d3e90a..4067f1a32 100644
--- a/src/cmd/5l/span.c
+++ b/src/cmd/5l/span.c
@@ -168,7 +168,6 @@ span(void)
Optab *o;
int m, bflag, i, v;
int32 c, otxt, out[6];
- int lastthumb = -1;
Section *sect;
uchar *bp;
@@ -187,7 +186,6 @@ span(void)
p->pc = c;
cursym->value = c;
- lastthumb = thumb;
autosize = p->to.offset + 4;
if(p->from.sym != S)
p->from.sym->value = c;
@@ -927,7 +925,7 @@ buildop(void)
{
int i, n, r;
- armv4 = !debug['h'];
+ armv4 = 1;
for(i=0; i<C_GOK; i++)
for(n=0; n<C_GOK; n++)
xcmp[i][n] = cmp(n, i);
diff --git a/src/cmd/6a/lex.c b/src/cmd/6a/lex.c
index 37144c888..b4c7d0c2c 100644
--- a/src/cmd/6a/lex.c
+++ b/src/cmd/6a/lex.c
@@ -56,7 +56,7 @@ void
main(int argc, char *argv[])
{
char *p;
- int nout, nproc, i, c;
+ int c;
thechar = '6';
thestring = "amd64";
@@ -96,46 +96,10 @@ main(int argc, char *argv[])
print("usage: %ca [-options] file.s\n", thechar);
errorexit();
}
- if(argc > 1 && systemtype(Windows)){
- print("can't assemble multiple files on windows\n");
+ if(argc > 1){
+ print("can't assemble multiple files\n");
errorexit();
}
- if(argc > 1 && !systemtype(Windows)) {
- nproc = 1;
- if(p = getenv("NPROC"))
- nproc = atol(p); /* */
- c = 0;
- nout = 0;
- for(;;) {
- Waitmsg *w;
-
- while(nout < nproc && argc > 0) {
- i = fork();
- if(i < 0) {
- fprint(2, "fork: %r\n");
- errorexit();
- }
- if(i == 0) {
- print("%s:\n", *argv);
- if(assemble(*argv))
- errorexit();
- exits(0);
- }
- nout++;
- argc--;
- argv++;
- }
- w = wait();
- if(w == nil) {
- if(c)
- errorexit();
- exits(0);
- }
- if(w->msg[0])
- c++;
- nout--;
- }
- }
if(assemble(argv[0]))
errorexit();
exits(0);
@@ -144,7 +108,7 @@ main(int argc, char *argv[])
int
assemble(char *file)
{
- char *ofile, incfile[20], *p;
+ char *ofile, *p;
int i, of;
ofile = alloc(strlen(file)+3); // +3 for .x\0 (x=thechar)
@@ -169,15 +133,6 @@ assemble(char *file)
} else
outfile = "/dev/null";
}
- p = getenv("INCLUDE");
- if(p) {
- setinclude(p);
- } else {
- if(systemtype(Plan9)) {
- sprint(incfile,"/%s/include", thestring);
- setinclude(strdup(incfile));
- }
- }
of = create(outfile, OWRITE, 0664);
if(of < 0) {
diff --git a/src/cmd/6g/cgen.c b/src/cmd/6g/cgen.c
index 75dc4fe13..fca4b64dd 100644
--- a/src/cmd/6g/cgen.c
+++ b/src/cmd/6g/cgen.c
@@ -283,11 +283,9 @@ cgen(Node *n, Node *res)
if(istype(nl->type, TSTRING) || isslice(nl->type)) {
// both slice and string have len one pointer into the struct.
// a zero pointer means zero length
- regalloc(&n1, types[tptr], res);
- agen(nl, &n1);
- n1.op = OINDREG;
+ igen(nl, &n1, res);
n1.type = types[TUINT32];
- n1.xoffset = Array_nel;
+ n1.xoffset += Array_nel;
gmove(&n1, res);
regfree(&n1);
break;
@@ -319,11 +317,9 @@ cgen(Node *n, Node *res)
break;
}
if(isslice(nl->type)) {
- regalloc(&n1, types[tptr], res);
- agen(nl, &n1);
- n1.op = OINDREG;
+ igen(nl, &n1, res);
n1.type = types[TUINT32];
- n1.xoffset = Array_cap;
+ n1.xoffset += Array_cap;
gmove(&n1, res);
regfree(&n1);
break;
@@ -542,7 +538,8 @@ agen(Node *n, Node *res)
gmove(&n1, &n3);
}
- ginscon(optoas(OADD, types[tptr]), v*w, &n3);
+ if (v*w != 0)
+ ginscon(optoas(OADD, types[tptr]), v*w, &n3);
gmove(&n3, res);
regfree(&n3);
break;
@@ -682,6 +679,28 @@ ret:
void
igen(Node *n, Node *a, Node *res)
{
+ Type *fp;
+ Iter flist;
+
+ switch(n->op) {
+ case ONAME:
+ if((n->class&PHEAP) || n->class == PPARAMREF)
+ break;
+ *a = *n;
+ return;
+
+ case OCALLFUNC:
+ fp = structfirst(&flist, getoutarg(n->left->type));
+ cgen_call(n, 0);
+ memset(a, 0, sizeof *a);
+ a->op = OINDREG;
+ a->val.u.reg = D_SP;
+ a->addable = 1;
+ a->xoffset = fp->width;
+ a->type = n->type;
+ return;
+ }
+
regalloc(a, types[tptr], res);
agen(n, a);
a->op = OINDREG;
@@ -848,6 +867,7 @@ bgen(Node *n, int true, Prog *to)
n2 = n1;
n2.op = OINDREG;
n2.xoffset = Array_array;
+ n2.type = types[tptr];
nodconst(&tmp, types[tptr], 0);
gins(optoas(OCMP, types[tptr]), &n2, &tmp);
patch(gbranch(a, types[tptr]), to);
diff --git a/src/cmd/6g/ggen.c b/src/cmd/6g/ggen.c
index 8d89fb164..ce66b43f0 100644
--- a/src/cmd/6g/ggen.c
+++ b/src/cmd/6g/ggen.c
@@ -201,7 +201,7 @@ cgen_callinter(Node *n, Node *res, int proc)
regalloc(&nodo, types[tptr], &nodr);
nodo.op = OINDREG;
- agen(i, &nodr); // REG = &inter
+ agen(i, &nodr); // REG = &inter
nodindreg(&nodsp, types[tptr], D_SP);
nodo.xoffset += widthptr;
@@ -1206,7 +1206,7 @@ cgen_inline(Node *n, Node *res)
Node nodes[5];
Node n1, n2, nres, ntemp;
vlong v;
- int i, narg;
+ int i, narg, nochk;
if(n->op != OCALLFUNC)
goto no;
@@ -1242,6 +1242,7 @@ slicearray:
// len = hb[3] - lb[2] (destroys hb)
n2 = *res;
n2.xoffset += Array_nel;
+ n2.type = types[TUINT32];
if(smallintconst(&nodes[3]) && smallintconst(&nodes[2])) {
v = mpgetfix(nodes[3].val.u.xval) -
@@ -1260,6 +1261,7 @@ slicearray:
// cap = nel[1] - lb[2] (destroys nel)
n2 = *res;
n2.xoffset += Array_cap;
+ n2.type = types[TUINT32];
if(smallintconst(&nodes[1]) && smallintconst(&nodes[2])) {
v = mpgetfix(nodes[1].val.u.xval) -
@@ -1288,6 +1290,7 @@ slicearray:
// ary = old[0] + (lb[2] * width[4]) (destroys old)
n2 = *res;
n2.xoffset += Array_array;
+ n2.type = types[tptr];
if(smallintconst(&nodes[2]) && smallintconst(&nodes[4])) {
v = mpgetfix(nodes[2].val.u.xval) *
@@ -1311,6 +1314,7 @@ slicearray:
return 1;
sliceslice:
+ nochk = n->etype; // skip bounds checking
ntemp.op = OXXX;
if(!sleasy(n->list->n->right)) {
Node *n0;
@@ -1340,11 +1344,13 @@ sliceslice:
n2 = nodes[0];
n2.xoffset += Array_nel;
n2.type = types[TUINT32];
- cmpandthrow(&nodes[1], &n2);
+ if(!nochk)
+ cmpandthrow(&nodes[1], &n2);
// ret.nel = old.nel[0]-lb[1];
n2 = nodes[0];
n2.xoffset += Array_nel;
+ n2.type = types[TUINT32];
regalloc(&n1, types[TUINT32], N);
gins(optoas(OAS, types[TUINT32]), &n2, &n1);
@@ -1353,22 +1359,24 @@ sliceslice:
n2 = nres;
n2.xoffset += Array_nel;
+ n2.type = types[TUINT32];
gins(optoas(OAS, types[TUINT32]), &n1, &n2);
regfree(&n1);
} else { // old[lb:hb]
- // if(hb[2] > old.cap[0]) goto throw;
n2 = nodes[0];
n2.xoffset += Array_cap;
n2.type = types[TUINT32];
- cmpandthrow(&nodes[2], &n2);
-
- // if(lb[1] > hb[2]) goto throw;
- cmpandthrow(&nodes[1], &nodes[2]);
-
+ if(!nochk) {
+ // if(hb[2] > old.cap[0]) goto throw;
+ cmpandthrow(&nodes[2], &n2);
+ // if(lb[1] > hb[2]) goto throw;
+ cmpandthrow(&nodes[1], &nodes[2]);
+ }
// ret.len = hb[2]-lb[1]; (destroys hb[2])
n2 = nres;
n2.xoffset += Array_nel;
-
+ n2.type = types[TUINT32];
+
if(smallintconst(&nodes[2]) && smallintconst(&nodes[1])) {
v = mpgetfix(nodes[2].val.u.xval) -
mpgetfix(nodes[1].val.u.xval);
@@ -1387,6 +1395,7 @@ sliceslice:
// ret.cap = old.cap[0]-lb[1]; (uses hb[2])
n2 = nodes[0];
n2.xoffset += Array_cap;
+ n2.type = types[TUINT32];
regalloc(&n1, types[TUINT32], &nodes[2]);
gins(optoas(OAS, types[TUINT32]), &n2, &n1);
@@ -1395,13 +1404,15 @@ sliceslice:
n2 = nres;
n2.xoffset += Array_cap;
+ n2.type = types[TUINT32];
+
gins(optoas(OAS, types[TUINT32]), &n1, &n2);
regfree(&n1);
// ret.array = old.array[0]+lb[1]*width[3]; (uses lb[1])
n2 = nodes[0];
n2.xoffset += Array_array;
-
+ n2.type = types[tptr];
regalloc(&n1, types[tptr], &nodes[1]);
if(smallintconst(&nodes[1]) && smallintconst(&nodes[3])) {
gins(optoas(OAS, types[tptr]), &n2, &n1);
@@ -1418,6 +1429,7 @@ sliceslice:
n2 = nres;
n2.xoffset += Array_array;
+ n2.type = types[tptr];
gins(optoas(OAS, types[tptr]), &n1, &n2);
regfree(&n1);
diff --git a/src/cmd/6g/gsubr.c b/src/cmd/6g/gsubr.c
index c3dac1fdc..ed98d1bc9 100644
--- a/src/cmd/6g/gsubr.c
+++ b/src/cmd/6g/gsubr.c
@@ -48,7 +48,7 @@ clearp(Prog *p)
/*
* generate and return proc with p->as = as,
- * linked into program. pc is next instruction.
+ * linked into program. pc is next instruction.
*/
Prog*
prog(int as)
@@ -330,11 +330,13 @@ regfree(Node *n)
{
int i;
- if(n->op == ONAME && iscomplex[n->type->etype])
+ if(n->op == ONAME)
return;
if(n->op != OREGISTER && n->op != OINDREG)
fatal("regfree: not a register");
i = n->val.u.reg;
+ if(i == D_SP)
+ return;
if(i < 0 || i >= sizeof(reg))
fatal("regfree: reg out of range");
if(reg[i] <= 0)
@@ -888,7 +890,7 @@ Prog*
gins(int as, Node *f, Node *t)
{
// Node nod;
-// int32 v;
+ int32 w;
Prog *p;
Addr af, at;
@@ -933,6 +935,27 @@ gins(int as, Node *f, Node *t)
p->to = at;
if(debug['g'])
print("%P\n", p);
+
+
+ w = 0;
+ switch(as) {
+ case AMOVB:
+ w = 1;
+ break;
+ case AMOVW:
+ w = 2;
+ break;
+ case AMOVL:
+ w = 4;
+ break;
+ case AMOVQ:
+ w = 8;
+ break;
+ }
+ if(w != 0 && f != N && (af.width > w || at.width > w)) {
+ fatal("bad width: %P (%d, %d)\n", p, af.width, at.width);
+ }
+
return p;
}
@@ -947,7 +970,7 @@ checkoffset(Addr *a, int canemitcode)
fatal("checkoffset %#llx, cannot emit code", a->offset);
// cannot rely on unmapped nil page at 0 to catch
- // reference with large offset. instead, emit explicit
+ // reference with large offset. instead, emit explicit
// test of 0(reg).
p = gins(ATESTB, nodintconst(0), N);
p->to = *a;
@@ -1106,8 +1129,9 @@ naddr(Node *n, Addr *a, int canemitcode)
naddr(n->left, a, canemitcode);
if(a->type == D_CONST && a->offset == 0)
break; // len(nil)
- a->etype = TUINT;
+ a->etype = TUINT32;
a->offset += Array_nel;
+ a->width = 4;
if(a->offset >= unmappedzero && a->offset-Array_nel < unmappedzero)
checkoffset(a, canemitcode);
break;
@@ -1117,8 +1141,9 @@ naddr(Node *n, Addr *a, int canemitcode)
naddr(n->left, a, canemitcode);
if(a->type == D_CONST && a->offset == 0)
break; // cap(nil)
- a->etype = TUINT;
+ a->etype = TUINT32;
a->offset += Array_cap;
+ a->width = 4;
if(a->offset >= unmappedzero && a->offset-Array_cap < unmappedzero)
checkoffset(a, canemitcode);
break;
@@ -1962,12 +1987,12 @@ oindex:
if(o & OAddable) {
n2 = *l;
n2.xoffset += Array_array;
- n2.type = types[TUINT64];
+ n2.type = types[tptr];
gmove(&n2, reg);
} else {
n2 = *reg;
- n2.xoffset = Array_array;
n2.op = OINDREG;
+ n2.xoffset = Array_array;
n2.type = types[tptr];
gmove(&n2, reg);
}
diff --git a/src/cmd/6g/reg.c b/src/cmd/6g/reg.c
index ed8bac3f0..b4b5b7d6b 100644
--- a/src/cmd/6g/reg.c
+++ b/src/cmd/6g/reg.c
@@ -873,14 +873,17 @@ mkvar(Reg *r, Adr *a)
// if they overlaps, disable both
if(overlap(v->offset, v->width, o, w)) {
+// print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
v->addr = 1;
flag = 1;
}
}
}
- if(a->pun)
+ if(a->pun) {
+// print("disable pun %s\n", s->name);
flag = 1;
+ }
switch(et) {
case 0:
case TFUNC:
diff --git a/src/cmd/6l/asm.c b/src/cmd/6l/asm.c
index dda19e48d..6dffa20f2 100644
--- a/src/cmd/6l/asm.c
+++ b/src/cmd/6l/asm.c
@@ -108,6 +108,9 @@ needlib(char *name)
char *p;
Sym *s;
+ if(*name == '\0')
+ return 0;
+
/* reuse hash code in symbol table */
p = smprint(".elfload.%s", name);
s = lookup(p, 0);
@@ -787,40 +790,51 @@ asmb(void)
symo = rnd(symo, PEFILEALIGN);
break;
}
- /*
- * the symbol information is stored as
- * 32-bit symbol table size
- * 32-bit line number table size
- * symbol table
- * line number table
- */
- seek(cout, symo+8, 0);
- if(debug['v'])
- Bprint(&bso, "%5.2f sp\n", cputime());
- Bflush(&bso);
- if(debug['v'])
- Bprint(&bso, "%5.2f pc\n", cputime());
- Bflush(&bso);
- if(!debug['s'])
- strnput("", INITRND-(8+symsize+lcsize)%INITRND);
- cflush();
- seek(cout, symo, 0);
- lputl(symsize);
- lputl(lcsize);
- cflush();
- if(HEADTYPE != Hwindows && !debug['s']) {
- elfsymo = symo+8+symsize+lcsize;
- seek(cout, elfsymo, 0);
- asmelfsym64();
- cflush();
- elfstro = seek(cout, 0, 1);
- elfsymsize = elfstro - elfsymo;
- ewrite(cout, elfstrdat, elfstrsize);
-
+ switch(HEADTYPE) {
+ default:
+ if(iself) {
+ /*
+ * the symbol information is stored as
+ * 32-bit symbol table size
+ * 32-bit line number table size
+ * symbol table
+ * line number table
+ */
+ seek(cout, symo+8, 0);
+ if(debug['v'])
+ Bprint(&bso, "%5.2f sp\n", cputime());
+ Bflush(&bso);
+ if(debug['v'])
+ Bprint(&bso, "%5.2f pc\n", cputime());
+ Bflush(&bso);
+ if(!debug['s'])
+ strnput("", INITRND-(8+symsize+lcsize)%INITRND);
+ cflush();
+ seek(cout, symo, 0);
+ lputl(symsize);
+ lputl(lcsize);
+ cflush();
+ elfsymo = symo+8+symsize+lcsize;
+ seek(cout, elfsymo, 0);
+ asmelfsym64();
+ cflush();
+ elfstro = seek(cout, 0, 1);
+ elfsymsize = elfstro - elfsymo;
+ ewrite(cout, elfstrdat, elfstrsize);
+
+ if(debug['v'])
+ Bprint(&bso, "%5.2f dwarf\n", cputime());
+
+ dwarfemitdebugsections();
+ }
+ break;
+ case Hwindows:
+ seek(cout, symo, 0);
if(debug['v'])
Bprint(&bso, "%5.2f dwarf\n", cputime());
dwarfemitdebugsections();
+ break;
}
}
diff --git a/src/cmd/6l/l.h b/src/cmd/6l/l.h
index 33ca51b2c..f4ee6aa92 100644
--- a/src/cmd/6l/l.h
+++ b/src/cmd/6l/l.h
@@ -350,9 +350,6 @@ EXTERN Sym* fromgotype; // type symbol on last p->from read
EXTERN vlong textstksiz;
EXTERN vlong textarg;
-EXTERN int elfstrsize;
-EXTERN char* elfstrdat;
-EXTERN int elftextsh;
extern Optab optab[];
extern Optab* opindex[];
diff --git a/src/cmd/8a/lex.c b/src/cmd/8a/lex.c
index ca18b69ce..078861877 100644
--- a/src/cmd/8a/lex.c
+++ b/src/cmd/8a/lex.c
@@ -56,7 +56,7 @@ void
main(int argc, char *argv[])
{
char *p;
- int nout, nproc, i, c;
+ int c;
thechar = '8';
thestring = "386";
@@ -96,46 +96,10 @@ main(int argc, char *argv[])
print("usage: %ca [-options] file.s\n", thechar);
errorexit();
}
- if(argc > 1 && systemtype(Windows)){
- print("can't assemble multiple files on windows\n");
+ if(argc > 1){
+ print("can't assemble multiple files\n");
errorexit();
}
- if(argc > 1 && !systemtype(Windows)) {
- nproc = 1;
- if(p = getenv("NPROC"))
- nproc = atol(p); /* */
- c = 0;
- nout = 0;
- for(;;) {
- Waitmsg *w;
-
- while(nout < nproc && argc > 0) {
- i = fork();
- if(i < 0) {
- fprint(2, "fork: %r\n");
- errorexit();
- }
- if(i == 0) {
- print("%s:\n", *argv);
- if(assemble(*argv))
- errorexit();
- exits(0);
- }
- nout++;
- argc--;
- argv++;
- }
- w = wait();
- if(w == nil) {
- if(c)
- errorexit();
- exits(0);
- }
- if(w->msg[0])
- c++;
- nout--;
- }
- }
if(assemble(argv[0]))
errorexit();
exits(0);
@@ -144,7 +108,7 @@ main(int argc, char *argv[])
int
assemble(char *file)
{
- char *ofile, incfile[20], *p;
+ char *ofile, *p;
int i, of;
ofile = alloc(strlen(file)+3); // +3 for .x\0 (x=thechar)
@@ -169,15 +133,6 @@ assemble(char *file)
} else
outfile = "/dev/null";
}
- p = getenv("INCLUDE");
- if(p) {
- setinclude(p);
- } else {
- if(systemtype(Plan9)) {
- sprint(incfile,"/%s/include", thestring);
- setinclude(strdup(incfile));
- }
- }
of = create(outfile, OWRITE, 0664);
if(of < 0) {
diff --git a/src/cmd/8g/cgen.c b/src/cmd/8g/cgen.c
index 596824a6c..1614a2d77 100644
--- a/src/cmd/8g/cgen.c
+++ b/src/cmd/8g/cgen.c
@@ -232,6 +232,7 @@ cgen(Node *n, Node *res)
cgen(nl, res);
break;
}
+
tempname(&n2, n->type);
mgen(nl, &n1, res);
gmove(&n1, &n2);
@@ -277,15 +278,10 @@ cgen(Node *n, Node *res)
if(istype(nl->type, TSTRING) || isslice(nl->type)) {
// both slice and string have len one pointer into the struct.
igen(nl, &n1, res);
- n1.op = OREGISTER; // was OINDREG
- regalloc(&n2, types[TUINT32], &n1);
- n1.op = OINDREG;
n1.type = types[TUINT32];
- n1.xoffset = Array_nel;
- gmove(&n1, &n2);
- gmove(&n2, res);
+ n1.xoffset += Array_nel;
+ gmove(&n1, res);
regfree(&n1);
- regfree(&n2);
break;
}
fatal("cgen: OLEN: unknown type %lT", nl->type);
@@ -594,9 +590,10 @@ agen(Node *n, Node *res)
gmove(&n1, &n3);
}
- nodconst(&n2, types[tptr], v*w);
- gins(optoas(OADD, types[tptr]), &n2, &n3);
-
+ if (v*w != 0) {
+ nodconst(&n2, types[tptr], v*w);
+ gins(optoas(OADD, types[tptr]), &n2, &n3);
+ }
gmove(&n3, res);
regfree(&n3);
break;
@@ -729,7 +726,27 @@ void
igen(Node *n, Node *a, Node *res)
{
Node n1;
-
+ Type *fp;
+ Iter flist;
+
+ switch(n->op) {
+ case ONAME:
+ if((n->class&PHEAP) || n->class == PPARAMREF)
+ break;
+ *a = *n;
+ return;
+
+ case OCALLFUNC:
+ fp = structfirst(&flist, getoutarg(n->left->type));
+ cgen_call(n, 0);
+ memset(a, 0, sizeof *a);
+ a->op = OINDREG;
+ a->val.u.reg = D_SP;
+ a->addable = 1;
+ a->xoffset = fp->width;
+ a->type = n->type;
+ return;
+ }
// release register for now, to avoid
// confusing tempname.
if(res != N && res->op == OREGISTER)
@@ -919,6 +936,7 @@ bgen(Node *n, int true, Prog *to)
n2 = n1;
n2.op = OINDREG;
n2.xoffset = Array_array;
+ n2.type = types[tptr];
nodconst(&tmp, types[tptr], 0);
gins(optoas(OCMP, types[tptr]), &n2, &tmp);
patch(gbranch(a, types[tptr]), to);
diff --git a/src/cmd/8g/ggen.c b/src/cmd/8g/ggen.c
index 920725c3e..223152536 100644
--- a/src/cmd/8g/ggen.c
+++ b/src/cmd/8g/ggen.c
@@ -915,7 +915,7 @@ cgen_inline(Node *n, Node *res)
Node nodes[5];
Node n1, n2, nres, ntemp;
vlong v;
- int i, narg;
+ int i, narg, nochk;
if(n->op != OCALLFUNC)
goto no;
@@ -953,6 +953,7 @@ slicearray:
// len = hb[3] - lb[2] (destroys hb)
n2 = *res;
n2.xoffset += Array_nel;
+ n2.type = types[TUINT32];
if(smallintconst(&nodes[3]) && smallintconst(&nodes[2])) {
v = mpgetfix(nodes[3].val.u.xval) -
@@ -971,6 +972,7 @@ slicearray:
// cap = nel[1] - lb[2] (destroys nel)
n2 = *res;
n2.xoffset += Array_cap;
+ n2.type = types[TUINT32];
if(smallintconst(&nodes[1]) && smallintconst(&nodes[2])) {
v = mpgetfix(nodes[1].val.u.xval) -
@@ -999,6 +1001,7 @@ slicearray:
// ary = old[0] + (lb[2] * width[4]) (destroys old)
n2 = *res;
n2.xoffset += Array_array;
+ n2.type = types[tptr];
if(smallintconst(&nodes[2]) && smallintconst(&nodes[4])) {
v = mpgetfix(nodes[2].val.u.xval) *
@@ -1026,6 +1029,7 @@ slicearray:
sliceslice:
if(!fix64(n->list, narg))
goto no;
+ nochk = n->etype; // skip bounds checking
ntemp.op = OXXX;
if(!sleasy(n->list->n->right)) {
Node *n0;
@@ -1055,11 +1059,13 @@ sliceslice:
n2 = nodes[0];
n2.xoffset += Array_nel;
n2.type = types[TUINT32];
- cmpandthrow(&nodes[1], &n2);
+ if(!nochk)
+ cmpandthrow(&nodes[1], &n2);
// ret.nel = old.nel[0]-lb[1];
n2 = nodes[0];
n2.xoffset += Array_nel;
+ n2.type = types[TUINT32];
regalloc(&n1, types[TUINT32], N);
gins(optoas(OAS, types[TUINT32]), &n2, &n1);
@@ -1068,22 +1074,25 @@ sliceslice:
n2 = nres;
n2.xoffset += Array_nel;
+ n2.type = types[TUINT32];
gins(optoas(OAS, types[TUINT32]), &n1, &n2);
regfree(&n1);
} else { // old[lb:hb]
- // if(hb[2] > old.cap[0]) goto throw;
n2 = nodes[0];
n2.xoffset += Array_cap;
n2.type = types[TUINT32];
- cmpandthrow(&nodes[2], &n2);
-
- // if(lb[1] > hb[2]) goto throw;
- cmpandthrow(&nodes[1], &nodes[2]);
+ if (!nochk) {
+ // if(hb[2] > old.cap[0]) goto throw;
+ cmpandthrow(&nodes[2], &n2);
+ // if(lb[1] > hb[2]) goto throw;
+ cmpandthrow(&nodes[1], &nodes[2]);
+ }
// ret.len = hb[2]-lb[1]; (destroys hb[2])
n2 = nres;
n2.xoffset += Array_nel;
-
+ n2.type = types[TUINT32];
+
if(smallintconst(&nodes[2]) && smallintconst(&nodes[1])) {
v = mpgetfix(nodes[2].val.u.xval) -
mpgetfix(nodes[1].val.u.xval);
@@ -1102,6 +1111,7 @@ sliceslice:
// ret.cap = old.cap[0]-lb[1]; (uses hb[2])
n2 = nodes[0];
n2.xoffset += Array_cap;
+ n2.type = types[TUINT32];
regalloc(&n1, types[TUINT32], &nodes[2]);
gins(optoas(OAS, types[TUINT32]), &n2, &n1);
@@ -1110,12 +1120,14 @@ sliceslice:
n2 = nres;
n2.xoffset += Array_cap;
+ n2.type = types[TUINT32];
gins(optoas(OAS, types[TUINT32]), &n1, &n2);
regfree(&n1);
// ret.array = old.array[0]+lb[1]*width[3]; (uses lb[1])
n2 = nodes[0];
n2.xoffset += Array_array;
+ n2.type = types[tptr];
regalloc(&n1, types[tptr], &nodes[1]);
if(smallintconst(&nodes[1]) && smallintconst(&nodes[3])) {
@@ -1135,6 +1147,7 @@ sliceslice:
n2 = nres;
n2.xoffset += Array_array;
+ n2.type = types[tptr];
gins(optoas(OAS, types[tptr]), &n1, &n2);
regfree(&n1);
diff --git a/src/cmd/8g/gsubr.c b/src/cmd/8g/gsubr.c
index 8ed7e5564..5ad35fdce 100644
--- a/src/cmd/8g/gsubr.c
+++ b/src/cmd/8g/gsubr.c
@@ -698,7 +698,6 @@ ginit(void)
reg[i] = 1;
for(i=D_AL; i<=D_DI; i++)
reg[i] = 0;
-
for(i=0; i<nelem(resvd); i++)
reg[resvd[i]]++;
}
@@ -789,6 +788,8 @@ err:
return;
out:
+ if (i == D_SP)
+ print("alloc SP\n");
if(reg[i] == 0) {
regpc[i] = (ulong)__builtin_return_address(0);
if(i == D_AX || i == D_CX || i == D_DX || i == D_SP) {
@@ -804,10 +805,14 @@ void
regfree(Node *n)
{
int i;
-
+
+ if(n->op == ONAME)
+ return;
if(n->op != OREGISTER && n->op != OINDREG)
fatal("regfree: not a register");
i = n->val.u.reg;
+ if(i == D_SP)
+ return;
if(i < 0 || i >= sizeof(reg))
fatal("regfree: reg out of range");
if(reg[i] <= 0)
@@ -1129,6 +1134,9 @@ gmove(Node *f, Node *t)
case CASE(TINT8, TUINT8):
case CASE(TUINT8, TINT8):
case CASE(TUINT8, TUINT8):
+ a = AMOVB;
+ break;
+
case CASE(TINT16, TINT8): // truncate
case CASE(TUINT16, TINT8):
case CASE(TINT32, TINT8):
@@ -1138,7 +1146,7 @@ gmove(Node *f, Node *t)
case CASE(TINT32, TUINT8):
case CASE(TUINT32, TUINT8):
a = AMOVB;
- break;
+ goto rsrc;
case CASE(TINT64, TINT8): // truncate low word
case CASE(TUINT64, TINT8):
@@ -1146,7 +1154,7 @@ gmove(Node *f, Node *t)
case CASE(TUINT64, TUINT8):
split64(f, &flo, &fhi);
nodreg(&r1, t->type, D_AX);
- gins(AMOVB, &flo, &r1);
+ gmove(&flo, &r1);
gins(AMOVB, &r1, t);
splitclean();
return;
@@ -1155,12 +1163,15 @@ gmove(Node *f, Node *t)
case CASE(TINT16, TUINT16):
case CASE(TUINT16, TINT16):
case CASE(TUINT16, TUINT16):
+ a = AMOVW;
+ break;
+
case CASE(TINT32, TINT16): // truncate
case CASE(TUINT32, TINT16):
case CASE(TINT32, TUINT16):
case CASE(TUINT32, TUINT16):
a = AMOVW;
- break;
+ goto rsrc;
case CASE(TINT64, TINT16): // truncate low word
case CASE(TUINT64, TINT16):
@@ -1168,7 +1179,7 @@ gmove(Node *f, Node *t)
case CASE(TUINT64, TUINT16):
split64(f, &flo, &fhi);
nodreg(&r1, t->type, D_AX);
- gins(AMOVW, &flo, &r1);
+ gmove(&flo, &r1);
gins(AMOVW, &r1, t);
splitclean();
return;
@@ -1186,7 +1197,7 @@ gmove(Node *f, Node *t)
case CASE(TUINT64, TUINT32):
split64(f, &flo, &fhi);
nodreg(&r1, t->type, D_AX);
- gins(AMOVL, &flo, &r1);
+ gmove(&flo, &r1);
gins(AMOVL, &r1, t);
splitclean();
return;
@@ -1340,14 +1351,14 @@ gmove(Node *f, Node *t)
case TUINT8:
gins(ATESTL, ncon(0xffffff00), &t1);
p1 = gbranch(AJEQ, T);
- gins(AMOVB, ncon(0), &t1);
+ gins(AMOVL, ncon(0), &t1);
patch(p1, pc);
gmove(&t1, t);
break;
case TUINT16:
gins(ATESTL, ncon(0xffff0000), &t1);
p1 = gbranch(AJEQ, T);
- gins(AMOVW, ncon(0), &t1);
+ gins(AMOVL, ncon(0), &t1);
patch(p1, pc);
gmove(&t1, t);
break;
@@ -1418,11 +1429,11 @@ gmove(Node *f, Node *t)
split64(t, &tlo, &thi);
gins(AXORL, ncon(0x80000000), &thi); // + 2^63
patch(p3, pc);
- patch(p1, pc);
splitclean();
-
// restore rounding mode
gins(AFLDCW, &t1, N);
+
+ patch(p1, pc);
return;
/*
@@ -1571,6 +1582,14 @@ gmove(Node *f, Node *t)
gins(a, f, t);
return;
+rsrc:
+ // requires register source
+ regalloc(&r1, f->type, t);
+ gmove(f, &r1);
+ gins(a, &r1, t);
+ regfree(&r1);
+ return;
+
rdst:
// requires register destination
regalloc(&r1, t->type, t);
@@ -1623,6 +1642,7 @@ gins(int as, Node *f, Node *t)
{
Prog *p;
Addr af, at;
+ int w;
if(as == AFMOVF && f && f->op == OREGISTER && t && t->op == OREGISTER)
fatal("gins MOVF reg, reg");
@@ -1648,6 +1668,26 @@ gins(int as, Node *f, Node *t)
p->to = at;
if(debug['g'])
print("%P\n", p);
+
+ w = 0;
+ switch(as) {
+ case AMOVB:
+ w = 1;
+ break;
+ case AMOVW:
+ w = 2;
+ break;
+ case AMOVL:
+ w = 4;
+ break;
+ }
+
+ if(1 && w != 0 && f != N && (af.width > w || at.width > w)) {
+ dump("bad width from:", f);
+ dump("bad width to:", t);
+ fatal("bad width: %P (%d, %d)\n", p, af.width, at.width);
+ }
+
return p;
}
@@ -1799,8 +1839,9 @@ naddr(Node *n, Addr *a, int canemitcode)
naddr(n->left, a, canemitcode);
if(a->type == D_CONST && a->offset == 0)
break; // len(nil)
- a->etype = TUINT;
+ a->etype = TUINT32;
a->offset += Array_nel;
+ a->width = 4;
if(a->offset >= unmappedzero && a->offset-Array_nel < unmappedzero)
checkoffset(a, canemitcode);
break;
@@ -1810,8 +1851,9 @@ naddr(Node *n, Addr *a, int canemitcode)
naddr(n->left, a, canemitcode);
if(a->type == D_CONST && a->offset == 0)
break; // cap(nil)
- a->etype = TUINT;
+ a->etype = TUINT32;
a->offset += Array_cap;
+ a->width = 4;
if(a->offset >= unmappedzero && a->offset-Array_nel < unmappedzero)
checkoffset(a, canemitcode);
break;
diff --git a/src/cmd/8l/asm.c b/src/cmd/8l/asm.c
index f28b8d904..7de7753a2 100644
--- a/src/cmd/8l/asm.c
+++ b/src/cmd/8l/asm.c
@@ -104,6 +104,9 @@ needlib(char *name)
char *p;
Sym *s;
+ if(*name == '\0')
+ return 0;
+
/* reuse hash code in symbol table */
p = smprint(".dynlib.%s", name);
s = lookup(p, 0);
@@ -532,6 +535,8 @@ doelf(void)
elfstr[ElfStrGosymcounts] = addstring(shstrtab, ".gosymcounts");
elfstr[ElfStrGosymtab] = addstring(shstrtab, ".gosymtab");
elfstr[ElfStrGopclntab] = addstring(shstrtab, ".gopclntab");
+ elfstr[ElfStrSymtab] = addstring(shstrtab, ".symtab");
+ elfstr[ElfStrStrtab] = addstring(shstrtab, ".strtab");
dwarfaddshstrings(shstrtab);
}
elfstr[ElfStrShstrtab] = addstring(shstrtab, ".shstrtab");
@@ -658,7 +663,7 @@ asmb(void)
{
int32 v, magic;
int a, dynsym;
- uint32 symo, startva, machlink;
+ uint32 symo, startva, machlink, elfsymo, elfstro, elfsymsize;
ElfEhdr *eh;
ElfPhdr *ph, *pph;
ElfShdr *sh;
@@ -670,6 +675,10 @@ asmb(void)
Bprint(&bso, "%5.2f asmb\n", cputime());
Bflush(&bso);
+ elfsymsize = 0;
+ elfstro = 0;
+ elfsymo = 0;
+
sect = segtext.sect;
seek(cout, sect->vaddr - segtext.vaddr + segtext.fileoff, 0);
codeblk(sect->vaddr, sect->len);
@@ -736,32 +745,48 @@ asmb(void)
symo = rnd(symo, INITRND);
break;
case Hwindows:
- // TODO(brainman): not sure what symo meant to be, but it is not used for Windows PE for now anyway
symo = rnd(HEADR+segtext.filelen, PEFILEALIGN)+segdata.filelen;
symo = rnd(symo, PEFILEALIGN);
break;
}
- if(!debug['s']) {
- seek(cout, symo, 0);
-
- if(HEADTYPE == Hplan9x32) {
- asmplan9sym();
+ switch(HEADTYPE) {
+ default:
+ if(iself) {
+ if(debug['v'])
+ Bprint(&bso, "%5.2f elfsym\n", cputime());
+ elfsymo = symo+8+symsize+lcsize;
+ seek(cout, elfsymo, 0);
+ asmelfsym32();
cflush();
-
- sym = lookup("pclntab", 0);
- if(sym != nil) {
- lcsize = sym->np;
- for(i=0; i < lcsize; i++)
- cput(sym->p[i]);
-
- cflush();
- }
-
- } else if(HEADTYPE != Hwindows) {
+ elfstro = seek(cout, 0, 1);
+ elfsymsize = elfstro - elfsymo;
+ ewrite(cout, elfstrdat, elfstrsize);
+
if(debug['v'])
Bprint(&bso, "%5.2f dwarf\n", cputime());
dwarfemitdebugsections();
}
+ break;
+ case Hplan9x32:
+ seek(cout, symo, 0);
+ asmplan9sym();
+ cflush();
+
+ sym = lookup("pclntab", 0);
+ if(sym != nil) {
+ lcsize = sym->np;
+ for(i=0; i < lcsize; i++)
+ cput(sym->p[i]);
+
+ cflush();
+ }
+ break;
+ case Hwindows:
+ seek(cout, symo, 0);
+ if(debug['v'])
+ Bprint(&bso, "%5.2f dwarf\n", cputime());
+ dwarfemitdebugsections();
+ break;
}
}
if(debug['v'])
@@ -1082,6 +1107,20 @@ asmb(void)
sh->addralign = 1;
shsym(sh, lookup("pclntab", 0));
+ sh = newElfShdr(elfstr[ElfStrSymtab]);
+ sh->type = SHT_SYMTAB;
+ sh->off = elfsymo;
+ sh->size = elfsymsize;
+ sh->addralign = 4;
+ sh->entsize = 16;
+ sh->link = eh->shnum; // link to strtab
+
+ sh = newElfShdr(elfstr[ElfStrStrtab]);
+ sh->type = SHT_STRTAB;
+ sh->off = elfstro;
+ sh->size = elfstrsize;
+ sh->addralign = 1;
+
dwarfaddelfheaders();
}
diff --git a/src/cmd/8l/obj.c b/src/cmd/8l/obj.c
index 2a38f7ef0..f84a30f39 100644
--- a/src/cmd/8l/obj.c
+++ b/src/cmd/8l/obj.c
@@ -188,7 +188,7 @@ main(int argc, char *argv[])
if(INITDAT == -1)
INITDAT = 0;
if(INITRND == -1)
- INITRND = 1;
+ INITRND = 4096;
break;
case Hmsdoscom: /* MS-DOS .COM */
HEADR = 0;
diff --git a/src/cmd/8l/pass.c b/src/cmd/8l/pass.c
index 28589b66a..72ae043d6 100644
--- a/src/cmd/8l/pass.c
+++ b/src/cmd/8l/pass.c
@@ -32,23 +32,10 @@
#include "l.h"
#include "../ld/lib.h"
+#include "../../pkg/runtime/stack.h"
static void xfol(Prog*, Prog**);
-// see ../../pkg/runtime/proc.c:/StackGuard
-enum
-{
-#ifdef __WINDOWS__
- // use larger stacks to compensate for larger stack guard,
- // needed for exception handling.
- StackSmall = 256,
- StackBig = 8192,
-#else
- StackSmall = 128,
- StackBig = 4096,
-#endif
-};
-
Prog*
brchain(Prog *p)
{
diff --git a/src/cmd/cc/lex.c b/src/cmd/cc/lex.c
index dba8ff634..71cc89bf0 100644
--- a/src/cmd/cc/lex.c
+++ b/src/cmd/cc/lex.c
@@ -88,7 +88,7 @@ void
main(int argc, char *argv[])
{
char **defs, *p;
- int nproc, nout, i, c, ndef;
+ int c, ndef;
ensuresymb(NSYMB);
memset(debug, 0, sizeof(debug));
@@ -142,51 +142,10 @@ main(int argc, char *argv[])
print("usage: %cc [-options] files\n", thechar);
errorexit();
}
- if(argc > 1 && systemtype(Windows)){
- print("can't compile multiple files on windows\n");
+ if(argc > 1){
+ print("can't compile multiple files\n");
errorexit();
}
- if(argc > 1 && !systemtype(Windows)) {
- nproc = 1;
- /*
- * if we're writing acid to standard output, don't compile
- * concurrently, to avoid interleaving output.
- */
- if(((!debug['a'] && !debug['q'] && !debug['Q']) || debug['n']) &&
- (p = getenv("NPROC")) != nil)
- nproc = atol(p); /* */
- c = 0;
- nout = 0;
- for(;;) {
- Waitmsg *w;
-
- while(nout < nproc && argc > 0) {
- i = fork();
- if(i < 0) {
- print("cannot create a process\n");
- errorexit();
- }
- if(i == 0) {
- fprint(2, "%s:\n", *argv);
- if (compile(*argv, defs, ndef))
- errorexit();
- exits(0);
- }
- nout++;
- argc--;
- argv++;
- }
- w = wait();
- if(w == nil) {
- if(c)
- errorexit();
- exits(0);
- }
- if(w->msg[0])
- c++;
- nout--;
- }
- }
if(argc == 0)
c = compile("stdin", defs, ndef);
@@ -201,7 +160,7 @@ main(int argc, char *argv[])
int
compile(char *file, char **defs, int ndef)
{
- char *ofile, incfile[20];
+ char *ofile;
char *p, **av, opt[256];
int i, c, fd[2];
static int first = 1;
@@ -236,15 +195,6 @@ compile(char *file, char **defs, int ndef)
outfile = "/dev/null";
}
- if(p = getenv("INCLUDE")) {
- setinclude(p);
- } else {
- if(systemtype(Plan9)) {
- sprint(incfile, "/%s/include", thestring);
- setinclude(strdup(incfile));
- setinclude("/sys/include");
- }
- }
if (first)
Binit(&diagbuf, 1, OWRITE);
/*
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index ae5ca2c7d..ac6561345 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -604,7 +604,7 @@ const gccTmp = "_obj/_cgo_.o"
// gccCmd returns the gcc command line to use for compiling
// the input.
func (p *Package) gccCmd() []string {
- return []string{
+ c := []string{
p.gccName(),
p.gccMachine(),
"-Wall", // many warnings
@@ -614,15 +614,17 @@ func (p *Package) gccCmd() []string {
"-fno-eliminate-unused-debug-types", // gets rid of e.g. untyped enum otherwise
"-c", // do not link
"-xc", // input language is C
- "-", // read input from standard input
}
+ c = append(c, p.GccOptions...)
+ c = append(c, "-") // read input from standard input
+ return c
}
// gccDebug runs gcc -gdwarf-2 over the C program stdin and
// returns the corresponding DWARF data and any messages
// printed to standard error.
func (p *Package) gccDebug(stdin []byte) *dwarf.Data {
- runGcc(stdin, append(p.gccCmd(), p.GccOptions...))
+ runGcc(stdin, p.gccCmd())
// Try to parse f as ELF and Mach-O and hope one works.
var f interface {
@@ -649,8 +651,8 @@ func (p *Package) gccDebug(stdin []byte) *dwarf.Data {
// #defines that gcc encountered while processing the input
// and its included files.
func (p *Package) gccDefines(stdin []byte) string {
- base := []string{p.gccName(), p.gccMachine(), "-E", "-dM", "-xc", "-"}
- stdout, _ := runGcc(stdin, append(base, p.GccOptions...))
+ base := []string{p.gccName(), p.gccMachine(), "-E", "-dM", "-xc"}
+ stdout, _ := runGcc(stdin, append(append(base, p.GccOptions...), "-"))
return stdout
}
@@ -659,7 +661,7 @@ func (p *Package) gccDefines(stdin []byte) string {
// gcc to fail.
func (p *Package) gccErrors(stdin []byte) string {
// TODO(rsc): require failure
- args := append(p.gccCmd(), p.GccOptions...)
+ args := p.gccCmd()
if *debugGcc {
fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(args, " "))
os.Stderr.Write(stdin)
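
The reshuffling above folds p.GccOptions into gccCmd itself, so gccDebug and gccErrors no longer append the options on their own, and the trailing "-" (read the C source from standard input) is always the last argument, after any user-supplied flags. A sketch of the resulting argv shape; the machine flag and the user options below are illustrative stand-ins, not values taken from this change:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// "-m32" stands in for p.gccMachine(), the -I/-D flags for p.GccOptions.
		base := []string{"gcc", "-m32", "-Wall",
			"-fno-eliminate-unused-debug-types", "-c", "-xc"}
		user := []string{"-I/opt/foo/include", "-DFOO=1"}
		args := append(append(base, user...), "-") // "-" (stdin input) stays last
		fmt.Println(strings.Join(args, " "))
	}
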
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index bc031cc58..2ce4e9752 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -331,7 +331,11 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
fmt.Fprintf(fgcc, "\tint e;\n") // assuming 32 bit (see comment above structType)
fmt.Fprintf(fgcc, "\terrno = 0;\n")
}
- fmt.Fprintf(fgcc, "\t%s *a = v;\n", ctype)
+ // We're trying to write a gcc struct that matches 6c/8c/5c's layout.
+ // Use packed attribute to force no padding in this struct in case
+ // gcc has different packing requirements. For example,
+ // on 386 Windows, gcc wants to 8-align int64s, but 8c does not.
+ fmt.Fprintf(fgcc, "\t%s __attribute__((__packed__)) *a = v;\n", ctype)
fmt.Fprintf(fgcc, "\t")
if t := n.FuncType.Result; t != nil {
fmt.Fprintf(fgcc, "a->r = ")
@@ -370,7 +374,9 @@ func (p *Package) writeExports(fgo2, fc, fm *os.File) {
fn := exp.Func
// Construct a gcc struct matching the 6c argument and
- // result frame.
+ // result frame. The gcc struct will be compiled with
+ // __attribute__((packed)) so all padding must be accounted
+ // for explicitly.
ctype := "struct {\n"
off := int64(0)
npad := 0
@@ -458,7 +464,7 @@ func (p *Package) writeExports(fgo2, fc, fm *os.File) {
fmt.Fprintf(fgcc, "extern _cgoexp%s_%s(void *, int);\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgcc, "\n%s\n", s)
fmt.Fprintf(fgcc, "{\n")
- fmt.Fprintf(fgcc, "\t%s a;\n", ctype)
+ fmt.Fprintf(fgcc, "\t%s __attribute__((packed)) a;\n", ctype)
if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
fmt.Fprintf(fgcc, "\t%s r;\n", gccResult)
}
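
The packed attribute matters because gcc and the Go-side compilers can lay out the same argument frame differently; as the comment above says, gcc on 386 Windows wants int64 fields 8-aligned while 8c uses 4-byte alignment. The Go/8c side of that difference can be inspected with package unsafe; a small sketch (the struct is hypothetical, and the printed numbers assume GOARCH=386 with the gc toolchain):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// A frame like the ones cgo generates for a function taking (int32, int64).
	type frame struct {
		a int32
		b int64
	}

	func main() {
		var f frame
		// On 386 with the gc toolchain this prints "offset 4 size 12": int64 is
		// 4-byte aligned, so no padding follows a. gcc's default layout would put
		// b at offset 8, which is why the generated C struct is packed and any
		// padding is written out explicitly.
		fmt.Println("offset", unsafe.Offsetof(f.b), "size", unsafe.Sizeof(f))
	}
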
diff --git a/src/cmd/gc/builtin.c.boot b/src/cmd/gc/builtin.c.boot
index bdbca7f78..c9bf501d1 100644
--- a/src/cmd/gc/builtin.c.boot
+++ b/src/cmd/gc/builtin.c.boot
@@ -1,5 +1,6 @@
char *runtimeimport =
"package runtime\n"
+ "import runtime \"runtime\"\n"
"func \"\".new (? int32) *any\n"
"func \"\".panicindex ()\n"
"func \"\".panicslice ()\n"
@@ -21,7 +22,6 @@ char *runtimeimport =
"func \"\".printsp ()\n"
"func \"\".goprintf ()\n"
"func \"\".concatstring ()\n"
- "func \"\".append ()\n"
"func \"\".appendslice (typ *uint8, x any, y []any) any\n"
"func \"\".cmpstring (? string, ? string) int\n"
"func \"\".slicestring (? string, ? int, ? int) string\n"
@@ -81,6 +81,7 @@ char *runtimeimport =
"func \"\".selectgo (sel *uint8)\n"
"func \"\".block ()\n"
"func \"\".makeslice (typ *uint8, nel int64, cap int64) []any\n"
+ "func \"\".growslice (typ *uint8, old []any, cap int64) []any\n"
"func \"\".sliceslice1 (old []any, lb uint64, width uint64) []any\n"
"func \"\".sliceslice (old []any, lb uint64, hb uint64, width uint64) []any\n"
"func \"\".slicearray (old *any, nel uint64, lb uint64, hb uint64, width uint64) []any\n"
@@ -98,6 +99,7 @@ char *runtimeimport =
"$$\n";
char *unsafeimport =
"package unsafe\n"
+ "import runtime \"runtime\"\n"
"type \"\".Pointer uintptr\n"
"func \"\".Offsetof (? any) int\n"
"func \"\".Sizeof (? any) int\n"
diff --git a/src/cmd/gc/go.h b/src/cmd/gc/go.h
index f58b76789..359881e11 100644
--- a/src/cmd/gc/go.h
+++ b/src/cmd/gc/go.h
@@ -225,6 +225,7 @@ struct Node
Type* realtype; // as determined by typecheck
NodeList* list;
NodeList* rlist;
+ Node* orig; // original form, for printing
// for-body
NodeList* ninit;
diff --git a/src/cmd/gc/runtime.go b/src/cmd/gc/runtime.go
index 35d11eca9..00fc720b8 100644
--- a/src/cmd/gc/runtime.go
+++ b/src/cmd/gc/runtime.go
@@ -110,6 +110,7 @@ func selectgo(sel *byte)
func block()
func makeslice(typ *byte, nel int64, cap int64) (ary []any)
+func growslice(typ *byte, old []any, n int64) (ary []any)
func sliceslice1(old []any, lb uint64, width uint64) (ary []any)
func sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any)
func slicearray(old *any, nel uint64, lb uint64, hb uint64, width uint64) (ary []any)
diff --git a/src/cmd/gc/subr.c b/src/cmd/gc/subr.c
index bb2505694..326a5ba74 100644
--- a/src/cmd/gc/subr.c
+++ b/src/cmd/gc/subr.c
@@ -1073,6 +1073,9 @@ Jconv(Fmt *fp)
if(n->implicit != 0)
fmtprint(fp, " implicit(%d)", n->implicit);
+ if(n->pun != 0)
+ fmtprint(fp, " pun(%d)", n->pun);
+
return 0;
}
@@ -1141,7 +1144,7 @@ Tpretty(Fmt *fp, Type *t)
Type *t1;
Sym *s;
- if(debug['r']) {
+ if(0 && debug['r']) {
debug['r'] = 0;
fmtprint(fp, "%T (orig=%T)", t, t->orig);
debug['r'] = 1;
@@ -1454,6 +1457,8 @@ Nconv(Fmt *fp)
}
if(fp->flags & FmtSharp) {
+ if(n->orig != N)
+ n = n->orig;
exprfmt(fp, n, 0);
goto out;
}
@@ -3107,7 +3112,7 @@ genwrapper(Type *rcvr, Type *method, Sym *newnam, int iface)
Type *tpad;
int isddd;
- if(debug['r'])
+ if(0 && debug['r'])
print("genwrapper rcvrtype=%T method=%T newnam=%S\n",
rcvr, method, newnam);
@@ -3161,7 +3166,7 @@ genwrapper(Type *rcvr, Type *method, Sym *newnam, int iface)
fn->nbody = list1(n);
}
- if(debug['r'])
+ if(0 && debug['r'])
dumplist("genwrapper body", fn->nbody);
funcbody(fn);
@@ -3256,8 +3261,9 @@ implements(Type *t, Type *iface, Type **m, Type **samename, int *ptr)
// the method does not exist for value types.
rcvr = getthisx(tm->type)->type->type;
if(isptr[rcvr->etype] && !isptr[t0->etype] && !followptr && !isifacemethod(tm->type)) {
- if(debug['r'])
+ if(0 && debug['r'])
yyerror("interface pointer mismatch");
+
*m = im;
*samename = nil;
*ptr = 1;
diff --git a/src/cmd/gc/swt.c b/src/cmd/gc/swt.c
index 6e8436c3c..c2968c44b 100644
--- a/src/cmd/gc/swt.c
+++ b/src/cmd/gc/swt.c
@@ -867,8 +867,11 @@ typecheckswitch(Node *n)
case Etype: // type switch
if(ll->n->op == OLITERAL && istype(ll->n->type, TNIL))
;
- else if(ll->n->op != OTYPE && ll->n->type != T)
+ else if(ll->n->op != OTYPE && ll->n->type != T) {
yyerror("%#N is not a type", ll->n);
+ // reset to original type
+ ll->n = n->ntest->right;
+ }
break;
}
}
diff --git a/src/cmd/gc/typecheck.c b/src/cmd/gc/typecheck.c
index c48bf7a29..9aaf3e6ef 100644
--- a/src/cmd/gc/typecheck.c
+++ b/src/cmd/gc/typecheck.c
@@ -894,12 +894,20 @@ reswitch:
// might be constant
switch(t->etype) {
case TSTRING:
- if(isconst(l, CTSTR))
- nodconst(n, types[TINT], l->val.u.sval->len);
+ if(isconst(l, CTSTR)) {
+ r = nod(OXXX, N, N);
+ nodconst(r, types[TINT], l->val.u.sval->len);
+ r->orig = n;
+ n = r;
+ }
break;
case TARRAY:
- if(t->bound >= 0 && l->op == ONAME)
- nodconst(n, types[TINT], t->bound);
+ if(t->bound >= 0 && l->op == ONAME) {
+ r = nod(OXXX, N, N);
+ nodconst(r, types[TINT], t->bound);
+ r->orig = n;
+ n = r;
+ }
break;
}
n->type = types[TINT];
@@ -1357,7 +1365,10 @@ ret:
goto error;
}
if((top & Etop) && !(top & (Ecall|Erv|Etype)) && !(ok & Etop)) {
- yyerror("%#N not used", n);
+ if(n->diag == 0) {
+ yyerror("%#N not used", n);
+ n->diag = 1;
+ }
goto error;
}
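
Both len hunks above fold the expression into a constant node while keeping the original expression in the new orig field (added to Node in go.h earlier in this change), so diagnostics can print len(...) rather than the folded number; the last hunk keeps a node from reporting "not used" more than once. For reference, the language rule being folded:

	package main

	func main() {
		const n = len("hello") // len of a string constant is itself a constant: n == 5
		var a [n]int           // so it can appear where a constant is required
		var b [len(a)]byte     // len of an array-typed variable with a fixed bound is constant too
		_, _ = a, b
	}
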
diff --git a/src/cmd/gc/unsafe.c b/src/cmd/gc/unsafe.c
index 33f375631..540994ddd 100644
--- a/src/cmd/gc/unsafe.c
+++ b/src/cmd/gc/unsafe.c
@@ -41,6 +41,7 @@ unsafenmagic(Node *nn)
tr = r->type;
if(tr == T)
goto bad;
+ dowidth(tr);
v = tr->width;
goto yes;
}
diff --git a/src/cmd/gc/walk.c b/src/cmd/gc/walk.c
index 278eef414..b3b400556 100644
--- a/src/cmd/gc/walk.c
+++ b/src/cmd/gc/walk.c
@@ -18,12 +18,13 @@ static NodeList* paramstoheap(Type **argin, int out);
static NodeList* reorder1(NodeList*);
static NodeList* reorder3(NodeList*);
static Node* addstr(Node*, NodeList**);
+static Node* appendslice(Node*, NodeList**);
static Node* append(Node*, NodeList**);
static NodeList* walkdefstack;
// can this code branch reach the end
-// without an undcontitional RETURN
+// without an unconditional RETURN
// this is hard, so it is conservative
static int
walkret(NodeList *l)
@@ -805,18 +806,17 @@ walkexpr(Node **np, NodeList **init)
n->ninit = nil;
walkexpr(&n->left, init);
n->left = safeexpr(n->left, init);
+
if(oaslit(n, init))
goto ret;
+
walkexpr(&n->right, init);
- l = n->left;
- r = n->right;
- if(l == N || r == N)
- goto ret;
- r = ascompatee1(n->op, l, r, init);
- if(r != N) {
+ if(n->left != N && n->right != N) {
+ r = convas(nod(OAS, n->left, n->right), init);
r->dodata = n->dodata;
n = r;
}
+
goto ret;
case OAS2:
@@ -1134,6 +1134,7 @@ walkexpr(Node **np, NodeList **init)
case OINDEXMAP:
if(n->etype == 1)
goto ret;
+
t = n->left->type;
n = mkcall1(mapfn("mapaccess1", t), t->type, init, n->left, n->right);
goto ret;
@@ -1188,6 +1189,7 @@ walkexpr(Node **np, NodeList **init)
// sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any)
// sliceslice1(old []any, lb uint64, width uint64) (ary []any)
t = n->type;
+ et = n->etype;
if(n->right->left == N)
l = nodintconst(0);
else
@@ -1210,6 +1212,7 @@ walkexpr(Node **np, NodeList **init)
l,
nodintconst(t->type->width));
}
+ n->etype = et; // preserve no-typecheck flag from OSLICE to the slice* call.
goto ret;
slicearray:
@@ -1332,7 +1335,10 @@ walkexpr(Node **np, NodeList **init)
goto ret;
case OAPPEND:
- n = append(n, init);
+ if(n->isddd)
+ n = appendslice(n, init);
+ else
+ n = append(n, init);
goto ret;
case OCOPY:
@@ -1953,23 +1959,18 @@ callnew(Type *t)
static Node*
convas(Node *n, NodeList **init)
{
- Node *l, *r;
Type *lt, *rt;
if(n->op != OAS)
fatal("convas: not OAS %O", n->op);
- n->typecheck = 1;
- lt = T;
- rt = T;
+ n->typecheck = 1;
- l = n->left;
- r = n->right;
- if(l == N || r == N)
+ if(n->left == N || n->right == N)
goto out;
- lt = l->type;
- rt = r->type;
+ lt = n->left->type;
+ rt = n->right->type;
if(lt == T || rt == T)
goto out;
@@ -1987,7 +1988,7 @@ convas(Node *n, NodeList **init)
if(eqtype(lt, rt))
goto out;
- n->right = assignconv(r, lt, "assignment");
+ n->right = assignconv(n->right, lt, "assignment");
walkexpr(&n->right, init);
out:
@@ -2365,42 +2366,85 @@ addstr(Node *n, NodeList **init)
}
static Node*
-append(Node *n, NodeList **init)
+appendslice(Node *n, NodeList **init)
{
- int i, j;
- Node *f, *r;
- NodeList *in, *args;
+ Node *f;
- if(n->isddd) {
- f = syslook("appendslice", 1);
- argtype(f, n->type);
- argtype(f, n->type->type);
- argtype(f, n->type);
- r = mkcall1(f, n->type, init, typename(n->type), n->list->n, n->list->next->n);
- return r;
+ f = syslook("appendslice", 1);
+ argtype(f, n->type);
+ argtype(f, n->type->type);
+ argtype(f, n->type);
+ return mkcall1(f, n->type, init, typename(n->type), n->list->n, n->list->next->n);
+}
+
+// expand append(src, a [, b]* ) to
+//
+// init {
+// s := src
+// const argc = len(args) - 1
+// if cap(s) - len(s) < argc {
+// s = growslice(s, argc)
+// }
+// n := len(s)
+// s = s[:n+argc]
+// s[n] = a
+// s[n+1] = b
+// ...
+// }
+// s
+static Node*
+append(Node *n, NodeList **init)
+{
+ NodeList *l, *a;
+ Node *nsrc, *ns, *nn, *na, *nx, *fn;
+ int argc;
+
+ walkexprlistsafe(n->list, init);
+
+ nsrc = n->list->n;
+ argc = count(n->list) - 1;
+ if (argc < 1) {
+ return nsrc;
}
- j = count(n->list) - 1;
- f = syslook("append", 1);
- f->type = T;
- f->ntype = nod(OTFUNC, N, N);
- in = list1(nod(ODCLFIELD, N, typenod(ptrto(types[TUINT8])))); // type
- in = list(in, nod(ODCLFIELD, N, typenod(types[TINT]))); // count
- in = list(in, nod(ODCLFIELD, N, typenod(n->type))); // slice
- for(i=0; i<j; i++)
- in = list(in, nod(ODCLFIELD, N, typenod(n->type->type)));
- f->ntype->list = in;
- f->ntype->rlist = list1(nod(ODCLFIELD, N, typenod(n->type)));
-
- args = list1(typename(n->type));
- args = list(args, nodintconst(j));
- args = concat(args, n->list);
-
- r = nod(OCALL, f, N);
- r->list = args;
- typecheck(&r, Erv);
- walkexpr(&r, init);
- r->type = n->type;
+ l = nil;
- return r;
+ ns = nod(OXXX, N, N); // var s
+ tempname(ns, nsrc->type);
+ l = list(l, nod(OAS, ns, nsrc)); // s = src
+
+ na = nodintconst(argc); // const argc
+ nx = nod(OIF, N, N); // if cap(s) - len(s) < argc
+ nx->ntest = nod(OLT, nod(OSUB, nod(OCAP, ns, N), nod(OLEN, ns, N)), na);
+
+ fn = syslook("growslice", 1); // growslice(<type>, old []T, n int64) (ret []T)
+ argtype(fn, ns->type->type); // 1 old []any
+ argtype(fn, ns->type->type); // 2 ret []any
+
+ nx->nbody = list1(nod(OAS, ns, mkcall1(fn, ns->type, &nx->ninit,
+ typename(ns->type),
+ ns,
+ conv(na, types[TINT64]))));
+ l = list(l, nx);
+
+ nn = nod(OXXX, N, N); // var n
+ tempname(nn, types[TINT]);
+ l = list(l, nod(OAS, nn, nod(OLEN, ns, N))); // n = len(s)
+
+ nx = nod(OSLICE, ns, nod(OKEY, N, nod(OADD, nn, na))); // ...s[:n+argc]
+ nx->etype = 1; // disable bounds check
+ l = list(l, nod(OAS, ns, nx)); // s = s[:n+argc]
+
+ for (a = n->list->next; a != nil; a = a->next) {
+ nx = nod(OINDEX, ns, nn); // s[n] ...
+ nx->etype = 1; // disable bounds check
+ l = list(l, nod(OAS, nx, a->n)); // s[n] = arg
+ if (a->next != nil)
+ l = list(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))); // n = n + 1
+ }
+
+ typechecklist(l, Etop);
+ walkstmtlist(l);
+ *init = concat(*init, l);
+ return ns;
}
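
The open-coded append follows the recipe in the comment above. Written out by hand for the two-argument case it looks roughly like the function below; make+copy stands in for the runtime growslice call, and the real rewrite also disables bounds checks on the reslice and element stores:

	package main

	import "fmt"

	// Hand expansion of s = append(s, a, b), mirroring the compiler rewrite above.
	func appendTwo(s []int, a, b int) []int {
		const argc = 2
		if cap(s)-len(s) < argc {
			t := make([]int, len(s), 2*cap(s)+argc) // stand-in for growslice
			copy(t, s)
			s = t
		}
		n := len(s)
		s = s[:n+argc]
		s[n] = a
		s[n+1] = b
		return s
	}

	func main() {
		fmt.Println(appendTwo([]int{1, 2}, 3, 4)) // [1 2 3 4]
	}
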
diff --git a/src/cmd/godoc/doc.go b/src/cmd/godoc/doc.go
index f0006e750..26d436d72 100644
--- a/src/cmd/godoc/doc.go
+++ b/src/cmd/godoc/doc.go
@@ -47,6 +47,9 @@ The flags are:
width of tabs in units of spaces
-timestamps=true
show timestamps with directory listings
+ -index
+ enable identifier and full text search index
+ (no search box is shown if -index is not set)
-maxresults=10000
maximum number of full text search results shown
(no full text index is built if maxresults <= 0)
diff --git a/src/cmd/godoc/godoc.go b/src/cmd/godoc/godoc.go
index b8e9dbc92..f97c764f9 100644
--- a/src/cmd/godoc/godoc.go
+++ b/src/cmd/godoc/godoc.go
@@ -64,9 +64,12 @@ var (
// layout control
tabwidth = flag.Int("tabwidth", 4, "tab width")
showTimestamps = flag.Bool("timestamps", true, "show timestamps with directory listings")
- maxResults = flag.Int("maxresults", 10000, "maximum number of full text search results shown")
templateDir = flag.String("templates", "", "directory containing alternate template files")
+ // search index
+ indexEnabled = flag.Bool("index", false, "enable search index")
+ maxResults = flag.Int("maxresults", 10000, "maximum number of full text search results shown")
+
// file system mapping
fsMap Mapping // user-defined mapping
fsTree RWValue // *Directory tree of packages, updated with each sync
@@ -687,17 +690,19 @@ func readTemplates() {
func servePage(w http.ResponseWriter, title, subtitle, query string, content []byte) {
d := struct {
- Title string
- Subtitle string
- PkgRoots []string
- Query string
- Version string
- Menu []byte
- Content []byte
+ Title string
+ Subtitle string
+ PkgRoots []string
+ SearchBox bool
+ Query string
+ Version string
+ Menu []byte
+ Content []byte
}{
title,
subtitle,
fsMap.PrefixList(),
+ *indexEnabled,
query,
runtime.Version(),
nil,
@@ -1174,11 +1179,15 @@ func lookup(query string) (result SearchResult) {
}
// is the result accurate?
- if _, ts := fsModified.get(); timestamp < ts {
- // The index is older than the latest file system change
- // under godoc's observation. Indexing may be in progress
- // or start shortly (see indexer()).
- result.Alert = "Indexing in progress: result may be inaccurate"
+ if *indexEnabled {
+ if _, ts := fsModified.get(); timestamp < ts {
+ // The index is older than the latest file system change
+ // under godoc's observation. Indexing may be in progress
+ // or start shortly (see indexer()).
+ result.Alert = "Indexing in progress: result may be inaccurate"
+ }
+ } else {
+ result.Alert = "Search index disabled: no results available"
}
return
diff --git a/src/cmd/godoc/main.go b/src/cmd/godoc/main.go
index e426626b3..967ea8727 100644
--- a/src/cmd/godoc/main.go
+++ b/src/cmd/godoc/main.go
@@ -176,7 +176,7 @@ func remoteSearch(query string) (res *http.Response, err os.Error) {
// remote search
for _, addr := range addrs {
url := "http://" + addr + search
- res, _, err = http.Get(url)
+ res, err = http.Get(url)
if err == nil && res.StatusCode == http.StatusOK {
break
}
@@ -246,8 +246,13 @@ func main() {
log.Printf("address = %s", *httpAddr)
log.Printf("goroot = %s", *goroot)
log.Printf("tabwidth = %d", *tabwidth)
- if *maxResults > 0 {
- log.Printf("maxresults = %d (full text index enabled)", *maxResults)
+ switch {
+ case !*indexEnabled:
+ log.Print("search index disabled")
+ case *maxResults > 0:
+ log.Printf("full text index enabled (maxresults = %d)", *maxResults)
+ default:
+ log.Print("identifier search index enabled")
}
if !fsMap.IsEmpty() {
log.Print("user-defined mapping:")
@@ -284,7 +289,9 @@ func main() {
}
// Start indexing goroutine.
- go indexer()
+ if *indexEnabled {
+ go indexer()
+ }
// Start http server.
if err := http.ListenAndServe(*httpAddr, handler); err != nil {
diff --git a/src/cmd/gofix/Makefile b/src/cmd/gofix/Makefile
index 12f09b4e4..d19de5c4f 100644
--- a/src/cmd/gofix/Makefile
+++ b/src/cmd/gofix/Makefile
@@ -10,6 +10,7 @@ GOFILES=\
netdial.go\
main.go\
osopen.go\
+ httpfinalurl.go\
httpserver.go\
procattr.go\
reflect.go\
diff --git a/src/cmd/gofix/httpfinalurl.go b/src/cmd/gofix/httpfinalurl.go
new file mode 100644
index 000000000..53642b22f
--- /dev/null
+++ b/src/cmd/gofix/httpfinalurl.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+)
+
+var httpFinalURLFix = fix{
+ "httpfinalurl",
+ httpfinalurl,
+ `Adapt http Get calls to not have a finalURL result parameter.
+
+ http://codereview.appspot.com/4535056/
+`,
+}
+
+func init() {
+ register(httpFinalURLFix)
+}
+
+func httpfinalurl(f *ast.File) bool {
+ if !imports(f, "http") {
+ return false
+ }
+
+ fixed := false
+ walk(f, func(n interface{}) {
+ // Fix up calls to http.Get.
+ //
+ // If they have blank identifiers, remove them:
+ // resp, _, err := http.Get(url)
+ // -> resp, err := http.Get(url)
+ //
+ // But if they're using the finalURL parameter, warn:
+ // resp, finalURL, err := http.Get(url)
+ as, ok := n.(*ast.AssignStmt)
+ if !ok || len(as.Lhs) != 3 || len(as.Rhs) != 1 {
+ return
+ }
+
+ if !isCall(as.Rhs[0], "http", "Get") {
+ return
+ }
+
+ if isBlank(as.Lhs[1]) {
+ as.Lhs = []ast.Expr{as.Lhs[0], as.Lhs[2]}
+ fixed = true
+ } else {
+ warn(as.Pos(), "call to http.Get records final URL")
+ }
+ })
+ return fixed
+}
diff --git a/src/cmd/gofix/httpfinalurl_test.go b/src/cmd/gofix/httpfinalurl_test.go
new file mode 100644
index 000000000..9e7d6242d
--- /dev/null
+++ b/src/cmd/gofix/httpfinalurl_test.go
@@ -0,0 +1,37 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func init() {
+ addTestCases(httpfinalurlTests)
+}
+
+var httpfinalurlTests = []testCase{
+ {
+ Name: "finalurl.0",
+ In: `package main
+
+import (
+ "http"
+)
+
+func f() {
+ resp, _, err := http.Get("http://www.google.com/")
+ _, _ = resp, err
+}
+`,
+ Out: `package main
+
+import (
+ "http"
+)
+
+func f() {
+ resp, err := http.Get("http://www.google.com/")
+ _, _ = resp, err
+}
+`,
+ },
+}
diff --git a/src/cmd/gofmt/test.sh b/src/cmd/gofmt/test.sh
index 99ec76932..f60ff9b32 100755
--- a/src/cmd/gofmt/test.sh
+++ b/src/cmd/gofmt/test.sh
@@ -42,7 +42,8 @@ apply1() {
bug163.go | bug166.go | bug169.go | bug217.go | bug222.go | \
bug226.go | bug228.go | bug248.go | bug274.go | bug280.go | \
bug282.go | bug287.go | bug298.go | bug299.go | bug300.go | \
- bug302.go | bug306.go | bug322.go | bug324.go ) return ;;
+ bug302.go | bug306.go | bug322.go | bug324.go | bug335.go | \
+ bug340.go ) return ;;
esac
# the following directories are skipped because they contain test
# cases for syntax errors and thus won't parse in the first place:
diff --git a/src/cmd/gotest/gotest.go b/src/cmd/gotest/gotest.go
index a7ba8dd11..8c81baf97 100644
--- a/src/cmd/gotest/gotest.go
+++ b/src/cmd/gotest/gotest.go
@@ -52,7 +52,7 @@ var (
xFlag bool
)
-// elapsed returns time elapsed since gotest started.
+// elapsed returns the number of seconds since gotest started.
func elapsed() float64 {
return float64(time.Nanoseconds()-start) / 1e9
}
@@ -182,7 +182,7 @@ func getTestFileNames() {
}
}
-// parseFiles parses the files and remembers the packages we find.
+// parseFiles parses the files and remembers the packages we find.
func parseFiles() {
fileSet := token.NewFileSet()
for _, f := range files {
@@ -285,6 +285,7 @@ func doRun(argv []string, returnStdout bool) string {
}
cmd += `"` + v + `"`
}
+ command = "sh"
argv = []string{"sh", "-c", cmd}
}
var err os.Error
diff --git a/src/cmd/gotype/gotype.go b/src/cmd/gotype/gotype.go
index 568467322..b6a23ae5f 100644
--- a/src/cmd/gotype/gotype.go
+++ b/src/cmd/gotype/gotype.go
@@ -178,8 +178,10 @@ func processPackage(fset *token.FileSet, files map[string]*ast.File) {
report(err)
return
}
- // TODO(gri): typecheck package
- _ = pkg
+ _, err = types.Check(fset, pkg)
+ if err != nil {
+ report(err)
+ }
}
diff --git a/src/cmd/ld/data.c b/src/cmd/ld/data.c
index 0cb2b2138..3f3faade0 100644
--- a/src/cmd/ld/data.c
+++ b/src/cmd/ld/data.c
@@ -804,6 +804,10 @@ dodata(void)
diag("%s: no size", s->name);
t = 1;
}
+ if(t >= PtrSize)
+ t = rnd(t, PtrSize);
+ else if(t > 2)
+ t = rnd(t, 4);
if(t & 1)
;
else if(t & 2)
@@ -826,6 +830,10 @@ dodata(void)
diag("unexpected symbol type %d", s->type);
}
t = s->size;
+ if(t >= PtrSize)
+ t = rnd(t, PtrSize);
+ else if(t > 2)
+ t = rnd(t, 4);
if(t & 1)
;
else if(t & 2)
@@ -899,10 +907,8 @@ address(void)
segdata.fileoff = va - segtext.vaddr + segtext.fileoff;
if(HEADTYPE == Hwindows)
segdata.fileoff = segtext.fileoff + rnd(segtext.len, PEFILEALIGN);
- if(HEADTYPE == Hplan9x32) {
- segdata.vaddr = va = rnd(va, 4096);
+ if(HEADTYPE == Hplan9x32)
segdata.fileoff = segtext.fileoff + segtext.filelen;
- }
for(s=segdata.sect; s != nil; s=s->next) {
s->vaddr = va;
va += s->len;
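
The two identical blocks above round each symbol's size up before the existing alignment test that follows them, so anything larger than two bytes gets at least 4-byte alignment and anything at least pointer-sized gets pointer alignment (PtrSize is 4 for 8l, 8 for 6l). A sketch of the rule, with the linker's round-up helper written out:

	package main

	import "fmt"

	const ptrSize = 4 // 8l; 6l uses 8

	// rnd rounds v up to a multiple of r, as the linker's helper does.
	func rnd(v, r int64) int64 {
		if r <= 0 {
			return v
		}
		v += r - 1
		v -= v % r
		return v
	}

	// alignedSize applies the new rule from dodata.
	func alignedSize(t int64) int64 {
		switch {
		case t >= ptrSize:
			return rnd(t, ptrSize)
		case t > 2:
			return rnd(t, 4)
		}
		return t
	}

	func main() {
		for _, t := range []int64{1, 2, 3, 5, 9} {
			fmt.Println(t, "->", alignedSize(t))
		}
	}
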
diff --git a/src/cmd/ld/dwarf.c b/src/cmd/ld/dwarf.c
index 98b068008..ed11f5e5a 100644
--- a/src/cmd/ld/dwarf.c
+++ b/src/cmd/ld/dwarf.c
@@ -2562,7 +2562,6 @@ dwarfaddmachoheaders(void)
void
dwarfaddpeheaders(void)
{
- dwarfemitdebugsections();
newPEDWARFSection(".debug_abbrev", abbrevsize);
newPEDWARFSection(".debug_line", linesize);
newPEDWARFSection(".debug_frame", framesize);
diff --git a/src/cmd/ld/elf.h b/src/cmd/ld/elf.h
index 08583cc8f..d1370d28b 100644
--- a/src/cmd/ld/elf.h
+++ b/src/cmd/ld/elf.h
@@ -978,6 +978,10 @@ ElfShdr* elfshbits(Section*);
void elfsetstring(char*, int);
void elfaddverneed(Sym*);
+EXTERN int elfstrsize;
+EXTERN char* elfstrdat;
+EXTERN int elftextsh;
+
/*
* Total amount of space to reserve at the start of the file
* for Header, PHeaders, SHeaders, and interp.
diff --git a/src/cmd/ld/go.c b/src/cmd/ld/go.c
index e52c5cb34..a19fe460d 100644
--- a/src/cmd/ld/go.c
+++ b/src/cmd/ld/go.c
@@ -454,6 +454,7 @@ loaddynimport(char *file, char *pkg, char *p, int n)
if(strcmp(name, "_") == 0 && strcmp(def, "_") == 0) {
// allow #pragma dynimport _ _ "foo.so"
// to force a link of foo.so.
+ havedynamic = 1;
adddynlib(lib);
continue;
}
@@ -468,6 +469,7 @@ loaddynimport(char *file, char *pkg, char *p, int n)
s->dynimpname = def;
s->dynimpvers = q;
s->type = SDYNIMPORT;
+ havedynamic = 1;
}
}
return;
diff --git a/src/cmd/ld/lib.c b/src/cmd/ld/lib.c
index 15219ba11..105d982e4 100644
--- a/src/cmd/ld/lib.c
+++ b/src/cmd/ld/lib.c
@@ -259,6 +259,18 @@ loadlib(void)
Bprint(&bso, "%5.2f autolib: %s (from %s)\n", cputime(), library[i].file, library[i].objref);
objfile(library[i].file, library[i].pkg);
}
+
+ // We've loaded all the code now.
+ // If there are no dynamic libraries needed, gcc disables dynamic linking.
+ // Because of this, glibc's dynamic ELF loader occasionally (like in version 2.13)
+ // assumes that a dynamic binary always refers to at least one dynamic library.
+ // Rather than be a source of test cases for glibc, disable dynamic linking
+ // the same way that gcc would.
+ //
+ // Exception: on OS X, programs such as Shark only work with dynamic
+ // binaries, so leave it enabled on OS X (Mach-O) binaries.
+ if(!havedynamic && HEADTYPE != Hdarwin)
+ debug['d'] = 1;
}
/*
diff --git a/src/cmd/ld/lib.h b/src/cmd/ld/lib.h
index 8b603a04a..cd4608085 100644
--- a/src/cmd/ld/lib.h
+++ b/src/cmd/ld/lib.h
@@ -122,6 +122,7 @@ EXTERN char* outfile;
EXTERN int32 nsymbol;
EXTERN char* thestring;
EXTERN int ndynexp;
+EXTERN int havedynamic;
EXTERN Segment segtext;
EXTERN Segment segdata;
@@ -185,6 +186,7 @@ vlong addsize(Sym*, Sym*);
vlong adduint8(Sym*, uint8);
vlong adduint16(Sym*, uint16);
void asmsym(void);
+void asmelfsym32(void);
void asmelfsym64(void);
void asmplan9sym(void);
void strnput(char*, int);
diff --git a/src/cmd/ld/pe.c b/src/cmd/ld/pe.c
index d523ca9c5..1c0c66538 100644
--- a/src/cmd/ld/pe.c
+++ b/src/cmd/ld/pe.c
@@ -484,13 +484,13 @@ asmbpe(void)
d->Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA|
IMAGE_SCN_MEM_READ|IMAGE_SCN_MEM_WRITE;
+ if(!debug['s'])
+ dwarfaddpeheaders();
+
addimports(nextfileoff, d);
addexports(nextfileoff);
- if(!debug['s'])
- dwarfaddpeheaders();
-
addsymtable();
fh.NumberOfSections = nsect;
diff --git a/src/cmd/prof/gopprof b/src/cmd/prof/gopprof
index 8fa00cbe8..8863fc623 100755
--- a/src/cmd/prof/gopprof
+++ b/src/cmd/prof/gopprof
@@ -2880,17 +2880,18 @@ sub FetchSymbols {
my @toask = @pcs;
while (@toask > 0) {
my $n = @toask;
- if ($n > 49) { $n = 49; }
+ # NOTE(rsc): Limiting the number of PCs requested per round
+ # used to be necessary, but I think it was a bug in
+ # debug/pprof/symbol's implementation. Leaving here
+ # in case I am wrong.
+ # if ($n > 49) { $n = 49; }
my @thisround = @toask[0..$n];
-my $t = @toask;
-print STDERR "$n $t\n";
@toask = @toask[($n+1)..(@toask-1)];
my $post_data = join("+", sort((map {"0x" . "$_"} @thisround)));
open(POSTFILE, ">$main::tmpfile_sym");
print POSTFILE $post_data;
close(POSTFILE);
-print STDERR "SYMBL!\n";
my $url = SymbolPageURL();
$url = ResolveRedirectionForCurl($url);
my $command_line = "$CURL -sd '\@$main::tmpfile_sym' '$url'";
diff --git a/src/make.bash b/src/make.bash
index 79e368cb5..84b9908f4 100755
--- a/src/make.bash
+++ b/src/make.bash
@@ -47,6 +47,9 @@ rm -f "$GOBIN"/gomake
) >"$GOBIN"/gomake
chmod +x "$GOBIN"/gomake
+# TODO(brainman): delete this after 01/01/2012.
+rm -f "$GOBIN"/gotest # remove old bash version of gotest on Windows
+
if [ -d /selinux -a -f /selinux/booleans/allow_execstack -a -x /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then
if ! cat /selinux/booleans/allow_execstack | grep -c '^1 1$' >> /dev/null ; then
echo "WARNING: the default SELinux policy on, at least, Fedora 12 breaks "
diff --git a/src/pkg/Makefile b/src/pkg/Makefile
index b046064a6..c84da57f2 100644
--- a/src/pkg/Makefile
+++ b/src/pkg/Makefile
@@ -59,6 +59,7 @@ DIRS=\
crypto/tls\
crypto/twofish\
crypto/x509\
+ crypto/x509/crl\
crypto/xtea\
debug/dwarf\
debug/macho\
@@ -103,9 +104,13 @@ DIRS=\
http/fcgi\
http/pprof\
http/httptest\
+ http/spdy\
image\
+ image/bmp\
+ image/gif\
image/jpeg\
image/png\
+ image/tiff\
image/ycbcr\
index/suffixarray\
io\
@@ -185,6 +190,8 @@ NOTEST+=\
hash\
http/pprof\
http/httptest\
+ image/bmp\
+ image/gif\
net/dict\
rand\
runtime/cgo\
diff --git a/src/pkg/asn1/asn1.go b/src/pkg/asn1/asn1.go
index 5f470aed7..2650ef2a2 100644
--- a/src/pkg/asn1/asn1.go
+++ b/src/pkg/asn1/asn1.go
@@ -20,6 +20,7 @@ package asn1
// everything by any means.
import (
+ "big"
"fmt"
"os"
"reflect"
@@ -88,6 +89,27 @@ func parseInt(bytes []byte) (int, os.Error) {
return int(ret64), nil
}
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) *big.Int {
+ ret := new(big.Int)
+ if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+ // This is a negative number.
+ notBytes := make([]byte, len(bytes))
+ for i := range notBytes {
+ notBytes[i] = ^bytes[i]
+ }
+ ret.SetBytes(notBytes)
+ ret.Add(ret, bigOne)
+ ret.Neg(ret)
+ return ret
+ }
+ ret.SetBytes(bytes)
+ return ret
+}
+
// BIT STRING
// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
@@ -164,9 +186,9 @@ func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
return true
}
-// parseObjectIdentifier parses an OBJECT IDENTIFER from the given bytes and
-// returns it. An object identifer is a sequence of variable length integers
-// that are assigned in a hierarachy.
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
func parseObjectIdentifier(bytes []byte) (s []int, err os.Error) {
if len(bytes) == 0 {
err = SyntaxError{"zero length OBJECT IDENTIFIER"}
@@ -269,7 +291,7 @@ func isPrintable(b byte) bool {
b == ':' ||
b == '=' ||
b == '?' ||
- // This is techincally not allowed in a PrintableString.
+ // This is technically not allowed in a PrintableString.
// However, x509 certificates with wildcard strings don't
// always use the correct string type so we permit it.
b == '*'
@@ -425,6 +447,7 @@ var (
timeType = reflect.TypeOf(&time.Time{})
rawValueType = reflect.TypeOf(RawValue{})
rawContentsType = reflect.TypeOf(RawContent(nil))
+ bigIntType = reflect.TypeOf(new(big.Int))
)
// invalidLength returns true iff offset + length > sliceLength, or if the
@@ -639,6 +662,10 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam
case flagType:
v.SetBool(true)
return
+ case bigIntType:
+ parsedInt := parseBigInt(innerBytes)
+ v.Set(reflect.ValueOf(parsedInt))
+ return
}
switch val := v; val.Kind() {
case reflect.Bool:
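
With bigIntType wired into parseField here (and into the marshaling side below), an ASN.1 INTEGER can now be decoded into and encoded from a *big.Int field, which is what arbitrarily large values such as RSA moduli need. A round-trip sketch; the import paths are today's encoding/asn1 and math/big, which were plain "asn1" and "big" at the time of this change:

	package main

	import (
		"encoding/asn1"
		"fmt"
		"math/big"
	)

	type payload struct {
		N *big.Int
	}

	func main() {
		// SEQUENCE { INTEGER 65537 }, hand-encoded DER.
		der := []byte{0x30, 0x05, 0x02, 0x03, 0x01, 0x00, 0x01}

		var p payload
		if _, err := asn1.Unmarshal(der, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.N) // 65537

		out, err := asn1.Marshal(p)
		if err != nil {
			panic(err)
		}
		fmt.Printf("% x\n", out) // round-trips to the same seven bytes
	}
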
diff --git a/src/pkg/asn1/asn1_test.go b/src/pkg/asn1/asn1_test.go
index 78f562805..463dbe026 100644
--- a/src/pkg/asn1/asn1_test.go
+++ b/src/pkg/asn1/asn1_test.go
@@ -42,6 +42,33 @@ func TestParseInt64(t *testing.T) {
}
}
+var bigIntTests = []struct {
+ in []byte
+ base10 string
+}{
+ {[]byte{0xff}, "-1"},
+ {[]byte{0x00}, "0"},
+ {[]byte{0x01}, "1"},
+ {[]byte{0x00, 0xff}, "255"},
+ {[]byte{0xff, 0x00}, "-256"},
+ {[]byte{0x01, 0x00}, "256"},
+}
+
+func TestParseBigInt(t *testing.T) {
+ for i, test := range bigIntTests {
+ ret := parseBigInt(test.in)
+ if ret.String() != test.base10 {
+ t.Errorf("#%d: bad result from %x, got %s want %s", i, test.in, ret.String(), test.base10)
+ }
+ fw := newForkableWriter()
+ marshalBigInt(fw, ret)
+ result := fw.Bytes()
+ if !bytes.Equal(result, test.in) {
+ t.Errorf("#%d: got %x from marshaling %s, want %x", i, result, ret, test.in)
+ }
+ }
+}
+
type bitStringTest struct {
in []byte
ok bool
diff --git a/src/pkg/asn1/common.go b/src/pkg/asn1/common.go
index 158987747..9db887e25 100644
--- a/src/pkg/asn1/common.go
+++ b/src/pkg/asn1/common.go
@@ -10,7 +10,7 @@ import (
"strings"
)
-// ASN.1 objects have metadata preceeding them:
+// ASN.1 objects have metadata preceding them:
// the tag: the type of the object
// a flag denoting if this object is compound or not
// the class type: the namespace of the tag
@@ -132,6 +132,8 @@ func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
return tagUTCTime, false, true
case enumeratedType:
return tagEnum, false, true
+ case bigIntType:
+ return tagInteger, false, true
}
switch t.Kind() {
case reflect.Bool:
diff --git a/src/pkg/asn1/marshal.go b/src/pkg/asn1/marshal.go
index a3e1145b8..771ac2824 100644
--- a/src/pkg/asn1/marshal.go
+++ b/src/pkg/asn1/marshal.go
@@ -5,6 +5,7 @@
package asn1
import (
+ "big"
"bytes"
"fmt"
"io"
@@ -125,6 +126,43 @@ func int64Length(i int64) (numBytes int) {
return
}
+func marshalBigInt(out *forkableWriter, n *big.Int) (err os.Error) {
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement
+ // form. So we'll subtract 1 and invert. If the
+ // most-significant-bit isn't set then we'll need to pad the
+ // beginning with 0xff in order to keep the number negative.
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ err = out.WriteByte(0xff)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ } else if n.Sign() == 0 {
+ // Zero is written as a single zero byte rather than no bytes.
+ err = out.WriteByte(0x00)
+ } else {
+ bytes := n.Bytes()
+ if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+ // We'll have to pad this with 0x00 in order to stop it
+ // looking like a negative number.
+ err = out.WriteByte(0)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ }
+ return
+}
+
func marshalLength(out *forkableWriter, i int) (err os.Error) {
n := lengthLength(i)
@@ -334,6 +372,8 @@ func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameter
return marshalBitString(out, value.Interface().(BitString))
case objectIdentifierType:
return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
+ case bigIntType:
+ return marshalBigInt(out, value.Interface().(*big.Int))
}
switch v := value; v.Kind() {
@@ -351,7 +391,7 @@ func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameter
startingField := 0
// If the first element of the structure is a non-empty
- // RawContents, then we don't bother serialising the rest.
+ // RawContents, then we don't bother serializing the rest.
if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
s := v.Field(0)
if s.Len() > 0 {
@@ -361,7 +401,7 @@ func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameter
}
/* The RawContents will contain the tag and
* length fields but we'll also be writing
- * those outselves, so we strip them out of
+ * those ourselves, so we strip them out of
* bytes */
_, err = out.Write(stripTagAndLength(bytes))
return
@@ -420,6 +460,9 @@ func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters)
if v.Type() == rawValueType {
rv := v.Interface().(RawValue)
+ if rv.Class == 0 && rv.Tag == 0 && len(rv.Bytes) == 0 && params.optional {
+ return
+ }
err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
if err != nil {
return
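
The sign handling in marshalBigInt is easiest to see on small values: a positive number whose top bit is set gets a leading 0x00 so it still reads as positive, and a negative number whose two's-complement bytes would read as positive gets a leading 0xff. A sketch through the public Marshal entry point (today's import paths):

	package main

	import (
		"encoding/asn1"
		"fmt"
		"math/big"
	)

	func main() {
		for _, v := range []int64{0, 127, 128, -128, -129} {
			der, err := asn1.Marshal(big.NewInt(v))
			if err != nil {
				panic(err)
			}
			fmt.Printf("%5d -> % x\n", v, der)
		}
		// Expected DER (tag 02 = INTEGER, then a length, then the content bytes):
		//     0 -> 02 01 00      a single zero byte rather than no bytes
		//   127 -> 02 01 7f      fits as-is
		//   128 -> 02 02 00 80   0x00 pad keeps it positive
		//  -128 -> 02 01 80      already reads as negative
		//  -129 -> 02 02 ff 7f   0xff pad keeps it negative
	}
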
diff --git a/src/pkg/big/int.go b/src/pkg/big/int.go
index f1ea7b1c2..74fbef48d 100755
--- a/src/pkg/big/int.go
+++ b/src/pkg/big/int.go
@@ -309,42 +309,68 @@ func (x *Int) Cmp(y *Int) (r int) {
func (x *Int) String() string {
- s := ""
- if x.neg {
- s = "-"
+ switch {
+ case x == nil:
+ return "<nil>"
+ case x.neg:
+ return "-" + x.abs.decimalString()
}
- return s + x.abs.string(10)
+ return x.abs.decimalString()
}
-func fmtbase(ch int) int {
+func charset(ch int) string {
switch ch {
case 'b':
- return 2
+ return lowercaseDigits[0:2]
case 'o':
- return 8
- case 'd':
- return 10
+ return lowercaseDigits[0:8]
+ case 'd', 'v':
+ return lowercaseDigits[0:10]
case 'x':
- return 16
+ return lowercaseDigits[0:16]
+ case 'X':
+ return uppercaseDigits[0:16]
}
- return 10
+ return "" // unknown format
}
// Format is a support routine for fmt.Formatter. It accepts
-// the formats 'b' (binary), 'o' (octal), 'd' (decimal) and
-// 'x' (hexadecimal).
+// the formats 'b' (binary), 'o' (octal), 'd' (decimal), 'x'
+// (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
//
func (x *Int) Format(s fmt.State, ch int) {
- if x == nil {
+ cs := charset(ch)
+
+ // special cases
+ switch {
+ case cs == "":
+ // unknown format
+ fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
+ return
+ case x == nil:
fmt.Fprint(s, "<nil>")
return
}
+
+ // determine format
+ format := "%s"
+ if s.Flag('#') {
+ switch ch {
+ case 'o':
+ format = "0%s"
+ case 'x':
+ format = "0x%s"
+ case 'X':
+ format = "0X%s"
+ }
+ }
if x.neg {
- fmt.Fprint(s, "-")
+ format = "-" + format
}
- fmt.Fprint(s, x.abs.string(fmtbase(ch)))
+
+ fmt.Fprintf(s, format, x.abs.string(cs))
}
@@ -560,6 +586,42 @@ func (z *Int) Rsh(x *Int, n uint) *Int {
}
+// Bit returns the value of the i'th bit of z. That is, it
+// returns (z>>i)&1. The bit index i must be >= 0.
+func (z *Int) Bit(i int) uint {
+ if i < 0 {
+ panic("negative bit index")
+ }
+ if z.neg {
+ t := nat{}.sub(z.abs, natOne)
+ return t.bit(uint(i)) ^ 1
+ }
+
+ return z.abs.bit(uint(i))
+}
+
+
+// SetBit sets the i'th bit of z to bit and returns z.
+// That is, if bit is 1 SetBit sets z = x | (1 << i);
+// if bit is 0 it sets z = x &^ (1 << i). If bit is not 0 or 1,
+// SetBit will panic.
+func (z *Int) SetBit(x *Int, i int, b uint) *Int {
+ if i < 0 {
+ panic("negative bit index")
+ }
+ if x.neg {
+ t := z.abs.sub(x.abs, natOne)
+ t = t.setBit(t, uint(i), b^1)
+ z.abs = t.add(t, natOne)
+ z.neg = len(z.abs) > 0
+ return z
+ }
+ z.abs = z.abs.setBit(x.abs, uint(i), b)
+ z.neg = false
+ return z
+}
+
+
// And sets z = x & y and returns z.
func (z *Int) And(x, y *Int) *Int {
if x.neg == y.neg {
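
Bit and SetBit treat a negative Int as an infinitely sign-extended two's-complement value, which is why both work on abs(x)-1 and flip the requested bit. Two consequences worth seeing once (import path is today's math/big, plain "big" at the time):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// Every bit of -1 reads as 1, no matter how far out.
		minusOne := big.NewInt(-1)
		fmt.Println(minusOne.Bit(0), minusOne.Bit(63), minusOne.Bit(200)) // 1 1 1

		// Clearing bit 0 of ...1111 gives ...1110, i.e. -2.
		fmt.Println(new(big.Int).SetBit(minusOne, 0, 0)) // -2

		// Setting a bit past the current length grows the value.
		x := big.NewInt(0)
		x.SetBit(x, 100, 1)
		fmt.Println(x.BitLen()) // 101
	}
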
diff --git a/src/pkg/big/int_test.go b/src/pkg/big/int_test.go
index 9c19dd5da..595f04956 100755
--- a/src/pkg/big/int_test.go
+++ b/src/pkg/big/int_test.go
@@ -348,6 +348,55 @@ func TestSetString(t *testing.T) {
}
+var formatTests = []struct {
+ input string
+ format string
+ output string
+}{
+ {"<nil>", "%x", "<nil>"},
+ {"<nil>", "%#x", "<nil>"},
+ {"<nil>", "%#y", "%!y(big.Int=<nil>)"},
+
+ {"10", "%b", "1010"},
+ {"10", "%o", "12"},
+ {"10", "%d", "10"},
+ {"10", "%v", "10"},
+ {"10", "%x", "a"},
+ {"10", "%X", "A"},
+ {"-10", "%X", "-A"},
+ {"10", "%y", "%!y(big.Int=10)"},
+ {"-10", "%y", "%!y(big.Int=-10)"},
+
+ {"10", "%#b", "1010"},
+ {"10", "%#o", "012"},
+ {"10", "%#d", "10"},
+ {"10", "%#v", "10"},
+ {"10", "%#x", "0xa"},
+ {"10", "%#X", "0XA"},
+ {"-10", "%#X", "-0XA"},
+ {"10", "%#y", "%!y(big.Int=10)"},
+ {"-10", "%#y", "%!y(big.Int=-10)"},
+}
+
+
+func TestFormat(t *testing.T) {
+ for i, test := range formatTests {
+ var x *Int
+ if test.input != "<nil>" {
+ var ok bool
+ x, ok = new(Int).SetString(test.input, 0)
+ if !ok {
+ t.Errorf("#%d failed reading input %s", i, test.input)
+ }
+ }
+ output := fmt.Sprintf(test.format, x)
+ if output != test.output {
+ t.Errorf("#%d got %s; want %s", i, output, test.output)
+ }
+ }
+}
+
+
// Examples from the Go Language Spec, section "Arithmetic operators"
var divisionSignsTests = []struct {
x, y int64
@@ -985,6 +1034,152 @@ func testBitFunSelf(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
}
+func altBit(x *Int, i int) uint {
+ z := new(Int).Rsh(x, uint(i))
+ z = z.And(z, NewInt(1))
+ if z.Cmp(new(Int)) != 0 {
+ return 1
+ }
+ return 0
+}
+
+
+func altSetBit(z *Int, x *Int, i int, b uint) *Int {
+ one := NewInt(1)
+ m := one.Lsh(one, uint(i))
+ switch b {
+ case 1:
+ return z.Or(x, m)
+ case 0:
+ return z.AndNot(x, m)
+ }
+ panic("set bit is not 0 or 1")
+}
+
+
+func testBitset(t *testing.T, x *Int) {
+ n := x.BitLen()
+ z := new(Int).Set(x)
+ z1 := new(Int).Set(x)
+ for i := 0; i < n+10; i++ {
+ old := z.Bit(i)
+ old1 := altBit(z1, i)
+ if old != old1 {
+ t.Errorf("bitset: inconsistent value for Bit(%s, %d), got %v want %v", z1, i, old, old1)
+ }
+ z := new(Int).SetBit(z, i, 1)
+ z1 := altSetBit(new(Int), z1, i, 1)
+ if z.Bit(i) == 0 {
+ t.Errorf("bitset: bit %d of %s got 0 want 1", i, x)
+ }
+ if z.Cmp(z1) != 0 {
+ t.Errorf("bitset: inconsistent value after SetBit 1, got %s want %s", z, z1)
+ }
+ z.SetBit(z, i, 0)
+ altSetBit(z1, z1, i, 0)
+ if z.Bit(i) != 0 {
+ t.Errorf("bitset: bit %d of %s got 1 want 0", i, x)
+ }
+ if z.Cmp(z1) != 0 {
+ t.Errorf("bitset: inconsistent value after SetBit 0, got %s want %s", z, z1)
+ }
+ altSetBit(z1, z1, i, old)
+ z.SetBit(z, i, old)
+ if z.Cmp(z1) != 0 {
+ t.Errorf("bitset: inconsistent value after SetBit old, got %s want %s", z, z1)
+ }
+ }
+ if z.Cmp(x) != 0 {
+ t.Errorf("bitset: got %s want %s", z, x)
+ }
+}
+
+
+var bitsetTests = []struct {
+ x string
+ i int
+ b uint
+}{
+ {"0", 0, 0},
+ {"0", 200, 0},
+ {"1", 0, 1},
+ {"1", 1, 0},
+ {"-1", 0, 1},
+ {"-1", 200, 1},
+ {"0x2000000000000000000000000000", 108, 0},
+ {"0x2000000000000000000000000000", 109, 1},
+ {"0x2000000000000000000000000000", 110, 0},
+ {"-0x2000000000000000000000000001", 108, 1},
+ {"-0x2000000000000000000000000001", 109, 0},
+ {"-0x2000000000000000000000000001", 110, 1},
+}
+
+
+func TestBitSet(t *testing.T) {
+ for _, test := range bitwiseTests {
+ x := new(Int)
+ x.SetString(test.x, 0)
+ testBitset(t, x)
+ x = new(Int)
+ x.SetString(test.y, 0)
+ testBitset(t, x)
+ }
+ for i, test := range bitsetTests {
+ x := new(Int)
+ x.SetString(test.x, 0)
+ b := x.Bit(test.i)
+ if b != test.b {
+ t.Errorf("#%d want %v got %v", i, test.b, b)
+ }
+ }
+}
+
+
+func BenchmarkBitset(b *testing.B) {
+ z := new(Int)
+ z.SetBit(z, 512, 1)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ z.SetBit(z, i&512, 1)
+ }
+}
+
+
+func BenchmarkBitsetNeg(b *testing.B) {
+ z := NewInt(-1)
+ z.SetBit(z, 512, 0)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ z.SetBit(z, i&512, 0)
+ }
+}
+
+
+func BenchmarkBitsetOrig(b *testing.B) {
+ z := new(Int)
+ altSetBit(z, z, 512, 1)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ altSetBit(z, z, i&512, 1)
+ }
+}
+
+
+func BenchmarkBitsetNegOrig(b *testing.B) {
+ z := NewInt(-1)
+ altSetBit(z, z, 512, 0)
+ b.ResetTimer()
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ altSetBit(z, z, i&512, 0)
+ }
+}
+
+
func TestBitwise(t *testing.T) {
x := new(Int)
y := new(Int)
@@ -1019,6 +1214,7 @@ var notTests = []struct {
},
}
+
func TestNot(t *testing.T) {
in := new(Int)
out := new(Int)
@@ -1047,6 +1243,7 @@ var modInverseTests = []struct {
{"239487239847", "2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919"},
}
+
func TestModInverse(t *testing.T) {
var element, prime Int
one := NewInt(1)
diff --git a/src/pkg/big/nat.go b/src/pkg/big/nat.go
index 4848d427b..c2b95e8a2 100755
--- a/src/pkg/big/nat.go
+++ b/src/pkg/big/nat.go
@@ -20,6 +20,7 @@ package big
import "rand"
+
// An unsigned integer x of the form
//
// x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
@@ -668,16 +669,24 @@ func (z nat) scan(s string, base int) (nat, int, int) {
}
-// string converts x to a string for a given base, with 2 <= base <= 16.
-// TODO(gri) in the style of the other routines, perhaps this should take
-// a []byte buffer and return it
-func (x nat) string(base int) string {
- if base < 2 || 16 < base {
- panic("illegal base")
- }
+// Character sets for string conversion.
+const (
+ lowercaseDigits = "0123456789abcdefghijklmnopqrstuvwxyz"
+ uppercaseDigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+)
+
+// string converts x to a string using digits from a charset; a digit with
+// value d is represented by charset[d]. The conversion base is determined
+// by len(charset), which must be >= 2.
+func (x nat) string(charset string) string {
+ base := len(charset)
- if len(x) == 0 {
- return "0"
+ // special cases
+ switch {
+ case base < 2:
+ panic("illegal base")
+ case len(x) == 0:
+ return string(charset[0])
}
// allocate buffer for conversion
@@ -692,13 +701,20 @@ func (x nat) string(base int) string {
i--
var r Word
q, r = q.divW(q, Word(base))
- s[i] = "0123456789abcdef"[r]
+ s[i] = charset[r]
}
return string(s[i:])
}
+// decimalString returns a decimal representation of x.
+// It calls x.string with the charset "0123456789".
+func (x nat) decimalString() string {
+ return x.string(lowercaseDigits[0:10])
+}
+
+
const deBruijn32 = 0x077CB531
var deBruijn32Lookup = []byte{
@@ -721,7 +737,7 @@ var deBruijn64Lookup = []byte{
func trailingZeroBits(x Word) int {
// x & -x leaves only the right-most bit set in the word. Let k be the
// index of that bit. Since only a single bit is set, the value is two
- // to the power of k. Multipling by a power of two is equivalent to
+ // to the power of k. Multiplying by a power of two is equivalent to
// left shifting, in this case by k bits. The de Bruijn constant is
// such that all six bit, consecutive substrings are distinct.
// Therefore, if we have a left shifted version of this constant we can
@@ -773,6 +789,43 @@ func (z nat) shr(x nat, s uint) nat {
}
+func (z nat) setBit(x nat, i uint, b uint) nat {
+ j := int(i / _W)
+ m := Word(1) << (i % _W)
+ n := len(x)
+ switch b {
+ case 0:
+ z = z.make(n)
+ copy(z, x)
+ if j >= n {
+ // no need to grow
+ return z
+ }
+ z[j] &^= m
+ return z.norm()
+ case 1:
+ if j >= n {
+ n = j + 1
+ }
+ z = z.make(n)
+ copy(z, x)
+ z[j] |= m
+ // no need to normalize
+ return z
+ }
+ panic("set bit is not 0 or 1")
+}
+
+
+func (z nat) bit(i uint) uint {
+ j := int(i / _W)
+ if j >= len(z) {
+ return 0
+ }
+ return uint(z[j] >> (i % _W) & 1)
+}
+
+
func (z nat) and(x, y nat) nat {
m := len(x)
n := len(y)
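
bit and setBit locate bit i by splitting the index into a word number and an in-word mask; setting a bit past the current length grows the nat to j+1 words, while clearing one past the end is a no-op apart from the copy. The index arithmetic, assuming a 32-bit word size (_W is 32 or 64 depending on the build):

	package main

	import "fmt"

	const _W = 32 // assumed word size; 64 on 64-bit builds

	// locate mirrors the index math in nat.bit and nat.setBit above.
	func locate(i uint) (word uint, mask uint32) {
		return i / _W, 1 << (i % _W)
	}

	func main() {
		w, m := locate(37)
		fmt.Printf("bit 37: word %d, mask %#x\n", w, m) // word 1, mask 0x20
	}
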
diff --git a/src/pkg/big/nat_test.go b/src/pkg/big/nat_test.go
index 0bcb94554..a29843a3f 100755
--- a/src/pkg/big/nat_test.go
+++ b/src/pkg/big/nat_test.go
@@ -133,7 +133,7 @@ var mulRangesN = []struct {
func TestMulRangeN(t *testing.T) {
for i, r := range mulRangesN {
- prod := nat(nil).mulRange(r.a, r.b).string(10)
+ prod := nat(nil).mulRange(r.a, r.b).decimalString()
if prod != r.prod {
t.Errorf("#%d: got %s; want %s", i, prod, r.prod)
}
@@ -167,31 +167,35 @@ func BenchmarkMul(b *testing.B) {
}
-var tab = []struct {
- x nat
- b int
- s string
+var strTests = []struct {
+ x nat // nat value to be converted
+ c string // conversion charset
+ s string // expected result
}{
- {nil, 10, "0"},
- {nat{1}, 10, "1"},
- {nat{10}, 10, "10"},
- {nat{1234567890}, 10, "1234567890"},
+ {nil, "01", "0"},
+ {nat{1}, "01", "1"},
+ {nat{0xc5}, "01", "11000101"},
+ {nat{03271}, lowercaseDigits[0:8], "3271"},
+ {nat{10}, lowercaseDigits[0:10], "10"},
+ {nat{1234567890}, uppercaseDigits[0:10], "1234567890"},
+ {nat{0xdeadbeef}, lowercaseDigits[0:16], "deadbeef"},
+ {nat{0xdeadbeef}, uppercaseDigits[0:16], "DEADBEEF"},
}
func TestString(t *testing.T) {
- for _, a := range tab {
- s := a.x.string(a.b)
+ for _, a := range strTests {
+ s := a.x.string(a.c)
if s != a.s {
t.Errorf("string%+v\n\tgot s = %s; want %s", a, s, a.s)
}
- x, b, n := nat(nil).scan(a.s, a.b)
+ x, b, n := nat(nil).scan(a.s, len(a.c))
if x.cmp(a.x) != 0 {
t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
}
- if b != a.b {
- t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, a.b)
+ if b != len(a.c) {
+ t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, len(a.c))
}
if n != len(a.s) {
t.Errorf("scan%+v\n\tgot n = %d; want %d", a, n, len(a.s))
diff --git a/src/pkg/big/rat.go b/src/pkg/big/rat.go
index e70673a1c..2adf316e6 100644
--- a/src/pkg/big/rat.go
+++ b/src/pkg/big/rat.go
@@ -84,7 +84,7 @@ func (z *Rat) Num() *Int {
}
-// Demom returns the denominator of z; it is always > 0.
+// Denom returns the denominator of z; it is always > 0.
// The result is a reference to z's denominator; it
// may change if a new value is assigned to z.
func (z *Rat) Denom() *Int {
@@ -270,7 +270,7 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
// String returns a string representation of z in the form "a/b" (even if b == 1).
func (z *Rat) String() string {
- return z.a.String() + "/" + z.b.string(10)
+ return z.a.String() + "/" + z.b.decimalString()
}
@@ -311,13 +311,13 @@ func (z *Rat) FloatString(prec int) string {
}
}
- s := q.string(10)
+ s := q.decimalString()
if z.a.neg {
s = "-" + s
}
if prec > 0 {
- rs := r.string(10)
+ rs := r.decimalString()
leadingZeros := prec - len(rs)
s += "." + strings.Repeat("0", leadingZeros) + rs
}
diff --git a/src/pkg/compress/bzip2/bzip2.go b/src/pkg/compress/bzip2/bzip2.go
index 9e97edec1..8b4572306 100644
--- a/src/pkg/compress/bzip2/bzip2.go
+++ b/src/pkg/compress/bzip2/bzip2.go
@@ -284,7 +284,7 @@ func (bz2 *reader) readBlock() (err os.Error) {
repeat := 0
repeat_power := 0
- // The `C' array (used by the inverse BWT) needs to be zero initialised.
+ // The `C' array (used by the inverse BWT) needs to be zero initialized.
for i := range bz2.c {
bz2.c[i] = 0
}
@@ -330,7 +330,7 @@ func (bz2 *reader) readBlock() (err os.Error) {
if int(v) == numSymbols-1 {
// This is the EOF symbol. Because it's always at the
- // end of the move-to-front list, and nevers gets moved
+ // end of the move-to-front list, and never gets moved
// to the front, it has this unique value.
break
}
diff --git a/src/pkg/compress/bzip2/huffman.go b/src/pkg/compress/bzip2/huffman.go
index 732bc4a21..dc05739c7 100644
--- a/src/pkg/compress/bzip2/huffman.go
+++ b/src/pkg/compress/bzip2/huffman.go
@@ -68,7 +68,7 @@ func newHuffmanTree(lengths []uint8) (huffmanTree, os.Error) {
// each symbol (consider reflecting a tree down the middle, for
// example). Since the code length assignments determine the
// efficiency of the tree, each of these trees is equally good. In
- // order to minimise the amount of information needed to build a tree
+ // order to minimize the amount of information needed to build a tree
// bzip2 uses a canonical tree so that it can be reconstructed given
// only the code length assignments.
diff --git a/src/pkg/compress/flate/deflate.go b/src/pkg/compress/flate/deflate.go
index e5b2beaef..a02a5e8d9 100644
--- a/src/pkg/compress/flate/deflate.go
+++ b/src/pkg/compress/flate/deflate.go
@@ -143,10 +143,18 @@ func (d *compressor) fillWindow(index int) (int, os.Error) {
d.blockStart = math.MaxInt32
}
for i, h := range d.hashHead {
- d.hashHead[i] = max(h-wSize, -1)
+ v := h - wSize
+ if v < -1 {
+ v = -1
+ }
+ d.hashHead[i] = v
}
for i, h := range d.hashPrev {
- d.hashPrev[i] = max(h-wSize, -1)
+ v := -h - wSize
+ if v < -1 {
+ v = -1
+ }
+ d.hashPrev[i] = v
}
}
count, err := d.r.Read(d.window[d.windowEnd:])
@@ -177,10 +185,18 @@ func (d *compressor) writeBlock(tokens []token, index int, eof bool) os.Error {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
- win := d.window[0 : pos+min(maxMatchLength, lookahead)]
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
// We quit when we get a match that's at least nice long
- nice := min(d.niceMatch, len(win)-pos)
+ nice := len(win) - pos
+ if d.niceMatch < nice {
+ nice = d.niceMatch
+ }
// If we've got a match that's good enough, only look in 1/4 the chain.
tries := d.maxChainLength
@@ -344,9 +360,12 @@ Loop:
}
prevLength := length
prevOffset := offset
- minIndex := max(index-maxOffset, 0)
length = minMatchLength - 1
offset = 0
+ minIndex := index - maxOffset
+ if minIndex < 0 {
+ minIndex = 0
+ }
if chainHead >= minIndex &&
(isFastDeflate && lookahead > minMatchLength-1 ||
diff --git a/src/pkg/compress/flate/huffman_bit_writer.go b/src/pkg/compress/flate/huffman_bit_writer.go
index abff82dd6..5df4510a2 100644
--- a/src/pkg/compress/flate/huffman_bit_writer.go
+++ b/src/pkg/compress/flate/huffman_bit_writer.go
@@ -185,7 +185,7 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) {
_, w.err = w.w.Write(bytes)
}
-// RFC 1951 3.2.7 specifies a special run-length encoding for specifiying
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
@@ -279,7 +279,7 @@ func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
//
// numLiterals The number of literals specified in codegen
// numOffsets The number of offsets specified in codegen
-// numCodegens Tne number of codegens used in codegen
+// numCodegens The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
if w.err != nil {
return
diff --git a/src/pkg/compress/gzip/gzip_test.go b/src/pkg/compress/gzip/gzip_test.go
index 23f351405..121e627e6 100644
--- a/src/pkg/compress/gzip/gzip_test.go
+++ b/src/pkg/compress/gzip/gzip_test.go
@@ -11,7 +11,7 @@ import (
)
// pipe creates two ends of a pipe that gzip and gunzip, and runs dfunc at the
-// writer end and ifunc at the reader end.
+// writer end and cfunc at the reader end.
func pipe(t *testing.T, dfunc func(*Compressor), cfunc func(*Decompressor)) {
piper, pipew := io.Pipe()
defer piper.Close()
diff --git a/src/pkg/compress/lzw/reader.go b/src/pkg/compress/lzw/reader.go
index d418bc856..a1cd2abc0 100644
--- a/src/pkg/compress/lzw/reader.go
+++ b/src/pkg/compress/lzw/reader.go
@@ -165,16 +165,19 @@ func decode1(pw *io.PipeWriter, r io.ByteReader, read func(*decoder) (uint16, os
if _, err := w.Write(buf[i:]); err != nil {
return err
}
- // Save what the hi code expands to.
- suffix[hi] = uint8(c)
- prefix[hi] = last
+ if last != invalidCode {
+ // Save what the hi code expands to.
+ suffix[hi] = uint8(c)
+ prefix[hi] = last
+ }
default:
return os.NewError("lzw: invalid code")
}
last, hi = code, hi+1
- if hi == overflow {
+ if hi >= overflow {
if d.width == maxWidth {
- return os.NewError("lzw: missing clear code")
+ last = invalidCode
+ continue
}
d.width++
overflow <<= 1
diff --git a/src/pkg/compress/zlib/writer.go b/src/pkg/compress/zlib/writer.go
index f1f9b2853..8f86e9c4c 100644
--- a/src/pkg/compress/zlib/writer.go
+++ b/src/pkg/compress/zlib/writer.go
@@ -89,7 +89,7 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, os.Error) {
}
}
z.w = w
- z.compressor = flate.NewWriter(w, level)
+ z.compressor = flate.NewWriterDict(w, level, dict)
z.digest = adler32.New()
return z, nil
}
diff --git a/src/pkg/compress/zlib/writer_test.go b/src/pkg/compress/zlib/writer_test.go
index f94f28470..a06689ee5 100644
--- a/src/pkg/compress/zlib/writer_test.go
+++ b/src/pkg/compress/zlib/writer_test.go
@@ -5,6 +5,7 @@
package zlib
import (
+ "bytes"
"io"
"io/ioutil"
"os"
@@ -121,3 +122,20 @@ func TestWriterDict(t *testing.T) {
}
}
}
+
+func TestWriterDictIsUsed(t *testing.T) {
+ var input = []byte("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
+ buf := bytes.NewBuffer(nil)
+ compressor, err := NewWriterDict(buf, BestCompression, input)
+ if err != nil {
+ t.Errorf("error in NewWriterDict: %s", err)
+ return
+ }
+ compressor.Write(input)
+ compressor.Close()
+ const expectedMaxSize = 25
+ output := buf.Bytes()
+ if len(output) > expectedMaxSize {
+ t.Errorf("result too large (got %d, want <= %d bytes). Is the dictionary being used?", len(output), expectedMaxSize)
+ }
+}
diff --git a/src/pkg/container/heap/heap.go b/src/pkg/container/heap/heap.go
index f2b8a750a..5b68827df 100644
--- a/src/pkg/container/heap/heap.go
+++ b/src/pkg/container/heap/heap.go
@@ -22,7 +22,7 @@ type Interface interface {
}
-// A heaper must be initialized before any of the heap operations
+// A heap must be initialized before any of the heap operations
// can be used. Init is idempotent with respect to the heap invariants
// and may be called whenever the heap invariants may have been invalidated.
// Its complexity is O(n) where n = h.Len().
diff --git a/src/pkg/crypto/elliptic/elliptic.go b/src/pkg/crypto/elliptic/elliptic.go
index 335c9645d..41835f1a9 100644
--- a/src/pkg/crypto/elliptic/elliptic.go
+++ b/src/pkg/crypto/elliptic/elliptic.go
@@ -284,7 +284,7 @@ func (curve *Curve) Marshal(x, y *big.Int) []byte {
return ret
}
-// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On
+// Unmarshal converts a point, serialized by Marshal, into an x, y pair. On
// error, x = nil.
func (curve *Curve) Unmarshal(data []byte) (x, y *big.Int) {
byteLen := (curve.BitSize + 7) >> 3
diff --git a/src/pkg/crypto/elliptic/elliptic_test.go b/src/pkg/crypto/elliptic/elliptic_test.go
index 02083a986..b7e7f035f 100644
--- a/src/pkg/crypto/elliptic/elliptic_test.go
+++ b/src/pkg/crypto/elliptic/elliptic_test.go
@@ -321,8 +321,8 @@ func TestMarshal(t *testing.T) {
t.Error(err)
return
}
- serialised := p224.Marshal(x, y)
- xx, yy := p224.Unmarshal(serialised)
+ serialized := p224.Marshal(x, y)
+ xx, yy := p224.Unmarshal(serialized)
if xx == nil {
t.Error("failed to unmarshal")
return
diff --git a/src/pkg/crypto/hmac/hmac_test.go b/src/pkg/crypto/hmac/hmac_test.go
index 40adbad04..bcae63b8a 100644
--- a/src/pkg/crypto/hmac/hmac_test.go
+++ b/src/pkg/crypto/hmac/hmac_test.go
@@ -190,7 +190,7 @@ func TestHMAC(t *testing.T) {
continue
}
- // Repetive Sum() calls should return the same value
+ // Repetitive Sum() calls should return the same value
for k := 0; k < 2; k++ {
sum := fmt.Sprintf("%x", h.Sum())
if sum != tt.out {
diff --git a/src/pkg/crypto/openpgp/armor/armor.go b/src/pkg/crypto/openpgp/armor/armor.go
index 8da612c50..9c4180d6d 100644
--- a/src/pkg/crypto/openpgp/armor/armor.go
+++ b/src/pkg/crypto/openpgp/armor/armor.go
@@ -153,7 +153,7 @@ func (r *openpgpReader) Read(p []byte) (n int, err os.Error) {
// Decode reads a PGP armored block from the given Reader. It will ignore
// leading garbage. If it doesn't find a block, it will return nil, os.EOF. The
-// given Reader is not usable after calling this function: an arbitary amount
+// given Reader is not usable after calling this function: an arbitrary amount
// of data may have been read past the end of the block.
func Decode(in io.Reader) (p *Block, err os.Error) {
r, _ := bufio.NewReaderSize(in, 100)
diff --git a/src/pkg/crypto/openpgp/keys.go b/src/pkg/crypto/openpgp/keys.go
index 6c03f8828..2acb7e612 100644
--- a/src/pkg/crypto/openpgp/keys.go
+++ b/src/pkg/crypto/openpgp/keys.go
@@ -5,9 +5,11 @@
package openpgp
import (
+ "crypto"
"crypto/openpgp/armor"
"crypto/openpgp/error"
"crypto/openpgp/packet"
+ "crypto/rsa"
"io"
"os"
)
@@ -297,3 +299,104 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p
e.Subkeys = append(e.Subkeys, subKey)
return nil
}
+
+const defaultRSAKeyBits = 2048
+
+// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
+// single identity composed of the given full name, comment and email, any of
+// which may be empty but must not contain any of "()<>\x00".
+func NewEntity(rand io.Reader, currentTimeSecs int64, name, comment, email string) (*Entity, os.Error) {
+ uid := packet.NewUserId(name, comment, email)
+ if uid == nil {
+ return nil, error.InvalidArgumentError("user id field contained invalid characters")
+ }
+ signingPriv, err := rsa.GenerateKey(rand, defaultRSAKeyBits)
+ if err != nil {
+ return nil, err
+ }
+ encryptingPriv, err := rsa.GenerateKey(rand, defaultRSAKeyBits)
+ if err != nil {
+ return nil, err
+ }
+
+ t := uint32(currentTimeSecs)
+
+ e := &Entity{
+ PrimaryKey: packet.NewRSAPublicKey(t, &signingPriv.PublicKey, false /* not a subkey */ ),
+ PrivateKey: packet.NewRSAPrivateKey(t, signingPriv, false /* not a subkey */ ),
+ Identities: make(map[string]*Identity),
+ }
+ isPrimaryId := true
+ e.Identities[uid.Id] = &Identity{
+ Name: uid.Name,
+ UserId: uid,
+ SelfSignature: &packet.Signature{
+ CreationTime: t,
+ SigType: packet.SigTypePositiveCert,
+ PubKeyAlgo: packet.PubKeyAlgoRSA,
+ Hash: crypto.SHA256,
+ IsPrimaryId: &isPrimaryId,
+ FlagsValid: true,
+ FlagSign: true,
+ FlagCertify: true,
+ IssuerKeyId: &e.PrimaryKey.KeyId,
+ },
+ }
+
+ e.Subkeys = make([]Subkey, 1)
+ e.Subkeys[0] = Subkey{
+ PublicKey: packet.NewRSAPublicKey(t, &encryptingPriv.PublicKey, true /* is a subkey */ ),
+ PrivateKey: packet.NewRSAPrivateKey(t, encryptingPriv, true /* is a subkey */ ),
+ Sig: &packet.Signature{
+ CreationTime: t,
+ SigType: packet.SigTypeSubkeyBinding,
+ PubKeyAlgo: packet.PubKeyAlgoRSA,
+ Hash: crypto.SHA256,
+ FlagsValid: true,
+ FlagEncryptStorage: true,
+ FlagEncryptCommunications: true,
+ IssuerKeyId: &e.PrimaryKey.KeyId,
+ },
+ }
+
+ return e, nil
+}
+
+// SerializePrivate serializes an Entity, including private key material, to
+// the given Writer. For now, it must only be used on an Entity returned from
+// NewEntity.
+func (e *Entity) SerializePrivate(w io.Writer) (err os.Error) {
+ err = e.PrivateKey.Serialize(w)
+ if err != nil {
+ return
+ }
+ for _, ident := range e.Identities {
+ err = ident.UserId.Serialize(w)
+ if err != nil {
+ return
+ }
+ err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey)
+ if err != nil {
+ return
+ }
+ err = ident.SelfSignature.Serialize(w)
+ if err != nil {
+ return
+ }
+ }
+ for _, subkey := range e.Subkeys {
+ err = subkey.PrivateKey.Serialize(w)
+ if err != nil {
+ return
+ }
+ err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey)
+ if err != nil {
+ return
+ }
+ err = subkey.Sig.Serialize(w)
+ if err != nil {
+ return
+ }
+ }
+ return nil
+}
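
A minimal usage sketch of the two new entry points, mirroring the calls made by the test added in write_test.go further down; the identity strings are placeholders, and time.Seconds/rand.Reader stand in for a clock and an entropy source:

package main

import (
	"bytes"
	"crypto/openpgp"
	"crypto/rand"
	"fmt"
	"time"
)

func main() {
	// Generate a signing key plus an encryption subkey and self-sign the
	// (placeholder) identity.
	e, err := openpgp.NewEntity(rand.Reader, time.Seconds(), "Alice Example", "demo", "alice@example.com")
	if err != nil {
		fmt.Println("NewEntity:", err)
		return
	}

	// SerializePrivate emits the private key, user id, self-signature and
	// subkey binding signature as a packet stream.
	buf := bytes.NewBuffer(nil)
	if err := e.SerializePrivate(buf); err != nil {
		fmt.Println("SerializePrivate:", err)
		return
	}
	fmt.Printf("serialized %d bytes of key material\n", buf.Len())
}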
diff --git a/src/pkg/crypto/openpgp/packet/packet.go b/src/pkg/crypto/openpgp/packet/packet.go
index c0ec44dd8..e583670fb 100644
--- a/src/pkg/crypto/openpgp/packet/packet.go
+++ b/src/pkg/crypto/openpgp/packet/packet.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package packet implements parsing and serialisation of OpenPGP packets, as
+// Package packet implements parsing and serialization of OpenPGP packets, as
// specified in RFC 4880.
package packet
@@ -301,12 +301,12 @@ type SignatureType uint8
const (
SigTypeBinary SignatureType = 0
- SigTypeText = 1
- SigTypeGenericCert = 0x10
- SigTypePersonaCert = 0x11
- SigTypeCasualCert = 0x12
- SigTypePositiveCert = 0x13
- SigTypeSubkeyBinding = 0x18
+ SigTypeText = 1
+ SigTypeGenericCert = 0x10
+ SigTypePersonaCert = 0x11
+ SigTypeCasualCert = 0x12
+ SigTypePositiveCert = 0x13
+ SigTypeSubkeyBinding = 0x18
)
// PublicKeyAlgorithm represents the different public key system specified for
@@ -386,6 +386,14 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) {
return
}
+// mpiLength returns the length of the given *big.Int when serialized as an
+// MPI.
+func mpiLength(n *big.Int) (mpiLengthInBytes int) {
+ mpiLengthInBytes = 2 /* MPI length */
+ mpiLengthInBytes += (n.BitLen() + 7) / 8
+ return
+}
+
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) {
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
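
The arithmetic behind mpiLength is just the MPI framing used throughout this package: a two-byte bit count followed by ceil(BitLen/8) magnitude bytes. A tiny standalone check of that formula, re-derived here since the helper itself is unexported:

package main

import (
	"big"
	"fmt"
)

// mpiSize mirrors the unexported mpiLength helper: 2 prefix bytes plus
// ceil(BitLen/8) bytes of magnitude.
func mpiSize(n *big.Int) int {
	return 2 + (n.BitLen()+7)/8
}

func main() {
	fmt.Println(mpiSize(big.NewInt(1)))   // 1 bit  -> 2 + 1 = 3 bytes
	fmt.Println(mpiSize(big.NewInt(255))) // 8 bits -> 2 + 1 = 3 bytes
	fmt.Println(mpiSize(big.NewInt(256))) // 9 bits -> 2 + 2 = 4 bytes
}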
diff --git a/src/pkg/crypto/openpgp/packet/private_key.go b/src/pkg/crypto/openpgp/packet/private_key.go
index fde2a9933..92e7ee422 100644
--- a/src/pkg/crypto/openpgp/packet/private_key.go
+++ b/src/pkg/crypto/openpgp/packet/private_key.go
@@ -32,6 +32,13 @@ type PrivateKey struct {
iv []byte
}
+func NewRSAPrivateKey(currentTimeSecs uint32, priv *rsa.PrivateKey, isSubkey bool) *PrivateKey {
+ pk := new(PrivateKey)
+ pk.PublicKey = *NewRSAPublicKey(currentTimeSecs, &priv.PublicKey, isSubkey)
+ pk.PrivateKey = priv
+ return pk
+}
+
func (pk *PrivateKey) parse(r io.Reader) (err os.Error) {
err = (&pk.PublicKey).parse(r)
if err != nil {
@@ -91,6 +98,83 @@ func (pk *PrivateKey) parse(r io.Reader) (err os.Error) {
return
}
+func mod64kHash(d []byte) uint16 {
+ h := uint16(0)
+ for i := 0; i < len(d); i += 2 {
+ v := uint16(d[i]) << 8
+ if i+1 < len(d) {
+ v += uint16(d[i+1])
+ }
+ h += v
+ }
+ return h
+}
+
+func (pk *PrivateKey) Serialize(w io.Writer) (err os.Error) {
+ // TODO(agl): support encrypted private keys
+ buf := bytes.NewBuffer(nil)
+ err = pk.PublicKey.serializeWithoutHeaders(buf)
+ if err != nil {
+ return
+ }
+ buf.WriteByte(0 /* no encryption */ )
+
+ privateKeyBuf := bytes.NewBuffer(nil)
+
+ switch priv := pk.PrivateKey.(type) {
+ case *rsa.PrivateKey:
+ err = serializeRSAPrivateKey(privateKeyBuf, priv)
+ default:
+ err = error.InvalidArgumentError("non-RSA private key")
+ }
+ if err != nil {
+ return
+ }
+
+ ptype := packetTypePrivateKey
+ contents := buf.Bytes()
+ privateKeyBytes := privateKeyBuf.Bytes()
+ if pk.IsSubkey {
+ ptype = packetTypePrivateSubkey
+ }
+ err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
+ if err != nil {
+ return
+ }
+ _, err = w.Write(contents)
+ if err != nil {
+ return
+ }
+ _, err = w.Write(privateKeyBytes)
+ if err != nil {
+ return
+ }
+
+ checksum := mod64kHash(privateKeyBytes)
+ var checksumBytes [2]byte
+ checksumBytes[0] = byte(checksum >> 8)
+ checksumBytes[1] = byte(checksum)
+ _, err = w.Write(checksumBytes[:])
+
+ return
+}
+
+func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) os.Error {
+ err := writeBig(w, priv.D)
+ if err != nil {
+ return err
+ }
+ err = writeBig(w, priv.Primes[1])
+ if err != nil {
+ return err
+ }
+ err = writeBig(w, priv.Primes[0])
+ if err != nil {
+ return err
+ }
+ return writeBig(w, priv.Precomputed.Qinv)
+}
+
// Decrypt decrypts an encrypted private key using a passphrase.
func (pk *PrivateKey) Decrypt(passphrase []byte) os.Error {
if !pk.Encrypted {
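
The checksum appended after the key material by Serialize is the folding done by mod64kHash above: the bytes are read as big-endian 16-bit words (a trailing odd byte is zero-padded) and summed modulo 2^16. A small standalone illustration of that folding, re-derived since the helper is unexported:

package main

import "fmt"

// sum16 mirrors the unexported mod64kHash helper: interpret the data as
// big-endian 16-bit words, zero-padding a trailing odd byte, and sum the
// words modulo 2^16.
func sum16(d []byte) uint16 {
	h := uint16(0)
	for i := 0; i < len(d); i += 2 {
		v := uint16(d[i]) << 8
		if i+1 < len(d) {
			v += uint16(d[i+1])
		}
		h += v
	}
	return h
}

func main() {
	fmt.Printf("%04x\n", sum16([]byte{0x01, 0x02}))       // prints 0102
	fmt.Printf("%04x\n", sum16([]byte{0x01, 0x02, 0x03})) // 0x0102 + 0x0300: prints 0402
}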
diff --git a/src/pkg/crypto/openpgp/packet/public_key.go b/src/pkg/crypto/openpgp/packet/public_key.go
index cd4a9aebb..46d365b2a 100644
--- a/src/pkg/crypto/openpgp/packet/public_key.go
+++ b/src/pkg/crypto/openpgp/packet/public_key.go
@@ -30,6 +30,28 @@ type PublicKey struct {
n, e, p, q, g, y parsedMPI
}
+func fromBig(n *big.Int) parsedMPI {
+ return parsedMPI{
+ bytes: n.Bytes(),
+ bitLength: uint16(n.BitLen()),
+ }
+}
+
+// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
+func NewRSAPublicKey(creationTimeSecs uint32, pub *rsa.PublicKey, isSubkey bool) *PublicKey {
+ pk := &PublicKey{
+ CreationTime: creationTimeSecs,
+ PubKeyAlgo: PubKeyAlgoRSA,
+ PublicKey: pub,
+ IsSubkey: isSubkey,
+ n: fromBig(pub.N),
+ e: fromBig(big.NewInt(int64(pub.E))),
+ }
+
+ pk.setFingerPrintAndKeyId()
+ return pk
+}
+
func (pk *PublicKey) parse(r io.Reader) (err os.Error) {
// RFC 4880, section 5.5.2
var buf [6]byte
@@ -54,14 +76,17 @@ func (pk *PublicKey) parse(r io.Reader) (err os.Error) {
return
}
+ pk.setFingerPrintAndKeyId()
+ return
+}
+
+func (pk *PublicKey) setFingerPrintAndKeyId() {
// RFC 4880, section 12.2
fingerPrint := sha1.New()
pk.SerializeSignaturePrefix(fingerPrint)
- pk.Serialize(fingerPrint)
+ pk.serializeWithoutHeaders(fingerPrint)
copy(pk.Fingerprint[:], fingerPrint.Sum())
pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
-
- return
}
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
@@ -143,9 +168,30 @@ func (pk *PublicKey) SerializeSignaturePrefix(h hash.Hash) {
return
}
-// Serialize marshals the PublicKey to w in the form of an OpenPGP public key
-// packet, not including the packet header.
func (pk *PublicKey) Serialize(w io.Writer) (err os.Error) {
+ length := 6 // 6 byte header
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ length += 2 + len(pk.n.bytes)
+ length += 2 + len(pk.e.bytes)
+ case PubKeyAlgoDSA:
+ length += 2 + len(pk.p.bytes)
+ length += 2 + len(pk.q.bytes)
+ length += 2 + len(pk.g.bytes)
+ length += 2 + len(pk.y.bytes)
+ }
+
+ err = serializeHeader(w, packetTypePublicKey, length)
+ if err != nil {
+ return
+ }
+ return pk.serializeWithoutHeaders(w)
+}
+
+// serializeWithoutHeaders marshals the PublicKey to w in the form of an
+// OpenPGP public key packet, not including the packet header.
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err os.Error) {
var buf [6]byte
buf[0] = 4
buf[1] = byte(pk.CreationTime >> 24)
@@ -211,34 +257,43 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err os.E
panic("unreachable")
}
-// VerifyKeySignature returns nil iff sig is a valid signature, make by this
-// public key, of the public key in signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) (err os.Error) {
- h := sig.Hash.New()
+// keySignatureHash returns a Hash of the message that needs to be signed for
+// pk to assert a subkey relationship to signed.
+func keySignatureHash(pk, signed *PublicKey, sig *Signature) (h hash.Hash, err os.Error) {
+ h = sig.Hash.New()
if h == nil {
- return error.UnsupportedError("hash function")
+ return nil, error.UnsupportedError("hash function")
}
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
- pk.Serialize(h)
+ pk.serializeWithoutHeaders(h)
signed.SerializeSignaturePrefix(h)
- signed.Serialize(h)
+ signed.serializeWithoutHeaders(h)
+ return
+}
+// VerifyKeySignature returns nil iff sig is a valid signature, made by this
+// public key, of signed.
+func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) (err os.Error) {
+ h, err := keySignatureHash(pk, signed, sig)
+ if err != nil {
+ return err
+ }
return pk.VerifySignature(h, sig)
}
-// VerifyUserIdSignature returns nil iff sig is a valid signature, make by this
-// public key, of the given user id.
-func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err os.Error) {
- h := sig.Hash.New()
+// userIdSignatureHash returns a Hash of the message that needs to be signed
+// to assert that pk is a valid key for id.
+func userIdSignatureHash(id string, pk *PublicKey, sig *Signature) (h hash.Hash, err os.Error) {
+ h = sig.Hash.New()
if h == nil {
- return error.UnsupportedError("hash function")
+ return nil, error.UnsupportedError("hash function")
}
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
- pk.Serialize(h)
+ pk.serializeWithoutHeaders(h)
var buf [5]byte
buf[0] = 0xb4
@@ -249,6 +304,16 @@ func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err os.Er
h.Write(buf[:])
h.Write([]byte(id))
+ return
+}
+
+// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
+// public key, of id.
+func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err os.Error) {
+ h, err := userIdSignatureHash(id, pk, sig)
+ if err != nil {
+ return err
+ }
return pk.VerifySignature(h, sig)
}
@@ -272,7 +337,7 @@ type parsedMPI struct {
bitLength uint16
}
-// writeMPIs is a utility function for serialising several big integers to the
+// writeMPIs is a utility function for serializing several big integers to the
// given Writer.
func writeMPIs(w io.Writer, mpis ...parsedMPI) (err os.Error) {
for _, mpi := range mpis {
diff --git a/src/pkg/crypto/openpgp/packet/public_key_test.go b/src/pkg/crypto/openpgp/packet/public_key_test.go
index 069388c14..6e8bfbce6 100644
--- a/src/pkg/crypto/openpgp/packet/public_key_test.go
+++ b/src/pkg/crypto/openpgp/packet/public_key_test.go
@@ -28,12 +28,12 @@ func TestPublicKeyRead(t *testing.T) {
packet, err := Read(readerFromHex(test.hexData))
if err != nil {
t.Errorf("#%d: Read error: %s", i, err)
- return
+ continue
}
pk, ok := packet.(*PublicKey)
if !ok {
t.Errorf("#%d: failed to parse, got: %#v", i, packet)
- return
+ continue
}
if pk.PubKeyAlgo != test.pubKeyAlgo {
t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo)
@@ -57,6 +57,38 @@ func TestPublicKeyRead(t *testing.T) {
}
}
+func TestPublicKeySerialize(t *testing.T) {
+ for i, test := range pubKeyTests {
+ packet, err := Read(readerFromHex(test.hexData))
+ if err != nil {
+ t.Errorf("#%d: Read error: %s", i, err)
+ continue
+ }
+ pk, ok := packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse, got: %#v", i, packet)
+ continue
+ }
+ serializeBuf := bytes.NewBuffer(nil)
+ err = pk.Serialize(serializeBuf)
+ if err != nil {
+ t.Errorf("#%d: failed to serialize: %s", i, err)
+ continue
+ }
+
+ packet, err = Read(serializeBuf)
+ if err != nil {
+ t.Errorf("#%d: Read error (from serialized data): %s", i, err)
+ continue
+ }
+ pk, ok = packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse serialized data, got: %#v", i, packet)
+ continue
+ }
+ }
+}
+
const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb"
const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001"
diff --git a/src/pkg/crypto/openpgp/packet/signature.go b/src/pkg/crypto/openpgp/packet/signature.go
index 719657e76..3169bac1e 100644
--- a/src/pkg/crypto/openpgp/packet/signature.go
+++ b/src/pkg/crypto/openpgp/packet/signature.go
@@ -393,7 +393,7 @@ func (sig *Signature) buildHashSuffix() (err os.Error) {
sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
if !ok {
sig.HashSuffix = nil
- return error.InvalidArgumentError("hash cannot be repesented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
+ return error.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
}
sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
sig.HashSuffix[5] = byte(hashedSubpacketsLen)
@@ -420,28 +420,46 @@ func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err os.Error)
return
}
-// SignRSA signs a message with an RSA private key. The hash, h, must contain
+// Sign signs a message with a private key. The hash, h, must contain
// the hash of the message to be signed and will be mutated by this function.
// On success, the signature is stored in sig. Call Serialize to write it out.
-func (sig *Signature) SignRSA(h hash.Hash, priv *rsa.PrivateKey) (err os.Error) {
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey) (err os.Error) {
digest, err := sig.signPrepareHash(h)
if err != nil {
return
}
- sig.RSASignature, err = rsa.SignPKCS1v15(rand.Reader, priv, sig.Hash, digest)
+
+ switch priv.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ sig.RSASignature, err = rsa.SignPKCS1v15(rand.Reader, priv.PrivateKey.(*rsa.PrivateKey), sig.Hash, digest)
+ case PubKeyAlgoDSA:
+ sig.DSASigR, sig.DSASigS, err = dsa.Sign(rand.Reader, priv.PrivateKey.(*dsa.PrivateKey), digest)
+ default:
+ err = error.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
+ }
+
return
}
-// SignDSA signs a message with a DSA private key. The hash, h, must contain
-// the hash of the message to be signed and will be mutated by this function.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-func (sig *Signature) SignDSA(h hash.Hash, priv *dsa.PrivateKey) (err os.Error) {
- digest, err := sig.signPrepareHash(h)
+// SignUserId computes a signature from priv, asserting that pub is a valid
+// key for the identity id. On success, the signature is stored in sig. Call
+// Serialize to write it out.
+func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey) os.Error {
+ h, err := userIdSignatureHash(id, pub, sig)
if err != nil {
- return
+ return err
}
- sig.DSASigR, sig.DSASigS, err = dsa.Sign(rand.Reader, priv, digest)
- return
+ return sig.Sign(h, priv)
+}
+
+// SignKey computes a signature from priv, asserting that pub is a subkey. On
+// success, the signature is stored in sig. Call Serialize to write it out.
+func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey) os.Error {
+ h, err := keySignatureHash(&priv.PublicKey, pub, sig)
+ if err != nil {
+ return err
+ }
+ return sig.Sign(h, priv)
}
// Serialize marshals sig to w. SignRSA or SignDSA must have been called first.
@@ -455,10 +473,8 @@ func (sig *Signature) Serialize(w io.Writer) (err os.Error) {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sigLength = len(sig.RSASignature)
case PubKeyAlgoDSA:
- sigLength = 2 /* MPI length */
- sigLength += (sig.DSASigR.BitLen() + 7) / 8
- sigLength += 2 /* MPI length */
- sigLength += (sig.DSASigS.BitLen() + 7) / 8
+ sigLength = mpiLength(sig.DSASigR)
+ sigLength += mpiLength(sig.DSASigS)
default:
panic("impossible")
}
diff --git a/src/pkg/crypto/openpgp/packet/userid.go b/src/pkg/crypto/openpgp/packet/userid.go
index ed2ad7774..0580ba3ed 100644
--- a/src/pkg/crypto/openpgp/packet/userid.go
+++ b/src/pkg/crypto/openpgp/packet/userid.go
@@ -20,6 +20,51 @@ type UserId struct {
Name, Comment, Email string
}
+func hasInvalidCharacters(s string) bool {
+ for _, c := range s {
+ switch c {
+ case '(', ')', '<', '>', 0:
+ return true
+ }
+ }
+ return false
+}
+
+// NewUserId returns a UserId or nil if any of the arguments contain invalid
+// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
+func NewUserId(name, comment, email string) *UserId {
+ // RFC 4880 doesn't deal with the structure of userid strings; the
+ // name, comment and email form is just a convention. However, there's
+ // no convention about escaping the metacharacters and GPG just refuses
+ // to create user ids where, say, the name contains a '('. We mirror
+ // this behaviour.
+
+ if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
+ return nil
+ }
+
+ uid := new(UserId)
+ uid.Name, uid.Comment, uid.Email = name, comment, email
+ uid.Id = name
+ if len(comment) > 0 {
+ if len(uid.Id) > 0 {
+ uid.Id += " "
+ }
+ uid.Id += "("
+ uid.Id += comment
+ uid.Id += ")"
+ }
+ if len(email) > 0 {
+ if len(uid.Id) > 0 {
+ uid.Id += " "
+ }
+ uid.Id += "<"
+ uid.Id += email
+ uid.Id += ">"
+ }
+ return uid
+}
+
func (uid *UserId) parse(r io.Reader) (err os.Error) {
// RFC 4880, section 5.11
b, err := ioutil.ReadAll(r)
@@ -31,6 +76,17 @@ func (uid *UserId) parse(r io.Reader) (err os.Error) {
return
}
+// Serialize marshals uid to w in the form of an OpenPGP packet, including
+// header.
+func (uid *UserId) Serialize(w io.Writer) os.Error {
+ err := serializeHeader(w, packetTypeUserId, len(uid.Id))
+ if err != nil {
+ return err
+ }
+ _, err = w.Write([]byte(uid.Id))
+ return err
+}
+
// parseUserId extracts the name, comment and email from a user id string that
// is formatted as "Full Name (Comment) <email@example.com>".
func parseUserId(id string) (name, comment, email string) {
diff --git a/src/pkg/crypto/openpgp/packet/userid_test.go b/src/pkg/crypto/openpgp/packet/userid_test.go
index 394873dc3..296819389 100644
--- a/src/pkg/crypto/openpgp/packet/userid_test.go
+++ b/src/pkg/crypto/openpgp/packet/userid_test.go
@@ -40,3 +40,48 @@ func TestParseUserId(t *testing.T) {
}
}
}
+
+var newUserIdTests = []struct {
+ name, comment, email, id string
+}{
+ {"foo", "", "", "foo"},
+ {"", "bar", "", "(bar)"},
+ {"", "", "baz", "<baz>"},
+ {"foo", "bar", "", "foo (bar)"},
+ {"foo", "", "baz", "foo <baz>"},
+ {"", "bar", "baz", "(bar) <baz>"},
+ {"foo", "bar", "baz", "foo (bar) <baz>"},
+}
+
+func TestNewUserId(t *testing.T) {
+ for i, test := range newUserIdTests {
+ uid := NewUserId(test.name, test.comment, test.email)
+ if uid == nil {
+ t.Errorf("#%d: returned nil", i)
+ continue
+ }
+ if uid.Id != test.id {
+ t.Errorf("#%d: got '%s', want '%s'", i, uid.Id, test.id)
+ }
+ }
+}
+
+var invalidNewUserIdTests = []struct {
+ name, comment, email string
+}{
+ {"foo(", "", ""},
+ {"foo<", "", ""},
+ {"", "bar)", ""},
+ {"", "bar<", ""},
+ {"", "", "baz>"},
+ {"", "", "baz)"},
+ {"", "", "baz\x00"},
+}
+
+func TestNewUserIdWithInvalidInput(t *testing.T) {
+ for i, test := range invalidNewUserIdTests {
+ if uid := NewUserId(test.name, test.comment, test.email); uid != nil {
+ t.Errorf("#%d: returned non-nil value: %#v", i, uid)
+ }
+ }
+}
diff --git a/src/pkg/crypto/openpgp/read.go b/src/pkg/crypto/openpgp/read.go
index 4f84dff82..46fcde363 100644
--- a/src/pkg/crypto/openpgp/read.go
+++ b/src/pkg/crypto/openpgp/read.go
@@ -44,7 +44,7 @@ type MessageDetails struct {
DecryptedWith Key // the private key used to decrypt the message, if any.
IsSigned bool // true if the message is signed.
SignedByKeyId uint64 // the key id of the signer, if any.
- SignedBy *Key // the key of the signer, if availible.
+ SignedBy *Key // the key of the signer, if available.
LiteralData *packet.LiteralData // the metadata of the contents
UnverifiedBody io.Reader // the contents of the message.
@@ -145,7 +145,7 @@ ParsePackets:
// function so that it can decrypt a key or give us a passphrase.
FindKey:
for {
- // See if any of the keys already have a private key availible
+ // See if any of the keys already have a private key available
candidates = candidates[:0]
candidateFingerprints := make(map[string]bool)
@@ -214,7 +214,7 @@ FindKey:
return readSignedMessage(packets, md, keyring)
}
-// readSignedMessage reads a possibily signed message if mdin is non-zero then
+// readSignedMessage reads a possibly signed message if mdin is non-zero then
// that structure is updated and returned. Otherwise a fresh MessageDetails is
// used.
func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err os.Error) {
@@ -274,13 +274,13 @@ FindLiteralData:
// hashForSignature returns a pair of hashes that can be used to verify a
// signature. The signature may specify that the contents of the signed message
-// should be preprocessed (i.e. to normalise line endings). Thus this function
+// should be preprocessed (i.e. to normalize line endings). Thus this function
// returns two hashes. The second should be used to hash the message itself and
// performs any needed preprocessing.
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, os.Error) {
h := hashId.New()
if h == nil {
- return nil, nil, error.UnsupportedError("hash not availible: " + strconv.Itoa(int(hashId)))
+ return nil, nil, error.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
}
switch sigType {
diff --git a/src/pkg/crypto/openpgp/read_test.go b/src/pkg/crypto/openpgp/read_test.go
index 423c85b0f..a040b5b1b 100644
--- a/src/pkg/crypto/openpgp/read_test.go
+++ b/src/pkg/crypto/openpgp/read_test.go
@@ -193,9 +193,9 @@ func TestSymmetricallyEncrypted(t *testing.T) {
t.Errorf("ReadAll: %s", err)
}
- expectedCreatationTime := uint32(1295992998)
- if md.LiteralData.Time != expectedCreatationTime {
- t.Errorf("LiteralData.Time is %d, want %d", md.LiteralData.Time, expectedCreatationTime)
+ expectedCreationTime := uint32(1295992998)
+ if md.LiteralData.Time != expectedCreationTime {
+ t.Errorf("LiteralData.Time is %d, want %d", md.LiteralData.Time, expectedCreationTime)
}
if string(contents) != expected {
diff --git a/src/pkg/crypto/openpgp/s2k/s2k.go b/src/pkg/crypto/openpgp/s2k/s2k.go
index 93b7582fa..80b81bd3a 100644
--- a/src/pkg/crypto/openpgp/s2k/s2k.go
+++ b/src/pkg/crypto/openpgp/s2k/s2k.go
@@ -90,7 +90,7 @@ func Parse(r io.Reader) (f func(out, in []byte), err os.Error) {
}
h := hash.New()
if h == nil {
- return nil, error.UnsupportedError("hash not availible: " + strconv.Itoa(int(hash)))
+ return nil, error.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
}
switch buf[0] {
diff --git a/src/pkg/crypto/openpgp/write.go b/src/pkg/crypto/openpgp/write.go
index ef7b11230..a1ede564e 100644
--- a/src/pkg/crypto/openpgp/write.go
+++ b/src/pkg/crypto/openpgp/write.go
@@ -6,15 +6,12 @@ package openpgp
import (
"crypto"
- "crypto/dsa"
"crypto/openpgp/armor"
"crypto/openpgp/error"
"crypto/openpgp/packet"
- "crypto/rsa"
_ "crypto/sha256"
"io"
"os"
- "strconv"
"time"
)
@@ -77,17 +74,7 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S
}
io.Copy(wrappedHash, message)
- switch signer.PrivateKey.PubKeyAlgo {
- case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSASignOnly:
- priv := signer.PrivateKey.PrivateKey.(*rsa.PrivateKey)
- err = sig.SignRSA(h, priv)
- case packet.PubKeyAlgoDSA:
- priv := signer.PrivateKey.PrivateKey.(*dsa.PrivateKey)
- err = sig.SignDSA(h, priv)
- default:
- err = error.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
- }
-
+ err = sig.Sign(h, signer.PrivateKey)
if err != nil {
return
}
diff --git a/src/pkg/crypto/openpgp/write_test.go b/src/pkg/crypto/openpgp/write_test.go
index 42cd0d27f..a74a84b2b 100644
--- a/src/pkg/crypto/openpgp/write_test.go
+++ b/src/pkg/crypto/openpgp/write_test.go
@@ -6,7 +6,9 @@ package openpgp
import (
"bytes"
+ "crypto/rand"
"testing"
+ "time"
)
func TestSignDetached(t *testing.T) {
@@ -44,3 +46,42 @@ func TestSignDetachedDSA(t *testing.T) {
testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId)
}
+
+func TestNewEntity(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ e, err := NewEntity(rand.Reader, time.Seconds(), "Test User", "test", "test@example.com")
+ if err != nil {
+ t.Errorf("failed to create entity: %s", err)
+ return
+ }
+
+ w := bytes.NewBuffer(nil)
+ if err := e.SerializePrivate(w); err != nil {
+ t.Errorf("failed to serialize entity: %s", err)
+ return
+ }
+ serialized := w.Bytes()
+
+ el, err := ReadKeyRing(w)
+ if err != nil {
+ t.Errorf("failed to reparse entity: %s", err)
+ return
+ }
+
+ if len(el) != 1 {
+ t.Errorf("wrong number of entities found, got %d, want 1", len(el))
+ }
+
+ w = bytes.NewBuffer(nil)
+ if err := e.SerializePrivate(w); err != nil {
+ t.Errorf("failed to serialize entity second time: %s", err)
+ return
+ }
+
+ if !bytes.Equal(w.Bytes(), serialized) {
+ t.Errorf("results differed")
+ }
+}
diff --git a/src/pkg/crypto/rand/Makefile b/src/pkg/crypto/rand/Makefile
index 88b6d71e3..d1321297d 100644
--- a/src/pkg/crypto/rand/Makefile
+++ b/src/pkg/crypto/rand/Makefile
@@ -8,6 +8,7 @@ TARG=crypto/rand
GOFILES=\
rand.go\
+ util.go\
GOFILES_freebsd=\
rand_unix.go\
diff --git a/src/pkg/crypto/rand/util.go b/src/pkg/crypto/rand/util.go
new file mode 100644
index 000000000..77028476e
--- /dev/null
+++ b/src/pkg/crypto/rand/util.go
@@ -0,0 +1,80 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+ "big"
+ "io"
+ "os"
+)
+
+// Prime returns a number, p, of the given size, such that p is prime
+// with high probability.
+func Prime(rand io.Reader, bits int) (p *big.Int, err os.Error) {
+ if bits < 1 {
+ return nil, os.EINVAL
+ }
+
+ b := uint(bits % 8)
+ if b == 0 {
+ b = 8
+ }
+
+ bytes := make([]byte, (bits+7)/8)
+ p = new(big.Int)
+
+ for {
+ _, err = io.ReadFull(rand, bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Clear bits in the first byte to make sure the candidate has a size <= bits.
+ bytes[0] &= uint8(int(1<<b) - 1)
+ // Don't let the value be too small, i.e., set the most significant bit.
+ bytes[0] |= 1 << (b - 1)
+ // Make the value odd since an even number this large certainly isn't prime.
+ bytes[len(bytes)-1] |= 1
+
+ p.SetBytes(bytes)
+ if big.ProbablyPrime(p, 20) {
+ return
+ }
+ }
+
+ return
+}
+
+// Int returns a uniform random value in [0, max).
+func Int(rand io.Reader, max *big.Int) (n *big.Int, err os.Error) {
+ k := (max.BitLen() + 7) / 8
+
+ // b is the number of bits in the most significant byte of max.
+ b := uint(max.BitLen() % 8)
+ if b == 0 {
+ b = 8
+ }
+
+ bytes := make([]byte, k)
+ n = new(big.Int)
+
+ for {
+ _, err = io.ReadFull(rand, bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Clear bits in the first byte to increase the probability
+ // that the candidate is < max.
+ bytes[0] &= uint8(int(1<<b) - 1)
+
+ n.SetBytes(bytes)
+ if n.Cmp(max) < 0 {
+ return
+ }
+ }
+
+ return
+}
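
A minimal sketch of the two new helpers in use, drawing from the package's own Reader as the entropy source; the 128-bit size is an arbitrary example value:

package main

import (
	"big"
	"crypto/rand"
	"fmt"
)

func main() {
	// Generate a 128-bit probable prime from the system entropy source.
	// (128 bits is only an example size.)
	p, err := rand.Prime(rand.Reader, 128)
	if err != nil {
		fmt.Println("Prime:", err)
		return
	}
	fmt.Println("prime:", p)

	// Draw a uniform value in [0, p).
	n, err := rand.Int(rand.Reader, p)
	if err != nil {
		fmt.Println("Int:", err)
		return
	}
	fmt.Println("in range:", n.Sign() >= 0 && n.Cmp(p) < 0)
}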
diff --git a/src/pkg/crypto/rsa/rsa.go b/src/pkg/crypto/rsa/rsa.go
index e1813dbf9..6bfe6c4e5 100644
--- a/src/pkg/crypto/rsa/rsa.go
+++ b/src/pkg/crypto/rsa/rsa.go
@@ -9,6 +9,7 @@ package rsa
import (
"big"
+ "crypto/rand"
"crypto/subtle"
"hash"
"io"
@@ -18,69 +19,6 @@ import (
var bigZero = big.NewInt(0)
var bigOne = big.NewInt(1)
-// randomPrime returns a number, p, of the given size, such that p is prime
-// with high probability.
-func randomPrime(rand io.Reader, bits int) (p *big.Int, err os.Error) {
- if bits < 1 {
- err = os.EINVAL
- }
-
- bytes := make([]byte, (bits+7)/8)
- p = new(big.Int)
-
- for {
- _, err = io.ReadFull(rand, bytes)
- if err != nil {
- return
- }
-
- // Don't let the value be too small.
- bytes[0] |= 0x80
- // Make the value odd since an even number this large certainly isn't prime.
- bytes[len(bytes)-1] |= 1
-
- p.SetBytes(bytes)
- if big.ProbablyPrime(p, 20) {
- return
- }
- }
-
- return
-}
-
-// randomNumber returns a uniform random value in [0, max).
-func randomNumber(rand io.Reader, max *big.Int) (n *big.Int, err os.Error) {
- k := (max.BitLen() + 7) / 8
-
- // r is the number of bits in the used in the most significant byte of
- // max.
- r := uint(max.BitLen() % 8)
- if r == 0 {
- r = 8
- }
-
- bytes := make([]byte, k)
- n = new(big.Int)
-
- for {
- _, err = io.ReadFull(rand, bytes)
- if err != nil {
- return
- }
-
- // Clear bits in the first byte to increase the probability
- // that the candidate is < max.
- bytes[0] &= uint8(int(1<<r) - 1)
-
- n.SetBytes(bytes)
- if n.Cmp(max) < 0 {
- return
- }
- }
-
- return
-}
-
// A PublicKey represents the public part of an RSA key.
type PublicKey struct {
N *big.Int // modulus
@@ -94,7 +32,7 @@ type PrivateKey struct {
Primes []*big.Int // prime factors of N, has >= 2 elements.
// Precomputed contains precomputed values that speed up private
- // operations, if availible.
+ // operations, if available.
Precomputed PrecomputedValues
}
@@ -162,8 +100,8 @@ func (priv *PrivateKey) Validate() os.Error {
}
// GenerateKey generates an RSA keypair of the given bit size.
-func GenerateKey(rand io.Reader, bits int) (priv *PrivateKey, err os.Error) {
- return GenerateMultiPrimeKey(rand, 2, bits)
+func GenerateKey(random io.Reader, bits int) (priv *PrivateKey, err os.Error) {
+ return GenerateMultiPrimeKey(random, 2, bits)
}
// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit
@@ -176,7 +114,7 @@ func GenerateKey(rand io.Reader, bits int) (priv *PrivateKey, err os.Error) {
//
// [1] US patent 4405829 (1972, expired)
// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
-func GenerateMultiPrimeKey(rand io.Reader, nprimes int, bits int) (priv *PrivateKey, err os.Error) {
+func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (priv *PrivateKey, err os.Error) {
priv = new(PrivateKey)
// Smaller public exponents lead to faster public key
// operations. Since the exponent must be coprime to
@@ -198,7 +136,7 @@ NextSetOfPrimes:
for {
todo := bits
for i := 0; i < nprimes; i++ {
- primes[i], err = randomPrime(rand, todo/(nprimes-i))
+ primes[i], err = rand.Prime(random, todo/(nprimes-i))
if err != nil {
return nil, err
}
@@ -293,7 +231,7 @@ func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
// EncryptOAEP encrypts the given message with RSA-OAEP.
// The message must be no longer than the length of the public modulus less
// twice the hash length plus 2.
-func EncryptOAEP(hash hash.Hash, rand io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err os.Error) {
+func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err os.Error) {
hash.Reset()
k := (pub.N.BitLen() + 7) / 8
if len(msg) > k-2*hash.Size()-2 {
@@ -313,7 +251,7 @@ func EncryptOAEP(hash hash.Hash, rand io.Reader, pub *PublicKey, msg []byte, lab
db[len(db)-len(msg)-1] = 1
copy(db[len(db)-len(msg):], msg)
- _, err = io.ReadFull(rand, seed)
+ _, err = io.ReadFull(random, seed)
if err != nil {
return
}
@@ -405,7 +343,7 @@ func (priv *PrivateKey) Precompute() {
// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
// random source is given, RSA blinding is used.
-func decrypt(rand io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err os.Error) {
+func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err os.Error) {
// TODO(agl): can we get away with reusing blinds?
if c.Cmp(priv.N) > 0 {
err = DecryptionError{}
@@ -413,16 +351,16 @@ func decrypt(rand io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err os.E
}
var ir *big.Int
- if rand != nil {
+ if random != nil {
// Blinding enabled. Blinding involves multiplying c by r^e.
// Then the decryption operation performs (m^e * r^e)^d mod n
// which equals mr mod n. The factor of r can then be removed
- // by multipling by the multiplicative inverse of r.
+ // by multiplying by the multiplicative inverse of r.
var r *big.Int
for {
- r, err = randomNumber(rand, priv.N)
+ r, err = rand.Int(random, priv.N)
if err != nil {
return
}
@@ -483,7 +421,7 @@ func decrypt(rand io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err os.E
// DecryptOAEP decrypts ciphertext using RSA-OAEP.
// If rand != nil, DecryptOAEP uses RSA blinding to avoid timing side-channel attacks.
-func DecryptOAEP(hash hash.Hash, rand io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err os.Error) {
+func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err os.Error) {
k := (priv.N.BitLen() + 7) / 8
if len(ciphertext) > k ||
k < hash.Size()*2+2 {
@@ -493,7 +431,7 @@ func DecryptOAEP(hash hash.Hash, rand io.Reader, priv *PrivateKey, ciphertext []
c := new(big.Int).SetBytes(ciphertext)
- m, err := decrypt(rand, priv, c)
+ m, err := decrypt(random, priv, c)
if err != nil {
return
}
diff --git a/src/pkg/crypto/subtle/constant_time_test.go b/src/pkg/crypto/subtle/constant_time_test.go
index b28b73581..adab8e2e8 100644
--- a/src/pkg/crypto/subtle/constant_time_test.go
+++ b/src/pkg/crypto/subtle/constant_time_test.go
@@ -14,14 +14,14 @@ type TestConstantTimeCompareStruct struct {
out int
}
-var testConstandTimeCompareData = []TestConstantTimeCompareStruct{
+var testConstantTimeCompareData = []TestConstantTimeCompareStruct{
{[]byte{}, []byte{}, 1},
{[]byte{0x11}, []byte{0x11}, 1},
{[]byte{0x12}, []byte{0x11}, 0},
}
func TestConstantTimeCompare(t *testing.T) {
- for i, test := range testConstandTimeCompareData {
+ for i, test := range testConstantTimeCompareData {
if r := ConstantTimeCompare(test.a, test.b); r != test.out {
t.Errorf("#%d bad result (got %x, want %x)", i, r, test.out)
}
diff --git a/src/pkg/crypto/tls/common.go b/src/pkg/crypto/tls/common.go
index 204d25531..3efac9c13 100644
--- a/src/pkg/crypto/tls/common.go
+++ b/src/pkg/crypto/tls/common.go
@@ -87,7 +87,7 @@ const (
certTypeRSASign = 1 // A certificate containing an RSA key
certTypeDSSSign = 2 // A certificate containing a DSA key
certTypeRSAFixedDH = 3 // A certificate containing a static DH key
- certTypeDSSFixedDH = 4 // A certficiate containing a static DH key
+ certTypeDSSFixedDH = 4 // A certificate containing a static DH key
// Rest of these are reserved by the TLS spec
)
@@ -100,6 +100,8 @@ type ConnectionState struct {
// the certificate chain that was presented by the other side
PeerCertificates []*x509.Certificate
+ // the verified certificate chains built from PeerCertificates.
+ VerifiedChains [][]*x509.Certificate
}
// A Config structure is used to configure a TLS client or server. After one
diff --git a/src/pkg/crypto/tls/conn.go b/src/pkg/crypto/tls/conn.go
index 63d56310c..097e182bd 100644
--- a/src/pkg/crypto/tls/conn.go
+++ b/src/pkg/crypto/tls/conn.go
@@ -34,7 +34,7 @@ type Conn struct {
cipherSuite uint16
ocspResponse []byte // stapled OCSP response
peerCertificates []*x509.Certificate
- // verifedChains contains the certificate chains that we built, as
+ // verifiedChains contains the certificate chains that we built, as
// opposed to the ones presented by the server.
verifiedChains [][]*x509.Certificate
@@ -237,7 +237,7 @@ func (hc *halfConn) decrypt(b *block) (bool, alert) {
// "Password Interception in a SSL/TLS Channel", Brice
// Canvel et al.
//
- // However, our behaviour matches OpenSSL, so we leak
+ // However, our behavior matches OpenSSL, so we leak
// only as much as they do.
default:
panic("unknown cipher type")
@@ -410,7 +410,7 @@ func (hc *halfConn) freeBlock(b *block) {
// splitBlock splits a block after the first n bytes,
// returning a block with those n bytes and a
-// block with the remaindec. the latter may be nil.
+// block with the remainder. the latter may be nil.
func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) {
if len(b.data) <= n {
return b, nil
@@ -768,6 +768,7 @@ func (c *Conn) ConnectionState() ConnectionState {
state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback
state.CipherSuite = c.cipherSuite
state.PeerCertificates = c.peerCertificates
+ state.VerifiedChains = c.verifiedChains
}
return state
diff --git a/src/pkg/crypto/tls/handshake_server.go b/src/pkg/crypto/tls/handshake_server.go
index 37c8d154a..e9431c6fa 100644
--- a/src/pkg/crypto/tls/handshake_server.go
+++ b/src/pkg/crypto/tls/handshake_server.go
@@ -209,10 +209,10 @@ FindCipherSuite:
// If we received a client cert in response to our certificate request message,
// the client will send us a certificateVerifyMsg immediately after the
- // clientKeyExchangeMsg. This message is a MD5SHA1 digest of all preceeding
+ // clientKeyExchangeMsg. This message is a MD5SHA1 digest of all preceding
// handshake-layer messages that is signed using the private key corresponding
// to the client's certificate. This allows us to verify that the client is in
- // posession of the private key of the certificate.
+ // possession of the private key of the certificate.
if len(c.peerCertificates) > 0 {
msg, err = c.readHandshake()
if err != nil {
diff --git a/src/pkg/crypto/tls/key_agreement.go b/src/pkg/crypto/tls/key_agreement.go
index 8edbb1190..84f90c45a 100644
--- a/src/pkg/crypto/tls/key_agreement.go
+++ b/src/pkg/crypto/tls/key_agreement.go
@@ -236,12 +236,12 @@ func (ka *ecdheRSAKeyAgreement) generateClientKeyExchange(config *Config, client
xBytes := x.Bytes()
copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes)
- serialised := ka.curve.Marshal(mx, my)
+ serialized := ka.curve.Marshal(mx, my)
ckx := new(clientKeyExchangeMsg)
- ckx.ciphertext = make([]byte, 1+len(serialised))
- ckx.ciphertext[0] = byte(len(serialised))
- copy(ckx.ciphertext[1:], serialised)
+ ckx.ciphertext = make([]byte, 1+len(serialized))
+ ckx.ciphertext[0] = byte(len(serialized))
+ copy(ckx.ciphertext[1:], serialized)
return preMasterSecret, ckx, nil
}
diff --git a/src/pkg/crypto/x509/crl/Makefile b/src/pkg/crypto/x509/crl/Makefile
new file mode 100644
index 000000000..d780af38e
--- /dev/null
+++ b/src/pkg/crypto/x509/crl/Makefile
@@ -0,0 +1,11 @@
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../../Make.inc
+
+TARG=crypto/x509/crl
+GOFILES=\
+ crl.go\
+
+include ../../../../Make.pkg
diff --git a/src/pkg/crypto/x509/crl/crl.go b/src/pkg/crypto/x509/crl/crl.go
new file mode 100644
index 000000000..c79c797c7
--- /dev/null
+++ b/src/pkg/crypto/x509/crl/crl.go
@@ -0,0 +1,96 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crl exposes low-level details of PKIX Certificate Revocation Lists
+// as specified in RFC 5280, section 5.
+package crl
+
+import (
+ "asn1"
+ "bytes"
+ "encoding/pem"
+ "os"
+ "time"
+)
+
+// CertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1. Use crypto/x509/Certificate.CheckCRLSignature to verify
+// the signature.
+type CertificateList struct {
+ TBSCertList TBSCertificateList
+ SignatureAlgorithm AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+// HasExpired returns true iff currentTimeSeconds is past the expiry time of
+// certList.
+func (certList *CertificateList) HasExpired(currentTimeSeconds int64) bool {
+ return certList.TBSCertList.NextUpdate.Seconds() <= currentTimeSeconds
+}
+
+// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1.
+type TBSCertificateList struct {
+ Raw asn1.RawContent
+ Version int "optional,default:2"
+ Signature AlgorithmIdentifier
+ Issuer asn1.RawValue
+ ThisUpdate *time.Time
+ NextUpdate *time.Time
+ RevokedCertificates []RevokedCertificate "optional"
+ Extensions []Extension "tag:0,optional,explicit"
+}
+
+// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.1.1.2.
+type AlgorithmIdentifier struct {
+ Algo asn1.ObjectIdentifier
+ Params asn1.RawValue "optional"
+}
+
+// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1.
+type RevokedCertificate struct {
+ SerialNumber asn1.RawValue
+ RevocationTime *time.Time
+ Extensions []Extension "optional"
+}
+
+// Extension represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.2.
+type Extension struct {
+ Id asn1.ObjectIdentifier
+ IsCritial bool "optional"
+ Value []byte
+}
+
+// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
+// CRL.
+var pemCRLPrefix = []byte("-----BEGIN X509 CRL")
+// pemType is the type of a PEM encoded CRL.
+var pemType = "X509 CRL"
+
+// Parse parses a CRL from the given bytes. It's often the case that PEM
+// encoded CRLs will appear where they should be DER encoded, so this function
+// will transparently handle PEM encoding as long as there isn't any leading
+// garbage.
+func Parse(crlBytes []byte) (certList *CertificateList, err os.Error) {
+ if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
+ block, _ := pem.Decode(crlBytes)
+ if block != nil && block.Type == pemType {
+ crlBytes = block.Bytes
+ }
+ }
+ return ParseDER(crlBytes)
+}
+
+// ParseDER parses a DER encoded CRL from the given bytes.
+func ParseDER(derBytes []byte) (certList *CertificateList, err os.Error) {
+ certList = new(CertificateList)
+ _, err = asn1.Unmarshal(derBytes, certList)
+ if err != nil {
+ certList = nil
+ }
+ return
+}
diff --git a/src/pkg/crypto/x509/crl/crl_test.go b/src/pkg/crypto/x509/crl/crl_test.go
new file mode 100644
index 000000000..62d8dc195
--- /dev/null
+++ b/src/pkg/crypto/x509/crl/crl_test.go
@@ -0,0 +1,63 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crl
+
+import (
+ "encoding/base64"
+ "testing"
+)
+
+func fromBase64(in string) []byte {
+ out := make([]byte, base64.StdEncoding.DecodedLen(len(in)))
+ _, err := base64.StdEncoding.Decode(out, []byte(in))
+ if err != nil {
+ panic("failed to base64 decode")
+ }
+ return out
+}
+
+func TestParseDER(t *testing.T) {
+ derBytes := fromBase64(derCRLBase64)
+ certList, err := ParseDER(derBytes)
+ if err != nil {
+ t.Errorf("error parsing: %s", err)
+ return
+ }
+ numCerts := len(certList.TBSCertList.RevokedCertificates)
+ expected := 88
+ if numCerts != expected {
+ t.Errorf("bad number of revoked certificates. got: %d want: %d", numCerts, expected)
+ }
+
+ if certList.HasExpired(1302517272) {
+ t.Errorf("CRL has expired (but shouldn't have)")
+ }
+
+ // Can't check the signature here without a package cycle.
+}
+
+func TestParsePEM(t *testing.T) {
+ pemBytes := fromBase64(pemCRLBase64)
+ certList, err := Parse(pemBytes)
+ if err != nil {
+ t.Errorf("error parsing: %s", err)
+ return
+ }
+ numCerts := len(certList.TBSCertList.RevokedCertificates)
+ expected := 2
+ if numCerts != expected {
+ t.Errorf("bad number of revoked certificates. got: %d want: %d", numCerts, expected)
+ }
+
+ if certList.HasExpired(1302517272) {
+ t.Errorf("CRL has expired (but shouldn't have)")
+ }
+
+ // Can't check the signature here without a package cycle.
+}
+
+const derCRLBase64 = "MIINqzCCDJMCAQEwDQYJKoZIhvcNAQEFBQAwVjEZMBcGA1UEAxMQUEtJIEZJTk1FQ0NBTklDQTEVMBMGA1UEChMMRklOTUVDQ0FOSUNBMRUwEwYDVQQLEwxGSU5NRUNDQU5JQ0ExCzAJBgNVBAYTAklUFw0xMTA1MDQxNjU3NDJaFw0xMTA1MDQyMDU3NDJaMIIMBzAhAg4Ze1od49Lt1qIXBydAzhcNMDkwNzE2MDg0MzIyWjAAMCECDl0HSL9bcZ1Ci/UHJ0DPFw0wOTA3MTYwODQzMTNaMAAwIQIOESB9tVAmX3cY7QcnQNAXDTA5MDcxNjA4NDUyMlowADAhAg4S1tGAQ3mHt8uVBydA1RcNMDkwODA0MTUyNTIyWjAAMCECDlQ249Y7vtC25ScHJ0DWFw0wOTA4MDQxNTI1MzdaMAAwIQIOISMop3NkA4PfYwcnQNkXDTA5MDgwNDExMDAzNFowADAhAg56/BMoS29KEShTBydA2hcNMDkwODA0MTEwMTAzWjAAMCECDnBp/22HPH5CSWoHJ0DbFw0wOTA4MDQxMDU0NDlaMAAwIQIOV9IP+8CD8bK+XAcnQNwXDTA5MDgwNDEwNTcxN1owADAhAg4v5aRz0IxWqYiXBydA3RcNMDkwODA0MTA1NzQ1WjAAMCECDlOU34VzvZAybQwHJ0DeFw0wOTA4MDQxMDU4MjFaMAAwIAINO4CD9lluIxcwBydBAxcNMDkwNzIyMTUzMTU5WjAAMCECDgOllfO8Y1QA7/wHJ0ExFw0wOTA3MjQxMTQxNDNaMAAwIQIOJBX7jbiCdRdyjgcnQUQXDTA5MDkxNjA5MzAwOFowADAhAg5iYSAgmDrlH/RZBydBRRcNMDkwOTE2MDkzMDE3WjAAMCECDmu6k6srP3jcMaQHJ0FRFw0wOTA4MDQxMDU2NDBaMAAwIQIOX8aHlO0V+WVH4QcnQVMXDTA5MDgwNDEwNTcyOVowADAhAg5flK2rg3NnsRgDBydBzhcNMTEwMjAxMTUzMzQ2WjAAMCECDg35yJDL1jOPTgoHJ0HPFw0xMTAyMDExNTM0MjZaMAAwIQIOMyFJ6+e9iiGVBQcnQdAXDTA5MDkxODEzMjAwNVowADAhAg5Emb/Oykucmn8fBydB1xcNMDkwOTIxMTAxMDQ3WjAAMCECDjQKCncV+MnUavMHJ0HaFw0wOTA5MjIwODE1MjZaMAAwIQIOaxiFUt3dpd+tPwcnQfQXDTEwMDYxODA4NDI1MVowADAhAg5G7P8nO0tkrMt7BydB9RcNMTAwNjE4MDg0MjMwWjAAMCECDmTCC3SXhmDRst4HJ0H2Fw0wOTA5MjgxMjA3MjBaMAAwIQIOHoGhUr/pRwzTKgcnQfcXDTA5MDkyODEyMDcyNFowADAhAg50wrcrCiw8mQmPBydCBBcNMTAwMjE2MTMwMTA2WjAAMCECDifWmkvwyhEqwEcHJ0IFFw0xMDAyMTYxMzAxMjBaMAAwIQIOfgPmlW9fg+osNgcnQhwXDTEwMDQxMzA5NTIwMFowADAhAg4YHAGuA6LgCk7tBydCHRcNMTAwNDEzMDk1MTM4WjAAMCECDi1zH1bxkNJhokAHJ0IsFw0xMDA0MTMwOTU5MzBaMAAwIQIOMipNccsb/wo2fwcnQi0XDTEwMDQxMzA5NTkwMFowADAhAg46lCmvPl4GpP6ABydCShcNMTAwMTE5MDk1MjE3WjAAMCECDjaTcaj+wBpcGAsHJ0JLFw0xMDAxMTkwOTUyMzRaMAAwIQIOOMC13EOrBuxIOQcnQloXDTEwMDIwMTA5NDcwNVowADAhAg5KmZl+krz4RsmrBydCWxcNMTAwMjAxMDk0NjQwWjAAMCECDmLG3zQJ/fzdSsUHJ0JiFw0xMDAzMDEwOTUxNDBaMAAwIQIOP39ksgHdojf4owcnQmMXDTEwMDMwMTA5NTExN1owADAhAg4LDQzvWNRlD6v9BydCZBcNMTAwMzAxMDk0NjIyWjAAMCECDkmNfeclaFhIaaUHJ0JlFw0xMDAzMDEwOTQ2MDVaMAAwIQIOT/qWWfpH/m8NTwcnQpQXDTEwMDUxMTA5MTgyMVowADAhAg5m/ksYxvCEgJSvBydClRcNMTAwNTExMDkxODAxWjAAMCECDgvf3Ohq6JOPU9AHJ0KWFw0xMDA1MTEwOTIxMjNaMAAwIQIOKSPas10z4jNVIQcnQpcXDTEwMDUxMTA5MjEwMlowADAhAg4mCWmhoZ3lyKCDBydCohcNMTEwNDI4MTEwMjI1WjAAMCECDkeiyRsBMK0Gvr4HJ0KjFw0xMTA0MjgxMTAyMDdaMAAwIQIOa09b/nH2+55SSwcnQq4XDTExMDQwMTA4Mjk0NlowADAhAg5O7M7iq7gGplr1BydCrxcNMTEwNDAxMDgzMDE3WjAAMCECDjlT6mJxUjTvyogHJ0K1Fw0xMTAxMjcxNTQ4NTJaMAAwIQIODS/l4UUFLe21NAcnQrYXDTExMDEyNzE1NDgyOFowADAhAg5lPRA0XdOUF6lSBydDHhcNMTEwMTI4MTQzNTA1WjAAMCECDixKX4fFGGpENwgHJ0MfFw0xMTAxMjgxNDM1MzBaMAAwIQIORNBkqsPnpKTtbAcnQ08XDTEwMDkwOTA4NDg0MlowADAhAg5QL+EMM3lohedEBydDUBcNMTAwOTA5MDg0ODE5WjAAMCECDlhDnHK+HiTRAXcHJ0NUFw0xMDEwMTkxNjIxNDBaMAAwIQIOdBFqAzq/INz53gcnQ1UXDTEwMTAxOTE2MjA0NFowADAhAg4OjR7s8MgKles1BydDWhcNMTEwMTI3MTY1MzM2WjAAMCECDmfR/elHee+d0SoHJ0NbFw0xMTAxMjcxNjUzNTZaMAAwIQIOBTKv2ui+KFMI+wcnQ5YXDTEwMDkxNTEwMjE1N1owADAhAg49F3c/GSah+oRUBydDmxcNMTEwMTI3MTczMjMzWjAAMCECDggv4I61WwpKFMMHJ0OcFw0xMTAxMjcxNzMyNTVaMAAwIQIOXx/Y8sEvwS10LAcnQ6UXDTExMDEyODExMjkzN1owADAhAg5LSLbnVrSKaw/9BydDphcNMTEwMTI4MTEyOTIwWjAAMCECDmFFoCuhKUeACQQHJ0PfFw0xMTAxMTExMDE3MzdaMAAwIQIOQTDdFh2fSPF6AAcnQ+AXDTExMDExMTEwMTcxMFowADAhAg5B8AOXX61FpvbbBydD5RcNMTAxMDA2MTAxNDM2WjAAMCECDh41P2Gmi7PkwI4HJ0PmFw0xMDEwMDYxMDE2MjVaMAAwIQIOWUHGLQCd+Ale9gcnQ/0XDTExMDUwMjA3NTYxMFowADAhAg5Z2c9AYkikmgWOBydD/hcNMTEwNTAyMDc1NjM0WjAAMCECDmf/UD+/h8nf+74HJ0QVFw0xMTA0MTUwNzI4MzNaMAAwIQIOICvj4epy3MrqfwcnRBYXDTExMDQxNTA3Mjg1NlowADAhAg4bouRMfOYqgv4xBydE
HxcNMTEwMzA4MTYyNDI1WjAAMCECDhebWHGoKiTp7pEHJ0QgFw0xMTAzMDgxNjI0NDhaMAAwIQIOX+qnxxAqJ8LtawcnRDcXDTExMDEzMTE1MTIyOFowADAhAg4j0fICqZ+wkOdqBydEOBcNMTEwMTMxMTUxMTQxWjAAMCECDhmXjsV4SUpWtAMHJ0RLFw0xMTAxMjgxMTI0MTJaMAAwIQIODno/w+zG43kkTwcnREwXDTExMDEyODExMjM1MlowADAhAg4b1gc88767Fr+LBydETxcNMTEwMTI4MTEwMjA4WjAAMCECDn+M3Pa1w2nyFeUHJ0RQFw0xMTAxMjgxMDU4NDVaMAAwIQIOaduoyIH61tqybAcnRJUXDTEwMTIxNTA5NDMyMlowADAhAg4nLqQPkyi3ESAKBydElhcNMTAxMjE1MDk0MzM2WjAAMCECDi504NIMH8578gQHJ0SbFw0xMTAyMTQxNDA1NDFaMAAwIQIOGuaM8PDaC5u1egcnRJwXDTExMDIxNDE0MDYwNFowADAhAg4ehYq/BXGnB5PWBydEnxcNMTEwMjA0MDgwOTUxWjAAMCECDkSD4eS4FxW5H20HJ0SgFw0xMTAyMDQwODA5MjVaMAAwIQIOOCcb6ilYObt1egcnRKEXDTExMDEyNjEwNDEyOVowADAhAg58tISWCCwFnKGnBydEohcNMTEwMjA0MDgxMzQyWjAAMCECDn5rjtabY/L/WL0HJ0TJFw0xMTAyMDQxMTAzNDFaMAAwDQYJKoZIhvcNAQEFBQADggEBAGnF2Gs0+LNiYCW1Ipm83OXQYP/bd5tFFRzyz3iepFqNfYs4D68/QihjFoRHQoXEB0OEe1tvaVnnPGnEOpi6krwekquMxo4H88B5SlyiFIqemCOIss0SxlCFs69LmfRYvPPvPEhoXtQ3ZThe0UvKG83GOklhvGl6OaiRf4Mt+m8zOT4Wox/j6aOBK6cw6qKCdmD+Yj1rrNqFGg1CnSWMoD6S6mwNgkzwdBUJZ22BwrzAAo4RHa2Uy3ef1FjwD0XtU5N3uDSxGGBEDvOe5z82rps3E22FpAA8eYl8kaXtmWqyvYU0epp4brGuTxCuBMCAsxt/OjIjeNNQbBGkwxgfYA0="
+
+const pemCRLBase64 = "LS0tLS1CRUdJTiBYNTA5IENSTC0tLS0tDQpNSUlCOWpDQ0FWOENBUUV3RFFZSktvWklodmNOQVFFRkJRQXdiREVhTUJnR0ExVUVDaE1SVWxOQklGTmxZM1Z5DQphWFI1SUVsdVl5NHhIakFjQmdOVkJBTVRGVkpUUVNCUWRXSnNhV01nVW05dmRDQkRRU0IyTVRFdU1Dd0dDU3FHDQpTSWIzRFFFSkFSWWZjbk5oYTJWdmJuSnZiM1J6YVdkdVFISnpZWE5sWTNWeWFYUjVMbU52YlJjTk1URXdNakl6DQpNVGt5T0RNd1doY05NVEV3T0RJeU1Ua3lPRE13V2pDQmpEQktBaEVBckRxb2g5RkhKSFhUN09QZ3V1bjQrQmNODQpNRGt4TVRBeU1UUXlOekE1V2pBbU1Bb0dBMVVkRlFRRENnRUpNQmdHQTFVZEdBUVJHQTh5TURBNU1URXdNakUwDQpNalExTlZvd1BnSVJBTEd6blowOTVQQjVhQU9MUGc1N2ZNTVhEVEF5TVRBeU16RTBOVEF4TkZvd0dqQVlCZ05WDQpIUmdFRVJnUE1qQXdNakV3TWpNeE5EVXdNVFJhb0RBd0xqQWZCZ05WSFNNRUdEQVdnQlQxVERGNlVRTS9MTmVMDQpsNWx2cUhHUXEzZzltekFMQmdOVkhSUUVCQUlDQUlRd0RRWUpLb1pJaHZjTkFRRUZCUUFEZ1lFQUZVNUFzNk16DQpxNVBSc2lmYW9iUVBHaDFhSkx5QytNczVBZ2MwYld5QTNHQWR4dXI1U3BQWmVSV0NCamlQL01FSEJXSkNsQkhQDQpHUmNxNXlJZDNFakRrYUV5eFJhK2k2N0x6dmhJNmMyOUVlNks5cFNZd2ppLzdSVWhtbW5Qclh0VHhsTDBsckxyDQptUVFKNnhoRFJhNUczUUE0Q21VZHNITnZicnpnbUNZcHZWRT0NCi0tLS0tRU5EIFg1MDkgQ1JMLS0tLS0NCg0K"
diff --git a/src/pkg/crypto/x509/x509.go b/src/pkg/crypto/x509/x509.go
index f2a039b5a..6ae1f8e39 100644
--- a/src/pkg/crypto/x509/x509.go
+++ b/src/pkg/crypto/x509/x509.go
@@ -11,9 +11,10 @@ import (
"bytes"
"container/vector"
"crypto"
+ "crypto/dsa"
"crypto/rsa"
"crypto/sha1"
- "hash"
+ "crypto/x509/crl"
"io"
"os"
"time"
@@ -32,10 +33,10 @@ type pkcs1PrivateKey struct {
Dq asn1.RawValue "optional"
Qinv asn1.RawValue "optional"
- AdditionalPrimes []pkcs1AddtionalRSAPrime "optional"
+ AdditionalPrimes []pkcs1AdditionalRSAPrime "optional"
}
-type pkcs1AddtionalRSAPrime struct {
+type pkcs1AdditionalRSAPrime struct {
Prime asn1.RawValue
// We ignore these values because rsa will calculate them.
@@ -134,7 +135,7 @@ func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
Qinv: rawValueForBig(key.Precomputed.Qinv),
}
- priv.AdditionalPrimes = make([]pkcs1AddtionalRSAPrime, len(key.Precomputed.CRTValues))
+ priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues))
for i, values := range key.Precomputed.CRTValues {
priv.AdditionalPrimes[i].Prime = rawValueForBig(key.Primes[2+i])
priv.AdditionalPrimes[i].Exp = rawValueForBig(values.Exp)
@@ -168,8 +169,17 @@ type tbsCertificate struct {
Extensions []extension "optional,explicit,tag:3"
}
+type dsaAlgorithmParameters struct {
+ P, Q, G asn1.RawValue
+}
+
+type dsaSignature struct {
+ R, S asn1.RawValue
+}
+
type algorithmIdentifier struct {
- Algorithm asn1.ObjectIdentifier
+ Algorithm asn1.ObjectIdentifier
+ Parameters asn1.RawValue "optional"
}
type rdnSequence []relativeDistinguishedNameSET
@@ -186,6 +196,7 @@ type validity struct {
}
type publicKeyInfo struct {
+ Raw asn1.RawContent
Algorithm algorithmIdentifier
PublicKey asn1.BitString
}
@@ -211,6 +222,8 @@ const (
SHA256WithRSA
SHA384WithRSA
SHA512WithRSA
+ DSAWithSHA1
+ DSAWithSHA256
)
type PublicKeyAlgorithm int
@@ -218,6 +231,7 @@ type PublicKeyAlgorithm int
const (
UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
RSA
+ DSA
)
// Name represents an X.509 distinguished name. This only includes the common
@@ -272,7 +286,7 @@ var (
oidOrganizationalUnit = []int{2, 5, 4, 11}
oidCommonName = []int{2, 5, 4, 3}
oidSerialNumber = []int{2, 5, 4, 5}
- oidLocatity = []int{2, 5, 4, 7}
+ oidLocality = []int{2, 5, 4, 7}
oidProvince = []int{2, 5, 4, 8}
oidStreetAddress = []int{2, 5, 4, 9}
oidPostalCode = []int{2, 5, 4, 17}
@@ -300,7 +314,7 @@ func (n Name) toRDNSequence() (ret rdnSequence) {
ret = appendRDNs(ret, n.Country, oidCountry)
ret = appendRDNs(ret, n.Organization, oidOrganization)
ret = appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
- ret = appendRDNs(ret, n.Locality, oidLocatity)
+ ret = appendRDNs(ret, n.Locality, oidLocality)
ret = appendRDNs(ret, n.Province, oidProvince)
ret = appendRDNs(ret, n.StreetAddress, oidStreetAddress)
ret = appendRDNs(ret, n.PostalCode, oidPostalCode)
@@ -314,37 +328,93 @@ func (n Name) toRDNSequence() (ret rdnSequence) {
return ret
}
-func getSignatureAlgorithmFromOID(oid []int) SignatureAlgorithm {
- if len(oid) == 7 && oid[0] == 1 && oid[1] == 2 && oid[2] == 840 &&
- oid[3] == 113549 && oid[4] == 1 && oid[5] == 1 {
- switch oid[6] {
- case 2:
- return MD2WithRSA
- case 4:
- return MD5WithRSA
- case 5:
- return SHA1WithRSA
- case 11:
- return SHA256WithRSA
- case 12:
- return SHA384WithRSA
- case 13:
- return SHA512WithRSA
- }
- }
+// OIDs for signature algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
+//
+//
+// RFC 3279 2.2.1 RSA Signature Algorithms
+//
+// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
+//
+// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
+//
+// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
+//
+// dsaWithSha1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
+//
+//
+// RFC 4055 5 PKCS #1 Version 1.5
+//
+// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
+//
+// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
+//
+// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
+//
+//
+// RFC 5758 3.1 DSA Signature Algorithms
+//
+// dsaWithSha256 OBJECT IDENTIFIER ::= {
+// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+// algorithms(4) id-dsa-with-sha2(3) 2}
+//
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
+)
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) SignatureAlgorithm {
+ switch {
+ case oid.Equal(oidSignatureMD2WithRSA):
+ return MD2WithRSA
+ case oid.Equal(oidSignatureMD5WithRSA):
+ return MD5WithRSA
+ case oid.Equal(oidSignatureSHA1WithRSA):
+ return SHA1WithRSA
+ case oid.Equal(oidSignatureSHA256WithRSA):
+ return SHA256WithRSA
+ case oid.Equal(oidSignatureSHA384WithRSA):
+ return SHA384WithRSA
+ case oid.Equal(oidSignatureSHA512WithRSA):
+ return SHA512WithRSA
+ case oid.Equal(oidSignatureDSAWithSHA1):
+ return DSAWithSHA1
+ case oid.Equal(oidSignatureDSAWithSHA256):
+ return DSAWithSHA256
+ }
return UnknownSignatureAlgorithm
}
-func getPublicKeyAlgorithmFromOID(oid []int) PublicKeyAlgorithm {
- if len(oid) == 7 && oid[0] == 1 && oid[1] == 2 && oid[2] == 840 &&
- oid[3] == 113549 && oid[4] == 1 && oid[5] == 1 {
- switch oid[6] {
- case 1:
- return RSA
- }
- }
+// RFC 3279, 2.3 Public Key Algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840)
+// rsadsi(113549) pkcs(1) 1 }
+//
+// rsaEncryption OBJECT IDENTIFIER ::= { pkcs-1 1 }
+//
+// id-dsa OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840)
+// x9-57(10040) x9cm(4) 1 }
+var (
+ oidPublicKeyRsa = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+ oidPublicKeyDsa = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+)
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
+ switch {
+ case oid.Equal(oidPublicKeyRsa):
+ return RSA
+ case oid.Equal(oidPublicKeyDsa):
+ return DSA
+ }
return UnknownPublicKeyAlgorithm
}
@@ -402,8 +472,10 @@ const (
// A Certificate represents an X.509 certificate.
type Certificate struct {
- Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
- RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content.
+ Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
+ RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content.
+ RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+
Signature []byte
SignatureAlgorithm SignatureAlgorithm
@@ -482,26 +554,60 @@ func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err os.Error) {
// TODO(agl): don't ignore the path length constraint.
- var h hash.Hash
+ return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature)
+}
+
+// CheckSignature verifies that signature is a valid signature over signed from
+// c's public key.
+func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err os.Error) {
var hashType crypto.Hash
- switch c.SignatureAlgorithm {
- case SHA1WithRSA:
- h = sha1.New()
+ switch algo {
+ case SHA1WithRSA, DSAWithSHA1:
hashType = crypto.SHA1
+ case SHA256WithRSA, DSAWithSHA256:
+ hashType = crypto.SHA256
+ case SHA384WithRSA:
+ hashType = crypto.SHA384
+ case SHA512WithRSA:
+ hashType = crypto.SHA512
default:
return UnsupportedAlgorithmError{}
}
- pub, ok := parent.PublicKey.(*rsa.PublicKey)
- if !ok {
+ h := hashType.New()
+ if h == nil {
return UnsupportedAlgorithmError{}
}
- h.Write(c.RawTBSCertificate)
+ h.Write(signed)
digest := h.Sum()
- return rsa.VerifyPKCS1v15(pub, hashType, digest, c.Signature)
+ switch pub := c.PublicKey.(type) {
+ case *rsa.PublicKey:
+ return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
+ case *dsa.PublicKey:
+ dsaSig := new(dsaSignature)
+ if _, err := asn1.Unmarshal(signature, dsaSig); err != nil {
+ return err
+ }
+ if !rawValueIsInteger(&dsaSig.R) || !rawValueIsInteger(&dsaSig.S) {
+ return asn1.StructuralError{"tags don't match"}
+ }
+ r := new(big.Int).SetBytes(dsaSig.R.Bytes)
+ s := new(big.Int).SetBytes(dsaSig.S.Bytes)
+ if !dsa.Verify(pub, digest, r, s) {
+ return os.ErrorString("DSA verification failure")
+ }
+ return
+ }
+ return UnsupportedAlgorithmError{}
+}
+
+// CheckCRLSignature checks that the signature in crl is from c.
+func (c *Certificate) CheckCRLSignature(crl *crl.CertificateList) (err os.Error) {
+ algo := getSignatureAlgorithmFromOID(crl.SignatureAlgorithm.Algo)
+ return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
}
type UnhandledCriticalExtension struct{}
@@ -538,7 +644,8 @@ type generalSubtree struct {
Max int "optional,tag:1"
}
-func parsePublicKey(algo PublicKeyAlgorithm, asn1Data []byte) (interface{}, os.Error) {
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, os.Error) {
+ asn1Data := keyData.PublicKey.RightAlign()
switch algo {
case RSA:
p := new(rsaPublicKey)
@@ -556,10 +663,38 @@ func parsePublicKey(algo PublicKeyAlgorithm, asn1Data []byte) (interface{}, os.E
N: new(big.Int).SetBytes(p.N.Bytes),
}
return pub, nil
+ case DSA:
+ p := new(asn1.RawValue)
+ _, err := asn1.Unmarshal(asn1Data, p)
+ if err != nil {
+ return nil, err
+ }
+ if !rawValueIsInteger(p) {
+ return nil, asn1.StructuralError{"tags don't match"}
+ }
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ params := new(dsaAlgorithmParameters)
+ _, err = asn1.Unmarshal(paramsData, params)
+ if err != nil {
+ return nil, err
+ }
+ if !rawValueIsInteger(&params.P) ||
+ !rawValueIsInteger(&params.Q) ||
+ !rawValueIsInteger(&params.G) {
+ return nil, asn1.StructuralError{"tags don't match"}
+ }
+ pub := &dsa.PublicKey{
+ Parameters: dsa.Parameters{
+ P: new(big.Int).SetBytes(params.P.Bytes),
+ Q: new(big.Int).SetBytes(params.Q.Bytes),
+ G: new(big.Int).SetBytes(params.G.Bytes),
+ },
+ Y: new(big.Int).SetBytes(p.Bytes),
+ }
+ return pub, nil
default:
return nil, nil
}
-
panic("unreachable")
}
@@ -567,6 +702,7 @@ func parseCertificate(in *certificate) (*Certificate, os.Error) {
out := new(Certificate)
out.Raw = in.Raw
out.RawTBSCertificate = in.TBSCertificate.Raw
+ out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw
out.Signature = in.SignatureValue.RightAlign()
out.SignatureAlgorithm =
@@ -575,7 +711,7 @@ func parseCertificate(in *certificate) (*Certificate, os.Error) {
out.PublicKeyAlgorithm =
getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
var err os.Error
- out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, in.TBSCertificate.PublicKey.PublicKey.RightAlign())
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
if err != nil {
return nil, err
}
@@ -607,13 +743,13 @@ func parseCertificate(in *certificate) (*Certificate, os.Error) {
}
case 19:
// RFC 5280, 4.2.1.9
- var constriants basicConstraints
- _, err := asn1.Unmarshal(e.Value, &constriants)
+ var constraints basicConstraints
+ _, err := asn1.Unmarshal(e.Value, &constraints)
if err == nil {
out.BasicConstraintsValid = true
- out.IsCA = constriants.IsCA
- out.MaxPathLen = constriants.MaxPathLen
+ out.IsCA = constraints.IsCA
+ out.MaxPathLen = constraints.MaxPathLen
continue
}
case 17:
@@ -979,11 +1115,11 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.P
c := tbsCertificate{
Version: 2,
SerialNumber: asn1.RawValue{Bytes: template.SerialNumber, Tag: 2},
- SignatureAlgorithm: algorithmIdentifier{oidSHA1WithRSA},
+ SignatureAlgorithm: algorithmIdentifier{Algorithm: oidSHA1WithRSA},
Issuer: parent.Subject.toRDNSequence(),
Validity: validity{template.NotBefore, template.NotAfter},
Subject: template.Subject.toRDNSequence(),
- PublicKey: publicKeyInfo{algorithmIdentifier{oidRSA}, encodedPublicKey},
+ PublicKey: publicKeyInfo{nil, algorithmIdentifier{Algorithm: oidRSA}, encodedPublicKey},
Extensions: extensions,
}
@@ -1006,7 +1142,7 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.P
cert, err = asn1.Marshal(certificate{
nil,
c,
- algorithmIdentifier{oidSHA1WithRSA},
+ algorithmIdentifier{Algorithm: oidSHA1WithRSA},
asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
})
return
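The x509.go change above replaces hard-coded OID index checks with asn1.ObjectIdentifier values and splits signature checking into the reusable Certificate.CheckSignature, which CheckSignatureFrom and the new CheckCRLSignature both call. A minimal usage sketch of the new entry points, assuming two DER-encoded certificates where the child was issued by the parent (the helper name and variables are illustrative, written in the os.Error style of this tree):

    package main

    import (
        "crypto/x509"
        "os"
    )

    // verifyIssued reports whether the certificate in childDER carries a valid
    // signature from the key in parentDER. Both arguments are raw DER bytes.
    func verifyIssued(childDER, parentDER []byte) os.Error {
        child, err := x509.ParseCertificate(childDER)
        if err != nil {
            return err
        }
        parent, err := x509.ParseCertificate(parentDER)
        if err != nil {
            return err
        }
        // CheckSignatureFrom delegates to parent.CheckSignature, which now
        // accepts DSAWithSHA1 and DSAWithSHA256 as well as the RSA algorithms.
        return child.CheckSignatureFrom(parent)
    }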
diff --git a/src/pkg/crypto/x509/x509_test.go b/src/pkg/crypto/x509/x509_test.go
index a42113add..fd137a6f5 100644
--- a/src/pkg/crypto/x509/x509_test.go
+++ b/src/pkg/crypto/x509/x509_test.go
@@ -7,6 +7,7 @@ package x509
import (
"asn1"
"big"
+ "crypto/dsa"
"crypto/rand"
"crypto/rsa"
"encoding/hex"
@@ -54,6 +55,12 @@ func fromBase10(base10 string) *big.Int {
return i
}
+func bigFromHexString(s string) *big.Int {
+ ret := new(big.Int)
+ ret.SetString(s, 16)
+ return ret
+}
+
var rsaPrivateKey = &rsa.PrivateKey{
PublicKey: rsa.PublicKey{
N: bigFromString("9353930466774385905609975137998169297361893554149986716853295022578535724979677252958524466350471210367835187480748268864277464700638583474144061408845077"),
@@ -245,3 +252,82 @@ func TestCreateSelfSignedCertificate(t *testing.T) {
return
}
}
+
+// Self-signed certificate using DSA with SHA1
+var dsaCertPem = `-----BEGIN CERTIFICATE-----
+MIIEDTCCA82gAwIBAgIJALHPghaoxeDhMAkGByqGSM44BAMweTELMAkGA1UEBhMC
+VVMxCzAJBgNVBAgTAk5DMQ8wDQYDVQQHEwZOZXd0b24xFDASBgNVBAoTC0dvb2ds
+ZSwgSW5jMRIwEAYDVQQDEwlKb24gQWxsaWUxIjAgBgkqhkiG9w0BCQEWE2pvbmFs
+bGllQGdvb2dsZS5jb20wHhcNMTEwNTE0MDMwMTQ1WhcNMTEwNjEzMDMwMTQ1WjB5
+MQswCQYDVQQGEwJVUzELMAkGA1UECBMCTkMxDzANBgNVBAcTBk5ld3RvbjEUMBIG
+A1UEChMLR29vZ2xlLCBJbmMxEjAQBgNVBAMTCUpvbiBBbGxpZTEiMCAGCSqGSIb3
+DQEJARYTam9uYWxsaWVAZ29vZ2xlLmNvbTCCAbcwggEsBgcqhkjOOAQBMIIBHwKB
+gQC8hLUnQ7FpFYu4WXTj6DKvXvz8QrJkNJCVMTpKAT7uBpobk32S5RrPKXocd4gN
+8lyGB9ggS03EVlEwXvSmO0DH2MQtke2jl9j1HLydClMf4sbx5V6TV9IFw505U1iW
+jL7awRMgxge+FsudtJK254FjMFo03ZnOQ8ZJJ9E6AEDrlwIVAJpnBn9moyP11Ox5
+Asc/5dnjb6dPAoGBAJFHd4KVv1iTVCvEG6gGiYop5DJh28hUQcN9kul+2A0yPUSC
+X93oN00P8Vh3eYgSaCWZsha7zDG53MrVJ0Zf6v/X/CoZNhLldeNOepivTRAzn+Rz
+kKUYy5l1sxYLHQKF0UGNCXfFKZT0PCmgU+PWhYNBBMn6/cIh44vp85ideo5CA4GE
+AAKBgFmifCafzeRaohYKXJgMGSEaggCVCRq5xdyDCat+wbOkjC4mfG01/um3G8u5
+LxasjlWRKTR/tcAL7t0QuokVyQaYdVypZXNaMtx1db7YBuHjj3aP+8JOQRI9xz8c
+bp5NDJ5pISiFOv4p3GZfqZPcqckDt78AtkQrmnal2txhhjF6o4HeMIHbMB0GA1Ud
+DgQWBBQVyyr7hO11ZFFpWX50298Sa3V+rzCBqwYDVR0jBIGjMIGggBQVyyr7hO11
+ZFFpWX50298Sa3V+r6F9pHsweTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk5DMQ8w
+DQYDVQQHEwZOZXd0b24xFDASBgNVBAoTC0dvb2dsZSwgSW5jMRIwEAYDVQQDEwlK
+b24gQWxsaWUxIjAgBgkqhkiG9w0BCQEWE2pvbmFsbGllQGdvb2dsZS5jb22CCQCx
+z4IWqMXg4TAMBgNVHRMEBTADAQH/MAkGByqGSM44BAMDLwAwLAIUPtn/5j8Q1jJI
+7ggOIsgrhgUdjGQCFCsmDq1H11q9+9Wp9IMeGrTSKHIM
+-----END CERTIFICATE-----
+`
+
+func TestParseCertificateWithDsaPublicKey(t *testing.T) {
+ expectedKey := &dsa.PublicKey{
+ Parameters: dsa.Parameters{
+ P: bigFromHexString("00BC84B52743B169158BB85974E3E832AF5EFCFC42B264349095313A4A013EEE069A1B937D92E51ACF297A1C77880DF25C8607D8204B4DC45651305EF4A63B40C7D8C42D91EDA397D8F51CBC9D0A531FE2C6F1E55E9357D205C39D395358968CBEDAC11320C607BE16CB9DB492B6E78163305A34DD99CE43C64927D13A0040EB97"),
+ Q: bigFromHexString("009A67067F66A323F5D4EC7902C73FE5D9E36FA74F"),
+ G: bigFromHexString("009147778295BF5893542BC41BA806898A29E43261DBC85441C37D92E97ED80D323D44825FDDE8374D0FF15877798812682599B216BBCC31B9DCCAD527465FEAFFD7FC2A193612E575E34E7A98AF4D10339FE47390A518CB9975B3160B1D0285D1418D0977C52994F43C29A053E3D685834104C9FAFDC221E38BE9F3989D7A8E42"),
+ },
+ Y: bigFromHexString("59A27C269FCDE45AA2160A5C980C19211A820095091AB9C5DC8309AB7EC1B3A48C2E267C6D35FEE9B71BCBB92F16AC8E559129347FB5C00BEEDD10BA8915C90698755CA965735A32DC7575BED806E1E38F768FFBC24E41123DC73F1C6E9E4D0C9E692128853AFE29DC665FA993DCA9C903B7BF00B6442B9A76A5DADC6186317A"),
+ }
+ pemBlock, _ := pem.Decode([]byte(dsaCertPem))
+ cert, err := ParseCertificate(pemBlock.Bytes)
+ if err != nil {
+ t.Fatalf("Failed to parse certificate: %s", err)
+ }
+ if cert.PublicKeyAlgorithm != DSA {
+ t.Errorf("Parsed key algorithm was not DSA")
+ }
+ parsedKey, ok := cert.PublicKey.(*dsa.PublicKey)
+ if !ok {
+ t.Fatalf("Parsed key was not a DSA key: %s", err)
+ }
+ if expectedKey.Y.Cmp(parsedKey.Y) != 0 ||
+ expectedKey.P.Cmp(parsedKey.P) != 0 ||
+ expectedKey.Q.Cmp(parsedKey.Q) != 0 ||
+ expectedKey.G.Cmp(parsedKey.G) != 0 {
+ t.Fatal("Parsed key differs from expected key")
+ }
+}
+
+func TestParseCertificateWithDSASignatureAlgorithm(t *testing.T) {
+ pemBlock, _ := pem.Decode([]byte(dsaCertPem))
+ cert, err := ParseCertificate(pemBlock.Bytes)
+ if err != nil {
+ t.Fatalf("Failed to parse certificate: %s", err)
+ }
+ if cert.SignatureAlgorithm != DSAWithSHA1 {
+ t.Errorf("Parsed signature algorithm was not DSAWithSHA1")
+ }
+}
+
+func TestVerifyCertificateWithDSASignature(t *testing.T) {
+ pemBlock, _ := pem.Decode([]byte(dsaCertPem))
+ cert, err := ParseCertificate(pemBlock.Bytes)
+ if err != nil {
+ t.Fatalf("Failed to parse certificate: %s", err)
+ }
+ // test cert is self-signed
+ if err = cert.CheckSignatureFrom(cert); err != nil {
+ t.Fatalf("DSA Certificate verfication failed: %s", err)
+ }
+}
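The tests above show that ParseCertificate now yields a *dsa.PublicKey for DSA certificates. A small sketch of using that key directly, mirroring the CheckSignature code path: it hashes a message with SHA-1 and checks a DER-encoded r/s pair against the key. The helper and its arguments are illustrative, not part of the package, and production code should also validate the ASN.1 tags the way CheckSignature does.

    package main

    import (
        "asn1"
        "big"
        "crypto/dsa"
        "crypto/sha1"
        "crypto/x509"
        "os"
    )

    // rawDSASignature mirrors the SEQUENCE { r INTEGER, s INTEGER } encoding of
    // a DSA signature, decoded into raw values as x509.CheckSignature does.
    type rawDSASignature struct {
        R, S asn1.RawValue
    }

    // verifyDSASignedMessage checks sig (an ASN.1 DER r/s pair) over msg using
    // the DSA public key found in the certificate certDER.
    func verifyDSASignedMessage(certDER, msg, sig []byte) os.Error {
        cert, err := x509.ParseCertificate(certDER)
        if err != nil {
            return err
        }
        pub, ok := cert.PublicKey.(*dsa.PublicKey)
        if !ok {
            return os.ErrorString("certificate does not hold a DSA key")
        }
        var parsed rawDSASignature
        if _, err := asn1.Unmarshal(sig, &parsed); err != nil {
            return err
        }
        h := sha1.New()
        h.Write(msg)
        r := new(big.Int).SetBytes(parsed.R.Bytes)
        s := new(big.Int).SetBytes(parsed.S.Bytes)
        if !dsa.Verify(pub, h.Sum(), r, s) {
            return os.ErrorString("DSA verification failure")
        }
        return nil
    }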
diff --git a/src/pkg/crypto/xtea/block.go b/src/pkg/crypto/xtea/block.go
index 3ac36d038..bf5d24599 100644
--- a/src/pkg/crypto/xtea/block.go
+++ b/src/pkg/crypto/xtea/block.go
@@ -22,7 +22,7 @@ func blockToUint32(src []byte) (uint32, uint32) {
return r0, r1
}
-// uint32ToBlock writes two unint32s into an 8 byte data block.
+// uint32ToBlock writes two uint32s into an 8 byte data block.
// Values are written as big endian.
func uint32ToBlock(v0, v1 uint32, dst []byte) {
dst[0] = byte(v0 >> 24)
diff --git a/src/pkg/crypto/xtea/xtea_test.go b/src/pkg/crypto/xtea/xtea_test.go
index 03934f169..217d96adc 100644
--- a/src/pkg/crypto/xtea/xtea_test.go
+++ b/src/pkg/crypto/xtea/xtea_test.go
@@ -8,7 +8,7 @@ import (
"testing"
)
-// A sample test key for when we just want to initialise a cipher
+// A sample test key for when we just want to initialize a cipher
var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}
// Test that the block size for XTEA is correct
@@ -26,12 +26,12 @@ func TestBlocksize(t *testing.T) {
result := c.BlockSize()
if result != 8 {
- t.Errorf("BlockSize function - expected 8, gotr %d", result)
+ t.Errorf("BlockSize function - expected 8, got %d", result)
return
}
}
-// A series of test values to confirm that the Cipher.table array was initialised correctly
+// A series of test values to confirm that the Cipher.table array was initialized correctly
var testTable = []uint32{
0x00112233, 0x6B1568B8, 0xE28CE030, 0xC5089E2D, 0xC5089E2D, 0x1EFBD3A2, 0xA7845C2A, 0x78EF0917,
0x78EF0917, 0x172682D0, 0x5B6AC714, 0x822AC955, 0x3DE68511, 0xDC1DFECA, 0x2062430E, 0x3611343F,
@@ -43,7 +43,7 @@ var testTable = []uint32{
0x4E22726F, 0x309E306C, 0x309E306C, 0x8A9165E1, 0x1319EE69, 0xF595AC66, 0xF595AC66, 0x4F88E1DB,
}
-// Test that the cipher context is initialised correctly
+// Test that the cipher context is initialized correctly
func TestCipherInit(t *testing.T) {
c, err := NewCipher(testKey)
if err != nil {
@@ -53,7 +53,7 @@ func TestCipherInit(t *testing.T) {
for i := 0; i < len(c.table); i++ {
if c.table[i] != testTable[i] {
- t.Errorf("NewCipher() failed to initialise Cipher.table[%d] correctly. Expected %08X, got %08X", i, testTable[i], c.table[i])
+ t.Errorf("NewCipher() failed to initialize Cipher.table[%d] correctly. Expected %08X, got %08X", i, testTable[i], c.table[i])
break
}
}
diff --git a/src/pkg/debug/dwarf/type.go b/src/pkg/debug/dwarf/type.go
index 902a545f8..f9acf119f 100644
--- a/src/pkg/debug/dwarf/type.go
+++ b/src/pkg/debug/dwarf/type.go
@@ -523,7 +523,7 @@ func (d *Data) Type(off Offset) (Type, os.Error) {
// Attributes:
// AttrType: type of return value if any
// AttrName: possible name of type [ignored]
- // AttrPrototyped: whether used ANSI C prototye [ignored]
+ // AttrPrototyped: whether used ANSI C prototype [ignored]
// Children:
// TagFormalParameter: typed parameter
// AttrType: type of parameter
diff --git a/src/pkg/debug/macho/file.go b/src/pkg/debug/macho/file.go
index a777d873c..721a4c416 100644
--- a/src/pkg/debug/macho/file.go
+++ b/src/pkg/debug/macho/file.go
@@ -184,7 +184,7 @@ func (f *File) Close() os.Error {
return err
}
-// NewFile creates a new File for acecssing a Mach-O binary in an underlying reader.
+// NewFile creates a new File for accessing a Mach-O binary in an underlying reader.
// The Mach-O binary is expected to start at position 0 in the ReaderAt.
func NewFile(r io.ReaderAt) (*File, os.Error) {
f := new(File)
diff --git a/src/pkg/debug/pe/file.go b/src/pkg/debug/pe/file.go
index 6a14e50f9..04991f781 100644
--- a/src/pkg/debug/pe/file.go
+++ b/src/pkg/debug/pe/file.go
@@ -112,7 +112,7 @@ func (f *File) Close() os.Error {
return err
}
-// NewFile creates a new File for acecssing a PE binary in an underlying reader.
+// NewFile creates a new File for accessing a PE binary in an underlying reader.
func NewFile(r io.ReaderAt) (*File, os.Error) {
f := new(File)
sr := io.NewSectionReader(r, 0, 1<<63-1)
diff --git a/src/pkg/debug/proc/proc_linux.go b/src/pkg/debug/proc/proc_linux.go
index 17c8fa529..153c3e99b 100644
--- a/src/pkg/debug/proc/proc_linux.go
+++ b/src/pkg/debug/proc/proc_linux.go
@@ -277,7 +277,7 @@ func (t *thread) ptraceDetach() os.Error {
}
/*
- * Logging utilties
+ * Logging utilities
*/
var logLock sync.Mutex
@@ -1192,7 +1192,7 @@ func (p *process) attachAllThreads() os.Error {
// We stop threads as we attach to them; however, because new
// threads can appear while we're looping over all of them, we
- // have to repeatly scan until we know we're attached to all
+ // have to repeatedly scan until we know we're attached to all
// of them.
for again := true; again; {
again = false
@@ -1214,7 +1214,7 @@ func (p *process) attachAllThreads() os.Error {
_, err = p.attachThread(tid)
if err != nil {
// There could have been a race, or
- // this process could be a zobmie.
+ // this process could be a zombie.
statFile, err2 := ioutil.ReadFile(taskPath + "/" + tidStr + "/stat")
if err2 != nil {
switch err2 := err2.(type) {
diff --git a/src/pkg/ebnf/ebnf.go b/src/pkg/ebnf/ebnf.go
index 7918c4593..964e1c1b0 100644
--- a/src/pkg/ebnf/ebnf.go
+++ b/src/pkg/ebnf/ebnf.go
@@ -5,7 +5,7 @@
// Package ebnf is a library for EBNF grammars. The input is text ([]byte)
// satisfying the following grammar (represented itself in EBNF):
//
-// Production = name "=" Expression "." .
+// Production = name "=" [ Expression ] "." .
// Expression = Alternative { "|" Alternative } .
// Alternative = Term { Term } .
// Term = name | token [ "..." token ] | Group | Option | Repetition .
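The grammar comment change above makes the Expression in a production optional, so an empty production is now legal input. An illustrative grammar fragment (text only, not taken from the package's tests) that relies on the relaxed rule:

    Start = Empty "x" .
    Empty = .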
diff --git a/src/pkg/encoding/git85/git.go b/src/pkg/encoding/git85/git.go
index 09a45cd3c..6bb74f46c 100644
--- a/src/pkg/encoding/git85/git.go
+++ b/src/pkg/encoding/git85/git.go
@@ -273,5 +273,5 @@ func (d *decoder) Read(p []byte) (n int, err os.Error) {
d.nbuf = copy(d.buf[0:], d.buf[nl+1:d.nbuf])
d.off += int64(nl + 1)
}
- panic("unreacahable")
+ panic("unreachable")
}
diff --git a/src/pkg/encoding/pem/pem.go b/src/pkg/encoding/pem/pem.go
index 44e3d0ad0..c2398807f 100644
--- a/src/pkg/encoding/pem/pem.go
+++ b/src/pkg/encoding/pem/pem.go
@@ -140,7 +140,7 @@ Error:
// any lines which could be header lines. However, a valid preamble
// line is not a valid header line, therefore we cannot have consumed
// the preamble line for the any subsequent block. Thus, we will always
- // find any valid block, no matter what bytes preceed it.
+ // find any valid block, no matter what bytes precede it.
//
// For example, if the input is
//
diff --git a/src/pkg/exp/datafmt/datafmt.go b/src/pkg/exp/datafmt/datafmt.go
index a8efdc58f..10e4b54f9 100644
--- a/src/pkg/exp/datafmt/datafmt.go
+++ b/src/pkg/exp/datafmt/datafmt.go
@@ -594,7 +594,7 @@ func (s *State) eval(fexpr expr, value reflect.Value, index int) bool {
s.eval(t.indent, value, index)
// if the indentation evaluates to nil, the state's output buffer
// didn't change - either way it's ok to append the difference to
- // the current identation
+ // the current indentation
s.indent.Write(s.output.Bytes()[mark.outputLen:s.output.Len()])
s.restore(mark)
diff --git a/src/pkg/exp/draw/draw.go b/src/pkg/exp/draw/draw.go
index 1d0729d92..f98e24618 100644
--- a/src/pkg/exp/draw/draw.go
+++ b/src/pkg/exp/draw/draw.go
@@ -8,7 +8,10 @@
// and the X Render extension.
package draw
-import "image"
+import (
+ "image"
+ "image/ycbcr"
+)
// m is the maximum color value returned by image.Color.RGBA.
const m = 1<<16 - 1
@@ -65,29 +68,42 @@ func DrawMask(dst Image, r image.Rectangle, src image.Image, sp image.Point, mas
if dst0, ok := dst.(*image.RGBA); ok {
if op == Over {
if mask == nil {
- if src0, ok := src.(*image.ColorImage); ok {
+ switch src0 := src.(type) {
+ case *image.ColorImage:
drawFillOver(dst0, r, src0)
return
- }
- if src0, ok := src.(*image.RGBA); ok {
+ case *image.RGBA:
drawCopyOver(dst0, r, src0, sp)
return
+ case *image.NRGBA:
+ drawNRGBAOver(dst0, r, src0, sp)
+ return
+ case *ycbcr.YCbCr:
+ drawYCbCr(dst0, r, src0, sp)
+ return
}
} else if mask0, ok := mask.(*image.Alpha); ok {
- if src0, ok := src.(*image.ColorImage); ok {
+ switch src0 := src.(type) {
+ case *image.ColorImage:
drawGlyphOver(dst0, r, src0, mask0, mp)
return
}
}
} else {
if mask == nil {
- if src0, ok := src.(*image.ColorImage); ok {
+ switch src0 := src.(type) {
+ case *image.ColorImage:
drawFillSrc(dst0, r, src0)
return
- }
- if src0, ok := src.(*image.RGBA); ok {
+ case *image.RGBA:
drawCopySrc(dst0, r, src0, sp)
return
+ case *image.NRGBA:
+ drawNRGBASrc(dst0, r, src0, sp)
+ return
+ case *ycbcr.YCbCr:
+ drawYCbCr(dst0, r, src0, sp)
+ return
}
}
}
@@ -224,6 +240,36 @@ func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.
}
}
+func drawNRGBAOver(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp image.Point) {
+ for y, sy := r.Min.Y, sp.Y; y != r.Max.Y; y, sy = y+1, sy+1 {
+ dpix := dst.Pix[y*dst.Stride : (y+1)*dst.Stride]
+ spix := src.Pix[sy*src.Stride : (sy+1)*src.Stride]
+ for x, sx := r.Min.X, sp.X; x != r.Max.X; x, sx = x+1, sx+1 {
+ // Convert from non-premultiplied color to pre-multiplied color.
+ // The order of operations here is to match the NRGBAColor.RGBA
+ // method in image/color.go.
+ snrgba := spix[sx]
+ sa := uint32(snrgba.A)
+ sr := uint32(snrgba.R) * 0x101 * sa / 0xff
+ sg := uint32(snrgba.G) * 0x101 * sa / 0xff
+ sb := uint32(snrgba.B) * 0x101 * sa / 0xff
+ sa *= 0x101
+
+ rgba := dpix[x]
+ dr := uint32(rgba.R)
+ dg := uint32(rgba.G)
+ db := uint32(rgba.B)
+ da := uint32(rgba.A)
+ a := (m - sa) * 0x101
+ dr = (dr*a + sr*m) / m
+ dg = (dg*a + sg*m) / m
+ db = (db*a + sb*m) / m
+ da = (da*a + sa*m) / m
+ dpix[x] = image.RGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)}
+ }
+ }
+}
+
func drawGlyphOver(dst *image.RGBA, r image.Rectangle, src *image.ColorImage, mask *image.Alpha, mp image.Point) {
x0, x1 := r.Min.X, r.Max.X
y0, y1 := r.Min.Y, r.Max.Y
@@ -311,6 +357,73 @@ func drawCopySrc(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.P
}
}
+func drawNRGBASrc(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp image.Point) {
+ for y, sy := r.Min.Y, sp.Y; y != r.Max.Y; y, sy = y+1, sy+1 {
+ dpix := dst.Pix[y*dst.Stride : (y+1)*dst.Stride]
+ spix := src.Pix[sy*src.Stride : (sy+1)*src.Stride]
+ for x, sx := r.Min.X, sp.X; x != r.Max.X; x, sx = x+1, sx+1 {
+ // Convert from non-premultiplied color to pre-multiplied color.
+ // The order of operations here is to match the NRGBAColor.RGBA
+ // method in image/color.go.
+ snrgba := spix[sx]
+ sa := uint32(snrgba.A)
+ sr := uint32(snrgba.R) * 0x101 * sa / 0xff
+ sg := uint32(snrgba.G) * 0x101 * sa / 0xff
+ sb := uint32(snrgba.B) * 0x101 * sa / 0xff
+ sa *= 0x101
+
+ dpix[x] = image.RGBAColor{uint8(sr >> 8), uint8(sg >> 8), uint8(sb >> 8), uint8(sa >> 8)}
+ }
+ }
+}
+
+func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *ycbcr.YCbCr, sp image.Point) {
+ // A YCbCr image is always fully opaque, and so if the mask is implicitly nil
+ // (i.e. fully opaque) then the op is effectively always Src.
+ var (
+ yy, cb, cr uint8
+ rr, gg, bb uint8
+ )
+ switch src.SubsampleRatio {
+ case ycbcr.SubsampleRatio422:
+ for y, sy := r.Min.Y, sp.Y; y != r.Max.Y; y, sy = y+1, sy+1 {
+ dpix := dst.Pix[y*dst.Stride : (y+1)*dst.Stride]
+ for x, sx := r.Min.X, sp.X; x != r.Max.X; x, sx = x+1, sx+1 {
+ i := sx / 2
+ yy = src.Y[sy*src.YStride+sx]
+ cb = src.Cb[sy*src.CStride+i]
+ cr = src.Cr[sy*src.CStride+i]
+ rr, gg, bb = ycbcr.YCbCrToRGB(yy, cb, cr)
+ dpix[x] = image.RGBAColor{rr, gg, bb, 255}
+ }
+ }
+ case ycbcr.SubsampleRatio420:
+ for y, sy := r.Min.Y, sp.Y; y != r.Max.Y; y, sy = y+1, sy+1 {
+ dpix := dst.Pix[y*dst.Stride : (y+1)*dst.Stride]
+ for x, sx := r.Min.X, sp.X; x != r.Max.X; x, sx = x+1, sx+1 {
+ i, j := sx/2, sy/2
+ yy = src.Y[sy*src.YStride+sx]
+ cb = src.Cb[j*src.CStride+i]
+ cr = src.Cr[j*src.CStride+i]
+ rr, gg, bb = ycbcr.YCbCrToRGB(yy, cb, cr)
+ dpix[x] = image.RGBAColor{rr, gg, bb, 255}
+ }
+ }
+ default:
+ // Default to 4:4:4 subsampling.
+ for y, sy := r.Min.Y, sp.Y; y != r.Max.Y; y, sy = y+1, sy+1 {
+ dpix := dst.Pix[y*dst.Stride : (y+1)*dst.Stride]
+ for x, sx := r.Min.X, sp.X; x != r.Max.X; x, sx = x+1, sx+1 {
+ yy = src.Y[sy*src.YStride+sx]
+ cb = src.Cb[sy*src.CStride+sx]
+ cr = src.Cr[sy*src.CStride+sx]
+ rr, gg, bb = ycbcr.YCbCrToRGB(yy, cb, cr)
+ dpix[x] = image.RGBAColor{rr, gg, bb, 255}
+ }
+ }
+ }
+}
+
func drawRGBA(dst *image.RGBA, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op) {
x0, x1, dx := r.Min.X, r.Max.X, 1
y0, y1, dy := r.Min.Y, r.Max.Y, 1
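The new drawNRGBAOver and drawNRGBASrc loops first convert each non-premultiplied NRGBA pixel into the premultiplied 16-bit form the rest of the compositor works in. A standalone sketch of that per-pixel conversion (not part of the package; the helper name is illustrative), reproducing the arithmetic used in the loops above:

    package main

    import "fmt"

    // premultiply converts one 8-bit non-premultiplied NRGBA pixel into the
    // 16-bit premultiplied channels used by the compositing loops: each color
    // channel is scaled to 16 bits (x0x101) and multiplied by alpha/0xff.
    func premultiply(r8, g8, b8, a8 uint8) (r, g, b, a uint32) {
        a = uint32(a8)
        r = uint32(r8) * 0x101 * a / 0xff
        g = uint32(g8) * 0x101 * a / 0xff
        b = uint32(b8) * 0x101 * a / 0xff
        a *= 0x101
        return
    }

    func main() {
        // The draw_test.go source pixel {0, 136, 0, 90} collapses back to
        // roughly {0, 48, 0, 90} in 8-bit premultiplied form (48 = 136*90/255,
        // rounded down).
        r, g, b, a := premultiply(0, 136, 0, 90)
        fmt.Println(r>>8, g>>8, b>>8, a>>8) // prints: 0 48 0 90
    }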
diff --git a/src/pkg/exp/draw/draw_test.go b/src/pkg/exp/draw/draw_test.go
index 90c9e823d..873a2f24a 100644
--- a/src/pkg/exp/draw/draw_test.go
+++ b/src/pkg/exp/draw/draw_test.go
@@ -6,6 +6,7 @@ package draw
import (
"image"
+ "image/ycbcr"
"testing"
)
@@ -43,6 +44,34 @@ func vgradAlpha(alpha int) image.Image {
return m
}
+func vgradGreenNRGBA(alpha int) image.Image {
+ m := image.NewNRGBA(16, 16)
+ for y := 0; y < 16; y++ {
+ for x := 0; x < 16; x++ {
+ m.Set(x, y, image.RGBAColor{0, uint8(y * 0x11), 0, uint8(alpha)})
+ }
+ }
+ return m
+}
+
+func vgradCr() image.Image {
+ m := &ycbcr.YCbCr{
+ Y: make([]byte, 16*16),
+ Cb: make([]byte, 16*16),
+ Cr: make([]byte, 16*16),
+ YStride: 16,
+ CStride: 16,
+ SubsampleRatio: ycbcr.SubsampleRatio444,
+ Rect: image.Rect(0, 0, 16, 16),
+ }
+ for y := 0; y < 16; y++ {
+ for x := 0; x < 16; x++ {
+ m.Cr[y*m.CStride+x] = uint8(y * 0x11)
+ }
+ }
+ return m
+}
+
func hgradRed(alpha int) Image {
m := image.NewRGBA(16, 16)
for y := 0; y < 16; y++ {
@@ -95,6 +124,27 @@ var drawTests = []drawTest{
{"copyAlphaSrc", vgradGreen(90), fillAlpha(192), Src, image.RGBAColor{0, 36, 0, 68}},
{"copyNil", vgradGreen(90), nil, Over, image.RGBAColor{88, 48, 0, 255}},
{"copyNilSrc", vgradGreen(90), nil, Src, image.RGBAColor{0, 48, 0, 90}},
+ // Uniform mask (100%, 75%, nil) and variable NRGBA source.
+ // At (x, y) == (8, 8):
+ // The destination pixel is {136, 0, 0, 255}.
+ // The source pixel is {0, 136, 0, 90} in NRGBA-space, which is {0, 48, 0, 90} in RGBA-space.
+ // The result pixel is different from that in the "copy*" test cases because of rounding errors.
+ {"nrgba", vgradGreenNRGBA(90), fillAlpha(255), Over, image.RGBAColor{88, 46, 0, 255}},
+ {"nrgbaSrc", vgradGreenNRGBA(90), fillAlpha(255), Src, image.RGBAColor{0, 46, 0, 90}},
+ {"nrgbaAlpha", vgradGreenNRGBA(90), fillAlpha(192), Over, image.RGBAColor{100, 34, 0, 255}},
+ {"nrgbaAlphaSrc", vgradGreenNRGBA(90), fillAlpha(192), Src, image.RGBAColor{0, 34, 0, 68}},
+ {"nrgbaNil", vgradGreenNRGBA(90), nil, Over, image.RGBAColor{88, 46, 0, 255}},
+ {"nrgbaNilSrc", vgradGreenNRGBA(90), nil, Src, image.RGBAColor{0, 46, 0, 90}},
+ // Uniform mask (100%, 75%, nil) and variable YCbCr source.
+ // At (x, y) == (8, 8):
+ // The destination pixel is {136, 0, 0, 255}.
+ // The source pixel is {0, 0, 136} in YCbCr-space, which is {11, 38, 0, 255} in RGBA-space.
+ {"ycbcr", vgradCr(), fillAlpha(255), Over, image.RGBAColor{11, 38, 0, 255}},
+ {"ycbcrSrc", vgradCr(), fillAlpha(255), Src, image.RGBAColor{11, 38, 0, 255}},
+ {"ycbcrAlpha", vgradCr(), fillAlpha(192), Over, image.RGBAColor{42, 28, 0, 255}},
+ {"ycbcrAlphaSrc", vgradCr(), fillAlpha(192), Src, image.RGBAColor{8, 28, 0, 192}},
+ {"ycbcrNil", vgradCr(), nil, Over, image.RGBAColor{11, 38, 0, 255}},
+ {"ycbcrNilSrc", vgradCr(), nil, Src, image.RGBAColor{11, 38, 0, 255}},
// Variable mask and variable source.
// At (x, y) == (8, 8):
// The destination pixel is {136, 0, 0, 255}.
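The expected values in the new "ycbcr" test cases come straight from the color conversion used by drawYCbCr. A tiny check of the (8, 8) sample, whose source triple is {Y:0, Cb:0, Cr:136} (standalone sketch; the import path matches the one used in this diff):

    package main

    import (
        "fmt"
        "image/ycbcr"
    )

    func main() {
        // Converting the test's YCbCr sample should reproduce the expected RGB
        // values {11, 38, 0}; alpha is 255 since YCbCr images are fully opaque.
        r, g, b := ycbcr.YCbCrToRGB(0, 0, 136)
        fmt.Println(r, g, b)
    }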
diff --git a/src/pkg/exp/draw/x11/conn.go b/src/pkg/exp/draw/x11/conn.go
index 81c67267d..23edc2c63 100644
--- a/src/pkg/exp/draw/x11/conn.go
+++ b/src/pkg/exp/draw/x11/conn.go
@@ -310,7 +310,7 @@ func authenticate(w *bufio.Writer, displayStr string) os.Error {
return os.NewError("unsupported Xauth")
}
// 0x006c means little-endian. 0x000b, 0x0000 means X major version 11, minor version 0.
- // 0x0012 and 0x0010 means the auth key and value have lenths 18 and 16.
+ // 0x0012 and 0x0010 means the auth key and value have lengths 18 and 16.
// The final 0x0000 is padding, so that the string length is a multiple of 4.
_, err = io.WriteString(w, "\x6c\x00\x0b\x00\x00\x00\x12\x00\x10\x00\x00\x00")
if err != nil {
@@ -517,7 +517,7 @@ func (c *conn) handshake() os.Error {
if err != nil {
return err
}
- // Ignore some things that we don't care about (totalling 10 + vendorLen bytes):
+ // Ignore some things that we don't care about (totaling 10 + vendorLen bytes):
// imageByteOrder(1), bitmapFormatBitOrder(1), bitmapFormatScanlineUnit(1) bitmapFormatScanlinePad(1),
// minKeycode(1), maxKeycode(1), padding(4), vendor (vendorLen).
if 10+int(vendorLen) > cap(c.buf) {
diff --git a/src/pkg/exp/eval/expr.go b/src/pkg/exp/eval/expr.go
index e65f47617..14a0659b6 100644
--- a/src/pkg/exp/eval/expr.go
+++ b/src/pkg/exp/eval/expr.go
@@ -1781,7 +1781,7 @@ func (a *exprInfo) compileBinaryExpr(op token.Token, l, r *expr) *expr {
// written: Function values are equal if they were
// created by the same execution of a function literal
// or refer to the same function declaration. This is
- // *almost* but not quite waht 6g implements. If a
+ // *almost* but not quite what 6g implements. If a
// function literals does not capture any variables,
// then multiple executions of it will result in the
// same closure. Russ says he'll change that.
diff --git a/src/pkg/exp/eval/stmt.go b/src/pkg/exp/eval/stmt.go
index f6b7c1cda..57bf20e6f 100644
--- a/src/pkg/exp/eval/stmt.go
+++ b/src/pkg/exp/eval/stmt.go
@@ -68,7 +68,7 @@ type flowBuf struct {
gotos map[token.Pos]*flowBlock
// labels is a map from label name to information on the block
// at the point of the label. labels are tracked by name,
- // since mutliple labels at the same PC can have different
+ // since multiple labels at the same PC can have different
// blocks.
labels map[string]*flowBlock
}
@@ -307,7 +307,7 @@ func (a *stmtCompiler) compile(s ast.Stmt) {
}
if notimpl {
- a.diag("%T statment node not implemented", s)
+ a.diag("%T statement node not implemented", s)
}
if a.block.inner != nil {
@@ -550,7 +550,7 @@ func (a *stmtCompiler) doAssign(lhs []ast.Expr, rhs []ast.Expr, tok token.Token,
ident, ok = le.(*ast.Ident)
if !ok {
a.diagAt(le.Pos(), "left side of := must be a name")
- // Suppress new defitions errors
+ // Suppress new definitions errors
nDefs++
continue
}
diff --git a/src/pkg/exp/eval/typec.go b/src/pkg/exp/eval/typec.go
index de90cf664..0ed24a8d2 100644
--- a/src/pkg/exp/eval/typec.go
+++ b/src/pkg/exp/eval/typec.go
@@ -68,7 +68,7 @@ func (a *typeCompiler) compileArrayType(x *ast.ArrayType, allowRec bool) Type {
}
if _, ok := x.Len.(*ast.Ellipsis); ok {
- a.diagAt(x.Len.Pos(), "... array initailizers not implemented")
+ a.diagAt(x.Len.Pos(), "... array initializers not implemented")
return nil
}
l, ok := a.compileArrayLen(a.block, x.Len)
diff --git a/src/pkg/exp/wingui/Makefile b/src/pkg/exp/wingui/Makefile
index 983a8270b..e382c019f 100644
--- a/src/pkg/exp/wingui/Makefile
+++ b/src/pkg/exp/wingui/Makefile
@@ -18,7 +18,7 @@ GOFILES=\
include ../../../Make.cmd
zwinapi.go: winapi.go
- $(GOROOT)/src/pkg/syscall/mksyscall_windows.sh $< \
+ $(GOROOT)/src/pkg/syscall/mksyscall_windows.pl $< \
| sed 's/^package.*syscall$$/package main/' \
| sed '/^import/a \
import "syscall"' \
diff --git a/src/pkg/exp/wingui/winapi.go b/src/pkg/exp/wingui/winapi.go
index c96f45299..fb0d61009 100644
--- a/src/pkg/exp/wingui/winapi.go
+++ b/src/pkg/exp/wingui/winapi.go
@@ -96,7 +96,7 @@ const (
// Some button control styles
BS_DEFPUSHBUTTON = 1
- // Some colour constants
+ // Some color constants
COLOR_WINDOW = 5
COLOR_BTNFACE = 15
@@ -108,13 +108,13 @@ const (
)
var (
- // Some globaly known cusrors
+ // Some globally known cursors
IDC_ARROW = MakeIntResource(32512)
IDC_IBEAM = MakeIntResource(32513)
IDC_WAIT = MakeIntResource(32514)
IDC_CROSS = MakeIntResource(32515)
- // Some globaly known icons
+ // Some globally known icons
IDI_APPLICATION = MakeIntResource(32512)
IDI_HAND = MakeIntResource(32513)
IDI_QUESTION = MakeIntResource(32514)
diff --git a/src/pkg/exp/wingui/zwinapi.go b/src/pkg/exp/wingui/zwinapi.go
index 60aaac6cf..6ae6330a1 100644
--- a/src/pkg/exp/wingui/zwinapi.go
+++ b/src/pkg/exp/wingui/zwinapi.go
@@ -1,4 +1,4 @@
-// mksyscall_windows.sh winapi.go
+// mksyscall_windows.pl winapi.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package main
diff --git a/src/pkg/expvar/expvar_test.go b/src/pkg/expvar/expvar_test.go
index 94926d9f8..8f7a48168 100644
--- a/src/pkg/expvar/expvar_test.go
+++ b/src/pkg/expvar/expvar_test.go
@@ -76,33 +76,33 @@ func TestString(t *testing.T) {
}
func TestMapCounter(t *testing.T) {
- colours := NewMap("bike-shed-colours")
+ colors := NewMap("bike-shed-colors")
- colours.Add("red", 1)
- colours.Add("red", 2)
- colours.Add("blue", 4)
- colours.AddFloat("green", 4.125)
- if x := colours.m["red"].(*Int).i; x != 3 {
- t.Errorf("colours.m[\"red\"] = %v, want 3", x)
+ colors.Add("red", 1)
+ colors.Add("red", 2)
+ colors.Add("blue", 4)
+ colors.AddFloat("green", 4.125)
+ if x := colors.m["red"].(*Int).i; x != 3 {
+ t.Errorf("colors.m[\"red\"] = %v, want 3", x)
}
- if x := colours.m["blue"].(*Int).i; x != 4 {
- t.Errorf("colours.m[\"blue\"] = %v, want 4", x)
+ if x := colors.m["blue"].(*Int).i; x != 4 {
+ t.Errorf("colors.m[\"blue\"] = %v, want 4", x)
}
- if x := colours.m["green"].(*Float).f; x != 4.125 {
- t.Errorf("colours.m[\"green\"] = %v, want 3.14", x)
+ if x := colors.m["green"].(*Float).f; x != 4.125 {
+ t.Errorf("colors.m[\"green\"] = %v, want 3.14", x)
}
- // colours.String() should be '{"red":3, "blue":4}',
+ // colors.String() should be '{"red":3, "blue":4}',
// though the order of red and blue could vary.
- s := colours.String()
+ s := colors.String()
var j interface{}
err := json.Unmarshal([]byte(s), &j)
if err != nil {
- t.Errorf("colours.String() isn't valid JSON: %v", err)
+ t.Errorf("colors.String() isn't valid JSON: %v", err)
}
m, ok := j.(map[string]interface{})
if !ok {
- t.Error("colours.String() didn't produce a map.")
+ t.Error("colors.String() didn't produce a map.")
}
red := m["red"]
x, ok := red.(float64)
diff --git a/src/pkg/flag/export_test.go b/src/pkg/flag/export_test.go
index b5e3243b3..7b190807a 100644
--- a/src/pkg/flag/export_test.go
+++ b/src/pkg/flag/export_test.go
@@ -9,24 +9,14 @@ import "os"
// Additional routines compiled into the package only during testing.
// ResetForTesting clears all flag state and sets the usage function as directed.
-// After calling ResetForTesting, parse errors in flag handling will panic rather
-// than exit the program.
+// After calling ResetForTesting, parse errors in flag handling will not
+// exit the program.
func ResetForTesting(usage func()) {
- flags = &allFlags{make(map[string]*Flag), make(map[string]*Flag), os.Args[1:]}
+ commandLine = NewFlagSet(os.Args[0], ContinueOnError)
Usage = usage
- panicOnError = true
}
-// ParseForTesting parses the flag state using the provided arguments. It
-// should be called after 1) ResetForTesting and 2) setting up the new flags.
-// The return value reports whether the parse was error-free.
-func ParseForTesting(args []string) (result bool) {
- defer func() {
- if recover() != nil {
- result = false
- }
- }()
- os.Args = args
- Parse()
- return true
+// CommandLine returns the default FlagSet.
+func CommandLine() *FlagSet {
+ return commandLine
}
diff --git a/src/pkg/flag/flag.go b/src/pkg/flag/flag.go
index 9ed20e06b..e5d2f94e9 100644
--- a/src/pkg/flag/flag.go
+++ b/src/pkg/flag/flag.go
@@ -50,18 +50,12 @@
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
- It is safe to call flag.Parse multiple times, possibly after changing
- os.Args. This makes it possible to implement command lines with
- subcommands that enable additional flags, as in:
-
- flag.Bool(...) // global options
- flag.Parse() // parse leading command
- subcmd := flag.Arg(0)
- switch subcmd {
- // add per-subcommand options
- }
- os.Args = flag.Args()
- flag.Parse()
+ The default set of command-line flags is controlled by
+ top-level functions. The FlagSet type allows one to define
+ independent sets of flags, such as to implement subcommands
+ in a command-line interface. The methods of FlagSet are
+ analogous to the top-level functions for the command-line
+ flag set.
*/
package flag
@@ -190,6 +184,30 @@ type Value interface {
Set(string) bool
}
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ ContinueOnError ErrorHandling = iota
+ ExitOnError
+ PanicOnError
+)
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ name string
+ actual map[string]*Flag
+ formal map[string]*Flag
+ args []string // arguments after flags
+ exitOnError bool // does the program exit if there's an error?
+ errorHandling ErrorHandling
+}
+
// A Flag represents the state of a flag.
type Flag struct {
Name string // name as it appears on command line
@@ -198,14 +216,6 @@ type Flag struct {
DefValue string // default value (as text); for usage message
}
-type allFlags struct {
- actual map[string]*Flag
- formal map[string]*Flag
- args []string // arguments after flags
-}
-
-var flags *allFlags
-
// sortFlags returns the flags as a slice in lexicographical sorted order.
func sortFlags(flags map[string]*Flag) []*Flag {
list := make(sort.StringArray, len(flags))
@@ -224,43 +234,67 @@ func sortFlags(flags map[string]*Flag) []*Flag {
// VisitAll visits the flags in lexicographical order, calling fn for each.
// It visits all flags, even those not set.
-func VisitAll(fn func(*Flag)) {
- for _, f := range sortFlags(flags.formal) {
- fn(f)
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.formal) {
+ fn(flag)
}
}
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ commandLine.VisitAll(fn)
+}
+
// Visit visits the flags in lexicographical order, calling fn for each.
// It visits only those flags that have been set.
-func Visit(fn func(*Flag)) {
- for _, f := range sortFlags(flags.actual) {
- fn(f)
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.actual) {
+ fn(flag)
}
}
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ commandLine.Visit(fn)
+}
+
// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.formal[name]
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
func Lookup(name string) *Flag {
- return flags.formal[name]
+ return commandLine.formal[name]
}
// Set sets the value of the named flag. It returns true if the set succeeded; false if
// there is no such flag defined.
-func Set(name, value string) bool {
- f, ok := flags.formal[name]
+func (f *FlagSet) Set(name, value string) bool {
+ flag, ok := f.formal[name]
if !ok {
return false
}
- ok = f.Value.Set(value)
+ ok = flag.Value.Set(value)
if !ok {
return false
}
- flags.actual[name] = f
+ f.actual[name] = flag
return true
}
-// PrintDefaults prints to standard error the default values of all defined flags.
-func PrintDefaults() {
- VisitAll(func(f *Flag) {
+// Set sets the value of the named command-line flag. It returns true if the
+// set succeeded; false if there is no such flag defined.
+func Set(name, value string) bool {
+ return commandLine.Set(name, value)
+}
+
+// PrintDefaults prints to standard error the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ f.VisitAll(func(f *Flag) {
format := " -%s=%s: %s\n"
if _, ok := f.Value.(*stringValue); ok {
// put quotes on the value
@@ -270,174 +304,298 @@ func PrintDefaults() {
})
}
-// Usage prints to standard error a default usage message documenting all defined flags.
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ commandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
// The function is a variable that may be changed to point to a custom function.
var Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
- PrintDefaults()
+ defaultUsage(commandLine)
}
-var panicOnError = false
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
-// failf prints to standard error a formatted error and Usage, and then exits the program.
-func failf(format string, a ...interface{}) {
- fmt.Fprintf(os.Stderr, format, a...)
- Usage()
- if panicOnError {
- panic("flag parse error")
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(commandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
}
- os.Exit(2)
+ return f.args[i]
}
-// NFlag returns the number of flags that have been set.
-func NFlag() int { return len(flags.actual) }
-
// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
// after flags have been processed.
func Arg(i int) string {
- if i < 0 || i >= len(flags.args) {
- return ""
- }
- return flags.args[i]
+ return commandLine.Arg(i)
}
// NArg is the number of arguments remaining after flags have been processed.
-func NArg() int { return len(flags.args) }
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(commandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
// Args returns the non-flag command-line arguments.
-func Args() []string { return flags.args }
+func Args() []string { return commandLine.args }
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.Var(newBoolValue(value, p), name, usage)
+}
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
func BoolVar(p *bool, name string, value bool, usage string) {
- Var(newBoolValue(value, p), name, usage)
+ commandLine.Var(newBoolValue(value, p), name, usage)
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
-func Bool(name string, value bool, usage string) *bool {
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
p := new(bool)
- BoolVar(p, name, value, usage)
+ f.BoolVar(p, name, value, usage)
return p
}
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return commandLine.Bool(name, value, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.Var(newIntValue(value, p), name, usage)
+}
+
// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func IntVar(p *int, name string, value int, usage string) {
- Var(newIntValue(value, p), name, usage)
+ commandLine.Var(newIntValue(value, p), name, usage)
}
// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
-func Int(name string, value int, usage string) *int {
+func (f *FlagSet) Int(name string, value int, usage string) *int {
p := new(int)
- IntVar(p, name, value, usage)
+ f.IntVar(p, name, value, usage)
return p
}
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return commandLine.Int(name, value, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.Var(newInt64Value(value, p), name, usage)
+}
+
// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func Int64Var(p *int64, name string, value int64, usage string) {
- Var(newInt64Value(value, p), name, usage)
+ commandLine.Var(newInt64Value(value, p), name, usage)
}
// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
-func Int64(name string, value int64, usage string) *int64 {
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
p := new(int64)
- Int64Var(p, name, value, usage)
+ f.Int64Var(p, name, value, usage)
return p
}
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return commandLine.Int64(name, value, usage)
+}
+
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.Var(newUintValue(value, p), name, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
func UintVar(p *uint, name string, value uint, usage string) {
- Var(newUintValue(value, p), name, usage)
+ commandLine.Var(newUintValue(value, p), name, usage)
}
// Uint defines a uint flag with specified name, default value, and usage string.
-// The return value is the address of a uint variable that stores the value of the flag.
-func Uint(name string, value uint, usage string) *uint {
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
p := new(uint)
- UintVar(p, name, value, usage)
+ f.UintVar(p, name, value, usage)
return p
}
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return commandLine.Uint(name, value, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.Var(newUint64Value(value, p), name, usage)
+}
+
// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func Uint64Var(p *uint64, name string, value uint64, usage string) {
- Var(newUint64Value(value, p), name, usage)
+ commandLine.Var(newUint64Value(value, p), name, usage)
}
// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
-func Uint64(name string, value uint64, usage string) *uint64 {
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
p := new(uint64)
- Uint64Var(p, name, value, usage)
+ f.Uint64Var(p, name, value, usage)
return p
}
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return commandLine.Uint64(name, value, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.Var(newStringValue(value, p), name, usage)
+}
+
// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
-func StringVar(p *string, name, value string, usage string) {
- Var(newStringValue(value, p), name, usage)
+func StringVar(p *string, name string, value string, usage string) {
+ commandLine.Var(newStringValue(value, p), name, usage)
}
// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
-func String(name, value string, usage string) *string {
+func (f *FlagSet) String(name string, value string, usage string) *string {
p := new(string)
- StringVar(p, name, value, usage)
+ f.StringVar(p, name, value, usage)
return p
}
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return commandLine.String(name, value, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.Var(newFloat64Value(value, p), name, usage)
+}
+
// Float64Var defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
func Float64Var(p *float64, name string, value float64, usage string) {
- Var(newFloat64Value(value, p), name, usage)
+ commandLine.Var(newFloat64Value(value, p), name, usage)
}
// Float64 defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
-func Float64(name string, value float64, usage string) *float64 {
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
p := new(float64)
- Float64Var(p, name, value, usage)
+ f.Float64Var(p, name, value, usage)
return p
}
-// Var defines a user-typed flag with specified name, default value, and usage string.
-// The argument p points to a Value variable in which to store the value of the flag.
-func Var(value Value, name string, usage string) {
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return commandLine.Float64(name, value, usage)
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
// Remember the default value as a string; it won't change.
- f := &Flag{name, usage, value, value.String()}
- _, alreadythere := flags.formal[name]
+ flag := &Flag{name, usage, value, value.String()}
+ _, alreadythere := f.formal[name]
if alreadythere {
- fmt.Fprintln(os.Stderr, "flag redefined:", name)
+ fmt.Fprintf(os.Stderr, "%s flag redefined: %s\n", f.name, name)
panic("flag redefinition") // Happens only if flags are declared with identical names
}
- flags.formal[name] = f
+ f.formal[name] = flag
}
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ commandLine.Var(value, name, usage)
+}
-func (f *allFlags) parseOne() (ok bool) {
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) os.Error {
+ err := fmt.Errorf(format, a...)
+ fmt.Fprintln(os.Stderr, err)
+ if f == commandLine {
+ Usage()
+ } else {
+ f.Usage()
+ }
+ return err
+}
+
+// parseOne parses one flag. It returns whether a flag was seen.
+func (f *FlagSet) parseOne() (bool, os.Error) {
if len(f.args) == 0 {
- return false
+ return false, nil
}
s := f.args[0]
if len(s) == 0 || s[0] != '-' || len(s) == 1 {
- return false
+ return false, nil
}
num_minuses := 1
if s[1] == '-' {
num_minuses++
if len(s) == 2 { // "--" terminates the flags
f.args = f.args[1:]
- return false
+ return false, nil
}
}
name := s[num_minuses:]
if len(name) == 0 || name[0] == '-' || name[0] == '=' {
- failf("bad flag syntax: %s\n", s)
+ return false, f.failf("bad flag syntax: %s", s)
}
// it's a flag. does it have an argument?
@@ -452,15 +610,15 @@ func (f *allFlags) parseOne() (ok bool) {
break
}
}
- m := flags.formal
+ m := f.formal
flag, alreadythere := m[name] // BUG
if !alreadythere {
- failf("flag provided but not defined: -%s\n", name)
+ return false, f.failf("flag provided but not defined: -%s", name)
}
if fv, ok := flag.Value.(*boolValue); ok { // special case: doesn't need an arg
if has_value {
if !fv.Set(value) {
- failf("invalid boolean value %q for flag: -%s\n", value, name)
+ f.failf("invalid boolean value %q for flag: -%s", value, name)
}
} else {
fv.Set("true")
@@ -473,25 +631,61 @@ func (f *allFlags) parseOne() (ok bool) {
value, f.args = f.args[0], f.args[1:]
}
if !has_value {
- failf("flag needs an argument: -%s\n", name)
+ return false, f.failf("flag needs an argument: -%s", name)
}
ok = flag.Value.Set(value)
if !ok {
- failf("invalid value %q for flag: -%s\n", value, name)
+ return false, f.failf("invalid value %q for flag: -%s", value, name)
}
}
- flags.actual[name] = flag
- return true
+ f.actual[name] = flag
+ return true, nil
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+func (f *FlagSet) Parse(arguments []string) os.Error {
+ f.args = arguments
+ for {
+ seen, err := f.parseOne()
+ if seen {
+ continue
+ }
+ if err == nil {
+ break
+ }
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
}
-// Parse parses the command-line flags. Must be called after all flags are defined
-// and before any are accessed by the program.
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
func Parse() {
- flags.args = os.Args[1:]
- for flags.parseOne() {
- }
+ // Ignore errors; commandLine is set for ExitOnError.
+ commandLine.Parse(os.Args[1:])
}
-func init() {
- flags = &allFlags{make(map[string]*Flag), make(map[string]*Flag), os.Args[1:]}
+// The default set of command-line flags, parsed from os.Args.
+var commandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ actual: make(map[string]*Flag),
+ formal: make(map[string]*Flag),
+ errorHandling: errorHandling,
+ }
+ f.Usage = func() { defaultUsage(f) }
+ return f
}
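
As a usage sketch for the FlagSet API introduced above (the "subcmd" name and the -v flag are illustrative): a program can give a subcommand its own flag set and, by choosing ContinueOnError, handle parse failures itself instead of letting the package call os.Exit:

    package main

    import (
    	"flag"
    	"fmt"
    	"os"
    )

    func main() {
    	// With ContinueOnError, Parse returns an os.Error instead of exiting.
    	fs := flag.NewFlagSet("subcmd", flag.ContinueOnError)
    	verbose := fs.Bool("v", false, "verbose output")
    	if err := fs.Parse(os.Args[1:]); err != nil {
    		os.Exit(2) // the error and usage message were already printed
    	}
    	if *verbose {
    		fmt.Println("remaining args:", fs.Args())
    	}
    }
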
diff --git a/src/pkg/flag/flag_test.go b/src/pkg/flag/flag_test.go
index 1e47d12e4..fbd706921 100644
--- a/src/pkg/flag/flag_test.go
+++ b/src/pkg/flag/flag_test.go
@@ -89,7 +89,7 @@ func TestEverything(t *testing.T) {
func TestUsage(t *testing.T) {
called := false
ResetForTesting(func() { called = true })
- if ParseForTesting([]string{"a.out", "-x"}) {
+ if CommandLine().Parse([]string{"-x"}) == nil {
t.Error("parse did not fail for unknown flag")
}
if !called {
@@ -97,19 +97,17 @@ func TestUsage(t *testing.T) {
}
}
-func TestParse(t *testing.T) {
- ResetForTesting(func() { t.Error("bad parse") })
- boolFlag := Bool("bool", false, "bool value")
- bool2Flag := Bool("bool2", false, "bool2 value")
- intFlag := Int("int", 0, "int value")
- int64Flag := Int64("int64", 0, "int64 value")
- uintFlag := Uint("uint", 0, "uint value")
- uint64Flag := Uint64("uint64", 0, "uint64 value")
- stringFlag := String("string", "0", "string value")
- float64Flag := Float64("float64", 0, "float64 value")
+func testParse(f *FlagSet, t *testing.T) {
+ boolFlag := f.Bool("bool", false, "bool value")
+ bool2Flag := f.Bool("bool2", false, "bool2 value")
+ intFlag := f.Int("int", 0, "int value")
+ int64Flag := f.Int64("int64", 0, "int64 value")
+ uintFlag := f.Uint("uint", 0, "uint value")
+ uint64Flag := f.Uint64("uint64", 0, "uint64 value")
+ stringFlag := f.String("string", "0", "string value")
+ float64Flag := f.Float64("float64", 0, "float64 value")
extra := "one-extra-argument"
args := []string{
- "a.out",
"-bool",
"-bool2=true",
"--int", "22",
@@ -120,8 +118,8 @@ func TestParse(t *testing.T) {
"-float64", "2718e28",
extra,
}
- if !ParseForTesting(args) {
- t.Fatal("parse failed")
+ if err := f.Parse(args); err != nil {
+ t.Fatal(err)
}
if *boolFlag != true {
t.Error("bool flag should be true, is ", *boolFlag)
@@ -147,14 +145,23 @@ func TestParse(t *testing.T) {
if *float64Flag != 2718e28 {
t.Error("float64 flag should be 2718e28, is ", *float64Flag)
}
- if len(Args()) != 1 {
- t.Error("expected one argument, got", len(Args()))
- } else if Args()[0] != extra {
- t.Errorf("expected argument %q got %q", extra, Args()[0])
+ if len(f.Args()) != 1 {
+ t.Error("expected one argument, got", len(f.Args()))
+ } else if f.Args()[0] != extra {
+ t.Errorf("expected argument %q got %q", extra, f.Args()[0])
}
}
-// Declare a user-defined flag.
+func TestParse(t *testing.T) {
+ ResetForTesting(func() { t.Error("bad parse") })
+ testParse(CommandLine(), t)
+}
+
+func TestFlagSetParse(t *testing.T) {
+ testParse(NewFlagSet("test", ContinueOnError), t)
+}
+
+// Declare a user-defined flag type.
type flagVar []string
func (f *flagVar) String() string {
@@ -167,11 +174,11 @@ func (f *flagVar) Set(value string) bool {
}
func TestUserDefined(t *testing.T) {
- ResetForTesting(func() { t.Fatal("bad parse") })
+ flags := NewFlagSet("test", ContinueOnError)
var v flagVar
- Var(&v, "v", "usage")
- if !ParseForTesting([]string{"a.out", "-v", "1", "-v", "2", "-v=3"}) {
- t.Error("parse failed")
+ flags.Var(&v, "v", "usage")
+ if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil {
+ t.Error(err)
}
if len(v) != 3 {
t.Fatal("expected 3 args; got ", len(v))
@@ -182,13 +189,17 @@ func TestUserDefined(t *testing.T) {
}
}
+// This tests that one can reset the flags. This still works but not well, and is
+// superseded by FlagSet.
func TestChangingArgs(t *testing.T) {
ResetForTesting(func() { t.Fatal("bad parse") })
oldArgs := os.Args
defer func() { os.Args = oldArgs }()
os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"}
before := Bool("before", false, "")
- Parse()
+ if err := CommandLine().Parse(os.Args[1:]); err != nil {
+ t.Fatal(err)
+ }
cmd := Arg(0)
os.Args = Args()
after := Bool("after", false, "")
diff --git a/src/pkg/fmt/doc.go b/src/pkg/fmt/doc.go
index e4d4f1844..08609ca64 100644
--- a/src/pkg/fmt/doc.go
+++ b/src/pkg/fmt/doc.go
@@ -27,7 +27,7 @@
%o base 8
%x base 16, with lower-case letters for a-f
%X base 16, with upper-case letters for A-F
- %U Unicode format: U+1234; same as "U+%0.4X"
+ %U Unicode format: U+1234; same as "U+%04X"
Floating-point and complex constituents:
%b decimalless scientific notation with exponent a power
of two, in the manner of strconv.Ftoa32, e.g. -123456p-78
@@ -134,10 +134,10 @@
The formats behave analogously to those of Printf with the
following exceptions:
- %p is not implemented
- %T is not implemented
- %e %E %f %F %g %g are all equivalent and scan any floating point or complex value
- %s and %v on strings scan a space-delimited token
+ %p is not implemented
+ %T is not implemented
+ %e %E %f %F %g %G are all equivalent and scan any floating point or complex value
+ %s and %v on strings scan a space-delimited token
The familiar base-setting prefixes 0 (octal) and 0x
(hexadecimal) are accepted when scanning integers without a
diff --git a/src/pkg/fmt/print.go b/src/pkg/fmt/print.go
index 10e0fe7c8..8885cebdf 100644
--- a/src/pkg/fmt/print.go
+++ b/src/pkg/fmt/print.go
@@ -41,7 +41,7 @@ type State interface {
Precision() (prec int, ok bool)
// Flag returns whether the flag c, a character, has been set.
- Flag(int) bool
+ Flag(c int) bool
}
// Formatter is the interface implemented by values with a custom formatter.
diff --git a/src/pkg/go/ast/ast.go b/src/pkg/go/ast/ast.go
index 2fc1a6032..b1c7d4ab1 100644
--- a/src/pkg/go/ast/ast.go
+++ b/src/pkg/go/ast/ast.go
@@ -121,7 +121,7 @@ func (f *Field) End() token.Pos {
// A FieldList represents a list of Fields, enclosed by parentheses or braces.
type FieldList struct {
Opening token.Pos // position of opening parenthesis/brace, if any
- List []*Field // field list
+ List []*Field // field list; or nil
Closing token.Pos // position of closing parenthesis/brace, if any
}
@@ -334,7 +334,7 @@ type (
// A FuncType node represents a function type.
FuncType struct {
Func token.Pos // position of "func" keyword
- Params *FieldList // (incoming) parameters
+ Params *FieldList // (incoming) parameters; or nil
Results *FieldList // (outgoing) results; or nil
}
@@ -515,10 +515,10 @@ type (
// An EmptyStmt node represents an empty statement.
// The "position" of the empty statement is the position
- // of the immediately preceeding semicolon.
+ // of the immediately preceding semicolon.
//
EmptyStmt struct {
- Semicolon token.Pos // position of preceeding ";"
+ Semicolon token.Pos // position of preceding ";"
}
// A LabeledStmt node represents a labeled statement.
@@ -596,7 +596,7 @@ type (
// An IfStmt node represents an if statement.
IfStmt struct {
If token.Pos // position of "if" keyword
- Init Stmt // initalization statement; or nil
+ Init Stmt // initialization statement; or nil
Cond Expr // condition
Body *BlockStmt
Else Stmt // else branch; or nil
@@ -613,7 +613,7 @@ type (
// A SwitchStmt node represents an expression switch statement.
SwitchStmt struct {
Switch token.Pos // position of "switch" keyword
- Init Stmt // initalization statement; or nil
+ Init Stmt // initialization statement; or nil
Tag Expr // tag expression; or nil
Body *BlockStmt // CaseClauses only
}
@@ -621,7 +621,7 @@ type (
// An TypeSwitchStmt node represents a type switch statement.
TypeSwitchStmt struct {
Switch token.Pos // position of "switch" keyword
- Init Stmt // initalization statement; or nil
+ Init Stmt // initialization statement; or nil
Assign Stmt // x := y.(type) or y.(type)
Body *BlockStmt // CaseClauses only
}
@@ -643,7 +643,7 @@ type (
// A ForStmt represents a for statement.
ForStmt struct {
For token.Pos // position of "for" keyword
- Init Stmt // initalization statement; or nil
+ Init Stmt // initialization statement; or nil
Cond Expr // condition; or nil
Post Stmt // post iteration statement; or nil
Body *BlockStmt
@@ -945,10 +945,10 @@ func (f *File) End() token.Pos {
// collectively building a Go package.
//
type Package struct {
- Name string // package name
- Scope *Scope // package scope
- Imports map[string]*Scope // map of import path -> package scope across all files
- Files map[string]*File // Go source files by filename
+ Name string // package name
+ Scope *Scope // package scope across all files
+ Imports map[string]*Object // map of package id -> package object
+ Files map[string]*File // Go source files by filename
}
diff --git a/src/pkg/go/ast/filter.go b/src/pkg/go/ast/filter.go
index 090d08d34..0907fd53d 100644
--- a/src/pkg/go/ast/filter.go
+++ b/src/pkg/go/ast/filter.go
@@ -21,24 +21,26 @@ func identListExports(list []*Ident) []*Ident {
}
-// isExportedType assumes that typ is a correct type.
-func isExportedType(typ Expr) bool {
- switch t := typ.(type) {
+// fieldName assumes that x is the type of an anonymous field and
+// returns the corresponding field name. If x is not an acceptable
+// anonymous field, the result is nil.
+//
+func fieldName(x Expr) *Ident {
+ switch t := x.(type) {
case *Ident:
- return t.IsExported()
- case *ParenExpr:
- return isExportedType(t.X)
+ return t
case *SelectorExpr:
- // assume t.X is a typename
- return t.Sel.IsExported()
+ if _, ok := t.X.(*Ident); ok {
+ return t.Sel
+ }
case *StarExpr:
- return isExportedType(t.X)
+ return fieldName(t.X)
}
- return false
+ return nil
}
-func fieldListExports(fields *FieldList, incomplete *bool) {
+func fieldListExports(fields *FieldList) (removedFields bool) {
if fields == nil {
return
}
@@ -53,12 +55,13 @@ func fieldListExports(fields *FieldList, incomplete *bool) {
// fields, so this is not absolutely correct.
// However, this cannot be done w/o complete
// type information.)
- exported = isExportedType(f.Type)
+ name := fieldName(f.Type)
+ exported = name != nil && name.IsExported()
} else {
n := len(f.Names)
f.Names = identListExports(f.Names)
if len(f.Names) < n {
- *incomplete = true
+ removedFields = true
}
exported = len(f.Names) > 0
}
@@ -69,9 +72,10 @@ func fieldListExports(fields *FieldList, incomplete *bool) {
}
}
if j < len(list) {
- *incomplete = true
+ removedFields = true
}
fields.List = list[0:j]
+ return
}
@@ -90,12 +94,16 @@ func typeExports(typ Expr) {
case *ArrayType:
typeExports(t.Elt)
case *StructType:
- fieldListExports(t.Fields, &t.Incomplete)
+ if fieldListExports(t.Fields) {
+ t.Incomplete = true
+ }
case *FuncType:
paramListExports(t.Params)
paramListExports(t.Results)
case *InterfaceType:
- fieldListExports(t.Methods, &t.Incomplete)
+ if fieldListExports(t.Methods) {
+ t.Incomplete = true
+ }
case *MapType:
typeExports(t.Key)
typeExports(t.Value)
@@ -206,13 +214,60 @@ func filterIdentList(list []*Ident, f Filter) []*Ident {
}
+func filterFieldList(fields *FieldList, filter Filter) (removedFields bool) {
+ if fields == nil {
+ return false
+ }
+ list := fields.List
+ j := 0
+ for _, f := range list {
+ keepField := false
+ if len(f.Names) == 0 {
+ // anonymous field
+ name := fieldName(f.Type)
+ keepField = name != nil && filter(name.Name)
+ } else {
+ n := len(f.Names)
+ f.Names = filterIdentList(f.Names, filter)
+ if len(f.Names) < n {
+ removedFields = true
+ }
+ keepField = len(f.Names) > 0
+ }
+ if keepField {
+ list[j] = f
+ j++
+ }
+ }
+ if j < len(list) {
+ removedFields = true
+ }
+ fields.List = list[0:j]
+ return
+}
+
+
func filterSpec(spec Spec, f Filter) bool {
switch s := spec.(type) {
case *ValueSpec:
s.Names = filterIdentList(s.Names, f)
return len(s.Names) > 0
case *TypeSpec:
- return f(s.Name.Name)
+ if f(s.Name.Name) {
+ return true
+ }
+ switch t := s.Type.(type) {
+ case *StructType:
+ if filterFieldList(t.Fields, f) {
+ t.Incomplete = true
+ }
+ return len(t.Fields.List) > 0
+ case *InterfaceType:
+ if filterFieldList(t.Methods, f) {
+ t.Incomplete = true
+ }
+ return len(t.Methods.List) > 0
+ }
}
return false
}
@@ -230,7 +285,14 @@ func filterSpecList(list []Spec, f Filter) []Spec {
}
-func filterDecl(decl Decl, f Filter) bool {
+// FilterDecl trims the AST for a Go declaration in place by removing
+// all names (including struct field and interface method names, but
+// not from parameter lists) that don't pass through the filter f.
+//
+// FilterDecl returns true if there are any declared names left after
+// filtering; it returns false otherwise.
+//
+func FilterDecl(decl Decl, f Filter) bool {
switch d := decl.(type) {
case *GenDecl:
d.Specs = filterSpecList(d.Specs, f)
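
A small usage sketch for the newly exported FilterDecl, using ast.IsExported as the filter; "example.go" is a placeholder input file:

    package main

    import (
    	"go/ast"
    	"go/parser"
    	"go/token"
    	"log"
    )

    func main() {
    	fset := token.NewFileSet()
    	file, err := parser.ParseFile(fset, "example.go", nil, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	kept := 0
    	for _, decl := range file.Decls {
    		// FilterDecl prunes unexported names (including struct fields
    		// and interface methods) in place and reports whether any
    		// declared names remain.
    		if ast.FilterDecl(decl, ast.IsExported) {
    			kept++
    		}
    	}
    	log.Printf("%d declarations with exported names", kept)
    }
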
@@ -243,10 +305,10 @@ func filterDecl(decl Decl, f Filter) bool {
// FilterFile trims the AST for a Go file in place by removing all
-// names from top-level declarations (but not from parameter lists
-// or inside types) that don't pass through the filter f. If the
-// declaration is empty afterwards, the declaration is removed from
-// the AST.
+// names from top-level declarations (including struct field and
+// interface method names, but not from parameter lists) that don't
+// pass through the filter f. If the declaration is empty afterwards,
+// the declaration is removed from the AST.
// The File.comments list is not changed.
//
// FilterFile returns true if there are any top-level declarations
@@ -255,7 +317,7 @@ func filterDecl(decl Decl, f Filter) bool {
func FilterFile(src *File, f Filter) bool {
j := 0
for _, d := range src.Decls {
- if filterDecl(d, f) {
+ if FilterDecl(d, f) {
src.Decls[j] = d
j++
}
@@ -266,10 +328,10 @@ func FilterFile(src *File, f Filter) bool {
// FilterPackage trims the AST for a Go package in place by removing all
-// names from top-level declarations (but not from parameter lists
-// or inside types) that don't pass through the filter f. If the
-// declaration is empty afterwards, the declaration is removed from
-// the AST.
+// names from top-level declarations (including struct field and
+// interface method names, but not from parameter lists) that don't
+// pass through the filter f. If the declaration is empty afterwards,
+// the declaration is removed from the AST.
// The pkg.Files list is not changed, so that file names and top-level
// package comments don't get lost.
//
diff --git a/src/pkg/go/ast/resolve.go b/src/pkg/go/ast/resolve.go
index fddc3baab..ecd2e8a7c 100644
--- a/src/pkg/go/ast/resolve.go
+++ b/src/pkg/go/ast/resolve.go
@@ -11,6 +11,7 @@ import (
"go/scanner"
"go/token"
"os"
+ "strconv"
)
@@ -57,11 +58,16 @@ func resolve(scope *Scope, ident *Ident) bool {
}
-// NewPackage uses an Importer to resolve imports. Given an importPath,
-// an importer returns the imported package's name, its scope of exported
-// objects, and an error, if any.
-//
-type Importer func(path string) (name string, scope *Scope, err os.Error)
+// An Importer resolves import paths to package Objects.
+// The imports map records the packages already imported,
+// indexed by package id (canonical import path).
+// An Importer must determine the canonical import path and
+// check the imports map to see if the package is already present.
+// If so, the Importer can return the map entry. Otherwise, the
+// Importer should load the package data for the given path into
+// a new *Object (pkg), record pkg in the imports map, and then
+// return pkg.
+type Importer func(imports map[string]*Object, path string) (pkg *Object, err os.Error)
// NewPackage creates a new Package node from a set of File nodes. It resolves
@@ -70,7 +76,7 @@ type Importer func(path string) (name string, scope *Scope, err os.Error)
// used to resolve identifiers not declared in any of the package files. Any
// remaining unresolved identifiers are reported as undeclared. If the files
// belong to different packages, one package name is selected and files with
-// different package name are reported and then ignored.
+// different package names are reported and then ignored.
// The result is a package node and a scanner.ErrorList if there were errors.
//
func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, os.Error) {
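
A hedged sketch of an Importer that follows the contract described above. It is a fragment, not part of this change, and loadExports is a hypothetical helper standing in for whatever mechanism reads a package's exported declarations:

    // loadExports is hypothetical: it returns the package name and a
    // *ast.Scope holding the package's exported objects.
    func importer(imports map[string]*ast.Object, path string) (*ast.Object, os.Error) {
    	if pkg := imports[path]; pkg != nil {
    		return pkg, nil // already imported: reuse the canonical object
    	}
    	name, scope, err := loadExports(path)
    	if err != nil {
    		return nil, err
    	}
    	pkg := ast.NewObj(ast.Pkg, name)
    	pkg.Data = scope // NewPackage expects the package scope here
    	imports[path] = pkg
    	return pkg, nil
    }
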
@@ -96,14 +102,8 @@ func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer,
}
}
- // imports maps import paths to package names and scopes
- // TODO(gri): Eventually we like to get to the import scope from
- // a package object. Then we can have a map path -> Obj.
- type importedPkg struct {
- name string
- scope *Scope
- }
- imports := make(map[string]*importedPkg)
+ // package global mapping of imported package ids to package objects
+ imports := make(map[string]*Object)
// complete file scopes with imports and resolve identifiers
for _, file := range files {
@@ -117,42 +117,41 @@ func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer,
importErrors := false
fileScope := NewScope(pkgScope)
for _, spec := range file.Imports {
- // add import to global map of imports
- path := string(spec.Path.Value)
- path = path[1 : len(path)-1] // strip ""'s
- pkg := imports[path]
- if pkg == nil {
- if importer == nil {
- importErrors = true
- continue
- }
- name, scope, err := importer(path)
- if err != nil {
- p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
- importErrors = true
- continue
- }
- pkg = &importedPkg{name, scope}
- imports[path] = pkg
- // TODO(gri) If a local package name != "." is provided,
- // global identifier resolution could proceed even if the
- // import failed. Consider adjusting the logic here a bit.
+ if importer == nil {
+ importErrors = true
+ continue
+ }
+ path, _ := strconv.Unquote(string(spec.Path.Value))
+ pkg, err := importer(imports, path)
+ if err != nil {
+ p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
+ importErrors = true
+ continue
}
+ // TODO(gri) If a local package name != "." is provided,
+ // global identifier resolution could proceed even if the
+ // import failed. Consider adjusting the logic here a bit.
+
// local name overrides imported package name
- name := pkg.name
+ name := pkg.Name
if spec.Name != nil {
name = spec.Name.Name
}
+
// add import to file scope
if name == "." {
// merge imported scope with file scope
- for _, obj := range pkg.scope.Objects {
+ for _, obj := range pkg.Data.(*Scope).Objects {
p.declare(fileScope, pkgScope, obj)
}
} else {
// declare imported package object in file scope
+ // (do not re-use pkg in the file scope but create
+ // a new object instead; the Decl field is different
+ // for different files)
obj := NewObj(Pkg, name)
obj.Decl = spec
+ obj.Data = pkg.Data
p.declare(fileScope, pkgScope, obj)
}
}
@@ -161,8 +160,8 @@ func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer,
if importErrors {
// don't use the universe scope without correct imports
// (objects in the universe may be shadowed by imports;
- // with missing imports identifiers might get resolved
- // wrongly)
+ // with missing imports, identifiers might get resolved
+ // incorrectly to universe objects)
pkgScope.Outer = nil
}
i := 0
@@ -178,11 +177,5 @@ func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer,
pkgScope.Outer = universe // reset universe scope
}
- // collect all import paths and respective package scopes
- importedScopes := make(map[string]*Scope)
- for path, pkg := range imports {
- importedScopes[path] = pkg.scope
- }
-
- return &Package{pkgName, pkgScope, importedScopes, files}, p.GetError(scanner.Sorted)
+ return &Package{pkgName, pkgScope, imports, files}, p.GetError(scanner.Sorted)
}
diff --git a/src/pkg/go/ast/scope.go b/src/pkg/go/ast/scope.go
index 830d88aef..b966f786f 100644
--- a/src/pkg/go/ast/scope.go
+++ b/src/pkg/go/ast/scope.go
@@ -70,13 +70,24 @@ func (s *Scope) String() string {
// ----------------------------------------------------------------------------
// Objects
+// TODO(gri) Consider replacing the Object struct with an interface
+// and a corresponding set of object implementations.
+
// An Object describes a named language entity such as a package,
// constant, type, variable, function (incl. methods), or label.
//
+// The Data field contains object-specific data:
+//
+//    Kind    Data type    Data value
+//    Pkg     *Scope       package scope
+//    Con     int          iota for the respective declaration
+//    Con     != nil       constant value
+//
type Object struct {
Kind ObjKind
Name string // declared name
Decl interface{} // corresponding Field, XxxSpec, FuncDecl, or LabeledStmt; or nil
+ Data interface{} // object-specific data; or nil
Type interface{} // place holder for type information; may be nil
}
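
A short sketch of how the new Data field is meant to be read back, per the table above; the describe helper and its output strings are illustrative only:

    package example

    import (
    	"fmt"
    	"go/ast"
    )

    // describe reports kind-specific information stored in obj.Data.
    func describe(obj *ast.Object) string {
    	switch obj.Kind {
    	case ast.Pkg:
    		// for packages, Data holds the package scope
    		if scope, ok := obj.Data.(*ast.Scope); ok {
    			return fmt.Sprintf("package %s (%d objects)", obj.Name, len(scope.Objects))
    		}
    	case ast.Con:
    		// for constants, Data may hold the iota of the declaration
    		if i, ok := obj.Data.(int); ok {
    			return fmt.Sprintf("const %s (iota %d)", obj.Name, i)
    		}
    	}
    	return obj.Name
    }
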
diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go
index 29d205d39..a7a7e0a32 100644
--- a/src/pkg/go/doc/doc.go
+++ b/src/pkg/go/doc/doc.go
@@ -572,6 +572,20 @@ func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc
type Filter func(string) bool
+func matchFields(fields *ast.FieldList, f Filter) bool {
+ if fields != nil {
+ for _, field := range fields.List {
+ for _, name := range field.Names {
+ if f(name.Name) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+
func matchDecl(d *ast.GenDecl, f Filter) bool {
for _, d := range d.Specs {
switch v := d.(type) {
@@ -585,6 +599,16 @@ func matchDecl(d *ast.GenDecl, f Filter) bool {
if f(v.Name.Name) {
return true
}
+ switch t := v.Type.(type) {
+ case *ast.StructType:
+ if matchFields(t.Fields, f) {
+ return true
+ }
+ case *ast.InterfaceType:
+ if matchFields(t.Methods, f) {
+ return true
+ }
+ }
}
}
return false
diff --git a/src/pkg/go/parser/parser.go b/src/pkg/go/parser/parser.go
index 5c57e41d1..586ee3a9a 100644
--- a/src/pkg/go/parser/parser.go
+++ b/src/pkg/go/parser/parser.go
@@ -54,7 +54,7 @@ type parser struct {
// Non-syntactic parser control
exprLev int // < 0: in control clause, >= 0: in expression
- // Ordinary identifer scopes
+ // Ordinary identifier scopes
pkgScope *ast.Scope // pkgScope.Outer == nil
topScope *ast.Scope // top-most scope; may be pkgScope
unresolved []*ast.Ident // unresolved identifiers
@@ -131,14 +131,16 @@ func (p *parser) closeLabelScope() {
}
-func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
for _, ident := range idents {
assert(ident.Obj == nil, "identifier already declared or resolved")
+ obj := ast.NewObj(kind, ident.Name)
+ // remember the corresponding declaration for redeclaration
+ // errors and global variable resolution/typechecking phase
+ obj.Decl = decl
+ obj.Data = data
+ ident.Obj = obj
if ident.Name != "_" {
- obj := ast.NewObj(kind, ident.Name)
- // remember the corresponding declaration for redeclaration
- // errors and global variable resolution/typechecking phase
- obj.Decl = decl
if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
prevDecl := ""
if pos := alt.Pos(); pos.IsValid() {
@@ -146,7 +148,6 @@ func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, i
}
p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
}
- ident.Obj = obj
}
}
}
@@ -159,17 +160,17 @@ func (p *parser) shortVarDecl(idents []*ast.Ident) {
n := 0 // number of new variables
for _, ident := range idents {
assert(ident.Obj == nil, "identifier already declared or resolved")
+ obj := ast.NewObj(ast.Var, ident.Name)
+ // short var declarations cannot have redeclaration errors
+ // and are not global => no need to remember the respective
+ // declaration
+ ident.Obj = obj
if ident.Name != "_" {
- obj := ast.NewObj(ast.Var, ident.Name)
- // short var declarations cannot have redeclaration errors
- // and are not global => no need to remember the respective
- // declaration
- alt := p.topScope.Insert(obj)
- if alt == nil {
+ if alt := p.topScope.Insert(obj); alt != nil {
+ ident.Obj = alt // redeclaration
+ } else {
n++ // new declaration
- alt = obj
}
- ident.Obj = alt
}
}
if n == 0 && p.mode&DeclarationErrors != 0 {
@@ -596,7 +597,7 @@ func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
p.expectSemi() // call before accessing p.linecomment
field := &ast.Field{doc, idents, typ, tag, p.lineComment}
- p.declare(field, scope, ast.Var, idents...)
+ p.declare(field, nil, scope, ast.Var, idents...)
return field
}
@@ -707,7 +708,7 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [
params = append(params, field)
// Go spec: The scope of an identifier denoting a function
// parameter or result variable is the function body.
- p.declare(field, scope, ast.Var, idents...)
+ p.declare(field, nil, scope, ast.Var, idents...)
if p.tok == token.COMMA {
p.next()
}
@@ -719,7 +720,7 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [
params = append(params, field)
// Go spec: The scope of an identifier denoting a function
// parameter or result variable is the function body.
- p.declare(field, scope, ast.Var, idents...)
+ p.declare(field, nil, scope, ast.Var, idents...)
if p.tok != token.COMMA {
break
}
@@ -818,11 +819,12 @@ func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
} else {
// embedded interface
typ = x
+ p.resolve(typ)
}
p.expectSemi() // call before accessing p.linecomment
spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
- p.declare(spec, scope, ast.Fun, idents...)
+ p.declare(spec, nil, scope, ast.Fun, idents...)
return spec
}
@@ -1476,7 +1478,7 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
// in which it is declared and excludes the body of any nested
// function.
stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
- p.declare(stmt, p.labelScope, ast.Lbl, label)
+ p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
return stmt
}
p.error(x[0].Pos(), "illegal label declaration")
@@ -1780,10 +1782,6 @@ func (p *parser) parseCommClause() *ast.CommClause {
rhs = lhs[0]
lhs = nil // there is no lhs
}
- if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW {
- p.errorExpected(rhs.Pos(), "send or receive operation")
- rhs = &ast.BadExpr{rhs.Pos(), rhs.End()}
- }
if lhs != nil {
comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
} else {
@@ -2004,7 +2002,7 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
// the end of the innermost containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
- p.declare(spec, p.topScope, ast.Con, idents...)
+ p.declare(spec, iota, p.topScope, ast.Con, idents...)
return spec
}
@@ -2022,7 +2020,7 @@ func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
// containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
spec := &ast.TypeSpec{doc, ident, nil, nil}
- p.declare(spec, p.topScope, ast.Typ, ident)
+ p.declare(spec, nil, p.topScope, ast.Typ, ident)
spec.Type = p.parseType()
p.expectSemi() // call before accessing p.linecomment
@@ -2051,7 +2049,7 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
// the end of the innermost containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
- p.declare(spec, p.topScope, ast.Var, idents...)
+ p.declare(spec, nil, p.topScope, ast.Var, idents...)
return spec
}
@@ -2143,7 +2141,7 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl {
// init() functions cannot be referred to and there may
// be more than one - don't put them in the pkgScope
if ident.Name != "init" {
- p.declare(decl, p.pkgScope, ast.Fun, ident)
+ p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
}
}
diff --git a/src/pkg/go/parser/parser_test.go b/src/pkg/go/parser/parser_test.go
index 2f1ee6bfc..5b52f51d4 100644
--- a/src/pkg/go/parser/parser_test.go
+++ b/src/pkg/go/parser/parser_test.go
@@ -51,6 +51,7 @@ var validPrograms = []interface{}{
`package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
`package p; var a = T{{1, 2}, {3, 4}}`,
`package p; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`,
+ `package p; func f() { select { case x := (<-c): } };`,
`package p; func f() { if ; true {} };`,
`package p; func f() { switch ; {} };`,
}
diff --git a/src/pkg/go/printer/nodes.go b/src/pkg/go/printer/nodes.go
index 86c327930..0fca8a161 100644
--- a/src/pkg/go/printer/nodes.go
+++ b/src/pkg/go/printer/nodes.go
@@ -33,7 +33,7 @@ import (
// line break was printed; returns false otherwise.
//
// TODO(gri): linebreak may add too many lines if the next statement at "line"
-// is preceeded by comments because the computation of n assumes
+// is preceded by comments because the computation of n assumes
// the current position before the comment and the target position
// after the comment. Thus, after interspersing such comments, the
// space taken up by them is not considered to reduce the number of
@@ -215,12 +215,13 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
}
if i > 0 {
- if mode&commaSep != 0 {
+ switch {
+ case mode&commaSep != 0:
p.print(token.COMMA)
- }
- if mode&periodSep != 0 {
+ case mode&periodSep != 0:
p.print(token.PERIOD)
}
+ needsBlank := mode&periodSep == 0 // period-separated list elements don't need a blank
if prevLine < line && prevLine > 0 && line > 0 {
// lines are broken using newlines so comments remain aligned
// unless forceFF is set or there are multiple expressions on
@@ -229,11 +230,12 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
ws = ignore
*multiLine = true
prevBreak = i
+ needsBlank = false // we got a line break instead
}
- } else if mode&periodSep == 0 {
+ }
+ if needsBlank {
p.print(blank)
}
- // period-separated list elements don't need a blank
}
if isPair && size > 0 && len(list) > 1 {
@@ -438,8 +440,8 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
if len(list) > 0 {
p.print(formfeed)
}
- p.flush(p.fset.Position(rbrace), token.RBRACE) // make sure we don't loose the last line comment
- p.setLineComment("// contains unexported fields")
+ p.flush(p.fset.Position(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.setLineComment("// contains filtered or unexported fields")
}
} else { // interface
@@ -465,8 +467,8 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
if len(list) > 0 {
p.print(formfeed)
}
- p.flush(p.fset.Position(rbrace), token.RBRACE) // make sure we don't loose the last line comment
- p.setLineComment("// contains unexported methods")
+ p.flush(p.fset.Position(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.setLineComment("// contains filtered or unexported methods")
}
}
@@ -1189,6 +1191,97 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
// ----------------------------------------------------------------------------
// Declarations
+// The keepTypeColumn function determines if the type column of a series of
+// consecutive const or var declarations must be kept, or if initialization
+// values (V) can be placed in the type column (T) instead. The i'th entry
+// in the result slice is true if the type column in spec[i] must be kept.
+//
+// For example, the declaration:
+//
+//    const (
+//        foobar int = 42 // comment
+//        x = 7 // comment
+//        foo
+//        bar = 991
+//    )
+//
+// leads to the type/values matrix below. A run of value columns (V) can
+// be moved into the type column if there is no type for any of the values
+// in that column (we only move entire columns so that they align properly).
+//
+//    matrix       formatted matrix       result
+//
+//    T V    ->    T V    ->    true      there is a T and so the type
+//    - V          - V          true      column must be kept
+//    - -          - -          false
+//    - V          V -          false     V is moved into T column
+//
+func keepTypeColumn(specs []ast.Spec) []bool {
+ m := make([]bool, len(specs))
+
+ populate := func(i, j int, keepType bool) {
+ if keepType {
+ for ; i < j; i++ {
+ m[i] = true
+ }
+ }
+ }
+
+ i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run
+ var keepType bool
+ for i, s := range specs {
+ t := s.(*ast.ValueSpec)
+ if t.Values != nil {
+ if i0 < 0 {
+ // start of a run of ValueSpecs with non-nil Values
+ i0 = i
+ keepType = false
+ }
+ } else {
+ if i0 >= 0 {
+ // end of a run
+ populate(i0, i, keepType)
+ i0 = -1
+ }
+ }
+ if t.Type != nil {
+ keepType = true
+ }
+ }
+ if i0 >= 0 {
+ // end of a run
+ populate(i0, len(specs), keepType)
+ }
+
+ return m
+}
+
+
+func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine *bool) {
+ p.setComment(s.Doc)
+ p.identList(s.Names, doIndent, multiLine) // always present
+ extraTabs := 3
+ if s.Type != nil || keepType {
+ p.print(vtab)
+ extraTabs--
+ }
+ if s.Type != nil {
+ p.expr(s.Type, multiLine)
+ }
+ if s.Values != nil {
+ p.print(vtab, token.ASSIGN)
+ p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
+ extraTabs--
+ }
+ if s.Comment != nil {
+ for ; extraTabs > 0; extraTabs-- {
+ p.print(vtab)
+ }
+ p.setComment(s.Comment)
+ }
+}
+
+
// The parameter n is the number of specs in the group. If doIndent is set,
// multi-line identifier lists in the spec are indented when the first
// linebreak is encountered.
@@ -1206,38 +1299,20 @@ func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) {
p.setComment(s.Comment)
case *ast.ValueSpec:
+ if n != 1 {
+ p.internalError("expected n = 1; got", n)
+ }
p.setComment(s.Doc)
p.identList(s.Names, doIndent, multiLine) // always present
- if n == 1 {
- if s.Type != nil {
- p.print(blank)
- p.expr(s.Type, multiLine)
- }
- if s.Values != nil {
- p.print(blank, token.ASSIGN)
- p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
- }
- p.setComment(s.Comment)
-
- } else {
- extraTabs := 3
- if s.Type != nil {
- p.print(vtab)
- p.expr(s.Type, multiLine)
- extraTabs--
- }
- if s.Values != nil {
- p.print(vtab, token.ASSIGN)
- p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
- extraTabs--
- }
- if s.Comment != nil {
- for ; extraTabs > 0; extraTabs-- {
- p.print(vtab)
- }
- p.setComment(s.Comment)
- }
+ if s.Type != nil {
+ p.print(blank)
+ p.expr(s.Type, multiLine)
+ }
+ if s.Values != nil {
+ p.print(blank, token.ASSIGN)
+ p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
}
+ p.setComment(s.Comment)
case *ast.TypeSpec:
p.setComment(s.Doc)
@@ -1264,15 +1339,29 @@ func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
if d.Lparen.IsValid() {
// group of parenthesized declarations
p.print(d.Lparen, token.LPAREN)
- if len(d.Specs) > 0 {
+ if n := len(d.Specs); n > 0 {
p.print(indent, formfeed)
- var ml bool
- for i, s := range d.Specs {
- if i > 0 {
- p.linebreak(p.fset.Position(s.Pos()).Line, 1, ignore, ml)
+ if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) {
+ // two or more grouped const/var declarations:
+ // determine if the type column must be kept
+ keepType := keepTypeColumn(d.Specs)
+ var ml bool
+ for i, s := range d.Specs {
+ if i > 0 {
+ p.linebreak(p.fset.Position(s.Pos()).Line, 1, ignore, ml)
+ }
+ ml = false
+ p.valueSpec(s.(*ast.ValueSpec), keepType[i], false, &ml)
+ }
+ } else {
+ var ml bool
+ for i, s := range d.Specs {
+ if i > 0 {
+ p.linebreak(p.fset.Position(s.Pos()).Line, 1, ignore, ml)
+ }
+ ml = false
+ p.spec(s, n, false, &ml)
}
- ml = false
- p.spec(s, len(d.Specs), false, &ml)
}
p.print(unindent, formfeed)
*multiLine = true
@@ -1303,7 +1392,7 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
size = maxSize + 1 // assume n doesn't fit
p.nodeSizes[n] = size
- // nodeSize computation must be indendent of particular
+ // nodeSize computation must be independent of particular
// style so that we always get the same decision; print
// in RawFormat
cfg := Config{Mode: RawFormat}
diff --git a/src/pkg/go/printer/performance_test.go b/src/pkg/go/printer/performance_test.go
new file mode 100644
index 000000000..31de0b7ad
--- /dev/null
+++ b/src/pkg/go/printer/performance_test.go
@@ -0,0 +1,62 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a simple printer performance benchmark:
+// gotest -bench=BenchmarkPrint
+
+package printer
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "io"
+ "io/ioutil"
+ "log"
+ "testing"
+)
+
+
+var testfile *ast.File
+
+
+func testprint(out io.Writer, file *ast.File) {
+ if _, err := (&Config{TabIndent | UseSpaces, 8}).Fprint(out, fset, file); err != nil {
+ log.Fatalf("print error: %s", err)
+ }
+}
+
+
+// cannot initialize in init because (printer) Fprint launches goroutines.
+func initialize() {
+ const filename = "testdata/parser.go"
+
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ log.Fatalf("%s", err)
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+ if err != nil {
+ log.Fatalf("%s", err)
+ }
+
+ var buf bytes.Buffer
+ testprint(&buf, file)
+ if !bytes.Equal(buf.Bytes(), src) {
+ log.Fatalf("print error: %s not idempotent", filename)
+ }
+
+ testfile = file
+}
+
+
+func BenchmarkPrint(b *testing.B) {
+ if testfile == nil {
+ initialize()
+ }
+ for i := 0; i < b.N; i++ {
+ testprint(ioutil.Discard, testfile)
+ }
+}
diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go
index 01ebf783c..40b15dd79 100644
--- a/src/pkg/go/printer/printer.go
+++ b/src/pkg/go/printer/printer.go
@@ -589,7 +589,7 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
// ignore trailing whitespace
p.wsbuf[i] = ignore
case indent, unindent:
- // don't loose indentation information
+ // don't lose indentation information
case newline, formfeed:
// if we need a line break, keep exactly one
// but remember if we dropped any formfeeds
diff --git a/src/pkg/go/printer/testdata/comments.golden b/src/pkg/go/printer/testdata/comments.golden
index a86d66174..b177c3571 100644
--- a/src/pkg/go/printer/testdata/comments.golden
+++ b/src/pkg/go/printer/testdata/comments.golden
@@ -22,7 +22,7 @@ const (
_ = iota + 10
_ // comments
- _ = 10 // comment
+ _ = 10 // comment
_ T = 20 // comment
)
@@ -38,9 +38,9 @@ const (
_ // comment
_ // comment
_ = iota + 10
- _ // comment
- _ = 10
- _ = 20 // comment
+ _ // comment
+ _ = 10
+ _ = 20 // comment
_ T = 0 // comment
)
@@ -436,7 +436,7 @@ func _() {
// Comments immediately adjacent to punctuation (for which the go/printer
-// may obly have estimated position information) must remain after the punctuation.
+// may only have estimated position information) must remain after the punctuation.
func _() {
_ = T{
1, // comment after comma
diff --git a/src/pkg/go/printer/testdata/comments.input b/src/pkg/go/printer/testdata/comments.input
index 14cd4cf7a..2a9a86b68 100644
--- a/src/pkg/go/printer/testdata/comments.input
+++ b/src/pkg/go/printer/testdata/comments.input
@@ -434,7 +434,7 @@ func _() {
// Comments immediately adjacent to punctuation (for which the go/printer
-// may obly have estimated position information) must remain after the punctuation.
+// may only have estimated position information) must remain after the punctuation.
func _() {
_ = T{
1, // comment after comma
diff --git a/src/pkg/go/printer/testdata/comments.x b/src/pkg/go/printer/testdata/comments.x
index 4d7a928ae..30a182f49 100644
--- a/src/pkg/go/printer/testdata/comments.x
+++ b/src/pkg/go/printer/testdata/comments.x
@@ -8,7 +8,7 @@ type SZ struct{}
// The S0 struct; no field is exported.
type S0 struct {
- // contains unexported fields
+ // contains filtered or unexported fields
}
// The S1 struct; some fields are not exported.
@@ -16,7 +16,7 @@ type S1 struct {
S0
A, B, C float // 3 exported fields
D int // 2 unexported fields
- // contains unexported fields
+ // contains filtered or unexported fields
}
// The S2 struct; all fields are exported.
@@ -30,14 +30,14 @@ type SZ interface{}
// The I0 interface; no method is exported.
type I0 interface {
- // contains unexported methods
+ // contains filtered or unexported methods
}
// The I1 interface; some methods are not exported.
type I1 interface {
I0
F(x float) float // exported methods
- // contains unexported methods
+ // contains filtered or unexported methods
}
// The I2 interface; all methods are exported.
@@ -53,5 +53,5 @@ type S3 struct {
F1 int // line comment for F1
// lead comment for F2
F2 int // line comment for F2
- // contains unexported fields
+ // contains filtered or unexported fields
}
diff --git a/src/pkg/go/printer/testdata/declarations.golden b/src/pkg/go/printer/testdata/declarations.golden
index c1b255842..fac72f651 100644
--- a/src/pkg/go/printer/testdata/declarations.golden
+++ b/src/pkg/go/printer/testdata/declarations.golden
@@ -160,7 +160,6 @@ bar`
func _() {
- // the following decls need a semicolon at the end
type _ int
type _ *int
type _ []int
@@ -175,7 +174,6 @@ func _() {
var _ chan int
var _ func() int
- // the following decls don't need a semicolon at the end
type _ struct{}
type _ *struct{}
type _ []struct{}
@@ -331,11 +329,11 @@ func _() {
)
// some entries have a type
const (
- xxxxxx = 1
- x = 2
- xxx = 3
+ xxxxxx = 1
+ x = 2
+ xxx = 3
yyyyyyyy float = iota
- yyyy = "bar"
+ yyyy = "bar"
yyy
yy = 2
)
@@ -365,7 +363,7 @@ func _() {
xxx string
yyyyyyyy int = 1234
y float = 3.14
- yyyy = "bar"
+ yyyy = "bar"
yyy string = "foo"
)
// mixed entries - all comments should be aligned
@@ -373,7 +371,7 @@ func _() {
a, b, c int
x = 10
d int // comment
- y = 20 // comment
+ y = 20 // comment
f, ff, fff, ffff int = 0, 1, 2, 3 // comment
)
// respect original line breaks
@@ -401,6 +399,33 @@ func _() {
)
}
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+ umax uint = ^uint(0) // maximum value for a uint
+ bpu = 1 << (5 + umax>>63) // bits per uint
+ foo
+ bar = -1
+)
+
+// typical enum
+const (
+ a MyType = iota
+ abcd
+ b
+ c
+ def
+)
+
+// excerpt from godoc.go
+var (
+ goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+ testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+ pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+ filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+ filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+ filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
// formatting of structs
type _ struct{}
diff --git a/src/pkg/go/printer/testdata/declarations.input b/src/pkg/go/printer/testdata/declarations.input
index c8b37e12b..c6134096b 100644
--- a/src/pkg/go/printer/testdata/declarations.input
+++ b/src/pkg/go/printer/testdata/declarations.input
@@ -159,7 +159,6 @@ bar`
func _() {
- // the following decls need a semicolon at the end
type _ int
type _ *int
type _ []int
@@ -174,7 +173,6 @@ func _() {
var _ chan int
var _ func() int
- // the following decls don't need a semicolon at the end
type _ struct{}
type _ *struct{}
type _ []struct{}
@@ -400,6 +398,33 @@ func _() {
)
}
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+ umax uint = ^uint(0) // maximum value for a uint
+ bpu = 1 << (5 + umax>>63) // bits per uint
+ foo
+ bar = -1
+)
+
+// typical enum
+const (
+ a MyType = iota
+ abcd
+ b
+ c
+ def
+)
+
+// excerpt from godoc.go
+var (
+ goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+ testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+ pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+ filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+ filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+ filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
// formatting of structs
type _ struct{}
diff --git a/src/pkg/go/printer/testdata/expressions.golden b/src/pkg/go/printer/testdata/expressions.golden
index c1a7e970b..a5e2fdc3b 100644
--- a/src/pkg/go/printer/testdata/expressions.golden
+++ b/src/pkg/go/printer/testdata/expressions.golden
@@ -94,30 +94,49 @@ func _() {
_ = under_bar - 1
_ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
-}
-
-func _() {
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
a + b
a + b + c
- a + b*c
a + (b * c)
- (a + b) * c
- a + (b * c * d)
- a + (b*c + d)
+ a + (b / c)
+ 1 + a
+ a + 1
+ s[a]
+ x << 1
+ (s[0] << 1) & 0xf
+ "foo" + s
+ x == y
+ x < y || z > 42
+}
+
+
+func _() {
+ _ = a + b
+ _ = a + b + c
+ _ = a + b*c
+ _ = a + (b * c)
+ _ = (a + b) * c
+ _ = a + (b * c * d)
+ _ = a + (b*c + d)
- 1 << x
- -1 << x
- 1<<x - 1
- -1<<x - 1
+ _ = 1 << x
+ _ = -1 << x
+ _ = 1<<x - 1
+ _ = -1<<x - 1
- f(a + b)
- f(a + b + c)
- f(a + b*c)
- f(a + (b * c))
- f(1<<x-1, 1<<x-2)
+ _ = f(a + b)
+ _ = f(a + b + c)
+ _ = f(a + b*c)
+ _ = f(a + (b * c))
+ _ = f(1<<x-1, 1<<x-2)
- 1<<d.logWindowSize - 1
+ _ = 1<<d.logWindowSize - 1
buf = make(x, 2*cap(b.buf)+n)
@@ -131,7 +150,7 @@ func _() {
signed += ' ' * 8
tw.octal(header[148:155], chksum)
- x > 0 && i >= 0
+ _ = x > 0 && i >= 0
x1, x0 := x>>w2, x&m2
z0 = t1<<w2 + t0
@@ -141,31 +160,31 @@ func _() {
x1 = (x1 << z) | (x0 >> (uint(w) - z))
x1 = x1<<z | x0>>(uint(w)-z)
- buf[0 : len(buf)+1]
- buf[0 : n+1]
+ _ = buf[0 : len(buf)+1]
+ _ = buf[0 : n+1]
a, b = b, a
a = b + c
a = b*c + d
- a*b + c
- a - b - c
- a - (b - c)
- a - b*c
- a - (b * c)
- a * b / c
- a / *b
- x[a|^b]
- x[a / *b]
- a & ^b
- a + +b
- a - -b
- x[a*-b]
- x[a + +b]
- x ^ y ^ z
- b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
- len(longVariableName) * 2
-
- token(matchType + xlength<<lengthShift + xoffset)
+ _ = a*b + c
+ _ = a - b - c
+ _ = a - (b - c)
+ _ = a - b*c
+ _ = a - (b * c)
+ _ = a * b / c
+ _ = a / *b
+ _ = x[a|^b]
+ _ = x[a / *b]
+ _ = a & ^b
+ _ = a + +b
+ _ = a - -b
+ _ = x[a*-b]
+ _ = x[a + +b]
+ _ = x ^ y ^ z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName) * 2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
}
@@ -625,7 +644,7 @@ func _() {
func f() {
// os.Open parameters should remain on two lines
if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
- os.O_TRUNC,0666); err != nil {
+ os.O_TRUNC, 0666); err != nil {
log.Fatal(err)
}
}
diff --git a/src/pkg/go/printer/testdata/expressions.input b/src/pkg/go/printer/testdata/expressions.input
index b87381198..d11314983 100644
--- a/src/pkg/go/printer/testdata/expressions.input
+++ b/src/pkg/go/printer/testdata/expressions.input
@@ -94,30 +94,49 @@ func _() {
_ = under_bar-1
_ = Open(dpath + "/file", O_WRONLY | O_CREAT, 0666)
_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
-}
-
-func _() {
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
a+b
a+b+c
- a+b*c
a+(b*c)
- (a+b)*c
- a+(b*c*d)
- a+(b*c+d)
+ a+(b/c)
+ 1+a
+ a+1
+ s[a]
+ x<<1
+ (s[0]<<1)&0xf
+ "foo"+s
+ x == y
+ x < y || z > 42
+}
- 1<<x
- -1<<x
- 1<<x-1
- -1<<x-1
- f(a+b)
- f(a+b+c)
- f(a+b*c)
- f(a+(b*c))
- f(1<<x-1, 1<<x-2)
+func _() {
+ _ = a+b
+ _ = a+b+c
+ _ = a+b*c
+ _ = a+(b*c)
+ _ = (a+b)*c
+ _ = a+(b*c*d)
+ _ = a+(b*c+d)
+
+ _ = 1<<x
+ _ = -1<<x
+ _ = 1<<x-1
+ _ = -1<<x-1
- 1<<d.logWindowSize-1
+ _ = f(a+b)
+ _ = f(a+b+c)
+ _ = f(a+b*c)
+ _ = f(a+(b*c))
+ _ = f(1<<x-1, 1<<x-2)
+
+ _ = 1<<d.logWindowSize-1
buf = make(x, 2*cap(b.buf) + n)
@@ -131,7 +150,7 @@ func _() {
signed += ' '*8
tw.octal(header[148:155], chksum)
- x > 0 && i >= 0
+ _ = x > 0 && i >= 0
x1, x0 := x>>w2, x&m2
z0 = t1<<w2+t0
@@ -141,31 +160,31 @@ func _() {
x1 = (x1<<z)|(x0>>(uint(w)-z))
x1 = x1<<z | x0>>(uint(w)-z)
- buf[0:len(buf)+1]
- buf[0:n+1]
+ _ = buf[0:len(buf)+1]
+ _ = buf[0:n+1]
a,b = b,a
a = b+c
a = b*c+d
- a*b+c
- a-b-c
- a-(b-c)
- a-b*c
- a-(b*c)
- a*b/c
- a/ *b
- x[a|^b]
- x[a/ *b]
- a& ^b
- a+ +b
- a- -b
- x[a*-b]
- x[a+ +b]
- x^y^z
- b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
- len(longVariableName)*2
-
- token(matchType + xlength<<lengthShift + xoffset)
+ _ = a*b+c
+ _ = a-b-c
+ _ = a-(b-c)
+ _ = a-b*c
+ _ = a-(b*c)
+ _ = a*b/c
+ _ = a/ *b
+ _ = x[a|^b]
+ _ = x[a/ *b]
+ _ = a& ^b
+ _ = a+ +b
+ _ = a- -b
+ _ = x[a*-b]
+ _ = x[a+ +b]
+ _ = x^y^z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName)*2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
}
diff --git a/src/pkg/go/printer/testdata/expressions.raw b/src/pkg/go/printer/testdata/expressions.raw
index 735cd943e..308d9edff 100644
--- a/src/pkg/go/printer/testdata/expressions.raw
+++ b/src/pkg/go/printer/testdata/expressions.raw
@@ -94,30 +94,49 @@ func _() {
_ = under_bar - 1
_ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
-}
-
-func _() {
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
a + b
a + b + c
- a + b*c
a + (b * c)
- (a + b) * c
- a + (b * c * d)
- a + (b*c + d)
+ a + (b / c)
+ 1 + a
+ a + 1
+ s[a]
+ x << 1
+ (s[0] << 1) & 0xf
+ "foo" + s
+ x == y
+ x < y || z > 42
+}
+
+
+func _() {
+ _ = a + b
+ _ = a + b + c
+ _ = a + b*c
+ _ = a + (b * c)
+ _ = (a + b) * c
+ _ = a + (b * c * d)
+ _ = a + (b*c + d)
- 1 << x
- -1 << x
- 1<<x - 1
- -1<<x - 1
+ _ = 1 << x
+ _ = -1 << x
+ _ = 1<<x - 1
+ _ = -1<<x - 1
- f(a + b)
- f(a + b + c)
- f(a + b*c)
- f(a + (b * c))
- f(1<<x-1, 1<<x-2)
+ _ = f(a + b)
+ _ = f(a + b + c)
+ _ = f(a + b*c)
+ _ = f(a + (b * c))
+ _ = f(1<<x-1, 1<<x-2)
- 1<<d.logWindowSize - 1
+ _ = 1<<d.logWindowSize - 1
buf = make(x, 2*cap(b.buf)+n)
@@ -131,7 +150,7 @@ func _() {
signed += ' ' * 8
tw.octal(header[148:155], chksum)
- x > 0 && i >= 0
+ _ = x > 0 && i >= 0
x1, x0 := x>>w2, x&m2
z0 = t1<<w2 + t0
@@ -141,31 +160,31 @@ func _() {
x1 = (x1 << z) | (x0 >> (uint(w) - z))
x1 = x1<<z | x0>>(uint(w)-z)
- buf[0 : len(buf)+1]
- buf[0 : n+1]
+ _ = buf[0 : len(buf)+1]
+ _ = buf[0 : n+1]
a, b = b, a
a = b + c
a = b*c + d
- a*b + c
- a - b - c
- a - (b - c)
- a - b*c
- a - (b * c)
- a * b / c
- a / *b
- x[a|^b]
- x[a / *b]
- a & ^b
- a + +b
- a - -b
- x[a*-b]
- x[a + +b]
- x ^ y ^ z
- b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
- len(longVariableName) * 2
-
- token(matchType + xlength<<lengthShift + xoffset)
+ _ = a*b + c
+ _ = a - b - c
+ _ = a - (b - c)
+ _ = a - b*c
+ _ = a - (b * c)
+ _ = a * b / c
+ _ = a / *b
+ _ = x[a|^b]
+ _ = x[a / *b]
+ _ = a & ^b
+ _ = a + +b
+ _ = a - -b
+ _ = x[a*-b]
+ _ = x[a + +b]
+ _ = x ^ y ^ z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName) * 2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
}
@@ -625,7 +644,7 @@ func _() {
func f() {
// os.Open parameters should remain on two lines
if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
- os.O_TRUNC,0666); err != nil {
+ os.O_TRUNC, 0666); err != nil {
log.Fatal(err)
}
}
diff --git a/src/pkg/go/printer/testdata/parser.go b/src/pkg/go/printer/testdata/parser.go
new file mode 100644
index 000000000..5c57e41d1
--- /dev/null
+++ b/src/pkg/go/printer/testdata/parser.go
@@ -0,0 +1,2252 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parser implements a parser for Go source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the
+// output is an abstract syntax tree (AST) representing the Go source. The
+// parser is invoked through one of the Parse* functions.
+//
+package parser
+
+import (
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+)
+
+
+// The mode parameter to the Parse* functions is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+//
+const (
+ PackageClauseOnly uint = 1 << iota // parsing stops after package clause
+ ImportsOnly // parsing stops after import declarations
+ ParseComments // parse comments and add them to AST
+ Trace // print a trace of parsed productions
+ DeclarationErrors // report declaration errors
+)
+
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+ file *token.File
+ scanner.ErrorVector
+ scanner scanner.Scanner
+
+ // Tracing/debugging
+ mode uint // parsing mode
+ trace bool // == (mode & Trace != 0)
+ indent uint // indentation used for tracing output
+
+ // Comments
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ // Next token
+ pos token.Pos // token position
+ tok token.Token // one token look-ahead
+ lit string // token literal
+
+ // Non-syntactic parser control
+ exprLev int // < 0: in control clause, >= 0: in expression
+
+ // Ordinary identifer scopes
+ pkgScope *ast.Scope // pkgScope.Outer == nil
+ topScope *ast.Scope // top-most scope; may be pkgScope
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
+
+ // Label scope
+ // (maintained by open/close LabelScope)
+ labelScope *ast.Scope // label scope for current function
+ targetStack [][]*ast.Ident // stack of unresolved labels
+}
+
+
+// scannerMode returns the scanner mode bits given the parser's mode bits.
+func scannerMode(mode uint) uint {
+ var m uint = scanner.InsertSemis
+ if mode&ParseComments != 0 {
+ m |= scanner.ScanComments
+ }
+ return m
+}
+
+
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
+ p.file = fset.AddFile(filename, fset.Base(), len(src))
+ p.scanner.Init(p.file, src, p, scannerMode(mode))
+
+ p.mode = mode
+ p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
+
+ p.next()
+
+ // set up the pkgScope here (as opposed to in parseFile) because
+ // there are other parser entry points (ParseExpr, etc.)
+ p.openScope()
+ p.pkgScope = p.topScope
+
+ // for the same reason, set up a label scope
+ p.openLabelScope()
+}
+
+
+// ----------------------------------------------------------------------------
+// Scoping support
+
+func (p *parser) openScope() {
+ p.topScope = ast.NewScope(p.topScope)
+}
+
+
+func (p *parser) closeScope() {
+ p.topScope = p.topScope.Outer
+}
+
+
+func (p *parser) openLabelScope() {
+ p.labelScope = ast.NewScope(p.labelScope)
+ p.targetStack = append(p.targetStack, nil)
+}
+
+
+func (p *parser) closeLabelScope() {
+ // resolve labels
+ n := len(p.targetStack) - 1
+ scope := p.labelScope
+ for _, ident := range p.targetStack[n] {
+ ident.Obj = scope.Lookup(ident.Name)
+ if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
+ p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
+ }
+ }
+ // pop label scope
+ p.targetStack = p.targetStack[0:n]
+ p.labelScope = p.labelScope.Outer
+}
+
+
+func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(kind, ident.Name)
+ // remember the corresponding declaration for redeclaration
+ // errors and global variable resolution/typechecking phase
+ obj.Decl = decl
+ if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
+ }
+ p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+ }
+ ident.Obj = obj
+ }
+ }
+}
+
+
+func (p *parser) shortVarDecl(idents []*ast.Ident) {
+ // Go spec: A short variable declaration may redeclare variables
+ // provided they were originally declared in the same block with
+ // the same type, and at least one of the non-blank variables is new.
+ n := 0 // number of new variables
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(ast.Var, ident.Name)
+ // short var declarations cannot have redeclaration errors
+ // and are not global => no need to remember the respective
+ // declaration
+ alt := p.topScope.Insert(obj)
+ if alt == nil {
+ n++ // new declaration
+ alt = obj
+ }
+ ident.Obj = alt
+ }
+ }
+ if n == 0 && p.mode&DeclarationErrors != 0 {
+ p.error(idents[0].Pos(), "no new variables on left side of :=")
+ }
+}
+
+
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+
+func (p *parser) resolve(x ast.Expr) {
+ // nothing to do if x is not an identifier or the blank identifier
+ ident, _ := x.(*ast.Ident)
+ if ident == nil {
+ return
+ }
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name == "_" {
+ return
+ }
+ // try to resolve the identifier
+ for s := p.topScope; s != nil; s = s.Outer {
+ if obj := s.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return
+ }
+ }
+ // all local scopes are known, so any unresolved identifier
+ // must be found either in the file scope, package scope
+ // (perhaps in another file), or universe scope --- collect
+ // them so that they can be resolved later
+ ident.Obj = unresolved
+ p.unresolved = append(p.unresolved, ident)
+}
+
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...interface{}) {
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
+ ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = uint(len(dots))
+ pos := p.file.Position(p.pos)
+ fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+ i := 2 * p.indent
+ for ; i > n; i -= n {
+ fmt.Print(dots)
+ }
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+
+func trace(p *parser, msg string) *parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+
+// Usage pattern: defer un(trace(p, "..."));
+func un(p *parser) {
+ p.indent--
+ p.printTrace(")")
+}
+
+
+// Advance to the next token.
+func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+ // when tracing as it provides a more readable output. The
+ // very first token (!p.pos.IsValid()) is not initialized
+	// (it is token.ILLEGAL), so don't print it.
+ if p.trace && p.pos.IsValid() {
+ s := p.tok.String()
+ switch {
+ case p.tok.IsLiteral():
+ p.printTrace(s, p.lit)
+ case p.tok.IsOperator(), p.tok.IsKeyword():
+ p.printTrace("\"" + s + "\"")
+ default:
+ p.printTrace(s)
+ }
+ }
+
+ p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+ // /*-style comments may end on a different line than where they start.
+ // Scan the comment for '\n' chars and adjust endline accordingly.
+ endline = p.file.Line(p.pos)
+ if p.lit[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{p.pos, p.lit}
+ p.next0()
+
+ return
+}
+
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. An empty line or non-comment
+// token terminates a comment group.
+//
+func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.file.Line(p.pos)
+ for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
+//
+func (p *parser) next() {
+ p.leadComment = nil
+ p.lineComment = nil
+ line := p.file.Line(p.pos) // current line
+ p.next0()
+
+ if p.tok == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ if p.file.Line(p.pos) == line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup()
+ if p.file.Line(p.pos) != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok == token.COMMENT {
+ comment, endline = p.consumeCommentGroup()
+ }
+
+ if endline+1 == p.file.Line(p.pos) {
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+}
+
+
+func (p *parser) error(pos token.Pos, msg string) {
+ p.Error(p.file.Position(pos), msg)
+}
+
+
+func (p *parser) errorExpected(pos token.Pos, msg string) {
+ msg = "expected " + msg
+ if pos == p.pos {
+ // the error happened at the current position;
+ // make the error message more specific
+ if p.tok == token.SEMICOLON && p.lit[0] == '\n' {
+ msg += ", found newline"
+ } else {
+ msg += ", found '" + p.tok.String() + "'"
+ if p.tok.IsLiteral() {
+ msg += " " + p.lit
+ }
+ }
+ }
+ p.error(pos, msg)
+}
+
+
+func (p *parser) expect(tok token.Token) token.Pos {
+ pos := p.pos
+ if p.tok != tok {
+ p.errorExpected(pos, "'"+tok.String()+"'")
+ }
+ p.next() // make progress
+ return pos
+}
+
+
+func (p *parser) expectSemi() {
+ if p.tok != token.RPAREN && p.tok != token.RBRACE {
+ p.expect(token.SEMICOLON)
+ }
+}
+
+
+func assert(cond bool, msg string) {
+ if !cond {
+ panic("go/parser internal error: " + msg)
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+func (p *parser) parseIdent() *ast.Ident {
+ pos := p.pos
+ name := "_"
+ if p.tok == token.IDENT {
+ name = p.lit
+ p.next()
+ } else {
+ p.expect(token.IDENT) // use expect() error handling
+ }
+ return &ast.Ident{pos, name, nil}
+}
+
+
+func (p *parser) parseIdentList() (list []*ast.Ident) {
+ if p.trace {
+ defer un(trace(p, "IdentList"))
+ }
+
+ list = append(list, p.parseIdent())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseIdent())
+ }
+
+ return
+}
+
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ExpressionList"))
+ }
+
+ list = append(list, p.parseExpr(lhs))
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseExpr(lhs))
+ }
+
+ return
+}
+
+
+func (p *parser) parseLhsList() []ast.Expr {
+ list := p.parseExprList(true)
+ switch p.tok {
+ case token.DEFINE:
+ // lhs of a short variable declaration
+ p.shortVarDecl(p.makeIdentList(list))
+ case token.COLON:
+ // lhs of a label declaration or a communication clause of a select
+ // statement (parseLhsList is not called when parsing the case clause
+ // of a switch statement):
+ // - labels are declared by the caller of parseLhsList
+ // - for communication clauses, if there is a stand-alone identifier
+ // followed by a colon, we have a syntax error; there is no need
+ // to resolve the identifier in that case
+ default:
+ // identifiers must be declared elsewhere
+ for _, x := range list {
+ p.resolve(x)
+ }
+ }
+ return list
+}
+
+
+func (p *parser) parseRhsList() []ast.Expr {
+ return p.parseExprList(false)
+}
+
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) parseType() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Type"))
+ }
+
+ typ := p.tryType()
+
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ return &ast.BadExpr{pos, p.pos}
+ }
+
+ return typ
+}
+
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseTypeName() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeName"))
+ }
+
+ ident := p.parseIdent()
+ // don't resolve ident yet - it may be a parameter or field name
+
+ if p.tok == token.PERIOD {
+ // ident is a package name
+ p.next()
+ p.resolve(ident)
+ sel := p.parseIdent()
+ return &ast.SelectorExpr{ident, sel}
+ }
+
+ return ident
+}
+
+
+func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "ArrayType"))
+ }
+
+ lbrack := p.expect(token.LBRACK)
+ var len ast.Expr
+ if ellipsisOk && p.tok == token.ELLIPSIS {
+ len = &ast.Ellipsis{p.pos, nil}
+ p.next()
+ } else if p.tok != token.RBRACK {
+ len = p.parseRhs()
+ }
+ p.expect(token.RBRACK)
+ elt := p.parseType()
+
+ return &ast.ArrayType{lbrack, len, elt}
+}
+
+
+func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
+ idents := make([]*ast.Ident, len(list))
+ for i, x := range list {
+ ident, isIdent := x.(*ast.Ident)
+ if !isIdent {
+ pos := x.(ast.Expr).Pos()
+ p.errorExpected(pos, "identifier")
+ ident = &ast.Ident{pos, "_", nil}
+ }
+ idents[i] = ident
+ }
+ return idents
+}
+
+
+func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "FieldDecl"))
+ }
+
+ doc := p.leadComment
+
+ // fields
+ list, typ := p.parseVarList(false)
+
+ // optional tag
+ var tag *ast.BasicLit
+ if p.tok == token.STRING {
+ tag = &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ }
+
+ // analyze case
+ var idents []*ast.Ident
+ if typ != nil {
+ // IdentifierList Type
+ idents = p.makeIdentList(list)
+ } else {
+ // ["*"] TypeName (AnonymousField)
+ typ = list[0] // we always have at least one element
+ p.resolve(typ)
+ if n := len(list); n > 1 || !isTypeName(deref(typ)) {
+ pos := typ.Pos()
+ p.errorExpected(pos, "anonymous field")
+ typ = &ast.BadExpr{pos, list[n-1].End()}
+ }
+ }
+
+ p.expectSemi() // call before accessing p.linecomment
+
+ field := &ast.Field{doc, idents, typ, tag, p.lineComment}
+ p.declare(field, scope, ast.Var, idents...)
+
+ return field
+}
+
+
+func (p *parser) parseStructType() *ast.StructType {
+ if p.trace {
+ defer un(trace(p, "StructType"))
+ }
+
+ pos := p.expect(token.STRUCT)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // struct scope
+ var list []*ast.Field
+ for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
+ // a field declaration cannot start with a '(' but we accept
+ // it here for more robust parsing and better error messages
+ // (parseFieldDecl will check and complain if necessary)
+ list = append(list, p.parseFieldDecl(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ // TODO(gri): store struct scope in AST
+ return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+
+func (p *parser) parsePointerType() *ast.StarExpr {
+ if p.trace {
+ defer un(trace(p, "PointerType"))
+ }
+
+ star := p.expect(token.MUL)
+ base := p.parseType()
+
+ return &ast.StarExpr{star, base}
+}
+
+
+func (p *parser) tryVarType(isParam bool) ast.Expr {
+ if isParam && p.tok == token.ELLIPSIS {
+ pos := p.pos
+ p.next()
+ typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
+ if typ == nil {
+ p.error(pos, "'...' parameter is missing type")
+ typ = &ast.BadExpr{pos, p.pos}
+ }
+ if p.tok != token.RPAREN {
+ p.error(pos, "can use '...' with last parameter type only")
+ }
+ return &ast.Ellipsis{pos, typ}
+ }
+ return p.tryIdentOrType(false)
+}
+
+
+func (p *parser) parseVarType(isParam bool) ast.Expr {
+ typ := p.tryVarType(isParam)
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ typ = &ast.BadExpr{pos, p.pos}
+ }
+ return typ
+}
+
+
+func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "VarList"))
+ }
+
+ // a list of identifiers looks like a list of type names
+ for {
+ // parseVarType accepts any type (including parenthesized ones)
+ // even though the syntax does not permit them here: we
+ // accept them all for more robust parsing and complain
+ // afterwards
+ list = append(list, p.parseVarType(isParam))
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ // if we had a list of identifiers, it must be followed by a type
+ typ = p.tryVarType(isParam)
+ if typ != nil {
+ p.resolve(typ)
+ }
+
+ return
+}
+
+
+func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
+ if p.trace {
+ defer un(trace(p, "ParameterList"))
+ }
+
+ list, typ := p.parseVarList(ellipsisOk)
+ if typ != nil {
+ // IdentifierList Type
+ idents := p.makeIdentList(list)
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
+ if p.tok == token.COMMA {
+ p.next()
+ }
+
+ for p.tok != token.RPAREN && p.tok != token.EOF {
+ idents := p.parseIdentList()
+ typ := p.parseVarType(ellipsisOk)
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ } else {
+ // Type { "," Type } (anonymous parameters)
+ params = make([]*ast.Field, len(list))
+ for i, x := range list {
+ p.resolve(x)
+ params[i] = &ast.Field{Type: x}
+ }
+ }
+
+ return
+}
+
+
+func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Parameters"))
+ }
+
+ var params []*ast.Field
+ lparen := p.expect(token.LPAREN)
+ if p.tok != token.RPAREN {
+ params = p.parseParameterList(scope, ellipsisOk)
+ }
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.FieldList{lparen, params, rparen}
+}
+
+
+func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Result"))
+ }
+
+ if p.tok == token.LPAREN {
+ return p.parseParameters(scope, false)
+ }
+
+ typ := p.tryType()
+ if typ != nil {
+ list := make([]*ast.Field, 1)
+ list[0] = &ast.Field{Type: typ}
+ return &ast.FieldList{List: list}
+ }
+
+ return nil
+}
+
+
+func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
+ if p.trace {
+ defer un(trace(p, "Signature"))
+ }
+
+ params = p.parseParameters(scope, true)
+ results = p.parseResult(scope)
+
+ return
+}
+
+
+func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
+ if p.trace {
+ defer un(trace(p, "FuncType"))
+ }
+
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+ params, results := p.parseSignature(scope)
+
+ return &ast.FuncType{pos, params, results}, scope
+}
+
+
+func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "MethodSpec"))
+ }
+
+ doc := p.leadComment
+ var idents []*ast.Ident
+ var typ ast.Expr
+ x := p.parseTypeName()
+ if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
+ // method
+ idents = []*ast.Ident{ident}
+ scope := ast.NewScope(nil) // method scope
+ params, results := p.parseSignature(scope)
+ typ = &ast.FuncType{token.NoPos, params, results}
+ } else {
+ // embedded interface
+ typ = x
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
+ p.declare(spec, scope, ast.Fun, idents...)
+
+ return spec
+}
+
+
+func (p *parser) parseInterfaceType() *ast.InterfaceType {
+ if p.trace {
+ defer un(trace(p, "InterfaceType"))
+ }
+
+ pos := p.expect(token.INTERFACE)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // interface scope
+ var list []*ast.Field
+ for p.tok == token.IDENT {
+ list = append(list, p.parseMethodSpec(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ // TODO(gri): store interface scope in AST
+ return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+
+func (p *parser) parseMapType() *ast.MapType {
+ if p.trace {
+ defer un(trace(p, "MapType"))
+ }
+
+ pos := p.expect(token.MAP)
+ p.expect(token.LBRACK)
+ key := p.parseType()
+ p.expect(token.RBRACK)
+ value := p.parseType()
+
+ return &ast.MapType{pos, key, value}
+}
+
+
+func (p *parser) parseChanType() *ast.ChanType {
+ if p.trace {
+ defer un(trace(p, "ChanType"))
+ }
+
+ pos := p.pos
+ dir := ast.SEND | ast.RECV
+ if p.tok == token.CHAN {
+ p.next()
+ if p.tok == token.ARROW {
+ p.next()
+ dir = ast.SEND
+ }
+ } else {
+ p.expect(token.ARROW)
+ p.expect(token.CHAN)
+ dir = ast.RECV
+ }
+ value := p.parseType()
+
+ return &ast.ChanType{pos, dir, value}
+}
+
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
+ switch p.tok {
+ case token.IDENT:
+ return p.parseTypeName()
+ case token.LBRACK:
+ return p.parseArrayType(ellipsisOk)
+ case token.STRUCT:
+ return p.parseStructType()
+ case token.MUL:
+ return p.parsePointerType()
+ case token.FUNC:
+ typ, _ := p.parseFuncType()
+ return typ
+ case token.INTERFACE:
+ return p.parseInterfaceType()
+ case token.MAP:
+ return p.parseMapType()
+ case token.CHAN, token.ARROW:
+ return p.parseChanType()
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ typ := p.parseType()
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{lparen, typ, rparen}
+ }
+
+ // no type found
+ return nil
+}
+
+
+func (p *parser) tryType() ast.Expr {
+ typ := p.tryIdentOrType(false)
+ if typ != nil {
+ p.resolve(typ)
+ }
+ return typ
+}
+
+
+// ----------------------------------------------------------------------------
+// Blocks
+
+func (p *parser) parseStmtList() (list []ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "StatementList"))
+ }
+
+ for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseStmt())
+ }
+
+ return
+}
+
+
+func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "Body"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.topScope = scope // open function scope
+ p.openLabelScope()
+ list := p.parseStmtList()
+ p.closeLabelScope()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+
+func (p *parser) parseBlockStmt() *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "BlockStmt"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.openScope()
+ list := p.parseStmtList()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) parseFuncTypeOrLit() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "FuncTypeOrLit"))
+ }
+
+ typ, scope := p.parseFuncType()
+ if p.tok != token.LBRACE {
+ // function type only
+ return typ
+ }
+
+ p.exprLev++
+ body := p.parseBody(scope)
+ p.exprLev--
+
+ return &ast.FuncLit{typ, body}
+}
+
+
+// parseOperand may return an expression or a raw type (incl. array
+// types of the form [...]T). Callers must verify the result.
+// If lhs is set and the result is an identifier, it is not resolved.
+//
+func (p *parser) parseOperand(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Operand"))
+ }
+
+ switch p.tok {
+ case token.IDENT:
+ x := p.parseIdent()
+ if !lhs {
+ p.resolve(x)
+ }
+ return x
+
+ case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
+ x := &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ return x
+
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ p.exprLev++
+ x := p.parseRhs()
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{lparen, x, rparen}
+
+ case token.FUNC:
+ return p.parseFuncTypeOrLit()
+
+ default:
+ if typ := p.tryIdentOrType(true); typ != nil {
+ // could be type for composite literal or conversion
+ _, isIdent := typ.(*ast.Ident)
+ assert(!isIdent, "type cannot be identifier")
+ return typ
+ }
+ }
+
+ pos := p.pos
+ p.errorExpected(pos, "operand")
+ p.next() // make progress
+ return &ast.BadExpr{pos, p.pos}
+}
+
+
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Selector"))
+ }
+
+ sel := p.parseIdent()
+
+ return &ast.SelectorExpr{x, sel}
+}
+
+
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeAssertion"))
+ }
+
+ p.expect(token.LPAREN)
+ var typ ast.Expr
+ if p.tok == token.TYPE {
+ // type switch: typ == nil
+ p.next()
+ } else {
+ typ = p.parseType()
+ }
+ p.expect(token.RPAREN)
+
+ return &ast.TypeAssertExpr{x, typ}
+}
+
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "IndexOrSlice"))
+ }
+
+ lbrack := p.expect(token.LBRACK)
+ p.exprLev++
+ var low, high ast.Expr
+ isSlice := false
+ if p.tok != token.COLON {
+ low = p.parseRhs()
+ }
+ if p.tok == token.COLON {
+ isSlice = true
+ p.next()
+ if p.tok != token.RBRACK {
+ high = p.parseRhs()
+ }
+ }
+ p.exprLev--
+ rbrack := p.expect(token.RBRACK)
+
+ if isSlice {
+ return &ast.SliceExpr{x, lbrack, low, high, rbrack}
+ }
+ return &ast.IndexExpr{x, lbrack, low, rbrack}
+}
+
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
+ if p.trace {
+ defer un(trace(p, "CallOrConversion"))
+ }
+
+ lparen := p.expect(token.LPAREN)
+ p.exprLev++
+ var list []ast.Expr
+ var ellipsis token.Pos
+ for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
+ list = append(list, p.parseRhs())
+ if p.tok == token.ELLIPSIS {
+ ellipsis = p.pos
+ p.next()
+ }
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
+}
+
+
+func (p *parser) parseElement(keyOk bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Element"))
+ }
+
+ if p.tok == token.LBRACE {
+ return p.parseLiteralValue(nil)
+ }
+
+ x := p.parseExpr(keyOk) // don't resolve if map key
+ if keyOk {
+ if p.tok == token.COLON {
+ colon := p.pos
+ p.next()
+ return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ }
+ p.resolve(x) // not a map key
+ }
+
+ return x
+}
+
+
+func (p *parser) parseElementList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ElementList"))
+ }
+
+ for p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseElement(true))
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ return
+}
+
+
+func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "LiteralValue"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ var elts []ast.Expr
+ p.exprLev++
+ if p.tok != token.RBRACE {
+ elts = p.parseElementList()
+ }
+ p.exprLev--
+ rbrace := p.expect(token.RBRACE)
+ return &ast.CompositeLit{typ, lbrace, elts, rbrace}
+}
+
+
+// checkExpr checks that x is an expression (and not a type).
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+ switch t := unparen(x).(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.BasicLit:
+ case *ast.FuncLit:
+ case *ast.CompositeLit:
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.SelectorExpr:
+ case *ast.IndexExpr:
+ case *ast.SliceExpr:
+ case *ast.TypeAssertExpr:
+ if t.Type == nil {
+ // the form X.(type) is only allowed in type switch expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.CallExpr:
+ case *ast.StarExpr:
+ case *ast.UnaryExpr:
+ if t.Op == token.RANGE {
+ // the range operator is only allowed at the top of a for statement
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.BinaryExpr:
+ default:
+ // all other nodes are not proper expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ return x
+}
+
+
+// isTypeName returns true iff x is a (qualified) TypeName.
+func isTypeName(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ default:
+ return false // all other nodes are not type names
+ }
+ return true
+}
+
+
+// isLiteralType returns true iff x is a legal composite literal type.
+func isLiteralType(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ case *ast.ArrayType:
+ case *ast.StructType:
+ case *ast.MapType:
+ default:
+ return false // all other nodes are not legal composite literal types
+ }
+ return true
+}
+
+
+// If x is of the form *T, deref returns T, otherwise it returns x.
+func deref(x ast.Expr) ast.Expr {
+ if p, isPtr := x.(*ast.StarExpr); isPtr {
+ x = p.X
+ }
+ return x
+}
+
+
+// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+ if p, isParen := x.(*ast.ParenExpr); isParen {
+ x = unparen(p.X)
+ }
+ return x
+}
+
+
+// checkExprOrType checks that x is an expression or a type
+// (and not a raw type such as [...]T).
+//
+func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
+ switch t := unparen(x).(type) {
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.UnaryExpr:
+ if t.Op == token.RANGE {
+ // the range operator is only allowed at the top of a for statement
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.ArrayType:
+ if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
+ p.error(len.Pos(), "expected array length, found '...'")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ }
+
+ // all other nodes are expressions or types
+ return x
+}
+
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "PrimaryExpr"))
+ }
+
+ x := p.parseOperand(lhs)
+L:
+ for {
+ switch p.tok {
+ case token.PERIOD:
+ p.next()
+ if lhs {
+ p.resolve(x)
+ }
+ switch p.tok {
+ case token.IDENT:
+ x = p.parseSelector(p.checkExpr(x))
+ case token.LPAREN:
+ x = p.parseTypeAssertion(p.checkExpr(x))
+ default:
+ pos := p.pos
+ p.next() // make progress
+ p.errorExpected(pos, "selector or type assertion")
+ x = &ast.BadExpr{pos, p.pos}
+ }
+ case token.LBRACK:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseIndexOrSlice(p.checkExpr(x))
+ case token.LPAREN:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseCallOrConversion(p.checkExprOrType(x))
+ case token.LBRACE:
+ if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseLiteralValue(x)
+ } else {
+ break L
+ }
+ default:
+ break L
+ }
+ lhs = false // no need to try to resolve again
+ }
+
+ return x
+}
+
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "UnaryExpr"))
+ }
+
+ switch p.tok {
+ case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE:
+ pos, op := p.pos, p.tok
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
+
+ case token.ARROW:
+ // channel type or receive expression
+ pos := p.pos
+ p.next()
+ if p.tok == token.CHAN {
+ p.next()
+ value := p.parseType()
+ return &ast.ChanType{pos, ast.RECV, value}
+ }
+
+ x := p.parseUnaryExpr(false)
+ return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}
+
+ case token.MUL:
+ // pointer type or unary "*" expression
+ pos := p.pos
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.StarExpr{pos, p.checkExprOrType(x)}
+ }
+
+ return p.parsePrimaryExpr(lhs)
+}
+
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "BinaryExpr"))
+ }
+
+ x := p.parseUnaryExpr(lhs)
+ for prec := p.tok.Precedence(); prec >= prec1; prec-- {
+ for p.tok.Precedence() == prec {
+ pos, op := p.pos, p.tok
+ p.next()
+ if lhs {
+ p.resolve(x)
+ lhs = false
+ }
+ y := p.parseBinaryExpr(false, prec+1)
+ x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
+ }
+ }
+
+ return x
+}
+
+
+// If lhs is set and the result is an identifier, it is not resolved.
+// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
+// should reject when a type/raw type is obviously not allowed
+func (p *parser) parseExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Expression"))
+ }
+
+ return p.parseBinaryExpr(lhs, token.LowestPrec+1)
+}
+
+
+func (p *parser) parseRhs() ast.Expr {
+ return p.parseExpr(false)
+}
+
+
+// ----------------------------------------------------------------------------
+// Statements
+
+func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "SimpleStmt"))
+ }
+
+ x := p.parseLhsList()
+
+ switch p.tok {
+ case
+ token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
+ token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
+ token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
+ token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
+ // assignment statement
+ pos, tok := p.pos, p.tok
+ p.next()
+ y := p.parseRhsList()
+ return &ast.AssignStmt{x, pos, tok, y}
+ }
+
+ if len(x) > 1 {
+ p.errorExpected(x[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+
+ switch p.tok {
+ case token.COLON:
+ // labeled statement
+ colon := p.pos
+ p.next()
+ if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
+ // Go spec: The scope of a label is the body of the function
+ // in which it is declared and excludes the body of any nested
+ // function.
+ stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
+ p.declare(stmt, p.labelScope, ast.Lbl, label)
+ return stmt
+ }
+ p.error(x[0].Pos(), "illegal label declaration")
+ return &ast.BadStmt{x[0].Pos(), colon + 1}
+
+ case token.ARROW:
+ // send statement
+ arrow := p.pos
+ p.next() // consume "<-"
+ y := p.parseRhs()
+ return &ast.SendStmt{x[0], arrow, y}
+
+ case token.INC, token.DEC:
+ // increment or decrement
+ s := &ast.IncDecStmt{x[0], p.pos, p.tok}
+ p.next() // consume "++" or "--"
+ return s
+ }
+
+ // expression
+ return &ast.ExprStmt{x[0]}
+}
+
+
+func (p *parser) parseCallExpr() *ast.CallExpr {
+ x := p.parseRhs()
+ if call, isCall := x.(*ast.CallExpr); isCall {
+ return call
+ }
+ p.errorExpected(x.Pos(), "function/method call")
+ return nil
+}
+
+
+func (p *parser) parseGoStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "GoStmt"))
+ }
+
+ pos := p.expect(token.GO)
+ call := p.parseCallExpr()
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{pos, pos + 2} // len("go")
+ }
+
+ return &ast.GoStmt{pos, call}
+}
+
+
+func (p *parser) parseDeferStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "DeferStmt"))
+ }
+
+ pos := p.expect(token.DEFER)
+ call := p.parseCallExpr()
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{pos, pos + 5} // len("defer")
+ }
+
+ return &ast.DeferStmt{pos, call}
+}
+
+
+func (p *parser) parseReturnStmt() *ast.ReturnStmt {
+ if p.trace {
+ defer un(trace(p, "ReturnStmt"))
+ }
+
+ pos := p.pos
+ p.expect(token.RETURN)
+ var x []ast.Expr
+ if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
+ x = p.parseRhsList()
+ }
+ p.expectSemi()
+
+ return &ast.ReturnStmt{pos, x}
+}
+
+
+func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
+ if p.trace {
+ defer un(trace(p, "BranchStmt"))
+ }
+
+ pos := p.expect(tok)
+ var label *ast.Ident
+ if tok != token.FALLTHROUGH && p.tok == token.IDENT {
+ label = p.parseIdent()
+ // add to list of unresolved targets
+ n := len(p.targetStack) - 1
+ p.targetStack[n] = append(p.targetStack[n], label)
+ }
+ p.expectSemi()
+
+ return &ast.BranchStmt{pos, tok, label}
+}
+
+
+func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
+ if s == nil {
+ return nil
+ }
+ if es, isExpr := s.(*ast.ExprStmt); isExpr {
+ return p.checkExpr(es.X)
+ }
+ p.error(s.Pos(), "expected condition, found simple statement")
+ return &ast.BadExpr{s.Pos(), s.End()}
+}
+
+
+func (p *parser) parseIfStmt() *ast.IfStmt {
+ if p.trace {
+ defer un(trace(p, "IfStmt"))
+ }
+
+ pos := p.expect(token.IF)
+ p.openScope()
+ defer p.closeScope()
+
+ var s ast.Stmt
+ var x ast.Expr
+ {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ s = p.parseSimpleStmt(false)
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ x = p.makeExpr(s)
+ s = nil
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ var else_ ast.Stmt
+ if p.tok == token.ELSE {
+ p.next()
+ else_ = p.parseStmt()
+ } else {
+ p.expectSemi()
+ }
+
+ return &ast.IfStmt{pos, s, x, body, else_}
+}
+
+
+func (p *parser) parseTypeList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "TypeList"))
+ }
+
+ list = append(list, p.parseType())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseType())
+ }
+
+ return
+}
+
+
+func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
+ if p.trace {
+ defer un(trace(p, "CaseClause"))
+ }
+
+ pos := p.pos
+ var list []ast.Expr
+ if p.tok == token.CASE {
+ p.next()
+ if exprSwitch {
+ list = p.parseRhsList()
+ } else {
+ list = p.parseTypeList()
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ p.openScope()
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CaseClause{pos, list, colon, body}
+}
+
+
+func isExprSwitch(s ast.Stmt) bool {
+ if s == nil {
+ return true
+ }
+ if e, ok := s.(*ast.ExprStmt); ok {
+ if a, ok := e.X.(*ast.TypeAssertExpr); ok {
+ return a.Type != nil // regular type assertion
+ }
+ return true
+ }
+ return false
+}
+
+
+func (p *parser) parseSwitchStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "SwitchStmt"))
+ }
+
+ pos := p.expect(token.SWITCH)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2 ast.Stmt
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ if p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.LBRACE {
+ s2 = p.parseSimpleStmt(false)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ exprSwitch := isExprSwitch(s2)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCaseClause(exprSwitch))
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{lbrace, list, rbrace}
+
+ if exprSwitch {
+ return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
+ }
+ // type switch
+ // TODO(gri): do all the checks!
+ return &ast.TypeSwitchStmt{pos, s1, s2, body}
+}
+
+
+func (p *parser) parseCommClause() *ast.CommClause {
+ if p.trace {
+ defer un(trace(p, "CommClause"))
+ }
+
+ p.openScope()
+ pos := p.pos
+ var comm ast.Stmt
+ if p.tok == token.CASE {
+ p.next()
+ lhs := p.parseLhsList()
+ if p.tok == token.ARROW {
+ // SendStmt
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ arrow := p.pos
+ p.next()
+ rhs := p.parseRhs()
+ comm = &ast.SendStmt{lhs[0], arrow, rhs}
+ } else {
+ // RecvStmt
+ pos := p.pos
+ tok := p.tok
+ var rhs ast.Expr
+ if tok == token.ASSIGN || tok == token.DEFINE {
+ // RecvStmt with assignment
+ if len(lhs) > 2 {
+ p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
+ // continue with first two expressions
+ lhs = lhs[0:2]
+ }
+ p.next()
+ rhs = p.parseRhs()
+ } else {
+ // rhs must be single receive operation
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ rhs = lhs[0]
+ lhs = nil // there is no lhs
+ }
+ if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW {
+ p.errorExpected(rhs.Pos(), "send or receive operation")
+ rhs = &ast.BadExpr{rhs.Pos(), rhs.End()}
+ }
+ if lhs != nil {
+ comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
+ } else {
+ comm = &ast.ExprStmt{rhs}
+ }
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CommClause{pos, comm, colon, body}
+}
+
+
+func (p *parser) parseSelectStmt() *ast.SelectStmt {
+ if p.trace {
+ defer un(trace(p, "SelectStmt"))
+ }
+
+ pos := p.expect(token.SELECT)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCommClause())
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{lbrace, list, rbrace}
+
+ return &ast.SelectStmt{pos, body}
+}
+
+
+func (p *parser) parseForStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "ForStmt"))
+ }
+
+ pos := p.expect(token.FOR)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2, s3 ast.Stmt
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ if p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ p.expectSemi()
+ if p.tok != token.LBRACE {
+ s3 = p.parseSimpleStmt(false)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ p.expectSemi()
+
+ if as, isAssign := s2.(*ast.AssignStmt); isAssign {
+ // possibly a for statement with a range clause; check assignment operator
+ if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
+ p.errorExpected(as.TokPos, "'=' or ':='")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ // check lhs
+ var key, value ast.Expr
+ switch len(as.Lhs) {
+ case 2:
+ key, value = as.Lhs[0], as.Lhs[1]
+ case 1:
+ key = as.Lhs[0]
+ default:
+ p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ // check rhs
+ if len(as.Rhs) != 1 {
+ p.errorExpected(as.Rhs[0].Pos(), "1 expression")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
+ // rhs is range expression
+		// (any short variable declaration was handled by parseSimpleStmt above)
+ return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
+ }
+ p.errorExpected(s2.Pos(), "range clause")
+ return &ast.BadStmt{pos, body.End()}
+ }
+
+ // regular for statement
+ return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
+}
+
+
+func (p *parser) parseStmt() (s ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "Statement"))
+ }
+
+ switch p.tok {
+ case token.CONST, token.TYPE, token.VAR:
+ s = &ast.DeclStmt{p.parseDecl()}
+ case
+ // tokens that may start a top-level expression
+ token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
+ token.LBRACK, token.STRUCT, // composite type
+ token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
+ s = p.parseSimpleStmt(true)
+ // because of the required look-ahead, labeled statements are
+ // parsed by parseSimpleStmt - don't expect a semicolon after
+ // them
+ if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
+ p.expectSemi()
+ }
+ case token.GO:
+ s = p.parseGoStmt()
+ case token.DEFER:
+ s = p.parseDeferStmt()
+ case token.RETURN:
+ s = p.parseReturnStmt()
+ case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
+ s = p.parseBranchStmt(p.tok)
+ case token.LBRACE:
+ s = p.parseBlockStmt()
+ p.expectSemi()
+ case token.IF:
+ s = p.parseIfStmt()
+ case token.SWITCH:
+ s = p.parseSwitchStmt()
+ case token.SELECT:
+ s = p.parseSelectStmt()
+ case token.FOR:
+ s = p.parseForStmt()
+ case token.SEMICOLON:
+ s = &ast.EmptyStmt{p.pos}
+ p.next()
+ case token.RBRACE:
+ // a semicolon may be omitted before a closing "}"
+ s = &ast.EmptyStmt{p.pos}
+ default:
+ // no statement found
+ pos := p.pos
+ p.errorExpected(pos, "statement")
+ p.next() // make progress
+ s = &ast.BadStmt{pos, p.pos}
+ }
+
+ return
+}
+
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
+
+
+func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "ImportSpec"))
+ }
+
+ var ident *ast.Ident
+ switch p.tok {
+ case token.PERIOD:
+ ident = &ast.Ident{p.pos, ".", nil}
+ p.next()
+ case token.IDENT:
+ ident = p.parseIdent()
+ }
+
+ var path *ast.BasicLit
+ if p.tok == token.STRING {
+ path = &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ } else {
+ p.expect(token.STRING) // use expect() error handling
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ // collect imports
+ spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
+ p.imports = append(p.imports, spec)
+
+ return spec
+}
+
+
+func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "ConstSpec"))
+ }
+
+ idents := p.parseIdentList()
+ typ := p.tryType()
+ var values []ast.Expr
+ if typ != nil || p.tok == token.ASSIGN || iota == 0 {
+ p.expect(token.ASSIGN)
+ values = p.parseRhsList()
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Con, idents...)
+
+ return spec
+}
+
+
+func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "TypeSpec"))
+ }
+
+ ident := p.parseIdent()
+
+ // Go spec: The scope of a type identifier declared inside a function begins
+ // at the identifier in the TypeSpec and ends at the end of the innermost
+ // containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.TypeSpec{doc, ident, nil, nil}
+ p.declare(spec, p.topScope, ast.Typ, ident)
+
+ spec.Type = p.parseType()
+ p.expectSemi() // call before accessing p.linecomment
+ spec.Comment = p.lineComment
+
+ return spec
+}
+
+
+func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "VarSpec"))
+ }
+
+ idents := p.parseIdentList()
+ typ := p.tryType()
+ var values []ast.Expr
+ if typ == nil || p.tok == token.ASSIGN {
+ p.expect(token.ASSIGN)
+ values = p.parseRhsList()
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Var, idents...)
+
+ return spec
+}
+
+
+func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
+ if p.trace {
+ defer un(trace(p, "GenDecl("+keyword.String()+")"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(keyword)
+ var lparen, rparen token.Pos
+ var list []ast.Spec
+ if p.tok == token.LPAREN {
+ lparen = p.pos
+ p.next()
+ for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
+ list = append(list, f(p, p.leadComment, iota))
+ }
+ rparen = p.expect(token.RPAREN)
+ p.expectSemi()
+ } else {
+ list = append(list, f(p, nil, 0))
+ }
+
+ return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
+}
+
+
+func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Receiver"))
+ }
+
+ pos := p.pos
+ par := p.parseParameters(scope, false)
+
+ // must have exactly one receiver
+ if par.NumFields() != 1 {
+ p.errorExpected(pos, "exactly one receiver")
+ // TODO determine a better range for BadExpr below
+ par.List = []*ast.Field{&ast.Field{Type: &ast.BadExpr{pos, pos}}}
+ return par
+ }
+
+ // recv type must be of the form ["*"] identifier
+ recv := par.List[0]
+ base := deref(recv.Type)
+ if _, isIdent := base.(*ast.Ident); !isIdent {
+ p.errorExpected(base.Pos(), "(unqualified) identifier")
+ par.List = []*ast.Field{&ast.Field{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
+ }
+
+ return par
+}
+
+
+func (p *parser) parseFuncDecl() *ast.FuncDecl {
+ if p.trace {
+ defer un(trace(p, "FunctionDecl"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+
+ var recv *ast.FieldList
+ if p.tok == token.LPAREN {
+ recv = p.parseReceiver(scope)
+ }
+
+ ident := p.parseIdent()
+
+ params, results := p.parseSignature(scope)
+
+ var body *ast.BlockStmt
+ if p.tok == token.LBRACE {
+ body = p.parseBody(scope)
+ }
+ p.expectSemi()
+
+ decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
+ if recv == nil {
+ // Go spec: The scope of an identifier denoting a constant, type,
+ // variable, or function (but not method) declared at top level
+ // (outside any function) is the package block.
+ //
+ // init() functions cannot be referred to and there may
+ // be more than one - don't put them in the pkgScope
+ if ident.Name != "init" {
+ p.declare(decl, p.pkgScope, ast.Fun, ident)
+ }
+ }
+
+ return decl
+}
+
+
+func (p *parser) parseDecl() ast.Decl {
+ if p.trace {
+ defer un(trace(p, "Declaration"))
+ }
+
+ var f parseSpecFunction
+ switch p.tok {
+ case token.CONST:
+ f = parseConstSpec
+
+ case token.TYPE:
+ f = parseTypeSpec
+
+ case token.VAR:
+ f = parseVarSpec
+
+ case token.FUNC:
+ return p.parseFuncDecl()
+
+ default:
+ pos := p.pos
+ p.errorExpected(pos, "declaration")
+ p.next() // make progress
+ decl := &ast.BadDecl{pos, p.pos}
+ return decl
+ }
+
+ return p.parseGenDecl(p.tok, f)
+}
+
+
+func (p *parser) parseDeclList() (list []ast.Decl) {
+ if p.trace {
+ defer un(trace(p, "DeclList"))
+ }
+
+ for p.tok != token.EOF {
+ list = append(list, p.parseDecl())
+ }
+
+ return
+}
+
+
+// ----------------------------------------------------------------------------
+// Source files
+
+func (p *parser) parseFile() *ast.File {
+ if p.trace {
+ defer un(trace(p, "File"))
+ }
+
+ // package clause
+ doc := p.leadComment
+ pos := p.expect(token.PACKAGE)
+ // Go spec: The package clause is not a declaration;
+ // the package name does not appear in any scope.
+ ident := p.parseIdent()
+ if ident.Name == "_" {
+ p.error(p.pos, "invalid package name _")
+ }
+ p.expectSemi()
+
+ var decls []ast.Decl
+
+ // Don't bother parsing the rest if we had errors already.
+ // Likely not a Go source file at all.
+
+ if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
+ // import decls
+ for p.tok == token.IMPORT {
+ decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
+ }
+
+ if p.mode&ImportsOnly == 0 {
+ // rest of package body
+ for p.tok != token.EOF {
+ decls = append(decls, p.parseDecl())
+ }
+ }
+ }
+
+ assert(p.topScope == p.pkgScope, "imbalanced scopes")
+
+ // resolve global identifiers within the same file
+ i := 0
+ for _, ident := range p.unresolved {
+ // i <= index for current ident
+ assert(ident.Obj == unresolved, "object already resolved")
+ ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
+ if ident.Obj == nil {
+ p.unresolved[i] = ident
+ i++
+ }
+ }
+
+ // TODO(gri): store p.imports in AST
+ return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
+}
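
The lead comment / line comment rules documented on next() above are easiest to see on a small declaration. The following is a minimal sketch, not part of the change; the package and identifier names are made up for illustration:

package example

// doc has a lead comment: the comment group occupies its own lines and the
// declaration follows on the line immediately after it, so the parser stores
// it in p.leadComment and attaches it as documentation.
func doc() {}

var x int // a line comment: it follows a token on the same line, with nothing after it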
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go
index 8afb00ee5..2d56bfb25 100644
--- a/src/pkg/go/scanner/scanner_test.go
+++ b/src/pkg/go/scanner/scanner_test.go
@@ -89,7 +89,7 @@ var tokens = [...]elt{
literal,
},
- // Operators and delimitors
+ // Operators and delimiters
{token.ADD, "+", operator},
{token.SUB, "-", operator},
{token.MUL, "*", operator},
diff --git a/src/pkg/go/token/position.go b/src/pkg/go/token/position.go
index 809e53f0a..23a3cc00f 100644
--- a/src/pkg/go/token/position.go
+++ b/src/pkg/go/token/position.go
@@ -94,10 +94,14 @@ func searchFiles(a []*File, x int) int {
func (s *FileSet) file(p Pos) *File {
+ if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+ return f
+ }
if i := searchFiles(s.files, int(p)); i >= 0 {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
+ s.last = f
return f
}
}
@@ -131,7 +135,7 @@ func (f *File) position(p Pos) (pos Position) {
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
// TODO(gri) consider optimizing the case where p
- // is in the last file addded, or perhaps
+ // is in the last file added, or perhaps
// looked at - will eliminate one level
// of search
s.mutex.RLock()
@@ -316,8 +320,26 @@ func (f *File) Position(p Pos) (pos Position) {
}
-func searchUints(a []int, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+func searchInts(a []int, x int) int {
+ // This function body is a manually inlined version of:
+ //
+ // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+ //
+ // With better compiler optimizations, this may not be needed in the
+ // future, but at the moment this change improves the go/printer
+ // benchmark performance by ~30%. This has a direct impact on the
+ // speed of gofmt and thus seems worthwhile (2011-04-29).
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if a[h] <= x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i - 1
}
@@ -329,14 +351,17 @@ func searchLineInfos(a []lineInfo, x int) int {
// info returns the file name, line, and column number for a file offset.
func (f *File) info(offset int) (filename string, line, column int) {
filename = f.name
- if i := searchUints(f.lines, offset); i >= 0 {
+ if i := searchInts(f.lines, offset); i >= 0 {
line, column = i+1, offset-f.lines[i]+1
}
- if i := searchLineInfos(f.infos, offset); i >= 0 {
- alt := &f.infos[i]
- filename = alt.filename
- if i := searchUints(f.lines, alt.offset); i >= 0 {
- line += alt.line - i - 1
+ if len(f.infos) > 0 {
+ // almost no files have extra line infos
+ if i := searchLineInfos(f.infos, offset); i >= 0 {
+ alt := &f.infos[i]
+ filename = alt.filename
+ if i := searchInts(f.lines, alt.offset); i >= 0 {
+ line += alt.line - i - 1
+ }
}
}
return
@@ -348,10 +373,10 @@ func (f *File) info(offset int) (filename string, line, column int) {
// may invoke them concurrently.
//
type FileSet struct {
- mutex sync.RWMutex // protects the file set
- base int // base offset for the next file
- files []*File // list of files in the order added to the set
- index map[*File]int // file -> files index for quick lookup
+ mutex sync.RWMutex // protects the file set
+ base int // base offset for the next file
+ files []*File // list of files in the order added to the set
+ last *File // cache of last file looked up
}
@@ -359,7 +384,6 @@ type FileSet struct {
func NewFileSet() *FileSet {
s := new(FileSet)
s.base = 1 // 0 == NoPos
- s.index = make(map[*File]int)
return s
}
@@ -405,8 +429,8 @@ func (s *FileSet) AddFile(filename string, base, size int) *File {
}
// add the file to the file set
s.base = base
- s.index[f] = len(s.files)
s.files = append(s.files, f)
+ s.last = f
return f
}
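
As a side note, searchInts above returns the index of the last element less than or equal to x (or -1 if there is none), which is exactly what is needed to map a byte offset to a line index. A small self-contained sketch, using made-up line offsets, shows the intended behaviour:

package main

import "fmt"

// searchInts mirrors the helper added above: a manually inlined
// sort.Search that finds the last index i with a[i] <= x.
func searchInts(a []int, x int) int {
	i, j := 0, len(a)
	for i < j {
		h := i + (j-i)/2 // avoid overflow when computing h
		if a[h] <= x {
			i = h + 1
		} else {
			j = h
		}
	}
	return i - 1
}

func main() {
	lines := []int{0, 12, 30}              // hypothetical: lines 1, 2, 3 start at these offsets
	fmt.Println(searchInts(lines, 15) + 1) // prints 2: offset 15 falls on line 2
}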
diff --git a/src/pkg/go/types/Makefile b/src/pkg/go/types/Makefile
index 54e762b36..4ca707c73 100644
--- a/src/pkg/go/types/Makefile
+++ b/src/pkg/go/types/Makefile
@@ -6,6 +6,7 @@ include ../../../Make.inc
TARG=go/types
GOFILES=\
+ check.go\
const.go\
exportdata.go\
gcimporter.go\
diff --git a/src/pkg/go/types/check.go b/src/pkg/go/types/check.go
new file mode 100644
index 000000000..02d662926
--- /dev/null
+++ b/src/pkg/go/types/check.go
@@ -0,0 +1,233 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Check function, which typechecks a package.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+ "os"
+ "strconv"
+)
+
+
+const debug = false
+
+
+type checker struct {
+ fset *token.FileSet
+ scanner.ErrorVector
+ types map[ast.Expr]Type
+}
+
+
+func (c *checker) errorf(pos token.Pos, format string, args ...interface{}) string {
+ msg := fmt.Sprintf(format, args...)
+ c.Error(c.fset.Position(pos), msg)
+ return msg
+}
+
+
+// collectFields collects struct fields (tok = token.STRUCT), interface methods
+// (tok = token.INTERFACE), and function arguments/results (tok = token.FUNC).
+func (c *checker) collectFields(tok token.Token, list *ast.FieldList, cycleOk bool) (fields ObjList, tags []string, isVariadic bool) {
+ if list != nil {
+ for _, field := range list.List {
+ ftype := field.Type
+ if t, ok := ftype.(*ast.Ellipsis); ok {
+ ftype = t.Elt
+ isVariadic = true
+ }
+ typ := c.makeType(ftype, cycleOk)
+ tag := ""
+ if field.Tag != nil {
+ assert(field.Tag.Kind == token.STRING)
+ tag, _ = strconv.Unquote(field.Tag.Value)
+ }
+ if len(field.Names) > 0 {
+ // named fields
+ for _, name := range field.Names {
+ obj := name.Obj
+ obj.Type = typ
+ fields = append(fields, obj)
+ if tok == token.STRUCT {
+ tags = append(tags, tag)
+ }
+ }
+ } else {
+ // anonymous field
+ switch tok {
+ case token.STRUCT:
+ tags = append(tags, tag)
+ fallthrough
+ case token.FUNC:
+ obj := ast.NewObj(ast.Var, "")
+ obj.Type = typ
+ fields = append(fields, obj)
+ case token.INTERFACE:
+ utyp := Underlying(typ)
+ if typ, ok := utyp.(*Interface); ok {
+ // TODO(gri) This is not good enough. Check for double declarations!
+ fields = append(fields, typ.Methods...)
+ } else if _, ok := utyp.(*Bad); !ok {
+ // if utyp is Bad, don't complain (the root cause was reported before)
+ c.errorf(ftype.Pos(), "interface contains embedded non-interface type")
+ }
+ default:
+ panic("unreachable")
+ }
+ }
+ }
+ }
+ return
+}
+
+
+// makeType makes a new type for an AST type specification x or returns
+// the type referred to by a type name x. If cycleOk is set, a type may
+// refer to itself directly or indirectly; otherwise cycles are errors.
+//
+func (c *checker) makeType(x ast.Expr, cycleOk bool) (typ Type) {
+ if debug {
+ fmt.Printf("makeType (cycleOk = %v)\n", cycleOk)
+ ast.Print(c.fset, x)
+ defer func() {
+ fmt.Printf("-> %T %v\n\n", typ, typ)
+ }()
+ }
+
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ return &Bad{}
+
+ case *ast.Ident:
+ // type name
+ obj := t.Obj
+ if obj == nil {
+ // unresolved identifier (error has been reported before)
+ return &Bad{Msg: "unresolved identifier"}
+ }
+ if obj.Kind != ast.Typ {
+ msg := c.errorf(t.Pos(), "%s is not a type", t.Name)
+ return &Bad{Msg: msg}
+ }
+ c.checkObj(obj, cycleOk)
+ if !cycleOk && obj.Type.(*Name).Underlying == nil {
+ // TODO(gri) Enable this message again once its position
+ // is independent of the underlying map implementation.
+ // msg := c.errorf(obj.Pos(), "illegal cycle in declaration of %s", obj.Name)
+ msg := "illegal cycle"
+ return &Bad{Msg: msg}
+ }
+ return obj.Type.(Type)
+
+ case *ast.ParenExpr:
+ return c.makeType(t.X, cycleOk)
+
+ case *ast.SelectorExpr:
+ // qualified identifier
+ // TODO (gri) eventually, this code belongs to expression
+ // type checking - here for the time being
+ if ident, ok := t.X.(*ast.Ident); ok {
+ if obj := ident.Obj; obj != nil {
+ if obj.Kind != ast.Pkg {
+ msg := c.errorf(ident.Pos(), "%s is not a package", obj.Name)
+ return &Bad{Msg: msg}
+ }
+ // TODO(gri) we have a package name but don't
+ // have the mapping from package name to package
+ // scope anymore (created in ast.NewPackage).
+ return &Bad{} // for now
+ }
+ }
+ // TODO(gri) can this really happen (the parser should have excluded this)?
+ msg := c.errorf(t.Pos(), "expected qualified identifier")
+ return &Bad{Msg: msg}
+
+ case *ast.StarExpr:
+ return &Pointer{Base: c.makeType(t.X, true)}
+
+ case *ast.ArrayType:
+ if t.Len != nil {
+ // TODO(gri) compute length
+ return &Array{Elt: c.makeType(t.Elt, cycleOk)}
+ }
+ return &Slice{Elt: c.makeType(t.Elt, true)}
+
+ case *ast.StructType:
+ fields, tags, _ := c.collectFields(token.STRUCT, t.Fields, cycleOk)
+ return &Struct{Fields: fields, Tags: tags}
+
+ case *ast.FuncType:
+ params, _, isVariadic := c.collectFields(token.FUNC, t.Params, true)
+ results, _, _ := c.collectFields(token.FUNC, t.Results, true)
+ return &Func{Recv: nil, Params: params, Results: results, IsVariadic: isVariadic}
+
+ case *ast.InterfaceType:
+ methods, _, _ := c.collectFields(token.INTERFACE, t.Methods, cycleOk)
+ methods.Sort()
+ return &Interface{Methods: methods}
+
+ case *ast.MapType:
+ return &Map{Key: c.makeType(t.Key, true), Elt: c.makeType(t.Value, true)}
+
+ case *ast.ChanType:
+ return &Chan{Dir: t.Dir, Elt: c.makeType(t.Value, true)}
+ }
+
+ panic(fmt.Sprintf("unreachable (%T)", x))
+}
+
+
+// checkObj type checks an object.
+func (c *checker) checkObj(obj *ast.Object, ref bool) {
+ if obj.Type != nil {
+ // object has already been type checked
+ return
+ }
+
+ switch obj.Kind {
+ case ast.Bad:
+ // ignore
+
+ case ast.Con:
+ // TODO(gri) complete this
+
+ case ast.Typ:
+ typ := &Name{Obj: obj}
+ obj.Type = typ // "mark" object so recursion terminates
+ typ.Underlying = Underlying(c.makeType(obj.Decl.(*ast.TypeSpec).Type, ref))
+
+ case ast.Var:
+ // TODO(gri) complete this
+
+ case ast.Fun:
+ // TODO(gri) complete this
+
+ default:
+ panic("unreachable")
+ }
+}
+
+
+// Check typechecks a package.
+// It augments the AST by assigning types to all ast.Objects and returns a map
+// of types for all expression nodes in statements, and a scanner.ErrorList if
+// there are errors.
+//
+func Check(fset *token.FileSet, pkg *ast.Package) (types map[ast.Expr]Type, err os.Error) {
+ var c checker
+ c.fset = fset
+ c.types = make(map[ast.Expr]Type)
+
+ for _, obj := range pkg.Scope.Objects {
+ c.checkObj(obj, false)
+ }
+
+ return c.types, c.GetError(scanner.NoMultiples)
+}
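
For orientation, a minimal sketch of how a client might drive the new Check entry point, mirroring the test harness added below. The file name and error handling are illustrative assumptions; the pre-Go1 APIs (os.Error, ast.NewPackage taking an importer and universe scope) are used exactly as in this change.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	fset := token.NewFileSet()
	// "example.go" is a placeholder file name.
	file, err := parser.ParseFile(fset, "example.go", nil, parser.DeclarationErrors)
	if err != nil {
		fmt.Println(err)
		return
	}
	files := map[string]*ast.File{"example.go": file}
	// Resolve identifiers using the gc importer and the universe scope.
	pkg, err := ast.NewPackage(fset, files, types.GcImporter, types.Universe)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Typecheck; exprTypes maps expression nodes in statements to their types.
	exprTypes, err := types.Check(fset, pkg)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("typed %d expressions\n", len(exprTypes))
}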
diff --git a/src/pkg/go/types/check_test.go b/src/pkg/go/types/check_test.go
new file mode 100644
index 000000000..6ecb12b1e
--- /dev/null
+++ b/src/pkg/go/types/check_test.go
@@ -0,0 +1,224 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a typechecker test harness. The packages specified
+// in tests are typechecked. Error messages reported by the typechecker are
+// compared against the error messages expected in the test files.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position. Consecutive comments may be
+// used to indicate multiple errors for the same token position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+// package p
+// func f() {
+// _ = x /* ERROR "not declared" */ + 1
+// }
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "testing"
+)
+
+
+// The test filenames do not end in .go so that they are invisible
+// to gofmt since they contain comments that must not change their
+// positions relative to surrounding tokens.
+
+var tests = []struct {
+ name string
+ files []string
+}{
+ {"test0", []string{"testdata/test0.src"}},
+}
+
+
+var fset = token.NewFileSet()
+
+
+// TODO(gri) This functionality should be in token.FileSet.
+func getFile(filename string) *token.File {
+ for f := range fset.Files() {
+ if f.Name() == filename {
+ return f
+ }
+ }
+ return nil
+}
+
+
+// TODO(gri) This functionality should be in token.FileSet.
+func getPos(filename string, offset int) token.Pos {
+ if f := getFile(filename); f != nil {
+ return f.Pos(offset)
+ }
+ return token.NoPos
+}
+
+
+// TODO(gri) Need to revisit parser interface. We should be able to use parser.ParseFiles
+// or a similar function instead.
+func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, os.Error) {
+ files := make(map[string]*ast.File)
+ var errors scanner.ErrorList
+ for _, filename := range filenames {
+ if _, exists := files[filename]; exists {
+ t.Fatalf("%s: duplicate file %s", testname, filename)
+ }
+ file, err := parser.ParseFile(fset, filename, nil, parser.DeclarationErrors)
+ if file == nil {
+ t.Fatalf("%s: could not parse file %s", testname, filename)
+ }
+ files[filename] = file
+ if err != nil {
+ // if the parser returns a non-scanner.ErrorList error
+ // the file couldn't be read in the first place and
+ // file == nil; in that case we shouldn't reach here
+ errors = append(errors, err.(scanner.ErrorList)...)
+ }
+
+ }
+ return files, errors
+}
+
+
+// ERROR comments must be of the form /* ERROR "rx" */ and rx is
+// a regular expression that matches the expected error message.
+//
+var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)
+
+// expectedErrors collects the regular expressions of ERROR comments found
+// in files and returns them as a map of error positions to error messages.
+//
+func expectedErrors(t *testing.T, testname string, files map[string]*ast.File) map[token.Pos]string {
+ errors := make(map[token.Pos]string)
+ for filename := range files {
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("%s: could not read %s", testname, filename)
+ }
+
+ var s scanner.Scanner
+ // file was parsed already - do not add it again to the file
+ // set otherwise the position information returned here will
+ // not match the position information collected by the parser
+ s.Init(getFile(filename), src, nil, scanner.ScanComments)
+ var prev token.Pos // position of last non-comment token
+
+ scanFile:
+ for {
+ pos, tok, lit := s.Scan()
+ switch tok {
+ case token.EOF:
+ break scanFile
+ case token.COMMENT:
+ s := errRx.FindStringSubmatch(lit)
+ if len(s) == 2 {
+ errors[prev] = string(s[1])
+ }
+ default:
+ prev = pos
+ }
+ }
+ }
+ return errors
+}
+
+
+func eliminate(t *testing.T, expected map[token.Pos]string, errors os.Error) {
+ if errors == nil {
+ return
+ }
+ for _, error := range errors.(scanner.ErrorList) {
+ // error.Pos is a token.Position, but we want
+ // a token.Pos so we can do a map lookup
+ // TODO(gri) Need to move scanner.Errors over
+ // to use token.Pos and file set info.
+ pos := getPos(error.Pos.Filename, error.Pos.Offset)
+ if msg, found := expected[pos]; found {
+ // we expect a message at pos; check if it matches
+ rx, err := regexp.Compile(msg)
+ if err != nil {
+ t.Errorf("%s: %v", error.Pos, err)
+ continue
+ }
+ if match := rx.MatchString(error.Msg); !match {
+ t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
+ continue
+ }
+ // we have a match - eliminate this error
+ expected[pos] = "", false
+ } else {
+ // To keep in mind when analyzing failed test output:
+ // If the same error position occurs multiple times in errors,
+ // this message will be triggered (because the first error at
+ // the position removes this position from the expected errors).
+ t.Errorf("%s: no (multiple?) error expected, but found: %s", error.Pos, error.Msg)
+ }
+ }
+}
+
+
+func check(t *testing.T, testname string, testfiles []string) {
+ // TODO(gri) Eventually all these different phases should be
+ // subsumed into a single function call that takes
+ // a set of files and creates a fully resolved and
+ // type-checked AST.
+
+ files, err := parseFiles(t, testname, testfiles)
+
+ // we are expecting the following errors
+ // (collect these after parsing the files so that
+ // they are found in the file set)
+ errors := expectedErrors(t, testname, files)
+
+ // verify errors returned by the parser
+ eliminate(t, errors, err)
+
+ // verify errors returned after resolving identifiers
+ pkg, err := ast.NewPackage(fset, files, GcImporter, Universe)
+ eliminate(t, errors, err)
+
+ // verify errors returned by the typechecker
+ _, err = Check(fset, pkg)
+ eliminate(t, errors, err)
+
+ // there should be no expected errors left
+ if len(errors) > 0 {
+ t.Errorf("%s: %d errors not reported:", testname, len(errors))
+ for pos, msg := range errors {
+ t.Errorf("%s: %s\n", fset.Position(pos), msg)
+ }
+ }
+}
+
+
+func TestCheck(t *testing.T) {
+ // For easy debugging w/o changing the testing code,
+ // if there is a local test file, only test that file.
+ const testfile = "test.go"
+ if fi, err := os.Stat(testfile); err == nil && fi.IsRegular() {
+ fmt.Printf("WARNING: Testing only %s (remove it to run all tests)\n", testfile)
+ check(t, testfile, []string{testfile})
+ return
+ }
+
+ // Otherwise, run all the tests.
+ for _, test := range tests {
+ check(t, test.name, test.files)
+ }
+}
diff --git a/src/pkg/go/types/gcimporter.go b/src/pkg/go/types/gcimporter.go
index 30adc04e7..377c45ad6 100644
--- a/src/pkg/go/types/gcimporter.go
+++ b/src/pkg/go/types/gcimporter.go
@@ -74,15 +74,14 @@ func findPkg(path string) (filename, id string) {
// object/archive file and populates its scope with the results.
type gcParser struct {
scanner scanner.Scanner
- tok int // current token
- lit string // literal string; only valid for Ident, Int, String tokens
- id string // package id of imported package
- scope *ast.Scope // scope of imported package; alias for deps[id]
- deps map[string]*ast.Scope // package id -> package scope
+ tok int // current token
+ lit string // literal string; only valid for Ident, Int, String tokens
+ id string // package id of imported package
+ imports map[string]*ast.Object // package id -> package object
}
-func (p *gcParser) init(filename, id string, src io.Reader) {
+func (p *gcParser) init(filename, id string, src io.Reader, imports map[string]*ast.Object) {
p.scanner.Init(src)
p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
@@ -90,8 +89,7 @@ func (p *gcParser) init(filename, id string, src io.Reader) {
p.scanner.Filename = filename // for good error messages
p.next()
p.id = id
- p.scope = ast.NewScope(nil)
- p.deps = map[string]*ast.Scope{"unsafe": Unsafe, id: p.scope}
+ p.imports = imports
}
@@ -110,9 +108,9 @@ func (p *gcParser) next() {
// GcImporter implements the ast.Importer signature.
-func GcImporter(path string) (name string, scope *ast.Scope, err os.Error) {
+func GcImporter(imports map[string]*ast.Object, path string) (pkg *ast.Object, err os.Error) {
if path == "unsafe" {
- return path, Unsafe, nil
+ return Unsafe, nil
}
defer func() {
@@ -130,6 +128,10 @@ func GcImporter(path string) (name string, scope *ast.Scope, err os.Error) {
return
}
+ if pkg = imports[id]; pkg != nil {
+ return // package was imported before
+ }
+
buf, err := ExportData(filename)
if err != nil {
return
@@ -137,13 +139,12 @@ func GcImporter(path string) (name string, scope *ast.Scope, err os.Error) {
defer buf.Close()
if trace {
- fmt.Printf("importing %s\n", filename)
+ fmt.Printf("importing %s (%s)\n", id, filename)
}
var p gcParser
- p.init(filename, id, buf)
- name, scope = p.parseExport()
-
+ p.init(filename, id, buf, imports)
+ pkg = p.parseExport()
return
}
@@ -214,21 +215,31 @@ func (p *gcParser) expectKeyword(keyword string) {
// ImportPath = string_lit .
//
-func (p *gcParser) parsePkgId() *ast.Scope {
+func (p *gcParser) parsePkgId() *ast.Object {
id, err := strconv.Unquote(p.expect(scanner.String))
if err != nil {
p.error(err)
}
- scope := p.scope // id == "" stands for the imported package id
- if id != "" {
- if scope = p.deps[id]; scope == nil {
- scope = ast.NewScope(nil)
- p.deps[id] = scope
- }
+ switch id {
+ case "":
+ // id == "" stands for the imported package id
+ // (only known at time of package installation)
+ id = p.id
+ case "unsafe":
+ // package unsafe is not in the imports map - handle explicitly
+ return Unsafe
+ }
+
+ pkg := p.imports[id]
+ if pkg == nil {
+ scope := ast.NewScope(nil)
+ pkg = ast.NewObj(ast.Pkg, "")
+ pkg.Data = scope
+ p.imports[id] = pkg
}
- return scope
+ return pkg
}
@@ -253,13 +264,14 @@ func (p *gcParser) parseDotIdent() string {
// ExportedName = ImportPath "." dotIdentifier .
//
func (p *gcParser) parseExportedName(kind ast.ObjKind) *ast.Object {
- scope := p.parsePkgId()
+ pkg := p.parsePkgId()
p.expect('.')
name := p.parseDotIdent()
// a type may have been declared before - if it exists
// already in the respective package scope, return that
// type
+ scope := pkg.Data.(*ast.Scope)
if kind == ast.Typ {
if obj := scope.Lookup(name); obj != nil {
assert(obj.Kind == ast.Typ)
@@ -344,28 +356,22 @@ func (p *gcParser) parseName() (name string) {
// Field = Name Type [ ":" string_lit ] .
//
-func (p *gcParser) parseField(scope *ast.Scope) {
- // TODO(gri) The code below is not correct for anonymous fields:
- // The name is the type name; it should not be empty.
+func (p *gcParser) parseField() (fld *ast.Object, tag string) {
name := p.parseName()
ftyp := p.parseType()
if name == "" {
// anonymous field - ftyp must be T or *T and T must be a type name
- ftyp = Deref(ftyp)
- if ftyp, ok := ftyp.(*Name); ok {
- name = ftyp.Obj.Name
- } else {
+ if _, ok := Deref(ftyp).(*Name); !ok {
p.errorf("anonymous field expected")
}
}
if p.tok == ':' {
p.next()
- tag := p.expect(scanner.String)
- _ = tag // TODO(gri) store tag somewhere
+ tag = p.expect(scanner.String)
}
- fld := ast.NewObj(ast.Var, name)
+ fld = ast.NewObj(ast.Var, name)
fld.Type = ftyp
- scope.Insert(fld)
+ return
}
@@ -373,103 +379,119 @@ func (p *gcParser) parseField(scope *ast.Scope) {
// FieldList = Field { ";" Field } .
//
func (p *gcParser) parseStructType() Type {
+ var fields []*ast.Object
+ var tags []string
+
+ parseField := func() {
+ fld, tag := p.parseField()
+ fields = append(fields, fld)
+ tags = append(tags, tag)
+ }
+
p.expectKeyword("struct")
p.expect('{')
- scope := ast.NewScope(nil)
if p.tok != '}' {
- p.parseField(scope)
+ parseField()
for p.tok == ';' {
p.next()
- p.parseField(scope)
+ parseField()
}
}
p.expect('}')
- return &Struct{}
+
+ return &Struct{Fields: fields, Tags: tags}
}
// Parameter = ( identifier | "?" ) [ "..." ] Type .
//
-func (p *gcParser) parseParameter(scope *ast.Scope, isVariadic *bool) {
+func (p *gcParser) parseParameter() (par *ast.Object, isVariadic bool) {
name := p.parseName()
if name == "" {
name = "_" // cannot access unnamed identifiers
}
- if isVariadic != nil {
- if *isVariadic {
- p.error("... not on final argument")
- }
- if p.tok == '.' {
- p.expectSpecial("...")
- *isVariadic = true
- }
+ if p.tok == '.' {
+ p.expectSpecial("...")
+ isVariadic = true
}
ptyp := p.parseType()
- par := ast.NewObj(ast.Var, name)
+ par = ast.NewObj(ast.Var, name)
par.Type = ptyp
- scope.Insert(par)
+ return
}
// Parameters = "(" [ ParameterList ] ")" .
// ParameterList = { Parameter "," } Parameter .
//
-func (p *gcParser) parseParameters(scope *ast.Scope, isVariadic *bool) {
+func (p *gcParser) parseParameters() (list []*ast.Object, isVariadic bool) {
+ parseParameter := func() {
+ par, variadic := p.parseParameter()
+ list = append(list, par)
+ if variadic {
+ if isVariadic {
+ p.error("... not on final argument")
+ }
+ isVariadic = true
+ }
+ }
+
p.expect('(')
if p.tok != ')' {
- p.parseParameter(scope, isVariadic)
+ parseParameter()
for p.tok == ',' {
p.next()
- p.parseParameter(scope, isVariadic)
+ parseParameter()
}
}
p.expect(')')
+
+ return
}
// Signature = Parameters [ Result ] .
// Result = Type | Parameters .
//
-func (p *gcParser) parseSignature(scope *ast.Scope, isVariadic *bool) {
- p.parseParameters(scope, isVariadic)
+func (p *gcParser) parseSignature() *Func {
+ params, isVariadic := p.parseParameters()
// optional result type
+ var results []*ast.Object
switch p.tok {
case scanner.Ident, scanner.String, '[', '*', '<':
// single, unnamed result
result := ast.NewObj(ast.Var, "_")
result.Type = p.parseType()
- scope.Insert(result)
+ results = []*ast.Object{result}
case '(':
// named or multiple result(s)
- p.parseParameters(scope, nil)
+ var variadic bool
+ results, variadic = p.parseParameters()
+ if variadic {
+ p.error("... not permitted on result type")
+ }
}
-}
-
-// FuncType = "func" Signature .
-//
-func (p *gcParser) parseFuncType() Type {
- // "func" already consumed
- scope := ast.NewScope(nil)
- isVariadic := false
- p.parseSignature(scope, &isVariadic)
- return &Func{IsVariadic: isVariadic}
+ return &Func{Params: params, Results: results, IsVariadic: isVariadic}
}
// MethodSpec = identifier Signature .
//
-func (p *gcParser) parseMethodSpec(scope *ast.Scope) {
+func (p *gcParser) parseMethodSpec() *ast.Object {
if p.tok == scanner.Ident {
p.expect(scanner.Ident)
} else {
+ // TODO(gri) should this be parseExportedName here?
p.parsePkgId()
p.expect('.')
p.parseDotIdent()
}
- isVariadic := false
- p.parseSignature(scope, &isVariadic)
+ p.parseSignature()
+
+ // TODO(gri) compute method object
+ return ast.NewObj(ast.Fun, "_")
}
@@ -477,18 +499,26 @@ func (p *gcParser) parseMethodSpec(scope *ast.Scope) {
// MethodList = MethodSpec { ";" MethodSpec } .
//
func (p *gcParser) parseInterfaceType() Type {
+ var methods ObjList
+
+ parseMethod := func() {
+ meth := p.parseMethodSpec()
+ methods = append(methods, meth)
+ }
+
p.expectKeyword("interface")
p.expect('{')
- scope := ast.NewScope(nil)
if p.tok != '}' {
- p.parseMethodSpec(scope)
+ parseMethod()
for p.tok == ';' {
p.next()
- p.parseMethodSpec(scope)
+ parseMethod()
}
}
p.expect('}')
- return &Interface{}
+
+ methods.Sort()
+ return &Interface{Methods: methods}
}
@@ -520,6 +550,7 @@ func (p *gcParser) parseChanType() Type {
// TypeName = ExportedName .
// SliceType = "[" "]" Type .
// PointerType = "*" Type .
+// FuncType = "func" Signature .
//
func (p *gcParser) parseType() Type {
switch p.tok {
@@ -530,8 +561,9 @@ func (p *gcParser) parseType() Type {
case "struct":
return p.parseStructType()
case "func":
- p.next() // parseFuncType assumes "func" is already consumed
- return p.parseFuncType()
+ // FuncType
+ p.next()
+ return p.parseSignature()
case "interface":
return p.parseInterfaceType()
case "map":
@@ -578,9 +610,10 @@ func (p *gcParser) parseImportDecl() {
// The identifier has no semantic meaning in the import data.
// It exists so that error messages can print the real package
// name: binary.ByteOrder instead of "encoding/binary".ByteOrder.
- // TODO(gri): Save package id -> package name mapping.
- p.expect(scanner.Ident)
- p.parsePkgId()
+ name := p.expect(scanner.Ident)
+ pkg := p.parsePkgId()
+ assert(pkg.Name == "" || pkg.Name == name)
+ pkg.Name = name
}
@@ -681,7 +714,7 @@ func (p *gcParser) parseConstDecl() {
if obj.Type == nil {
obj.Type = typ
}
- _ = x // TODO(gri) store x somewhere
+ obj.Data = x
}
@@ -690,12 +723,18 @@ func (p *gcParser) parseConstDecl() {
func (p *gcParser) parseTypeDecl() {
p.expectKeyword("type")
obj := p.parseExportedName(ast.Typ)
+
+ // The type object may have been imported before and thus already
+ // have a type associated with it. We still need to parse the type
+ // structure, but throw it away if the object already has a type.
+ // This ensures that all imports refer to the same type object for
+ // a given type declaration.
typ := p.parseType()
- name := obj.Type.(*Name)
- assert(name.Underlying == nil)
- assert(Underlying(typ) == typ)
- name.Underlying = typ
+ if name := obj.Type.(*Name); name.Underlying == nil {
+ assert(Underlying(typ) == typ)
+ name.Underlying = typ
+ }
}
@@ -713,7 +752,7 @@ func (p *gcParser) parseVarDecl() {
func (p *gcParser) parseFuncDecl() {
// "func" already consumed
obj := p.parseExportedName(ast.Fun)
- obj.Type = p.parseFuncType()
+ obj.Type = p.parseSignature()
}
@@ -722,14 +761,11 @@ func (p *gcParser) parseFuncDecl() {
//
func (p *gcParser) parseMethodDecl() {
// "func" already consumed
- scope := ast.NewScope(nil) // method scope
p.expect('(')
- p.parseParameter(scope, nil) // receiver
+ p.parseParameter() // receiver
p.expect(')')
p.expect(scanner.Ident)
- isVariadic := false
- p.parseSignature(scope, &isVariadic)
-
+ p.parseSignature()
}
@@ -763,7 +799,7 @@ func (p *gcParser) parseDecl() {
// Export = "PackageClause { Decl } "$$" .
// PackageClause = "package" identifier [ "safe" ] "\n" .
//
-func (p *gcParser) parseExport() (string, *ast.Scope) {
+func (p *gcParser) parseExport() *ast.Object {
p.expectKeyword("package")
name := p.expect(scanner.Ident)
if p.tok != '\n' {
@@ -774,6 +810,11 @@ func (p *gcParser) parseExport() (string, *ast.Scope) {
}
p.expect('\n')
+ assert(p.imports[p.id] == nil)
+ pkg := ast.NewObj(ast.Pkg, name)
+ pkg.Data = ast.NewScope(nil)
+ p.imports[p.id] = pkg
+
for p.tok != '$' && p.tok != scanner.EOF {
p.parseDecl()
}
@@ -788,5 +829,5 @@ func (p *gcParser) parseExport() (string, *ast.Scope) {
p.errorf("expected no scanner errors, got %d", n)
}
- return name, p.scope
+ return pkg
}
diff --git a/src/pkg/go/types/gcimporter_test.go b/src/pkg/go/types/gcimporter_test.go
index 556e761df..50e70f29c 100644
--- a/src/pkg/go/types/gcimporter_test.go
+++ b/src/pkg/go/types/gcimporter_test.go
@@ -6,6 +6,7 @@ package types
import (
"exec"
+ "go/ast"
"io/ioutil"
"path/filepath"
"runtime"
@@ -57,8 +58,12 @@ func compile(t *testing.T, dirname, filename string) {
}
+// Use the same global imports map for all tests. The effect is
+// as if all tested packages were imported into a single package.
+var imports = make(map[string]*ast.Object)
+
func testPath(t *testing.T, path string) bool {
- _, _, err := GcImporter(path)
+ _, err := GcImporter(imports, path)
if err != nil {
t.Errorf("testPath(%s): %s", path, err)
return false
diff --git a/src/pkg/go/types/testdata/exports.go b/src/pkg/go/types/testdata/exports.go
index 13efe012a..1de2e00ad 100644
--- a/src/pkg/go/types/testdata/exports.go
+++ b/src/pkg/go/types/testdata/exports.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file is used to generate a .6 object file which
+// This file is used to generate an object file which
// serves as test file for gcimporter_test.go.
package exports
@@ -14,13 +14,13 @@ import (
const (
C0 int = 0
- C1 = 3.14159265
- C2 = 2.718281828i
- C3 = -123.456e-789
- C4 = +123.456E+789
- C5 = 1234i
- C6 = "foo\n"
- C7 = `bar\n`
+ C1 = 3.14159265
+ C2 = 2.718281828i
+ C3 = -123.456e-789
+ C4 = +123.456E+789
+ C5 = 1234i
+ C6 = "foo\n"
+ C7 = `bar\n`
)
diff --git a/src/pkg/go/types/testdata/test0.src b/src/pkg/go/types/testdata/test0.src
new file mode 100644
index 000000000..84a1abe27
--- /dev/null
+++ b/src/pkg/go/types/testdata/test0.src
@@ -0,0 +1,154 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// type declarations
+
+package test0
+
+import "unsafe"
+
+const pi = 3.1415
+
+type (
+ N undeclared /* ERROR "undeclared" */
+ B bool
+ I int32
+ A [10]P
+ T struct {
+ x, y P
+ }
+ P *T
+ R (*R)
+ F func(A) I
+ Y interface {
+ f(A) I
+ }
+ S [](((P)))
+ M map[I]F
+ C chan<- I
+)
+
+
+type (
+ p1 pi /* ERROR "not a package" */ .foo
+ p2 unsafe.Pointer
+)
+
+
+type (
+ Pi pi /* ERROR "not a type" */
+
+ a /* DISABLED "illegal cycle" */ a
+ a /* ERROR "redeclared" */ int
+
+ // where the cycle error appears depends on the
+ // order in which declarations are processed
+ // (which depends on the order in which a map
+ // is iterated through)
+ b c
+ c /* DISABLED "illegal cycle" */ d
+ d e
+ e b
+
+ t *t
+
+ U V
+ V *W
+ W U
+
+ P1 *S2
+ P2 P1
+
+ S0 struct {
+ }
+ S1 struct {
+ a, b, c int
+ u, v, a /* ERROR "redeclared" */ float32
+ }
+ S2 struct {
+ U // anonymous field
+ // TODO(gri) recognize double-declaration below
+ // U /* ERROR "redeclared" */ int
+ }
+ S3 struct {
+ x S2
+ }
+ S4/* DISABLED "illegal cycle" */ struct {
+ S4
+ }
+ S5 struct {
+ S6
+ }
+ S6 /* DISABLED "illegal cycle" */ struct {
+ field S7
+ }
+ S7 struct {
+ S5
+ }
+
+ L1 []L1
+ L2 []int
+
+ A1 [10]int
+ A2 /* DISABLED "illegal cycle" */ [10]A2
+ A3 /* DISABLED "illegal cycle" */ [10]struct {
+ x A4
+ }
+ A4 [10]A3
+
+ F1 func()
+ F2 func(x, y, z float32)
+ F3 func(x, y, x /* ERROR "redeclared" */ float32)
+ F4 func() (x, y, x /* ERROR "redeclared" */ float32)
+ F5 func(x int) (x /* ERROR "redeclared" */ float32)
+ F6 func(x ...int)
+
+ I1 interface{}
+ I2 interface {
+ m1()
+ }
+ I3 interface {
+ m1()
+ m1 /* ERROR "redeclared" */ ()
+ }
+ I4 interface {
+ m1(x, y, x /* ERROR "redeclared" */ float32)
+ m2() (x, y, x /* ERROR "redeclared" */ float32)
+ m3(x int) (x /* ERROR "redeclared" */ float32)
+ }
+ I5 interface {
+ m1(I5)
+ }
+ I6 interface {
+ S0 /* ERROR "non-interface" */
+ }
+ I7 interface {
+ I1
+ I1
+ }
+ I8 /* DISABLED "illegal cycle" */ interface {
+ I8
+ }
+ I9 /* DISABLED "illegal cycle" */ interface {
+ I10
+ }
+ I10 interface {
+ I11
+ }
+ I11 interface {
+ I9
+ }
+
+ C1 chan int
+ C2 <-chan int
+ C3 chan<- C3
+ C4 chan C5
+ C5 chan C6
+ C6 chan C4
+
+ M1 map[Last]string
+ M2 map[string]M2
+
+ Last int
+)
diff --git a/src/pkg/go/types/types.go b/src/pkg/go/types/types.go
index 2ee645d98..10b0145b8 100644
--- a/src/pkg/go/types/types.go
+++ b/src/pkg/go/types/types.go
@@ -7,7 +7,10 @@
//
package types
-import "go/ast"
+import (
+ "go/ast"
+ "sort"
+)
// All types implement the Type interface.
@@ -23,6 +26,13 @@ type ImplementsType struct{}
func (t *ImplementsType) isType() {}
+// A Bad type is a non-nil placeholder type when we don't know a type.
+type Bad struct {
+ ImplementsType
+ Msg string // for better error reporting/debugging
+}
+
+
// A Basic represents a (unnamed) basic type.
type Basic struct {
ImplementsType
@@ -46,9 +56,15 @@ type Slice struct {
// A Struct represents a struct type struct{...}.
+// Anonymous fields are represented by objects with empty names.
type Struct struct {
ImplementsType
- // TODO(gri) need to remember fields.
+ Fields ObjList // struct fields; or nil
+ Tags []string // corresponding tags; or nil
+ // TODO(gri) This type needs some rethinking:
+ // - at the moment anonymous fields are marked with "" object names,
+ // and their names have to be reconstructed
+ // - there is no scope for fast lookup (but the parser creates one)
}
@@ -60,17 +76,20 @@ type Pointer struct {
// A Func represents a function type func(...) (...).
+// Unnamed parameters are represented by objects with empty names.
type Func struct {
ImplementsType
- IsVariadic bool
- // TODO(gri) need to remember parameters.
+ Recv *ast.Object // nil if not a method
+ Params ObjList // (incoming) parameters from left to right; or nil
+ Results ObjList // (outgoing) results from left to right; or nil
+ IsVariadic bool // true if the last parameter's type is of the form ...T
}
// An Interface represents an interface type interface{...}.
type Interface struct {
ImplementsType
- // TODO(gri) need to remember methods.
+ Methods ObjList // interface methods sorted by name; or nil
}
@@ -112,11 +131,143 @@ func Deref(typ Type) Type {
func Underlying(typ Type) Type {
if typ, ok := typ.(*Name); ok {
utyp := typ.Underlying
- if _, ok := utyp.(*Basic); ok {
- return typ
+ if _, ok := utyp.(*Basic); !ok {
+ return utyp
}
- return utyp
-
+ // the underlying type of a type name referring
+ // to an (untyped) basic type is the basic type
+ // name
}
return typ
}
+
+
+// An ObjList represents an ordered (in some fashion) list of objects.
+type ObjList []*ast.Object
+
+// ObjList implements sort.Interface.
+func (list ObjList) Len() int { return len(list) }
+func (list ObjList) Less(i, j int) bool { return list[i].Name < list[j].Name }
+func (list ObjList) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
+
+// Sort sorts an object list by object name.
+func (list ObjList) Sort() { sort.Sort(list) }
+
+
+// identicalTypes returns true if both lists a and b have the
+// same length and corresponding objects have identical types.
+func identicalTypes(a, b ObjList) bool {
+ if len(a) == len(b) {
+ for i, x := range a {
+ y := b[i]
+ if !Identical(x.Type.(Type), y.Type.(Type)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+
+// Identical returns true if two types are identical.
+func Identical(x, y Type) bool {
+ if x == y {
+ return true
+ }
+
+ switch x := x.(type) {
+ case *Bad:
+ // A Bad type is always identical to any other type
+ // (to avoid spurious follow-up errors).
+ return true
+
+ case *Basic:
+ if y, ok := y.(*Basic); ok {
+ panic("unimplemented")
+ _ = y
+ }
+
+ case *Array:
+ // Two array types are identical if they have identical element types
+ // and the same array length.
+ if y, ok := y.(*Array); ok {
+ return x.Len == y.Len && Identical(x.Elt, y.Elt)
+ }
+
+ case *Slice:
+ // Two slice types are identical if they have identical element types.
+ if y, ok := y.(*Slice); ok {
+ return Identical(x.Elt, y.Elt)
+ }
+
+ case *Struct:
+ // Two struct types are identical if they have the same sequence of fields,
+ // and if corresponding fields have the same names, and identical types,
+ // and identical tags. Two anonymous fields are considered to have the same
+ // name. Lower-case field names from different packages are always different.
+ if y, ok := y.(*Struct); ok {
+ // TODO(gri) handle structs from different packages
+ if identicalTypes(x.Fields, y.Fields) {
+ for i, f := range x.Fields {
+ g := y.Fields[i]
+ if f.Name != g.Name || x.Tags[i] != y.Tags[i] {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Pointer:
+ // Two pointer types are identical if they have identical base types.
+ if y, ok := y.(*Pointer); ok {
+ return Identical(x.Base, y.Base)
+ }
+
+ case *Func:
+ // Two function types are identical if they have the same number of parameters
+ // and result values, corresponding parameter and result types are identical,
+ // and either both functions are variadic or neither is. Parameter and result
+ // names are not required to match.
+ if y, ok := y.(*Func); ok {
+ return identicalTypes(x.Params, y.Params) &&
+ identicalTypes(x.Results, y.Results) &&
+ x.IsVariadic == y.IsVariadic
+ }
+
+ case *Interface:
+ // Two interface types are identical if they have the same set of methods with
+ // the same names and identical function types. Lower-case method names from
+ // different packages are always different. The order of the methods is irrelevant.
+ if y, ok := y.(*Interface); ok {
+ return identicalTypes(x.Methods, y.Methods) // methods are sorted
+ }
+
+ case *Map:
+ // Two map types are identical if they have identical key and value types.
+ if y, ok := y.(*Map); ok {
+ return Identical(x.Key, y.Key) && Identical(x.Elt, y.Elt)
+ }
+
+ case *Chan:
+ // Two channel types are identical if they have identical value types
+ // and the same direction.
+ if y, ok := y.(*Chan); ok {
+ return x.Dir == y.Dir && Identical(x.Elt, y.Elt)
+ }
+
+ case *Name:
+ // Two named types are identical if their type names originate
+ // in the same type declaration.
+ if y, ok := y.(*Name); ok {
+ return x.Obj == y.Obj ||
+ // permit bad objects to be equal to avoid
+ // follow up errors
+ x.Obj != nil && x.Obj.Kind == ast.Bad ||
+ y.Obj != nil && y.Obj.Kind == ast.Bad
+ }
+ }
+
+ return false
+}
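
A small, hypothetical in-package test sketching two of the structural rules encoded in Identical above: slices over the same element type compare identical, channels with different directions do not. The type values are constructed by hand purely for illustration and are not part of this change.

package types

import (
	"go/ast"
	"testing"
)

// TestIdenticalSketch is illustrative only; it exercises two of the
// structural identity rules implemented above.
func TestIdenticalSketch(t *testing.T) {
	elem := &Basic{} // stand-in element type; compared by pointer identity here
	if !Identical(&Slice{Elt: elem}, &Slice{Elt: elem}) {
		t.Error("slices with identical element types should be identical")
	}
	send := &Chan{Dir: ast.SEND, Elt: elem}
	recv := &Chan{Dir: ast.RECV, Elt: elem}
	if Identical(send, recv) {
		t.Error("channels with different directions should not be identical")
	}
}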
diff --git a/src/pkg/go/types/universe.go b/src/pkg/go/types/universe.go
index 2a54a8ac1..96005cff5 100644
--- a/src/pkg/go/types/universe.go
+++ b/src/pkg/go/types/universe.go
@@ -11,9 +11,9 @@ import "go/ast"
var (
- scope, // current scope to use for initialization
- Universe,
- Unsafe *ast.Scope
+ scope *ast.Scope // current scope to use for initialization
+ Universe *ast.Scope
+ Unsafe *ast.Object // package unsafe
)
@@ -56,8 +56,8 @@ var (
func init() {
- Universe = ast.NewScope(nil)
- scope = Universe
+ scope = ast.NewScope(nil)
+ Universe = scope
Bool = defType("bool")
defType("byte") // TODO(gri) should be an alias for uint8
@@ -98,8 +98,10 @@ func init() {
defFun("real")
defFun("recover")
- Unsafe = ast.NewScope(nil)
- scope = Unsafe
+ scope = ast.NewScope(nil)
+ Unsafe = ast.NewObj(ast.Pkg, "unsafe")
+ Unsafe.Data = scope
+
defType("Pointer")
defFun("Alignof")
diff --git a/src/pkg/gob/decode.go b/src/pkg/gob/decode.go
index 0e86df6b5..381d44c05 100644
--- a/src/pkg/gob/decode.go
+++ b/src/pkg/gob/decode.go
@@ -172,7 +172,7 @@ func ignoreTwoUints(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.decodeUint()
}
-// decBool decodes a uiint and stores it as a boolean through p.
+// decBool decodes a uint and stores it as a boolean through p.
func decBool(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil {
diff --git a/src/pkg/gob/doc.go b/src/pkg/gob/doc.go
index 189086f52..850759bbd 100644
--- a/src/pkg/gob/doc.go
+++ b/src/pkg/gob/doc.go
@@ -159,7 +159,7 @@ description, constructed from these types:
Elem typeId
Len int
}
- type CommonType {
+ type CommonType struct {
Name string // the name of the struct type
Id int // the id of the type, repeated so it's inside the type
}
diff --git a/src/pkg/gob/gobencdec_test.go b/src/pkg/gob/gobencdec_test.go
index e94534f4c..3e1906020 100644
--- a/src/pkg/gob/gobencdec_test.go
+++ b/src/pkg/gob/gobencdec_test.go
@@ -384,7 +384,7 @@ func TestGobEncoderFieldTypeError(t *testing.T) {
y := &GobTest1{}
err = dec.Decode(y)
if err == nil {
- t.Fatal("expected decode error for mistmatched fields (non-encoder to decoder)")
+ t.Fatal("expected decode error for mismatched fields (non-encoder to decoder)")
}
if strings.Index(err.String(), "type") < 0 {
t.Fatal("expected type error; got", err)
diff --git a/src/pkg/gob/type.go b/src/pkg/gob/type.go
index c5b8fb5d9..c6542633a 100644
--- a/src/pkg/gob/type.go
+++ b/src/pkg/gob/type.go
@@ -673,7 +673,7 @@ func mustGetTypeInfo(rt reflect.Type) *typeInfo {
// A type that implements GobEncoder and GobDecoder has complete
// control over the representation of its data and may therefore
// contain things such as private fields, channels, and functions,
-// which are not usually transmissable in gob streams.
+// which are not usually transmissible in gob streams.
//
// Note: Since gobs can be stored permanently, It is good design
// to guarantee the encoding used by a GobEncoder is stable as the
diff --git a/src/pkg/html/token.go b/src/pkg/html/token.go
index ad03241ed..6d8eb604e 100644
--- a/src/pkg/html/token.go
+++ b/src/pkg/html/token.go
@@ -331,10 +331,10 @@ func (z *Tokenizer) trim(i int) int {
return k
}
-// lower finds the largest alphabetic [0-9A-Za-z]* word at the start of z.buf[i:]
-// and returns that word lower-cased, as well as the trimmed cursor location
-// after that word.
-func (z *Tokenizer) lower(i int) ([]byte, int) {
+// word finds the largest alphabetic [0-9A-Za-z]* word at the start
+// of z.buf[i:] and returns that word (optionally lower-cased), as
+// well as the trimmed cursor location after that word.
+func (z *Tokenizer) word(i int, lower bool) ([]byte, int) {
i0 := i
loop:
for ; i < z.p1; i++ {
@@ -343,7 +343,9 @@ loop:
case '0' <= c && c <= '9':
// No-op.
case 'A' <= c && c <= 'Z':
- z.buf[i] = c + 'a' - 'A'
+ if lower {
+ z.buf[i] = c + 'a' - 'A'
+ }
case 'a' <= c && c <= 'z':
// No-op.
default:
@@ -388,7 +390,7 @@ func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
if z.buf[i] == '/' {
i++
}
- name, z.p0 = z.lower(i)
+ name, z.p0 = z.word(i, true)
hasAttr = z.p0 != z.p1
return
}
@@ -397,23 +399,36 @@ func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
// attribute for the current tag token and whether there are more attributes.
// The contents of the returned slices may change on the next call to Next.
func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
- key, i := z.lower(z.p0)
- // Get past the "=\"".
- if i == z.p1 || z.buf[i] != '=' {
+ key, i := z.word(z.p0, true)
+ // Check for an empty attribute value.
+ if i == z.p1 {
+ z.p0 = i
+ return
+ }
+ // Get past the equals and quote characters.
+ if z.buf[i] != '=' {
+ z.p0, moreAttr = i, true
return
}
i = z.trim(i + 1)
- if i == z.p1 || z.buf[i] != '"' {
+ if i == z.p1 {
+ z.p0 = i
+ return
+ }
+ closeQuote := z.buf[i]
+ if closeQuote != '\'' && closeQuote != '"' {
+ val, z.p0 = z.word(i, false)
+ moreAttr = z.p0 != z.p1
return
}
i = z.trim(i + 1)
- // Copy and unescape everything up to the closing '"'.
+ // Copy and unescape everything up to the closing quote.
dst, src := i, i
loop:
for src < z.p1 {
c := z.buf[src]
switch c {
- case '"':
+ case closeQuote:
src++
break loop
case '&':
diff --git a/src/pkg/html/token_test.go b/src/pkg/html/token_test.go
index 5cf1f6dac..6291af600 100644
--- a/src/pkg/html/token_test.go
+++ b/src/pkg/html/token_test.go
@@ -100,13 +100,51 @@ var tokenTests = []tokenTest{
"<p \t\n iD=\"a&quot;B\" foo=\"bar\"><EM>te&lt;&amp;;xt</em></p>",
`<p id="a&quot;B" foo="bar">$<em>$te&lt;&amp;;xt$</em>$</p>`,
},
- // A non-existant entity. Tokenizing and converting back to a string should
+ // A nonexistent entity. Tokenizing and converting back to a string should
// escape the "&" to become "&amp;".
{
"noSuchEntity",
`<a b="c&noSuchEntity;d">&lt;&alsoDoesntExist;&`,
`<a b="c&amp;noSuchEntity;d">$&lt;&amp;alsoDoesntExist;&amp;`,
},
+
+ // Attribute tests:
+ // http://dev.w3.org/html5/spec/Overview.html#attributes-0
+ {
+ "Empty attribute",
+ `<input disabled FOO>`,
+ `<input disabled="" foo="">`,
+ },
+ {
+ "Empty attribute, whitespace",
+ `<input disabled FOO >`,
+ `<input disabled="" foo="">`,
+ },
+ {
+ "Unquoted attribute value",
+ `<input value=yes FOO=BAR>`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Unquoted attribute value, trailing space",
+ `<input value=yes FOO=BAR >`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Single-quoted attribute value",
+ `<input value='yes' FOO='BAR'>`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Single-quoted attribute value, trailing space",
+ `<input value='yes' FOO='BAR' >`,
+ `<input value="yes" foo="BAR">`,
+ },
+ {
+ "Double-quoted attribute value",
+ `<input value="I'm an attribute" FOO="BAR">`,
+ `<input value="I&apos;m an attribute" foo="BAR">`,
+ },
}
func TestTokenizer(t *testing.T) {
diff --git a/src/pkg/http/cgi/child.go b/src/pkg/http/cgi/child.go
index c7d48b9eb..e1ad7ad32 100644
--- a/src/pkg/http/cgi/child.go
+++ b/src/pkg/http/cgi/child.go
@@ -9,10 +9,12 @@ package cgi
import (
"bufio"
+ "crypto/tls"
"fmt"
"http"
"io"
"io/ioutil"
+ "net"
"os"
"strconv"
"strings"
@@ -21,8 +23,16 @@ import (
// Request returns the HTTP request as represented in the current
// environment. This assumes the current program is being run
// by a web server in a CGI environment.
+// The returned Request's Body is populated, if applicable.
func Request() (*http.Request, os.Error) {
- return requestFromEnvironment(envMap(os.Environ()))
+ r, err := RequestFromMap(envMap(os.Environ()))
+ if err != nil {
+ return nil, err
+ }
+ if r.ContentLength > 0 {
+ r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength))
+ }
+ return r, nil
}
func envMap(env []string) map[string]string {
@@ -42,37 +52,44 @@ var skipHeader = map[string]bool{
"HTTP_USER_AGENT": true,
}
-func requestFromEnvironment(env map[string]string) (*http.Request, os.Error) {
+// RequestFromMap creates an http.Request from CGI variables.
+// The returned Request's Body field is not populated.
+func RequestFromMap(params map[string]string) (*http.Request, os.Error) {
r := new(http.Request)
- r.Method = env["REQUEST_METHOD"]
+ r.Method = params["REQUEST_METHOD"]
if r.Method == "" {
return nil, os.NewError("cgi: no REQUEST_METHOD in environment")
}
+
+ r.Proto = params["SERVER_PROTOCOL"]
+ var ok bool
+ r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto)
+ if !ok {
+ return nil, os.NewError("cgi: invalid SERVER_PROTOCOL version")
+ }
+
r.Close = true
r.Trailer = http.Header{}
r.Header = http.Header{}
- r.Host = env["HTTP_HOST"]
- r.Referer = env["HTTP_REFERER"]
- r.UserAgent = env["HTTP_USER_AGENT"]
+ r.Host = params["HTTP_HOST"]
+ r.Referer = params["HTTP_REFERER"]
+ r.UserAgent = params["HTTP_USER_AGENT"]
- // CGI doesn't allow chunked requests, so these should all be accurate:
- r.Proto = "HTTP/1.0"
- r.ProtoMajor = 1
- r.ProtoMinor = 0
- r.TransferEncoding = nil
-
- if lenstr := env["CONTENT_LENGTH"]; lenstr != "" {
+ if lenstr := params["CONTENT_LENGTH"]; lenstr != "" {
clen, err := strconv.Atoi64(lenstr)
if err != nil {
return nil, os.NewError("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
}
r.ContentLength = clen
- r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, clen))
+ }
+
+ if ct := params["CONTENT_TYPE"]; ct != "" {
+ r.Header.Set("Content-Type", ct)
}
// Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers
- for k, v := range env {
+ for k, v := range params {
if !strings.HasPrefix(k, "HTTP_") || skipHeader[k] {
continue
}
@@ -84,7 +101,7 @@ func requestFromEnvironment(env map[string]string) (*http.Request, os.Error) {
if r.Host != "" {
// Hostname is provided, so we can reasonably construct a URL,
// even if we have to assume 'http' for the scheme.
- r.RawURL = "http://" + r.Host + env["REQUEST_URI"]
+ r.RawURL = "http://" + r.Host + params["REQUEST_URI"]
url, err := http.ParseURL(r.RawURL)
if err != nil {
return nil, os.NewError("cgi: failed to parse host and REQUEST_URI into a URL: " + r.RawURL)
@@ -94,13 +111,25 @@ func requestFromEnvironment(env map[string]string) (*http.Request, os.Error) {
// Fallback logic if we don't have a Host header or the URL
// failed to parse
if r.URL == nil {
- r.RawURL = env["REQUEST_URI"]
+ r.RawURL = params["REQUEST_URI"]
url, err := http.ParseURL(r.RawURL)
if err != nil {
return nil, os.NewError("cgi: failed to parse REQUEST_URI into a URL: " + r.RawURL)
}
r.URL = url
}
+
+ // There's apparently a de-facto standard for this.
+ // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
+ if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" {
+ r.TLS = &tls.ConnectionState{HandshakeComplete: true}
+ }
+
+ // Request.RemoteAddr has its port set by Go's standard http
+ // server, so we do here too. We don't have one, though, so we
+ // use a dummy one.
+ r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0")
+
return r, nil
}
@@ -139,10 +168,6 @@ func (r *response) Flush() {
r.bufw.Flush()
}
-func (r *response) RemoteAddr() string {
- return os.Getenv("REMOTE_ADDR")
-}
-
func (r *response) Header() http.Header {
return r.header
}
@@ -168,25 +193,7 @@ func (r *response) WriteHeader(code int) {
r.header.Add("Content-Type", "text/html; charset=utf-8")
}
- // TODO: add a method on http.Header to write itself to an io.Writer?
- // This is duplicated code.
- for k, vv := range r.header {
- for _, v := range vv {
- v = strings.Replace(v, "\n", "", -1)
- v = strings.Replace(v, "\r", "", -1)
- v = strings.TrimSpace(v)
- fmt.Fprintf(r.bufw, "%s: %s\r\n", k, v)
- }
- }
- r.bufw.Write([]byte("\r\n"))
+ r.header.Write(r.bufw)
+ r.bufw.WriteString("\r\n")
r.bufw.Flush()
}
-
-func (r *response) UsingTLS() bool {
- // There's apparently a de-facto standard for this.
- // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
- if s := os.Getenv("HTTPS"); s == "on" || s == "ON" || s == "1" {
- return true
- }
- return false
-}
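
A minimal sketch of a CGI child program against the updated API; the output is illustrative. Request now fills in Proto, Content-Type, TLS, and RemoteAddr from the CGI variables and wires os.Stdin up as the Body when a Content-Length is present.

package main

import (
	"fmt"
	"http/cgi"
	"os"
)

func main() {
	req, err := cgi.Request() // reads the CGI environment; Body is populated if applicable
	if err != nil {
		fmt.Fprintln(os.Stderr, "cgi:", err)
		os.Exit(1)
	}
	// Emit a trivial CGI response by hand.
	fmt.Printf("Content-Type: text/plain\r\n\r\n")
	fmt.Printf("method=%s remote=%s https=%v\n", req.Method, req.RemoteAddr, req.TLS != nil)
}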
diff --git a/src/pkg/http/cgi/child_test.go b/src/pkg/http/cgi/child_test.go
index db0e09cf6..d12947814 100644
--- a/src/pkg/http/cgi/child_test.go
+++ b/src/pkg/http/cgi/child_test.go
@@ -12,6 +12,7 @@ import (
func TestRequest(t *testing.T) {
env := map[string]string{
+ "SERVER_PROTOCOL": "HTTP/1.1",
"REQUEST_METHOD": "GET",
"HTTP_HOST": "example.com",
"HTTP_REFERER": "elsewhere",
@@ -19,10 +20,13 @@ func TestRequest(t *testing.T) {
"HTTP_FOO_BAR": "baz",
"REQUEST_URI": "/path?a=b",
"CONTENT_LENGTH": "123",
+ "CONTENT_TYPE": "text/xml",
+ "HTTPS": "1",
+ "REMOTE_ADDR": "5.6.7.8",
}
- req, err := requestFromEnvironment(env)
+ req, err := RequestFromMap(env)
if err != nil {
- t.Fatalf("requestFromEnvironment: %v", err)
+ t.Fatalf("RequestFromMap: %v", err)
}
if g, e := req.UserAgent, "goclient"; e != g {
t.Errorf("expected UserAgent %q; got %q", e, g)
@@ -34,6 +38,9 @@ func TestRequest(t *testing.T) {
// Tests that we don't put recognized headers in the map
t.Errorf("expected User-Agent %q; got %q", e, g)
}
+ if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g {
+ t.Errorf("expected Content-Type %q; got %q", e, g)
+ }
if g, e := req.ContentLength, int64(123); e != g {
t.Errorf("expected ContentLength %d; got %d", e, g)
}
@@ -58,18 +65,25 @@ func TestRequest(t *testing.T) {
if req.Trailer == nil {
t.Errorf("unexpected nil Trailer")
}
+ if req.TLS == nil {
+ t.Errorf("expected non-nil TLS")
+ }
+ if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
+ t.Errorf("RemoteAddr: got %q; want %q", g, e)
+ }
}
func TestRequestWithoutHost(t *testing.T) {
env := map[string]string{
- "HTTP_HOST": "",
- "REQUEST_METHOD": "GET",
- "REQUEST_URI": "/path?a=b",
- "CONTENT_LENGTH": "123",
+ "SERVER_PROTOCOL": "HTTP/1.1",
+ "HTTP_HOST": "",
+ "REQUEST_METHOD": "GET",
+ "REQUEST_URI": "/path?a=b",
+ "CONTENT_LENGTH": "123",
}
- req, err := requestFromEnvironment(env)
+ req, err := RequestFromMap(env)
if err != nil {
- t.Fatalf("requestFromEnvironment: %v", err)
+ t.Fatalf("RequestFromMap: %v", err)
}
if g, e := req.RawURL, "/path?a=b"; e != g {
t.Errorf("expected RawURL %q; got %q", e, g)
diff --git a/src/pkg/http/cgi/host.go b/src/pkg/http/cgi/host.go
index 136d4e4ee..7e4ccf881 100644
--- a/src/pkg/http/cgi/host.go
+++ b/src/pkg/http/cgi/host.go
@@ -36,7 +36,9 @@ var osDefaultInheritEnv = map[string][]string{
"darwin": []string{"DYLD_LIBRARY_PATH"},
"freebsd": []string{"LD_LIBRARY_PATH"},
"hpux": []string{"LD_LIBRARY_PATH", "SHLIB_PATH"},
+ "irix": []string{"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"},
"linux": []string{"LD_LIBRARY_PATH"},
+ "solaris": []string{"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"},
"windows": []string{"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"},
}
@@ -86,6 +88,7 @@ func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
env := []string{
"SERVER_SOFTWARE=go",
"SERVER_NAME=" + req.Host,
+ "SERVER_PROTOCOL=HTTP/1.1",
"HTTP_HOST=" + req.Host,
"GATEWAY_INTERFACE=CGI/1.1",
"REQUEST_METHOD=" + req.Method,
diff --git a/src/pkg/http/chunked.go b/src/pkg/http/chunked.go
index 66195f06b..59121c5a2 100644
--- a/src/pkg/http/chunked.go
+++ b/src/pkg/http/chunked.go
@@ -6,19 +6,29 @@ package http
import (
"io"
+ "log"
"os"
"strconv"
)
// NewChunkedWriter returns a new writer that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned writer
+// "chunked" format before writing them to w. Closing the returned writer
// sends the final 0-length chunk that marks the end of the stream.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// header, both of which are wrong.
func NewChunkedWriter(w io.Writer) io.WriteCloser {
+ if _, bad := w.(*response); bad {
+ log.Printf("warning: using NewChunkedWriter in an http.Handler; expect corrupt output")
+ }
return &chunkedWriter{w}
}
// Writing to ChunkedWriter translates to writing in HTTP chunked Transfer
-// Encoding wire format to the undering Wire writer.
+// Encoding wire format to the underlying Wire writer.
type chunkedWriter struct {
Wire io.Writer
}
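
A hedged sketch of where NewChunkedWriter still makes sense: producing a chunked body by hand outside any http.Handler. The bytes.Buffer stands in for a raw connection and is an assumption of the example, not part of this change.

package main

import (
	"bytes"
	"fmt"
	"http"
)

func main() {
	var conn bytes.Buffer // stands in for a raw connection
	cw := http.NewChunkedWriter(&conn)
	fmt.Fprint(cw, "hello, ")
	fmt.Fprint(cw, "chunked world\n")
	cw.Close() // writes the final 0-length chunk
	fmt.Print(conn.String())
}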
diff --git a/src/pkg/http/client.go b/src/pkg/http/client.go
index d73cbc855..ac7ff1853 100644
--- a/src/pkg/http/client.go
+++ b/src/pkg/http/client.go
@@ -74,6 +74,9 @@ type readClose struct {
//
// Generally Get, Post, or PostForm will be used instead of Do.
func (c *Client) Do(req *Request) (resp *Response, err os.Error) {
+ if req.Method == "GET" || req.Method == "HEAD" {
+ return c.doFollowingRedirects(req)
+ }
return send(req, c.Transport)
}
@@ -126,13 +129,10 @@ func shouldRedirect(statusCode int) bool {
// 303 (See Other)
// 307 (Temporary Redirect)
//
-// finalURL is the URL from which the response was fetched -- identical to the
-// input URL unless redirects were followed.
-//
// Caller should close r.Body when done reading from it.
//
// Get is a convenience wrapper around DefaultClient.Get.
-func Get(url string) (r *Response, finalURL string, err os.Error) {
+func Get(url string) (r *Response, err os.Error) {
return DefaultClient.Get(url)
}
@@ -145,11 +145,16 @@ func Get(url string) (r *Response, finalURL string, err os.Error) {
// 303 (See Other)
// 307 (Temporary Redirect)
//
-// finalURL is the URL from which the response was fetched -- identical
-// to the input URL unless redirects were followed.
-//
// Caller should close r.Body when done reading from it.
-func (c *Client) Get(url string) (r *Response, finalURL string, err os.Error) {
+func (c *Client) Get(url string) (r *Response, err os.Error) {
+ req, err := NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.doFollowingRedirects(req)
+}
+
+func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err os.Error) {
// TODO: if/when we add cookie support, the redirected request shouldn't
// necessarily supply the same cookies as the original.
var base *URL
@@ -159,33 +164,33 @@ func (c *Client) Get(url string) (r *Response, finalURL string, err os.Error) {
}
var via []*Request
+ req := ireq
+ url := "" // next relative or absolute URL to fetch (after first request)
for redirect := 0; ; redirect++ {
- var req Request
- req.Method = "GET"
- req.Header = make(Header)
- if base == nil {
- req.URL, err = ParseURL(url)
- } else {
+ if redirect != 0 {
+ req = new(Request)
+ req.Method = ireq.Method
+ req.Header = make(Header)
req.URL, err = base.ParseURL(url)
- }
- if err != nil {
- break
- }
- if len(via) > 0 {
- // Add the Referer header.
- lastReq := via[len(via)-1]
- if lastReq.URL.Scheme != "https" {
- req.Referer = lastReq.URL.String()
- }
-
- err = redirectChecker(&req, via)
if err != nil {
break
}
+ if len(via) > 0 {
+ // Add the Referer header.
+ lastReq := via[len(via)-1]
+ if lastReq.URL.Scheme != "https" {
+ req.Referer = lastReq.URL.String()
+ }
+
+ err = redirectChecker(req, via)
+ if err != nil {
+ break
+ }
+ }
}
url = req.URL.String()
- if r, err = send(&req, c.Transport); err != nil {
+ if r, err = send(req, c.Transport); err != nil {
break
}
if shouldRedirect(r.StatusCode) {
@@ -195,14 +200,14 @@ func (c *Client) Get(url string) (r *Response, finalURL string, err os.Error) {
break
}
base = req.URL
- via = append(via, &req)
+ via = append(via, req)
continue
}
- finalURL = url
return
}
- err = &URLError{"Get", url, err}
+ method := ireq.Method
+ err = &URLError{method[0:1] + strings.ToLower(method[1:]), url, err}
return
}
@@ -290,19 +295,32 @@ func urlencode(data map[string]string) (b *bytes.Buffer) {
return bytes.NewBuffer([]byte(EncodeQuery(m)))
}
-// Head issues a HEAD to the specified URL.
+// Head issues a HEAD to the specified URL. If the response is one of the
+// following redirect codes, Head follows the redirect after calling the
+// Client's CheckRedirect function.
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
//
// Head is a wrapper around DefaultClient.Head
func Head(url string) (r *Response, err os.Error) {
return DefaultClient.Head(url)
}
-// Head issues a HEAD to the specified URL.
+// Head issues a HEAD to the specified URL. If the response is one of the
+// following redirect codes, Head follows the redirect after calling the
+// Client's CheckRedirect function.
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
func (c *Client) Head(url string) (r *Response, err os.Error) {
- var req Request
- req.Method = "HEAD"
- if req.URL, err = ParseURL(url); err != nil {
- return
+ req, err := NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
}
- return send(&req, c.Transport)
+ return c.doFollowingRedirects(req)
}
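
A sketch of the revised client API from the caller's side: Get and Head now return just (*Response, os.Error), and the final URL after redirects is read from the Response's Request field, as the updated test below does. The URL is a placeholder.

package main

import (
	"fmt"
	"http"
	"io/ioutil"
)

func main() {
	r, err := http.Get("http://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	defer r.Body.Close()
	body, _ := ioutil.ReadAll(r.Body)
	// The URL of the final (post-redirect) request replaces the old finalURL result.
	fmt.Println("fetched", r.Request.URL.String(), "-", len(body), "bytes")
}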
diff --git a/src/pkg/http/client_test.go b/src/pkg/http/client_test.go
index 59d62c1c9..0869015b3 100644
--- a/src/pkg/http/client_test.go
+++ b/src/pkg/http/client_test.go
@@ -26,7 +26,7 @@ func TestClient(t *testing.T) {
ts := httptest.NewServer(robotsTxtHandler)
defer ts.Close()
- r, _, err := Get(ts.URL)
+ r, err := Get(ts.URL)
var b []byte
if err == nil {
b, err = ioutil.ReadAll(r.Body)
@@ -96,9 +96,22 @@ func TestRedirects(t *testing.T) {
defer ts.Close()
c := &Client{}
- _, _, err := c.Get(ts.URL)
+ _, err := c.Get(ts.URL)
if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
- t.Errorf("with default client, expected error %q, got %q", e, g)
+ t.Errorf("with default client Get, expected error %q, got %q", e, g)
+ }
+
+ // HEAD request should also have the ability to follow redirects.
+ _, err = c.Head(ts.URL)
+ if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
+ t.Errorf("with default client Head, expected error %q, got %q", e, g)
+ }
+
+ // Do should also follow redirects.
+ greq, _ := NewRequest("GET", ts.URL, nil)
+ _, err = c.Do(greq)
+ if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
+ t.Errorf("with default client Do, expected error %q, got %q", e, g)
}
var checkErr os.Error
@@ -107,7 +120,8 @@ func TestRedirects(t *testing.T) {
lastVia = via
return checkErr
}}
- _, finalUrl, err := c.Get(ts.URL)
+ res, err := c.Get(ts.URL)
+ finalUrl := res.Request.URL.String()
if e, g := "<nil>", fmt.Sprintf("%v", err); e != g {
t.Errorf("with custom client, expected error %q, got %q", e, g)
}
@@ -119,7 +133,8 @@ func TestRedirects(t *testing.T) {
}
checkErr = os.NewError("no redirects allowed")
- _, finalUrl, err = c.Get(ts.URL)
+ res, err = c.Get(ts.URL)
+ finalUrl = res.Request.URL.String()
if e, g := "Get /?n=1: no redirects allowed", fmt.Sprintf("%v", err); e != g {
t.Errorf("with redirects forbidden, expected error %q, got %q", e, g)
}
diff --git a/src/pkg/http/cookie.go b/src/pkg/http/cookie.go
index 2c01826a1..5add1ccc2 100644
--- a/src/pkg/http/cookie.go
+++ b/src/pkg/http/cookie.go
@@ -15,9 +15,9 @@ import (
"time"
)
-// This implementation is done according to IETF draft-ietf-httpstate-cookie-23, found at
+// This implementation is done according to RFC 6265:
//
-// http://tools.ietf.org/html/draft-ietf-httpstate-cookie-23
+// http://tools.ietf.org/html/rfc6265
// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
// HTTP response or the Cookie header of an HTTP request.
@@ -130,6 +130,37 @@ func readSetCookies(h Header) []*Cookie {
return cookies
}
+// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
+func SetCookie(w ResponseWriter, cookie *Cookie) {
+ var b bytes.Buffer
+ writeSetCookieToBuffer(&b, cookie)
+ w.Header().Add("Set-Cookie", b.String())
+}
+
+func writeSetCookieToBuffer(buf *bytes.Buffer, c *Cookie) {
+ fmt.Fprintf(buf, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
+ if len(c.Path) > 0 {
+ fmt.Fprintf(buf, "; Path=%s", sanitizeValue(c.Path))
+ }
+ if len(c.Domain) > 0 {
+ fmt.Fprintf(buf, "; Domain=%s", sanitizeValue(c.Domain))
+ }
+ if len(c.Expires.Zone) > 0 {
+ fmt.Fprintf(buf, "; Expires=%s", c.Expires.Format(time.RFC1123))
+ }
+ if c.MaxAge > 0 {
+ fmt.Fprintf(buf, "; Max-Age=%d", c.MaxAge)
+ } else if c.MaxAge < 0 {
+ fmt.Fprintf(buf, "; Max-Age=0")
+ }
+ if c.HttpOnly {
+ fmt.Fprintf(buf, "; HttpOnly")
+ }
+ if c.Secure {
+ fmt.Fprintf(buf, "; Secure")
+ }
+}
+
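
For illustration only (not from this patch): how a handler might use the exported SetCookie added above; the handler name, route, and cookie values are hypothetical.

    package main

    import (
        "http"
        "io"
    )

    // setSession attaches a session cookie to the response using the new
    // SetCookie helper, which serializes the Cookie and adds one
    // Set-Cookie header per call.
    func setSession(w http.ResponseWriter, r *http.Request) {
        http.SetCookie(w, &http.Cookie{
            Name:   "session",
            Value:  "opaque-token",
            Path:   "/",
            MaxAge: 3600, // written as "Max-Age=3600"
        })
        io.WriteString(w, "cookie set\n")
    }

    func main() {
        http.HandleFunc("/login", setSession)
        http.ListenAndServe(":8080", nil)
    }
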
// writeSetCookies writes the wire representation of the set-cookies
// to w. Each cookie is written on a separate "Set-Cookie: " line.
// This choice is made because HTTP parsers tend to have a limit on
@@ -142,27 +173,7 @@ func writeSetCookies(w io.Writer, kk []*Cookie) os.Error {
var b bytes.Buffer
for _, c := range kk {
b.Reset()
- fmt.Fprintf(&b, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
- if len(c.Path) > 0 {
- fmt.Fprintf(&b, "; Path=%s", sanitizeValue(c.Path))
- }
- if len(c.Domain) > 0 {
- fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(c.Domain))
- }
- if len(c.Expires.Zone) > 0 {
- fmt.Fprintf(&b, "; Expires=%s", c.Expires.Format(time.RFC1123))
- }
- if c.MaxAge > 0 {
- fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
- } else if c.MaxAge < 0 {
- fmt.Fprintf(&b, "; Max-Age=0")
- }
- if c.HttpOnly {
- fmt.Fprintf(&b, "; HttpOnly")
- }
- if c.Secure {
- fmt.Fprintf(&b, "; Secure")
- }
+ writeSetCookieToBuffer(&b, c)
lines = append(lines, "Set-Cookie: "+b.String()+"\r\n")
}
sort.SortStrings(lines)
@@ -218,22 +229,26 @@ func readCookies(h Header) []*Cookie {
return cookies
}
-// writeCookies writes the wire representation of the cookies
-// to w. Each cookie is written on a separate "Cookie: " line.
-// This choice is made because HTTP parsers tend to have a limit on
-// line-length, so it seems safer to place cookies on separate lines.
+// writeCookies writes the wire representation of the cookies to
+// w. According to RFC 6265 section 5.4, writeCookies does not
+// attach more than one Cookie header field. That means all
+// cookies, if any, are written on the same line, separated by
+// semicolons.
func writeCookies(w io.Writer, kk []*Cookie) os.Error {
- lines := make([]string, 0, len(kk))
- for _, c := range kk {
- lines = append(lines, fmt.Sprintf("Cookie: %s=%s\r\n", sanitizeName(c.Name), sanitizeValue(c.Value)))
+ if len(kk) == 0 {
+ return nil
}
- sort.SortStrings(lines)
- for _, l := range lines {
- if _, err := io.WriteString(w, l); err != nil {
- return err
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Cookie: ")
+ for i, c := range kk {
+ if i > 0 {
+ fmt.Fprintf(&buf, "; ")
}
+ fmt.Fprintf(&buf, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
}
- return nil
+ fmt.Fprintf(&buf, "\r\n")
+ _, err := w.Write(buf.Bytes())
+ return err
}
func sanitizeName(n string) string {
diff --git a/src/pkg/http/cookie_test.go b/src/pkg/http/cookie_test.go
index a3ae85cd6..13c9fff4a 100644
--- a/src/pkg/http/cookie_test.go
+++ b/src/pkg/http/cookie_test.go
@@ -8,6 +8,7 @@ import (
"bytes"
"fmt"
"json"
+ "os"
"reflect"
"testing"
)
@@ -43,14 +44,55 @@ func TestWriteSetCookies(t *testing.T) {
}
}
+type headerOnlyResponseWriter Header
+
+func (ho headerOnlyResponseWriter) Header() Header {
+ return Header(ho)
+}
+
+func (ho headerOnlyResponseWriter) Write([]byte) (int, os.Error) {
+ panic("NOIMPL")
+}
+
+func (ho headerOnlyResponseWriter) WriteHeader(int) {
+ panic("NOIMPL")
+}
+
+func TestSetCookie(t *testing.T) {
+ m := make(Header)
+ SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"})
+ SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600})
+ if l := len(m["Set-Cookie"]); l != 2 {
+ t.Fatalf("expected %d cookies, got %d", 2, l)
+ }
+ if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e {
+ t.Errorf("cookie #1: want %q, got %q", e, g)
+ }
+ if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e {
+ t.Errorf("cookie #2: want %q, got %q", e, g)
+ }
+}
+
var writeCookiesTests = []struct {
Cookies []*Cookie
Raw string
}{
{
+ []*Cookie{},
+ "",
+ },
+ {
[]*Cookie{&Cookie{Name: "cookie-1", Value: "v$1"}},
"Cookie: cookie-1=v$1\r\n",
},
+ {
+ []*Cookie{
+ &Cookie{Name: "cookie-1", Value: "v$1"},
+ &Cookie{Name: "cookie-2", Value: "v$2"},
+ &Cookie{Name: "cookie-3", Value: "v$3"},
+ },
+ "Cookie: cookie-1=v$1; cookie-2=v$2; cookie-3=v$3\r\n",
+ },
}
func TestWriteCookies(t *testing.T) {
diff --git a/src/pkg/http/fcgi/child.go b/src/pkg/http/fcgi/child.go
index 114052bee..19718824c 100644
--- a/src/pkg/http/fcgi/child.go
+++ b/src/pkg/http/fcgi/child.go
@@ -9,11 +9,10 @@ package fcgi
import (
"fmt"
"http"
+ "http/cgi"
"io"
"net"
"os"
- "strconv"
- "strings"
"time"
)
@@ -38,68 +37,6 @@ func newRequest(reqId uint16, flags uint8) *request {
return r
}
-// TODO(eds): copied from http/cgi
-var skipHeader = map[string]bool{
- "HTTP_HOST": true,
- "HTTP_REFERER": true,
- "HTTP_USER_AGENT": true,
-}
-
-// httpRequest converts r to an http.Request.
-// TODO(eds): this is very similar to http/cgi's requestFromEnvironment
-func (r *request) httpRequest(body io.ReadCloser) (*http.Request, os.Error) {
- req := &http.Request{
- Method: r.params["REQUEST_METHOD"],
- RawURL: r.params["REQUEST_URI"],
- Body: body,
- Header: http.Header{},
- Trailer: http.Header{},
- Proto: r.params["SERVER_PROTOCOL"],
- }
-
- var ok bool
- req.ProtoMajor, req.ProtoMinor, ok = http.ParseHTTPVersion(req.Proto)
- if !ok {
- return nil, os.NewError("fcgi: invalid HTTP version")
- }
-
- req.Host = r.params["HTTP_HOST"]
- req.Referer = r.params["HTTP_REFERER"]
- req.UserAgent = r.params["HTTP_USER_AGENT"]
-
- if lenstr := r.params["CONTENT_LENGTH"]; lenstr != "" {
- clen, err := strconv.Atoi64(r.params["CONTENT_LENGTH"])
- if err != nil {
- return nil, os.NewError("fcgi: bad CONTENT_LENGTH parameter: " + lenstr)
- }
- req.ContentLength = clen
- }
-
- if req.Host != "" {
- req.RawURL = "http://" + req.Host + r.params["REQUEST_URI"]
- url, err := http.ParseURL(req.RawURL)
- if err != nil {
- return nil, os.NewError("fcgi: failed to parse host and REQUEST_URI into a URL: " + req.RawURL)
- }
- req.URL = url
- }
- if req.URL == nil {
- req.RawURL = r.params["REQUEST_URI"]
- url, err := http.ParseURL(req.RawURL)
- if err != nil {
- return nil, os.NewError("fcgi: failed to parse REQUEST_URI into a URL: " + req.RawURL)
- }
- req.URL = url
- }
-
- for key, val := range r.params {
- if strings.HasPrefix(key, "HTTP_") && !skipHeader[key] {
- req.Header.Add(strings.Replace(key[5:], "_", "-", -1), val)
- }
- }
- return req, nil
-}
-
// parseParams reads an encoded []byte into Params.
func (r *request) parseParams() {
text := r.rawParams
@@ -169,15 +106,7 @@ func (r *response) WriteHeader(code int) {
}
fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
- // TODO(eds): this is duplicated in http and http/cgi
- for k, vv := range r.header {
- for _, v := range vv {
- v = strings.Replace(v, "\n", "", -1)
- v = strings.Replace(v, "\r", "", -1)
- v = strings.TrimSpace(v)
- fmt.Fprintf(r.w, "%s: %s\r\n", k, v)
- }
- }
+ r.header.Write(r.w)
r.w.WriteString("\r\n")
}
@@ -281,12 +210,13 @@ func (c *child) serve() {
func (c *child) serveRequest(req *request, body io.ReadCloser) {
r := newResponse(c, req)
- httpReq, err := req.httpRequest(body)
+ httpReq, err := cgi.RequestFromMap(req.params)
if err != nil {
// there was an error reading the request
r.WriteHeader(http.StatusInternalServerError)
c.conn.writeRecord(typeStderr, req.reqId, []byte(err.String()))
} else {
+ httpReq.Body = body
c.handler.ServeHTTP(r, httpReq)
}
if body != nil {
diff --git a/src/pkg/http/fs_test.go b/src/pkg/http/fs_test.go
index 09d0981f2..b94196258 100644
--- a/src/pkg/http/fs_test.go
+++ b/src/pkg/http/fs_test.go
@@ -96,7 +96,7 @@ func TestServeFileContentType(t *testing.T) {
}))
defer ts.Close()
get := func(want string) {
- resp, _, err := Get(ts.URL)
+ resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
diff --git a/src/pkg/http/header.go b/src/pkg/http/header.go
index 95b0f3db6..95140b01f 100644
--- a/src/pkg/http/header.go
+++ b/src/pkg/http/header.go
@@ -4,7 +4,14 @@
package http
-import "net/textproto"
+import (
+ "fmt"
+ "io"
+ "net/textproto"
+ "os"
+ "sort"
+ "strings"
+)
// A Header represents the key-value pairs in an HTTP header.
type Header map[string][]string
@@ -35,6 +42,37 @@ func (h Header) Del(key string) {
textproto.MIMEHeader(h).Del(key)
}
+// Write writes a header in wire format.
+func (h Header) Write(w io.Writer) os.Error {
+ return h.WriteSubset(w, nil)
+}
+
+// WriteSubset writes a header in wire format.
+// If exclude is not nil, keys where exclude[key] == true are not written.
+func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) os.Error {
+ keys := make([]string, 0, len(h))
+ for k := range h {
+ if exclude == nil || !exclude[k] {
+ keys = append(keys, k)
+ }
+ }
+ sort.SortStrings(keys)
+ for _, k := range keys {
+ for _, v := range h[k] {
+ v = strings.Replace(v, "\n", " ", -1)
+ v = strings.Replace(v, "\r", " ", -1)
+ v = strings.TrimSpace(v)
+ if v == "" {
+ continue
+ }
+ if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
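
For illustration only (not from this patch): a small sketch of the Header.Write and WriteSubset methods introduced above; the header keys and the exclude set are arbitrary.

    package main

    import (
        "http"
        "os"
    )

    func main() {
        h := http.Header{}
        h.Set("Content-Type", "text/plain; charset=utf-8")
        h.Set("Content-Length", "5")

        // Write emits all keys in sorted order, one "Key: value\r\n" line
        // per value, with CR/LF inside values replaced by spaces.
        h.Write(os.Stdout)

        // WriteSubset skips keys whose exclude entry is true; this is what
        // Request.write and Response.Write now use to omit the headers
        // they emit themselves.
        h.WriteSubset(os.Stdout, map[string]bool{"Content-Length": true})
    }
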
// CanonicalHeaderKey returns the canonical format of the
// header key s. The canonicalization converts the first
// letter and any letter following a hyphen to upper case;
diff --git a/src/pkg/http/header_test.go b/src/pkg/http/header_test.go
new file mode 100644
index 000000000..7e24cb069
--- /dev/null
+++ b/src/pkg/http/header_test.go
@@ -0,0 +1,71 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "testing"
+)
+
+var headerWriteTests = []struct {
+ h Header
+ exclude map[string]bool
+ expected string
+}{
+ {Header{}, nil, ""},
+ {
+ Header{
+ "Content-Type": {"text/html; charset=UTF-8"},
+ "Content-Length": {"0"},
+ },
+ nil,
+ "Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n",
+ },
+ {
+ Header{
+ "Content-Length": {"0", "1", "2"},
+ },
+ nil,
+ "Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n",
+ },
+ {
+ Header{
+ "Expires": {"-1"},
+ "Content-Length": {"0"},
+ "Content-Encoding": {"gzip"},
+ },
+ map[string]bool{"Content-Length": true},
+ "Content-Encoding: gzip\r\nExpires: -1\r\n",
+ },
+ {
+ Header{
+ "Expires": {"-1"},
+ "Content-Length": {"0", "1", "2"},
+ "Content-Encoding": {"gzip"},
+ },
+ map[string]bool{"Content-Length": true},
+ "Content-Encoding: gzip\r\nExpires: -1\r\n",
+ },
+ {
+ Header{
+ "Expires": {"-1"},
+ "Content-Length": {"0"},
+ "Content-Encoding": {"gzip"},
+ },
+ map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true},
+ "",
+ },
+}
+
+func TestHeaderWrite(t *testing.T) {
+ var buf bytes.Buffer
+ for i, test := range headerWriteTests {
+ test.h.WriteSubset(&buf, test.exclude)
+ if buf.String() != test.expected {
+ t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected)
+ }
+ buf.Reset()
+ }
+}
diff --git a/src/pkg/http/persist.go b/src/pkg/http/persist.go
index e4eea6815..62f9ff1b5 100644
--- a/src/pkg/http/persist.go
+++ b/src/pkg/http/persist.go
@@ -111,7 +111,7 @@ func (sc *ServerConn) Read() (req *Request, err os.Error) {
// Make sure body is fully consumed, even if user does not call body.Close
if lastbody != nil {
// body.Close is assumed to be idempotent and multiple calls to
- // it should return the error that its first invokation
+ // it should return the error that its first invocation
// returned.
err = lastbody.Close()
if err != nil {
@@ -222,7 +222,6 @@ type ClientConn struct {
pipe textproto.Pipeline
writeReq func(*Request, io.Writer) os.Error
- readRes func(buf *bufio.Reader, method string) (*Response, os.Error)
}
// NewClientConn returns a new ClientConn reading and writing c. If r is not
@@ -236,7 +235,6 @@ func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
r: r,
pipereq: make(map[*Request]uint),
writeReq: (*Request).Write,
- readRes: ReadResponse,
}
}
@@ -339,8 +337,13 @@ func (cc *ClientConn) Pending() int {
// returned together with an ErrPersistEOF, which means that the remote
// requested that this be the last request serviced. Read can be called
// concurrently with Write, but not with another Read.
-func (cc *ClientConn) Read(req *Request) (resp *Response, err os.Error) {
+func (cc *ClientConn) Read(req *Request) (*Response, os.Error) {
+ return cc.readUsing(req, ReadResponse)
+}
+
+// readUsing is the implementation of Read with a replaceable
+// ReadResponse-like function, used by the Transport.
+func (cc *ClientConn) readUsing(req *Request, readRes func(*bufio.Reader, *Request) (*Response, os.Error)) (resp *Response, err os.Error) {
// Retrieve the pipeline ID of this request/response pair
cc.lk.Lock()
id, ok := cc.pipereq[req]
@@ -383,7 +386,7 @@ func (cc *ClientConn) Read(req *Request) (resp *Response, err os.Error) {
}
}
- resp, err = cc.readRes(r, req.Method)
+ resp, err = readRes(r, req)
cc.lk.Lock()
defer cc.lk.Unlock()
if err != nil {
diff --git a/src/pkg/http/pprof/pprof.go b/src/pkg/http/pprof/pprof.go
index bc79e2183..917c7f877 100644
--- a/src/pkg/http/pprof/pprof.go
+++ b/src/pkg/http/pprof/pprof.go
@@ -26,6 +26,7 @@ package pprof
import (
"bufio"
+ "bytes"
"fmt"
"http"
"os"
@@ -88,10 +89,14 @@ func Profile(w http.ResponseWriter, r *http.Request) {
func Symbol(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ // We have to read the whole POST body before
+ // writing any output. Buffer the output here.
+ var buf bytes.Buffer
+
// We don't know how many symbols we have, but we
// do have symbol information. Pprof only cares whether
// this number is 0 (no symbols available) or > 0.
- fmt.Fprintf(w, "num_symbols: 1\n")
+ fmt.Fprintf(&buf, "num_symbols: 1\n")
var b *bufio.Reader
if r.Method == "POST" {
@@ -109,14 +114,19 @@ func Symbol(w http.ResponseWriter, r *http.Request) {
if pc != 0 {
f := runtime.FuncForPC(uintptr(pc))
if f != nil {
- fmt.Fprintf(w, "%#x %s\n", pc, f.Name())
+ fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
}
}
// Wait until here to check for err; the last
// symbol will have an err because it doesn't end in +.
if err != nil {
+ if err != os.EOF {
+ fmt.Fprintf(&buf, "reading request: %v\n", err)
+ }
break
}
}
+
+ w.Write(buf.Bytes())
}
diff --git a/src/pkg/http/proxy_test.go b/src/pkg/http/proxy_test.go
index 308bf44b4..9b320b3aa 100644
--- a/src/pkg/http/proxy_test.go
+++ b/src/pkg/http/proxy_test.go
@@ -40,10 +40,8 @@ func TestUseProxy(t *testing.T) {
no_proxy := "foobar.com, .barbaz.net"
os.Setenv("NO_PROXY", no_proxy)
- tr := &Transport{}
-
for _, test := range UseProxyTests {
- if tr.useProxy(test.host+":80") != test.match {
+ if useProxy(test.host+":80") != test.match {
t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match)
}
}
diff --git a/src/pkg/http/request.go b/src/pkg/http/request.go
index b8e9a2142..05d489211 100644
--- a/src/pkg/http/request.go
+++ b/src/pkg/http/request.go
@@ -12,6 +12,7 @@ import (
"bufio"
"crypto/tls"
"container/vector"
+ "encoding/base64"
"fmt"
"io"
"io/ioutil"
@@ -275,9 +276,7 @@ func (req *Request) write(w io.Writer, usingProxy bool) os.Error {
fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), uri)
// Header lines
- if !usingProxy {
- fmt.Fprintf(w, "Host: %s\r\n", host)
- }
+ fmt.Fprintf(w, "Host: %s\r\n", host)
fmt.Fprintf(w, "User-Agent: %s\r\n", valueOrDefault(req.UserAgent, defaultUserAgent))
if req.Referer != "" {
fmt.Fprintf(w, "Referer: %s\r\n", req.Referer)
@@ -300,7 +299,7 @@ func (req *Request) write(w io.Writer, usingProxy bool) os.Error {
// from Request, and introduce Request methods along the lines of
// Response.{GetHeader,AddHeader} and string constants for "Host",
// "User-Agent" and "Referer".
- err = writeSortedHeader(w, req.Header, reqExcludeHeader)
+ err = req.Header.WriteSubset(w, reqExcludeHeader)
if err != nil {
return err
}
@@ -479,6 +478,18 @@ func NewRequest(method, url string, body io.Reader) (*Request, os.Error) {
return req, nil
}
+// SetBasicAuth sets the request's Authorization header to use HTTP
+// Basic Authentication with the provided username and password.
+//
+// With HTTP Basic Authentication the provided username and password
+// are not encrypted.
+func (r *Request) SetBasicAuth(username, password string) {
+ s := username + ":" + password
+ buf := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(buf, []byte(s))
+ r.Header.Set("Authorization", "Basic "+string(buf))
+}
+
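
For illustration only (not from this patch): using the new SetBasicAuth before sending a request; the credentials and URL are hypothetical.

    package main

    import (
        "http"
        "log"
    )

    func main() {
        req, err := http.NewRequest("GET", "http://example.com/private", nil)
        if err != nil {
            log.Fatal(err)
        }
        // Sets "Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==";
        // the credentials are only base64-encoded, not encrypted.
        req.SetBasicAuth("Aladdin", "open sesame")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
    }
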
// ReadRequest reads and parses a request from b.
func ReadRequest(b *bufio.Reader) (req *Request, err os.Error) {
@@ -715,9 +726,11 @@ func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, o
return nil, nil, err
}
}
- if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
- f, err := fhs[0].Open()
- return f, fhs[0], err
+ if r.MultipartForm != nil && r.MultipartForm.File != nil {
+ if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
+ f, err := fhs[0].Open()
+ return f, fhs[0], err
+ }
}
return nil, nil, ErrMissingFile
}
diff --git a/src/pkg/http/request_test.go b/src/pkg/http/request_test.go
index f982471d8..e03ed3b05 100644
--- a/src/pkg/http/request_test.go
+++ b/src/pkg/http/request_test.go
@@ -162,16 +162,25 @@ func TestRedirect(t *testing.T) {
defer ts.Close()
var end = regexp.MustCompile("/foo/$")
- r, url, err := Get(ts.URL)
+ r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
+ url := r.Request.URL.String()
if r.StatusCode != 200 || !end.MatchString(url) {
t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url)
}
}
+func TestSetBasicAuth(t *testing.T) {
+ r, _ := NewRequest("GET", "http://example.com/", nil)
+ r.SetBasicAuth("Aladdin", "open sesame")
+ if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e {
+ t.Errorf("got header %q, want %q", g, e)
+ }
+}
+
func TestMultipartRequest(t *testing.T) {
// Test that we can read the values and files of a
// multipart request with FormValue and FormFile,
@@ -200,11 +209,34 @@ func TestMultipartRequestAuto(t *testing.T) {
validateTestMultipartContents(t, req, true)
}
+func TestEmptyMultipartRequest(t *testing.T) {
+// Test that FormFile returns ErrMissingFile when the request
+// does not contain a multipart body.
+ req, err := NewRequest("GET", "/", nil)
+ if err != nil {
+ t.Errorf("NewRequest err = %q", err)
+ }
+ testMissingFile(t, req)
+}
+
+func testMissingFile(t *testing.T, req *Request) {
+ f, fh, err := req.FormFile("missing")
+ if f != nil {
+ t.Errorf("FormFile file = %q, want nil", f)
+ }
+ if fh != nil {
+ t.Errorf("FormFile file header = %q, want nil", fh)
+ }
+ if err != ErrMissingFile {
+ t.Errorf("FormFile err = %q, want ErrMissingFile", err)
+ }
+}
+
func newTestMultipartRequest(t *testing.T) *Request {
b := bytes.NewBufferString(strings.Replace(message, "\n", "\r\n", -1))
req, err := NewRequest("POST", "/", b)
if err != nil {
- t.Fatalf("NewRequest:", err)
+ t.Fatal("NewRequest:", err)
}
ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary)
req.Header.Set("Content-type", ctype)
@@ -218,6 +250,9 @@ func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) {
if g, e := req.FormValue("texta"), textaValue; g != e {
t.Errorf("texta value = %q, want %q", g, e)
}
+ if g := req.FormValue("missing"); g != "" {
+ t.Errorf("missing value = %q, want empty string", g)
+ }
assertMem := func(n string, fd multipart.File) {
if _, ok := fd.(*os.File); ok {
@@ -234,12 +269,14 @@ func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) {
t.Errorf("fileb has unexpected underlying type %T", fd)
}
}
+
+ testMissingFile(t, req)
}
func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File {
f, fh, err := req.FormFile(key)
if err != nil {
- t.Fatalf("FormFile(%q):", key, err)
+ t.Fatalf("FormFile(%q): %q", key, err)
}
if fh.Filename != expectFilename {
t.Errorf("filename = %q, want %q", fh.Filename, expectFilename)
diff --git a/src/pkg/http/requestwrite_test.go b/src/pkg/http/requestwrite_test.go
index bb000c701..beb51fb8d 100644
--- a/src/pkg/http/requestwrite_test.go
+++ b/src/pkg/http/requestwrite_test.go
@@ -69,6 +69,7 @@ var reqWriteTests = []reqWriteTest{
"Proxy-Connection: keep-alive\r\n\r\n",
"GET http://www.techcrunch.com/ HTTP/1.1\r\n" +
+ "Host: www.techcrunch.com\r\n" +
"User-Agent: Fake\r\n" +
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" +
"Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" +
@@ -101,6 +102,7 @@ var reqWriteTests = []reqWriteTest{
"6\r\nabcdef\r\n0\r\n\r\n",
"GET http://www.google.com/search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
"User-Agent: Go http package\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
"6\r\nabcdef\r\n0\r\n\r\n",
@@ -131,6 +133,7 @@ var reqWriteTests = []reqWriteTest{
"6\r\nabcdef\r\n0\r\n\r\n",
"POST http://www.google.com/search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
"User-Agent: Go http package\r\n" +
"Connection: close\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
@@ -164,6 +167,7 @@ var reqWriteTests = []reqWriteTest{
"abcdef",
"POST http://www.google.com/search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
"User-Agent: Go http package\r\n" +
"Connection: close\r\n" +
"Content-Length: 6\r\n" +
@@ -188,6 +192,7 @@ var reqWriteTests = []reqWriteTest{
// Looks weird but RawURL overrides what WriteProxy would choose.
"GET /search HTTP/1.1\r\n" +
+ "Host: www.google.com\r\n" +
"User-Agent: Go http package\r\n" +
"\r\n",
},
diff --git a/src/pkg/http/response.go b/src/pkg/http/response.go
index 1f725ecdd..42e60c1f6 100644
--- a/src/pkg/http/response.go
+++ b/src/pkg/http/response.go
@@ -8,11 +8,9 @@ package http
import (
"bufio"
- "fmt"
"io"
"net/textproto"
"os"
- "sort"
"strconv"
"strings"
)
@@ -32,10 +30,6 @@ type Response struct {
ProtoMajor int // e.g. 1
ProtoMinor int // e.g. 0
- // RequestMethod records the method used in the HTTP request.
- // Header fields such as Content-Length have method-specific meaning.
- RequestMethod string // e.g. "HEAD", "CONNECT", "GET", etc.
-
// Header maps header keys to values. If the response had multiple
// headers with the same key, they will be concatenated, with comma
// delimiters. (Section 4.2 of RFC 2616 requires that multiple headers
@@ -70,19 +64,26 @@ type Response struct {
// Trailer maps trailer keys to values, in the same
// format as the header.
Trailer Header
+
+ // The Request that was sent to obtain this Response.
+ // Request's Body is nil (having already been consumed).
+ // This is only populated for Client requests.
+ Request *Request
}
-// ReadResponse reads and returns an HTTP response from r. The RequestMethod
-// parameter specifies the method used in the corresponding request (e.g.,
-// "GET", "HEAD"). Clients must call resp.Body.Close when finished reading
-// resp.Body. After that call, clients can inspect resp.Trailer to find
-// key/value pairs included in the response trailer.
-func ReadResponse(r *bufio.Reader, requestMethod string) (resp *Response, err os.Error) {
+// ReadResponse reads and returns an HTTP response from r. The
+// req parameter specifies the Request that corresponds to
+// this Response. Clients must call resp.Body.Close when finished
+// reading resp.Body. After that call, clients can inspect
+// resp.Trailer to find key/value pairs included in the response
+// trailer.
+func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err os.Error) {
tp := textproto.NewReader(r)
resp = new(Response)
- resp.RequestMethod = strings.ToUpper(requestMethod)
+ resp.Request = req
+ resp.Request.Method = strings.ToUpper(resp.Request.Method)
// Parse the first line of the response.
line, err := tp.ReadLine()
@@ -166,7 +167,9 @@ func (r *Response) ProtoAtLeast(major, minor int) bool {
func (resp *Response) Write(w io.Writer) os.Error {
// RequestMethod should be upper-case
- resp.RequestMethod = strings.ToUpper(resp.RequestMethod)
+ if resp.Request != nil {
+ resp.Request.Method = strings.ToUpper(resp.Request.Method)
+ }
// Status line
text := resp.Status
@@ -192,7 +195,7 @@ func (resp *Response) Write(w io.Writer) os.Error {
}
// Rest of header
- err = writeSortedHeader(w, resp.Header, respExcludeHeader)
+ err = resp.Header.WriteSubset(w, respExcludeHeader)
if err != nil {
return err
}
@@ -213,27 +216,3 @@ func (resp *Response) Write(w io.Writer) os.Error {
// Success
return nil
}
-
-func writeSortedHeader(w io.Writer, h Header, exclude map[string]bool) os.Error {
- keys := make([]string, 0, len(h))
- for k := range h {
- if exclude == nil || !exclude[k] {
- keys = append(keys, k)
- }
- }
- sort.SortStrings(keys)
- for _, k := range keys {
- for _, v := range h[k] {
- v = strings.Replace(v, "\n", " ", -1)
- v = strings.Replace(v, "\r", " ", -1)
- v = strings.TrimSpace(v)
- if v == "" {
- continue
- }
- if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, v); err != nil {
- return err
- }
- }
- }
- return nil
-}
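
For illustration only (not from this patch): calling ReadResponse with its new *Request parameter, in the same spirit as the tests that follow; the helper name and connection setup are hypothetical.

    package rawhttp

    import (
        "bufio"
        "http"
        "net"
        "os"
    )

    // readRawResponse reads one HTTP response off an already-dialed
    // connection, passing ReadResponse the request it answers so that
    // method-specific rules (e.g. no body after HEAD) still apply.
    func readRawResponse(conn net.Conn, req *http.Request) (*http.Response, os.Error) {
        resp, err := http.ReadResponse(bufio.NewReader(conn), req)
        if err != nil {
            return nil, err
        }
        return resp, nil // resp.Request == req; the caller closes resp.Body
    }
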
diff --git a/src/pkg/http/response_test.go b/src/pkg/http/response_test.go
index 9e77c20c4..1d4a23423 100644
--- a/src/pkg/http/response_test.go
+++ b/src/pkg/http/response_test.go
@@ -23,6 +23,10 @@ type respTest struct {
Body string
}
+func dummyReq(method string) *Request {
+ return &Request{Method: method}
+}
+
var respTests = []respTest{
// Unchunked response without Content-Length.
{
@@ -32,12 +36,12 @@ var respTests = []respTest{
"Body here\n",
Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- RequestMethod: "GET",
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
Header: Header{
"Connection": {"close"}, // TODO(rsc): Delete?
},
@@ -61,7 +65,7 @@ var respTests = []respTest{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Close: true,
ContentLength: -1,
},
@@ -81,7 +85,7 @@ var respTests = []respTest{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Close: false,
ContentLength: 0,
},
@@ -98,12 +102,12 @@ var respTests = []respTest{
"Body here\n",
Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- ProtoMinor: 0,
- RequestMethod: "GET",
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
Header: Header{
"Connection": {"close"}, // TODO(rsc): Delete?
"Content-Length": {"10"}, // TODO(rsc): Delete?
@@ -133,7 +137,7 @@ var respTests = []respTest{
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Header: Header{},
Close: true,
ContentLength: -1,
@@ -160,7 +164,7 @@ var respTests = []respTest{
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Header: Header{},
Close: true,
ContentLength: -1, // TODO(rsc): Fix?
@@ -183,7 +187,7 @@ var respTests = []respTest{
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
- RequestMethod: "HEAD",
+ Request: dummyReq("HEAD"),
Header: Header{},
Close: true,
ContentLength: 0,
@@ -199,12 +203,12 @@ var respTests = []respTest{
"\r\n",
Response{
- Status: "200 OK",
- StatusCode: 200,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- RequestMethod: "GET",
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("GET"),
Header: Header{
"Content-Length": {"0"},
},
@@ -225,7 +229,7 @@ var respTests = []respTest{
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Header: Header{},
Close: true,
ContentLength: -1,
@@ -244,7 +248,7 @@ var respTests = []respTest{
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Header: Header{},
Close: true,
ContentLength: -1,
@@ -259,7 +263,7 @@ func TestReadResponse(t *testing.T) {
tt := &respTests[i]
var braw bytes.Buffer
braw.WriteString(tt.Raw)
- resp, err := ReadResponse(bufio.NewReader(&braw), tt.Resp.RequestMethod)
+ resp, err := ReadResponse(bufio.NewReader(&braw), tt.Resp.Request)
if err != nil {
t.Errorf("#%d: %s", i, err)
continue
@@ -340,7 +344,7 @@ func TestReadResponseCloseInMiddle(t *testing.T) {
buf.WriteString("Next Request Here")
bufr := bufio.NewReader(&buf)
- resp, err := ReadResponse(bufr, "GET")
+ resp, err := ReadResponse(bufr, dummyReq("GET"))
checkErr(err, "ReadResponse")
expectedLength := int64(-1)
if !test.chunked {
@@ -372,7 +376,7 @@ func TestReadResponseCloseInMiddle(t *testing.T) {
rest, err := ioutil.ReadAll(bufr)
checkErr(err, "ReadAll on remainder")
if e, g := "Next Request Here", string(rest); e != g {
- fatalf("for chunked=%v remainder = %q, expected %q", g, e)
+ fatalf("remainder = %q, expected %q", g, e)
}
}
}
@@ -381,7 +385,7 @@ func diff(t *testing.T, prefix string, have, want interface{}) {
hv := reflect.ValueOf(have).Elem()
wv := reflect.ValueOf(want).Elem()
if hv.Type() != wv.Type() {
- t.Errorf("%s: type mismatch %v vs %v", prefix, hv.Type(), wv.Type())
+ t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type())
}
for i := 0; i < hv.NumField(); i++ {
hf := hv.Field(i).Interface()
diff --git a/src/pkg/http/responsewrite_test.go b/src/pkg/http/responsewrite_test.go
index de0635da5..f8e63acf4 100644
--- a/src/pkg/http/responsewrite_test.go
+++ b/src/pkg/http/responsewrite_test.go
@@ -22,7 +22,7 @@ var respWriteTests = []respWriteTest{
StatusCode: 503,
ProtoMajor: 1,
ProtoMinor: 0,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Header: Header{},
Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
ContentLength: 6,
@@ -38,7 +38,7 @@ var respWriteTests = []respWriteTest{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 0,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Header: Header{},
Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
ContentLength: -1,
@@ -53,7 +53,7 @@ var respWriteTests = []respWriteTest{
StatusCode: 200,
ProtoMajor: 1,
ProtoMinor: 1,
- RequestMethod: "GET",
+ Request: dummyReq("GET"),
Header: Header{},
Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")),
ContentLength: 6,
@@ -71,10 +71,10 @@ var respWriteTests = []respWriteTest{
// Also tests removal of leading and trailing whitespace.
{
Response{
- StatusCode: 204,
- ProtoMajor: 1,
- ProtoMinor: 1,
- RequestMethod: "GET",
+ StatusCode: 204,
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("GET"),
Header: Header{
"Foo": []string{" Bar\nBaz "},
},
diff --git a/src/pkg/http/reverseproxy_test.go b/src/pkg/http/reverseproxy_test.go
index 8cf7705d7..162000ece 100644
--- a/src/pkg/http/reverseproxy_test.go
+++ b/src/pkg/http/reverseproxy_test.go
@@ -33,7 +33,7 @@ func TestReverseProxy(t *testing.T) {
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
- res, _, err := Get(frontend.URL)
+ res, err := Get(frontend.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
diff --git a/src/pkg/http/serve_test.go b/src/pkg/http/serve_test.go
index c3c7b8d33..120a02605 100644
--- a/src/pkg/http/serve_test.go
+++ b/src/pkg/http/serve_test.go
@@ -252,7 +252,7 @@ func TestServerTimeouts(t *testing.T) {
// Hit the HTTP server successfully.
tr := &Transport{DisableKeepAlives: true} // they interfere with this test
c := &Client{Transport: tr}
- r, _, err := c.Get(url)
+ r, err := c.Get(url)
if err != nil {
t.Fatalf("http Get #1: %v", err)
}
@@ -282,7 +282,7 @@ func TestServerTimeouts(t *testing.T) {
// Hit the HTTP server successfully again, verifying that the
// previous slow connection didn't run our handler. (that we
// get "req=2", not "req=3")
- r, _, err = Get(url)
+ r, err = Get(url)
if err != nil {
t.Fatalf("http Get #2: %v", err)
}
@@ -323,7 +323,7 @@ func TestIdentityResponse(t *testing.T) {
// responses.
for _, te := range []string{"", "identity"} {
url := ts.URL + "/?te=" + te
- res, _, err := Get(url)
+ res, err := Get(url)
if err != nil {
t.Fatalf("error with Get of %s: %v", url, err)
}
@@ -342,7 +342,7 @@ func TestIdentityResponse(t *testing.T) {
// Verify that ErrContentLength is returned
url := ts.URL + "/?overwrite=1"
- _, _, err := Get(url)
+ _, err := Get(url)
if err != nil {
t.Fatalf("error with Get of %s: %v", url, err)
}
@@ -389,7 +389,7 @@ func TestServeHTTP10Close(t *testing.T) {
}
r := bufio.NewReader(conn)
- _, err = ReadResponse(r, "GET")
+ _, err = ReadResponse(r, &Request{Method: "GET"})
if err != nil {
t.Fatal("ReadResponse error:", err)
}
@@ -417,7 +417,7 @@ func TestSetsRemoteAddr(t *testing.T) {
}))
defer ts.Close()
- res, _, err := Get(ts.URL)
+ res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get error: %v", err)
}
@@ -438,7 +438,7 @@ func TestChunkedResponseHeaders(t *testing.T) {
}))
defer ts.Close()
- res, _, err := Get(ts.URL)
+ res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get error: %v", err)
}
@@ -465,7 +465,7 @@ func Test304Responses(t *testing.T) {
}
}))
defer ts.Close()
- res, _, err := Get(ts.URL)
+ res, err := Get(ts.URL)
if err != nil {
t.Error(err)
}
@@ -516,7 +516,7 @@ func TestTLSServer(t *testing.T) {
if !strings.HasPrefix(ts.URL, "https://") {
t.Fatalf("expected test TLS server to start with https://, got %q", ts.URL)
}
- res, _, err := Get(ts.URL)
+ res, err := Get(ts.URL)
if err != nil {
t.Error(err)
}
@@ -551,7 +551,7 @@ var serverExpectTests = []serverExpectTest{
{100, "", true, "200 OK"},
// 100-continue but requesting client to deny us,
- // so it never eads the body.
+ // so it never reads the body.
{100, "100-continue", false, "401 Unauthorized"},
// Likewise without 100-continue:
{100, "", false, "401 Unauthorized"},
@@ -618,49 +618,29 @@ func TestServerExpect(t *testing.T) {
}
func TestServerConsumesRequestBody(t *testing.T) {
- log := make(chan string, 100)
-
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- log <- "got_request"
- w.WriteHeader(StatusOK)
- log <- "wrote_header"
- }))
- defer ts.Close()
-
- conn, err := net.Dial("tcp", ts.Listener.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer conn.Close()
-
- bufr := bufio.NewReader(conn)
- gotres := make(chan bool)
- go func() {
- line, err := bufr.ReadString('\n')
- if err != nil {
- t.Fatal(err)
+ conn := new(testConn)
+ body := strings.Repeat("x", 1<<20)
+ conn.readBuf.Write([]byte(fmt.Sprintf(
+ "POST / HTTP/1.1\r\n"+
+ "Host: test\r\n"+
+ "Content-Length: %d\r\n"+
+ "\r\n", len(body))))
+ conn.readBuf.Write([]byte(body))
+
+ done := make(chan bool)
+
+ ls := &oneConnListener{conn}
+ go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
+ if conn.readBuf.Len() < len(body)/2 {
+ t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
}
- log <- line
- gotres <- true
- }()
-
- size := 1 << 20
- log <- "writing_request"
- fmt.Fprintf(conn, "POST / HTTP/1.0\r\nContent-Length: %d\r\n\r\n", size)
- time.Sleep(25e6) // give server chance to misbehave & speak out of turn
- log <- "slept_after_req_headers"
- conn.Write([]byte(strings.Repeat("a", size)))
-
- <-gotres
- expected := []string{
- "writing_request", "got_request",
- "slept_after_req_headers", "wrote_header",
- "HTTP/1.0 200 OK\r\n"}
- for step, e := range expected {
- if g := <-log; e != g {
- t.Errorf("on step %d expected %q, got %q", step, e, g)
+ rw.WriteHeader(200)
+ if g, e := conn.readBuf.Len(), 0; g != e {
+ t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e)
}
- }
+ done <- true
+ }))
+ <-done
}
func TestTimeoutHandler(t *testing.T) {
@@ -677,7 +657,7 @@ func TestTimeoutHandler(t *testing.T) {
// Succeed without timing out:
sendHi <- true
- res, _, err := Get(ts.URL)
+ res, err := Get(ts.URL)
if err != nil {
t.Error(err)
}
@@ -694,7 +674,7 @@ func TestTimeoutHandler(t *testing.T) {
// Times out:
timeout <- 1
- res, _, err = Get(ts.URL)
+ res, err = Get(ts.URL)
if err != nil {
t.Error(err)
}
@@ -713,3 +693,64 @@ func TestTimeoutHandler(t *testing.T) {
t.Errorf("expected Write error of %v; got %v", e, g)
}
}
+
+// Verifies that Redirect does not apply path.Clean() to the wrong
+// parts of the URL, such as the query string.
+func TestRedirectMunging(t *testing.T) {
+ req, _ := NewRequest("GET", "http://example.com/", nil)
+
+ resp := httptest.NewRecorder()
+ Redirect(resp, req, "/foo?next=http://bar.com/", 302)
+ if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e {
+ t.Errorf("Location header was %q; want %q", g, e)
+ }
+
+ resp = httptest.NewRecorder()
+ Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302)
+ if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e {
+ t.Errorf("Location header was %q; want %q", g, e)
+ }
+}
+
+// TestZeroLengthPostAndResponse exercises an optimization done by the Transport:
+// when there is no body (either because the method doesn't permit a body, or an
+// explicit Content-Length of zero is present), then the transport can re-use the
+// connection immediately. But when it re-uses the connection, it typically closes
+// the previous request's body, which is not optimal for zero-length bodies,
+// as the client would then see http.ErrBodyReadAfterClose instead of (0, os.EOF).
+func TestZeroLengthPostAndResponse(t *testing.T) {
+ ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
+ all, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatalf("handler ReadAll: %v", err)
+ }
+ if len(all) != 0 {
+ t.Errorf("handler got %d bytes; expected 0", len(all))
+ }
+ rw.Header().Set("Content-Length", "0")
+ }))
+ defer ts.Close()
+
+ req, err := NewRequest("POST", ts.URL, strings.NewReader(""))
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.ContentLength = 0
+
+ var resp [5]*Response
+ for i := range resp {
+ resp[i], err = DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("client post #%d: %v", i, err)
+ }
+ }
+
+ for i := range resp {
+ all, err := ioutil.ReadAll(resp[i].Body)
+ if err != nil {
+ t.Fatalf("req #%d: client ReadAll: %v", i, err)
+ }
+ if len(all) != 0 {
+ t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all))
+ }
+ }
+}
diff --git a/src/pkg/http/server.go b/src/pkg/http/server.go
index 96d2cb638..eb5a3a365 100644
--- a/src/pkg/http/server.go
+++ b/src/pkg/http/server.go
@@ -309,7 +309,7 @@ func (w *response) WriteHeader(code int) {
text = "status code " + codestring
}
io.WriteString(w.conn.buf, proto+" "+codestring+" "+text+"\r\n")
- writeSortedHeader(w.conn.buf, w.header, nil)
+ w.header.Write(w.conn.buf)
io.WriteString(w.conn.buf, "\r\n")
}
@@ -581,12 +581,18 @@ func Redirect(w ResponseWriter, r *Request, url string, code int) {
url = olddir + url
}
+ var query string
+ if i := strings.Index(url, "?"); i != -1 {
+ url, query = url[:i], url[i:]
+ }
+
// clean up but preserve trailing slash
trailing := url[len(url)-1] == '/'
url = path.Clean(url)
if trailing && url[len(url)-1] != '/' {
url += "/"
}
+ url += query
}
}
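
For illustration only (not from this patch): why the query must be split off before path.Clean — Clean treats the whole string as a path and would rewrite separators inside the query. A minimal worked sketch:

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        // Cleaning the full string mangles the URL embedded in the query:
        // the "//" collapses and the trailing slash is dropped.
        fmt.Println(path.Clean("/foo?next=http://bar.com/"))
        // -> /foo?next=http:/bar.com

        // With the query split off first, only the path is cleaned.
        fmt.Println(path.Clean("/foo") + "?next=http://bar.com/")
        // -> /foo?next=http://bar.com/
    }
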
diff --git a/src/pkg/http/spdy/Makefile b/src/pkg/http/spdy/Makefile
new file mode 100644
index 000000000..ff70d0437
--- /dev/null
+++ b/src/pkg/http/spdy/Makefile
@@ -0,0 +1,11 @@
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=http/spdy
+GOFILES=\
+ protocol.go\
+
+include ../../../Make.pkg
diff --git a/src/pkg/http/spdy/protocol.go b/src/pkg/http/spdy/protocol.go
new file mode 100644
index 000000000..ad9aa6333
--- /dev/null
+++ b/src/pkg/http/spdy/protocol.go
@@ -0,0 +1,367 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spdy is an incomplete implementation of the SPDY protocol.
+//
+// The implementation follows draft 2 of the spec:
+// https://sites.google.com/a/chromium.org/dev/spdy/spdy-protocol/spdy-protocol-draft2
+package spdy
+
+import (
+ "bytes"
+ "compress/zlib"
+ "encoding/binary"
+ "http"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// Version is the protocol version number that this package implements.
+const Version = 2
+
+// ControlFrameType stores the type field in a control frame header.
+type ControlFrameType uint16
+
+// Control frame type constants
+const (
+ TypeSynStream ControlFrameType = 0x0001
+ TypeSynReply = 0x0002
+ TypeRstStream = 0x0003
+ TypeSettings = 0x0004
+ TypeNoop = 0x0005
+ TypePing = 0x0006
+ TypeGoaway = 0x0007
+ TypeHeaders = 0x0008
+ TypeWindowUpdate = 0x0009
+)
+
+func (t ControlFrameType) String() string {
+ switch t {
+ case TypeSynStream:
+ return "SYN_STREAM"
+ case TypeSynReply:
+ return "SYN_REPLY"
+ case TypeRstStream:
+ return "RST_STREAM"
+ case TypeSettings:
+ return "SETTINGS"
+ case TypeNoop:
+ return "NOOP"
+ case TypePing:
+ return "PING"
+ case TypeGoaway:
+ return "GOAWAY"
+ case TypeHeaders:
+ return "HEADERS"
+ case TypeWindowUpdate:
+ return "WINDOW_UPDATE"
+ }
+ return "Type(" + strconv.Itoa(int(t)) + ")"
+}
+
+type FrameFlags uint8
+
+// Stream frame flags
+const (
+ FlagFin FrameFlags = 0x01
+ FlagUnidirectional = 0x02
+)
+
+// SETTINGS frame flags
+const (
+ FlagClearPreviouslyPersistedSettings FrameFlags = 0x01
+)
+
+// MaxDataLength is the maximum number of bytes that can be stored in one frame.
+const MaxDataLength = 1<<24 - 1
+
+// A Frame is a framed message as sent between clients and servers.
+// There are two types of frames: control frames and data frames.
+type Frame struct {
+ Header [4]byte
+ Flags FrameFlags
+ Data []byte
+}
+
+// ControlFrame creates a control frame with the given information.
+func ControlFrame(t ControlFrameType, f FrameFlags, data []byte) Frame {
+ return Frame{
+ Header: [4]byte{
+ (Version&0xff00)>>8 | 0x80,
+ (Version & 0x00ff),
+ byte((t & 0xff00) >> 8),
+ byte((t & 0x00ff) >> 0),
+ },
+ Flags: f,
+ Data: data,
+ }
+}
+
+// DataFrame creates a data frame with the given information.
+func DataFrame(streamId uint32, f FrameFlags, data []byte) Frame {
+ return Frame{
+ Header: [4]byte{
+ byte(streamId & 0x7f000000 >> 24),
+ byte(streamId & 0x00ff0000 >> 16),
+ byte(streamId & 0x0000ff00 >> 8),
+ byte(streamId & 0x000000ff >> 0),
+ },
+ Flags: f,
+ Data: data,
+ }
+}
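
For illustration only (not part of this file): constructing both frame kinds and writing their wire form; the comments restate the header packing used above, and the payloads are arbitrary.

    package main

    import (
        "bytes"
        "fmt"
        "http/spdy"
    )

    func main() {
        // Control PING frame: the high bit of the first header byte marks
        // a control frame; the last two header bytes carry the type.
        ping := spdy.ControlFrame(spdy.TypePing, 0x00, []byte{0x00, 0x00, 0x00, 0x01})

        // Data frame: the 4-byte header is just the 31-bit stream ID.
        data := spdy.DataFrame(5, spdy.FlagFin, []byte("hello"))

        var buf bytes.Buffer
        ping.WriteTo(&buf) // header(4) + flags(1) + length(3) + payload
        data.WriteTo(&buf)
        fmt.Printf("% x\n", buf.Bytes())
    }
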
+
+// ReadFrame reads an entire frame into memory.
+func ReadFrame(r io.Reader) (f Frame, err os.Error) {
+ _, err = io.ReadFull(r, f.Header[:])
+ if err != nil {
+ return
+ }
+ err = binary.Read(r, binary.BigEndian, &f.Flags)
+ if err != nil {
+ return
+ }
+ var lengthField [3]byte
+ _, err = io.ReadFull(r, lengthField[:])
+ if err != nil {
+ if err == os.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+ }
+ var length uint32
+ length |= uint32(lengthField[0]) << 16
+ length |= uint32(lengthField[1]) << 8
+ length |= uint32(lengthField[2]) << 0
+ if length > 0 {
+ f.Data = make([]byte, int(length))
+ _, err = io.ReadFull(r, f.Data)
+ if err == os.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ } else {
+ f.Data = []byte{}
+ }
+ return
+}
+
+// IsControl returns whether the frame holds a control frame.
+func (f Frame) IsControl() bool {
+ return f.Header[0]&0x80 != 0
+}
+
+// Type obtains the type field if the frame is a control frame, otherwise it returns zero.
+func (f Frame) Type() ControlFrameType {
+ if !f.IsControl() {
+ return 0
+ }
+ return (ControlFrameType(f.Header[2])<<8 | ControlFrameType(f.Header[3]))
+}
+
+// StreamId returns the stream ID field if the frame is a data frame, otherwise it returns zero.
+func (f Frame) StreamId() (id uint32) {
+ if f.IsControl() {
+ return 0
+ }
+ id |= uint32(f.Header[0]) << 24
+ id |= uint32(f.Header[1]) << 16
+ id |= uint32(f.Header[2]) << 8
+ id |= uint32(f.Header[3]) << 0
+ return
+}
+
+// WriteTo writes the frame in the SPDY format.
+func (f Frame) WriteTo(w io.Writer) (n int64, err os.Error) {
+ var nn int
+ // Header
+ nn, err = w.Write(f.Header[:])
+ n += int64(nn)
+ if err != nil {
+ return
+ }
+ // Flags
+ nn, err = w.Write([]byte{byte(f.Flags)})
+ n += int64(nn)
+ if err != nil {
+ return
+ }
+ // Length
+ nn, err = w.Write([]byte{
+ byte(len(f.Data) & 0x00ff0000 >> 16),
+ byte(len(f.Data) & 0x0000ff00 >> 8),
+ byte(len(f.Data) & 0x000000ff),
+ })
+ n += int64(nn)
+ if err != nil {
+ return
+ }
+ // Data
+ if len(f.Data) > 0 {
+ nn, err = w.Write(f.Data)
+ n += int64(nn)
+ }
+ return
+}
+
+// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
+// Even though the specification states there is no null byte at the end, Chrome sends it.
+const headerDictionary = "optionsgetheadpostputdeletetrace" +
+ "acceptaccept-charsetaccept-encodingaccept-languageauthorizationexpectfromhost" +
+ "if-modified-sinceif-matchif-none-matchif-rangeif-unmodifiedsince" +
+ "max-forwardsproxy-authorizationrangerefererteuser-agent" +
+ "100101200201202203204205206300301302303304305306307400401402403404405406407408409410411412413414415416417500501502503504505" +
+ "accept-rangesageetaglocationproxy-authenticatepublicretry-after" +
+ "servervarywarningwww-authenticateallowcontent-basecontent-encodingcache-control" +
+ "connectiondatetrailertransfer-encodingupgradeviawarning" +
+ "content-languagecontent-lengthcontent-locationcontent-md5content-rangecontent-typeetagexpireslast-modifiedset-cookie" +
+ "MondayTuesdayWednesdayThursdayFridaySaturdaySunday" +
+ "JanFebMarAprMayJunJulAugSepOctNovDec" +
+ "chunkedtext/htmlimage/pngimage/jpgimage/gifapplication/xmlapplication/xhtmltext/plainpublicmax-age" +
+ "charset=iso-8859-1utf-8gzipdeflateHTTP/1.1statusversionurl\x00"
+
+// hrSource is a reader that passes through reads from another reader.
+// When the underlying reader reaches EOF, Read will block until another reader is added via change.
+type hrSource struct {
+ r io.Reader
+ m sync.RWMutex
+ c *sync.Cond
+}
+
+func (src *hrSource) Read(p []byte) (n int, err os.Error) {
+ src.m.RLock()
+ for src.r == nil {
+ src.c.Wait()
+ }
+ n, err = src.r.Read(p)
+ src.m.RUnlock()
+ if err == os.EOF {
+ src.change(nil)
+ err = nil
+ }
+ return
+}
+
+func (src *hrSource) change(r io.Reader) {
+ src.m.Lock()
+ defer src.m.Unlock()
+ src.r = r
+ src.c.Broadcast()
+}
+
+// A HeaderReader reads zlib-compressed headers.
+type HeaderReader struct {
+ source hrSource
+ decompressor io.ReadCloser
+}
+
+// NewHeaderReader creates a HeaderReader with the initial dictionary.
+func NewHeaderReader() (hr *HeaderReader) {
+ hr = new(HeaderReader)
+ hr.source.c = sync.NewCond(hr.source.m.RLocker())
+ return
+}
+
+// ReadHeader reads a set of headers from a reader.
+func (hr *HeaderReader) ReadHeader(r io.Reader) (h http.Header, err os.Error) {
+ hr.source.change(r)
+ h, err = hr.read()
+ return
+}
+
+// Decode reads a set of headers from a block of bytes.
+func (hr *HeaderReader) Decode(data []byte) (h http.Header, err os.Error) {
+ hr.source.change(bytes.NewBuffer(data))
+ h, err = hr.read()
+ return
+}
+
+func (hr *HeaderReader) read() (h http.Header, err os.Error) {
+ var count uint16
+ if hr.decompressor == nil {
+ hr.decompressor, err = zlib.NewReaderDict(&hr.source, []byte(headerDictionary))
+ if err != nil {
+ return
+ }
+ }
+ err = binary.Read(hr.decompressor, binary.BigEndian, &count)
+ if err != nil {
+ return
+ }
+ h = make(http.Header, int(count))
+ for i := 0; i < int(count); i++ {
+ var name, value string
+ name, err = readHeaderString(hr.decompressor)
+ if err != nil {
+ return
+ }
+ value, err = readHeaderString(hr.decompressor)
+ if err != nil {
+ return
+ }
+ valueList := strings.Split(string(value), "\x00", -1)
+ for _, v := range valueList {
+ h.Add(name, v)
+ }
+ }
+ return
+}
+
+func readHeaderString(r io.Reader) (s string, err os.Error) {
+ var length uint16
+ err = binary.Read(r, binary.BigEndian, &length)
+ if err != nil {
+ return
+ }
+ data := make([]byte, int(length))
+ _, err = io.ReadFull(r, data)
+ if err != nil {
+ return
+ }
+ return string(data), nil
+}
+
+// HeaderWriter will write zlib-compressed headers on different streams.
+type HeaderWriter struct {
+ compressor *zlib.Writer
+ buffer *bytes.Buffer
+}
+
+// NewHeaderWriter creates a HeaderWriter ready to compress headers.
+func NewHeaderWriter(level int) (hw *HeaderWriter) {
+ hw = &HeaderWriter{buffer: new(bytes.Buffer)}
+ hw.compressor, _ = zlib.NewWriterDict(hw.buffer, level, []byte(headerDictionary))
+ return
+}
+
+// WriteHeader writes a header block directly to an output.
+func (hw *HeaderWriter) WriteHeader(w io.Writer, h http.Header) (err os.Error) {
+ hw.write(h)
+ _, err = io.Copy(w, hw.buffer)
+ hw.buffer.Reset()
+ return
+}
+
+// Encode returns a compressed header block.
+func (hw *HeaderWriter) Encode(h http.Header) (data []byte) {
+ hw.write(h)
+ data = make([]byte, hw.buffer.Len())
+ hw.buffer.Read(data)
+ return
+}
+
+func (hw *HeaderWriter) write(h http.Header) {
+ binary.Write(hw.compressor, binary.BigEndian, uint16(len(h)))
+ for k, vals := range h {
+ k = strings.ToLower(k)
+ binary.Write(hw.compressor, binary.BigEndian, uint16(len(k)))
+ binary.Write(hw.compressor, binary.BigEndian, []byte(k))
+ v := strings.Join(vals, "\x00")
+ binary.Write(hw.compressor, binary.BigEndian, uint16(len(v)))
+ binary.Write(hw.compressor, binary.BigEndian, []byte(v))
+ }
+ hw.compressor.Flush()
+}
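
For illustration only (not part of this file): a round trip through the header compressor and decompressor defined above, mirroring the shape of the tests below; the header values are arbitrary.

    package main

    import (
        "compress/zlib"
        "fmt"
        "http"
        "http/spdy"
        "log"
    )

    func main() {
        hw := spdy.NewHeaderWriter(zlib.BestCompression)
        hr := spdy.NewHeaderReader()

        // Both sides share the SPDY dictionary, so a block compressed by
        // one HeaderWriter can be decoded by the peer's HeaderReader.
        block := hw.Encode(http.Header{
            "Method":  {"get"},
            "Url":     {"http://www.google.com/"},
            "Version": {"http/1.1"},
        })

        h, err := hr.Decode(block)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(h.Get("Url"))
    }
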
diff --git a/src/pkg/http/spdy/protocol_test.go b/src/pkg/http/spdy/protocol_test.go
new file mode 100644
index 000000000..998ff998b
--- /dev/null
+++ b/src/pkg/http/spdy/protocol_test.go
@@ -0,0 +1,259 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "bytes"
+ "compress/zlib"
+ "http"
+ "os"
+ "testing"
+)
+
+type frameIoTest struct {
+ desc string
+ data []byte
+ frame Frame
+ readError os.Error
+ readOnly bool
+}
+
+var frameIoTests = []frameIoTest{
+ {
+ "noop frame",
+ []byte{
+ 0x80, 0x02, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ ControlFrame(
+ TypeNoop,
+ 0x00,
+ []byte{},
+ ),
+ nil,
+ false,
+ },
+ {
+ "ping frame",
+ []byte{
+ 0x80, 0x02, 0x00, 0x06,
+ 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x01,
+ },
+ ControlFrame(
+ TypePing,
+ 0x00,
+ []byte{0x00, 0x00, 0x00, 0x01},
+ ),
+ nil,
+ false,
+ },
+ {
+ "syn_stream frame",
+ []byte{
+ 0x80, 0x02, 0x00, 0x01,
+ 0x01, 0x00, 0x00, 0x53,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x78, 0xbb,
+ 0xdf, 0xa2, 0x51, 0xb2,
+ 0x62, 0x60, 0x66, 0x60,
+ 0xcb, 0x4d, 0x2d, 0xc9,
+ 0xc8, 0x4f, 0x61, 0x60,
+ 0x4e, 0x4f, 0x2d, 0x61,
+ 0x60, 0x2e, 0x2d, 0xca,
+ 0x61, 0x10, 0xcb, 0x28,
+ 0x29, 0x29, 0xb0, 0xd2,
+ 0xd7, 0x2f, 0x2f, 0x2f,
+ 0xd7, 0x4b, 0xcf, 0xcf,
+ 0x4f, 0xcf, 0x49, 0xd5,
+ 0x4b, 0xce, 0xcf, 0xd5,
+ 0x67, 0x60, 0x2f, 0x4b,
+ 0x2d, 0x2a, 0xce, 0xcc,
+ 0xcf, 0x63, 0xe0, 0x00,
+ 0x29, 0xd0, 0x37, 0xd4,
+ 0x33, 0x04, 0x00, 0x00,
+ 0x00, 0xff, 0xff,
+ },
+ ControlFrame(
+ TypeSynStream,
+ 0x01,
+ []byte{
+ 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x78, 0xbb,
+ 0xdf, 0xa2, 0x51, 0xb2,
+ 0x62, 0x60, 0x66, 0x60,
+ 0xcb, 0x4d, 0x2d, 0xc9,
+ 0xc8, 0x4f, 0x61, 0x60,
+ 0x4e, 0x4f, 0x2d, 0x61,
+ 0x60, 0x2e, 0x2d, 0xca,
+ 0x61, 0x10, 0xcb, 0x28,
+ 0x29, 0x29, 0xb0, 0xd2,
+ 0xd7, 0x2f, 0x2f, 0x2f,
+ 0xd7, 0x4b, 0xcf, 0xcf,
+ 0x4f, 0xcf, 0x49, 0xd5,
+ 0x4b, 0xce, 0xcf, 0xd5,
+ 0x67, 0x60, 0x2f, 0x4b,
+ 0x2d, 0x2a, 0xce, 0xcc,
+ 0xcf, 0x63, 0xe0, 0x00,
+ 0x29, 0xd0, 0x37, 0xd4,
+ 0x33, 0x04, 0x00, 0x00,
+ 0x00, 0xff, 0xff,
+ },
+ ),
+ nil,
+ false,
+ },
+ {
+ "data frame",
+ []byte{
+ 0x00, 0x00, 0x00, 0x05,
+ 0x01, 0x00, 0x00, 0x04,
+ 0x01, 0x02, 0x03, 0x04,
+ },
+ DataFrame(
+ 5,
+ 0x01,
+ []byte{0x01, 0x02, 0x03, 0x04},
+ ),
+ nil,
+ false,
+ },
+ {
+ "too much data",
+ []byte{
+ 0x00, 0x00, 0x00, 0x05,
+ 0x01, 0x00, 0x00, 0x04,
+ 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07, 0x08,
+ },
+ DataFrame(
+ 5,
+ 0x01,
+ []byte{0x01, 0x02, 0x03, 0x04},
+ ),
+ nil,
+ true,
+ },
+ {
+ "not enough data",
+ []byte{
+ 0x00, 0x00, 0x00, 0x05,
+ },
+ Frame{},
+ os.EOF,
+ true,
+ },
+}
+
+func TestReadFrame(t *testing.T) {
+ for _, tt := range frameIoTests {
+ f, err := ReadFrame(bytes.NewBuffer(tt.data))
+ if err != tt.readError {
+ t.Errorf("%s: ReadFrame: %s", tt.desc, err)
+ continue
+ }
+ if err == nil {
+ if !bytes.Equal(f.Header[:], tt.frame.Header[:]) {
+ t.Errorf("%s: header %q != %q", tt.desc, string(f.Header[:]), string(tt.frame.Header[:]))
+ }
+ if f.Flags != tt.frame.Flags {
+ t.Errorf("%s: flags %#02x != %#02x", tt.desc, f.Flags, tt.frame.Flags)
+ }
+ if !bytes.Equal(f.Data, tt.frame.Data) {
+ t.Errorf("%s: data %q != %q", tt.desc, string(f.Data), string(tt.frame.Data))
+ }
+ }
+ }
+}
+
+func TestWriteTo(t *testing.T) {
+ for _, tt := range frameIoTests {
+ if tt.readOnly {
+ continue
+ }
+ b := new(bytes.Buffer)
+ _, err := tt.frame.WriteTo(b)
+ if err != nil {
+ t.Errorf("%s: WriteTo: %s", tt.desc, err)
+ }
+ if !bytes.Equal(b.Bytes(), tt.data) {
+ t.Errorf("%s: data %q != %q", tt.desc, string(b.Bytes()), string(tt.data))
+ }
+ }
+}
+
+var headerDataTest = []byte{
+ 0x78, 0xbb, 0xdf, 0xa2,
+ 0x51, 0xb2, 0x62, 0x60,
+ 0x66, 0x60, 0xcb, 0x4d,
+ 0x2d, 0xc9, 0xc8, 0x4f,
+ 0x61, 0x60, 0x4e, 0x4f,
+ 0x2d, 0x61, 0x60, 0x2e,
+ 0x2d, 0xca, 0x61, 0x10,
+ 0xcb, 0x28, 0x29, 0x29,
+ 0xb0, 0xd2, 0xd7, 0x2f,
+ 0x2f, 0x2f, 0xd7, 0x4b,
+ 0xcf, 0xcf, 0x4f, 0xcf,
+ 0x49, 0xd5, 0x4b, 0xce,
+ 0xcf, 0xd5, 0x67, 0x60,
+ 0x2f, 0x4b, 0x2d, 0x2a,
+ 0xce, 0xcc, 0xcf, 0x63,
+ 0xe0, 0x00, 0x29, 0xd0,
+ 0x37, 0xd4, 0x33, 0x04,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff,
+}
+
+func TestReadHeader(t *testing.T) {
+ r := NewHeaderReader()
+ h, err := r.Decode(headerDataTest)
+ if err != nil {
+ t.Fatalf("Error: %v", err)
+ return
+ }
+ if len(h) != 3 {
+ t.Errorf("Header count = %d (expected 3)", len(h))
+ }
+ if h.Get("Url") != "http://www.google.com/" {
+ t.Errorf("Url: %q != %q", h.Get("Url"), "http://www.google.com/")
+ }
+ if h.Get("Method") != "get" {
+ t.Errorf("Method: %q != %q", h.Get("Method"), "get")
+ }
+ if h.Get("Version") != "http/1.1" {
+ t.Errorf("Version: %q != %q", h.Get("Version"), "http/1.1")
+ }
+}
+
+func TestWriteHeader(t *testing.T) {
+ for level := zlib.NoCompression; level <= zlib.BestCompression; level++ {
+ r := NewHeaderReader()
+ w := NewHeaderWriter(level)
+ for i := 0; i < 100; i++ {
+ b := new(bytes.Buffer)
+ gold := http.Header{
+ "Url": []string{"http://www.google.com/"},
+ "Method": []string{"get"},
+ "Version": []string{"http/1.1"},
+ }
+ w.WriteHeader(b, gold)
+ h, err := r.Decode(b.Bytes())
+ if err != nil {
+ t.Errorf("(level=%d i=%d) Error: %v", level, i, err)
+ return
+ }
+ if len(h) != len(gold) {
+ t.Errorf("(level=%d i=%d) Header count = %d (expected %d)", level, i, len(h), len(gold))
+ }
+ for k, _ := range h {
+ if h.Get(k) != gold.Get(k) {
+ t.Errorf("(level=%d i=%d) %s: %q != %q", level, i, k, h.Get(k), gold.Get(k))
+ }
+ }
+ }
+ }
+}
diff --git a/src/pkg/http/transfer.go b/src/pkg/http/transfer.go
index 98c32bab6..062e7a0ff 100644
--- a/src/pkg/http/transfer.go
+++ b/src/pkg/http/transfer.go
@@ -45,7 +45,7 @@ func newTransferWriter(r interface{}) (t *transferWriter, err os.Error) {
t.TransferEncoding = rr.TransferEncoding
t.Trailer = rr.Trailer
atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
- t.ResponseToHEAD = noBodyExpected(rr.RequestMethod)
+ t.ResponseToHEAD = noBodyExpected(rr.Request.Method)
}
// Sanitize Body,ContentLength,TransferEncoding
@@ -196,7 +196,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err os.Error) {
case *Response:
t.Header = rr.Header
t.StatusCode = rr.StatusCode
- t.RequestMethod = rr.RequestMethod
+ t.RequestMethod = rr.Request.Method
t.ProtoMajor = rr.ProtoMajor
t.ProtoMinor = rr.ProtoMinor
t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header)
@@ -439,9 +439,29 @@ type body struct {
hdr interface{} // non-nil (Response or Request) value means read trailer
r *bufio.Reader // underlying wire-format reader for the trailer
closing bool // is the connection to be closed after reading body?
+ closed bool
+}
+
+// ErrBodyReadAfterClose is returned when reading a Request Body after
+// the body has been closed. This typically happens when the body is
+// read after an HTTP Handler calls WriteHeader or Write on its
+// ResponseWriter.
+var ErrBodyReadAfterClose = os.NewError("http: invalid Read on closed request Body")
+
+func (b *body) Read(p []byte) (n int, err os.Error) {
+ if b.closed {
+ return 0, ErrBodyReadAfterClose
+ }
+ return b.Reader.Read(p)
}
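
For illustration only (not from this patch): the handler-side consequence of the closed flag added above — consume the request body before the first Write or WriteHeader, because that write closes the body for further reads. The handler name and route are hypothetical.

    package main

    import (
        "fmt"
        "http"
        "io/ioutil"
    )

    // echoLen reads the whole request body first, then writes; reading
    // r.Body after the first write would return ErrBodyReadAfterClose.
    func echoLen(w http.ResponseWriter, r *http.Request) {
        body, err := ioutil.ReadAll(r.Body)
        if err != nil {
            http.Error(w, err.String(), http.StatusInternalServerError)
            return
        }
        fmt.Fprintf(w, "read %d bytes\n", len(body))
    }

    func main() {
        http.HandleFunc("/echo", echoLen)
        http.ListenAndServe(":8080", nil)
    }
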
func (b *body) Close() os.Error {
+ if b.closed {
+ return nil
+ }
+ defer func() {
+ b.closed = true
+ }()
if b.hdr == nil && b.closing {
// no trailer and closing the connection next.
// no point in reading to EOF.
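
The ErrBodyReadAfterClose change above is easiest to see from a handler's point of view. A minimal sketch, assuming the server closes the request body once the handler replies, as the new doc comment describes; the handler, route and address are illustrative only (pre-Go 1 import paths, os.Error-era APIs):

package main

import (
	"http"
	"log"
)

func handler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK) // per the doc comment, the body is closed once the handler replies
	buf := make([]byte, 1)
	if _, err := r.Body.Read(buf); err == http.ErrBodyReadAfterClose {
		log.Println("Read after Close, as documented")
	}
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
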
diff --git a/src/pkg/http/transport.go b/src/pkg/http/transport.go
index 73a2c2191..34bfbdd34 100644
--- a/src/pkg/http/transport.go
+++ b/src/pkg/http/transport.go
@@ -6,12 +6,12 @@ package http
import (
"bufio"
- "bytes"
"compress/gzip"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
+ "io/ioutil"
"log"
"net"
"os"
@@ -24,7 +24,7 @@ import (
// each call to Do and uses HTTP proxies as directed by the
// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy)
// environment variables.
-var DefaultTransport RoundTripper = &Transport{}
+var DefaultTransport RoundTripper = &Transport{Proxy: ProxyFromEnvironment}
// DefaultMaxIdleConnsPerHost is the default value of Transport's
// MaxIdleConnsPerHost.
@@ -41,7 +41,12 @@ type Transport struct {
// TODO: tunable on timeout on cached connections
// TODO: optional pipelining
- IgnoreEnvironment bool // don't look at environment variables for proxy configuration
+ // Proxy optionally specifies a function to return a proxy for
+ // a given Request. If the function returns a non-nil error,
+ // the request is aborted with the provided error. If Proxy is
+ // nil or returns a nil *URL, no proxy is used.
+ Proxy func(*Request) (*URL, os.Error)
+
DisableKeepAlives bool
DisableCompression bool
@@ -51,6 +56,39 @@ type Transport struct {
MaxIdleConnsPerHost int
}
+// ProxyFromEnvironment returns the URL of the proxy to use for a
+// given request, as indicated by the environment variables
+// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy).
+// Either a URL or an error is returned; a nil URL and nil error mean
+// no proxy should be used.
+func ProxyFromEnvironment(req *Request) (*URL, os.Error) {
+ proxy := getenvEitherCase("HTTP_PROXY")
+ if proxy == "" {
+ return nil, nil
+ }
+ if !useProxy(canonicalAddr(req.URL)) {
+ return nil, nil
+ }
+ proxyURL, err := ParseRequestURL(proxy)
+ if err != nil {
+ return nil, os.ErrorString("invalid proxy address")
+ }
+ if proxyURL.Host == "" {
+ proxyURL, err = ParseRequestURL("http://" + proxy)
+ if err != nil {
+ return nil, os.ErrorString("invalid proxy address")
+ }
+ }
+ return proxyURL, nil
+}
+
+// ProxyURL returns a proxy function (for use in a Transport)
+// that always returns the same URL.
+func ProxyURL(url *URL) func(*Request) (*URL, os.Error) {
+ return func(*Request) (*URL, os.Error) {
+ return url, nil
+ }
+}
+
// RoundTrip implements the RoundTripper interface.
func (t *Transport) RoundTrip(req *Request) (resp *Response, err os.Error) {
if req.URL == nil {
@@ -101,21 +139,11 @@ func (t *Transport) CloseIdleConnections() {
// Private implementation past this point.
//
-func (t *Transport) getenvEitherCase(k string) string {
- if t.IgnoreEnvironment {
- return ""
- }
- if v := t.getenv(strings.ToUpper(k)); v != "" {
+func getenvEitherCase(k string) string {
+ if v := os.Getenv(strings.ToUpper(k)); v != "" {
return v
}
- return t.getenv(strings.ToLower(k))
-}
-
-func (t *Transport) getenv(k string) string {
- if t.IgnoreEnvironment {
- return ""
- }
- return os.Getenv(k)
+ return os.Getenv(strings.ToLower(k))
}
func (t *Transport) connectMethodForRequest(req *Request) (*connectMethod, os.Error) {
@@ -123,20 +151,12 @@ func (t *Transport) connectMethodForRequest(req *Request) (*connectMethod, os.Er
targetScheme: req.URL.Scheme,
targetAddr: canonicalAddr(req.URL),
}
-
- proxy := t.getenvEitherCase("HTTP_PROXY")
- if proxy != "" && t.useProxy(cm.targetAddr) {
- proxyURL, err := ParseRequestURL(proxy)
+ if t.Proxy != nil {
+ var err os.Error
+ cm.proxyURL, err = t.Proxy(req)
if err != nil {
- return nil, os.ErrorString("invalid proxy address")
- }
- if proxyURL.Host == "" {
- proxyURL, err = ParseRequestURL("http://" + proxy)
- if err != nil {
- return nil, os.ErrorString("invalid proxy address")
- }
+ return nil, err
}
- cm.proxyURL = proxyURL
}
return cm, nil
}
@@ -248,18 +268,22 @@ func (t *Transport) getConn(cm *connectMethod) (*persistConn, os.Error) {
}
}
case cm.targetScheme == "https":
- fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\n", cm.targetAddr)
- fmt.Fprintf(conn, "Host: %s\r\n", cm.targetAddr)
+ connectReq := &Request{
+ Method: "CONNECT",
+ RawURL: cm.targetAddr,
+ Host: cm.targetAddr,
+ Header: make(Header),
+ }
if pa != "" {
- fmt.Fprintf(conn, "Proxy-Authorization: %s\r\n", pa)
+ connectReq.Header.Set("Proxy-Authorization", pa)
}
- fmt.Fprintf(conn, "\r\n")
+ connectReq.Write(conn)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(conn)
- resp, err := ReadResponse(br, "CONNECT")
+ resp, err := ReadResponse(br, connectReq)
if err != nil {
conn.Close()
return nil, err
@@ -285,7 +309,6 @@ func (t *Transport) getConn(cm *connectMethod) (*persistConn, os.Error) {
pconn.br = bufio.NewReader(pconn.conn)
pconn.cc = newClientConnFunc(conn, pconn.br)
- pconn.cc.readRes = readResponseWithEOFSignal
go pconn.readLoop()
return pconn, nil
}
@@ -293,7 +316,7 @@ func (t *Transport) getConn(cm *connectMethod) (*persistConn, os.Error) {
// useProxy returns true if requests to addr should use a proxy,
// according to the NO_PROXY or no_proxy environment variable.
// addr is always a canonicalAddr with a host and port.
-func (t *Transport) useProxy(addr string) bool {
+func useProxy(addr string) bool {
if len(addr) == 0 {
return true
}
@@ -305,16 +328,12 @@ func (t *Transport) useProxy(addr string) bool {
return false
}
if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil && ip4[0] == 127 {
- // 127.0.0.0/8 loopback isn't proxied.
- return false
- }
- if bytes.Equal(ip, net.IPv6loopback) {
+ if ip.IsLoopback() {
return false
}
}
- no_proxy := t.getenvEitherCase("NO_PROXY")
+ no_proxy := getenvEitherCase("NO_PROXY")
if no_proxy == "*" {
return false
}
@@ -447,7 +466,25 @@ func (pc *persistConn) readLoop() {
}
rc := <-pc.reqch
- resp, err := pc.cc.Read(rc.req)
+ resp, err := pc.cc.readUsing(rc.req, func(buf *bufio.Reader, forReq *Request) (*Response, os.Error) {
+ resp, err := ReadResponse(buf, forReq)
+ if err != nil || resp.ContentLength == 0 {
+ return resp, err
+ }
+ if rc.addedGzip && resp.Header.Get("Content-Encoding") == "gzip" {
+ resp.Header.Del("Content-Encoding")
+ resp.Header.Del("Content-Length")
+ resp.ContentLength = -1
+ gzReader, err := gzip.NewReader(resp.Body)
+ if err != nil {
+ pc.close()
+ return nil, err
+ }
+ resp.Body = &readFirstCloseBoth{&discardOnCloseReadCloser{gzReader}, resp.Body}
+ }
+ resp.Body = &bodyEOFSignal{body: resp.Body}
+ return resp, err
+ })
if err == ErrPersistEOF {
// Succeeded, but we can't send any more
@@ -469,6 +506,17 @@ func (pc *persistConn) readLoop() {
waitForBodyRead <- true
}
} else {
+ // When there's no response body, we immediately
+ // reuse the TCP connection (putIdleConn), but
+ // we need to prevent ClientConn.Read from
+ // closing the Response.Body on the next
+ // loop, otherwise it might close the body
+ // before the client code has had a chance to
+ // read it (even though it'll just be 0, EOF).
+ pc.cc.lk.Lock()
+ pc.cc.lastbody = nil
+ pc.cc.lk.Unlock()
+
pc.t.putIdleConn(pc)
}
}
@@ -491,6 +539,11 @@ type responseAndError struct {
type requestAndChan struct {
req *Request
ch chan responseAndError
+
+ // did the Transport (as opposed to the client code) add an
+	// Accept-Encoding gzip header? only if we set it do
+ // we transparently decode the gzip.
+ addedGzip bool
}
func (pc *persistConn) roundTrip(req *Request) (resp *Response, err os.Error) {
@@ -522,25 +575,12 @@ func (pc *persistConn) roundTrip(req *Request) (resp *Response, err os.Error) {
}
ch := make(chan responseAndError, 1)
- pc.reqch <- requestAndChan{req, ch}
+ pc.reqch <- requestAndChan{req, ch, requestedGzip}
re := <-ch
pc.lk.Lock()
pc.numExpectedResponses--
pc.lk.Unlock()
- if re.err == nil && requestedGzip && re.res.Header.Get("Content-Encoding") == "gzip" {
- re.res.Header.Del("Content-Encoding")
- re.res.Header.Del("Content-Length")
- re.res.ContentLength = -1
- esb := re.res.Body.(*bodyEOFSignal)
- gzReader, err := gzip.NewReader(esb.body)
- if err != nil {
- pc.close()
- return nil, err
- }
- esb.body = &readFirstCloseBoth{gzReader, esb.body}
- }
-
return re.res, re.err
}
@@ -572,16 +612,6 @@ func responseIsKeepAlive(res *Response) bool {
return false
}
-// readResponseWithEOFSignal is a wrapper around ReadResponse that replaces
-// the response body with a bodyEOFSignal-wrapped version.
-func readResponseWithEOFSignal(r *bufio.Reader, requestMethod string) (resp *Response, err os.Error) {
- resp, err = ReadResponse(r, requestMethod)
- if err == nil && resp.ContentLength != 0 {
- resp.Body = &bodyEOFSignal{body: resp.Body}
- }
- return
-}
-
// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most
// once, right before the final Read() or Close() call returns, but after
// EOF has been seen.
@@ -604,6 +634,9 @@ func (es *bodyEOFSignal) Read(p []byte) (n int, err os.Error) {
}
func (es *bodyEOFSignal) Close() (err os.Error) {
+ if es.isClosed {
+ return nil
+ }
es.isClosed = true
err = es.body.Close()
if err == nil && es.fn != nil {
@@ -628,3 +661,13 @@ func (r *readFirstCloseBoth) Close() os.Error {
}
return nil
}
+
+// discardOnCloseReadCloser consumes all its input on Close.
+type discardOnCloseReadCloser struct {
+ io.ReadCloser
+}
+
+func (d *discardOnCloseReadCloser) Close() os.Error {
+ io.Copy(ioutil.Discard, d.ReadCloser) // ignore errors; likely invalid or already closed
+ return d.ReadCloser.Close()
+}
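
A minimal sketch of the new Proxy hook using the ProxyURL and ProxyFromEnvironment helpers added above. The proxy address is hypothetical; the Client/Transport wiring mirrors what TestTransportProxy below does:

package main

import (
	"http"
	"log"
)

func main() {
	proxyURL, err := http.ParseURL("http://proxy.example.com:3128") // hypothetical proxy
	if err != nil {
		log.Fatal(err)
	}
	// Route every request through one fixed proxy.
	c := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}}
	// Alternatively, honor $HTTP_PROXY/$NO_PROXY, as DefaultTransport now does.
	_ = &http.Transport{Proxy: http.ProxyFromEnvironment}

	resp, err := c.Get("http://www.google.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
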
diff --git a/src/pkg/http/transport_test.go b/src/pkg/http/transport_test.go
index a32ac4c4f..9cd18ffec 100644
--- a/src/pkg/http/transport_test.go
+++ b/src/pkg/http/transport_test.go
@@ -43,7 +43,7 @@ func TestTransportKeepAlives(t *testing.T) {
c := &Client{Transport: tr}
fetch := func(n int) string {
- res, _, err := c.Get(ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err)
}
@@ -160,7 +160,7 @@ func TestTransportIdleCacheKeys(t *testing.T) {
t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
}
- resp, _, err := c.Get(ts.URL)
+ resp, err := c.Get(ts.URL)
if err != nil {
t.Error(err)
}
@@ -201,7 +201,7 @@ func TestTransportMaxPerHostIdleConns(t *testing.T) {
	// Their responses will hang until we write to resch, though.
donech := make(chan bool)
doReq := func() {
- resp, _, err := c.Get(ts.URL)
+ resp, err := c.Get(ts.URL)
if err != nil {
t.Error(err)
}
@@ -256,26 +256,44 @@ func TestTransportServerClosingUnexpectedly(t *testing.T) {
tr := &Transport{}
c := &Client{Transport: tr}
- fetch := func(n int) string {
- res, _, err := c.Get(ts.URL)
- if err != nil {
- t.Fatalf("error in req #%d, GET: %v", n, err)
+ fetch := func(n, retries int) string {
+ condFatalf := func(format string, arg ...interface{}) {
+ if retries <= 0 {
+ t.Fatalf(format, arg...)
+ }
+ t.Logf("retrying shortly after expected error: "+format, arg...)
+ time.Sleep(1e9 / int64(retries))
}
- body, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatalf("error in req #%d, ReadAll: %v", n, err)
+ for retries >= 0 {
+ retries--
+ res, err := c.Get(ts.URL)
+ if err != nil {
+ condFatalf("error in req #%d, GET: %v", n, err)
+ continue
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ condFatalf("error in req #%d, ReadAll: %v", n, err)
+ continue
+ }
+ res.Body.Close()
+ return string(body)
}
- res.Body.Close()
- return string(body)
+ panic("unreachable")
}
- body1 := fetch(1)
- body2 := fetch(2)
+ body1 := fetch(1, 0)
+ body2 := fetch(2, 0)
ts.CloseClientConnections() // surprise!
- time.Sleep(25e6) // idle for a bit (test is inherently racey, but expectedly)
- body3 := fetch(3)
+ // This test has an expected race. Sleeping for 25 ms prevents
+ // it on most fast machines, causing the next fetch() call to
+ // succeed quickly. But if we do get errors, fetch() will retry 5
+ // times with some delays between.
+ time.Sleep(25e6)
+
+ body3 := fetch(3, 5)
if body1 != body2 {
t.Errorf("expected body1 and body2 to be equal")
@@ -376,6 +394,9 @@ func TestTransportGzip(t *testing.T) {
t.Errorf("Accept-Encoding = %q, want %q", g, e)
}
rw.Header().Set("Content-Encoding", "gzip")
+ if req.Method == "HEAD" {
+ return
+ }
var w io.Writer = rw
var buf bytes.Buffer
@@ -399,7 +420,7 @@ func TestTransportGzip(t *testing.T) {
c := &Client{Transport: &Transport{}}
// First fetch something large, but only read some of it.
- res, _, err := c.Get(ts.URL + "?body=large&chunked=" + chunked)
+ res, err := c.Get(ts.URL + "?body=large&chunked=" + chunked)
if err != nil {
t.Fatalf("large get: %v", err)
}
@@ -419,7 +440,7 @@ func TestTransportGzip(t *testing.T) {
}
// Then something small.
- res, _, err = c.Get(ts.URL + "?chunked=" + chunked)
+ res, err = c.Get(ts.URL + "?chunked=" + chunked)
if err != nil {
t.Fatal(err)
}
@@ -445,6 +466,40 @@ func TestTransportGzip(t *testing.T) {
t.Errorf("expected Read error after Close; got %d, %v", n, err)
}
}
+
+ // And a HEAD request too, because they're always weird.
+ c := &Client{Transport: &Transport{}}
+ res, err := c.Head(ts.URL)
+ if err != nil {
+ t.Fatalf("Head: %v", err)
+ }
+ if res.StatusCode != 200 {
+ t.Errorf("Head status=%d; want=200", res.StatusCode)
+ }
+}
+
+func TestTransportProxy(t *testing.T) {
+ ch := make(chan string, 1)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ch <- "real server"
+ }))
+ defer ts.Close()
+ proxy := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ch <- "proxy for " + r.URL.String()
+ }))
+ defer proxy.Close()
+
+ pu, err := ParseURL(proxy.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}}
+ c.Head(ts.URL)
+ got := <-ch
+ want := "proxy for " + ts.URL + "/"
+ if got != want {
+ t.Errorf("want %q, got %q", want, got)
+ }
}
// TestTransportGzipRecursive sends a gzip quine and checks that the
@@ -459,7 +514,7 @@ func TestTransportGzipRecursive(t *testing.T) {
defer ts.Close()
c := &Client{Transport: &Transport{}}
- res, _, err := c.Get(ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
diff --git a/src/pkg/http/url.go b/src/pkg/http/url.go
index 0fc0cb2d7..d7ee14ee8 100644
--- a/src/pkg/http/url.go
+++ b/src/pkg/http/url.go
@@ -449,7 +449,7 @@ func ParseURLReference(rawurlref string) (url *URL, err os.Error) {
//
// There are redundant fields stored in the URL structure:
// the String method consults Scheme, Path, Host, RawUserinfo,
-// RawQuery, and Fragment, but not Raw, RawPath or Authority.
+// RawQuery, and Fragment, but not Raw, RawPath or RawAuthority.
func (url *URL) String() string {
result := ""
if url.Scheme != "" {
diff --git a/src/pkg/image/bmp/Makefile b/src/pkg/image/bmp/Makefile
new file mode 100644
index 000000000..56635f7ce
--- /dev/null
+++ b/src/pkg/image/bmp/Makefile
@@ -0,0 +1,11 @@
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=image/bmp
+GOFILES=\
+ reader.go\
+
+include ../../../Make.pkg
diff --git a/src/pkg/image/bmp/reader.go b/src/pkg/image/bmp/reader.go
new file mode 100644
index 000000000..f2842caed
--- /dev/null
+++ b/src/pkg/image/bmp/reader.go
@@ -0,0 +1,148 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bmp implements a BMP image decoder.
+//
+// The BMP specification is at http://www.digicamsoft.com/bmp/bmp.html.
+package bmp
+
+import (
+ "image"
+ "io"
+ "os"
+)
+
+// ErrUnsupported means that the input BMP image uses a valid but unsupported
+// feature.
+var ErrUnsupported = os.NewError("bmp: unsupported BMP image")
+
+func readUint16(b []byte) uint16 {
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func readUint32(b []byte) uint32 {
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+// decodePaletted reads an 8 bit-per-pixel BMP image from r.
+func decodePaletted(r io.Reader, c image.Config) (image.Image, os.Error) {
+ var tmp [4]byte
+ paletted := image.NewPaletted(c.Width, c.Height, c.ColorModel.(image.PalettedColorModel))
+ // BMP images are stored bottom-up rather than top-down.
+ for y := c.Height - 1; y >= 0; y-- {
+ p := paletted.Pix[y*paletted.Stride : y*paletted.Stride+c.Width]
+ _, err := io.ReadFull(r, p)
+ if err != nil {
+ return nil, err
+ }
+ // Each row is 4-byte aligned.
+ if c.Width%4 != 0 {
+ _, err := io.ReadFull(r, tmp[:4-c.Width%4])
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return paletted, nil
+}
+
+// decodeRGBA reads a 24 bit-per-pixel BMP image from r.
+func decodeRGBA(r io.Reader, c image.Config) (image.Image, os.Error) {
+ rgba := image.NewRGBA(c.Width, c.Height)
+ // There are 3 bytes per pixel, and each row is 4-byte aligned.
+ b := make([]byte, (3*c.Width+3)&^3)
+ // BMP images are stored bottom-up rather than top-down.
+ for y := c.Height - 1; y >= 0; y-- {
+ _, err := io.ReadFull(r, b)
+ if err != nil {
+ return nil, err
+ }
+ p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width]
+ for x := range p {
+ // BMP images are stored in BGR order rather than RGB order.
+ p[x] = image.RGBAColor{b[3*x+2], b[3*x+1], b[3*x+0], 0xFF}
+ }
+ }
+ return rgba, nil
+}
+
+// Decode reads a BMP image from r and returns it as an image.Image.
+// Limitation: The file must be 8 or 24 bits per pixel.
+func Decode(r io.Reader) (image.Image, os.Error) {
+ c, err := DecodeConfig(r)
+ if err != nil {
+ return nil, err
+ }
+ if c.ColorModel == image.RGBAColorModel {
+ return decodeRGBA(r, c)
+ }
+ return decodePaletted(r, c)
+}
+
+// DecodeConfig returns the color model and dimensions of a BMP image without
+// decoding the entire image.
+// Limitation: The file must be 8 or 24 bits per pixel.
+func DecodeConfig(r io.Reader) (config image.Config, err os.Error) {
+ // We only support those BMP images that are a BITMAPFILEHEADER
+ // immediately followed by a BITMAPINFOHEADER.
+ const (
+ fileHeaderLen = 14
+ infoHeaderLen = 40
+ )
+ var b [1024]byte
+ if _, err = io.ReadFull(r, b[:fileHeaderLen+infoHeaderLen]); err != nil {
+ return
+ }
+ if string(b[:2]) != "BM" {
+ err = os.NewError("bmp: invalid format")
+ return
+ }
+ offset := readUint32(b[10:14])
+ if readUint32(b[14:18]) != infoHeaderLen {
+ err = ErrUnsupported
+ return
+ }
+ width := int(readUint32(b[18:22]))
+ height := int(readUint32(b[22:26]))
+ if width < 0 || height < 0 {
+ err = ErrUnsupported
+ return
+ }
+ // We only support 1 plane, 8 or 24 bits per pixel and no compression.
+ planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34])
+ if planes != 1 || compression != 0 {
+ err = ErrUnsupported
+ return
+ }
+ switch bpp {
+ case 8:
+ if offset != fileHeaderLen+infoHeaderLen+256*4 {
+ err = ErrUnsupported
+ return
+ }
+ _, err = io.ReadFull(r, b[:256*4])
+ if err != nil {
+ return
+ }
+ pcm := make(image.PalettedColorModel, 256)
+ for i := range pcm {
+ // BMP images are stored in BGR order rather than RGB order.
+ // Every 4th byte is padding.
+ pcm[i] = image.RGBAColor{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF}
+ }
+ return image.Config{pcm, width, height}, nil
+ case 24:
+ if offset != fileHeaderLen+infoHeaderLen {
+ err = ErrUnsupported
+ return
+ }
+ return image.Config{image.RGBAColorModel, width, height}, nil
+ }
+ err = ErrUnsupported
+ return
+}
+
+func init() {
+ image.RegisterFormat("bmp", "BM????\x00\x00\x00\x00", Decode, DecodeConfig)
+}
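
A minimal sketch of decoding a BMP through the generic image.Decode entry point, relying on the format registration in init above. The file name matches the testdata used by decode_test.go later in this diff; error handling is kept deliberately short:

package main

import (
	"bufio"
	"image"
	_ "image/bmp" // register the BMP decoder
	"log"
	"os"
)

func main() {
	f, err := os.Open("testdata/video-001.bmp")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	m, format, err := image.Decode(bufio.NewReader(f))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded %s image, bounds %v", format, m.Bounds())
}
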
diff --git a/src/pkg/image/decode_test.go b/src/pkg/image/decode_test.go
index 0716ad905..540d5eda5 100644
--- a/src/pkg/image/decode_test.go
+++ b/src/pkg/image/decode_test.go
@@ -10,27 +10,34 @@ import (
"os"
"testing"
- // TODO(nigeltao): implement bmp, gif and tiff decoders.
+ _ "image/bmp"
+ _ "image/gif"
_ "image/jpeg"
_ "image/png"
+ _ "image/tiff"
)
-const goldenFile = "testdata/video-001.png"
-
type imageTest struct {
- filename string
- tolerance int
+ goldenFilename string
+ filename string
+ tolerance int
}
var imageTests = []imageTest{
- //{"testdata/video-001.bmp", 0},
+ {"testdata/video-001.png", "testdata/video-001.bmp", 0},
// GIF images are restricted to a 256-color palette and the conversion
// to GIF loses significant image quality.
- //{"testdata/video-001.gif", 64<<8},
+ {"testdata/video-001.png", "testdata/video-001.gif", 64 << 8},
+ {"testdata/video-001.png", "testdata/video-001.interlaced.gif", 64 << 8},
+ {"testdata/video-001.png", "testdata/video-001.5bpp.gif", 128 << 8},
// JPEG is a lossy format and hence needs a non-zero tolerance.
- {"testdata/video-001.jpeg", 8 << 8},
- {"testdata/video-001.png", 0},
- //{"testdata/video-001.tiff", 0},
+ {"testdata/video-001.png", "testdata/video-001.jpeg", 8 << 8},
+ {"testdata/video-001.png", "testdata/video-001.png", 0},
+ {"testdata/video-001.png", "testdata/video-001.tiff", 0},
+
+ // Test grayscale images.
+ {"testdata/video-005.gray.png", "testdata/video-005.gray.jpeg", 8 << 8},
+ {"testdata/video-005.gray.png", "testdata/video-005.gray.png", 0},
}
func decode(filename string) (image.Image, string, os.Error) {
@@ -42,6 +49,15 @@ func decode(filename string) (image.Image, string, os.Error) {
return image.Decode(bufio.NewReader(f))
}
+func decodeConfig(filename string) (image.Config, string, os.Error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return image.Config{}, "", err
+ }
+ defer f.Close()
+ return image.DecodeConfig(bufio.NewReader(f))
+}
+
func delta(u0, u1 uint32) int {
d := int(u0) - int(u1)
if d < 0 {
@@ -61,29 +77,47 @@ func withinTolerance(c0, c1 image.Color, tolerance int) bool {
}
func TestDecode(t *testing.T) {
- golden, _, err := decode(goldenFile)
- if err != nil {
- t.Errorf("%s: %v", goldenFile, err)
- }
+ golden := make(map[string]image.Image)
loop:
for _, it := range imageTests {
- m, _, err := decode(it.filename)
+ g := golden[it.goldenFilename]
+ if g == nil {
+ var err os.Error
+ g, _, err = decode(it.goldenFilename)
+ if err != nil {
+ t.Errorf("%s: %v", it.goldenFilename, err)
+ continue loop
+ }
+ golden[it.goldenFilename] = g
+ }
+ m, imageFormat, err := decode(it.filename)
if err != nil {
t.Errorf("%s: %v", it.filename, err)
continue loop
}
- b := golden.Bounds()
+ b := g.Bounds()
if !b.Eq(m.Bounds()) {
t.Errorf("%s: want bounds %v got %v", it.filename, b, m.Bounds())
continue loop
}
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
- if !withinTolerance(golden.At(x, y), m.At(x, y), it.tolerance) {
- t.Errorf("%s: at (%d, %d), want %v got %v", it.filename, x, y, golden.At(x, y), m.At(x, y))
+ if !withinTolerance(g.At(x, y), m.At(x, y), it.tolerance) {
+ t.Errorf("%s: at (%d, %d), want %v got %v", it.filename, x, y, g.At(x, y), m.At(x, y))
continue loop
}
}
}
+ if imageFormat == "gif" {
+ // Each frame of a GIF can have a frame-local palette override the
+ // GIF-global palette. Thus, image.Decode can yield a different ColorModel
+ // than image.DecodeConfig.
+ continue
+ }
+		c, _, err := decodeConfig(it.filename)
+		if err != nil {
+			t.Errorf("%s: %v", it.filename, err)
+			continue loop
+		}
+ if m.ColorModel() != c.ColorModel {
+ t.Errorf("%s: color models differ", it.filename)
+ continue loop
+ }
}
}
diff --git a/src/pkg/image/gif/Makefile b/src/pkg/image/gif/Makefile
new file mode 100644
index 000000000..e89a71361
--- /dev/null
+++ b/src/pkg/image/gif/Makefile
@@ -0,0 +1,11 @@
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=image/gif
+GOFILES=\
+ reader.go\
+
+include ../../../Make.pkg
diff --git a/src/pkg/image/gif/reader.go b/src/pkg/image/gif/reader.go
new file mode 100644
index 000000000..5dd404036
--- /dev/null
+++ b/src/pkg/image/gif/reader.go
@@ -0,0 +1,423 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gif implements a GIF image decoder.
+//
+// The GIF specification is at http://www.w3.org/Graphics/GIF/spec-gif89a.txt.
+package gif
+
+import (
+ "bufio"
+ "compress/lzw"
+ "fmt"
+ "image"
+ "io"
+ "os"
+)
+
+// If the io.Reader does not also have ReadByte, then decode will introduce its own buffering.
+type reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// Masks etc.
+const (
+ // Fields.
+ fColorMapFollows = 1 << 7
+
+ // Image fields.
+ ifInterlace = 1 << 6
+
+ // Graphic control flags.
+ gcTransparentColorSet = 1 << 0
+)
+
+// Section indicators.
+const (
+ sExtension = 0x21
+ sImageDescriptor = 0x2C
+ sTrailer = 0x3B
+)
+
+// Extensions.
+const (
+ eText = 0x01 // Plain Text
+ eGraphicControl = 0xF9 // Graphic Control
+ eComment = 0xFE // Comment
+ eApplication = 0xFF // Application
+)
+
+// decoder is the type used to decode a GIF file.
+type decoder struct {
+ r reader
+
+ // From header.
+ vers string
+ width int
+ height int
+ flags byte
+ headerFields byte
+ backgroundIndex byte
+ loopCount int
+ delayTime int
+
+ // Unused from header.
+ aspect byte
+
+ // From image descriptor.
+ imageFields byte
+
+ // From graphics control.
+ transparentIndex byte
+
+ // Computed.
+ pixelSize uint
+ globalColorMap image.PalettedColorModel
+
+ // Used when decoding.
+ delay []int
+ image []*image.Paletted
+ tmp [1024]byte // must be at least 768 so we can read color map
+}
+
+// blockReader parses the block structure of GIF image data, which
+// comprises (n, (n bytes)) blocks, with 1 <= n <= 255. It is the
+// reader given to the LZW decoder, which is thus immune to the
+// blocking. After the LZW decoder completes, there will be a 0-byte
+// block remaining (0, ()), but under normal execution blockReader
+// doesn't consume it, so it is handled in decode.
+type blockReader struct {
+ r reader
+ slice []byte
+ tmp [256]byte
+}
+
+func (b *blockReader) Read(p []byte) (n int, err os.Error) {
+ if len(p) == 0 {
+ return
+ }
+ if len(b.slice) > 0 {
+ n = copy(p, b.slice)
+ b.slice = b.slice[n:]
+ return
+ }
+ var blockLen uint8
+ blockLen, err = b.r.ReadByte()
+ if err != nil {
+ return
+ }
+ if blockLen == 0 {
+ return 0, os.EOF
+ }
+ b.slice = b.tmp[0:blockLen]
+ if _, err = io.ReadFull(b.r, b.slice); err != nil {
+ return
+ }
+ return b.Read(p)
+}
+
+// decode reads a GIF image from r and stores the result in d.
+func (d *decoder) decode(r io.Reader, configOnly bool) os.Error {
+ // Add buffering if r does not provide ReadByte.
+ if rr, ok := r.(reader); ok {
+ d.r = rr
+ } else {
+ d.r = bufio.NewReader(r)
+ }
+
+ err := d.readHeaderAndScreenDescriptor()
+ if err != nil {
+ return err
+ }
+ if configOnly {
+ return nil
+ }
+
+ if d.headerFields&fColorMapFollows != 0 {
+ if d.globalColorMap, err = d.readColorMap(); err != nil {
+ return err
+ }
+ }
+
+Loop:
+ for err == nil {
+ var c byte
+ c, err = d.r.ReadByte()
+ if err == os.EOF {
+ break
+ }
+ switch c {
+ case sExtension:
+ err = d.readExtension()
+
+ case sImageDescriptor:
+ var m *image.Paletted
+ m, err = d.newImageFromDescriptor()
+ if err != nil {
+ break
+ }
+ if d.imageFields&fColorMapFollows != 0 {
+ m.Palette, err = d.readColorMap()
+ if err != nil {
+ break
+ }
+ // TODO: do we set transparency in this map too? That would be
+ // d.setTransparency(m.Palette)
+ } else {
+ m.Palette = d.globalColorMap
+ }
+ var litWidth uint8
+ litWidth, err = d.r.ReadByte()
+ if err != nil {
+ return err
+ }
+ if litWidth < 2 || litWidth > 8 {
+ return fmt.Errorf("gif: pixel size in decode out of range: %d", litWidth)
+ }
+ // A wonderfully Go-like piece of magic.
+ lzwr := lzw.NewReader(&blockReader{r: d.r}, lzw.LSB, int(litWidth))
+ if _, err = io.ReadFull(lzwr, m.Pix); err != nil {
+ break
+ }
+
+ // There should be a "0" block remaining; drain that.
+ c, err = d.r.ReadByte()
+ if err != nil {
+ return err
+ }
+ if c != 0 {
+ return os.ErrorString("gif: extra data after image")
+ }
+
+ // Undo the interlacing if necessary.
+ d.uninterlace(m)
+
+ d.image = append(d.image, m)
+ d.delay = append(d.delay, d.delayTime)
+ d.delayTime = 0 // TODO: is this correct, or should we hold on to the value?
+
+ case sTrailer:
+ break Loop
+
+ default:
+ err = fmt.Errorf("gif: unknown block type: 0x%.2x", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ if len(d.image) == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+func (d *decoder) readHeaderAndScreenDescriptor() os.Error {
+ _, err := io.ReadFull(d.r, d.tmp[0:13])
+ if err != nil {
+ return err
+ }
+ d.vers = string(d.tmp[0:6])
+ if d.vers != "GIF87a" && d.vers != "GIF89a" {
+ return fmt.Errorf("gif: can't recognize format %s", d.vers)
+ }
+ d.width = int(d.tmp[6]) + int(d.tmp[7])<<8
+ d.height = int(d.tmp[8]) + int(d.tmp[9])<<8
+ d.headerFields = d.tmp[10]
+ d.backgroundIndex = d.tmp[11]
+ d.aspect = d.tmp[12]
+ d.loopCount = -1
+ d.pixelSize = uint(d.headerFields&7) + 1
+ return nil
+}
+
+func (d *decoder) readColorMap() (image.PalettedColorModel, os.Error) {
+ if d.pixelSize > 8 {
+ return nil, fmt.Errorf("gif: can't handle %d bits per pixel", d.pixelSize)
+ }
+ numColors := 1 << d.pixelSize
+ numValues := 3 * numColors
+ _, err := io.ReadFull(d.r, d.tmp[0:numValues])
+ if err != nil {
+ return nil, fmt.Errorf("gif: short read on color map: %s", err)
+ }
+ colorMap := make(image.PalettedColorModel, numColors)
+ j := 0
+ for i := range colorMap {
+ colorMap[i] = image.RGBAColor{d.tmp[j+0], d.tmp[j+1], d.tmp[j+2], 0xFF}
+ j += 3
+ }
+ return colorMap, nil
+}
+
+func (d *decoder) readExtension() os.Error {
+ extension, err := d.r.ReadByte()
+ if err != nil {
+ return err
+ }
+ size := 0
+ switch extension {
+ case eText:
+ size = 13
+ case eGraphicControl:
+ return d.readGraphicControl()
+ case eComment:
+ // nothing to do but read the data.
+ case eApplication:
+ b, err := d.r.ReadByte()
+ if err != nil {
+ return err
+ }
+ // The spec requires size be 11, but Adobe sometimes uses 10.
+ size = int(b)
+ default:
+ return fmt.Errorf("gif: unknown extension 0x%.2x", extension)
+ }
+ if size > 0 {
+ if _, err := d.r.Read(d.tmp[0:size]); err != nil {
+ return err
+ }
+ }
+
+ // Application Extension with "NETSCAPE2.0" as string and 1 in data means
+ // this extension defines a loop count.
+ if extension == eApplication && string(d.tmp[:size]) == "NETSCAPE2.0" {
+ n, err := d.readBlock()
+ if n == 0 || err != nil {
+ return err
+ }
+ if n == 3 && d.tmp[0] == 1 {
+ d.loopCount = int(d.tmp[1]) | int(d.tmp[2])<<8
+ }
+ }
+ for {
+ n, err := d.readBlock()
+ if n == 0 || err != nil {
+ return err
+ }
+ }
+ panic("unreachable")
+}
+
+func (d *decoder) readGraphicControl() os.Error {
+ if _, err := io.ReadFull(d.r, d.tmp[0:6]); err != nil {
+ return fmt.Errorf("gif: can't read graphic control: %s", err)
+ }
+ d.flags = d.tmp[1]
+ d.delayTime = int(d.tmp[2]) | int(d.tmp[3])<<8
+ if d.flags&gcTransparentColorSet != 0 {
+ d.transparentIndex = d.tmp[4]
+ d.setTransparency(d.globalColorMap)
+ }
+ return nil
+}
+
+func (d *decoder) setTransparency(colorMap image.PalettedColorModel) {
+ if int(d.transparentIndex) < len(colorMap) {
+ colorMap[d.transparentIndex] = image.RGBAColor{}
+ }
+}
+
+func (d *decoder) newImageFromDescriptor() (*image.Paletted, os.Error) {
+ if _, err := io.ReadFull(d.r, d.tmp[0:9]); err != nil {
+ return nil, fmt.Errorf("gif: can't read image descriptor: %s", err)
+ }
+ // TODO: This code (throughout) ignores the top and left values,
+ // and assumes (in interlacing, for example) that the images'
+ // widths and heights are all the same.
+ _ = int(d.tmp[0]) + int(d.tmp[1])<<8 // TODO: honor left value
+ _ = int(d.tmp[2]) + int(d.tmp[3])<<8 // TODO: honor top value
+ width := int(d.tmp[4]) + int(d.tmp[5])<<8
+ height := int(d.tmp[6]) + int(d.tmp[7])<<8
+ d.imageFields = d.tmp[8]
+ return image.NewPaletted(width, height, nil), nil
+}
+
+func (d *decoder) readBlock() (int, os.Error) {
+ n, err := d.r.ReadByte()
+ if n == 0 || err != nil {
+ return 0, err
+ }
+ return io.ReadFull(d.r, d.tmp[0:n])
+}
+
+// interlaceScan defines the ordering for a pass of the interlace algorithm.
+type interlaceScan struct {
+ skip, start int
+}
+
+// interlacing represents the set of scans in an interlaced GIF image.
+var interlacing = []interlaceScan{
+ {8, 0}, // Group 1 : Every 8th. row, starting with row 0.
+ {8, 4}, // Group 2 : Every 8th. row, starting with row 4.
+ {4, 2}, // Group 3 : Every 4th. row, starting with row 2.
+ {2, 1}, // Group 4 : Every 2nd. row, starting with row 1.
+}
+
+func (d *decoder) uninterlace(m *image.Paletted) {
+ if d.imageFields&ifInterlace == 0 {
+ return
+ }
+ var nPix []uint8
+ dx := d.width
+ dy := d.height
+ nPix = make([]uint8, dx*dy)
+	offset := 0 // steps through the input by sequential scan lines.
+ for _, pass := range interlacing {
+ nOffset := pass.start * dx // steps through the output as defined by pass.
+ for y := pass.start; y < dy; y += pass.skip {
+ copy(nPix[nOffset:nOffset+dx], m.Pix[offset:offset+dx])
+ offset += dx
+ nOffset += dx * pass.skip
+ }
+ }
+ m.Pix = nPix
+}
+
+// Decode reads a GIF image from r and returns the first embedded
+// image as an image.Image.
+func Decode(r io.Reader) (image.Image, os.Error) {
+ var d decoder
+ if err := d.decode(r, false); err != nil {
+ return nil, err
+ }
+ return d.image[0], nil
+}
+
+// GIF represents the possibly multiple images stored in a GIF file.
+type GIF struct {
+ Image []*image.Paletted // The successive images.
+ Delay []int // The successive delay times, one per frame, in 100ths of a second.
+ LoopCount int // The loop count.
+}
+
+// DecodeAll reads a GIF image from r and returns the sequential frames
+// and timing information.
+func DecodeAll(r io.Reader) (*GIF, os.Error) {
+ var d decoder
+ if err := d.decode(r, false); err != nil {
+ return nil, err
+ }
+ gif := &GIF{
+ Image: d.image,
+ LoopCount: d.loopCount,
+ Delay: d.delay,
+ }
+ return gif, nil
+}
+
+// DecodeConfig returns the color model and dimensions of a GIF image without
+// decoding the entire image.
+func DecodeConfig(r io.Reader) (image.Config, os.Error) {
+ var d decoder
+ if err := d.decode(r, true); err != nil {
+ return image.Config{}, err
+ }
+ return image.Config{d.globalColorMap, d.width, d.height}, nil
+}
+
+func init() {
+ image.RegisterFormat("gif", "GIF8?a", Decode, DecodeConfig)
+}
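
A minimal sketch of reading every frame with DecodeAll; the input file name is hypothetical:

package main

import (
	"image/gif"
	"log"
	"os"
)

func main() {
	f, err := os.Open("animation.gif") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	g, err := gif.DecodeAll(f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d frame(s), loop count %d", len(g.Image), g.LoopCount)
	for i, m := range g.Image {
		log.Printf("frame %d: %v, delay %d/100s", i, m.Bounds(), g.Delay[i])
	}
}
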
diff --git a/src/pkg/image/image.go b/src/pkg/image/image.go
index 5f398a304..4350acc82 100644
--- a/src/pkg/image/image.go
+++ b/src/pkg/image/image.go
@@ -51,6 +51,13 @@ func (p *RGBA) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toRGBAColor(c).(RGBAColor)
}
+func (p *RGBA) SetRGBA(x, y int, c RGBAColor) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *RGBA) Opaque() bool {
if p.Rect.Empty() {
@@ -103,6 +110,13 @@ func (p *RGBA64) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toRGBA64Color(c).(RGBA64Color)
}
+func (p *RGBA64) SetRGBA64(x, y int, c RGBA64Color) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *RGBA64) Opaque() bool {
if p.Rect.Empty() {
@@ -155,6 +169,13 @@ func (p *NRGBA) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toNRGBAColor(c).(NRGBAColor)
}
+func (p *NRGBA) SetNRGBA(x, y int, c NRGBAColor) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *NRGBA) Opaque() bool {
if p.Rect.Empty() {
@@ -207,6 +228,13 @@ func (p *NRGBA64) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toNRGBA64Color(c).(NRGBA64Color)
}
+func (p *NRGBA64) SetNRGBA64(x, y int, c NRGBA64Color) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *NRGBA64) Opaque() bool {
if p.Rect.Empty() {
@@ -259,6 +287,13 @@ func (p *Alpha) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toAlphaColor(c).(AlphaColor)
}
+func (p *Alpha) SetAlpha(x, y int, c AlphaColor) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Alpha) Opaque() bool {
if p.Rect.Empty() {
@@ -311,6 +346,13 @@ func (p *Alpha16) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toAlpha16Color(c).(Alpha16Color)
}
+func (p *Alpha16) SetAlpha16(x, y int, c Alpha16Color) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Alpha16) Opaque() bool {
if p.Rect.Empty() {
@@ -363,6 +405,13 @@ func (p *Gray) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toGrayColor(c).(GrayColor)
}
+func (p *Gray) SetGray(x, y int, c GrayColor) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Gray) Opaque() bool {
return true
@@ -401,6 +450,13 @@ func (p *Gray16) Set(x, y int, c Color) {
p.Pix[y*p.Stride+x] = toGray16Color(c).(Gray16Color)
}
+func (p *Gray16) SetGray16(x, y int, c Gray16Color) {
+ if !p.Rect.Contains(Point{x, y}) {
+ return
+ }
+ p.Pix[y*p.Stride+x] = c
+}
+
// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Gray16) Opaque() bool {
return true
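
The typed setters added above (SetRGBA, SetGray and so on) let callers store a concrete color without the Color interface conversion that the generic Set performs; the image/png reader change later in this diff switches to them. A minimal sketch, using the pre-Go 1 NewRGBA(width, height) constructor:

package main

import "image"

func main() {
	m := image.NewRGBA(16, 16)
	for y := 0; y < 16; y++ {
		for x := 0; x < 16; x++ {
			// No toRGBAColor conversion: the concrete RGBAColor is stored directly.
			m.SetRGBA(x, y, image.RGBAColor{uint8(x * 16), uint8(y * 16), 0x00, 0xFF})
		}
	}
}
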
diff --git a/src/pkg/image/jpeg/idct.go b/src/pkg/image/jpeg/idct.go
index e5a2f40f5..b387dfdff 100644
--- a/src/pkg/image/jpeg/idct.go
+++ b/src/pkg/image/jpeg/idct.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package jpeg
+
// This is a Go translation of idct.c from
//
// http://standards.iso.org/ittf/PubliclyAvailableStandards/ISO_IEC_13818-4_2004_Conformance_Testing/Video/verifier/mpeg2decode_960109.tar.gz
@@ -35,8 +37,6 @@
*
*/
-package jpeg
-
const (
w1 = 2841 // 2048*sqrt(2)*cos(1*pi/16)
w2 = 2676 // 2048*sqrt(2)*cos(2*pi/16)
@@ -55,41 +55,45 @@ const (
r2 = 181 // 256/sqrt(2)
)
-// 2-D Inverse Discrete Cosine Transformation, followed by a +128 level shift.
+// idct performs a 2-D Inverse Discrete Cosine Transformation, followed by a
+// +128 level shift and a clip to [0, 255], writing the results to dst.
+// stride is the number of elements between successive rows of dst.
//
-// The input coefficients should already have been multiplied by the appropriate quantization table.
-// We use fixed-point computation, with the number of bits for the fractional component varying over the
-// intermediate stages. The final values are expected to range within [0, 255], after a +128 level shift.
+// The input coefficients should already have been multiplied by the
+// appropriate quantization table. We use fixed-point computation, with the
+// number of bits for the fractional component varying over the intermediate
+// stages.
//
-// For more on the actual algorithm, see Z. Wang, "Fast algorithms for the discrete W transform and
-// for the discrete Fourier transform", IEEE Trans. on ASSP, Vol. ASSP- 32, pp. 803-816, Aug. 1984.
-func idct(b *block) {
+// For more on the actual algorithm, see Z. Wang, "Fast algorithms for the
+// discrete W transform and for the discrete Fourier transform", IEEE Trans. on
+// ASSP, Vol. ASSP- 32, pp. 803-816, Aug. 1984.
+func idct(dst []byte, stride int, src *block) {
// Horizontal 1-D IDCT.
for y := 0; y < 8; y++ {
// If all the AC components are zero, then the IDCT is trivial.
- if b[y*8+1] == 0 && b[y*8+2] == 0 && b[y*8+3] == 0 &&
- b[y*8+4] == 0 && b[y*8+5] == 0 && b[y*8+6] == 0 && b[y*8+7] == 0 {
- dc := b[y*8+0] << 3
- b[y*8+0] = dc
- b[y*8+1] = dc
- b[y*8+2] = dc
- b[y*8+3] = dc
- b[y*8+4] = dc
- b[y*8+5] = dc
- b[y*8+6] = dc
- b[y*8+7] = dc
+ if src[y*8+1] == 0 && src[y*8+2] == 0 && src[y*8+3] == 0 &&
+ src[y*8+4] == 0 && src[y*8+5] == 0 && src[y*8+6] == 0 && src[y*8+7] == 0 {
+ dc := src[y*8+0] << 3
+ src[y*8+0] = dc
+ src[y*8+1] = dc
+ src[y*8+2] = dc
+ src[y*8+3] = dc
+ src[y*8+4] = dc
+ src[y*8+5] = dc
+ src[y*8+6] = dc
+ src[y*8+7] = dc
continue
}
// Prescale.
- x0 := (b[y*8+0] << 11) + 128
- x1 := b[y*8+4] << 11
- x2 := b[y*8+6]
- x3 := b[y*8+2]
- x4 := b[y*8+1]
- x5 := b[y*8+7]
- x6 := b[y*8+5]
- x7 := b[y*8+3]
+ x0 := (src[y*8+0] << 11) + 128
+ x1 := src[y*8+4] << 11
+ x2 := src[y*8+6]
+ x3 := src[y*8+2]
+ x4 := src[y*8+1]
+ x5 := src[y*8+7]
+ x6 := src[y*8+5]
+ x7 := src[y*8+3]
// Stage 1.
x8 := w7 * (x4 + x5)
@@ -119,14 +123,14 @@ func idct(b *block) {
x4 = (r2*(x4-x5) + 128) >> 8
// Stage 4.
- b[8*y+0] = (x7 + x1) >> 8
- b[8*y+1] = (x3 + x2) >> 8
- b[8*y+2] = (x0 + x4) >> 8
- b[8*y+3] = (x8 + x6) >> 8
- b[8*y+4] = (x8 - x6) >> 8
- b[8*y+5] = (x0 - x4) >> 8
- b[8*y+6] = (x3 - x2) >> 8
- b[8*y+7] = (x7 - x1) >> 8
+ src[8*y+0] = (x7 + x1) >> 8
+ src[8*y+1] = (x3 + x2) >> 8
+ src[8*y+2] = (x0 + x4) >> 8
+ src[8*y+3] = (x8 + x6) >> 8
+ src[8*y+4] = (x8 - x6) >> 8
+ src[8*y+5] = (x0 - x4) >> 8
+ src[8*y+6] = (x3 - x2) >> 8
+ src[8*y+7] = (x7 - x1) >> 8
}
// Vertical 1-D IDCT.
@@ -136,14 +140,14 @@ func idct(b *block) {
// we do not bother to check for the all-zero case.
// Prescale.
- y0 := (b[8*0+x] << 8) + 8192
- y1 := b[8*4+x] << 8
- y2 := b[8*6+x]
- y3 := b[8*2+x]
- y4 := b[8*1+x]
- y5 := b[8*7+x]
- y6 := b[8*5+x]
- y7 := b[8*3+x]
+ y0 := (src[8*0+x] << 8) + 8192
+ y1 := src[8*4+x] << 8
+ y2 := src[8*6+x]
+ y3 := src[8*2+x]
+ y4 := src[8*1+x]
+ y5 := src[8*7+x]
+ y6 := src[8*5+x]
+ y7 := src[8*3+x]
// Stage 1.
y8 := w7*(y4+y5) + 4
@@ -173,18 +177,28 @@ func idct(b *block) {
y4 = (r2*(y4-y5) + 128) >> 8
// Stage 4.
- b[8*0+x] = (y7 + y1) >> 14
- b[8*1+x] = (y3 + y2) >> 14
- b[8*2+x] = (y0 + y4) >> 14
- b[8*3+x] = (y8 + y6) >> 14
- b[8*4+x] = (y8 - y6) >> 14
- b[8*5+x] = (y0 - y4) >> 14
- b[8*6+x] = (y3 - y2) >> 14
- b[8*7+x] = (y7 - y1) >> 14
+ src[8*0+x] = (y7 + y1) >> 14
+ src[8*1+x] = (y3 + y2) >> 14
+ src[8*2+x] = (y0 + y4) >> 14
+ src[8*3+x] = (y8 + y6) >> 14
+ src[8*4+x] = (y8 - y6) >> 14
+ src[8*5+x] = (y0 - y4) >> 14
+ src[8*6+x] = (y3 - y2) >> 14
+ src[8*7+x] = (y7 - y1) >> 14
}
- // Level shift.
- for i := range *b {
- b[i] += 128
+ // Level shift by +128, clip to [0, 255], and write to dst.
+ for y := 0; y < 8; y++ {
+ for x := 0; x < 8; x++ {
+ c := src[y*8+x]
+ if c < -128 {
+ c = 0
+ } else if c > 127 {
+ c = 255
+ } else {
+ c += 128
+ }
+ dst[y*stride+x] = uint8(c)
+ }
}
}
diff --git a/src/pkg/image/jpeg/reader.go b/src/pkg/image/jpeg/reader.go
index 21a6fff96..ef8383a35 100644
--- a/src/pkg/image/jpeg/reader.go
+++ b/src/pkg/image/jpeg/reader.go
@@ -41,16 +41,22 @@ type block [blockSize]int
const (
blockSize = 64 // A DCT block is 8x8.
- dcTableClass = 0
- acTableClass = 1
- maxTc = 1
- maxTh = 3
- maxTq = 3
-
- // We only support 4:4:4, 4:2:2 and 4:2:0 downsampling, and assume that the components are Y, Cb, Cr.
- nComponent = 3
- maxH = 2
- maxV = 2
+ dcTable = 0
+ acTable = 1
+ maxTc = 1
+ maxTh = 3
+ maxTq = 3
+
+ // A grayscale JPEG image has only a Y component.
+ nGrayComponent = 1
+ // A color JPEG image has Y, Cb and Cr components.
+ nColorComponent = 3
+
+ // We only support 4:4:4, 4:2:2 and 4:2:0 downsampling, and therefore the
+ // number of luma samples per chroma sample is at most 2 in the horizontal
+ // and 2 in the vertical direction.
+ maxH = 2
+ maxV = 2
)
const (
@@ -90,13 +96,14 @@ type Reader interface {
type decoder struct {
r Reader
width, height int
- img *ycbcr.YCbCr
+ img1 *image.Gray
+ img3 *ycbcr.YCbCr
ri int // Restart Interval.
- comps [nComponent]component
+ nComp int
+ comp [nColorComponent]component
huff [maxTc + 1][maxTh + 1]huffman
quant [maxTq + 1]block
b bits
- blocks [nComponent][maxH * maxV]block
tmp [1024]byte
}
@@ -118,10 +125,15 @@ func (d *decoder) ignore(n int) os.Error {
// Specified in section B.2.2.
func (d *decoder) processSOF(n int) os.Error {
- if n != 6+3*nComponent {
+ switch n {
+ case 6 + 3*nGrayComponent:
+ d.nComp = nGrayComponent
+ case 6 + 3*nColorComponent:
+ d.nComp = nColorComponent
+ default:
return UnsupportedError("SOF has wrong length")
}
- _, err := io.ReadFull(d.r, d.tmp[0:6+3*nComponent])
+ _, err := io.ReadFull(d.r, d.tmp[:n])
if err != nil {
return err
}
@@ -131,26 +143,28 @@ func (d *decoder) processSOF(n int) os.Error {
}
d.height = int(d.tmp[1])<<8 + int(d.tmp[2])
d.width = int(d.tmp[3])<<8 + int(d.tmp[4])
- if d.tmp[5] != nComponent {
+ if int(d.tmp[5]) != d.nComp {
return UnsupportedError("SOF has wrong number of image components")
}
- for i := 0; i < nComponent; i++ {
+ for i := 0; i < d.nComp; i++ {
hv := d.tmp[7+3*i]
- d.comps[i].h = int(hv >> 4)
- d.comps[i].v = int(hv & 0x0f)
- d.comps[i].c = d.tmp[6+3*i]
- d.comps[i].tq = d.tmp[8+3*i]
- // We only support YCbCr images, and 4:4:4, 4:2:2 or 4:2:0 chroma downsampling ratios. This implies that
- // the (h, v) values for the Y component are either (1, 1), (2, 1) or (2, 2), and the
- // (h, v) values for the Cr and Cb components must be (1, 1).
+ d.comp[i].h = int(hv >> 4)
+ d.comp[i].v = int(hv & 0x0f)
+ d.comp[i].c = d.tmp[6+3*i]
+ d.comp[i].tq = d.tmp[8+3*i]
+ if d.nComp == nGrayComponent {
+ continue
+ }
+ // For color images, we only support 4:4:4, 4:2:2 or 4:2:0 chroma
+ // downsampling ratios. This implies that the (h, v) values for the Y
+ // component are either (1, 1), (2, 1) or (2, 2), and the (h, v)
+ // values for the Cr and Cb components must be (1, 1).
if i == 0 {
if hv != 0x11 && hv != 0x21 && hv != 0x22 {
return UnsupportedError("luma downsample ratio")
}
- } else {
- if hv != 0x11 {
- return UnsupportedError("chroma downsample ratio")
- }
+ } else if hv != 0x11 {
+ return UnsupportedError("chroma downsample ratio")
}
}
return nil
@@ -182,110 +196,88 @@ func (d *decoder) processDQT(n int) os.Error {
return nil
}
-// Clip x to the range [0, 255] inclusive.
-func clip(x int) uint8 {
- if x < 0 {
- return 0
+// makeImg allocates and initializes the destination image.
+func (d *decoder) makeImg(h0, v0, mxx, myy int) {
+ if d.nComp == nGrayComponent {
+ d.img1 = image.NewGray(8*mxx, 8*myy)
+ d.img1.Rect = image.Rect(0, 0, d.width, d.height)
+ return
}
- if x > 255 {
- return 255
+ var subsampleRatio ycbcr.SubsampleRatio
+ n := h0 * v0
+ switch n {
+ case 1:
+ subsampleRatio = ycbcr.SubsampleRatio444
+ case 2:
+ subsampleRatio = ycbcr.SubsampleRatio422
+ case 4:
+ subsampleRatio = ycbcr.SubsampleRatio420
+ default:
+ panic("unreachable")
}
- return uint8(x)
-}
-
-// Store the MCU to the image.
-func (d *decoder) storeMCU(mx, my int) {
- h0, v0 := d.comps[0].h, d.comps[0].v
- // Store the luma blocks.
- for v := 0; v < v0; v++ {
- for h := 0; h < h0; h++ {
- p := 8 * ((v0*my+v)*d.img.YStride + (h0*mx + h))
- for y := 0; y < 8; y++ {
- for x := 0; x < 8; x++ {
- d.img.Y[p] = clip(d.blocks[0][h0*v+h][8*y+x])
- p++
- }
- p += d.img.YStride - 8
- }
- }
- }
- // Store the chroma blocks.
- p := 8 * (my*d.img.CStride + mx)
- for y := 0; y < 8; y++ {
- for x := 0; x < 8; x++ {
- d.img.Cb[p] = clip(d.blocks[1][0][8*y+x])
- d.img.Cr[p] = clip(d.blocks[2][0][8*y+x])
- p++
- }
- p += d.img.CStride - 8
+ b := make([]byte, mxx*myy*(1*8*8*n+2*8*8))
+ d.img3 = &ycbcr.YCbCr{
+ Y: b[mxx*myy*(0*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+0*8*8)],
+ Cb: b[mxx*myy*(1*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+1*8*8)],
+ Cr: b[mxx*myy*(1*8*8*n+1*8*8) : mxx*myy*(1*8*8*n+2*8*8)],
+ SubsampleRatio: subsampleRatio,
+ YStride: mxx * 8 * h0,
+ CStride: mxx * 8,
+ Rect: image.Rect(0, 0, d.width, d.height),
}
}
// Specified in section B.2.3.
func (d *decoder) processSOS(n int) os.Error {
- if n != 4+2*nComponent {
+ if d.nComp == 0 {
+ return FormatError("missing SOF marker")
+ }
+ if n != 4+2*d.nComp {
return UnsupportedError("SOS has wrong length")
}
- _, err := io.ReadFull(d.r, d.tmp[0:4+2*nComponent])
+ _, err := io.ReadFull(d.r, d.tmp[0:4+2*d.nComp])
if err != nil {
return err
}
- if d.tmp[0] != nComponent {
+ if int(d.tmp[0]) != d.nComp {
return UnsupportedError("SOS has wrong number of image components")
}
- var scanComps [nComponent]struct {
+ var scan [nColorComponent]struct {
td uint8 // DC table selector.
ta uint8 // AC table selector.
}
- for i := 0; i < nComponent; i++ {
+ for i := 0; i < d.nComp; i++ {
cs := d.tmp[1+2*i] // Component selector.
- if cs != d.comps[i].c {
+ if cs != d.comp[i].c {
return UnsupportedError("scan components out of order")
}
- scanComps[i].td = d.tmp[2+2*i] >> 4
- scanComps[i].ta = d.tmp[2+2*i] & 0x0f
+ scan[i].td = d.tmp[2+2*i] >> 4
+ scan[i].ta = d.tmp[2+2*i] & 0x0f
}
// mxx and myy are the number of MCUs (Minimum Coded Units) in the image.
- h0, v0 := d.comps[0].h, d.comps[0].v // The h and v values from the Y components.
+ h0, v0 := d.comp[0].h, d.comp[0].v // The h and v values from the Y components.
mxx := (d.width + 8*h0 - 1) / (8 * h0)
myy := (d.height + 8*v0 - 1) / (8 * v0)
- if d.img == nil {
- var subsampleRatio ycbcr.SubsampleRatio
- n := h0 * v0
- switch n {
- case 1:
- subsampleRatio = ycbcr.SubsampleRatio444
- case 2:
- subsampleRatio = ycbcr.SubsampleRatio422
- case 4:
- subsampleRatio = ycbcr.SubsampleRatio420
- default:
- panic("unreachable")
- }
- b := make([]byte, mxx*myy*(1*8*8*n+2*8*8))
- d.img = &ycbcr.YCbCr{
- Y: b[mxx*myy*(0*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+0*8*8)],
- Cb: b[mxx*myy*(1*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+1*8*8)],
- Cr: b[mxx*myy*(1*8*8*n+1*8*8) : mxx*myy*(1*8*8*n+2*8*8)],
- SubsampleRatio: subsampleRatio,
- YStride: mxx * 8 * h0,
- CStride: mxx * 8,
- Rect: image.Rect(0, 0, d.width, d.height),
- }
+ if d.img1 == nil && d.img3 == nil {
+ d.makeImg(h0, v0, mxx, myy)
}
mcu, expectedRST := 0, uint8(rst0Marker)
- var allZeroes block
- var dc [nComponent]int
+ var (
+ b block
+ dc [nColorComponent]int
+ )
for my := 0; my < myy; my++ {
for mx := 0; mx < mxx; mx++ {
- for i := 0; i < nComponent; i++ {
- qt := &d.quant[d.comps[i].tq]
- for j := 0; j < d.comps[i].h*d.comps[i].v; j++ {
- d.blocks[i][j] = allZeroes
+ for i := 0; i < d.nComp; i++ {
+ qt := &d.quant[d.comp[i].tq]
+ for j := 0; j < d.comp[i].h*d.comp[i].v; j++ {
+ // TODO(nigeltao): make this a "var b block" once the compiler's escape
+ // analysis is good enough to allocate it on the stack, not the heap.
+ b = block{}
// Decode the DC coefficient, as specified in section F.2.2.1.
- value, err := d.decodeHuffman(&d.huff[dcTableClass][scanComps[i].td])
+ value, err := d.decodeHuffman(&d.huff[dcTable][scan[i].td])
if err != nil {
return err
}
@@ -297,11 +289,11 @@ func (d *decoder) processSOS(n int) os.Error {
return err
}
dc[i] += dcDelta
- d.blocks[i][j][0] = dc[i] * qt[0]
+ b[0] = dc[i] * qt[0]
// Decode the AC coefficients, as specified in section F.2.2.2.
for k := 1; k < blockSize; k++ {
- value, err := d.decodeHuffman(&d.huff[acTableClass][scanComps[i].ta])
+ value, err := d.decodeHuffman(&d.huff[acTable][scan[i].ta])
if err != nil {
return err
}
@@ -316,7 +308,7 @@ func (d *decoder) processSOS(n int) os.Error {
if err != nil {
return err
}
- d.blocks[i][j][unzig[k]] = ac * qt[k]
+ b[unzig[k]] = ac * qt[k]
} else {
if val0 != 0x0f {
break
@@ -325,10 +317,32 @@ func (d *decoder) processSOS(n int) os.Error {
}
}
- idct(&d.blocks[i][j])
+ // Perform the inverse DCT and store the MCU component to the image.
+ if d.nComp == nGrayComponent {
+ idct(d.tmp[:64], 8, &b)
+ // Convert from []uint8 to []image.GrayColor.
+ p := d.img1.Pix[8*(my*d.img1.Stride+mx):]
+ for y := 0; y < 8; y++ {
+ dst := p[y*d.img1.Stride:]
+ src := d.tmp[8*y:]
+ for x := 0; x < 8; x++ {
+ dst[x] = image.GrayColor{src[x]}
+ }
+ }
+ } else {
+ switch i {
+ case 0:
+ mx0 := h0*mx + (j % 2)
+ my0 := v0*my + (j / 2)
+ idct(d.img3.Y[8*(my0*d.img3.YStride+mx0):], d.img3.YStride, &b)
+ case 1:
+ idct(d.img3.Cb[8*(my*d.img3.CStride+mx):], d.img3.CStride, &b)
+ case 2:
+ idct(d.img3.Cr[8*(my*d.img3.CStride+mx):], d.img3.CStride, &b)
+ }
+ }
} // for j
} // for i
- d.storeMCU(mx, my)
mcu++
if d.ri > 0 && mcu%d.ri == 0 && mcu < mxx*myy {
// A more sophisticated decoder could use RST[0-7] markers to resynchronize from corrupt input,
@@ -347,9 +361,7 @@ func (d *decoder) processSOS(n int) os.Error {
// Reset the Huffman decoder.
d.b = bits{}
// Reset the DC components, as per section F.2.1.3.1.
- for i := 0; i < nComponent; i++ {
- dc[i] = 0
- }
+ dc = [nColorComponent]int{}
}
} // for mx
} // for my
@@ -437,7 +449,13 @@ func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, os.Error) {
return nil, err
}
}
- return d.img, nil
+ if d.img1 != nil {
+ return d.img1, nil
+ }
+ if d.img3 != nil {
+ return d.img3, nil
+ }
+ return nil, FormatError("missing SOS marker")
}
// Decode reads a JPEG image from r and returns it as an image.Image.
@@ -453,7 +471,13 @@ func DecodeConfig(r io.Reader) (image.Config, os.Error) {
if _, err := d.decode(r, true); err != nil {
return image.Config{}, err
}
- return image.Config{image.RGBAColorModel, d.width, d.height}, nil
+ switch d.nComp {
+ case nGrayComponent:
+ return image.Config{image.GrayColorModel, d.width, d.height}, nil
+ case nColorComponent:
+ return image.Config{ycbcr.YCbCrColorModel, d.width, d.height}, nil
+ }
+ return image.Config{}, FormatError("missing SOF marker")
}
func init() {
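
With the grayscale support above, DecodeConfig reports the actual color model instead of always claiming RGBA. A minimal sketch; the file name echoes the grayscale testdata referenced in decode_test.go and the ColorModel comparison mirrors what that test does:

package main

import (
	"bufio"
	"image"
	"image/jpeg"
	"log"
	"os"
)

func main() {
	f, err := os.Open("testdata/video-005.gray.jpeg") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	c, err := jpeg.DecodeConfig(bufio.NewReader(f))
	if err != nil {
		log.Fatal(err)
	}
	if c.ColorModel == image.GrayColorModel {
		log.Printf("grayscale JPEG, %dx%d", c.Width, c.Height)
	} else {
		log.Printf("color (YCbCr) JPEG, %dx%d", c.Width, c.Height)
	}
}
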
diff --git a/src/pkg/image/jpeg/writer.go b/src/pkg/image/jpeg/writer.go
index 505cce04f..eddaaefb6 100644
--- a/src/pkg/image/jpeg/writer.go
+++ b/src/pkg/image/jpeg/writer.go
@@ -221,8 +221,7 @@ type encoder struct {
// buf is a scratch buffer.
buf [16]byte
// bits and nBits are accumulated bits to write to w.
- bits uint32
- nBits uint8
+ bits, nBits uint32
// quant is the scaled quantization tables.
quant [nQuantIndex][blockSize]byte
}
@@ -250,7 +249,7 @@ func (e *encoder) writeByte(b byte) {
// emit emits the least significant nBits bits of bits to the bitstream.
// The precondition is bits < 1<<nBits && nBits <= 16.
-func (e *encoder) emit(bits uint32, nBits uint8) {
+func (e *encoder) emit(bits, nBits uint32) {
nBits += e.nBits
bits <<= 32 - nBits
bits |= e.bits
@@ -269,7 +268,7 @@ func (e *encoder) emit(bits uint32, nBits uint8) {
// emitHuff emits the given value with the given Huffman encoder.
func (e *encoder) emitHuff(h huffIndex, value int) {
x := theHuffmanLUT[h][value]
- e.emit(x&(1<<24-1), uint8(x>>24))
+ e.emit(x&(1<<24-1), x>>24)
}
// emitHuffRLE emits a run of runLength copies of value encoded with the given
@@ -279,11 +278,11 @@ func (e *encoder) emitHuffRLE(h huffIndex, runLength, value int) {
if a < 0 {
a, b = -value, value-1
}
- var nBits uint8
+ var nBits uint32
if a < 0x100 {
- nBits = bitCount[a]
+ nBits = uint32(bitCount[a])
} else {
- nBits = 8 + bitCount[a>>8]
+ nBits = 8 + uint32(bitCount[a>>8])
}
e.emitHuff(h, runLength<<4|int(nBits))
if nBits > 0 {
@@ -302,34 +301,31 @@ func (e *encoder) writeMarkerHeader(marker uint8, markerlen int) {
// writeDQT writes the Define Quantization Table marker.
func (e *encoder) writeDQT() {
- markerlen := 2
- for _, q := range e.quant {
- markerlen += 1 + len(q)
- }
+ markerlen := 2 + int(nQuantIndex)*(1+blockSize)
e.writeMarkerHeader(dqtMarker, markerlen)
- for i, q := range e.quant {
+ for i := range e.quant {
e.writeByte(uint8(i))
- e.write(q[:])
+ e.write(e.quant[i][:])
}
}
// writeSOF0 writes the Start Of Frame (Baseline) marker.
func (e *encoder) writeSOF0(size image.Point) {
- markerlen := 8 + 3*nComponent
+ markerlen := 8 + 3*nColorComponent
e.writeMarkerHeader(sof0Marker, markerlen)
e.buf[0] = 8 // 8-bit color.
e.buf[1] = uint8(size.Y >> 8)
e.buf[2] = uint8(size.Y & 0xff)
e.buf[3] = uint8(size.X >> 8)
e.buf[4] = uint8(size.X & 0xff)
- e.buf[5] = nComponent
- for i := 0; i < nComponent; i++ {
+ e.buf[5] = nColorComponent
+ for i := 0; i < nColorComponent; i++ {
e.buf[3*i+6] = uint8(i + 1)
// We use 4:2:0 chroma subsampling.
e.buf[3*i+7] = "\x22\x11\x11"[i]
e.buf[3*i+8] = "\x00\x01\x01"[i]
}
- e.write(e.buf[:3*(nComponent-1)+9])
+ e.write(e.buf[:3*(nColorComponent-1)+9])
}
// writeDHT writes the Define Huffman Table marker.
@@ -391,6 +387,31 @@ func toYCbCr(m image.Image, p image.Point, yBlock, cbBlock, crBlock *block) {
}
}
+// rgbaToYCbCr is a specialized version of toYCbCr for image.RGBA images.
+func rgbaToYCbCr(m *image.RGBA, p image.Point, yBlock, cbBlock, crBlock *block) {
+ b := m.Bounds()
+ xmax := b.Max.X - 1
+ ymax := b.Max.Y - 1
+ for j := 0; j < 8; j++ {
+ sj := p.Y + j
+ if sj > ymax {
+ sj = ymax
+ }
+ yoff := sj * m.Stride
+ for i := 0; i < 8; i++ {
+ sx := p.X + i
+ if sx > xmax {
+ sx = xmax
+ }
+ col := &m.Pix[yoff+sx]
+ yy, cb, cr := ycbcr.RGBToYCbCr(col.R, col.G, col.B)
+ yBlock[8*j+i] = int(yy)
+ cbBlock[8*j+i] = int(cb)
+ crBlock[8*j+i] = int(cr)
+ }
+ }
+}
+
// scale scales the 16x16 region represented by the 4 src blocks to the 8x8
// dst block.
func scale(dst *block, src *[4]block) {
@@ -431,13 +452,18 @@ func (e *encoder) writeSOS(m image.Image) {
prevDCY, prevDCCb, prevDCCr int
)
bounds := m.Bounds()
+ rgba, _ := m.(*image.RGBA)
for y := bounds.Min.Y; y < bounds.Max.Y; y += 16 {
for x := bounds.Min.X; x < bounds.Max.X; x += 16 {
for i := 0; i < 4; i++ {
xOff := (i & 1) * 8
yOff := (i & 2) * 4
p := image.Point{x + xOff, y + yOff}
- toYCbCr(m, p, &yBlock, &cbBlock[i], &crBlock[i])
+ if rgba != nil {
+ rgbaToYCbCr(rgba, p, &yBlock, &cbBlock[i], &crBlock[i])
+ } else {
+ toYCbCr(m, p, &yBlock, &cbBlock[i], &crBlock[i])
+ }
prevDCY = e.writeBlock(&yBlock, 0, prevDCY)
}
scale(&cBlock, &cbBlock)
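The encoder accumulates pending bits in the high end of a single uint32 and flushes a byte at a time once eight or more are queued; widening nBits to uint32 keeps all of the shift arithmetic in one type. A minimal standalone sketch of the same accumulate-and-flush idea, with illustrative names (the real emit additionally byte-stuffs a 0x00 after any 0xff it writes):

package main

import (
	"bytes"
	"fmt"
)

// bitWriter mirrors the encoder's bits/nBits accumulator.
type bitWriter struct {
	bits, nBits uint32 // pending bits, packed at the top of the 32-bit word
	out         bytes.Buffer
}

// emit appends the least significant nBits bits of bits to the stream.
// Precondition, as in the encoder: bits < 1<<nBits && nBits <= 16.
func (w *bitWriter) emit(bits, nBits uint32) {
	nBits += w.nBits
	bits <<= 32 - nBits // left-align the new bits just below the pending ones
	bits |= w.bits
	for nBits >= 8 {
		w.out.WriteByte(uint8(bits >> 24)) // flush the top byte
		bits <<= 8
		nBits -= 8
	}
	w.bits, w.nBits = bits, nBits
}

func main() {
	var w bitWriter
	w.emit(0x5, 3)  // 101
	w.emit(0x1f, 5) // 11111; together they complete one byte
	fmt.Printf("%08b\n", w.out.Bytes()[0]) // 10111111
}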
diff --git a/src/pkg/image/jpeg/writer_test.go b/src/pkg/image/jpeg/writer_test.go
index 00922dd5c..7aec70f01 100644
--- a/src/pkg/image/jpeg/writer_test.go
+++ b/src/pkg/image/jpeg/writer_test.go
@@ -8,6 +8,8 @@ import (
"bytes"
"image"
"image/png"
+ "io/ioutil"
+ "rand"
"os"
"testing"
)
@@ -85,3 +87,29 @@ func TestWriter(t *testing.T) {
}
}
}
+
+func BenchmarkEncodeRGBOpaque(b *testing.B) {
+ b.StopTimer()
+ img := image.NewRGBA(640, 480)
+ // Set all pixels to 0xFF alpha to force opaque mode.
+ bo := img.Bounds()
+ rnd := rand.New(rand.NewSource(123))
+ for y := bo.Min.Y; y < bo.Max.Y; y++ {
+ for x := bo.Min.X; x < bo.Max.X; x++ {
+ img.Set(x, y, image.RGBAColor{
+ uint8(rnd.Intn(256)),
+ uint8(rnd.Intn(256)),
+ uint8(rnd.Intn(256)),
+ 255})
+ }
+ }
+ if !img.Opaque() {
+ panic("expected image to be opaque")
+ }
+ b.SetBytes(640 * 480 * 4)
+ b.StartTimer()
+ options := &Options{Quality: 90}
+ for i := 0; i < b.N; i++ {
+ Encode(ioutil.Discard, img, options)
+ }
+}
diff --git a/src/pkg/image/png/reader.go b/src/pkg/image/png/reader.go
index b30a951c1..8c76afa72 100644
--- a/src/pkg/image/png/reader.go
+++ b/src/pkg/image/png/reader.go
@@ -378,7 +378,7 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
for x := 0; x < d.width; x += 8 {
b := cdat[x/8]
for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ {
- gray.Set(x+x2, y, image.GrayColor{(b >> 7) * 0xff})
+ gray.SetGray(x+x2, y, image.GrayColor{(b >> 7) * 0xff})
b <<= 1
}
}
@@ -386,7 +386,7 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
for x := 0; x < d.width; x += 4 {
b := cdat[x/4]
for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ {
- gray.Set(x+x2, y, image.GrayColor{(b >> 6) * 0x55})
+ gray.SetGray(x+x2, y, image.GrayColor{(b >> 6) * 0x55})
b <<= 2
}
}
@@ -394,22 +394,22 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
for x := 0; x < d.width; x += 2 {
b := cdat[x/2]
for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ {
- gray.Set(x+x2, y, image.GrayColor{(b >> 4) * 0x11})
+ gray.SetGray(x+x2, y, image.GrayColor{(b >> 4) * 0x11})
b <<= 4
}
}
case cbG8:
for x := 0; x < d.width; x++ {
- gray.Set(x, y, image.GrayColor{cdat[x]})
+ gray.SetGray(x, y, image.GrayColor{cdat[x]})
}
case cbGA8:
for x := 0; x < d.width; x++ {
ycol := cdat[2*x+0]
- nrgba.Set(x, y, image.NRGBAColor{ycol, ycol, ycol, cdat[2*x+1]})
+ nrgba.SetNRGBA(x, y, image.NRGBAColor{ycol, ycol, ycol, cdat[2*x+1]})
}
case cbTC8:
for x := 0; x < d.width; x++ {
- rgba.Set(x, y, image.RGBAColor{cdat[3*x+0], cdat[3*x+1], cdat[3*x+2], 0xff})
+ rgba.SetRGBA(x, y, image.RGBAColor{cdat[3*x+0], cdat[3*x+1], cdat[3*x+2], 0xff})
}
case cbP1:
for x := 0; x < d.width; x += 8 {
@@ -456,25 +456,25 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
}
case cbTCA8:
for x := 0; x < d.width; x++ {
- nrgba.Set(x, y, image.NRGBAColor{cdat[4*x+0], cdat[4*x+1], cdat[4*x+2], cdat[4*x+3]})
+ nrgba.SetNRGBA(x, y, image.NRGBAColor{cdat[4*x+0], cdat[4*x+1], cdat[4*x+2], cdat[4*x+3]})
}
case cbG16:
for x := 0; x < d.width; x++ {
ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1])
- gray16.Set(x, y, image.Gray16Color{ycol})
+ gray16.SetGray16(x, y, image.Gray16Color{ycol})
}
case cbGA16:
for x := 0; x < d.width; x++ {
ycol := uint16(cdat[4*x+0])<<8 | uint16(cdat[4*x+1])
acol := uint16(cdat[4*x+2])<<8 | uint16(cdat[4*x+3])
- nrgba64.Set(x, y, image.NRGBA64Color{ycol, ycol, ycol, acol})
+ nrgba64.SetNRGBA64(x, y, image.NRGBA64Color{ycol, ycol, ycol, acol})
}
case cbTC16:
for x := 0; x < d.width; x++ {
rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1])
gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3])
bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5])
- rgba64.Set(x, y, image.RGBA64Color{rcol, gcol, bcol, 0xffff})
+ rgba64.SetRGBA64(x, y, image.RGBA64Color{rcol, gcol, bcol, 0xffff})
}
case cbTCA16:
for x := 0; x < d.width; x++ {
@@ -482,7 +482,7 @@ func (d *decoder) idatReader(idat io.Reader) (image.Image, os.Error) {
gcol := uint16(cdat[8*x+2])<<8 | uint16(cdat[8*x+3])
bcol := uint16(cdat[8*x+4])<<8 | uint16(cdat[8*x+5])
acol := uint16(cdat[8*x+6])<<8 | uint16(cdat[8*x+7])
- nrgba64.Set(x, y, image.NRGBA64Color{rcol, gcol, bcol, acol})
+ nrgba64.SetNRGBA64(x, y, image.NRGBA64Color{rcol, gcol, bcol, acol})
}
}
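The decoder now calls the concrete per-type setters (SetGray, SetNRGBA, SetRGBA and the 16-bit variants) instead of the generic Set, presumably to skip the per-pixel boxing into image.Color and the color-model conversion that Set performs. A hedged micro-example of the difference, using the pre-Go 1 constructors seen elsewhere in this patch:

package main

import (
	"fmt"
	"image"
)

func main() {
	m := image.NewRGBA(16, 16)
	c := image.RGBAColor{0x12, 0x34, 0x56, 0xff}

	// Generic path: c is boxed into the image.Color interface and run
	// through m's color model before being stored.
	m.Set(3, 4, c)

	// Typed path: the concrete value is stored directly, which is what the
	// PNG decoder now does in its per-pixel loops.
	m.SetRGBA(3, 4, c)

	fmt.Println(m.At(3, 4))
}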
diff --git a/src/pkg/image/png/reader_test.go b/src/pkg/image/png/reader_test.go
index efa6336d7..bcc1a3db4 100644
--- a/src/pkg/image/png/reader_test.go
+++ b/src/pkg/image/png/reader_test.go
@@ -28,6 +28,7 @@ var filenames = []string{
"basn3p02",
"basn3p04",
"basn3p08",
+ "basn3p08-trns",
"basn4a08",
"basn4a16",
"basn6a08",
@@ -98,17 +99,30 @@ func sng(w io.WriteCloser, filename string, png image.Image) {
// (the PNG spec section 11.3 says "Ancillary chunks may be ignored by a decoder").
io.WriteString(w, "gAMA {1.0000}\n")
- // Write the PLTE (if applicable).
+ // Write the PLTE and tRNS (if applicable).
if cpm != nil {
+ lastAlpha := -1
io.WriteString(w, "PLTE {\n")
- for i := 0; i < len(cpm); i++ {
- r, g, b, _ := cpm[i].RGBA()
+ for i, c := range cpm {
+ r, g, b, a := c.RGBA()
+ if a != 0xffff {
+ lastAlpha = i
+ }
r >>= 8
g >>= 8
b >>= 8
fmt.Fprintf(w, " (%3d,%3d,%3d) # rgb = (0x%02x,0x%02x,0x%02x)\n", r, g, b, r, g, b)
}
io.WriteString(w, "}\n")
+ if lastAlpha != -1 {
+ io.WriteString(w, "tRNS {\n")
+ for i := 0; i <= lastAlpha; i++ {
+ _, _, _, a := cpm[i].RGBA()
+ a >>= 8
+ fmt.Fprintf(w, " %d", a)
+ }
+ io.WriteString(w, "}\n")
+ }
}
// Write the IMAGE.
diff --git a/src/pkg/image/png/testdata/pngsuite/README b/src/pkg/image/png/testdata/pngsuite/README
index abe3ecb20..c0f78bde8 100644
--- a/src/pkg/image/png/testdata/pngsuite/README
+++ b/src/pkg/image/png/testdata/pngsuite/README
@@ -10,6 +10,9 @@ The files basn0g01-30.png, basn0g02-29.png and basn0g04-31.png are in fact
not part of pngsuite but were created from files in pngsuite. Their non-power-
of-two sizes makes them useful for testing bit-depths smaller than a byte.
+basn3p08-trns.png was generated from basn6a08.png using the pngnq tool, which
+converted it to an 8-bit paletted image with alpha values in a tRNS chunk.
+
The *.sng files in this directory were generated from the *.png files
by the sng command-line tool and some hand editing. The files
basn0g0{1,2,4}.sng were actually generated by first converting the PNG
diff --git a/src/pkg/image/png/testdata/pngsuite/basn3p08-trns.png b/src/pkg/image/png/testdata/pngsuite/basn3p08-trns.png
new file mode 100644
index 000000000..b0fc0c1be
--- /dev/null
+++ b/src/pkg/image/png/testdata/pngsuite/basn3p08-trns.png
Binary files differ
diff --git a/src/pkg/image/png/testdata/pngsuite/basn3p08-trns.sng b/src/pkg/image/png/testdata/pngsuite/basn3p08-trns.sng
new file mode 100644
index 000000000..78dc367bb
--- /dev/null
+++ b/src/pkg/image/png/testdata/pngsuite/basn3p08-trns.sng
@@ -0,0 +1,301 @@
+#SNG: from basn3p08-trns.png
+IHDR {
+ width: 32; height: 32; bitdepth: 8;
+ using color palette;
+}
+gAMA {1.0000}
+PLTE {
+ (255, 3, 7) # rgb = (0xff,0x03,0x07)
+ (255, 4, 7) # rgb = (0xff,0x04,0x07)
+ (255, 9, 7) # rgb = (0xff,0x09,0x07)
+ (217, 14, 7) # rgb = (0xd9,0x0e,0x07)
+ (255, 14, 7) # rgb = (0xff,0x0e,0x07)
+ ( 2, 22, 19) # rgb = (0x02,0x16,0x13)
+ (255, 26, 7) # rgb = (0xff,0x1a,0x07)
+ (255, 31, 7) # rgb = (0xff,0x1f,0x07)
+ ( 10, 37, 14) # rgb = (0x0a,0x25,0x0e)
+ (179, 37, 6) # rgb = (0xb3,0x25,0x06)
+ (254, 42, 7) # rgb = (0xfe,0x2a,0x07)
+ (255, 45, 7) # rgb = (0xff,0x2d,0x07)
+ ( 25, 46, 9) # rgb = (0x19,0x2e,0x09)
+ ( 0, 48,254) # rgb = (0x00,0x30,0xfe)
+ ( 0, 48,255) # rgb = (0x00,0x30,0xff)
+ ( 0, 49,255) # rgb = (0x00,0x31,0xff)
+ ( 0, 51,254) # rgb = (0x00,0x33,0xfe)
+ ( 0, 52,255) # rgb = (0x00,0x34,0xff)
+ (255, 53, 7) # rgb = (0xff,0x35,0x07)
+ ( 0, 54,252) # rgb = (0x00,0x36,0xfc)
+ (254, 57, 7) # rgb = (0xfe,0x39,0x07)
+ (251, 57, 7) # rgb = (0xfb,0x39,0x07)
+ (247, 59, 7) # rgb = (0xf7,0x3b,0x07)
+ ( 0, 59, 61) # rgb = (0x00,0x3b,0x3d)
+ ( 0, 62,255) # rgb = (0x00,0x3e,0xff)
+ (142, 63, 5) # rgb = (0x8e,0x3f,0x05)
+ ( 0, 63,250) # rgb = (0x00,0x3f,0xfa)
+ (255, 63, 7) # rgb = (0xff,0x3f,0x07)
+ (253, 68, 7) # rgb = (0xfd,0x44,0x07)
+ ( 0, 73,255) # rgb = (0x00,0x49,0xff)
+ ( 0, 73,246) # rgb = (0x00,0x49,0xf6)
+ (255, 75, 7) # rgb = (0xff,0x4b,0x07)
+ ( 82, 85, 9) # rgb = (0x52,0x55,0x09)
+ (255, 85, 7) # rgb = (0xff,0x55,0x07)
+ ( 0, 89,255) # rgb = (0x00,0x59,0xff)
+ ( 0, 91,237) # rgb = (0x00,0x5b,0xed)
+ (255, 94, 7) # rgb = (0xff,0x5e,0x07)
+ (241,100, 7) # rgb = (0xf1,0x64,0x07)
+ ( 0,101,255) # rgb = (0x00,0x65,0xff)
+ (253,105, 7) # rgb = (0xfd,0x69,0x07)
+ ( 0,107,223) # rgb = (0x00,0x6b,0xdf)
+ (255,106, 7) # rgb = (0xff,0x6a,0x07)
+ ( 1,110, 95) # rgb = (0x01,0x6e,0x5f)
+ (255,115, 7) # rgb = (0xff,0x73,0x07)
+ ( 0,117,255) # rgb = (0x00,0x75,0xff)
+ (255,124, 7) # rgb = (0xff,0x7c,0x07)
+ (118,126, 10) # rgb = (0x76,0x7e,0x0a)
+ ( 0,130,250) # rgb = (0x00,0x82,0xfa)
+ ( 0,132,255) # rgb = (0x00,0x84,0xff)
+ ( 0,134,207) # rgb = (0x00,0x86,0xcf)
+ (255,134, 7) # rgb = (0xff,0x86,0x07)
+ ( 0,136,249) # rgb = (0x00,0x88,0xf9)
+ (219,140, 6) # rgb = (0xdb,0x8c,0x06)
+ ( 0,140,252) # rgb = (0x00,0x8c,0xfc)
+ ( 0,140,255) # rgb = (0x00,0x8c,0xff)
+ ( 1,142,136) # rgb = (0x01,0x8e,0x88)
+ (255,143, 7) # rgb = (0xff,0x8f,0x07)
+ (243,150, 7) # rgb = (0xf3,0x96,0x07)
+ (198,152, 7) # rgb = (0xc6,0x98,0x07)
+ (165,153, 7) # rgb = (0xa5,0x99,0x07)
+ ( 0,157,255) # rgb = (0x00,0x9d,0xff)
+ (255,158, 7) # rgb = (0xff,0x9e,0x07)
+ ( 70,159, 4) # rgb = (0x46,0x9f,0x04)
+ ( 0,160,251) # rgb = (0x00,0xa0,0xfb)
+ (203,163, 6) # rgb = (0xcb,0xa3,0x06)
+ ( 0,163,239) # rgb = (0x00,0xa3,0xef)
+ ( 1,164,178) # rgb = (0x01,0xa4,0xb2)
+ (255,166, 7) # rgb = (0xff,0xa6,0x07)
+ ( 1,169,165) # rgb = (0x01,0xa9,0xa5)
+ ( 1,170,255) # rgb = (0x01,0xaa,0xff)
+ (232,172, 6) # rgb = (0xe8,0xac,0x06)
+ (255,175, 7) # rgb = (0xff,0xaf,0x07)
+ (185,176,131) # rgb = (0xb9,0xb0,0x83)
+ ( 1,179,225) # rgb = (0x01,0xb3,0xe1)
+ (188,179,118) # rgb = (0xbc,0xb3,0x76)
+ (199,180, 6) # rgb = (0xc7,0xb4,0x06)
+ ( 1,182,255) # rgb = (0x01,0xb6,0xff)
+ ( 1,184,249) # rgb = (0x01,0xb8,0xf9)
+ (255,184, 7) # rgb = (0xff,0xb8,0x07)
+ (207,186, 71) # rgb = (0xcf,0xba,0x47)
+ (193,187, 6) # rgb = (0xc1,0xbb,0x06)
+ (253,191, 7) # rgb = (0xfd,0xbf,0x07)
+ (218,193, 48) # rgb = (0xda,0xc1,0x30)
+ ( 1,193,157) # rgb = (0x01,0xc1,0x9d)
+ ( 1,196,244) # rgb = (0x01,0xc4,0xf4)
+ ( 1,196,254) # rgb = (0x01,0xc4,0xfe)
+ ( 48,199, 3) # rgb = (0x30,0xc7,0x03)
+ (164,199, 5) # rgb = (0xa4,0xc7,0x05)
+ (220,202, 6) # rgb = (0xdc,0xca,0x06)
+ (253,203, 7) # rgb = (0xfd,0xcb,0x07)
+ ( 1,204,204) # rgb = (0x01,0xcc,0xcc)
+ (251,209, 7) # rgb = (0xfb,0xd1,0x07)
+ (231,208, 24) # rgb = (0xe7,0xd0,0x18)
+ ( 1,210,254) # rgb = (0x01,0xd2,0xfe)
+ ( 2,211,146) # rgb = (0x02,0xd3,0x92)
+ ( 1,212,156) # rgb = (0x01,0xd4,0x9c)
+ ( 1,213,252) # rgb = (0x01,0xd5,0xfc)
+ (237,219, 15) # rgb = (0xed,0xdb,0x0f)
+ ( 1,218,240) # rgb = (0x01,0xda,0xf0)
+ (165,220, 5) # rgb = (0xa5,0xdc,0x05)
+ ( 1,221,250) # rgb = (0x01,0xdd,0xfa)
+ (249,221, 6) # rgb = (0xf9,0xdd,0x06)
+ (146,222, 4) # rgb = (0x92,0xde,0x04)
+ ( 1,224,184) # rgb = (0x01,0xe0,0xb8)
+ ( 2,224,155) # rgb = (0x02,0xe0,0x9b)
+ (244,225, 10) # rgb = (0xf4,0xe1,0x0a)
+ (249,227, 7) # rgb = (0xf9,0xe3,0x07)
+ ( 2,229,133) # rgb = (0x02,0xe5,0x85)
+ (192,228, 6) # rgb = (0xc0,0xe4,0x06)
+ ( 37,230, 3) # rgb = (0x25,0xe6,0x03)
+ (246,230, 7) # rgb = (0xf6,0xe6,0x07)
+ (143,232, 4) # rgb = (0x8f,0xe8,0x04)
+ (244,233, 8) # rgb = (0xf4,0xe9,0x08)
+ ( 2,236,139) # rgb = (0x02,0xec,0x8b)
+ ( 1,236,227) # rgb = (0x01,0xec,0xe3)
+ ( 1,238,238) # rgb = (0x01,0xee,0xee)
+ (101,241, 4) # rgb = (0x65,0xf1,0x04)
+ ( 1,241,218) # rgb = (0x01,0xf1,0xda)
+ ( 1,240,232) # rgb = (0x01,0xf0,0xe8)
+ (167,240, 5) # rgb = (0xa7,0xf0,0x05)
+ ( 27,243, 2) # rgb = (0x1b,0xf3,0x02)
+ (126,243, 4) # rgb = (0x7e,0xf3,0x04)
+ ( 2,246,113) # rgb = (0x02,0xf6,0x71)
+ (133,248, 5) # rgb = (0x85,0xf8,0x05)
+ ( 22,250, 1) # rgb = (0x16,0xfa,0x01)
+ ( 2,249,219) # rgb = (0x02,0xf9,0xdb)
+ (148,250, 5) # rgb = (0x94,0xfa,0x05)
+ ( 2,250,199) # rgb = (0x02,0xfa,0xc7)
+ (183,252, 5) # rgb = (0xb7,0xfc,0x05)
+ (176,252, 5) # rgb = (0xb0,0xfc,0x05)
+ ( 2,252,211) # rgb = (0x02,0xfc,0xd3)
+ ( 2,252,190) # rgb = (0x02,0xfc,0xbe)
+ (164,251, 5) # rgb = (0xa4,0xfb,0x05)
+ ( 12,254,128) # rgb = (0x0c,0xfe,0x80)
+ (192,253, 5) # rgb = (0xc0,0xfd,0x05)
+ (164,253, 5) # rgb = (0xa4,0xfd,0x05)
+ ( 26,254, 85) # rgb = (0x1a,0xfe,0x55)
+ ( 14,254, 1) # rgb = (0x0e,0xfe,0x01)
+ (133,253, 5) # rgb = (0x85,0xfd,0x05)
+ ( 4,253,180) # rgb = (0x04,0xfd,0xb4)
+ (196,253, 5) # rgb = (0xc4,0xfd,0x05)
+ ( 2,253,198) # rgb = (0x02,0xfd,0xc6)
+ ( 3,255, 91) # rgb = (0x03,0xff,0x5b)
+ ( 3,255, 80) # rgb = (0x03,0xff,0x50)
+ (186,255, 5) # rgb = (0xba,0xff,0x05)
+ ( 9,255, 2) # rgb = (0x09,0xff,0x02)
+ ( 3,255,118) # rgb = (0x03,0xff,0x76)
+ ( 9,255, 3) # rgb = (0x09,0xff,0x03)
+ ( 10,255, 1) # rgb = (0x0a,0xff,0x01)
+ ( 3,255, 76) # rgb = (0x03,0xff,0x4c)
+ ( 3,255, 86) # rgb = (0x03,0xff,0x56)
+ ( 3,255, 82) # rgb = (0x03,0xff,0x52)
+ ( 13,255, 1) # rgb = (0x0d,0xff,0x01)
+ ( 3,255, 49) # rgb = (0x03,0xff,0x31)
+ ( 3,255,101) # rgb = (0x03,0xff,0x65)
+ ( 61,255, 32) # rgb = (0x3d,0xff,0x20)
+ (129,255, 5) # rgb = (0x81,0xff,0x05)
+ (177,255, 5) # rgb = (0xb1,0xff,0x05)
+ ( 3,255, 37) # rgb = (0x03,0xff,0x25)
+ (149,255, 5) # rgb = (0x95,0xff,0x05)
+ ( 7,255, 6) # rgb = (0x07,0xff,0x06)
+ (192,255, 5) # rgb = (0xc0,0xff,0x05)
+ ( 2,255,131) # rgb = (0x02,0xff,0x83)
+ ( 3,255, 98) # rgb = (0x03,0xff,0x62)
+ ( 85,255, 11) # rgb = (0x55,0xff,0x0b)
+ ( 2,255,163) # rgb = (0x02,0xff,0xa3)
+ ( 2,255,149) # rgb = (0x02,0xff,0x95)
+ ( 4,255, 23) # rgb = (0x04,0xff,0x17)
+ ( 6,255, 12) # rgb = (0x06,0xff,0x0c)
+ ( 3,255, 67) # rgb = (0x03,0xff,0x43)
+ (160,255, 5) # rgb = (0xa0,0xff,0x05)
+ (119,255, 6) # rgb = (0x77,0xff,0x06)
+ (102,255, 8) # rgb = (0x66,0xff,0x08)
+ (255,255,255) # rgb = (0xff,0xff,0xff)
+ (254,254,254) # rgb = (0xfe,0xfe,0xfe)
+ (254,254,254) # rgb = (0xfe,0xfe,0xfe)
+ (252,252,252) # rgb = (0xfc,0xfc,0xfc)
+ (252,252,252) # rgb = (0xfc,0xfc,0xfc)
+ (250,250,250) # rgb = (0xfa,0xfa,0xfa)
+ (250,250,250) # rgb = (0xfa,0xfa,0xfa)
+ (248,248,248) # rgb = (0xf8,0xf8,0xf8)
+ (248,248,248) # rgb = (0xf8,0xf8,0xf8)
+ (247,247,247) # rgb = (0xf7,0xf7,0xf7)
+ (245,245,245) # rgb = (0xf5,0xf5,0xf5)
+ (245,245,245) # rgb = (0xf5,0xf5,0xf5)
+ (243,243,243) # rgb = (0xf3,0xf3,0xf3)
+ (243,243,243) # rgb = (0xf3,0xf3,0xf3)
+ (241,241,241) # rgb = (0xf1,0xf1,0xf1)
+ (241,241,241) # rgb = (0xf1,0xf1,0xf1)
+ (239,239,239) # rgb = (0xef,0xef,0xef)
+ (238,238,238) # rgb = (0xee,0xee,0xee)
+ (238,238,238) # rgb = (0xee,0xee,0xee)
+ (236,236,236) # rgb = (0xec,0xec,0xec)
+ (236,236,236) # rgb = (0xec,0xec,0xec)
+ (234,234,234) # rgb = (0xea,0xea,0xea)
+ (234,234,234) # rgb = (0xea,0xea,0xea)
+ (232,232,232) # rgb = (0xe8,0xe8,0xe8)
+ (231,231,231) # rgb = (0xe7,0xe7,0xe7)
+ (231,231,231) # rgb = (0xe7,0xe7,0xe7)
+ (229,229,229) # rgb = (0xe5,0xe5,0xe5)
+ (229,229,229) # rgb = (0xe5,0xe5,0xe5)
+ (227,227,227) # rgb = (0xe3,0xe3,0xe3)
+ (226,226,226) # rgb = (0xe2,0xe2,0xe2)
+ (226,226,226) # rgb = (0xe2,0xe2,0xe2)
+ (224,224,224) # rgb = (0xe0,0xe0,0xe0)
+ (224,224,224) # rgb = (0xe0,0xe0,0xe0)
+ (222,222,222) # rgb = (0xde,0xde,0xde)
+ (222,222,222) # rgb = (0xde,0xde,0xde)
+ (220,220,220) # rgb = (0xdc,0xdc,0xdc)
+ (219,219,219) # rgb = (0xdb,0xdb,0xdb)
+ (219,219,219) # rgb = (0xdb,0xdb,0xdb)
+ (217,217,217) # rgb = (0xd9,0xd9,0xd9)
+ (217,217,217) # rgb = (0xd9,0xd9,0xd9)
+ (215,215,215) # rgb = (0xd7,0xd7,0xd7)
+ (214,214,214) # rgb = (0xd6,0xd6,0xd6)
+ (214,214,214) # rgb = (0xd6,0xd6,0xd6)
+ (212,212,212) # rgb = (0xd4,0xd4,0xd4)
+ (212,212,212) # rgb = (0xd4,0xd4,0xd4)
+ (210,210,210) # rgb = (0xd2,0xd2,0xd2)
+ (209,209,209) # rgb = (0xd1,0xd1,0xd1)
+ (209,209,209) # rgb = (0xd1,0xd1,0xd1)
+ (207,207,207) # rgb = (0xcf,0xcf,0xcf)
+ (205,205,205) # rgb = (0xcd,0xcd,0xcd)
+ (205,205,205) # rgb = (0xcd,0xcd,0xcd)
+ (204,204,204) # rgb = (0xcc,0xcc,0xcc)
+ (204,204,204) # rgb = (0xcc,0xcc,0xcc)
+ (202,202,202) # rgb = (0xca,0xca,0xca)
+ (201,201,201) # rgb = (0xc9,0xc9,0xc9)
+ (201,201,201) # rgb = (0xc9,0xc9,0xc9)
+ (199,199,199) # rgb = (0xc7,0xc7,0xc7)
+ (199,199,199) # rgb = (0xc7,0xc7,0xc7)
+ (197,197,197) # rgb = (0xc5,0xc5,0xc5)
+ (196,196,196) # rgb = (0xc4,0xc4,0xc4)
+ (196,196,196) # rgb = (0xc4,0xc4,0xc4)
+ (194,194,194) # rgb = (0xc2,0xc2,0xc2)
+ (193,193,193) # rgb = (0xc1,0xc1,0xc1)
+ (193,193,193) # rgb = (0xc1,0xc1,0xc1)
+ (191,191,191) # rgb = (0xbf,0xbf,0xbf)
+ (191,191,191) # rgb = (0xbf,0xbf,0xbf)
+ (189,189,189) # rgb = (0xbd,0xbd,0xbd)
+ (188,188,188) # rgb = (0xbc,0xbc,0xbc)
+ (188,188,188) # rgb = (0xbc,0xbc,0xbc)
+ (186,186,186) # rgb = (0xba,0xba,0xba)
+ (185,185,185) # rgb = (0xb9,0xb9,0xb9)
+ (185,185,185) # rgb = (0xb9,0xb9,0xb9)
+ (183,183,183) # rgb = (0xb7,0xb7,0xb7)
+ (182,182,182) # rgb = (0xb6,0xb6,0xb6)
+ (182,182,182) # rgb = (0xb6,0xb6,0xb6)
+ (180,180,180) # rgb = (0xb4,0xb4,0xb4)
+ (178,178,178) # rgb = (0xb2,0xb2,0xb2)
+ (178,178,178) # rgb = (0xb2,0xb2,0xb2)
+ (177,177,177) # rgb = (0xb1,0xb1,0xb1)
+ (177,177,177) # rgb = (0xb1,0xb1,0xb1)
+ (175,175,175) # rgb = (0xaf,0xaf,0xaf)
+ (174,174,174) # rgb = (0xae,0xae,0xae)
+ (174,174,174) # rgb = (0xae,0xae,0xae)
+}
+tRNS {
+ 197 187 190 194 186 4 186 189 4 195 84 191 5 193 175 163 205 150 191 213 88 75 67 8 147 191 220 203 95 151 223 199 8 207 156 227 199 65 163 98 226 204 12 202 167 201 11 65 178 228 205 74 59 87 178 19 201 99 18 14 184 204 184 96 22 61 227 199 22 193 97 197 254 59 253 28 192 102 199 247 58 198 244 30 109 202 188 32 96 196 60 203 239 202 230 41 207 237 119 53 213 209 37 55 45 230 214 233 92 185 223 50 230 57 124 217 43 133 221 95 198 47 233 99 194 221 107 138 152 144 226 140 133 220 172 125 218 196 118 225 161 223 235 238 200 155 147 146 172 236 236 151 183 150 234 216 217 211 151 219 132 185 145 147 217 138 144 137 142 151 217 217 213}
+IMAGE {
+ pixels hex
+0520201616160a0a0a0a0a0a0a0a010101010101010101000000000000000000
+053a3a161616160a0a0a0a0a0a0a0a0a0a06060606060607070707070707071b
+053a3a3a161616161615151c1c1c1c1c1c1c12121212121b1b1b1b1b1b1b1b1b
+053a3a3a3a252525252527272727272727272724242424242424212121212121
+053a3a3a4034343425252727272727393939392d2d2d2d2d2d2d323232323232
+053a3a404034343434343939393939393939394747474343433d3d3d3d3d3d3d
+053a404b4b4b50505046464646464646464659595959595151514e5b5b616161
+053a404b4b4b50505058585858585858588c8c8c595959595b656a6e70707070
+053a4b4b4b4b5050506c5858585858588c8c8c8c8c8c5965656a6a6e70707070
+053b4b4b4b636363506c6c6c6c6c6c8781808c8c8c86a1a1a1906e6e70707070
+053b4b5757636363636c6c6c6c7787878181808c8c86a1a190909d9d9d9daa70
+053b576666666f6363777777777e8787848481808086a19090aaaaaaaa9f9f9f
+053b576666797979797b7b7b7b7b8a8a8a8a848480809c9c9c9c9c9c9c9c9c9c
+053b66747474747474747b7b7b7b8a8a8a8a8a8aacacacacacacacacacaca4a4
+052e7474747474747474747b7b7b8a8a8a6d6d6d6d6d6d6da4a4a4a4a4a4a4a4
+052e7474747474747474a0a0a0a0a0a09393936d6d6d6d787878787878787878
+05207474747474a0a0a0a0a0a0a0a0a093939191949494948989898989898989
+052a2a2a7171717171a7a7a7a7a7a7a7a7a79e9e9e9e9e9e9e9e959595959595
+052a53536871717171717171a9a9a9a9a9a9a9a9a9a9a9a99595959595959595
+053753536871717171717171a3a3a3a3a3a3a3a3979797979a9a9a9a8e8e8e8e
+05445353686871717171717171a5a2a2a2a2a2929292928585857a7a7a7a7a7a
+054453535f68687171717171a5a5a5a5a5a5a6a6a6a6a68b8b8b8b8b8b8b8b6b
+054444535f686767676767677272727f7f8383838383838d8d8d8d8d8d8d8b8b
+054444535f6767675a5a5a627272727275757f7f7f7f5d73737d7d7d82828282
+0544445367675a5a5a5a4d546262727272757575755d5d5d7373737376767676
+054444535349495a5a5a4d4d54626262626275754c5d5d5d5d60646464767676
+054444444949494949494d4d4d5454546262624c4c4c4c4c5555556060646464
+05444444444941414133353f3f3f3f3f3f4d3636363c3c454545454531313131
+05444444442f2f2f2f333535353535352c2c2c2c2c3030303030282828282828
+053744442f2f2f2f2f2f333535351d1d22222222262626262323232323232323
+053737372f2f2f2f2f2f2f331818181818181d1d1d1d1d131a1a1a1a1a1e1e1e
+052a37372f2f2f2f2f2f18111111110f0e0e0e0e0d0d0d0d0d0d0d0d0d0d0d13
+}
diff --git a/src/pkg/image/png/writer.go b/src/pkg/image/png/writer.go
index 081d06bf5..a27586f23 100644
--- a/src/pkg/image/png/writer.go
+++ b/src/pkg/image/png/writer.go
@@ -130,12 +130,8 @@ func (e *encoder) writePLTE(p image.PalettedColorModel) {
e.err = FormatError("bad palette length: " + strconv.Itoa(len(p)))
return
}
- for i := 0; i < len(p); i++ {
- r, g, b, a := p[i].RGBA()
- if a != 0xffff {
- e.err = UnsupportedError("non-opaque palette color")
- return
- }
+ for i, c := range p {
+ r, g, b, _ := c.RGBA()
e.tmp[3*i+0] = uint8(r >> 8)
e.tmp[3*i+1] = uint8(g >> 8)
e.tmp[3*i+2] = uint8(b >> 8)
@@ -143,6 +139,21 @@ func (e *encoder) writePLTE(p image.PalettedColorModel) {
e.writeChunk(e.tmp[0:3*len(p)], "PLTE")
}
+func (e *encoder) maybeWritetRNS(p image.PalettedColorModel) {
+ last := -1
+ for i, c := range p {
+ _, _, _, a := c.RGBA()
+ if a != 0xffff {
+ last = i
+ }
+ e.tmp[i] = uint8(a >> 8)
+ }
+ if last == -1 {
+ return
+ }
+ e.writeChunk(e.tmp[:last+1], "tRNS")
+}
+
// An encoder is an io.Writer that satisfies writes by writing PNG IDAT chunks,
// including an 8-byte header and 4-byte CRC checksum per Write call. Such calls
// should be relatively infrequent, since writeIDATs uses a bufio.Writer.
@@ -263,7 +274,12 @@ func writeImage(w io.Writer, m image.Image, cb int) os.Error {
defer zw.Close()
bpp := 0 // Bytes per pixel.
+
+ // Used by fast paths for common image types
var paletted *image.Paletted
+ var rgba *image.RGBA
+ rgba, _ = m.(*image.RGBA)
+
switch cb {
case cbG8:
bpp = 1
@@ -303,12 +319,24 @@ func writeImage(w io.Writer, m image.Image, cb int) os.Error {
cr[0][x+1] = c.Y
}
case cbTC8:
- for x := b.Min.X; x < b.Max.X; x++ {
- // We have previously verified that the alpha value is fully opaque.
- r, g, b, _ := m.At(x, y).RGBA()
- cr[0][3*x+1] = uint8(r >> 8)
- cr[0][3*x+2] = uint8(g >> 8)
- cr[0][3*x+3] = uint8(b >> 8)
+ // We have previously verified that the alpha value is fully opaque.
+ cr0 := cr[0]
+ if rgba != nil {
+ yoff := y * rgba.Stride
+ xoff := 3*b.Min.X + 1
+ for _, color := range rgba.Pix[yoff+b.Min.X : yoff+b.Max.X] {
+ cr0[xoff] = color.R
+ cr0[xoff+1] = color.G
+ cr0[xoff+2] = color.B
+ xoff += 3
+ }
+ } else {
+ for x := b.Min.X; x < b.Max.X; x++ {
+ r, g, b, _ := m.At(x, y).RGBA()
+ cr0[3*x+1] = uint8(r >> 8)
+ cr0[3*x+2] = uint8(g >> 8)
+ cr0[3*x+3] = uint8(b >> 8)
+ }
}
case cbP8:
rowOffset := y * paletted.Stride
@@ -430,6 +458,7 @@ func Encode(w io.Writer, m image.Image) os.Error {
e.writeIHDR()
if pal != nil {
e.writePLTE(pal.Palette)
+ e.maybeWritetRNS(pal.Palette)
}
e.writeIDATs()
e.writeIEND()
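With writePLTE no longer rejecting non-opaque palette entries, maybeWritetRNS records the 8-bit alpha of each palette index up to the last non-opaque one, and Encode emits it right after the PLTE chunk. A small hedged sketch of the writer side only (palette values invented for illustration):

package main

import (
	"bytes"
	"fmt"
	"image"
	"image/png"
)

func main() {
	// Two-entry palette: index 0 is fully transparent, index 1 is opaque.
	pal := image.PalettedColorModel{
		image.RGBAColor{0, 0, 0, 0},
		image.RGBAColor{255, 255, 255, 255},
	}
	m := image.NewPaletted(4, 4, pal) // pre-Go 1 constructor, as in this tree
	m.SetColorIndex(1, 1, 1)

	buf := new(bytes.Buffer)
	err := png.Encode(buf, m)
	// Previously this returned UnsupportedError("non-opaque palette color").
	// Now the PLTE chunk is written as before and a one-byte tRNS chunk {0}
	// records the alpha of palette index 0.
	fmt.Println(err, buf.Len())
}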
diff --git a/src/pkg/image/png/writer_test.go b/src/pkg/image/png/writer_test.go
index 4d9929f31..6b054aaa8 100644
--- a/src/pkg/image/png/writer_test.go
+++ b/src/pkg/image/png/writer_test.go
@@ -5,10 +5,10 @@
package png
import (
- "bytes"
"fmt"
"image"
"io"
+ "io/ioutil"
"os"
"testing"
)
@@ -81,10 +81,42 @@ func BenchmarkEncodePaletted(b *testing.B) {
image.RGBAColor{0, 0, 0, 255},
image.RGBAColor{255, 255, 255, 255},
})
+ b.SetBytes(640 * 480 * 1)
b.StartTimer()
- buffer := new(bytes.Buffer)
for i := 0; i < b.N; i++ {
- buffer.Reset()
- Encode(buffer, img)
+ Encode(ioutil.Discard, img)
+ }
+}
+
+func BenchmarkEncodeRGBOpaque(b *testing.B) {
+ b.StopTimer()
+ img := image.NewRGBA(640, 480)
+ // Set all pixels to 0xFF alpha to force opaque mode.
+ bo := img.Bounds()
+ for y := bo.Min.Y; y < bo.Max.Y; y++ {
+ for x := bo.Min.X; x < bo.Max.X; x++ {
+ img.Set(x, y, image.RGBAColor{0, 0, 0, 255})
+ }
+ }
+ if !img.Opaque() {
+ panic("expected image to be opaque")
+ }
+ b.SetBytes(640 * 480 * 4)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ Encode(ioutil.Discard, img)
+ }
+}
+
+func BenchmarkEncodeRGBA(b *testing.B) {
+ b.StopTimer()
+ img := image.NewRGBA(640, 480)
+ if img.Opaque() {
+ panic("expected image to not be opaque")
+ }
+ b.SetBytes(640 * 480 * 4)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ Encode(ioutil.Discard, img)
}
}
diff --git a/src/pkg/image/testdata/video-001.5bpp.gif b/src/pkg/image/testdata/video-001.5bpp.gif
new file mode 100644
index 000000000..ce53104b2
--- /dev/null
+++ b/src/pkg/image/testdata/video-001.5bpp.gif
Binary files differ
diff --git a/src/pkg/image/testdata/video-001.interlaced.gif b/src/pkg/image/testdata/video-001.interlaced.gif
new file mode 100644
index 000000000..590594ea9
--- /dev/null
+++ b/src/pkg/image/testdata/video-001.interlaced.gif
Binary files differ
diff --git a/src/pkg/image/testdata/video-005.gray.jpeg b/src/pkg/image/testdata/video-005.gray.jpeg
new file mode 100644
index 000000000..f9d6e5cdb
--- /dev/null
+++ b/src/pkg/image/testdata/video-005.gray.jpeg
Binary files differ
diff --git a/src/pkg/image/testdata/video-005.gray.png b/src/pkg/image/testdata/video-005.gray.png
new file mode 100644
index 000000000..0b0ee7538
--- /dev/null
+++ b/src/pkg/image/testdata/video-005.gray.png
Binary files differ
diff --git a/src/pkg/image/tiff/Makefile b/src/pkg/image/tiff/Makefile
new file mode 100644
index 000000000..1a001afb9
--- /dev/null
+++ b/src/pkg/image/tiff/Makefile
@@ -0,0 +1,13 @@
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=image/tiff
+GOFILES=\
+ buffer.go\
+ consts.go\
+ reader.go\
+
+include ../../../Make.pkg
diff --git a/src/pkg/image/tiff/buffer.go b/src/pkg/image/tiff/buffer.go
new file mode 100644
index 000000000..7c0714225
--- /dev/null
+++ b/src/pkg/image/tiff/buffer.go
@@ -0,0 +1,57 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tiff
+
+import (
+ "io"
+ "os"
+)
+
+// buffer buffers an io.Reader to satisfy io.ReaderAt.
+type buffer struct {
+ r io.Reader
+ buf []byte
+}
+
+func (b *buffer) ReadAt(p []byte, off int64) (int, os.Error) {
+ o := int(off)
+ end := o + len(p)
+ if int64(end) != off+int64(len(p)) {
+ return 0, os.EINVAL
+ }
+
+ m := len(b.buf)
+ if end > m {
+ if end > cap(b.buf) {
+ newcap := 1024
+ for newcap < end {
+ newcap *= 2
+ }
+ newbuf := make([]byte, end, newcap)
+ copy(newbuf, b.buf)
+ b.buf = newbuf
+ } else {
+ b.buf = b.buf[:end]
+ }
+ if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil {
+ end = m + n
+ b.buf = b.buf[:end]
+ return copy(p, b.buf[o:end]), err
+ }
+ }
+
+ return copy(p, b.buf[o:end]), nil
+}
+
+// newReaderAt converts an io.Reader into an io.ReaderAt.
+func newReaderAt(r io.Reader) io.ReaderAt {
+ if ra, ok := r.(io.ReaderAt); ok {
+ return ra
+ }
+ return &buffer{
+ r: r,
+ buf: make([]byte, 0, 1024),
+ }
+}
diff --git a/src/pkg/image/tiff/buffer_test.go b/src/pkg/image/tiff/buffer_test.go
new file mode 100644
index 000000000..4f3e68e83
--- /dev/null
+++ b/src/pkg/image/tiff/buffer_test.go
@@ -0,0 +1,36 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tiff
+
+import (
+ "os"
+ "strings"
+ "testing"
+)
+
+var readAtTests = []struct {
+ n int
+ off int64
+ s string
+ err os.Error
+}{
+ {2, 0, "ab", nil},
+ {6, 0, "abcdef", nil},
+ {3, 3, "def", nil},
+ {3, 5, "f", os.EOF},
+ {3, 6, "", os.EOF},
+}
+
+func TestReadAt(t *testing.T) {
+ r := newReaderAt(strings.NewReader("abcdef"))
+ b := make([]byte, 10)
+ for _, test := range readAtTests {
+ n, err := r.ReadAt(b[:test.n], test.off)
+ s := string(b[:n])
+ if s != test.s || err != test.err {
+ t.Errorf("buffer.ReadAt(<%v bytes>, %v): got %v, %q; want %v, %q", test.n, test.off, err, s, test.err, test.s)
+ }
+ }
+}
diff --git a/src/pkg/image/tiff/consts.go b/src/pkg/image/tiff/consts.go
new file mode 100644
index 000000000..169ba2772
--- /dev/null
+++ b/src/pkg/image/tiff/consts.go
@@ -0,0 +1,103 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tiff
+
+// A TIFF image file contains one or more images. The metadata
+// of each image is contained in an Image File Directory (IFD),
+// which contains entries of 12 bytes each and is described
+// on page 14-16 of the specification. An IFD entry consists of
+//
+// - a tag, which describes the meaning of the entry,
+// - the data type and length of the entry,
+// - the data itself or a pointer to it if it is more than 4 bytes.
+//
+// The presence of a length means that each IFD is effectively an array.
+
+const (
+ leHeader = "II\x2A\x00" // Header for little-endian files.
+ beHeader = "MM\x00\x2A" // Header for big-endian files.
+
+ ifdLen = 12 // Length of an IFD entry in bytes.
+)
+
+// Data types (p. 14-16 of the spec).
+const (
+ dtByte = 1
+ dtASCII = 2
+ dtShort = 3
+ dtLong = 4
+ dtRational = 5
+)
+
+// The length of one instance of each data type in bytes.
+var lengths = [...]uint32{0, 1, 1, 2, 4, 8}
+
+// Tags (see p. 28-41 of the spec).
+const (
+ tImageWidth = 256
+ tImageLength = 257
+ tBitsPerSample = 258
+ tCompression = 259
+ tPhotometricInterpretation = 262
+
+ tStripOffsets = 273
+ tSamplesPerPixel = 277
+ tRowsPerStrip = 278
+ tStripByteCounts = 279
+
+ tXResolution = 282
+ tYResolution = 283
+ tResolutionUnit = 296
+
+ tPredictor = 317
+ tColorMap = 320
+ tExtraSamples = 338
+ tSampleFormat = 339
+)
+
+// Compression types (defined in various places in the spec and supplements).
+const (
+ cNone = 1
+ cCCITT = 2
+ cG3 = 3 // Group 3 Fax.
+ cG4 = 4 // Group 4 Fax.
+ cLZW = 5
+ cJPEGOld = 6 // Superseded by cJPEG.
+ cJPEG = 7
+ cDeflate = 8 // zlib compression.
+ cPackBits = 32773
+ cDeflateOld = 32946 // Superseded by cDeflate.
+)
+
+// Photometric interpretation values (see p. 37 of the spec).
+const (
+ pWhiteIsZero = 0
+ pBlackIsZero = 1
+ pRGB = 2
+ pPaletted = 3
+ pTransMask = 4 // transparency mask
+ pCMYK = 5
+ pYCbCr = 6
+ pCIELab = 8
+)
+
+// Values for the tPredictor tag (page 64-65 of the spec).
+const (
+ prNone = 1
+ prHorizontal = 2
+)
+
+// imageMode represents the mode of the image.
+type imageMode int
+
+const (
+ mBilevel imageMode = iota
+ mPaletted
+ mGray
+ mGrayInvert
+ mRGB
+ mRGBA
+ mNRGBA
+)
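Each IFD entry is 12 bytes: a 2-byte tag, a 2-byte data type, a 4-byte count, and 4 bytes holding either the value itself (when it fits) or an offset to it. A hedged sketch of pulling those fields out of a hand-built little-endian entry, mirroring what ifdUint in reader.go does for an inline Short value:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// tag 256 (ImageWidth), type 3 (Short), count 1, value 640 stored inline.
	entry := []byte{
		0x00, 0x01, // tag = 256
		0x03, 0x00, // datatype = dtShort
		0x01, 0x00, 0x00, 0x00, // count = 1
		0x80, 0x02, 0x00, 0x00, // value = 640 (datalen = 2*1 <= 4, so inline)
	}
	bo := binary.LittleEndian

	tag := bo.Uint16(entry[0:2])
	datatype := bo.Uint16(entry[2:4])
	count := bo.Uint32(entry[4:8])
	// For a larger datalen, entry[8:12] would instead hold a file offset.
	value := bo.Uint16(entry[8:10])

	fmt.Println(tag, datatype, count, value) // 256 3 1 640
}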
diff --git a/src/pkg/image/tiff/reader.go b/src/pkg/image/tiff/reader.go
new file mode 100644
index 000000000..57a7be4a2
--- /dev/null
+++ b/src/pkg/image/tiff/reader.go
@@ -0,0 +1,399 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tiff implements a TIFF image decoder.
+//
+// The TIFF specification is at http://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
+package tiff
+
+import (
+ "compress/lzw"
+ "compress/zlib"
+ "encoding/binary"
+ "image"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+// A FormatError reports that the input is not a valid TIFF image.
+type FormatError string
+
+func (e FormatError) String() string {
+ return "tiff: invalid format: " + string(e)
+}
+
+// An UnsupportedError reports that the input uses a valid but
+// unimplemented feature.
+type UnsupportedError string
+
+func (e UnsupportedError) String() string {
+ return "tiff: unsupported feature: " + string(e)
+}
+
+// An InternalError reports that an internal error was encountered.
+type InternalError string
+
+func (e InternalError) String() string {
+ return "tiff: internal error: " + string(e)
+}
+
+type decoder struct {
+ r io.ReaderAt
+ byteOrder binary.ByteOrder
+ config image.Config
+ mode imageMode
+ features map[int][]uint
+ palette []image.Color
+}
+
+// firstVal returns the first uint of the features entry with the given tag,
+// or 0 if the tag does not exist.
+func (d *decoder) firstVal(tag int) uint {
+ f := d.features[tag]
+ if len(f) == 0 {
+ return 0
+ }
+ return f[0]
+}
+
+// ifdUint decodes the IFD entry in p, which must be of the Byte, Short
+// or Long type, and returns the decoded uint values.
+func (d *decoder) ifdUint(p []byte) (u []uint, err os.Error) {
+ var raw []byte
+ datatype := d.byteOrder.Uint16(p[2:4])
+ count := d.byteOrder.Uint32(p[4:8])
+ if datalen := lengths[datatype] * count; datalen > 4 {
+ // The IFD contains a pointer to the real value.
+ raw = make([]byte, datalen)
+ _, err = d.r.ReadAt(raw, int64(d.byteOrder.Uint32(p[8:12])))
+ } else {
+ raw = p[8 : 8+datalen]
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ u = make([]uint, count)
+ switch datatype {
+ case dtByte:
+ for i := uint32(0); i < count; i++ {
+ u[i] = uint(raw[i])
+ }
+ case dtShort:
+ for i := uint32(0); i < count; i++ {
+ u[i] = uint(d.byteOrder.Uint16(raw[2*i : 2*(i+1)]))
+ }
+ case dtLong:
+ for i := uint32(0); i < count; i++ {
+ u[i] = uint(d.byteOrder.Uint32(raw[4*i : 4*(i+1)]))
+ }
+ default:
+ return nil, UnsupportedError("data type")
+ }
+ return u, nil
+}
+
+// parseIFD decides whether the IFD entry in p is "interesting" and
+// stows away the data in the decoder.
+func (d *decoder) parseIFD(p []byte) os.Error {
+ tag := d.byteOrder.Uint16(p[0:2])
+ switch tag {
+ case tBitsPerSample,
+ tExtraSamples,
+ tPhotometricInterpretation,
+ tCompression,
+ tPredictor,
+ tStripOffsets,
+ tStripByteCounts,
+ tRowsPerStrip,
+ tImageLength,
+ tImageWidth:
+ val, err := d.ifdUint(p)
+ if err != nil {
+ return err
+ }
+ d.features[int(tag)] = val
+ case tColorMap:
+ val, err := d.ifdUint(p)
+ if err != nil {
+ return err
+ }
+ numcolors := len(val) / 3
+ if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 {
+ return FormatError("bad ColorMap length")
+ }
+ d.palette = make([]image.Color, numcolors)
+ for i := 0; i < numcolors; i++ {
+ d.palette[i] = image.RGBA64Color{
+ uint16(val[i]),
+ uint16(val[i+numcolors]),
+ uint16(val[i+2*numcolors]),
+ 0xffff,
+ }
+ }
+ case tSampleFormat:
+ // Page 27 of the spec: If the SampleFormat is present and
+ // the value is not 1 [= unsigned integer data], a Baseline
+ // TIFF reader that cannot handle the SampleFormat value
+ // must terminate the import process gracefully.
+ val, err := d.ifdUint(p)
+ if err != nil {
+ return err
+ }
+ for _, v := range val {
+ if v != 1 {
+ return UnsupportedError("sample format")
+ }
+ }
+ }
+ return nil
+}
+
+// decode decodes the raw data of an image with 8 bits in each sample.
+// It reads from p and writes the strip with ymin <= y < ymax into dst.
+func (d *decoder) decode(dst image.Image, p []byte, ymin, ymax int) os.Error {
+ spp := len(d.features[tBitsPerSample]) // samples per pixel
+ off := 0
+ width := dst.Bounds().Dx()
+
+ if len(p) < spp*(ymax-ymin)*width {
+ return FormatError("short data strip")
+ }
+
+ // Apply horizontal predictor if necessary.
+ // In this case, p contains the color difference to the preceding pixel.
+ // See page 64-65 of the spec.
+ if d.firstVal(tPredictor) == prHorizontal {
+ for y := ymin; y < ymax; y++ {
+ off += spp
+ for x := 0; x < (width-1)*spp; x++ {
+ p[off] += p[off-spp]
+ off++
+ }
+ }
+ off = 0
+ }
+
+ switch d.mode {
+ case mGray:
+ img := dst.(*image.Gray)
+ for y := ymin; y < ymax; y++ {
+ for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
+ img.Set(x, y, image.GrayColor{p[off]})
+ off += spp
+ }
+ }
+ case mGrayInvert:
+ img := dst.(*image.Gray)
+ for y := ymin; y < ymax; y++ {
+ for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
+ img.Set(x, y, image.GrayColor{0xff - p[off]})
+ off += spp
+ }
+ }
+ case mPaletted:
+ img := dst.(*image.Paletted)
+ for y := ymin; y < ymax; y++ {
+ for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
+ img.SetColorIndex(x, y, p[off])
+ off += spp
+ }
+ }
+ case mRGB:
+ img := dst.(*image.RGBA)
+ for y := ymin; y < ymax; y++ {
+ for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
+ img.Set(x, y, image.RGBAColor{p[off], p[off+1], p[off+2], 0xff})
+ off += spp
+ }
+ }
+ case mNRGBA:
+ img := dst.(*image.NRGBA)
+ for y := ymin; y < ymax; y++ {
+ for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
+ img.Set(x, y, image.NRGBAColor{p[off], p[off+1], p[off+2], p[off+3]})
+ off += spp
+ }
+ }
+ case mRGBA:
+ img := dst.(*image.RGBA)
+ for y := ymin; y < ymax; y++ {
+ for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
+ img.Set(x, y, image.RGBAColor{p[off], p[off+1], p[off+2], p[off+3]})
+ off += spp
+ }
+ }
+ }
+
+ return nil
+}
+
+func newDecoder(r io.Reader) (*decoder, os.Error) {
+ d := &decoder{
+ r: newReaderAt(r),
+ features: make(map[int][]uint),
+ }
+
+ p := make([]byte, 8)
+ if _, err := d.r.ReadAt(p, 0); err != nil {
+ return nil, err
+ }
+ switch string(p[0:4]) {
+ case leHeader:
+ d.byteOrder = binary.LittleEndian
+ case beHeader:
+ d.byteOrder = binary.BigEndian
+ default:
+ return nil, FormatError("malformed header")
+ }
+
+ ifdOffset := int64(d.byteOrder.Uint32(p[4:8]))
+
+ // The first two bytes contain the number of entries (12 bytes each).
+ if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil {
+ return nil, err
+ }
+ numItems := int(d.byteOrder.Uint16(p[0:2]))
+
+ // All IFD entries are read in one chunk.
+ p = make([]byte, ifdLen*numItems)
+ if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < len(p); i += ifdLen {
+ if err := d.parseIFD(p[i : i+ifdLen]); err != nil {
+ return nil, err
+ }
+ }
+
+ d.config.Width = int(d.firstVal(tImageWidth))
+ d.config.Height = int(d.firstVal(tImageLength))
+
+ // Determine the image mode.
+ switch d.firstVal(tPhotometricInterpretation) {
+ case pRGB:
+ d.config.ColorModel = image.RGBAColorModel
+ // RGB images normally have 3 samples per pixel.
+ // If there are more, ExtraSamples (p. 31-32 of the spec)
+ // gives their meaning (usually an alpha channel).
+ switch len(d.features[tBitsPerSample]) {
+ case 3:
+ d.mode = mRGB
+ case 4:
+ switch d.firstVal(tExtraSamples) {
+ case 1:
+ d.mode = mRGBA
+ case 2:
+ d.mode = mNRGBA
+ d.config.ColorModel = image.NRGBAColorModel
+ default:
+ // The extra sample is discarded.
+ d.mode = mRGB
+ }
+ default:
+ return nil, FormatError("wrong number of samples for RGB")
+ }
+ case pPaletted:
+ d.mode = mPaletted
+ d.config.ColorModel = image.PalettedColorModel(d.palette)
+ case pWhiteIsZero:
+ d.mode = mGrayInvert
+ d.config.ColorModel = image.GrayColorModel
+ case pBlackIsZero:
+ d.mode = mGray
+ d.config.ColorModel = image.GrayColorModel
+ default:
+ return nil, UnsupportedError("color model")
+ }
+
+ if _, ok := d.features[tBitsPerSample]; !ok {
+ return nil, FormatError("BitsPerSample tag missing")
+ }
+ for _, b := range d.features[tBitsPerSample] {
+ if b != 8 {
+ return nil, UnsupportedError("not an 8-bit image")
+ }
+ }
+
+ return d, nil
+}
+
+// DecodeConfig returns the color model and dimensions of a TIFF image without
+// decoding the entire image.
+func DecodeConfig(r io.Reader) (image.Config, os.Error) {
+ d, err := newDecoder(r)
+ if err != nil {
+ return image.Config{}, err
+ }
+ return d.config, nil
+}
+
+// Decode reads a TIFF image from r and returns it as an image.Image.
+// The type of Image returned depends on the contents of the TIFF.
+func Decode(r io.Reader) (img image.Image, err os.Error) {
+ d, err := newDecoder(r)
+ if err != nil {
+ return
+ }
+
+ // Check if we have the right number of strips, offsets and counts.
+ rps := int(d.firstVal(tRowsPerStrip))
+ if rps == 0 {
+ return nil, FormatError("inconsistent header")
+ }
+ numStrips := (d.config.Height + rps - 1) / rps
+ if len(d.features[tStripOffsets]) < numStrips || len(d.features[tStripByteCounts]) < numStrips {
+ return nil, FormatError("inconsistent header")
+ }
+
+ switch d.mode {
+ case mGray, mGrayInvert:
+ img = image.NewGray(d.config.Width, d.config.Height)
+ case mPaletted:
+ img = image.NewPaletted(d.config.Width, d.config.Height, d.palette)
+ case mNRGBA:
+ img = image.NewNRGBA(d.config.Width, d.config.Height)
+ case mRGB, mRGBA:
+ img = image.NewRGBA(d.config.Width, d.config.Height)
+ }
+
+ var p []byte
+ for i := 0; i < numStrips; i++ {
+ ymin := i * rps
+ // The last strip may be shorter.
+ if i == numStrips-1 && d.config.Height%rps != 0 {
+ rps = d.config.Height % rps
+ }
+ offset := int64(d.features[tStripOffsets][i])
+ n := int64(d.features[tStripByteCounts][i])
+ switch d.firstVal(tCompression) {
+ case cNone:
+ // TODO(bsiegert): Avoid copy if r is a tiff.buffer.
+ p = make([]byte, n)
+ _, err = d.r.ReadAt(p, offset)
+ case cLZW:
+ r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
+ p, err = ioutil.ReadAll(r)
+ r.Close()
+ case cDeflate, cDeflateOld:
+ var r io.ReadCloser
+ r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
+ if err != nil {
+ return nil, err
+ }
+ p, err = ioutil.ReadAll(r)
+ r.Close()
+ default:
+ err = UnsupportedError("compression")
+ }
+ if err != nil {
+ return
+ }
+ err = d.decode(img, p, ymin, ymin+rps)
+ }
+ return
+}
+
+func init() {
+ image.RegisterFormat("tiff", leHeader, Decode, DecodeConfig)
+ image.RegisterFormat("tiff", beHeader, Decode, DecodeConfig)
+}
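Because init registers both byte-order magics, image.Decode dispatches to this package once it is linked in, and tiff.Decode or DecodeConfig can also be called directly. A minimal sketch of the header check; the input is deliberately not a TIFF, so it exercises the FormatError path without needing a test file:

package main

import (
	"fmt"
	"image/tiff"
	"strings"
)

func main() {
	// The first four bytes are neither "II\x2A\x00" nor "MM\x00\x2A",
	// so newDecoder rejects the stream before reading any pixel data.
	_, err := tiff.DecodeConfig(strings.NewReader("not a TIFF stream"))
	fmt.Println(err) // tiff: invalid format: malformed header
}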
diff --git a/src/pkg/io/multi_test.go b/src/pkg/io/multi_test.go
index 3ecb7c75d..1b3589dde 100644
--- a/src/pkg/io/multi_test.go
+++ b/src/pkg/io/multi_test.go
@@ -20,8 +20,9 @@ func TestMultiReader(t *testing.T) {
nread := 0
withFooBar := func(tests func()) {
r1 := strings.NewReader("foo ")
- r2 := strings.NewReader("bar")
- mr = MultiReader(r1, r2)
+ r2 := strings.NewReader("")
+ r3 := strings.NewReader("bar")
+ mr = MultiReader(r1, r2, r3)
buf = make([]byte, 20)
tests()
}
diff --git a/src/pkg/mime/multipart/Makefile b/src/pkg/mime/multipart/Makefile
index 5051f0df1..de1a439f2 100644
--- a/src/pkg/mime/multipart/Makefile
+++ b/src/pkg/mime/multipart/Makefile
@@ -8,5 +8,6 @@ TARG=mime/multipart
GOFILES=\
formdata.go\
multipart.go\
+ writer.go\
include ../../../Make.pkg
diff --git a/src/pkg/mime/multipart/formdata.go b/src/pkg/mime/multipart/formdata.go
index 287938557..5f3286565 100644
--- a/src/pkg/mime/multipart/formdata.go
+++ b/src/pkg/mime/multipart/formdata.go
@@ -30,21 +30,18 @@ func (r *multiReader) ReadForm(maxMemory int64) (f *Form, err os.Error) {
maxValueBytes := int64(10 << 20) // 10 MB is a lot of text.
for {
p, err := r.NextPart()
+ if err == os.EOF {
+ break
+ }
if err != nil {
return nil, err
}
- if p == nil {
- break
- }
name := p.FormName()
if name == "" {
continue
}
- var filename string
- if p.dispositionParams != nil {
- filename = p.dispositionParams["filename"]
- }
+ filename := p.FileName()
var b bytes.Buffer
diff --git a/src/pkg/mime/multipart/formdata_test.go b/src/pkg/mime/multipart/formdata_test.go
index b56e2a430..9424c3778 100644
--- a/src/pkg/mime/multipart/formdata_test.go
+++ b/src/pkg/mime/multipart/formdata_test.go
@@ -33,7 +33,7 @@ func TestReadForm(t *testing.T) {
}
fd = testFile(t, f.File["fileb"][0], "fileb.txt", filebContents)
if _, ok := fd.(*os.File); !ok {
- t.Error("file has unexpected underlying type %T", fd)
+ t.Errorf("file has unexpected underlying type %T", fd)
}
}
diff --git a/src/pkg/mime/multipart/multipart.go b/src/pkg/mime/multipart/multipart.go
index e0b747c3f..9affa1126 100644
--- a/src/pkg/mime/multipart/multipart.go
+++ b/src/pkg/mime/multipart/multipart.go
@@ -15,25 +15,25 @@ package multipart
import (
"bufio"
"bytes"
+ "fmt"
"io"
"io/ioutil"
"mime"
"net/textproto"
"os"
"regexp"
- "strings"
)
var headerRegexp *regexp.Regexp = regexp.MustCompile("^([a-zA-Z0-9\\-]+): *([^\r\n]+)")
+var emptyParams = make(map[string]string)
+
// Reader is an iterator over parts in a MIME multipart body.
// Reader's underlying parser consumes its input as needed. Seeking
// isn't supported.
type Reader interface {
- // NextPart returns the next part in the multipart, or (nil,
- // nil) on EOF. An error is returned if the underlying reader
- // reports errors, or on truncated or otherwise malformed
- // input.
+ // NextPart returns the next part in the multipart or an error.
+ // When there are no more parts, the error os.EOF is returned.
NextPart() (*Part, os.Error)
// ReadForm parses an entire multipart message whose parts have
@@ -53,6 +53,7 @@ type Part struct {
buffer *bytes.Buffer
mr *multiReader
+ disposition string
dispositionParams map[string]string
}
@@ -61,43 +62,58 @@ type Part struct {
func (p *Part) FormName() string {
// See http://tools.ietf.org/html/rfc2183 section 2 for EBNF
// of Content-Disposition value format.
- if p.dispositionParams != nil {
- return p.dispositionParams["name"]
- }
- v := p.Header.Get("Content-Disposition")
- if v == "" {
- return ""
+ if p.dispositionParams == nil {
+ p.parseContentDisposition()
}
- if d, params := mime.ParseMediaType(v); d != "form-data" {
+ if p.disposition != "form-data" {
return ""
- } else {
- p.dispositionParams = params
}
return p.dispositionParams["name"]
}
+
+// FileName returns the filename parameter of the Part's
+// Content-Disposition header.
+func (p *Part) FileName() string {
+ if p.dispositionParams == nil {
+ p.parseContentDisposition()
+ }
+ return p.dispositionParams["filename"]
+}
+
+func (p *Part) parseContentDisposition() {
+ v := p.Header.Get("Content-Disposition")
+ p.disposition, p.dispositionParams = mime.ParseMediaType(v)
+ if p.dispositionParams == nil {
+ p.dispositionParams = emptyParams
+ }
+}
+
// NewReader creates a new multipart Reader reading from r using the
// given MIME boundary.
func NewReader(reader io.Reader, boundary string) Reader {
+ b := []byte("\r\n--" + boundary + "--")
return &multiReader{
- boundary: boundary,
- dashBoundary: "--" + boundary,
- endLine: "--" + boundary + "--",
- bufReader: bufio.NewReader(reader),
+ bufReader: bufio.NewReader(reader),
+
+ nlDashBoundary: b[:len(b)-2],
+ dashBoundaryDash: b[2:],
+ dashBoundary: b[2 : len(b)-2],
}
}
// Implementation ....
-func newPart(mr *multiReader) (bp *Part, err os.Error) {
- bp = new(Part)
- bp.Header = make(map[string][]string)
- bp.mr = mr
- bp.buffer = new(bytes.Buffer)
- if err = bp.populateHeaders(); err != nil {
- bp = nil
+func newPart(mr *multiReader) (*Part, os.Error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ buffer: new(bytes.Buffer),
}
- return
+ if err := bp.populateHeaders(); err != nil {
+ return nil, err
+ }
+ return bp, nil
}
func (bp *Part) populateHeaders() os.Error {
@@ -122,44 +138,49 @@ func (bp *Part) populateHeaders() os.Error {
// Read reads the body of a part, after its headers and before the
// next part (if any) begins.
func (bp *Part) Read(p []byte) (n int, err os.Error) {
- for {
- if bp.buffer.Len() >= len(p) {
- // Internal buffer of unconsumed data is large enough for
- // the read request. No need to parse more at the moment.
- break
- }
- if !bp.mr.ensureBufferedLine() {
- return 0, io.ErrUnexpectedEOF
- }
- if bp.mr.bufferedLineIsBoundary() {
- // Don't consume this line
- break
- }
+ if bp.buffer.Len() >= len(p) {
+ // Internal buffer of unconsumed data is large enough for
+ // the read request. No need to parse more at the moment.
+ return bp.buffer.Read(p)
+ }
+ peek, err := bp.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
+ unexpectedEof := err == os.EOF
+ if err != nil && !unexpectedEof {
+ return 0, fmt.Errorf("multipart: Part Read: %v", err)
+ }
+ if peek == nil {
+ panic("nil peek buf")
+ }
- // Write all of this line, except the final CRLF
- s := *bp.mr.bufferedLine
- if strings.HasSuffix(s, "\r\n") {
- bp.mr.consumeLine()
- if !bp.mr.ensureBufferedLine() {
- return 0, io.ErrUnexpectedEOF
- }
- if bp.mr.bufferedLineIsBoundary() {
- // The final \r\n isn't ours. It logically belongs
- // to the boundary line which follows.
- bp.buffer.WriteString(s[0 : len(s)-2])
- } else {
- bp.buffer.WriteString(s)
- }
- break
- }
- if strings.HasSuffix(s, "\n") {
- bp.buffer.WriteString(s)
- bp.mr.consumeLine()
- continue
+ // Search the peek buffer for "\r\n--boundary". If found,
+ // consume everything up to the boundary. If not, consume only
+ // as much of the peek buffer as cannot hold the boundary
+ // string.
+ nCopy := 0
+ foundBoundary := false
+ if idx := bytes.Index(peek, bp.mr.nlDashBoundary); idx != -1 {
+ nCopy = idx
+ foundBoundary = true
+ } else if safeCount := len(peek) - len(bp.mr.nlDashBoundary); safeCount > 0 {
+ nCopy = safeCount
+ } else if unexpectedEof {
+ // If we've run out of peek buffer and the boundary
+ // wasn't found (and can't possibly fit), we must have
+ // hit the end of the file unexpectedly.
+ return 0, io.ErrUnexpectedEOF
+ }
+ if nCopy > 0 {
+ if _, err := io.Copyn(bp.buffer, bp.mr.bufReader, int64(nCopy)); err != nil {
+ return 0, err
}
- return 0, os.NewError("multipart parse error during Read; unexpected line: " + s)
}
- return bp.buffer.Read(p)
+ n, err = bp.buffer.Read(p)
+ if err == os.EOF && !foundBoundary {
+ // If the boundary hasn't been reached there's more to
+ // read, so don't pass through an EOF from the buffer
+ err = nil
+ }
+ return
}
func (bp *Part) Close() os.Error {
@@ -168,46 +189,12 @@ func (bp *Part) Close() os.Error {
}
type multiReader struct {
- boundary string
- dashBoundary string // --boundary
- endLine string // --boundary--
+ bufReader *bufio.Reader
- bufferedLine *string
-
- bufReader *bufio.Reader
currentPart *Part
partsRead int
-}
-
-func (mr *multiReader) eof() bool {
- return mr.bufferedLine == nil &&
- !mr.readLine()
-}
-
-func (mr *multiReader) readLine() bool {
- lineBytes, err := mr.bufReader.ReadSlice('\n')
- if err != nil {
- // TODO: care about err being EOF or not?
- return false
- }
- line := string(lineBytes)
- mr.bufferedLine = &line
- return true
-}
-func (mr *multiReader) bufferedLineIsBoundary() bool {
- return strings.HasPrefix(*mr.bufferedLine, mr.dashBoundary)
-}
-
-func (mr *multiReader) ensureBufferedLine() bool {
- if mr.bufferedLine == nil {
- return mr.readLine()
- }
- return true
-}
-
-func (mr *multiReader) consumeLine() {
- mr.bufferedLine = nil
+ nlDashBoundary, dashBoundaryDash, dashBoundary []byte
}
func (mr *multiReader) NextPart() (*Part, os.Error) {
@@ -215,13 +202,14 @@ func (mr *multiReader) NextPart() (*Part, os.Error) {
mr.currentPart.Close()
}
+ expectNewPart := false
for {
- if mr.eof() {
- return nil, io.ErrUnexpectedEOF
+ line, err := mr.bufReader.ReadSlice('\n')
+ if err != nil {
+ return nil, fmt.Errorf("multipart: NextPart: %v", err)
}
- if isBoundaryDelimiterLine(*mr.bufferedLine, mr.dashBoundary) {
- mr.consumeLine()
+ if mr.isBoundaryDelimiterLine(line) {
mr.partsRead++
bp, err := newPart(mr)
if err != nil {
@@ -231,55 +219,66 @@ func (mr *multiReader) NextPart() (*Part, os.Error) {
return bp, nil
}
- if hasPrefixThenNewline(*mr.bufferedLine, mr.endLine) {
- mr.consumeLine()
- // Expected EOF (no error)
- return nil, nil
+ if hasPrefixThenNewline(line, mr.dashBoundaryDash) {
+ // Expected EOF
+ return nil, os.EOF
+ }
+
+ if expectNewPart {
+ return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
}
if mr.partsRead == 0 {
// skip line
- mr.consumeLine()
continue
}
- return nil, os.NewError("Unexpected line in Next().")
+ if bytes.Equal(line, []byte("\r\n")) {
+ // Consume the "\r\n" separator between the
+ // body of the previous part and the boundary
+ // line we now expect will follow. (either a
+ // new part or the end boundary)
+ expectNewPart = true
+ continue
+ }
+
+ return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
}
panic("unreachable")
}
-func isBoundaryDelimiterLine(line, dashPrefix string) bool {
+func (mr *multiReader) isBoundaryDelimiterLine(line []byte) bool {
// http://tools.ietf.org/html/rfc2046#section-5.1
// The boundary delimiter line is then defined as a line
// consisting entirely of two hyphen characters ("-",
// decimal value 45) followed by the boundary parameter
// value from the Content-Type header field, optional linear
// whitespace, and a terminating CRLF.
- if !strings.HasPrefix(line, dashPrefix) {
+ if !bytes.HasPrefix(line, mr.dashBoundary) {
return false
}
- if strings.HasSuffix(line, "\r\n") {
- return onlyHorizontalWhitespace(line[len(dashPrefix) : len(line)-2])
+ if bytes.HasSuffix(line, []byte("\r\n")) {
+ return onlyHorizontalWhitespace(line[len(mr.dashBoundary) : len(line)-2])
}
// Violate the spec and also support newlines without the
// carriage return...
- if strings.HasSuffix(line, "\n") {
- return onlyHorizontalWhitespace(line[len(dashPrefix) : len(line)-1])
+ if bytes.HasSuffix(line, []byte("\n")) {
+ return onlyHorizontalWhitespace(line[len(mr.dashBoundary) : len(line)-1])
}
return false
}
-func onlyHorizontalWhitespace(s string) bool {
- for i := 0; i < len(s); i++ {
- if s[i] != ' ' && s[i] != '\t' {
+func onlyHorizontalWhitespace(s []byte) bool {
+ for _, b := range s {
+ if b != ' ' && b != '\t' {
return false
}
}
return true
}
-func hasPrefixThenNewline(s, prefix string) bool {
- return strings.HasPrefix(s, prefix) &&
- (len(s) == len(prefix)+1 && strings.HasSuffix(s, "\n") ||
- len(s) == len(prefix)+2 && strings.HasSuffix(s, "\r\n"))
+func hasPrefixThenNewline(s, prefix []byte) bool {
+ return bytes.HasPrefix(s, prefix) &&
+ (len(s) == len(prefix)+1 && s[len(s)-1] == '\n' ||
+ len(s) == len(prefix)+2 && bytes.HasSuffix(s, []byte("\r\n")))
}
diff --git a/src/pkg/mime/multipart/multipart_test.go b/src/pkg/mime/multipart/multipart_test.go
index f8f10f3e1..ec564b1d9 100644
--- a/src/pkg/mime/multipart/multipart_test.go
+++ b/src/pkg/mime/multipart/multipart_test.go
@@ -8,38 +8,37 @@ import (
"bytes"
"fmt"
"io"
+ "io/ioutil"
"json"
"os"
- "regexp"
"strings"
"testing"
)
func TestHorizontalWhitespace(t *testing.T) {
- if !onlyHorizontalWhitespace(" \t") {
+ if !onlyHorizontalWhitespace([]byte(" \t")) {
t.Error("expected pass")
}
- if onlyHorizontalWhitespace("foo bar") {
+ if onlyHorizontalWhitespace([]byte("foo bar")) {
t.Error("expected failure")
}
}
func TestBoundaryLine(t *testing.T) {
- boundary := "myBoundary"
- prefix := "--" + boundary
- if !isBoundaryDelimiterLine("--myBoundary\r\n", prefix) {
+ mr := NewReader(strings.NewReader(""), "myBoundary").(*multiReader)
+ if !mr.isBoundaryDelimiterLine([]byte("--myBoundary\r\n")) {
t.Error("expected")
}
- if !isBoundaryDelimiterLine("--myBoundary \r\n", prefix) {
+ if !mr.isBoundaryDelimiterLine([]byte("--myBoundary \r\n")) {
t.Error("expected")
}
- if !isBoundaryDelimiterLine("--myBoundary \n", prefix) {
+ if !mr.isBoundaryDelimiterLine([]byte("--myBoundary \n")) {
t.Error("expected")
}
- if isBoundaryDelimiterLine("--myBoundary bogus \n", prefix) {
+ if mr.isBoundaryDelimiterLine([]byte("--myBoundary bogus \n")) {
t.Error("expected fail")
}
- if isBoundaryDelimiterLine("--myBoundary bogus--", prefix) {
+ if mr.isBoundaryDelimiterLine([]byte("--myBoundary bogus--")) {
t.Error("expected fail")
}
}
@@ -57,29 +56,32 @@ func expectEq(t *testing.T, expected, actual, what string) {
what, escapeString(actual), len(actual), escapeString(expected), len(expected))
}
-func TestFormName(t *testing.T) {
- p := new(Part)
- p.Header = make(map[string][]string)
- tests := [...][2]string{
- {`form-data; name="foo"`, "foo"},
- {` form-data ; name=foo`, "foo"},
- {`FORM-DATA;name="foo"`, "foo"},
- {` FORM-DATA ; name="foo"`, "foo"},
- {` FORM-DATA ; name="foo"`, "foo"},
- {` FORM-DATA ; name=foo`, "foo"},
- {` FORM-DATA ; filename="foo.txt"; name=foo; baz=quux`, "foo"},
+func TestNameAccessors(t *testing.T) {
+ tests := [...][3]string{
+ {`form-data; name="foo"`, "foo", ""},
+ {` form-data ; name=foo`, "foo", ""},
+ {`FORM-DATA;name="foo"`, "foo", ""},
+ {` FORM-DATA ; name="foo"`, "foo", ""},
+ {` FORM-DATA ; name="foo"`, "foo", ""},
+ {` FORM-DATA ; name=foo`, "foo", ""},
+ {` FORM-DATA ; filename="foo.txt"; name=foo; baz=quux`, "foo", "foo.txt"},
+ {` not-form-data ; filename="bar.txt"; name=foo; baz=quux`, "", "bar.txt"},
}
- for _, test := range tests {
+ for i, test := range tests {
+ p := &Part{Header: make(map[string][]string)}
p.Header.Set("Content-Disposition", test[0])
- expected := test[1]
- actual := p.FormName()
- if actual != expected {
- t.Errorf("expected \"%s\"; got: \"%s\"", expected, actual)
+ if g, e := p.FormName(), test[1]; g != e {
+ t.Errorf("test %d: FormName() = %q; want %q", i, g, e)
+ }
+ if g, e := p.FileName(), test[2]; g != e {
+ t.Errorf("test %d: FileName() = %q; want %q", i, g, e)
}
}
}
-func TestMultipart(t *testing.T) {
+var longLine = strings.Repeat("\n\n\r\r\r\n\r\000", (1<<20)/8)
+
+func testMultipartBody() string {
testBody := `
This is a multi-part message. This line is ignored.
--MyBoundary
@@ -90,6 +92,10 @@ foo-bar: baz
My value
The end.
--MyBoundary
+name: bigsection
+
+[longline]
+--MyBoundary
Header1: value1b
HEADER2: value2b
foo-bar: bazb
@@ -102,11 +108,26 @@ Line 3 ends in a newline, but just one.
never read data
--MyBoundary--
+
+
+useless trailer
`
- testBody = regexp.MustCompile("\n").ReplaceAllString(testBody, "\r\n")
- bodyReader := strings.NewReader(testBody)
+ testBody = strings.Replace(testBody, "\n", "\r\n", -1)
+ return strings.Replace(testBody, "[longline]", longLine, 1)
+}
- reader := NewReader(bodyReader, "MyBoundary")
+func TestMultipart(t *testing.T) {
+ bodyReader := strings.NewReader(testMultipartBody())
+ testMultipart(t, bodyReader)
+}
+
+func TestMultipartSlowInput(t *testing.T) {
+ bodyReader := strings.NewReader(testMultipartBody())
+ testMultipart(t, &slowReader{bodyReader})
+}
+
+func testMultipart(t *testing.T, r io.Reader) {
+ reader := NewReader(r, "MyBoundary")
buf := new(bytes.Buffer)
// Part1
@@ -125,38 +146,64 @@ never read data
t.Error("Expected Foo-Bar: baz")
}
buf.Reset()
- io.Copy(buf, part)
+ if _, err := io.Copy(buf, part); err != nil {
+ t.Errorf("part 1 copy: %v", err)
+ }
expectEq(t, "My value\r\nThe end.",
buf.String(), "Value of first part")
// Part2
part, err = reader.NextPart()
+ if err != nil {
+ t.Fatalf("Expected part2; got: %v", err)
+ return
+ }
+ if e, g := "bigsection", part.Header.Get("name"); e != g {
+ t.Errorf("part2's name header: expected %q, got %q", e, g)
+ }
+ buf.Reset()
+ if _, err := io.Copy(buf, part); err != nil {
+ t.Errorf("part 2 copy: %v", err)
+ }
+ s := buf.String()
+ if len(s) != len(longLine) {
+ t.Errorf("part2 body expected long line of length %d; got length %d",
+ len(longLine), len(s))
+ }
+ if s != longLine {
+ t.Errorf("part2 long body didn't match")
+ }
+
+ // Part3
+ part, err = reader.NextPart()
if part == nil || err != nil {
- t.Error("Expected part2")
+ t.Error("Expected part3")
return
}
if part.Header.Get("foo-bar") != "bazb" {
t.Error("Expected foo-bar: bazb")
}
buf.Reset()
- io.Copy(buf, part)
+ if _, err := io.Copy(buf, part); err != nil {
+ t.Errorf("part 3 copy: %v", err)
+ }
expectEq(t, "Line 1\r\nLine 2\r\nLine 3 ends in a newline, but just one.\r\n",
- buf.String(), "Value of second part")
+ buf.String(), "body of part 3")
- // Part3
+ // Part4
part, err = reader.NextPart()
if part == nil || err != nil {
- t.Error("Expected part3 without errors")
+ t.Error("Expected part 4 without errors")
return
}
- // Non-existent part4
+ // Non-existent part5
part, err = reader.NextPart()
if part != nil {
- t.Error("Didn't expect a third part.")
+ t.Error("Didn't expect a fifth part.")
}
- if err != nil {
- t.Errorf("Unexpected error getting third part: %v", err)
+ if err != os.EOF {
+ t.Errorf("On fifth part expected os.EOF; got %v", err)
}
}
@@ -200,8 +247,8 @@ func TestVariousTextLineEndings(t *testing.T) {
if part != nil {
t.Errorf("Unexpected part in test %d", testNum)
}
- if err != nil {
- t.Errorf("Unexpected error in test %d: %v", testNum, err)
+ if err != os.EOF {
+ t.Errorf("On test %d expected os.EOF; got %v", testNum, err)
}
}
@@ -237,3 +284,59 @@ func TestLineLimit(t *testing.T) {
t.Errorf("expected to read < %d bytes; read %d", maxReadThreshold, mr.n)
}
}
+
+func TestMultipartTruncated(t *testing.T) {
+ testBody := `
+This is a multi-part message. This line is ignored.
+--MyBoundary
+foo-bar: baz
+
+Oh no, premature EOF!
+`
+ body := strings.Replace(testBody, "\n", "\r\n", -1)
+ bodyReader := strings.NewReader(body)
+ r := NewReader(bodyReader, "MyBoundary")
+
+ part, err := r.NextPart()
+ if err != nil {
+ t.Fatalf("didn't get a part")
+ }
+ _, err = io.Copy(ioutil.Discard, part)
+ if err != io.ErrUnexpectedEOF {
+ t.Fatalf("expected error io.ErrUnexpectedEOF; got %v", err)
+ }
+}
+
+func TestZeroLengthBody(t *testing.T) {
+ testBody := strings.Replace(`
+This is a multi-part message. This line is ignored.
+--MyBoundary
+foo: bar
+
+
+--MyBoundary--
+`, "\n", "\r\n", -1)
+ r := NewReader(strings.NewReader(testBody), "MyBoundary")
+ part, err := r.NextPart()
+ if err != nil {
+ t.Fatalf("didn't get a part")
+ }
+ n, err := io.Copy(ioutil.Discard, part)
+ if err != nil {
+ t.Errorf("error reading part: %v", err)
+ }
+ if n != 0 {
+ t.Errorf("read %d bytes; expected 0", n)
+ }
+}
+
+type slowReader struct {
+ r io.Reader
+}
+
+func (s *slowReader) Read(p []byte) (int, os.Error) {
+ if len(p) == 0 {
+ return s.r.Read(p)
+ }
+ return s.r.Read(p[:1])
+}
diff --git a/src/pkg/mime/multipart/writer.go b/src/pkg/mime/multipart/writer.go
new file mode 100644
index 000000000..74aa7be1c
--- /dev/null
+++ b/src/pkg/mime/multipart/writer.go
@@ -0,0 +1,160 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package multipart
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/textproto"
+ "os"
+ "rand"
+ "strings"
+)
+
+// Writer is used to generate multipart messages.
+type Writer struct {
+ // Boundary is the random boundary string between
+ // parts. NewWriter will generate this but it must
+ // not be changed after a part has been created.
+ // Setting this to an invalid value will generate
+ // malformed messages.
+ Boundary string
+
+ w io.Writer
+ lastpart *part
+}
+
+// NewWriter returns a new multipart Writer with a random boundary,
+// writing to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ Boundary: randomBoundary(),
+ }
+}
+
+// FormDataContentType returns the Content-Type for an HTTP
+// multipart/form-data with this Writer's Boundary.
+func (w *Writer) FormDataContentType() string {
+ return "multipart/form-data; boundary=" + w.Boundary
+}
+
+const randChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func randomBoundary() string {
+ var buf [60]byte
+ for i := range buf {
+ buf[i] = randChars[rand.Intn(len(randChars))]
+ }
+ return string(buf[:])
+}
+
+// CreatePart creates a new multipart section with the provided
+// header. The previous part, if still open, is closed. The body of
+// the part should be written to the returned WriteCloser. Closing the
+// returned WriteCloser after writing is optional.
+func (w *Writer) CreatePart(header textproto.MIMEHeader) (io.WriteCloser, os.Error) {
+ if w.lastpart != nil {
+ if err := w.lastpart.Close(); err != nil {
+ return nil, err
+ }
+ }
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "\r\n--%s\r\n", w.Boundary)
+ // TODO(bradfitz): move this to textproto.MIMEHeader.Write(w), have it sort
+ // and clean, like http.Header.Write(w) does.
+ for k, vv := range header {
+ for _, v := range vv {
+ fmt.Fprintf(&b, "%s: %s\r\n", k, v)
+ }
+ }
+ fmt.Fprintf(&b, "\r\n")
+ _, err := io.Copy(w.w, &b)
+ if err != nil {
+ return nil, err
+ }
+ p := &part{
+ mw: w,
+ }
+ w.lastpart = p
+ return p, nil
+}
+
+func escapeQuotes(s string) string {
+ s = strings.Replace(s, "\\", "\\\\", -1)
+ s = strings.Replace(s, "\"", "\\\"", -1)
+ return s
+}
+
+// CreateFormFile is a convenience wrapper around CreatePart. It creates
+// a new form-data header with the provided field name and file name.
+func (w *Writer) CreateFormFile(fieldname, filename string) (io.WriteCloser, os.Error) {
+ h := make(textproto.MIMEHeader)
+ h.Set("Content-Disposition",
+ fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
+ escapeQuotes(fieldname), escapeQuotes(filename)))
+ h.Set("Content-Type", "application/octet-stream")
+ return w.CreatePart(h)
+}
+
+// CreateFormField is a convenience wrapper around CreatePart. It creates
+// a new form-data header with the provided field name.
+func (w *Writer) CreateFormField(fieldname string) (io.WriteCloser, os.Error) {
+ h := make(textproto.MIMEHeader)
+ h.Set("Content-Disposition",
+ fmt.Sprintf(`form-data; name="%s"`, escapeQuotes(fieldname)))
+ return w.CreatePart(h)
+}
+
+// WriteField is a convenience wrapper around CreateFormField. It creates and
+// writes a part with the provided name and value.
+func (w *Writer) WriteField(fieldname, value string) os.Error {
+ p, err := w.CreateFormField(fieldname)
+ if err != nil {
+ return err
+ }
+ _, err = p.Write([]byte(value))
+ if err != nil {
+ return err
+ }
+ return p.Close()
+}
+
+// Close finishes the multipart message. It closes the previous part,
+// if still open, and writes the trailing boundary end line to the
+// output.
+func (w *Writer) Close() os.Error {
+ if w.lastpart != nil {
+ if err := w.lastpart.Close(); err != nil {
+ return err
+ }
+ w.lastpart = nil
+ }
+ _, err := fmt.Fprintf(w.w, "\r\n--%s--\r\n", w.Boundary)
+ return err
+}
+
+type part struct {
+ mw *Writer
+ closed bool
+ we os.Error // last error that occurred writing
+}
+
+func (p *part) Close() os.Error {
+ p.closed = true
+ return p.we
+}
+
+func (p *part) Write(d []byte) (n int, err os.Error) {
+ if p.closed {
+ return 0, os.NewError("multipart: Write after Close")
+ }
+ n, err = p.mw.w.Write(d)
+ if err != nil {
+ p.we = err
+ }
+ return
+}
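
A minimal usage sketch of the new Writer API added above (the field name, file name, and contents are illustrative, not taken from the change; error handling is kept simple):

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

func main() {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)

	// Hypothetical form field, for illustration only.
	if err := w.WriteField("comment", "hello"); err != nil {
		fmt.Println("WriteField:", err)
		return
	}

	// Hypothetical file part, for illustration only.
	fw, err := w.CreateFormFile("upload", "notes.txt")
	if err != nil {
		fmt.Println("CreateFormFile:", err)
		return
	}
	fw.Write([]byte("file contents"))

	// Close writes the trailing boundary; FormDataContentType gives
	// the matching Content-Type header value for an HTTP request.
	if err := w.Close(); err != nil {
		fmt.Println("Close:", err)
		return
	}
	fmt.Println(w.FormDataContentType())
	fmt.Println(body.Len(), "bytes of multipart body")
}

The writer_test.go below exercises the same calls and then reads the result back with NewReader.
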
diff --git a/src/pkg/mime/multipart/writer_test.go b/src/pkg/mime/multipart/writer_test.go
new file mode 100644
index 000000000..b85fbf877
--- /dev/null
+++ b/src/pkg/mime/multipart/writer_test.go
@@ -0,0 +1,71 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package multipart
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+)
+
+func TestWriter(t *testing.T) {
+ fileContents := []byte("my file contents")
+
+ var b bytes.Buffer
+ w := NewWriter(&b)
+ {
+ part, err := w.CreateFormFile("myfile", "my-file.txt")
+ if err != nil {
+ t.Fatalf("CreateFormFile: %v", err)
+ }
+ part.Write(fileContents)
+ err = w.WriteField("key", "val")
+ if err != nil {
+ t.Fatalf("CreateFormFieldValue: %v", err)
+ }
+ part.Write([]byte("val"))
+ err = w.Close()
+ if err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ }
+
+ r := NewReader(&b, w.Boundary)
+
+ part, err := r.NextPart()
+ if err != nil {
+ t.Fatalf("part 1: %v", err)
+ }
+ if g, e := part.FormName(), "myfile"; g != e {
+ t.Errorf("part 1: want form name %q, got %q", e, g)
+ }
+ slurp, err := ioutil.ReadAll(part)
+ if err != nil {
+ t.Fatalf("part 1: ReadAll: %v", err)
+ }
+ if e, g := string(fileContents), string(slurp); e != g {
+ t.Errorf("part 1: want contents %q, got %q", e, g)
+ }
+
+ part, err = r.NextPart()
+ if err != nil {
+ t.Fatalf("part 2: %v", err)
+ }
+ if g, e := part.FormName(), "key"; g != e {
+ t.Errorf("part 2: want form name %q, got %q", e, g)
+ }
+ slurp, err = ioutil.ReadAll(part)
+ if err != nil {
+ t.Fatalf("part 2: ReadAll: %v", err)
+ }
+ if e, g := "val", string(slurp); e != g {
+ t.Errorf("part 2: want contents %q, got %q", e, g)
+ }
+
+ part, err = r.NextPart()
+ if part != nil || err == nil {
+ t.Fatalf("expected end of parts; got %v, %v", part, err)
+ }
+}
diff --git a/src/pkg/net/Makefile b/src/pkg/net/Makefile
index 221871cb1..376e9c6dc 100644
--- a/src/pkg/net/Makefile
+++ b/src/pkg/net/Makefile
@@ -29,6 +29,7 @@ GOFILES_freebsd=\
dnsconfig.go\
dnsclient.go\
port.go\
+ sock_bsd.go\
CGOFILES_freebsd=\
cgo_bsd.go\
@@ -41,6 +42,7 @@ GOFILES_darwin=\
dnsconfig.go\
dnsclient.go\
port.go\
+ sock_bsd.go\
CGOFILES_darwin=\
cgo_bsd.go\
@@ -53,6 +55,7 @@ GOFILES_linux=\
dnsconfig.go\
dnsclient.go\
port.go\
+ sock_linux.go\
ifeq ($(GOARCH),arm)
# ARM has no cgo, so use the stubs.
@@ -67,6 +70,7 @@ GOFILES_windows=\
cgo_stub.go\
resolv_windows.go\
file_windows.go\
+ sock_windows.go\
GOFILES+=$(GOFILES_$(GOOS))
ifneq ($(CGOFILES_$(GOOS)),)
diff --git a/src/pkg/net/dial.go b/src/pkg/net/dial.go
index 16896b426..ead775fe6 100644
--- a/src/pkg/net/dial.go
+++ b/src/pkg/net/dial.go
@@ -60,7 +60,7 @@ func Dial(net, addr string) (c Conn, err os.Error) {
return c, nil
case "ip", "ip4", "ip6":
var ra *IPAddr
- if ra, err = ResolveIPAddr(raddr); err != nil {
+ if ra, err = ResolveIPAddr(net, raddr); err != nil {
goto Error
}
c, err := DialIP(net, nil, ra)
@@ -139,12 +139,13 @@ func ListenPacket(net, laddr string) (c PacketConn, err os.Error) {
return c, nil
}
- if i := last(net, ':'); i > 0 {
- switch net[0:i] {
+ var rawnet string
+ if rawnet, _, err = splitNetProto(net); err != nil {
+ switch rawnet {
case "ip", "ip4", "ip6":
var la *IPAddr
if laddr != "" {
- if la, err = ResolveIPAddr(laddr); err != nil {
+ if la, err = ResolveIPAddr(rawnet, laddr); err != nil {
return nil, err
}
}
diff --git a/src/pkg/net/dialgoogle_test.go b/src/pkg/net/dialgoogle_test.go
index c25089ba4..9ad1770da 100644
--- a/src/pkg/net/dialgoogle_test.go
+++ b/src/pkg/net/dialgoogle_test.go
@@ -41,20 +41,6 @@ func doDial(t *testing.T, network, addr string) {
fd.Close()
}
-var googleaddrs = []string{
- "%d.%d.%d.%d:80",
- "www.google.com:80",
- "%d.%d.%d.%d:http",
- "www.google.com:http",
- "%03d.%03d.%03d.%03d:0080",
- "[::ffff:%d.%d.%d.%d]:80",
- "[::ffff:%02x%02x:%02x%02x]:80",
- "[0:0:0:0:0000:ffff:%d.%d.%d.%d]:80",
- "[0:0:0:0:000000:ffff:%d.%d.%d.%d]:80",
- "[0:0:0:0:0:ffff::%d.%d.%d.%d]:80",
- "[2001:4860:0:2001::68]:80", // ipv6.google.com; removed if ipv6 flag not set
-}
-
func TestLookupCNAME(t *testing.T) {
if testing.Short() {
// Don't use external network.
@@ -67,16 +53,25 @@ func TestLookupCNAME(t *testing.T) {
}
}
-func TestDialGoogle(t *testing.T) {
+var googleaddrsipv4 = []string{
+ "%d.%d.%d.%d:80",
+ "www.google.com:80",
+ "%d.%d.%d.%d:http",
+ "www.google.com:http",
+ "%03d.%03d.%03d.%03d:0080",
+ "[::ffff:%d.%d.%d.%d]:80",
+ "[::ffff:%02x%02x:%02x%02x]:80",
+ "[0:0:0:0:0000:ffff:%d.%d.%d.%d]:80",
+ "[0:0:0:0:000000:ffff:%d.%d.%d.%d]:80",
+ "[0:0:0:0:0:ffff::%d.%d.%d.%d]:80",
+}
+
+func TestDialGoogleIPv4(t *testing.T) {
if testing.Short() {
// Don't use external network.
t.Logf("skipping external network test during -short")
return
}
- // If no ipv6 tunnel, don't try the last address.
- if !*ipv6 {
- googleaddrs[len(googleaddrs)-1] = ""
- }
// Insert an actual IPv4 address for google.com
// into the table.
@@ -95,14 +90,14 @@ func TestDialGoogle(t *testing.T) {
t.Fatalf("no IPv4 addresses for www.google.com")
}
- for i, s := range googleaddrs {
+ for i, s := range googleaddrsipv4 {
if strings.Contains(s, "%") {
- googleaddrs[i] = fmt.Sprintf(s, ip[0], ip[1], ip[2], ip[3])
+ googleaddrsipv4[i] = fmt.Sprintf(s, ip[0], ip[1], ip[2], ip[3])
}
}
- for i := 0; i < len(googleaddrs); i++ {
- addr := googleaddrs[i]
+ for i := 0; i < len(googleaddrsipv4); i++ {
+ addr := googleaddrsipv4[i]
if addr == "" {
continue
}
@@ -110,20 +105,65 @@ func TestDialGoogle(t *testing.T) {
doDial(t, "tcp", addr)
if addr[0] != '[' {
doDial(t, "tcp4", addr)
-
- if !preferIPv4 {
- // make sure preferIPv4 flag works.
- preferIPv4 = true
+ if supportsIPv6 {
+ // make sure syscall.SocketDisableIPv6 flag works.
syscall.SocketDisableIPv6 = true
+ doDial(t, "tcp", addr)
doDial(t, "tcp4", addr)
syscall.SocketDisableIPv6 = false
- preferIPv4 = false
}
}
+ }
+}
+
+var googleaddrsipv6 = []string{
+ "[%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x]:80",
+ "ipv6.google.com:80",
+ "[%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x]:http",
+ "ipv6.google.com:http",
+}
+
+func TestDialGoogleIPv6(t *testing.T) {
+ if testing.Short() {
+ // Don't use external network.
+ t.Logf("skipping external network test during -short")
+ return
+ }
+ // Only run tcp6 if the kernel will take it.
+ if !*ipv6 || !supportsIPv6 {
+ return
+ }
+
+ // Insert an actual IPv6 address for ipv6.google.com
+ // into the table.
+ addrs, err := LookupIP("ipv6.google.com")
+ if err != nil {
+ t.Fatalf("lookup ipv6.google.com: %v", err)
+ }
+ var ip IP
+ for _, addr := range addrs {
+ if x := addr.To16(); x != nil {
+ ip = x
+ break
+ }
+ }
+ if ip == nil {
+ t.Fatalf("no IPv6 addresses for ipv6.google.com")
+ }
+
+ for i, s := range googleaddrsipv6 {
+ if strings.Contains(s, "%") {
+ googleaddrsipv6[i] = fmt.Sprintf(s, ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7], ip[8], ip[9], ip[10], ip[11], ip[12], ip[13], ip[14], ip[15])
+ }
+ }
- // Only run tcp6 if the kernel will take it.
- if kernelSupportsIPv6() {
- doDial(t, "tcp6", addr)
+ for i := 0; i < len(googleaddrsipv6); i++ {
+ addr := googleaddrsipv6[i]
+ if addr == "" {
+ continue
}
+ t.Logf("-- %s --", addr)
+ doDial(t, "tcp", addr)
+ doDial(t, "tcp6", addr)
}
}
diff --git a/src/pkg/net/dnsclient.go b/src/pkg/net/dnsclient.go
index 89f2409bf..ae9ca8430 100644
--- a/src/pkg/net/dnsclient.go
+++ b/src/pkg/net/dnsclient.go
@@ -121,15 +121,19 @@ func answer(name, server string, dns *dnsMsg, qtype uint16) (cname string, addrs
Cname:
for cnameloop := 0; cnameloop < 10; cnameloop++ {
addrs = addrs[0:0]
- for i := 0; i < len(dns.answer); i++ {
- rr := dns.answer[i]
+ for _, rr := range dns.answer {
+ if _, justHeader := rr.(*dnsRR_Header); justHeader {
+ // Corrupt record: we only have a
+ // header. That header might say it's
+ // of type qtype, but we don't
+ // actually have it. Skip.
+ continue
+ }
h := rr.Header()
if h.Class == dnsClassINET && h.Name == name {
switch h.Rrtype {
case qtype:
- n := len(addrs)
- addrs = addrs[0 : n+1]
- addrs[n] = rr
+ addrs = append(addrs, rr)
case dnsTypeCNAME:
// redirect to cname
name = rr.(*dnsRR_CNAME).Cname
@@ -181,8 +185,7 @@ func tryOneName(cfg *dnsConfig, name string, qtype uint16) (cname string, addrs
func convertRR_A(records []dnsRR) []IP {
addrs := make([]IP, len(records))
- for i := 0; i < len(records); i++ {
- rr := records[i]
+ for i, rr := range records {
a := rr.(*dnsRR_A).A
addrs[i] = IPv4(byte(a>>24), byte(a>>16), byte(a>>8), byte(a))
}
@@ -191,8 +194,7 @@ func convertRR_A(records []dnsRR) []IP {
func convertRR_AAAA(records []dnsRR) []IP {
addrs := make([]IP, len(records))
- for i := 0; i < len(records); i++ {
- rr := records[i]
+ for i, rr := range records {
a := make(IP, 16)
copy(a, rr.(*dnsRR_AAAA).AAAA[:])
addrs[i] = a
@@ -384,9 +386,7 @@ func goLookupCNAME(name string) (cname string, err os.Error) {
if err != nil {
return
}
- if len(rr) >= 0 {
- cname = rr[0].(*dnsRR_CNAME).Cname
- }
+ cname = rr[0].(*dnsRR_CNAME).Cname
return
}
@@ -398,10 +398,49 @@ type SRV struct {
Weight uint16
}
+// byPriorityWeight sorts SRV records by ascending priority and weight.
+type byPriorityWeight []*SRV
+
+func (s byPriorityWeight) Len() int { return len(s) }
+
+func (s byPriorityWeight) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s byPriorityWeight) Less(i, j int) bool {
+ return s[i].Priority < s[j].Priority ||
+ (s[i].Priority == s[j].Priority && s[i].Weight < s[j].Weight)
+}
+
+// shuffleSRVByWeight shuffles SRV records by weight using the algorithm
+// described in RFC 2782.
+func shuffleSRVByWeight(addrs []*SRV) {
+ sum := 0
+ for _, addr := range addrs {
+ sum += int(addr.Weight)
+ }
+ for sum > 0 && len(addrs) > 1 {
+ s := 0
+ n := rand.Intn(sum + 1)
+ for i := range addrs {
+ s += int(addrs[i].Weight)
+ if s >= n {
+ if i > 0 {
+ t := addrs[i]
+ copy(addrs[1:i+1], addrs[0:i])
+ addrs[0] = t
+ }
+ break
+ }
+ }
+ sum -= int(addrs[0].Weight)
+ addrs = addrs[1:]
+ }
+}
+
// LookupSRV tries to resolve an SRV query of the given service,
// protocol, and domain name, as specified in RFC 2782. In most cases
// the proto argument can be the same as the corresponding
-// Addr.Network().
+// Addr.Network(). The returned records are sorted by priority
+// and randomized by weight within a priority.
func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err os.Error) {
target := "_" + service + "._" + proto + "." + name
var records []dnsRR
@@ -410,10 +449,19 @@ func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err os.
return
}
addrs = make([]*SRV, len(records))
- for i := 0; i < len(records); i++ {
- r := records[i].(*dnsRR_SRV)
+ for i, rr := range records {
+ r := rr.(*dnsRR_SRV)
addrs[i] = &SRV{r.Target, r.Port, r.Priority, r.Weight}
}
+ sort.Sort(byPriorityWeight(addrs))
+ i := 0
+ for j := 1; j < len(addrs); j++ {
+ if addrs[i].Priority != addrs[j].Priority {
+ shuffleSRVByWeight(addrs[i:j])
+ i = j
+ }
+ }
+ shuffleSRVByWeight(addrs[i:len(addrs)])
return
}
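
A caller-side sketch of the updated LookupSRV above: results now come back sorted by priority, with the order inside each priority randomized by weight per RFC 2782 (the service, protocol, and domain below are illustrative):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Illustrative query; any SRV-published service works the same way.
	cname, addrs, err := net.LookupSRV("xmpp-server", "tcp", "example.com")
	if err != nil {
		fmt.Println("LookupSRV:", err)
		return
	}
	fmt.Println("canonical name:", cname)
	// addrs is already in the order a client should try the targets.
	for _, srv := range addrs {
		fmt.Printf("%s:%d (priority %d, weight %d)\n",
			srv.Target, srv.Port, srv.Priority, srv.Weight)
	}
}
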
diff --git a/src/pkg/net/dnsconfig.go b/src/pkg/net/dnsconfig.go
index 26f0e04e9..54e334342 100644
--- a/src/pkg/net/dnsconfig.go
+++ b/src/pkg/net/dnsconfig.go
@@ -30,7 +30,6 @@ func (e *DNSConfigError) String() string {
func (e *DNSConfigError) Timeout() bool { return false }
func (e *DNSConfigError) Temporary() bool { return false }
-
// See resolv.conf(5) on a Linux machine.
// TODO(rsc): Supposed to call uname() and chop the beginning
// of the host name to get the default search domain.
diff --git a/src/pkg/net/dnsmsg.go b/src/pkg/net/dnsmsg.go
index 7b8e5c6d3..0ba69a0ce 100644
--- a/src/pkg/net/dnsmsg.go
+++ b/src/pkg/net/dnsmsg.go
@@ -117,7 +117,6 @@ type dnsRR interface {
Header() *dnsRR_Header
}
-
// Specific DNS RR formats for each query type.
type dnsRR_CNAME struct {
@@ -715,24 +714,35 @@ func (dns *dnsMsg) Unpack(msg []byte) bool {
// Arrays.
dns.question = make([]dnsQuestion, dh.Qdcount)
- dns.answer = make([]dnsRR, dh.Ancount)
- dns.ns = make([]dnsRR, dh.Nscount)
- dns.extra = make([]dnsRR, dh.Arcount)
+ dns.answer = make([]dnsRR, 0, dh.Ancount)
+ dns.ns = make([]dnsRR, 0, dh.Nscount)
+ dns.extra = make([]dnsRR, 0, dh.Arcount)
+
+ var rec dnsRR
for i := 0; i < len(dns.question); i++ {
off, ok = unpackStruct(&dns.question[i], msg, off)
}
- for i := 0; i < len(dns.answer); i++ {
- dns.answer[i], off, ok = unpackRR(msg, off)
- }
- for i := 0; i < len(dns.ns); i++ {
- dns.ns[i], off, ok = unpackRR(msg, off)
+ for i := 0; i < int(dh.Ancount); i++ {
+ rec, off, ok = unpackRR(msg, off)
+ if !ok {
+ return false
+ }
+ dns.answer = append(dns.answer, rec)
}
- for i := 0; i < len(dns.extra); i++ {
- dns.extra[i], off, ok = unpackRR(msg, off)
+ for i := 0; i < int(dh.Nscount); i++ {
+ rec, off, ok = unpackRR(msg, off)
+ if !ok {
+ return false
+ }
+ dns.ns = append(dns.ns, rec)
}
- if !ok {
- return false
+ for i := 0; i < int(dh.Arcount); i++ {
+ rec, off, ok = unpackRR(msg, off)
+ if !ok {
+ return false
+ }
+ dns.extra = append(dns.extra, rec)
}
// if off != len(msg) {
// println("extra bytes in dns packet", off, "<", len(msg));
diff --git a/src/pkg/net/dnsmsg_test.go b/src/pkg/net/dnsmsg_test.go
new file mode 100644
index 000000000..20c9f02b0
--- /dev/null
+++ b/src/pkg/net/dnsmsg_test.go
@@ -0,0 +1,107 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "encoding/hex"
+ "runtime"
+ "testing"
+)
+
+func TestDNSParseSRVReply(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ return
+ }
+ data, err := hex.DecodeString(dnsSRVReply)
+ if err != nil {
+ t.Fatal(err)
+ }
+ msg := new(dnsMsg)
+ ok := msg.Unpack(data)
+ if !ok {
+ t.Fatalf("unpacking packet failed")
+ }
+ if g, e := len(msg.answer), 5; g != e {
+ t.Errorf("len(msg.answer) = %d; want %d", g, e)
+ }
+ for idx, rr := range msg.answer {
+ if g, e := rr.Header().Rrtype, uint16(dnsTypeSRV); g != e {
+ t.Errorf("rr[%d].Header().Rrtype = %d; want %d", idx, g, e)
+ }
+ if _, ok := rr.(*dnsRR_SRV); !ok {
+ t.Errorf("answer[%d] = %T; want *dnsRR_SRV", idx, rr)
+ }
+ }
+ _, addrs, err := answer("_xmpp-server._tcp.google.com.", "foo:53", msg, uint16(dnsTypeSRV))
+ if err != nil {
+ t.Fatalf("answer: %v", err)
+ }
+ if g, e := len(addrs), 5; g != e {
+ t.Errorf("len(addrs) = %d; want %d", g, e)
+ t.Logf("addrs = %#v", addrs)
+ }
+}
+
+func TestDNSParseCorruptSRVReply(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ return
+ }
+ data, err := hex.DecodeString(dnsSRVCorruptReply)
+ if err != nil {
+ t.Fatal(err)
+ }
+ msg := new(dnsMsg)
+ ok := msg.Unpack(data)
+ if !ok {
+ t.Fatalf("unpacking packet failed")
+ }
+ if g, e := len(msg.answer), 5; g != e {
+ t.Errorf("len(msg.answer) = %d; want %d", g, e)
+ }
+ for idx, rr := range msg.answer {
+ if g, e := rr.Header().Rrtype, uint16(dnsTypeSRV); g != e {
+ t.Errorf("rr[%d].Header().Rrtype = %d; want %d", idx, g, e)
+ }
+ if idx == 4 {
+ if _, ok := rr.(*dnsRR_Header); !ok {
+ t.Errorf("answer[%d] = %T; want *dnsRR_Header", idx, rr)
+ }
+ } else {
+ if _, ok := rr.(*dnsRR_SRV); !ok {
+ t.Errorf("answer[%d] = %T; want *dnsRR_SRV", idx, rr)
+ }
+ }
+ }
+ _, addrs, err := answer("_xmpp-server._tcp.google.com.", "foo:53", msg, uint16(dnsTypeSRV))
+ if err != nil {
+ t.Fatalf("answer: %v", err)
+ }
+ if g, e := len(addrs), 4; g != e {
+ t.Errorf("len(addrs) = %d; want %d", g, e)
+ t.Logf("addrs = %#v", addrs)
+ }
+}
+
+// Valid DNS SRV reply
+const dnsSRVReply = "0901818000010005000000000c5f786d70702d736572766572045f74637006676f6f67" +
+ "6c6503636f6d0000210001c00c002100010000012c00210014000014950c786d70702d" +
+ "73657276657234016c06676f6f676c6503636f6d00c00c002100010000012c00210014" +
+ "000014950c786d70702d73657276657232016c06676f6f676c6503636f6d00c00c0021" +
+ "00010000012c00210014000014950c786d70702d73657276657233016c06676f6f676c" +
+ "6503636f6d00c00c002100010000012c00200005000014950b786d70702d7365727665" +
+ "72016c06676f6f676c6503636f6d00c00c002100010000012c00210014000014950c78" +
+ "6d70702d73657276657231016c06676f6f676c6503636f6d00"
+
+// Corrupt DNS SRV reply, with its final RR having a bogus length
+// (perhaps it was truncated, or it's malicious). The mutation is the
+// capital "FF" below, instead of the proper "21".
+const dnsSRVCorruptReply = "0901818000010005000000000c5f786d70702d736572766572045f74637006676f6f67" +
+ "6c6503636f6d0000210001c00c002100010000012c00210014000014950c786d70702d" +
+ "73657276657234016c06676f6f676c6503636f6d00c00c002100010000012c00210014" +
+ "000014950c786d70702d73657276657232016c06676f6f676c6503636f6d00c00c0021" +
+ "00010000012c00210014000014950c786d70702d73657276657233016c06676f6f676c" +
+ "6503636f6d00c00c002100010000012c00200005000014950b786d70702d7365727665" +
+ "72016c06676f6f676c6503636f6d00c00c002100010000012c00FF0014000014950c78" +
+ "6d70702d73657276657231016c06676f6f676c6503636f6d00"
diff --git a/src/pkg/net/file_test.go b/src/pkg/net/file_test.go
index 1ec05fdee..bd1e2c9d7 100644
--- a/src/pkg/net/file_test.go
+++ b/src/pkg/net/file_test.go
@@ -62,7 +62,7 @@ func TestFileListener(t *testing.T) {
}
testFileListener(t, "tcp", "127.0.0.1")
testFileListener(t, "tcp", "127.0.0.1")
- if kernelSupportsIPv6() {
+ if supportsIPv6 && supportsIPv4map {
testFileListener(t, "tcp", "[::ffff:127.0.0.1]")
testFileListener(t, "tcp", "127.0.0.1")
testFileListener(t, "tcp", "[::ffff:127.0.0.1]")
@@ -121,8 +121,10 @@ func TestFilePacketConn(t *testing.T) {
}
testFilePacketConnListen(t, "udp", "127.0.0.1:0")
testFilePacketConnDial(t, "udp", "127.0.0.1:12345")
- if kernelSupportsIPv6() {
+ if supportsIPv6 {
testFilePacketConnListen(t, "udp", "[::1]:0")
+ }
+ if supportsIPv6 && supportsIPv4map {
testFilePacketConnDial(t, "udp", "[::ffff:127.0.0.1]:12345")
}
if syscall.OS == "linux" {
diff --git a/src/pkg/net/ip.go b/src/pkg/net/ip.go
index 61b2c687e..a3000af8a 100644
--- a/src/pkg/net/ip.go
+++ b/src/pkg/net/ip.go
@@ -75,10 +75,71 @@ var (
// Well-known IPv6 addresses
var (
- IPzero = make(IP, IPv6len) // all zeros
- IPv6loopback = IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})
+ IPv6zero = IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ IPv6unspecified = IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ IPv6loopback = IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
+ IPv6interfacelocalallnodes = IP{0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}
+ IPv6linklocalallnodes = IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}
+ IPv6linklocalallrouters = IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x02}
)
+// IsUnspecified returns true if ip is an unspecified address.
+func (ip IP) IsUnspecified() bool {
+ if ip.Equal(IPv4zero) || ip.Equal(IPv6unspecified) {
+ return true
+ }
+ return false
+}
+
+// IsLoopback returns true if ip is a loopback address.
+func (ip IP) IsLoopback() bool {
+ if ip4 := ip.To4(); ip4 != nil && ip4[0] == 127 {
+ return true
+ }
+ return ip.Equal(IPv6loopback)
+}
+
+// IsMulticast returns true if ip is a multicast address.
+func (ip IP) IsMulticast() bool {
+ if ip4 := ip.To4(); ip4 != nil && ip4[0]&0xf0 == 0xe0 {
+ return true
+ }
+ return ip[0] == 0xff
+}
+
+// IsInterfaceLocalMulticast returns true if ip is
+// an interface-local multicast address.
+func (ip IP) IsInterfaceLocalMulticast() bool {
+ return len(ip) == IPv6len && ip[0] == 0xff && ip[1]&0x0f == 0x01
+}
+
+// IsLinkLocalMulticast returns true if ip is a link-local
+// multicast address.
+func (ip IP) IsLinkLocalMulticast() bool {
+ if ip4 := ip.To4(); ip4 != nil && ip4[0] == 224 && ip4[1] == 0 && ip4[2] == 0 {
+ return true
+ }
+ return ip[0] == 0xff && ip[1]&0x0f == 0x02
+}
+
+// IsLinkLocalUnicast returns true if ip is a link-local
+// unicast address.
+func (ip IP) IsLinkLocalUnicast() bool {
+ if ip4 := ip.To4(); ip4 != nil && ip4[0] == 169 && ip4[1] == 254 {
+ return true
+ }
+ return ip[0] == 0xfe && ip[1]&0xc0 == 0x80
+}
+
+// IsGlobalUnicast returns true if ip is a global unicast
+// address.
+func (ip IP) IsGlobalUnicast() bool {
+ return !ip.IsUnspecified() &&
+ !ip.IsLoopback() &&
+ !ip.IsMulticast() &&
+ !ip.IsLinkLocalUnicast()
+}
+
// Is p all zeros?
func isZeros(p IP) bool {
for i := 0; i < len(p); i++ {
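
A small caller-side sketch of the predicates added above (the sample addresses are illustrative); the table-driven tests in ip_test.go below cover the same ground more thoroughly:

package main

import (
	"fmt"
	"net"
)

func main() {
	// A few illustrative addresses: IPv4 loopback, IPv4 multicast,
	// IPv6 link-local, and an IPv6 documentation-range address.
	for _, s := range []string{"127.0.0.1", "224.0.0.251", "fe80::1", "2001:db8::1"} {
		ip := net.ParseIP(s)
		fmt.Printf("%-12s loopback=%v multicast=%v linklocal=%v global=%v\n",
			s, ip.IsLoopback(), ip.IsMulticast(),
			ip.IsLinkLocalUnicast(), ip.IsGlobalUnicast())
	}
}
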
diff --git a/src/pkg/net/ip_test.go b/src/pkg/net/ip_test.go
index 2008953ef..b189b10c4 100644
--- a/src/pkg/net/ip_test.go
+++ b/src/pkg/net/ip_test.go
@@ -9,6 +9,7 @@ import (
"reflect"
"testing"
"os"
+ "runtime"
)
func isEqual(a, b []byte) bool {
@@ -31,11 +32,7 @@ var parseiptests = []struct {
{"abc", nil},
{"123:", nil},
{"::ffff:127.0.0.1", IPv4(127, 0, 0, 1)},
- {"2001:4860:0:2001::68",
- IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01,
- 0, 0, 0, 0, 0, 0, 0x00, 0x68,
- },
- },
+ {"2001:4860:0:2001::68", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}},
{"::ffff:4a7d:1363", IPv4(74, 125, 19, 99)},
}
@@ -52,29 +49,21 @@ var ipstringtests = []struct {
out string
}{
// cf. RFC 5952 (A Recommendation for IPv6 Address Text Representation)
- {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0,
- 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1},
+ {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1},
"2001:db8::123:12:1"},
- {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0x1},
+ {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1},
"2001:db8::1"},
- {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0x1,
- 0, 0, 0, 0x1, 0, 0, 0, 0x1},
+ {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0x1, 0, 0, 0, 0x1, 0, 0, 0, 0x1},
"2001:db8:0:1:0:1:0:1"},
- {IP{0x20, 0x1, 0xd, 0xb8, 0, 0x1, 0, 0,
- 0, 0x1, 0, 0, 0, 0x1, 0, 0},
+ {IP{0x20, 0x1, 0xd, 0xb8, 0, 0x1, 0, 0, 0, 0x1, 0, 0, 0, 0x1, 0, 0},
"2001:db8:1:0:1:0:1:0"},
- {IP{0x20, 0x1, 0, 0, 0, 0, 0, 0,
- 0, 0x1, 0, 0, 0, 0, 0, 0x1},
+ {IP{0x20, 0x1, 0, 0, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0x1},
"2001::1:0:0:1"},
- {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0,
- 0, 0x1, 0, 0, 0, 0, 0, 0},
+ {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0},
"2001:db8:0:0:1::"},
- {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0,
- 0, 0x1, 0, 0, 0, 0, 0, 0x1},
+ {IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0x1, 0, 0, 0, 0, 0, 0x1},
"2001:db8::1:0:0:1"},
- {IP{0x20, 0x1, 0xD, 0xB8, 0, 0, 0, 0,
- 0, 0xA, 0, 0xB, 0, 0xC, 0, 0xD},
+ {IP{0x20, 0x1, 0xD, 0xB8, 0, 0, 0, 0, 0, 0xA, 0, 0xB, 0, 0xC, 0, 0xD},
"2001:db8::a:b:c:d"},
}
@@ -143,3 +132,84 @@ func TestJoinHostPort(t *testing.T) {
}
}
}
+
+var ipaftests = []struct {
+ in IP
+ af4 bool
+ af6 bool
+}{
+ {IPv4bcast, true, false},
+ {IPv4allsys, true, false},
+ {IPv4allrouter, true, false},
+ {IPv4zero, true, false},
+ {IPv4(224, 0, 0, 1), true, false},
+ {IPv4(127, 0, 0, 1), true, false},
+ {IPv4(240, 0, 0, 1), true, false},
+ {IPv6unspecified, false, true},
+ {IPv6loopback, false, true},
+ {IPv6interfacelocalallnodes, false, true},
+ {IPv6linklocalallnodes, false, true},
+ {IPv6linklocalallrouters, false, true},
+ {ParseIP("ff05::a:b:c:d"), false, true},
+ {ParseIP("fe80::1:2:3:4"), false, true},
+ {ParseIP("2001:db8::123:12:1"), false, true},
+}
+
+func TestIPAddrFamily(t *testing.T) {
+ for _, tt := range ipaftests {
+ if af := tt.in.To4() != nil; af != tt.af4 {
+ t.Errorf("verifying IPv4 address family for %#q = %v, want %v", tt.in, af, tt.af4)
+ }
+ if af := len(tt.in) == IPv6len && tt.in.To4() == nil; af != tt.af6 {
+ t.Errorf("verifying IPv6 address family for %#q = %v, want %v", tt.in, af, tt.af6)
+ }
+ }
+}
+
+var ipscopetests = []struct {
+ scope func(IP) bool
+ in IP
+ ok bool
+}{
+ {IP.IsUnspecified, IPv4zero, true},
+ {IP.IsUnspecified, IPv4(127, 0, 0, 1), false},
+ {IP.IsUnspecified, IPv6unspecified, true},
+ {IP.IsUnspecified, IPv6interfacelocalallnodes, false},
+ {IP.IsLoopback, IPv4(127, 0, 0, 1), true},
+ {IP.IsLoopback, IPv4(127, 255, 255, 254), true},
+ {IP.IsLoopback, IPv4(128, 1, 2, 3), false},
+ {IP.IsLoopback, IPv6loopback, true},
+ {IP.IsLoopback, IPv6linklocalallrouters, false},
+ {IP.IsMulticast, IPv4(224, 0, 0, 0), true},
+ {IP.IsMulticast, IPv4(239, 0, 0, 0), true},
+ {IP.IsMulticast, IPv4(240, 0, 0, 0), false},
+ {IP.IsMulticast, IPv6linklocalallnodes, true},
+ {IP.IsMulticast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true},
+ {IP.IsMulticast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
+ {IP.IsLinkLocalMulticast, IPv4(224, 0, 0, 0), true},
+ {IP.IsLinkLocalMulticast, IPv4(239, 0, 0, 0), false},
+ {IP.IsLinkLocalMulticast, IPv6linklocalallrouters, true},
+ {IP.IsLinkLocalMulticast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
+ {IP.IsLinkLocalUnicast, IPv4(169, 254, 0, 0), true},
+ {IP.IsLinkLocalUnicast, IPv4(169, 255, 0, 0), false},
+ {IP.IsLinkLocalUnicast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true},
+ {IP.IsLinkLocalUnicast, IP{0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
+ {IP.IsGlobalUnicast, IPv4(240, 0, 0, 0), true},
+ {IP.IsGlobalUnicast, IPv4(232, 0, 0, 0), false},
+ {IP.IsGlobalUnicast, IPv4(169, 254, 0, 0), false},
+ {IP.IsGlobalUnicast, IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1}, true},
+ {IP.IsGlobalUnicast, IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
+ {IP.IsGlobalUnicast, IP{0xff, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false},
+}
+
+func name(f interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
+}
+
+func TestIPAddrScope(t *testing.T) {
+ for _, tt := range ipscopetests {
+ if ok := tt.scope(tt.in); ok != tt.ok {
+ t.Errorf("%s(%#q) = %v, want %v", name(tt.scope), tt.in, ok, tt.ok)
+ }
+ }
+}
diff --git a/src/pkg/net/ipraw_test.go b/src/pkg/net/ipraw_test.go
index ee8c71fc1..7cc9604b5 100644
--- a/src/pkg/net/ipraw_test.go
+++ b/src/pkg/net/ipraw_test.go
@@ -60,7 +60,8 @@ func parsePingReply(p []byte) (id, seq int) {
}
var srchost = flag.String("srchost", "", "Source of the ICMP ECHO request")
-var dsthost = flag.String("dsthost", "localhost", "Destination for the ICMP ECHO request")
+// 127.0.0.1 because this is an IPv4-specific test.
+var dsthost = flag.String("dsthost", "127.0.0.1", "Destination for the ICMP ECHO request")
// test (raw) IP socket using ICMP
func TestICMP(t *testing.T) {
@@ -74,15 +75,15 @@ func TestICMP(t *testing.T) {
err os.Error
)
if *srchost != "" {
- laddr, err = ResolveIPAddr(*srchost)
+ laddr, err = ResolveIPAddr("ip4", *srchost)
if err != nil {
- t.Fatalf(`net.ResolveIPAddr("%v") = %v, %v`, *srchost, laddr, err)
+ t.Fatalf(`net.ResolveIPAddr("ip4", "%v") = %v, %v`, *srchost, laddr, err)
}
}
- raddr, err := ResolveIPAddr(*dsthost)
+ raddr, err := ResolveIPAddr("ip4", *dsthost)
if err != nil {
- t.Fatalf(`net.ResolveIPAddr("%v") = %v, %v`, *dsthost, raddr, err)
+ t.Fatalf(`net.ResolveIPAddr("ip4", "%v") = %v, %v`, *dsthost, raddr, err)
}
c, err := ListenIP("ip4:icmp", laddr)
diff --git a/src/pkg/net/iprawsock.go b/src/pkg/net/iprawsock.go
index 5be6fe4e0..a811027b1 100644
--- a/src/pkg/net/iprawsock.go
+++ b/src/pkg/net/iprawsock.go
@@ -43,7 +43,7 @@ func (a *IPAddr) family() int {
if a == nil || len(a.IP) <= 4 {
return syscall.AF_INET
}
- if ip := a.IP.To4(); ip != nil {
+ if a.IP.To4() != nil {
return syscall.AF_INET
}
return syscall.AF_INET6
@@ -61,10 +61,11 @@ func (a *IPAddr) toAddr() sockaddr {
}
// ResolveIPAddr parses addr as a IP address and resolves domain
-// names to numeric addresses. A literal IPv6 host address must be
+// names to numeric addresses on the network net, which must be
+// "ip", "ip4" or "ip6". A literal IPv6 host address must be
// enclosed in square brackets, as in "[::]".
-func ResolveIPAddr(addr string) (*IPAddr, os.Error) {
- ip, err := hostToIP(addr)
+func ResolveIPAddr(net, addr string) (*IPAddr, os.Error) {
+ ip, err := hostToIP(net, addr)
if err != nil {
return nil, err
}
@@ -234,32 +235,36 @@ func (c *IPConn) WriteTo(b []byte, addr Addr) (n int, err os.Error) {
}
// Convert "host" into IP address.
-func hostToIP(host string) (ip IP, err os.Error) {
+func hostToIP(net, host string) (ip IP, err os.Error) {
var addr IP
// Try as an IP address.
addr = ParseIP(host)
if addr == nil {
+ filter := anyaddr
+ if net != "" && net[len(net)-1] == '4' {
+ filter = ipv4only
+ }
+ if net != "" && net[len(net)-1] == '6' {
+ filter = ipv6only
+ }
// Not an IP address. Try as a DNS name.
addrs, err1 := LookupHost(host)
if err1 != nil {
err = err1
goto Error
}
- addr = firstSupportedAddr(anyaddr, addrs)
+ addr = firstFavoriteAddr(filter, addrs)
if addr == nil {
// should not happen
- err = &AddrError{"LookupHost returned invalid address", addrs[0]}
+ err = &AddrError{"LookupHost returned no suitable address", addrs[0]}
goto Error
}
}
-
return addr, nil
-
Error:
return nil, err
}
-
var protocols map[string]int
func readProtocols() {
@@ -285,7 +290,7 @@ func readProtocols() {
}
}
-func netProtoSplit(netProto string) (net string, proto int, err os.Error) {
+func splitNetProto(netProto string) (net string, proto int, err os.Error) {
onceReadProtocols.Do(readProtocols)
i := last(netProto, ':')
if i < 0 { // no colon
@@ -307,7 +312,7 @@ func netProtoSplit(netProto string) (net string, proto int, err os.Error) {
// DialIP connects to the remote address raddr on the network net,
// which must be "ip", "ip4", or "ip6".
func DialIP(netProto string, laddr, raddr *IPAddr) (c *IPConn, err os.Error) {
- net, proto, err := netProtoSplit(netProto)
+ net, proto, err := splitNetProto(netProto)
if err != nil {
return
}
@@ -331,7 +336,7 @@ func DialIP(netProto string, laddr, raddr *IPAddr) (c *IPConn, err os.Error) {
// and WriteTo methods can be used to receive and send IP
// packets with per-packet addressing.
func ListenIP(netProto string, laddr *IPAddr) (c *IPConn, err os.Error) {
- net, proto, err := netProtoSplit(netProto)
+ net, proto, err := splitNetProto(netProto)
if err != nil {
return
}
diff --git a/src/pkg/net/ipsock.go b/src/pkg/net/ipsock.go
index e8bcac646..b83284d36 100644
--- a/src/pkg/net/ipsock.go
+++ b/src/pkg/net/ipsock.go
@@ -15,25 +15,105 @@ import (
// only dealing with IPv4 sockets? As long as the host system
// understands IPv6, it's okay to pass IPv4 addresses to the IPv6
// interface. That simplifies our code and is most general.
-// Unfortunately, we need to run on kernels built without IPv6 support too.
-// So probe the kernel to figure it out.
-func kernelSupportsIPv6() bool {
- s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
- if err != 0 {
- return false
+// Unfortunately, we need to run on kernels built without IPv6
+// support too. So probe the kernel to figure it out.
+//
+// probeIPv6Stack probes both basic IPv6 capability and IPv6 IPv4-
+// mapping capability, which is controlled by the IPV6_V6ONLY socket
+// option and/or the kernel state "net.inet6.ip6.v6only".
+// It returns two boolean values. If the first is true, the kernel
+// supports basic IPv6 functionality. If the second is true, the
+// kernel supports IPv6 IPv4-mapping.
+func probeIPv6Stack() (supportsIPv6, supportsIPv4map bool) {
+ var probes = []struct {
+ s int
+ la TCPAddr
+ ok bool
+ }{
+ // IPv6 communication capability
+ {-1, TCPAddr{IP: ParseIP("::1")}, false},
+ // IPv6 IPv4-mapped address communication capability
+ {-1, TCPAddr{IP: IPv4(127, 0, 0, 1)}, false},
}
- defer closesocket(s)
+ var errno int
- la := &TCPAddr{IP: IPv4(127, 0, 0, 1)}
- sa, oserr := la.toAddr().sockaddr(syscall.AF_INET6)
- if oserr != nil {
- return false
+ for i := range probes {
+ probes[i].s, errno = syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
+ if errno != 0 {
+ continue
+ }
+ defer closesocket(probes[i].s)
+ sa, err := probes[i].la.toAddr().sockaddr(syscall.AF_INET6)
+ if err != nil {
+ continue
+ }
+ errno = syscall.Bind(probes[i].s, sa)
+ if errno != 0 {
+ continue
+ }
+ probes[i].ok = true
}
- return syscall.Bind(s, sa) == 0
+ return probes[0].ok, probes[1].ok
}
-var preferIPv4 = !kernelSupportsIPv6()
+var supportsIPv6, supportsIPv4map = probeIPv6Stack()
+
+// favoriteAddrFamily returns the appropriate address family for the
+// given net, raddr, laddr and mode. It first derives the address
+// family from the net. If mode indicates "listen" and laddr.(type).IP
+// is nil, it assumes that the user wants a passive connection with a
+// wildcard address family, both INET and INET6, and a wildcard
+// address. Otherwise it guesses: if the addresses are IPv4 it
+// returns INET, else INET6.
+func favoriteAddrFamily(net string, raddr, laddr sockaddr, mode string) int {
+ switch net[len(net)-1] {
+ case '4':
+ return syscall.AF_INET
+ case '6':
+ return syscall.AF_INET6
+ }
+
+ if mode == "listen" {
+ switch a := laddr.(type) {
+ case *TCPAddr:
+ if a.IP == nil && supportsIPv6 {
+ return syscall.AF_INET6
+ }
+ case *UDPAddr:
+ if a.IP == nil && supportsIPv6 {
+ return syscall.AF_INET6
+ }
+ case *IPAddr:
+ if a.IP == nil && supportsIPv6 {
+ return syscall.AF_INET6
+ }
+ }
+ }
+
+ if (laddr == nil || laddr.family() == syscall.AF_INET) &&
+ (raddr == nil || raddr.family() == syscall.AF_INET) {
+ return syscall.AF_INET
+ }
+ return syscall.AF_INET6
+}
+
+func firstFavoriteAddr(filter func(IP) IP, addrs []string) (addr IP) {
+ if filter == anyaddr {
+ // We'll take any IP address, but since the dialing code
+ // does not yet try multiple addresses, prefer to use
+ // an IPv4 address if possible. This is especially relevant
+ // if localhost resolves to [ipv6-localhost, ipv4-localhost].
+ // Too much code assumes localhost == ipv4-localhost.
+ addr = firstSupportedAddr(ipv4only, addrs)
+ if addr == nil {
+ addr = firstSupportedAddr(anyaddr, addrs)
+ }
+ } else {
+ addr = firstSupportedAddr(filter, addrs)
+ }
+ return
+}
func firstSupportedAddr(filter func(IP) IP, addrs []string) IP {
for _, s := range addrs {
@@ -44,19 +124,25 @@ func firstSupportedAddr(filter func(IP) IP, addrs []string) IP {
return nil
}
-func anyaddr(x IP) IP { return x }
+func anyaddr(x IP) IP {
+ if x4 := x.To4(); x4 != nil {
+ return x4
+ }
+ if supportsIPv6 {
+ return x
+ }
+ return nil
+}
+
func ipv4only(x IP) IP { return x.To4() }
func ipv6only(x IP) IP {
// Only return addresses that we can use
// with the kernel's IPv6 addressing modes.
- // If preferIPv4 is set, it means the IPv6 stack
- // cannot take IPv4 addresses directly (we prefer
- // to use the IPv4 stack) so reject IPv4 addresses.
- if x.To4() != nil && preferIPv4 {
- return nil
+ if len(x) == IPv6len && x.To4() == nil && supportsIPv6 {
+ return x
}
- return x
+ return nil
}
// TODO(rsc): if syscall.OS == "linux", we're supposed to read
@@ -75,26 +161,9 @@ type sockaddr interface {
}
func internetSocket(net string, laddr, raddr sockaddr, socktype, proto int, mode string, toAddr func(syscall.Sockaddr) Addr) (fd *netFD, err os.Error) {
- // Figure out IP version.
- // If network has a suffix like "tcp4", obey it.
var oserr os.Error
- family := syscall.AF_INET6
- switch net[len(net)-1] {
- case '4':
- family = syscall.AF_INET
- case '6':
- // nothing to do
- default:
- // Otherwise, guess.
- // If the addresses are IPv4 and we prefer IPv4, use 4; else 6.
- if preferIPv4 &&
- (laddr == nil || laddr.family() == syscall.AF_INET) &&
- (raddr == nil || raddr.family() == syscall.AF_INET) {
- family = syscall.AF_INET
- }
- }
-
var la, ra syscall.Sockaddr
+ family := favoriteAddrFamily(net, raddr, laddr, mode)
if laddr != nil {
if la, oserr = laddr.sockaddr(family); oserr != nil {
goto Error
@@ -119,25 +188,6 @@ Error:
return nil, &OpError{mode, net, addr, oserr}
}
-func getip(fd int, remote bool) (ip []byte, port int, ok bool) {
- // No attempt at error reporting because
- // there are no possible errors, and the
- // caller won't report them anyway.
- var sa syscall.Sockaddr
- if remote {
- sa, _ = syscall.Getpeername(fd)
- } else {
- sa, _ = syscall.Getsockname(fd)
- }
- switch sa := sa.(type) {
- case *syscall.SockaddrInet4:
- return sa.Addr[0:], sa.Port, true
- case *syscall.SockaddrInet6:
- return sa.Addr[0:], sa.Port, true
- }
- return
-}
-
type InvalidAddrError string
func (e InvalidAddrError) String() string { return string(e) }
@@ -161,13 +211,13 @@ func ipToSockaddr(family int, ip IP, port int) (syscall.Sockaddr, os.Error) {
return s, nil
case syscall.AF_INET6:
if len(ip) == 0 {
- ip = IPzero
+ ip = IPv6zero
}
// IPv4 callers use 0.0.0.0 to mean "announce on any available address".
// In IPv6 mode, Linux treats that as meaning "announce on 0.0.0.0",
// which it refuses to do. Rewrite to the IPv6 all zeros.
- if p4 := ip.To4(); p4 != nil && p4[0] == 0 && p4[1] == 0 && p4[2] == 0 && p4[3] == 0 {
- ip = IPzero
+ if ip.Equal(IPv4zero) {
+ ip = IPv6zero
}
if ip = ip.To16(); ip == nil {
return nil, InvalidAddrError("non-IPv6 address")
@@ -231,9 +281,10 @@ func hostPortToIP(net, hostport string) (ip IP, iport int, err os.Error) {
addr = ParseIP(host)
if addr == nil {
filter := anyaddr
- if len(net) >= 4 && net[3] == '4' {
+ if net != "" && net[len(net)-1] == '4' {
filter = ipv4only
- } else if len(net) >= 4 && net[3] == '6' {
+ }
+ if net != "" && net[len(net)-1] == '6' {
filter = ipv6only
}
// Not an IP address. Try as a DNS name.
@@ -242,22 +293,10 @@ func hostPortToIP(net, hostport string) (ip IP, iport int, err os.Error) {
err = err1
goto Error
}
- if filter == anyaddr {
- // We'll take any IP address, but since the dialing code
- // does not yet try multiple addresses, prefer to use
- // an IPv4 address if possible. This is especially relevant
- // if localhost resolves to [ipv6-localhost, ipv4-localhost].
- // Too much code assumes localhost == ipv4-localhost.
- addr = firstSupportedAddr(ipv4only, addrs)
- if addr == nil {
- addr = firstSupportedAddr(anyaddr, addrs)
- }
- } else {
- addr = firstSupportedAddr(filter, addrs)
- }
+ addr = firstFavoriteAddr(filter, addrs)
if addr == nil {
// should not happen
- err = &AddrError{"LookupHost returned invalid address", addrs[0]}
+ err = &AddrError{"LookupHost returned no suitable address", addrs[0]}
goto Error
}
}
diff --git a/src/pkg/net/resolv_windows.go b/src/pkg/net/resolv_windows.go
index 3506ea177..f7c3f51be 100644
--- a/src/pkg/net/resolv_windows.go
+++ b/src/pkg/net/resolv_windows.go
@@ -113,6 +113,10 @@ func reverseaddr(addr string) (arpa string, err os.Error) {
panic("unimplemented")
}
+func answer(name, server string, dns *dnsMsg, qtype uint16) (cname string, addrs []dnsRR, err os.Error) {
+ panic("unimplemented")
+}
+
// DNSError represents a DNS lookup error.
type DNSError struct {
Error string // description of the error
diff --git a/src/pkg/net/server_test.go b/src/pkg/net/server_test.go
index 075748b83..107de3e1c 100644
--- a/src/pkg/net/server_test.go
+++ b/src/pkg/net/server_test.go
@@ -92,15 +92,19 @@ func connect(t *testing.T, network, addr string, isEmpty bool) {
}
func doTest(t *testing.T, network, listenaddr, dialaddr string) {
- t.Logf("Test %s %s %s\n", network, listenaddr, dialaddr)
+ if listenaddr == "" {
+ t.Logf("Test %s %s %s\n", network, "<nil>", dialaddr)
+ } else {
+ t.Logf("Test %s %s %s\n", network, listenaddr, dialaddr)
+ }
listening := make(chan string)
done := make(chan int)
- if network == "tcp" {
+ if network == "tcp" || network == "tcp4" || network == "tcp6" {
listenaddr += ":0" // any available port
}
go runServe(t, network, listenaddr, listening, done)
addr := <-listening // wait for server to start
- if network == "tcp" {
+ if network == "tcp" || network == "tcp4" || network == "tcp6" {
dialaddr += addr[strings.LastIndex(addr, ":"):]
}
connect(t, network, dialaddr, false)
@@ -108,10 +112,33 @@ func doTest(t *testing.T, network, listenaddr, dialaddr string) {
}
func TestTCPServer(t *testing.T) {
+ doTest(t, "tcp", "", "127.0.0.1")
+ doTest(t, "tcp", "0.0.0.0", "127.0.0.1")
doTest(t, "tcp", "127.0.0.1", "127.0.0.1")
- if kernelSupportsIPv6() {
+ doTest(t, "tcp4", "", "127.0.0.1")
+ doTest(t, "tcp4", "0.0.0.0", "127.0.0.1")
+ doTest(t, "tcp4", "127.0.0.1", "127.0.0.1")
+ if supportsIPv6 {
+ doTest(t, "tcp", "", "[::1]")
+ doTest(t, "tcp", "[::]", "[::1]")
doTest(t, "tcp", "[::1]", "[::1]")
+ doTest(t, "tcp6", "", "[::1]")
+ doTest(t, "tcp6", "[::]", "[::1]")
+ doTest(t, "tcp6", "[::1]", "[::1]")
+ }
+ if supportsIPv6 && supportsIPv4map {
+ doTest(t, "tcp", "[::ffff:0.0.0.0]", "127.0.0.1")
+ doTest(t, "tcp", "[::]", "127.0.0.1")
+ doTest(t, "tcp4", "[::ffff:0.0.0.0]", "127.0.0.1")
+ doTest(t, "tcp6", "", "127.0.0.1")
+ doTest(t, "tcp6", "[::ffff:0.0.0.0]", "127.0.0.1")
+ doTest(t, "tcp6", "[::]", "127.0.0.1")
doTest(t, "tcp", "127.0.0.1", "[::ffff:127.0.0.1]")
+ doTest(t, "tcp", "[::ffff:127.0.0.1]", "127.0.0.1")
+ doTest(t, "tcp4", "127.0.0.1", "[::ffff:127.0.0.1]")
+ doTest(t, "tcp4", "[::ffff:127.0.0.1]", "127.0.0.1")
+ doTest(t, "tcp6", "127.0.0.1", "[::ffff:127.0.0.1]")
+ doTest(t, "tcp6", "[::ffff:127.0.0.1]", "127.0.0.1")
}
}
@@ -186,7 +213,7 @@ func TestUDPServer(t *testing.T) {
for _, isEmpty := range []bool{false, true} {
doTestPacket(t, "udp", "0.0.0.0", "127.0.0.1", isEmpty)
doTestPacket(t, "udp", "", "127.0.0.1", isEmpty)
- if kernelSupportsIPv6() {
+ if supportsIPv6 && supportsIPv4map {
doTestPacket(t, "udp", "[::]", "[::ffff:127.0.0.1]", isEmpty)
doTestPacket(t, "udp", "[::]", "127.0.0.1", isEmpty)
doTestPacket(t, "udp", "0.0.0.0", "[::ffff:127.0.0.1]", isEmpty)
diff --git a/src/pkg/net/sock.go b/src/pkg/net/sock.go
index bd88f7ece..5c47e4f77 100644
--- a/src/pkg/net/sock.go
+++ b/src/pkg/net/sock.go
@@ -32,17 +32,7 @@ func socket(net string, f, p, t int, la, ra syscall.Sockaddr, toAddr func(syscal
syscall.CloseOnExec(s)
syscall.ForkLock.RUnlock()
- // Allow reuse of recently-used addresses.
- syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
-
- // Allow broadcast.
- syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
-
- if f == syscall.AF_INET6 {
- // using ip, tcp, udp, etc.
- // allow both protocols even if the OS default is otherwise.
- syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
- }
+ setKernelSpecificSockopt(s, f)
if la != nil {
e = syscall.Bind(s, la)
@@ -163,16 +153,3 @@ type UnknownSocketError struct {
func (e *UnknownSocketError) String() string {
return "unknown socket address type " + reflect.TypeOf(e.sa).String()
}
-
-func sockaddrToString(sa syscall.Sockaddr) (name string, err os.Error) {
- switch a := sa.(type) {
- case *syscall.SockaddrInet4:
- return JoinHostPort(IP(a.Addr[0:]).String(), itoa(a.Port)), nil
- case *syscall.SockaddrInet6:
- return JoinHostPort(IP(a.Addr[0:]).String(), itoa(a.Port)), nil
- case *syscall.SockaddrUnix:
- return a.Name, nil
- }
-
- return "", &UnknownSocketError{sa}
-}
diff --git a/src/pkg/net/sock_bsd.go b/src/pkg/net/sock_bsd.go
new file mode 100644
index 000000000..5fd52074a
--- /dev/null
+++ b/src/pkg/net/sock_bsd.go
@@ -0,0 +1,31 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Sockets for BSD variants
+
+package net
+
+import (
+ "syscall"
+)
+
+func setKernelSpecificSockopt(s, f int) {
+ // Allow reuse of recently-used addresses.
+ syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+
+ // Allow reuse of recently-used ports.
+ // This option is supported only in descendants of 4.4BSD,
+ // to allow effective multicast applications and applications
+ // that need to rebind quickly to a recently used port.
+ syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)
+
+ // Allow broadcast.
+ syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
+
+ if f == syscall.AF_INET6 {
+ // using ip, tcp, udp, etc.
+ // allow both protocols even if the OS default is otherwise.
+ syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
+ }
+}
diff --git a/src/pkg/net/sock_linux.go b/src/pkg/net/sock_linux.go
new file mode 100644
index 000000000..ec31e803b
--- /dev/null
+++ b/src/pkg/net/sock_linux.go
@@ -0,0 +1,25 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Sockets for Linux
+
+package net
+
+import (
+ "syscall"
+)
+
+func setKernelSpecificSockopt(s, f int) {
+ // Allow reuse of recently-used addresses.
+ syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+
+ // Allow broadcast.
+ syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
+
+ if f == syscall.AF_INET6 {
+ // using ip, tcp, udp, etc.
+ // allow both protocols even if the OS default is otherwise.
+ syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
+ }
+}
diff --git a/src/pkg/net/sock_windows.go b/src/pkg/net/sock_windows.go
new file mode 100644
index 000000000..e17c60b98
--- /dev/null
+++ b/src/pkg/net/sock_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Sockets for Windows
+
+package net
+
+import (
+ "syscall"
+)
+
+func setKernelSpecificSockopt(s, f int) {
+ // Allow reuse of recently-used addresses and ports.
+ syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+
+ // Allow broadcast.
+ syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
+
+ if f == syscall.AF_INET6 {
+ // using ip, tcp, udp, etc.
+ // allow both protocols even if the OS default is otherwise.
+ syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
+ }
+}
diff --git a/src/pkg/net/tcpsock.go b/src/pkg/net/tcpsock.go
index d9aa7cf19..8aeed4895 100644
--- a/src/pkg/net/tcpsock.go
+++ b/src/pkg/net/tcpsock.go
@@ -41,7 +41,7 @@ func (a *TCPAddr) family() int {
if a == nil || len(a.IP) <= 4 {
return syscall.AF_INET
}
- if ip := a.IP.To4(); ip != nil {
+ if a.IP.To4() != nil {
return syscall.AF_INET
}
return syscall.AF_INET6
@@ -60,10 +60,11 @@ func (a *TCPAddr) toAddr() sockaddr {
// ResolveTCPAddr parses addr as a TCP address of the form
// host:port and resolves domain names or port names to
-// numeric addresses. A literal IPv6 host address must be
+// numeric addresses on the network net, which must be "tcp",
+// "tcp4" or "tcp6". A literal IPv6 host address must be
// enclosed in square brackets, as in "[::]:80".
-func ResolveTCPAddr(network, addr string) (*TCPAddr, os.Error) {
- ip, port, err := hostPortToIP(network, addr)
+func ResolveTCPAddr(net, addr string) (*TCPAddr, os.Error) {
+ ip, port, err := hostPortToIP(net, addr)
if err != nil {
return nil, err
}
diff --git a/src/pkg/net/udpsock.go b/src/pkg/net/udpsock.go
index 67684471b..409355667 100644
--- a/src/pkg/net/udpsock.go
+++ b/src/pkg/net/udpsock.go
@@ -41,7 +41,7 @@ func (a *UDPAddr) family() int {
if a == nil || len(a.IP) <= 4 {
return syscall.AF_INET
}
- if ip := a.IP.To4(); ip != nil {
+ if a.IP.To4() != nil {
return syscall.AF_INET
}
return syscall.AF_INET6
@@ -60,10 +60,11 @@ func (a *UDPAddr) toAddr() sockaddr {
// ResolveUDPAddr parses addr as a UDP address of the form
// host:port and resolves domain names or port names to
-// numeric addresses. A literal IPv6 host address must be
+// numeric addresses on the network net, which must be "udp",
+// "udp4" or "udp6". A literal IPv6 host address must be
// enclosed in square brackets, as in "[::]:80".
-func ResolveUDPAddr(network, addr string) (*UDPAddr, os.Error) {
- ip, port, err := hostPortToIP(network, addr)
+func ResolveUDPAddr(net, addr string) (*UDPAddr, os.Error) {
+ ip, port, err := hostPortToIP(net, addr)
if err != nil {
return nil, err
}
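
A short sketch of the updated resolver calls: ResolveIPAddr, ResolveTCPAddr and ResolveUDPAddr now take the network name first, which constrains the address family of the result (the addresses below are illustrative):

package main

import (
	"fmt"
	"net"
)

func main() {
	// "tcp4" restricts the answer to an IPv4 address.
	taddr, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:80")
	if err != nil {
		fmt.Println("ResolveTCPAddr:", err)
		return
	}
	fmt.Println("tcp4:", taddr)

	// "udp" accepts either family; the filter is chosen from the suffix.
	uaddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:53")
	if err != nil {
		fmt.Println("ResolveUDPAddr:", err)
		return
	}
	fmt.Println("udp:", uaddr)
}
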
diff --git a/src/pkg/os/dir_unix.go b/src/pkg/os/dir_unix.go
index f5b82230d..9c543838e 100644
--- a/src/pkg/os/dir_unix.go
+++ b/src/pkg/os/dir_unix.go
@@ -12,30 +12,40 @@ const (
blockSize = 4096
)
-// Readdirnames reads the contents of the directory associated with file and
-// returns an array of up to count names, in directory order. Subsequent
-// calls on the same file will yield further names.
-// A negative count means to read until EOF.
-// Readdirnames returns the array and an Error, if any.
-func (file *File) Readdirnames(count int) (names []string, err Error) {
+// Readdirnames reads and returns a slice of names from the directory f.
+//
+// If n > 0, Readdirnames returns at most n names. In this case, if
+// Readdirnames returns an empty slice, it will return a non-nil error
+// explaining why. At the end of a directory, the error is os.EOF.
+//
+// If n <= 0, Readdirnames returns all the names from the directory in
+// a single slice. In this case, if Readdirnames succeeds (reads all
+// the way to the end of the directory), it returns the slice and a
+// nil os.Error. If it encounters an error before the end of the
+// directory, Readdirnames returns the names read until that point and
+// a non-nil error.
+func (f *File) Readdirnames(n int) (names []string, err Error) {
// If this file has no dirinfo, create one.
- if file.dirinfo == nil {
- file.dirinfo = new(dirInfo)
+ if f.dirinfo == nil {
+ f.dirinfo = new(dirInfo)
// The buffer must be at least a block long.
- file.dirinfo.buf = make([]byte, blockSize)
+ f.dirinfo.buf = make([]byte, blockSize)
}
- d := file.dirinfo
- size := count
+ d := f.dirinfo
+ wantAll := n < 0
+
+ size := n
if size < 0 {
size = 100
}
+
names = make([]string, 0, size) // Empty with room to grow.
- for count != 0 {
+ for n != 0 {
// Refill the buffer if necessary
if d.bufp >= d.nbuf {
d.bufp = 0
var errno int
- d.nbuf, errno = syscall.ReadDirent(file.fd, d.buf)
+ d.nbuf, errno = syscall.ReadDirent(f.fd, d.buf)
if errno != 0 {
return names, NewSyscallError("readdirent", errno)
}
@@ -46,9 +56,12 @@ func (file *File) Readdirnames(count int) (names []string, err Error) {
// Drain the buffer
var nb, nc int
- nb, nc, names = syscall.ParseDirent(d.buf[d.bufp:d.nbuf], count, names)
+ nb, nc, names = syscall.ParseDirent(d.buf[d.bufp:d.nbuf], n, names)
d.bufp += nb
- count -= nc
+ n -= nc
+ }
+ if !wantAll && len(names) == 0 {
+ return names, EOF
}
return names, nil
}
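
A sketch of the new contract (not part of the patch), assuming the pre-Go1 os.Error API: drain an already-open directory in batches of 100 names, stopping at os.EOF.

package dirsketch

import "os"

// readAllNames reads directory entries 100 at a time until os.EOF,
// following the Readdirnames documentation above.
func readAllNames(dir *os.File) ([]string, os.Error) {
	var all []string
	for {
		names, err := dir.Readdirnames(100)
		all = append(all, names...)
		if err == os.EOF {
			return all, nil // end of directory
		}
		if err != nil {
			return all, err
		}
	}
}
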
diff --git a/src/pkg/os/dir_windows.go b/src/pkg/os/dir_windows.go
index 0d8267b59..a4df9d3ea 100644
--- a/src/pkg/os/dir_windows.go
+++ b/src/pkg/os/dir_windows.go
@@ -4,14 +4,16 @@
package os
-func (file *File) Readdirnames(count int) (names []string, err Error) {
- fis, e := file.Readdir(count)
- if e != nil {
- return nil, e
+func (file *File) Readdirnames(n int) (names []string, err Error) {
+ fis, err := file.Readdir(n)
+ // If n > 0 and we get an error, we return now.
+ // If n < 0, we return whatever we got + any error.
+ if n > 0 && err != nil {
+ return nil, err
}
names = make([]string, len(fis))
for i, fi := range fis {
names[i] = fi.Name
}
- return names, nil
+ return names, err
}
diff --git a/src/pkg/os/env.go b/src/pkg/os/env.go
index 3a6d79dd0..3772c090b 100644
--- a/src/pkg/os/env.go
+++ b/src/pkg/os/env.go
@@ -6,6 +6,8 @@
package os
+func setenv_c(k, v string)
+
// Expand replaces ${var} or $var in the string based on the mapping function.
// Invocations of undefined variables are replaced with the empty string.
func Expand(s string, mapping func(string) string) string {
diff --git a/src/pkg/os/env_unix.go b/src/pkg/os/env_unix.go
index e7e1c3b90..8aa71e83a 100644
--- a/src/pkg/os/env_unix.go
+++ b/src/pkg/os/env_unix.go
@@ -29,6 +29,8 @@ func copyenv() {
}
}
+var envLock sync.RWMutex
+
// Getenverror retrieves the value of the environment variable named by the key.
// It returns the value and an error, if any.
func Getenverror(key string) (value string, err Error) {
@@ -37,6 +39,10 @@ func Getenverror(key string) (value string, err Error) {
if len(key) == 0 {
return "", EINVAL
}
+
+ envLock.RLock()
+ defer envLock.RUnlock()
+
v, ok := env[key]
if !ok {
return "", ENOENV
@@ -55,35 +61,43 @@ func Getenv(key string) string {
// It returns an Error, if any.
func Setenv(key, value string) Error {
once.Do(copyenv)
-
if len(key) == 0 {
return EINVAL
}
+
+ envLock.Lock()
+ defer envLock.Unlock()
+
env[key] = value
+ setenv_c(key, value) // is a no-op if cgo isn't loaded
return nil
}
// Clearenv deletes all environment variables.
func Clearenv() {
once.Do(copyenv) // prevent copyenv in Getenv/Setenv
+
+ envLock.Lock()
+ defer envLock.Unlock()
+
env = make(map[string]string)
+
+ // TODO(bradfitz): pass through to C
}
// Environ returns an array of strings representing the environment,
// in the form "key=value".
func Environ() []string {
once.Do(copyenv)
+ envLock.RLock()
+ defer envLock.RUnlock()
a := make([]string, len(env))
i := 0
for k, v := range env {
- // check i < len(a) for safety,
- // in case env is changing underfoot.
- if i < len(a) {
- a[i] = k + "=" + v
- i++
- }
+ a[i] = k + "=" + v
+ i++
}
- return a[0:i]
+ return a
}
// TempDir returns the default directory to use for temporary files.
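
Not part of the patch: with envLock guarding the map (and setenv_c forwarding to C when cgo is linked in), concurrent Setenv/Getenv calls no longer race. A small sketch:

package main

import (
	"fmt"
	"os"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			os.Setenv("SKETCH_KEY", fmt.Sprint(i)) // key name is made up
			_ = os.Getenv("SKETCH_KEY")
		}(i)
	}
	wg.Wait()
	fmt.Println("final value:", os.Getenv("SKETCH_KEY"))
}
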
diff --git a/src/pkg/os/file_unix.go b/src/pkg/os/file_unix.go
index 2fb28df65..c65c5b3ff 100644
--- a/src/pkg/os/file_unix.go
+++ b/src/pkg/os/file_unix.go
@@ -70,19 +70,29 @@ func (file *File) Stat() (fi *FileInfo, err Error) {
// Readdir reads the contents of the directory associated with file and
// returns an array of up to count FileInfo structures, as would be returned
-// by Lstat, in directory order. Subsequent calls on the same file will yield
+// by Lstat, in directory order. Subsequent calls on the same file will yield
// further FileInfos.
-// A negative count means to read until EOF.
-// Readdir returns the array and an Error, if any.
-func (file *File) Readdir(count int) (fi []FileInfo, err Error) {
+//
+// If n > 0, Readdir returns at most n FileInfo structures. In this
+// case, if Readdir returns an empty slice, it will return a non-nil error
+// explaining why. At the end of a directory, the error is os.EOF.
+//
+// If n <= 0, Readdir returns all the FileInfo from the directory in
+// a single slice. In this case, if Readdir succeeds (reads all
+// the way to the end of the directory), it returns the slice and a
+// nil os.Error. If it encounters an error before the end of the
+// directory, Readdir returns the FileInfo read until that point
+// and a non-nil error.
+func (file *File) Readdir(n int) (fi []FileInfo, err Error) {
dirname := file.name
if dirname == "" {
dirname = "."
}
dirname += "/"
- names, err1 := file.Readdirnames(count)
- if err1 != nil {
- return nil, err1
+ wantAll := n < 0
+ names, namesErr := file.Readdirnames(n)
+ if namesErr != nil && !wantAll {
+ return nil, namesErr
}
fi = make([]FileInfo, len(names))
for i, filename := range names {
@@ -93,6 +103,9 @@ func (file *File) Readdir(count int) (fi []FileInfo, err Error) {
fi[i] = *fip
}
}
+ if !wantAll && namesErr != EOF {
+ err = namesErr
+ }
return
}
diff --git a/src/pkg/os/file_windows.go b/src/pkg/os/file_windows.go
index 95f60b735..74ff3eb88 100644
--- a/src/pkg/os/file_windows.go
+++ b/src/pkg/os/file_windows.go
@@ -124,11 +124,20 @@ func (file *File) Stat() (fi *FileInfo, err Error) {
// Readdir reads the contents of the directory associated with file and
// returns an array of up to count FileInfo structures, as would be returned
-// by Lstat, in directory order. Subsequent calls on the same file will yield
+// by Lstat, in directory order. Subsequent calls on the same file will yield
// further FileInfos.
-// A negative count means to read until EOF.
-// Readdir returns the array and an Error, if any.
-func (file *File) Readdir(count int) (fi []FileInfo, err Error) {
+//
+// If n > 0, Readdir returns at most n FileInfo structures. In this
+// case, if Readdir returns an empty slice, it will return a non-nil error
+// explaining why. At the end of a directory, the error is os.EOF.
+//
+// If n <= 0, Readdir returns all the FileInfo from the directory in
+// a single slice. In this case, if Readdir succeeds (reads all
+// the way to the end of the directory), it returns the slice and a
+// nil os.Error. If it encounters an error before the end of the
+// directory, Readdir returns the FileInfo read until that point
+// and a non-nil error.
+func (file *File) Readdir(n int) (fi []FileInfo, err Error) {
if file == nil || file.fd < 0 {
return nil, EINVAL
}
@@ -136,12 +145,13 @@ func (file *File) Readdir(count int) (fi []FileInfo, err Error) {
return nil, &PathError{"Readdir", file.name, ENOTDIR}
}
di := file.dirinfo
- size := count
+ wantAll := n < 0
+ size := n
if size < 0 {
size = 100
}
fi = make([]FileInfo, 0, size) // Empty with room to grow.
- for count != 0 {
+ for n != 0 {
if di.usefirststat {
di.usefirststat = false
} else {
@@ -150,7 +160,11 @@ func (file *File) Readdir(count int) (fi []FileInfo, err Error) {
if e == syscall.ERROR_NO_MORE_FILES {
break
} else {
- return nil, &PathError{"FindNextFile", file.name, Errno(e)}
+ err = &PathError{"FindNextFile", file.name, Errno(e)}
+ if !wantAll {
+ fi = nil
+ }
+ return
}
}
}
@@ -159,9 +173,12 @@ func (file *File) Readdir(count int) (fi []FileInfo, err Error) {
if f.Name == "." || f.Name == ".." { // Useless names
continue
}
- count--
+ n--
fi = append(fi, f)
}
+ if !wantAll && len(fi) == 0 {
+ return fi, EOF
+ }
return fi, nil
}
diff --git a/src/pkg/os/os_test.go b/src/pkg/os/os_test.go
index 65475c118..b06d57b85 100644
--- a/src/pkg/os/os_test.go
+++ b/src/pkg/os/os_test.go
@@ -236,11 +236,14 @@ func smallReaddirnames(file *File, length int, t *testing.T) []string {
count := 0
for {
d, err := file.Readdirnames(1)
+ if err == EOF {
+ break
+ }
if err != nil {
- t.Fatalf("readdir %q failed: %v", file.Name(), err)
+ t.Fatalf("readdirnames %q failed: %v", file.Name(), err)
}
if len(d) == 0 {
- break
+ t.Fatalf("readdirnames %q returned empty slice and no error", file.Name())
}
names[count] = d[0]
count++
diff --git a/src/pkg/os/path.go b/src/pkg/os/path.go
index 0eb3ee503..5565aaa29 100644
--- a/src/pkg/os/path.go
+++ b/src/pkg/os/path.go
@@ -95,6 +95,9 @@ func RemoveAll(path string) Error {
err = err1
}
}
+ if err1 == EOF {
+ break
+ }
// If Readdirnames returned an error, use it.
if err == nil {
err = err1
diff --git a/src/pkg/os/user/user_test.go b/src/pkg/os/user/user_test.go
index 2c142bf18..ee917b57a 100644
--- a/src/pkg/os/user/user_test.go
+++ b/src/pkg/os/user/user_test.go
@@ -42,7 +42,7 @@ func TestLookup(t *testing.T) {
}
fi, err := os.Stat(u.HomeDir)
if err != nil || !fi.IsDirectory() {
- t.Errorf("expected a valid HomeDir; stat(%q): err=%v, IsDirectory=%v", err, fi.IsDirectory())
+ t.Errorf("expected a valid HomeDir; stat(%q): err=%v, IsDirectory=%v", u.HomeDir, err, fi.IsDirectory())
}
if u.Username == "" {
t.Fatalf("didn't get a username")
@@ -56,6 +56,6 @@ func TestLookup(t *testing.T) {
if !reflect.DeepEqual(u, un) {
t.Errorf("Lookup by userid vs. name didn't match\n"+
"LookupId(%d): %#v\n"+
- "Lookup(%q): %#v\n",uid, u, u.Username, un)
+ "Lookup(%q): %#v\n", uid, u, u.Username, un)
}
}
diff --git a/src/pkg/path/filepath/path.go b/src/pkg/path/filepath/path.go
index 541a23306..6917218db 100644
--- a/src/pkg/path/filepath/path.go
+++ b/src/pkg/path/filepath/path.go
@@ -9,6 +9,7 @@ package filepath
import (
"bytes"
"os"
+ "runtime"
"sort"
"strings"
)
@@ -178,6 +179,14 @@ func Ext(path string) string {
// links.
// If path is relative it will be evaluated relative to the current directory.
func EvalSymlinks(path string) (string, os.Error) {
+ if runtime.GOOS == "windows" {
+ // Symlinks are not supported under windows.
+ _, err := os.Lstat(path)
+ if err != nil {
+ return "", err
+ }
+ return Clean(path), nil
+ }
const maxIter = 255
originalPath := path
// consume path by taking each frontmost path element,
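
Not part of the patch: the branch added above makes EvalSymlinks degenerate to an Lstat check plus Clean on Windows, while other systems still resolve links. A minimal sketch, mirroring the absolute-path case in the test below:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	root, err := filepath.EvalSymlinks(os.Getenv("GOROOT"))
	if err != nil {
		fmt.Println("EvalSymlinks:", err)
		return
	}
	fmt.Println("GOROOT resolves to", root)
}
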
diff --git a/src/pkg/path/filepath/path_test.go b/src/pkg/path/filepath/path_test.go
index b3b6eb5ab..b14734983 100644
--- a/src/pkg/path/filepath/path_test.go
+++ b/src/pkg/path/filepath/path_test.go
@@ -440,48 +440,64 @@ var EvalSymlinksTests = []EvalSymlinksTest{
{"test/link2/link3/test", "test"},
}
-func TestEvalSymlinks(t *testing.T) {
- // Symlinks are not supported under windows.
- if runtime.GOOS == "windows" {
- return
+var EvalSymlinksAbsWindowsTests = []EvalSymlinksTest{
+ {`c:\`, `c:\`},
+}
+
+func testEvalSymlinks(t *testing.T, tests []EvalSymlinksTest) {
+ for _, d := range tests {
+ if p, err := filepath.EvalSymlinks(d.path); err != nil {
+ t.Errorf("EvalSymlinks(%q) error: %v", d.path, err)
+ } else if filepath.Clean(p) != filepath.Clean(d.dest) {
+ t.Errorf("EvalSymlinks(%q)=%q, want %q", d.path, p, d.dest)
+ }
}
+}
+
+func TestEvalSymlinks(t *testing.T) {
defer os.RemoveAll("test")
for _, d := range EvalSymlinksTestDirs {
var err os.Error
if d.dest == "" {
err = os.Mkdir(d.path, 0755)
} else {
- err = os.Symlink(d.dest, d.path)
+ if runtime.GOOS != "windows" {
+ err = os.Symlink(d.dest, d.path)
+ }
}
if err != nil {
t.Fatal(err)
}
}
- // relative
- for _, d := range EvalSymlinksTests {
- if p, err := filepath.EvalSymlinks(d.path); err != nil {
- t.Errorf("EvalSymlinks(%q) error: %v", d.path, err)
- } else if p != d.dest {
- t.Errorf("EvalSymlinks(%q)=%q, want %q", d.path, p, d.dest)
+ var tests []EvalSymlinksTest
+ if runtime.GOOS == "windows" {
+ for _, d := range EvalSymlinksTests {
+ if d.path == d.dest {
+ // will test only real files and directories
+ tests = append(tests, d)
+ }
}
+ } else {
+ tests = EvalSymlinksTests
}
+ // relative
+ testEvalSymlinks(t, tests)
// absolute
goroot, err := filepath.EvalSymlinks(os.Getenv("GOROOT"))
if err != nil {
t.Fatalf("EvalSymlinks(%q) error: %v", os.Getenv("GOROOT"), err)
}
testroot := filepath.Join(goroot, "src", "pkg", "path", "filepath")
- for _, d := range EvalSymlinksTests {
- a := EvalSymlinksTest{
- filepath.Join(testroot, d.path),
- filepath.Join(testroot, d.dest),
- }
- if p, err := filepath.EvalSymlinks(a.path); err != nil {
- t.Errorf("EvalSymlinks(%q) error: %v", a.path, err)
- } else if p != a.dest {
- t.Errorf("EvalSymlinks(%q)=%q, want %q", a.path, p, a.dest)
+ for i, d := range tests {
+ tests[i].path = filepath.Join(testroot, d.path)
+ tests[i].dest = filepath.Join(testroot, d.dest)
+ }
+ if runtime.GOOS == "windows" {
+ for _, d := range EvalSymlinksAbsWindowsTests {
+ tests = append(tests, d)
}
}
+ testEvalSymlinks(t, tests)
}
// Test paths relative to $GOROOT/src
diff --git a/src/pkg/rand/rand_test.go b/src/pkg/rand/rand_test.go
index 2476ebaf6..a689da848 100644
--- a/src/pkg/rand/rand_test.go
+++ b/src/pkg/rand/rand_test.go
@@ -197,7 +197,7 @@ func initNorm() (testKn []uint32, testWn, testFn []float32) {
const m1 = 1 << 31
var (
dn float64 = rn
- tn = dn
+ tn = dn
vn float64 = 9.91256303526217e-3
)
@@ -226,7 +226,7 @@ func initExp() (testKe []uint32, testWe, testFe []float32) {
const m2 = 1 << 32
var (
de float64 = re
- te = de
+ te = de
ve float64 = 3.9496598225815571993e-3
)
diff --git a/src/pkg/reflect/all_test.go b/src/pkg/reflect/all_test.go
index 5bf65333c..c83a9b75f 100644
--- a/src/pkg/reflect/all_test.go
+++ b/src/pkg/reflect/all_test.go
@@ -579,13 +579,7 @@ func TestCopyArray(t *testing.T) {
}
for i := len(a); i < len(b); i++ {
if b[i] != c[i] {
- if i < len(a) {
- t.Errorf("(ii) a[%d]=%d, b[%d]=%d, c[%d]=%d",
- i, a[i], i, b[i], i, c[i])
- } else {
- t.Errorf("(iii) b[%d]=%d, c[%d]=%d",
- i, b[i], i, c[i])
- }
+ t.Errorf("(ii) b[%d]=%d, c[%d]=%d", i, b[i], i, c[i])
} else {
t.Logf("elem %d is okay\n", i)
}
@@ -732,6 +726,24 @@ func TestDeepEqualComplexStructInequality(t *testing.T) {
}
}
+type UnexpT struct {
+ m map[int]int
+}
+
+func TestDeepEqualUnexportedMap(t *testing.T) {
+ // Check that DeepEqual can look at unexported fields.
+ x1 := UnexpT{map[int]int{1: 2}}
+ x2 := UnexpT{map[int]int{1: 2}}
+ if !DeepEqual(&x1, &x2) {
+ t.Error("DeepEqual(x1, x2) = false, want true")
+ }
+
+ y1 := UnexpT{map[int]int{2: 3}}
+ if DeepEqual(&x1, &y1) {
+ t.Error("DeepEqual(x1, y1) = true, want false")
+ }
+}
+
func check2ndField(x interface{}, offs uintptr, t *testing.T) {
s := ValueOf(x)
@@ -1439,7 +1451,9 @@ func noAlloc(t *testing.T, n int, f func(int)) {
for j := 0; j < n; j++ {
f(j)
}
- if runtime.MemStats.Mallocs != 0 {
+ // A few allocs may happen in the testing package when GOMAXPROCS > 1, so don't
+ // require zero mallocs.
+ if runtime.MemStats.Mallocs > 5 {
t.Fatalf("%d mallocs after %d iterations", runtime.MemStats.Mallocs, n)
}
}
diff --git a/src/pkg/reflect/value.go b/src/pkg/reflect/value.go
index 6dffb0783..2c2158a3c 100644
--- a/src/pkg/reflect/value.go
+++ b/src/pkg/reflect/value.go
@@ -958,14 +958,19 @@ func (v Value) MapIndex(key Value) Value {
iv.mustBe(Map)
typ := iv.typ.toType()
+ // Do not require ikey to be exported, so that DeepEqual
+ // and other programs can use all the keys returned by
+ // MapKeys as arguments to MapIndex. If either the map
+ // or the key is unexported, though, the result will be
+ // considered unexported.
+
ikey := key.internal()
- ikey.mustBeExported()
ikey = convertForAssignment("reflect.Value.MapIndex", nil, typ.Key(), ikey)
if iv.word == 0 {
return Value{}
}
- flag := iv.flag & flagRO
+ flag := (iv.flag | ikey.flag) & flagRO
elemType := typ.Elem()
elemWord, ok := mapaccess(iv.word, ikey.word)
if !ok {
diff --git a/src/pkg/rpc/client.go b/src/pkg/rpc/client.go
index 8af4afcf6..a8e560cbe 100644
--- a/src/pkg/rpc/client.go
+++ b/src/pkg/rpc/client.go
@@ -216,7 +216,7 @@ func DialHTTPPath(network, address, path string) (*Client, os.Error) {
// Require successful HTTP response
// before switching to RPC protocol.
- resp, err := http.ReadResponse(bufio.NewReader(conn), "CONNECT")
+ resp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: "CONNECT"})
if err == nil && resp.Status == connected {
return NewClient(conn), nil
}
diff --git a/src/pkg/runtime/append_test.go b/src/pkg/runtime/append_test.go
new file mode 100644
index 000000000..75a635306
--- /dev/null
+++ b/src/pkg/runtime/append_test.go
@@ -0,0 +1,51 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package runtime_test
+
+import "testing"
+
+const N = 20
+
+func BenchmarkAppend(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ x = append(x, j)
+ }
+ }
+}
+
+func BenchmarkAppendSpecialCase(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ if len(x) < cap(x) {
+ x = x[:len(x)+1]
+ x[len(x)-1] = j
+ } else {
+ x = append(x, j)
+ }
+ }
+ }
+}
+
+var x = make([]int, 0, 10)
+
+func f() int {
+ x[:1][0] = 3
+ return 2
+}
+
+func TestSideEffectOrder(t *testing.T) {
+ x = append(x, 1, f())
+ if x[0] != 1 || x[1] != 2 {
+ t.Error("append failed: ", x[0], x[1])
+ }
+}
diff --git a/src/pkg/runtime/arm/closure.c b/src/pkg/runtime/arm/closure.c
index 3aca3a42d..36a93bc53 100644
--- a/src/pkg/runtime/arm/closure.c
+++ b/src/pkg/runtime/arm/closure.c
@@ -43,7 +43,7 @@ vars: WORD arg0
WORD arg2
*/
-extern void cacheflush(byte* start, byte* end);
+extern void runtime·cacheflush(byte* start, byte* end);
#pragma textflag 7
void
diff --git a/src/pkg/runtime/cgo/Makefile b/src/pkg/runtime/cgo/Makefile
index 768fe80ac..f26da2c51 100644
--- a/src/pkg/runtime/cgo/Makefile
+++ b/src/pkg/runtime/cgo/Makefile
@@ -28,18 +28,20 @@ CGO_OFILES=\
$(GOOS)_$(GOARCH).o\
util.o\
-OFILES=\
- iscgo.$O\
- callbacks.$O\
- _cgo_import.$O\
- $(CGO_OFILES)\
-
ifeq ($(GOOS),windows)
CGO_LDFLAGS=-lm -mthreads
else
CGO_LDFLAGS=-lpthread
+CGO_OFILES+=setenv.o\
+
endif
+OFILES=\
+ iscgo.$O\
+ callbacks.$O\
+ _cgo_import.$O\
+ $(CGO_OFILES)\
+
ifeq ($(GOOS),freebsd)
OFILES+=\
freebsd.$O\
diff --git a/src/pkg/runtime/cgo/setenv.c b/src/pkg/runtime/cgo/setenv.c
new file mode 100644
index 000000000..c911b8392
--- /dev/null
+++ b/src/pkg/runtime/cgo/setenv.c
@@ -0,0 +1,16 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "libcgo.h"
+
+#include <stdlib.h>
+
+/* Stub for calling setenv */
+static void
+xlibcgo_setenv(char **arg)
+{
+ setenv(arg[0], arg[1], 1);
+}
+
+void (*libcgo_setenv)(char**) = xlibcgo_setenv;
diff --git a/src/pkg/runtime/linux/arm/sys.s b/src/pkg/runtime/linux/arm/sys.s
index d866b0e22..2b5365bd8 100644
--- a/src/pkg/runtime/linux/arm/sys.s
+++ b/src/pkg/runtime/linux/arm/sys.s
@@ -258,11 +258,22 @@ TEXT cas<>(SB),7,$0
TEXT runtime·cas(SB),7,$0
MOVW valptr+0(FP), R2
MOVW old+4(FP), R0
+casagain:
MOVW new+8(FP), R1
BL cas<>(SB)
- MOVW $0, R0
- MOVW.CS $1, R0
+ BCC cascheck
+ MOVW $1, R0
RET
+cascheck:
+ // Kernel lies; double-check.
+ MOVW valptr+0(FP), R2
+ MOVW old+4(FP), R0
+ MOVW 0(R2), R3
+ CMP R0, R3
+ BEQ casagain
+ MOVW $0, R0
+ RET
+
TEXT runtime·casp(SB),7,$0
B runtime·cas(SB)
diff --git a/src/pkg/runtime/linux/thread.c b/src/pkg/runtime/linux/thread.c
index 7166b0ef2..6c506236f 100644
--- a/src/pkg/runtime/linux/thread.c
+++ b/src/pkg/runtime/linux/thread.c
@@ -252,10 +252,10 @@ runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
stk, m, g, fn, runtime·clone, m->id, m->tls[0], &m);
}
- ret = runtime·clone(flags, stk, m, g, fn);
-
- if(ret < 0)
- *(int32*)123 = 123;
+ if((ret = runtime·clone(flags, stk, m, g, fn)) < 0) {
+ runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -ret);
+ runtime·throw("runtime.newosproc");
+ }
}
void
diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
index 1f2d6da40..c55be9772 100644
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -487,7 +487,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
if(runtime·getfinalizer(obj.data, 0)) {
- runtime·printf("runtime.SetFinalizer: finalizer already set");
+ runtime·printf("runtime.SetFinalizer: finalizer already set\n");
goto throw;
}
}
diff --git a/src/pkg/runtime/proc.c b/src/pkg/runtime/proc.c
index 52784854f..c5af8b754 100644
--- a/src/pkg/runtime/proc.c
+++ b/src/pkg/runtime/proc.c
@@ -714,7 +714,7 @@ runtime·oldstack(void)
goid = old.gobuf.g->goid; // fault if g is bad, before gogo
if(old.free != 0)
- runtime·stackfree(g1->stackguard - StackGuard - StackSystem, old.free);
+ runtime·stackfree(g1->stackguard - StackGuard, old.free);
g1->stackbase = old.stackbase;
g1->stackguard = old.stackguard;
@@ -756,7 +756,7 @@ runtime·newstack(void)
// the new Stktop* is necessary to unwind, but
// we don't need to create a new segment.
top = (Stktop*)(m->morebuf.sp - sizeof(*top));
- stk = g1->stackguard - StackGuard - StackSystem;
+ stk = g1->stackguard - StackGuard;
free = 0;
} else {
// allocate new segment.
@@ -785,7 +785,7 @@ runtime·newstack(void)
g1->ispanic = false;
g1->stackbase = (byte*)top;
- g1->stackguard = stk + StackGuard + StackSystem;
+ g1->stackguard = stk + StackGuard;
sp = (byte*)top;
if(argsize > 0) {
@@ -834,7 +834,7 @@ runtime·malg(int32 stacksize)
g->param = nil;
}
newg->stack0 = stk;
- newg->stackguard = stk + StackSystem + StackGuard;
+ newg->stackguard = stk + StackGuard;
newg->stackbase = stk + StackSystem + stacksize - sizeof(Stktop);
runtime·memclr(newg->stackbase, sizeof(Stktop));
}
@@ -880,7 +880,7 @@ runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
if((newg = gfget()) != nil){
newg->status = Gwaiting;
- if(newg->stackguard - StackGuard - StackSystem != newg->stack0)
+ if(newg->stackguard - StackGuard != newg->stack0)
runtime·throw("invalid stack in newg");
} else {
newg = runtime·malg(StackMin);
@@ -1165,7 +1165,7 @@ nomatch:
static void
gfput(G *g)
{
- if(g->stackguard - StackGuard - StackSystem != g->stack0)
+ if(g->stackguard - StackGuard != g->stack0)
runtime·throw("invalid stack in gfput");
g->schedlink = runtime·sched.gfree;
runtime·sched.gfree = g;
@@ -1343,3 +1343,26 @@ runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
if(hz != 0)
runtime·resetcpuprofiler(hz);
}
+
+void (*libcgo_setenv)(byte**);
+
+void
+os·setenv_c(String k, String v)
+{
+ byte *arg[2];
+
+ if(libcgo_setenv == nil)
+ return;
+
+ arg[0] = runtime·malloc(k.len + 1);
+ runtime·mcpy(arg[0], k.str, k.len);
+ arg[0][k.len] = 0;
+
+ arg[1] = runtime·malloc(v.len + 1);
+ runtime·mcpy(arg[1], v.str, v.len);
+ arg[1][v.len] = 0;
+
+ runtime·asmcgocall(libcgo_setenv, arg);
+ runtime·free(arg[0]);
+ runtime·free(arg[1]);
+}
diff --git a/src/pkg/runtime/runtime.h b/src/pkg/runtime/runtime.h
index f9b404e15..f2f8dcd5b 100644
--- a/src/pkg/runtime/runtime.h
+++ b/src/pkg/runtime/runtime.h
@@ -597,17 +597,3 @@ int32 runtime·chancap(Hchan*);
void runtime·ifaceE2I(struct InterfaceType*, Eface, Iface*);
-enum
-{
- // StackSystem is a number of additional bytes to add
- // to each stack below the usual guard area for OS-specific
- // purposes like signal handling.
- // TODO(rsc): This is only for Windows. Can't Windows use
- // a separate exception stack like every other operating system?
-#ifdef __WINDOWS__
- StackSystem = 2048,
-#else
- StackSystem = 0,
-#endif
-};
-
diff --git a/src/pkg/runtime/slice.c b/src/pkg/runtime/slice.c
index 1fee923e4..9146c177f 100644
--- a/src/pkg/runtime/slice.c
+++ b/src/pkg/runtime/slice.c
@@ -9,6 +9,8 @@
static int32 debug = 0;
static void makeslice1(SliceType*, int32, int32, Slice*);
+static void growslice1(SliceType*, Slice, int32, Slice *);
+static void appendslice1(SliceType*, Slice, Slice, Slice*);
void runtime·slicecopy(Slice to, Slice fm, uintptr width, int32 ret);
// see also unsafe·NewArray
@@ -46,22 +48,6 @@ makeslice1(SliceType *t, int32 len, int32 cap, Slice *ret)
ret->array = runtime·mal(size);
}
-static void appendslice1(SliceType*, Slice, Slice, Slice*);
-
-// append(type *Type, n int, old []T, ...,) []T
-#pragma textflag 7
-void
-runtime·append(SliceType *t, int32 n, Slice old, ...)
-{
- Slice sl;
- Slice *ret;
-
- sl.len = n;
- sl.array = (byte*)(&old+1);
- ret = (Slice*)(sl.array + ((t->elem->size*n+sizeof(uintptr)-1) & ~(sizeof(uintptr)-1)));
- appendslice1(t, old, sl, ret);
-}
-
// appendslice(type *Type, x, y, []T) []T
void
runtime·appendslice(SliceType *t, Slice x, Slice y, Slice ret)
@@ -72,36 +58,69 @@ runtime·appendslice(SliceType *t, Slice x, Slice y, Slice ret)
static void
appendslice1(SliceType *t, Slice x, Slice y, Slice *ret)
{
- Slice newx;
int32 m;
uintptr w;
- if(x.len+y.len < x.len)
+ m = x.len+y.len;
+
+ if(m < x.len)
runtime·throw("append: slice overflow");
+ if(m > x.cap)
+ growslice1(t, x, m, ret);
+ else
+ *ret = x;
+
w = t->elem->size;
- if(x.len+y.len > x.cap) {
- m = x.cap;
- if(m == 0)
- m = y.len;
- else {
- do {
- if(x.len < 1024)
- m += m;
- else
- m += m/4;
- } while(m < x.len+y.len);
- }
- makeslice1(t, x.len, m, &newx);
- runtime·memmove(newx.array, x.array, x.len*w);
- x = newx;
+ runtime·memmove(ret->array + ret->len*w, y.array, y.len*w);
+ ret->len += y.len;
+}
+
+// growslice(type *Type, x, []T, n int64) []T
+void
+runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret)
+{
+ int64 cap;
+
+ if(n < 1)
+ runtime·panicstring("growslice: invalid n");
+
+ cap = old.cap + n;
+
+ if((int32)cap != cap || cap > ((uintptr)-1) / t->elem->size)
+ runtime·panicstring("growslice: cap out of range");
+
+ growslice1(t, old, cap, &ret);
+
+ FLUSH(&ret);
+
+ if(debug) {
+ runtime·printf("growslice(%S,", *t->string);
+ runtime·printslice(old);
+ runtime·printf(", new cap=%D) =", cap);
+ runtime·printslice(ret);
}
- runtime·memmove(x.array+x.len*w, y.array, y.len*w);
- x.len += y.len;
- *ret = x;
}
+static void
+growslice1(SliceType *t, Slice x, int32 newcap, Slice *ret)
+{
+ int32 m;
+ m = x.cap;
+ if(m == 0)
+ m = newcap;
+ else {
+ do {
+ if(x.len < 1024)
+ m += m;
+ else
+ m += m/4;
+ } while(m < newcap);
+ }
+ makeslice1(t, x.len, m, ret);
+ runtime·memmove(ret->array, x.array, ret->len * t->elem->size);
+}
// sliceslice(old []any, lb uint64, hb uint64, width uint64) (ary []any);
void
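
The growth policy in growslice1 can be restated in Go (illustrative only; the runtime operates on Slice headers in C): capacity doubles while the slice holds fewer than 1024 elements and grows by about 25% afterwards, until it covers the request.

package growth

// newCap mirrors growslice1 above for the case the runtime calls it in
// (the requested capacity exceeds the old one).
func newCap(oldLen, oldCap, want int) int {
	m := oldCap
	if m == 0 {
		return want
	}
	for m < want {
		if oldLen < 1024 {
			m += m // double while short
		} else {
			m += m / 4 // then grow by 25%
		}
	}
	return m
}
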
diff --git a/src/pkg/runtime/softfloat64.go b/src/pkg/runtime/softfloat64.go
index d9bbe5def..e0c3b7b73 100644
--- a/src/pkg/runtime/softfloat64.go
+++ b/src/pkg/runtime/softfloat64.go
@@ -11,7 +11,7 @@ package runtime
const (
mantbits64 uint = 52
expbits64 uint = 11
- bias64 = -1<<(expbits64-1) + 1
+ bias64 = -1<<(expbits64-1) + 1
nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1
inf64 uint64 = (1<<expbits64 - 1) << mantbits64
@@ -19,7 +19,7 @@ const (
mantbits32 uint = 23
expbits32 uint = 8
- bias32 = -1<<(expbits32-1) + 1
+ bias32 = -1<<(expbits32-1) + 1
nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1
inf32 uint32 = (1<<expbits32 - 1) << mantbits32
diff --git a/src/pkg/runtime/stack.h b/src/pkg/runtime/stack.h
index ebf0462b5..2b6b0e387 100644
--- a/src/pkg/runtime/stack.h
+++ b/src/pkg/runtime/stack.h
@@ -53,6 +53,16 @@ functions to make sure that this limit cannot be violated.
*/
enum {
+ // StackSystem is a number of additional bytes to add
+ // to each stack below the usual guard area for OS-specific
+ // purposes like signal handling. Used on Windows because
+ // it does not use a separate stack.
+#ifdef __WINDOWS__
+ StackSystem = 2048,
+#else
+ StackSystem = 0,
+#endif
+
// The amount of extra stack to allocate beyond the size
// needed for the single frame that triggered the split.
StackExtra = 1024,
@@ -73,7 +83,7 @@ enum {
// The stack guard is a pointer this many bytes above the
// bottom of the stack.
- StackGuard = 256,
+ StackGuard = 256 + StackSystem,
// After a stack split check the SP is allowed to be this
// many bytes below the stack guard. This saves an instruction
@@ -82,5 +92,5 @@ enum {
// The maximum number of bytes that a chain of NOSPLIT
// functions can use.
- StackLimit = StackGuard - StackSmall,
+ StackLimit = StackGuard - StackSystem - StackSmall,
};
diff --git a/src/pkg/strconv/atob.go b/src/pkg/strconv/atob.go
index 69fa2292a..98ce75079 100644
--- a/src/pkg/strconv/atob.go
+++ b/src/pkg/strconv/atob.go
@@ -7,8 +7,8 @@ package strconv
import "os"
// Atob returns the boolean value represented by the string.
-// It accepts 1, t, T, TRUE, true, 0, f, F, FALSE, false. Any other value returns
-// an error.
+// It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False.
+// Any other value returns an error.
func Atob(str string) (value bool, err os.Error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True":
diff --git a/src/pkg/strconv/atob_test.go b/src/pkg/strconv/atob_test.go
index 497df5b18..541e60d1e 100644
--- a/src/pkg/strconv/atob_test.go
+++ b/src/pkg/strconv/atob_test.go
@@ -24,11 +24,13 @@ var atobtests = []atobTest{
{"F", false, nil},
{"FALSE", false, nil},
{"false", false, nil},
+ {"False", false, nil},
{"1", true, nil},
{"t", true, nil},
{"T", true, nil},
{"TRUE", true, nil},
{"true", true, nil},
+ {"True", true, nil},
}
func TestAtob(t *testing.T) {
diff --git a/src/pkg/strings/reader.go b/src/pkg/strings/reader.go
index 914faa003..4eae90e73 100644
--- a/src/pkg/strings/reader.go
+++ b/src/pkg/strings/reader.go
@@ -18,10 +18,7 @@ func (r *Reader) Read(b []byte) (n int, err os.Error) {
if len(s) == 0 {
return 0, os.EOF
}
- for n < len(s) && n < len(b) {
- b[n] = s[n]
- n++
- }
+ n = copy(b, s)
*r = s[n:]
return
}
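
The rewrite above relies on the builtin copy returning min(len(dst), len(src)) and accepting a string source, which is exactly what the removed byte loop computed. Not part of the patch:

package main

import "fmt"

func main() {
	src := "hello, reader"
	dst := make([]byte, 5)
	n := copy(dst, src)         // copies 5 bytes; copy from string to []byte is allowed
	fmt.Println(n, string(dst)) // 5 hello
}
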
diff --git a/src/pkg/sync/atomic/asm_linux_arm.s b/src/pkg/sync/atomic/asm_linux_arm.s
index 5e7aea292..72f8d746b 100644
--- a/src/pkg/sync/atomic/asm_linux_arm.s
+++ b/src/pkg/sync/atomic/asm_linux_arm.s
@@ -13,6 +13,12 @@
// LR = return address
// The function returns with CS true if the swap happened.
// http://lxr.linux.no/linux+v2.6.37.2/arch/arm/kernel/entry-armv.S#L850
+// On older kernels (before 2.6.24) the function can incorrectly
+// report a conflict, so we have to double-check the compare ourselves
+// and retry if necessary.
+//
+// http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b49c0f24cf6744a3f4fd09289fe7cade349dead5
+//
TEXT cas<>(SB),7,$0
MOVW $0xffff0fc0, PC
@@ -23,12 +29,23 @@ TEXT ·CompareAndSwapInt32(SB),7,$0
TEXT ·CompareAndSwapUint32(SB),7,$0
MOVW valptr+0(FP), R2
MOVW old+4(FP), R0
+casagain:
MOVW new+8(FP), R1
BL cas<>(SB)
- MOVW $0, R0
- MOVW.CS $1, R0
+ BCC cascheck
+ MOVW $1, R0
+casret:
MOVW R0, ret+12(FP)
RET
+cascheck:
+ // Kernel lies; double-check.
+ MOVW valptr+0(FP), R2
+ MOVW old+4(FP), R0
+ MOVW 0(R2), R3
+ CMP R0, R3
+ BEQ casagain
+ MOVW $0, R0
+ B casret
TEXT ·CompareAndSwapUintptr(SB),7,$0
B ·CompareAndSwapUint32(SB)
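
The retry added above can be sketched in Go (pseudo-code, not part of the patch); the cas parameter stands in for the kernel's __kernel_cmpxchg helper, which pre-2.6.24 kernels may cause to report a spurious failure.

package casretry

// compareAndSwap retries when the helper reports failure but the word
// still holds the expected old value, matching the assembly above.
func compareAndSwap(addr *uint32, old, new uint32, cas func(*uint32, uint32, uint32) bool) bool {
	for {
		if cas(addr, old, new) {
			return true // swap happened
		}
		if *addr != old {
			return false // genuine mismatch
		}
		// Spurious failure: value unchanged, so try again.
	}
}
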
diff --git a/src/pkg/syscall/mkall.sh b/src/pkg/syscall/mkall.sh
index a2e6c5d71..0a4ca0326 100755
--- a/src/pkg/syscall/mkall.sh
+++ b/src/pkg/syscall/mkall.sh
@@ -109,12 +109,12 @@ _* | *_ | _)
freebsd_386)
mkerrors="$mkerrors -f -m32"
mksyscall="./mksyscall.pl -l32"
- mksysnum="curl -s 'http://svn.freebsd.org/viewvc/base/head/sys/kern/syscalls.master?view=markup' | ./mksysnum_freebsd.pl"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/head/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="godefs -gsyscall -f-m32"
;;
freebsd_amd64)
mkerrors="$mkerrors -f -m64"
- mksysnum="curl -s 'http://svn.freebsd.org/viewvc/base/head/sys/kern/syscalls.master?view=markup' | ./mksysnum_freebsd.pl"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/head/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="godefs -gsyscall -f-m64"
;;
darwin_386)
diff --git a/src/pkg/syscall/mkerrors.sh b/src/pkg/syscall/mkerrors.sh
index 0bfd9af1d..3916174aa 100755
--- a/src/pkg/syscall/mkerrors.sh
+++ b/src/pkg/syscall/mkerrors.sh
@@ -21,17 +21,21 @@ includes_Linux='
#define _FILE_OFFSET_BITS 64
#define _GNU_SOURCE
-#include <sys/types.h>
+#include <bits/sockaddr.h>
#include <sys/epoll.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/stat.h>
-#include <linux/ptrace.h>
-#include <linux/wait.h>
+#include <sys/types.h>
+#include <linux/if_addr.h>
#include <linux/if_tun.h>
+#include <linux/netlink.h>
#include <linux/reboot.h>
+#include <linux/rtnetlink.h>
+#include <linux/ptrace.h>
+#include <linux/wait.h>
#include <net/if.h>
#include <netpacket/packet.h>
'
@@ -132,6 +136,8 @@ done
$2 ~ /^(O|F|FD|NAME|S|PTRACE)_/ ||
$2 ~ /^LINUX_REBOOT_CMD_/ ||
$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
+ $2 !~ "NLA_TYPE_MASK" &&
+ $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|RTM)_/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^(IFF|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
$2 ~ /^BIOC/ ||
diff --git a/src/pkg/syscall/mksyscall_windows.pl b/src/pkg/syscall/mksyscall_windows.pl
index d92ac3d28..fb5a1272b 100755
--- a/src/pkg/syscall/mksyscall_windows.pl
+++ b/src/pkg/syscall/mksyscall_windows.pl
@@ -156,6 +156,7 @@ while(<>) {
$text .= "\tvar _p$n uint32\n";
$text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n";
push @args, "uintptr(_p$n)";
+ $n++;
} else {
push @args, "uintptr($name)";
}
diff --git a/src/pkg/syscall/syscall_bsd.go b/src/pkg/syscall/syscall_bsd.go
index 9f1244f13..766a56db5 100644
--- a/src/pkg/syscall/syscall_bsd.go
+++ b/src/pkg/syscall/syscall_bsd.go
@@ -191,9 +191,10 @@ func (sa *SockaddrInet4) sockaddr() (uintptr, _Socklen, int) {
}
type SockaddrInet6 struct {
- Port int
- Addr [16]byte
- raw RawSockaddrInet6
+ Port int
+ ZoneId uint32
+ Addr [16]byte
+ raw RawSockaddrInet6
}
func (sa *SockaddrInet6) sockaddr() (uintptr, _Socklen, int) {
@@ -205,6 +206,7 @@ func (sa *SockaddrInet6) sockaddr() (uintptr, _Socklen, int) {
p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
p[0] = byte(sa.Port >> 8)
p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
for i := 0; i < len(sa.Addr); i++ {
sa.raw.Addr[i] = sa.Addr[i]
}
@@ -297,6 +299,7 @@ func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, int) {
sa := new(SockaddrInet6)
p := (*[2]byte)(unsafe.Pointer(&pp.Port))
sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
for i := 0; i < len(sa.Addr); i++ {
sa.Addr[i] = pp.Addr[i]
}
diff --git a/src/pkg/syscall/syscall_darwin.go b/src/pkg/syscall/syscall_darwin.go
index 30b57cf55..9e153b73d 100644
--- a/src/pkg/syscall/syscall_darwin.go
+++ b/src/pkg/syscall/syscall_darwin.go
@@ -56,6 +56,11 @@ func ParseDirent(buf []byte, max int, names []string) (consumed int, count int,
return origlen - len(buf), count, names
}
+// TODO
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) {
+ return -1, ENOSYS
+}
+
/*
* Wrapped
*/
diff --git a/src/pkg/syscall/syscall_freebsd.go b/src/pkg/syscall/syscall_freebsd.go
index 242503dd7..a38c8ecd3 100644
--- a/src/pkg/syscall/syscall_freebsd.go
+++ b/src/pkg/syscall/syscall_freebsd.go
@@ -56,6 +56,11 @@ func ParseDirent(buf []byte, max int, names []string) (consumed int, count int,
return origlen - len(buf), count, names
}
+// TODO
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) {
+ return -1, ENOSYS
+}
+
/*
* Exposed directly
*/
diff --git a/src/pkg/syscall/syscall_linux.go b/src/pkg/syscall/syscall_linux.go
index 4a3797c20..9e9037ea1 100644
--- a/src/pkg/syscall/syscall_linux.go
+++ b/src/pkg/syscall/syscall_linux.go
@@ -219,9 +219,10 @@ func (sa *SockaddrInet4) sockaddr() (uintptr, _Socklen, int) {
}
type SockaddrInet6 struct {
- Port int
- Addr [16]byte
- raw RawSockaddrInet6
+ Port int
+ ZoneId uint32
+ Addr [16]byte
+ raw RawSockaddrInet6
}
func (sa *SockaddrInet6) sockaddr() (uintptr, _Socklen, int) {
@@ -232,6 +233,7 @@ func (sa *SockaddrInet6) sockaddr() (uintptr, _Socklen, int) {
p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
p[0] = byte(sa.Port >> 8)
p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
for i := 0; i < len(sa.Addr); i++ {
sa.raw.Addr[i] = sa.Addr[i]
}
@@ -290,8 +292,33 @@ func (sa *SockaddrLinklayer) sockaddr() (uintptr, _Socklen, int) {
return uintptr(unsafe.Pointer(&sa.raw)), SizeofSockaddrLinklayer, 0
}
+type SockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+ raw RawSockaddrNetlink
+}
+
+func (sa *SockaddrNetlink) sockaddr() (uintptr, _Socklen, int) {
+ sa.raw.Family = AF_NETLINK
+ sa.raw.Pad = sa.Pad
+ sa.raw.Pid = sa.Pid
+ sa.raw.Groups = sa.Groups
+ return uintptr(unsafe.Pointer(&sa.raw)), SizeofSockaddrNetlink, 0
+}
+
func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, int) {
switch rsa.Addr.Family {
+ case AF_NETLINK:
+ pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
+ sa := new(SockaddrNetlink)
+ sa.Family = pp.Family
+ sa.Pad = pp.Pad
+ sa.Pid = pp.Pid
+ sa.Groups = pp.Groups
+ return sa, 0
+
case AF_PACKET:
pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
sa := new(SockaddrLinklayer)
@@ -345,6 +372,7 @@ func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, int) {
sa := new(SockaddrInet6)
p := (*[2]byte)(unsafe.Pointer(&pp.Port))
sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
for i := 0; i < len(sa.Addr); i++ {
sa.Addr[i] = pp.Addr[i]
}
@@ -489,7 +517,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
oobn = int(msg.Controllen)
recvflags = int(msg.Flags)
// source address is only specified if the socket is unconnected
- if rsa.Addr.Family != 0 {
+ if rsa.Addr.Family != AF_UNSPEC {
from, errno = anyToSockaddr(&rsa)
}
return
@@ -932,7 +960,6 @@ func Munmap(b []byte) (errno int) {
// Semget
// Semop
// Semtimedop
-// Sendfile
// SetMempolicy
// SetRobustList
// SetThreadArea
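
Usage sketch for the new netlink support (not part of the patch), assuming the pre-Go1 syscall API of this tree with integer errno returns:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	fd, errno := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE)
	if errno != 0 {
		fmt.Println("socket:", syscall.Errstr(errno))
		return
	}
	defer syscall.Close(fd)

	// Bind with the new SockaddrNetlink; Pid 0 lets the kernel assign one.
	sa := &syscall.SockaddrNetlink{Family: syscall.AF_NETLINK}
	if errno = syscall.Bind(fd, sa); errno != 0 {
		fmt.Println("bind:", syscall.Errstr(errno))
		return
	}
	fmt.Println("netlink route socket bound, fd =", fd)
}
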
diff --git a/src/pkg/syscall/syscall_linux_386.go b/src/pkg/syscall/syscall_linux_386.go
index 2b6bdebf8..5195179a2 100644
--- a/src/pkg/syscall/syscall_linux_386.go
+++ b/src/pkg/syscall/syscall_linux_386.go
@@ -41,6 +41,7 @@ func NsecToTimeval(nsec int64) (tv Timeval) {
//sys Lstat(path string, stat *Stat_t) (errno int) = SYS_LSTAT64
//sys Pread(fd int, p []byte, offset int64) (n int, errno int) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, errno int) = SYS_PWRITE64
+//sys Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) = SYS_SENDFILE64
//sys Setfsgid(gid int) (errno int) = SYS_SETFSGID32
//sys Setfsuid(uid int) (errno int) = SYS_SETFSUID32
//sysnb Setgid(gid int) (errno int) = SYS_SETGID32
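
Not part of the patch: a sketch of calling the newly exposed Sendfile with the signature declared above (integer errno); srcFd and dstFd are placeholders for descriptors obtained elsewhere.

package sketch

import (
	"fmt"
	"syscall"
)

// sendChunk copies up to 64KB from srcFd to dstFd in one Sendfile call,
// advancing off to the next unread byte on success.
func sendChunk(dstFd, srcFd int) {
	var off int64
	n, errno := syscall.Sendfile(dstFd, srcFd, &off, 64*1024)
	if errno != 0 {
		fmt.Println("sendfile:", syscall.Errstr(errno))
		return
	}
	fmt.Println("sent", n, "bytes; next offset", off)
}
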
diff --git a/src/pkg/syscall/syscall_linux_amd64.go b/src/pkg/syscall/syscall_linux_amd64.go
index f2a4acfe9..db9524668 100644
--- a/src/pkg/syscall/syscall_linux_amd64.go
+++ b/src/pkg/syscall/syscall_linux_amd64.go
@@ -22,6 +22,7 @@ package syscall
//sys Pwrite(fd int, p []byte, offset int64) (n int, errno int) = SYS_PWRITE64
//sys Seek(fd int, offset int64, whence int) (off int64, errno int) = SYS_LSEEK
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, errno int)
+//sys Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int)
//sys Setfsgid(gid int) (errno int)
//sys Setfsuid(uid int) (errno int)
//sysnb Setgid(gid int) (errno int)
diff --git a/src/pkg/syscall/syscall_linux_arm.go b/src/pkg/syscall/syscall_linux_arm.go
index 458745885..37845301f 100644
--- a/src/pkg/syscall/syscall_linux_arm.go
+++ b/src/pkg/syscall/syscall_linux_arm.go
@@ -92,6 +92,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, errno int)
//sys Lchown(path string, uid int, gid int) (errno int)
//sys Listen(s int, n int) (errno int)
//sys Lstat(path string, stat *Stat_t) (errno int) = SYS_LSTAT64
+//sys Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) = SYS_SENDFILE64
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, errno int) = SYS__NEWSELECT
//sys Setfsgid(gid int) (errno int)
//sys Setfsuid(uid int) (errno int)
diff --git a/src/pkg/syscall/syscall_plan9.go b/src/pkg/syscall/syscall_plan9.go
index 831cbddb2..730126f23 100644
--- a/src/pkg/syscall/syscall_plan9.go
+++ b/src/pkg/syscall/syscall_plan9.go
@@ -327,6 +327,11 @@ func Getgroups() (gids []int, err Error) {
return make([]int, 0), nil
}
+// TODO
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) {
+ return -1, ENOSYS
+}
+
//sys Dup(oldfd int, newfd int) (fd int, err Error)
//sys Open(path string, mode int) (fd int, err Error)
//sys Create(path string, mode int, perm uint32) (fd int, err Error)
diff --git a/src/pkg/syscall/syscall_windows.go b/src/pkg/syscall/syscall_windows.go
index 1fbb3ccbf..6ba031faf 100644
--- a/src/pkg/syscall/syscall_windows.go
+++ b/src/pkg/syscall/syscall_windows.go
@@ -102,6 +102,11 @@ func getSysProcAddr(m uint32, pname string) uintptr {
// Implemented in ../runtime/windows/syscall.cgo
func NewCallback(fn interface{}) uintptr
+// TODO
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) {
+ return -1, ENOSYS
+}
+
// windows api calls
//sys GetLastError() (lasterrno int)
@@ -161,6 +166,12 @@ func NewCallback(fn interface{}) uintptr
//sys SetHandleInformation(handle int32, mask uint32, flags uint32) (errno int)
//sys FlushFileBuffers(handle int32) (errno int)
//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, errno int) = kernel32.GetFullPathNameW
+//sys CreateFileMapping(fhandle int32, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle int32, errno int) = kernel32.CreateFileMappingW
+//sys MapViewOfFile(handle int32, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, errno int)
+//sys UnmapViewOfFile(addr uintptr) (errno int)
+//sys FlushViewOfFile(addr uintptr, length uintptr) (errno int)
+//sys VirtualLock(addr uintptr, length uintptr) (errno int)
+//sys VirtualUnlock(addr uintptr, length uintptr) (errno int)
// syscall interface implementation for other packages
@@ -525,8 +536,9 @@ func (sa *SockaddrInet4) sockaddr() (uintptr, int32, int) {
}
type SockaddrInet6 struct {
- Port int
- Addr [16]byte
+ Port int
+ ZoneId uint32
+ Addr [16]byte
}
func (sa *SockaddrInet6) sockaddr() (uintptr, int32, int) {
diff --git a/src/pkg/syscall/types_linux.c b/src/pkg/syscall/types_linux.c
index ec94c84df..1eaa58255 100644
--- a/src/pkg/syscall/types_linux.c
+++ b/src/pkg/syscall/types_linux.c
@@ -38,6 +38,8 @@ Input to godefs. See also mkerrors.sh and mkall.sh
#include <sys/user.h>
#include <sys/utsname.h>
#include <sys/wait.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
#include <time.h>
#include <unistd.h>
#include <ustat.h>
@@ -93,6 +95,7 @@ union sockaddr_all {
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_ll s5;
+ struct sockaddr_nl s6;
};
struct sockaddr_any {
@@ -104,6 +107,7 @@ typedef struct sockaddr_in $RawSockaddrInet4;
typedef struct sockaddr_in6 $RawSockaddrInet6;
typedef struct sockaddr_un $RawSockaddrUnix;
typedef struct sockaddr_ll $RawSockaddrLinklayer;
+typedef struct sockaddr_nl $RawSockaddrNetlink;
typedef struct sockaddr $RawSockaddr;
typedef struct sockaddr_any $RawSockaddrAny;
typedef socklen_t $_Socklen;
@@ -120,6 +124,7 @@ enum {
$SizeofSockaddrAny = sizeof(struct sockaddr_any),
$SizeofSockaddrUnix = sizeof(struct sockaddr_un),
$SizeofSockaddrLinklayer = sizeof(struct sockaddr_ll),
+ $SizeofSockaddrNetlink = sizeof(struct sockaddr_nl),
$SizeofLinger = sizeof(struct linger),
$SizeofIpMreq = sizeof(struct ip_mreq),
$SizeofMsghdr = sizeof(struct msghdr),
@@ -127,15 +132,66 @@ enum {
$SizeofUcred = sizeof(struct ucred),
};
+// Netlink routing and interface messages
+
+enum {
+ $IFA_UNSPEC = IFA_UNSPEC,
+ $IFA_ADDRESS = IFA_ADDRESS,
+ $IFA_LOCAL = IFA_LOCAL,
+ $IFA_LABEL = IFA_LABEL,
+ $IFA_BROADCAST = IFA_BROADCAST,
+ $IFA_ANYCAST = IFA_ANYCAST,
+ $IFA_CACHEINFO = IFA_CACHEINFO,
+ $IFA_MULTICAST = IFA_MULTICAST,
+ $IFLA_UNSPEC = IFLA_UNSPEC,
+ $IFLA_ADDRESS = IFLA_ADDRESS,
+ $IFLA_BROADCAST = IFLA_BROADCAST,
+ $IFLA_IFNAME = IFLA_IFNAME,
+ $IFLA_MTU = IFLA_MTU,
+ $IFLA_LINK = IFLA_LINK,
+ $IFLA_QDISC = IFLA_QDISC,
+ $IFLA_STATS = IFLA_STATS,
+ $IFLA_COST = IFLA_COST,
+ $IFLA_PRIORITY = IFLA_PRIORITY,
+ $IFLA_MASTER = IFLA_MASTER,
+ $IFLA_WIRELESS = IFLA_WIRELESS,
+ $IFLA_PROTINFO = IFLA_PROTINFO,
+ $IFLA_TXQLEN = IFLA_TXQLEN,
+ $IFLA_MAP = IFLA_MAP,
+ $IFLA_WEIGHT = IFLA_WEIGHT,
+ $IFLA_OPERSTATE = IFLA_OPERSTATE,
+ $IFLA_LINKMODE = IFLA_LINKMODE,
+ $IFLA_LINKINFO = IFLA_LINKINFO,
+ $IFLA_NET_NS_PID = IFLA_NET_NS_PID,
+ $IFLA_IFALIAS = IFLA_IFALIAS,
+ $IFLA_MAX = IFLA_MAX,
+ $SizeofNlMsghdr = sizeof(struct nlmsghdr),
+ $SizeofNlMsgerr = sizeof(struct nlmsgerr),
+ $SizeofRtGenmsg = sizeof(struct rtgenmsg),
+ $SizeofNlAttr = sizeof(struct nlattr),
+ $SizeofRtAttr = sizeof(struct rtattr),
+ $SizeofIfInfomsg = sizeof(struct ifinfomsg),
+ $SizeofIfAddrmsg = sizeof(struct ifaddrmsg),
+ $SizeofRtmsg = sizeof(struct rtmsg),
+};
+
+typedef struct nlmsghdr $NlMsghdr;
+typedef struct nlmsgerr $NlMsgerr;
+typedef struct rtgenmsg $RtGenmsg;
+typedef struct nlattr $NlAttr;
+typedef struct rtattr $RtAttr;
+typedef struct ifinfomsg $IfInfomsg;
+typedef struct ifaddrmsg $IfAddrmsg;
+typedef struct rtmsg $RtMsg;
// Inotify
+
typedef struct inotify_event $InotifyEvent;
enum {
$SizeofInotifyEvent = sizeof(struct inotify_event)
};
-
// Ptrace
// Register structures
diff --git a/src/pkg/syscall/zerrors_linux_386.go b/src/pkg/syscall/zerrors_linux_386.go
index 3fc7cc738..3fba3ae94 100644
--- a/src/pkg/syscall/zerrors_linux_386.go
+++ b/src/pkg/syscall/zerrors_linux_386.go
@@ -241,6 +241,16 @@ const (
F_ULOCK = 0
F_UNLCK = 0x2
F_WRLCK = 0x1
+ IFA_F_DADFAILED = 0x8
+ IFA_F_DEPRECATED = 0x20
+ IFA_F_HOMEADDRESS = 0x10
+ IFA_F_NODAD = 0x2
+ IFA_F_OPTIMISTIC = 0x4
+ IFA_F_PERMANENT = 0x80
+ IFA_F_SECONDARY = 0x1
+ IFA_F_TEMPORARY = 0x1
+ IFA_F_TENTATIVE = 0x40
+ IFA_MAX = 0x7
IFF_ALLMULTI = 0x200
IFF_AUTOMEDIA = 0x4000
IFF_BROADCAST = 0x2
@@ -494,6 +504,53 @@ const (
MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10
NAME_MAX = 0xff
+ NETLINK_ADD_MEMBERSHIP = 0x1
+ NETLINK_AUDIT = 0x9
+ NETLINK_BROADCAST_ERROR = 0x4
+ NETLINK_CONNECTOR = 0xb
+ NETLINK_DNRTMSG = 0xe
+ NETLINK_DROP_MEMBERSHIP = 0x2
+ NETLINK_ECRYPTFS = 0x13
+ NETLINK_FIB_LOOKUP = 0xa
+ NETLINK_FIREWALL = 0x3
+ NETLINK_GENERIC = 0x10
+ NETLINK_INET_DIAG = 0x4
+ NETLINK_IP6_FW = 0xd
+ NETLINK_ISCSI = 0x8
+ NETLINK_KOBJECT_UEVENT = 0xf
+ NETLINK_NETFILTER = 0xc
+ NETLINK_NFLOG = 0x5
+ NETLINK_NO_ENOBUFS = 0x5
+ NETLINK_PKTINFO = 0x3
+ NETLINK_ROUTE = 0
+ NETLINK_SCSITRANSPORT = 0x12
+ NETLINK_SELINUX = 0x7
+ NETLINK_UNUSED = 0x1
+ NETLINK_USERSOCK = 0x2
+ NETLINK_XFRM = 0x6
+ NLA_ALIGNTO = 0x4
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+ NLA_HDRLEN = 0x4
+ NLMSG_ALIGNTO = 0x4
+ NLMSG_DONE = 0x3
+ NLMSG_ERROR = 0x2
+ NLMSG_HDRLEN = 0x10
+ NLMSG_MIN_TYPE = 0x10
+ NLMSG_NOOP = 0x1
+ NLMSG_OVERRUN = 0x4
+ NLM_F_ACK = 0x4
+ NLM_F_APPEND = 0x800
+ NLM_F_ATOMIC = 0x400
+ NLM_F_CREATE = 0x400
+ NLM_F_DUMP = 0x300
+ NLM_F_ECHO = 0x8
+ NLM_F_EXCL = 0x200
+ NLM_F_MATCH = 0x200
+ NLM_F_MULTI = 0x2
+ NLM_F_REPLACE = 0x100
+ NLM_F_REQUEST = 0x1
+ NLM_F_ROOT = 0x100
O_ACCMODE = 0x3
O_APPEND = 0x400
O_ASYNC = 0x2000
@@ -590,6 +647,75 @@ const (
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
PTRACE_TRACEME = 0
+ RTAX_ADVMSS = 0x8
+ RTAX_CWND = 0x7
+ RTAX_FEATURES = 0xc
+ RTAX_FEATURE_ALLFRAG = 0x8
+ RTAX_FEATURE_ECN = 0x1
+ RTAX_FEATURE_SACK = 0x2
+ RTAX_FEATURE_TIMESTAMP = 0x4
+ RTAX_HOPLIMIT = 0xa
+ RTAX_INITCWND = 0xb
+ RTAX_LOCK = 0x1
+ RTAX_MAX = 0xd
+ RTAX_MTU = 0x2
+ RTAX_REORDERING = 0x9
+ RTAX_RTO_MIN = 0xd
+ RTAX_RTT = 0x4
+ RTAX_RTTVAR = 0x5
+ RTAX_SSTHRESH = 0x6
+ RTAX_UNSPEC = 0
+ RTAX_WINDOW = 0x3
+ RTA_ALIGNTO = 0x4
+ RTA_MAX = 0xf
+ RTM_BASE = 0x10
+ RTM_DELACTION = 0x31
+ RTM_DELADDR = 0x15
+ RTM_DELADDRLABEL = 0x49
+ RTM_DELLINK = 0x11
+ RTM_DELNEIGH = 0x1d
+ RTM_DELQDISC = 0x25
+ RTM_DELROUTE = 0x19
+ RTM_DELRULE = 0x21
+ RTM_DELTCLASS = 0x29
+ RTM_DELTFILTER = 0x2d
+ RTM_F_CLONED = 0x200
+ RTM_F_EQUALIZE = 0x400
+ RTM_F_NOTIFY = 0x100
+ RTM_F_PREFIX = 0x800
+ RTM_GETACTION = 0x32
+ RTM_GETADDR = 0x16
+ RTM_GETADDRLABEL = 0x4a
+ RTM_GETANYCAST = 0x3e
+ RTM_GETDCB = 0x4e
+ RTM_GETLINK = 0x12
+ RTM_GETMULTICAST = 0x3a
+ RTM_GETNEIGH = 0x1e
+ RTM_GETNEIGHTBL = 0x42
+ RTM_GETQDISC = 0x26
+ RTM_GETROUTE = 0x1a
+ RTM_GETRULE = 0x22
+ RTM_GETTCLASS = 0x2a
+ RTM_GETTFILTER = 0x2e
+ RTM_MAX = 0x4f
+ RTM_NEWACTION = 0x30
+ RTM_NEWADDR = 0x14
+ RTM_NEWADDRLABEL = 0x48
+ RTM_NEWLINK = 0x10
+ RTM_NEWNDUSEROPT = 0x44
+ RTM_NEWNEIGH = 0x1c
+ RTM_NEWNEIGHTBL = 0x40
+ RTM_NEWPREFIX = 0x34
+ RTM_NEWQDISC = 0x24
+ RTM_NEWROUTE = 0x18
+ RTM_NEWRULE = 0x20
+ RTM_NEWTCLASS = 0x28
+ RTM_NEWTFILTER = 0x2c
+ RTM_NR_FAMILIES = 0x10
+ RTM_NR_MSGTYPES = 0x40
+ RTM_SETDCB = 0x4f
+ RTM_SETLINK = 0x13
+ RTM_SETNEIGHTBL = 0x43
SCM_CREDENTIALS = 0x2
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x1d
diff --git a/src/pkg/syscall/zerrors_linux_amd64.go b/src/pkg/syscall/zerrors_linux_amd64.go
index d5efdf55d..0e57b9333 100644
--- a/src/pkg/syscall/zerrors_linux_amd64.go
+++ b/src/pkg/syscall/zerrors_linux_amd64.go
@@ -241,6 +241,16 @@ const (
F_ULOCK = 0
F_UNLCK = 0x2
F_WRLCK = 0x1
+ IFA_F_DADFAILED = 0x8
+ IFA_F_DEPRECATED = 0x20
+ IFA_F_HOMEADDRESS = 0x10
+ IFA_F_NODAD = 0x2
+ IFA_F_OPTIMISTIC = 0x4
+ IFA_F_PERMANENT = 0x80
+ IFA_F_SECONDARY = 0x1
+ IFA_F_TEMPORARY = 0x1
+ IFA_F_TENTATIVE = 0x40
+ IFA_MAX = 0x7
IFF_ALLMULTI = 0x200
IFF_AUTOMEDIA = 0x4000
IFF_BROADCAST = 0x2
@@ -494,6 +504,53 @@ const (
MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10
NAME_MAX = 0xff
+ NETLINK_ADD_MEMBERSHIP = 0x1
+ NETLINK_AUDIT = 0x9
+ NETLINK_BROADCAST_ERROR = 0x4
+ NETLINK_CONNECTOR = 0xb
+ NETLINK_DNRTMSG = 0xe
+ NETLINK_DROP_MEMBERSHIP = 0x2
+ NETLINK_ECRYPTFS = 0x13
+ NETLINK_FIB_LOOKUP = 0xa
+ NETLINK_FIREWALL = 0x3
+ NETLINK_GENERIC = 0x10
+ NETLINK_INET_DIAG = 0x4
+ NETLINK_IP6_FW = 0xd
+ NETLINK_ISCSI = 0x8
+ NETLINK_KOBJECT_UEVENT = 0xf
+ NETLINK_NETFILTER = 0xc
+ NETLINK_NFLOG = 0x5
+ NETLINK_NO_ENOBUFS = 0x5
+ NETLINK_PKTINFO = 0x3
+ NETLINK_ROUTE = 0
+ NETLINK_SCSITRANSPORT = 0x12
+ NETLINK_SELINUX = 0x7
+ NETLINK_UNUSED = 0x1
+ NETLINK_USERSOCK = 0x2
+ NETLINK_XFRM = 0x6
+ NLA_ALIGNTO = 0x4
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+ NLA_HDRLEN = 0x4
+ NLMSG_ALIGNTO = 0x4
+ NLMSG_DONE = 0x3
+ NLMSG_ERROR = 0x2
+ NLMSG_HDRLEN = 0x10
+ NLMSG_MIN_TYPE = 0x10
+ NLMSG_NOOP = 0x1
+ NLMSG_OVERRUN = 0x4
+ NLM_F_ACK = 0x4
+ NLM_F_APPEND = 0x800
+ NLM_F_ATOMIC = 0x400
+ NLM_F_CREATE = 0x400
+ NLM_F_DUMP = 0x300
+ NLM_F_ECHO = 0x8
+ NLM_F_EXCL = 0x200
+ NLM_F_MATCH = 0x200
+ NLM_F_MULTI = 0x2
+ NLM_F_REPLACE = 0x100
+ NLM_F_REQUEST = 0x1
+ NLM_F_ROOT = 0x100
O_ACCMODE = 0x3
O_APPEND = 0x400
O_ASYNC = 0x2000
@@ -591,6 +648,75 @@ const (
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
PTRACE_TRACEME = 0
+ RTAX_ADVMSS = 0x8
+ RTAX_CWND = 0x7
+ RTAX_FEATURES = 0xc
+ RTAX_FEATURE_ALLFRAG = 0x8
+ RTAX_FEATURE_ECN = 0x1
+ RTAX_FEATURE_SACK = 0x2
+ RTAX_FEATURE_TIMESTAMP = 0x4
+ RTAX_HOPLIMIT = 0xa
+ RTAX_INITCWND = 0xb
+ RTAX_LOCK = 0x1
+ RTAX_MAX = 0xd
+ RTAX_MTU = 0x2
+ RTAX_REORDERING = 0x9
+ RTAX_RTO_MIN = 0xd
+ RTAX_RTT = 0x4
+ RTAX_RTTVAR = 0x5
+ RTAX_SSTHRESH = 0x6
+ RTAX_UNSPEC = 0
+ RTAX_WINDOW = 0x3
+ RTA_ALIGNTO = 0x4
+ RTA_MAX = 0xf
+ RTM_BASE = 0x10
+ RTM_DELACTION = 0x31
+ RTM_DELADDR = 0x15
+ RTM_DELADDRLABEL = 0x49
+ RTM_DELLINK = 0x11
+ RTM_DELNEIGH = 0x1d
+ RTM_DELQDISC = 0x25
+ RTM_DELROUTE = 0x19
+ RTM_DELRULE = 0x21
+ RTM_DELTCLASS = 0x29
+ RTM_DELTFILTER = 0x2d
+ RTM_F_CLONED = 0x200
+ RTM_F_EQUALIZE = 0x400
+ RTM_F_NOTIFY = 0x100
+ RTM_F_PREFIX = 0x800
+ RTM_GETACTION = 0x32
+ RTM_GETADDR = 0x16
+ RTM_GETADDRLABEL = 0x4a
+ RTM_GETANYCAST = 0x3e
+ RTM_GETDCB = 0x4e
+ RTM_GETLINK = 0x12
+ RTM_GETMULTICAST = 0x3a
+ RTM_GETNEIGH = 0x1e
+ RTM_GETNEIGHTBL = 0x42
+ RTM_GETQDISC = 0x26
+ RTM_GETROUTE = 0x1a
+ RTM_GETRULE = 0x22
+ RTM_GETTCLASS = 0x2a
+ RTM_GETTFILTER = 0x2e
+ RTM_MAX = 0x4f
+ RTM_NEWACTION = 0x30
+ RTM_NEWADDR = 0x14
+ RTM_NEWADDRLABEL = 0x48
+ RTM_NEWLINK = 0x10
+ RTM_NEWNDUSEROPT = 0x44
+ RTM_NEWNEIGH = 0x1c
+ RTM_NEWNEIGHTBL = 0x40
+ RTM_NEWPREFIX = 0x34
+ RTM_NEWQDISC = 0x24
+ RTM_NEWROUTE = 0x18
+ RTM_NEWRULE = 0x20
+ RTM_NEWTCLASS = 0x28
+ RTM_NEWTFILTER = 0x2c
+ RTM_NR_FAMILIES = 0x10
+ RTM_NR_MSGTYPES = 0x40
+ RTM_SETDCB = 0x4f
+ RTM_SETLINK = 0x13
+ RTM_SETNEIGHTBL = 0x43
SCM_CREDENTIALS = 0x2
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x1d
diff --git a/src/pkg/syscall/zerrors_linux_arm.go b/src/pkg/syscall/zerrors_linux_arm.go
index 356d51ca5..5be8bfa5c 100644
--- a/src/pkg/syscall/zerrors_linux_arm.go
+++ b/src/pkg/syscall/zerrors_linux_arm.go
@@ -16,9 +16,11 @@ const (
AF_AX25 = 0x3
AF_BLUETOOTH = 0x1f
AF_BRIDGE = 0x7
+ AF_CAN = 0x1d
AF_DECnet = 0xc
AF_ECONET = 0x13
AF_FILE = 0x1
+ AF_IEEE802154 = 0x24
AF_INET = 0x2
AF_INET6 = 0xa
AF_IPX = 0x4
@@ -26,22 +28,35 @@ const (
AF_ISDN = 0x22
AF_IUCV = 0x20
AF_KEY = 0xf
+ AF_LLC = 0x1a
AF_LOCAL = 0x1
- AF_MAX = 0x23
+ AF_MAX = 0x25
AF_NETBEUI = 0xd
AF_NETLINK = 0x10
AF_NETROM = 0x6
AF_PACKET = 0x11
+ AF_PHONET = 0x23
AF_PPPOX = 0x18
+ AF_RDS = 0x15
AF_ROSE = 0xb
AF_ROUTE = 0x10
AF_RXRPC = 0x21
AF_SECURITY = 0xe
AF_SNA = 0x16
+ AF_TIPC = 0x1e
AF_UNIX = 0x1
AF_UNSPEC = 0
AF_WANPIPE = 0x19
AF_X25 = 0x9
+ DT_BLK = 0x6
+ DT_CHR = 0x2
+ DT_DIR = 0x4
+ DT_FIFO = 0x1
+ DT_LNK = 0xa
+ DT_REG = 0x8
+ DT_SOCK = 0xc
+ DT_UNKNOWN = 0
+ DT_WHT = 0xe
E2BIG = 0x7
EACCES = 0xd
EADDRINUSE = 0x62
@@ -206,6 +221,7 @@ const (
F_GETLK = 0xc
F_GETLK64 = 0xc
F_GETOWN = 0x9
+ F_GETOWN_EX = 0x10
F_GETSIG = 0xb
F_LOCK = 0x1
F_NOTIFY = 0x402
@@ -219,6 +235,7 @@ const (
F_SETLKW = 0xe
F_SETLKW64 = 0xe
F_SETOWN = 0x8
+ F_SETOWN_EX = 0xf
F_SETSIG = 0xa
F_SHLCK = 0x8
F_TEST = 0x3
@@ -226,6 +243,39 @@ const (
F_ULOCK = 0
F_UNLCK = 0x2
F_WRLCK = 0x1
+ IFA_F_DADFAILED = 0x8
+ IFA_F_DEPRECATED = 0x20
+ IFA_F_HOMEADDRESS = 0x10
+ IFA_F_NODAD = 0x2
+ IFA_F_OPTIMISTIC = 0x4
+ IFA_F_PERMANENT = 0x80
+ IFA_F_SECONDARY = 0x1
+ IFA_F_TEMPORARY = 0x1
+ IFA_F_TENTATIVE = 0x40
+ IFA_MAX = 0x7
+ IFF_ALLMULTI = 0x200
+ IFF_AUTOMEDIA = 0x4000
+ IFF_BROADCAST = 0x2
+ IFF_DEBUG = 0x4
+ IFF_DYNAMIC = 0x8000
+ IFF_LOOPBACK = 0x8
+ IFF_MASTER = 0x400
+ IFF_MULTICAST = 0x1000
+ IFF_NOARP = 0x80
+ IFF_NOTRAILERS = 0x20
+ IFF_NO_PI = 0x1000
+ IFF_ONE_QUEUE = 0x2000
+ IFF_POINTOPOINT = 0x10
+ IFF_PORTSEL = 0x2000
+ IFF_PROMISC = 0x100
+ IFF_RUNNING = 0x40
+ IFF_SLAVE = 0x800
+ IFF_TAP = 0x2
+ IFF_TUN = 0x1
+ IFF_TUN_EXCL = 0x8000
+ IFF_UP = 0x1
+ IFF_VNET_HDR = 0x4000
+ IFNAMSIZ = 0x10
IN_ACCESS = 0x1
IN_ALL_EVENTS = 0xfff
IN_ATTRIB = 0x4
@@ -389,9 +439,54 @@ const (
LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2
LINUX_REBOOT_MAGIC1 = 0xfee1dead
LINUX_REBOOT_MAGIC2 = 0x28121969
+ MADV_DOFORK = 0xb
+ MADV_DONTFORK = 0xa
+ MADV_DONTNEED = 0x4
+ MADV_HWPOISON = 0x64
+ MADV_MERGEABLE = 0xc
+ MADV_NORMAL = 0
+ MADV_RANDOM = 0x1
+ MADV_REMOVE = 0x9
+ MADV_SEQUENTIAL = 0x2
+ MADV_UNMERGEABLE = 0xd
+ MADV_WILLNEED = 0x3
+ MAP_ANON = 0x20
+ MAP_ANONYMOUS = 0x20
+ MAP_DENYWRITE = 0x800
+ MAP_EXECUTABLE = 0x1000
+ MAP_FILE = 0
+ MAP_FIXED = 0x10
+ MAP_GROWSDOWN = 0x100
+ MAP_LOCKED = 0x2000
+ MAP_NONBLOCK = 0x10000
+ MAP_NORESERVE = 0x4000
+ MAP_POPULATE = 0x8000
+ MAP_PRIVATE = 0x2
+ MAP_SHARED = 0x1
+ MAP_TYPE = 0xf
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
MNT_DETACH = 0x2
MNT_EXPIRE = 0x4
MNT_FORCE = 0x1
+ MSG_CMSG_CLOEXEC = 0x40000000
+ MSG_CONFIRM = 0x800
+ MSG_CTRUNC = 0x8
+ MSG_DONTROUTE = 0x4
+ MSG_DONTWAIT = 0x40
+ MSG_EOR = 0x80
+ MSG_ERRQUEUE = 0x2000
+ MSG_FIN = 0x200
+ MSG_MORE = 0x8000
+ MSG_NOSIGNAL = 0x4000
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_PROXY = 0x10
+ MSG_RST = 0x1000
+ MSG_SYN = 0x400
+ MSG_TRUNC = 0x20
+ MSG_TRYHARD = 0x4
+ MSG_WAITALL = 0x100
MS_ASYNC = 0x1
MS_BIND = 0x1000
MS_INVALIDATE = 0x2
@@ -409,6 +504,53 @@ const (
MS_SYNC = 0x4
MS_SYNCHRONOUS = 0x10
NAME_MAX = 0xff
+ NETLINK_ADD_MEMBERSHIP = 0x1
+ NETLINK_AUDIT = 0x9
+ NETLINK_BROADCAST_ERROR = 0x4
+ NETLINK_CONNECTOR = 0xb
+ NETLINK_DNRTMSG = 0xe
+ NETLINK_DROP_MEMBERSHIP = 0x2
+ NETLINK_ECRYPTFS = 0x13
+ NETLINK_FIB_LOOKUP = 0xa
+ NETLINK_FIREWALL = 0x3
+ NETLINK_GENERIC = 0x10
+ NETLINK_INET_DIAG = 0x4
+ NETLINK_IP6_FW = 0xd
+ NETLINK_ISCSI = 0x8
+ NETLINK_KOBJECT_UEVENT = 0xf
+ NETLINK_NETFILTER = 0xc
+ NETLINK_NFLOG = 0x5
+ NETLINK_NO_ENOBUFS = 0x5
+ NETLINK_PKTINFO = 0x3
+ NETLINK_ROUTE = 0
+ NETLINK_SCSITRANSPORT = 0x12
+ NETLINK_SELINUX = 0x7
+ NETLINK_UNUSED = 0x1
+ NETLINK_USERSOCK = 0x2
+ NETLINK_XFRM = 0x6
+ NLA_ALIGNTO = 0x4
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+ NLA_HDRLEN = 0x4
+ NLMSG_ALIGNTO = 0x4
+ NLMSG_DONE = 0x3
+ NLMSG_ERROR = 0x2
+ NLMSG_HDRLEN = 0x10
+ NLMSG_MIN_TYPE = 0x10
+ NLMSG_NOOP = 0x1
+ NLMSG_OVERRUN = 0x4
+ NLM_F_ACK = 0x4
+ NLM_F_APPEND = 0x800
+ NLM_F_ATOMIC = 0x400
+ NLM_F_CREATE = 0x400
+ NLM_F_DUMP = 0x300
+ NLM_F_ECHO = 0x8
+ NLM_F_EXCL = 0x200
+ NLM_F_MATCH = 0x200
+ NLM_F_MULTI = 0x2
+ NLM_F_REPLACE = 0x100
+ NLM_F_REQUEST = 0x1
+ NLM_F_ROOT = 0x100
O_ACCMODE = 0x3
O_APPEND = 0x400
O_ASYNC = 0x2000
@@ -431,6 +573,27 @@ const (
O_SYNC = 0x1000
O_TRUNC = 0x200
O_WRONLY = 0x1
+ PACKET_ADD_MEMBERSHIP = 0x1
+ PACKET_BROADCAST = 0x1
+ PACKET_DROP_MEMBERSHIP = 0x2
+ PACKET_FASTROUTE = 0x6
+ PACKET_HOST = 0
+ PACKET_LOOPBACK = 0x5
+ PACKET_MR_ALLMULTI = 0x2
+ PACKET_MR_MULTICAST = 0
+ PACKET_MR_PROMISC = 0x1
+ PACKET_MULTICAST = 0x2
+ PACKET_OTHERHOST = 0x3
+ PACKET_OUTGOING = 0x4
+ PACKET_RECV_OUTPUT = 0x3
+ PACKET_RX_RING = 0x5
+ PACKET_STATISTICS = 0x6
+ PROT_EXEC = 0x4
+ PROT_GROWSDOWN = 0x1000000
+ PROT_GROWSUP = 0x2000000
+ PROT_NONE = 0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
PTRACE_ATTACH = 0x10
PTRACE_CONT = 0x7
PTRACE_DETACH = 0x11
@@ -475,6 +638,80 @@ const (
PTRACE_SINGLESTEP = 0x9
PTRACE_SYSCALL = 0x18
PTRACE_TRACEME = 0
+ RTAX_ADVMSS = 0x8
+ RTAX_CWND = 0x7
+ RTAX_FEATURES = 0xc
+ RTAX_FEATURE_ALLFRAG = 0x8
+ RTAX_FEATURE_ECN = 0x1
+ RTAX_FEATURE_SACK = 0x2
+ RTAX_FEATURE_TIMESTAMP = 0x4
+ RTAX_HOPLIMIT = 0xa
+ RTAX_INITCWND = 0xb
+ RTAX_LOCK = 0x1
+ RTAX_MAX = 0xd
+ RTAX_MTU = 0x2
+ RTAX_REORDERING = 0x9
+ RTAX_RTO_MIN = 0xd
+ RTAX_RTT = 0x4
+ RTAX_RTTVAR = 0x5
+ RTAX_SSTHRESH = 0x6
+ RTAX_UNSPEC = 0
+ RTAX_WINDOW = 0x3
+ RTA_ALIGNTO = 0x4
+ RTA_MAX = 0xf
+ RTM_BASE = 0x10
+ RTM_DELACTION = 0x31
+ RTM_DELADDR = 0x15
+ RTM_DELADDRLABEL = 0x49
+ RTM_DELLINK = 0x11
+ RTM_DELNEIGH = 0x1d
+ RTM_DELQDISC = 0x25
+ RTM_DELROUTE = 0x19
+ RTM_DELRULE = 0x21
+ RTM_DELTCLASS = 0x29
+ RTM_DELTFILTER = 0x2d
+ RTM_F_CLONED = 0x200
+ RTM_F_EQUALIZE = 0x400
+ RTM_F_NOTIFY = 0x100
+ RTM_F_PREFIX = 0x800
+ RTM_GETACTION = 0x32
+ RTM_GETADDR = 0x16
+ RTM_GETADDRLABEL = 0x4a
+ RTM_GETANYCAST = 0x3e
+ RTM_GETDCB = 0x4e
+ RTM_GETLINK = 0x12
+ RTM_GETMULTICAST = 0x3a
+ RTM_GETNEIGH = 0x1e
+ RTM_GETNEIGHTBL = 0x42
+ RTM_GETQDISC = 0x26
+ RTM_GETROUTE = 0x1a
+ RTM_GETRULE = 0x22
+ RTM_GETTCLASS = 0x2a
+ RTM_GETTFILTER = 0x2e
+ RTM_MAX = 0x4f
+ RTM_NEWACTION = 0x30
+ RTM_NEWADDR = 0x14
+ RTM_NEWADDRLABEL = 0x48
+ RTM_NEWLINK = 0x10
+ RTM_NEWNDUSEROPT = 0x44
+ RTM_NEWNEIGH = 0x1c
+ RTM_NEWNEIGHTBL = 0x40
+ RTM_NEWPREFIX = 0x34
+ RTM_NEWQDISC = 0x24
+ RTM_NEWROUTE = 0x18
+ RTM_NEWRULE = 0x20
+ RTM_NEWTCLASS = 0x28
+ RTM_NEWTFILTER = 0x2c
+ RTM_NR_FAMILIES = 0x10
+ RTM_NR_MSGTYPES = 0x40
+ RTM_SETDCB = 0x4f
+ RTM_SETLINK = 0x13
+ RTM_SETNEIGHTBL = 0x43
+ SCM_CREDENTIALS = 0x2
+ SCM_RIGHTS = 0x1
+ SCM_TIMESTAMP = 0x1d
+ SCM_TIMESTAMPING = 0x25
+ SCM_TIMESTAMPNS = 0x23
SHUT_RD = 0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -513,6 +750,64 @@ const (
SIGWINCH = 0x1c
SIGXCPU = 0x18
SIGXFSZ = 0x19
+ SIOCADDDLCI = 0x8980
+ SIOCADDMULTI = 0x8931
+ SIOCADDRT = 0x890b
+ SIOCATMARK = 0x8905
+ SIOCDARP = 0x8953
+ SIOCDELDLCI = 0x8981
+ SIOCDELMULTI = 0x8932
+ SIOCDELRT = 0x890c
+ SIOCDEVPRIVATE = 0x89f0
+ SIOCDIFADDR = 0x8936
+ SIOCDRARP = 0x8960
+ SIOCGARP = 0x8954
+ SIOCGIFADDR = 0x8915
+ SIOCGIFBR = 0x8940
+ SIOCGIFBRDADDR = 0x8919
+ SIOCGIFCONF = 0x8912
+ SIOCGIFCOUNT = 0x8938
+ SIOCGIFDSTADDR = 0x8917
+ SIOCGIFENCAP = 0x8925
+ SIOCGIFFLAGS = 0x8913
+ SIOCGIFHWADDR = 0x8927
+ SIOCGIFINDEX = 0x8933
+ SIOCGIFMAP = 0x8970
+ SIOCGIFMEM = 0x891f
+ SIOCGIFMETRIC = 0x891d
+ SIOCGIFMTU = 0x8921
+ SIOCGIFNAME = 0x8910
+ SIOCGIFNETMASK = 0x891b
+ SIOCGIFPFLAGS = 0x8935
+ SIOCGIFSLAVE = 0x8929
+ SIOCGIFTXQLEN = 0x8942
+ SIOCGPGRP = 0x8904
+ SIOCGRARP = 0x8961
+ SIOCGSTAMP = 0x8906
+ SIOCGSTAMPNS = 0x8907
+ SIOCPROTOPRIVATE = 0x89e0
+ SIOCRTMSG = 0x890d
+ SIOCSARP = 0x8955
+ SIOCSIFADDR = 0x8916
+ SIOCSIFBR = 0x8941
+ SIOCSIFBRDADDR = 0x891a
+ SIOCSIFDSTADDR = 0x8918
+ SIOCSIFENCAP = 0x8926
+ SIOCSIFFLAGS = 0x8914
+ SIOCSIFHWADDR = 0x8924
+ SIOCSIFHWBROADCAST = 0x8937
+ SIOCSIFLINK = 0x8911
+ SIOCSIFMAP = 0x8971
+ SIOCSIFMEM = 0x8920
+ SIOCSIFMETRIC = 0x891e
+ SIOCSIFMTU = 0x8922
+ SIOCSIFNAME = 0x8923
+ SIOCSIFNETMASK = 0x891c
+ SIOCSIFPFLAGS = 0x8934
+ SIOCSIFSLAVE = 0x8930
+ SIOCSIFTXQLEN = 0x8943
+ SIOCSPGRP = 0x8902
+ SIOCSRARP = 0x8962
SOCK_CLOEXEC = 0x80000
SOCK_DCCP = 0x6
SOCK_DGRAM = 0x2
@@ -542,6 +837,7 @@ const (
SO_BSDCOMPAT = 0xe
SO_DEBUG = 0x1
SO_DETACH_FILTER = 0x1b
+ SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
SO_ERROR = 0x4
SO_KEEPALIVE = 0x9
@@ -555,6 +851,7 @@ const (
SO_PEERNAME = 0x1c
SO_PEERSEC = 0x1f
SO_PRIORITY = 0xc
+ SO_PROTOCOL = 0x26
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
@@ -619,6 +916,19 @@ const (
TCP_QUICKACK = 0xc
TCP_SYNCNT = 0x7
TCP_WINDOW_CLAMP = 0xa
+ TUNGETFEATURES = 0x800454cf
+ TUNGETIFF = 0x800454d2
+ TUNGETSNDBUF = 0x800454d3
+ TUNSETDEBUG = 0x400454c9
+ TUNSETGROUP = 0x400454ce
+ TUNSETIFF = 0x400454ca
+ TUNSETLINK = 0x400454cd
+ TUNSETNOCSUM = 0x400454c8
+ TUNSETOFFLOAD = 0x400454d0
+ TUNSETOWNER = 0x400454cc
+ TUNSETPERSIST = 0x400454cb
+ TUNSETSNDBUF = 0x400454d4
+ TUNSETTXFILTER = 0x400454d1
WALL = 0x40000000
WCLONE = 0x80000000
WCONTINUED = 0x8
@@ -636,134 +946,134 @@ const (
// Error table
var errors = [...]string{
+ 1: "operation not permitted",
+ 2: "no such file or directory",
+ 3: "no such process",
+ 4: "interrupted system call",
+ 5: "input/output error",
+ 6: "no such device or address",
7: "argument list too long",
- 13: "permission denied",
- 98: "address already in use",
- 99: "cannot assign requested address",
- 68: "advertise error",
- 97: "address family not supported by protocol",
- 11: "resource temporarily unavailable",
- 114: "operation already in progress",
- 52: "invalid exchange",
+ 8: "exec format error",
9: "bad file descriptor",
- 77: "file descriptor in bad state",
- 74: "bad message",
- 53: "invalid request descriptor",
- 56: "invalid request code",
- 57: "invalid slot",
- 59: "bad font file format",
- 16: "device or resource busy",
- 125: "operation canceled",
10: "no child processes",
- 44: "channel number out of range",
- 70: "communication error on send",
- 103: "software caused connection abort",
- 111: "connection refused",
- 104: "connection reset by peer",
- 35: "resource deadlock avoided",
- 89: "destination address required",
- 33: "numerical argument out of domain",
- 73: "RFS specific error",
- 122: "disk quota exceeded",
- 17: "file exists",
+ 11: "resource temporarily unavailable",
+ 12: "cannot allocate memory",
+ 13: "permission denied",
14: "bad address",
+ 15: "block device required",
+ 16: "device or resource busy",
+ 17: "file exists",
+ 18: "invalid cross-device link",
+ 19: "no such device",
+ 20: "not a directory",
+ 21: "is a directory",
+ 22: "invalid argument",
+ 23: "too many open files in system",
+ 24: "too many open files",
+ 25: "inappropriate ioctl for device",
+ 26: "text file busy",
27: "file too large",
- 112: "host is down",
- 113: "no route to host",
+ 28: "no space left on device",
+ 29: "illegal seek",
+ 30: "read-only file system",
+ 31: "too many links",
+ 32: "broken pipe",
+ 33: "numerical argument out of domain",
+ 34: "numerical result out of range",
+ 35: "resource deadlock avoided",
+ 36: "file name too long",
+ 37: "no locks available",
+ 38: "function not implemented",
+ 39: "directory not empty",
+ 40: "too many levels of symbolic links",
+ 42: "no message of desired type",
43: "identifier removed",
- 84: "invalid or incomplete multibyte or wide character",
- 115: "operation now in progress",
- 4: "interrupted system call",
- 22: "invalid argument",
- 5: "input/output error",
- 106: "transport endpoint is already connected",
- 21: "is a directory",
- 120: "is a named type file",
- 127: "key has expired",
- 129: "key was rejected by service",
- 128: "key has been revoked",
- 51: "level 2 halted",
+ 44: "channel number out of range",
45: "level 2 not synchronized",
46: "level 3 halted",
47: "level 3 reset",
- 79: "can not access a needed shared library",
- 80: "accessing a corrupted shared library",
- 83: "cannot exec a shared library directly",
- 82: "attempting to link in too many shared libraries",
- 81: ".lib section in a.out corrupted",
48: "link number out of range",
- 40: "too many levels of symbolic links",
- 124: "wrong medium type",
- 24: "too many open files",
- 31: "too many links",
- 90: "message too long",
- 72: "multihop attempted",
- 36: "file name too long",
- 119: "no XENIX semaphores available",
- 100: "network is down",
- 102: "network dropped connection on reset",
- 101: "network is unreachable",
- 23: "too many open files in system",
- 55: "no anode",
- 105: "no buffer space available",
+ 49: "protocol driver not attached",
50: "no CSI structure available",
+ 51: "level 2 halted",
+ 52: "invalid exchange",
+ 53: "invalid request descriptor",
+ 54: "exchange full",
+ 55: "no anode",
+ 56: "invalid request code",
+ 57: "invalid slot",
+ 59: "bad font file format",
+ 60: "device not a stream",
61: "no data available",
- 19: "no such device",
- 2: "no such file or directory",
- 8: "exec format error",
- 126: "required key not available",
- 37: "no locks available",
- 67: "link has been severed",
- 123: "no medium found",
- 12: "cannot allocate memory",
- 42: "no message of desired type",
+ 62: "timer expired",
+ 63: "out of streams resources",
64: "machine is not on the network",
65: "package not installed",
- 92: "protocol not available",
- 28: "no space left on device",
- 63: "out of streams resources",
- 60: "device not a stream",
- 38: "function not implemented",
- 15: "block device required",
- 107: "transport endpoint is not connected",
- 20: "not a directory",
- 39: "directory not empty",
- 118: "not a XENIX named type file",
- 131: "state not recoverable",
- 88: "socket operation on non-socket",
- 95: "operation not supported",
- 25: "inappropriate ioctl for device",
- 76: "name not unique on network",
- 6: "no such device or address",
- 75: "value too large for defined data type",
- 130: "owner died",
- 1: "operation not permitted",
- 96: "protocol family not supported",
- 32: "broken pipe",
+ 66: "object is remote",
+ 67: "link has been severed",
+ 68: "advertise error",
+ 69: "srmount error",
+ 70: "communication error on send",
71: "protocol error",
- 93: "protocol not supported",
- 91: "protocol wrong type for socket",
- 34: "numerical result out of range",
+ 72: "multihop attempted",
+ 73: "RFS specific error",
+ 74: "bad message",
+ 75: "value too large for defined data type",
+ 76: "name not unique on network",
+ 77: "file descriptor in bad state",
78: "remote address changed",
- 66: "object is remote",
- 121: "remote I/O error",
+ 79: "can not access a needed shared library",
+ 80: "accessing a corrupted shared library",
+ 81: ".lib section in a.out corrupted",
+ 82: "attempting to link in too many shared libraries",
+ 83: "cannot exec a shared library directly",
+ 84: "invalid or incomplete multibyte or wide character",
85: "interrupted system call should be restarted",
- 132: "unknown error 132",
- 30: "read-only file system",
- 108: "cannot send after transport endpoint shutdown",
- 94: "socket type not supported",
- 29: "illegal seek",
- 3: "no such process",
- 69: "srmount error",
- 116: "stale NFS file handle",
86: "streams pipe error",
- 62: "timer expired",
- 110: "connection timed out",
+ 87: "too many users",
+ 88: "socket operation on non-socket",
+ 89: "destination address required",
+ 90: "message too long",
+ 91: "protocol wrong type for socket",
+ 92: "protocol not available",
+ 93: "protocol not supported",
+ 94: "socket type not supported",
+ 95: "operation not supported",
+ 96: "protocol family not supported",
+ 97: "address family not supported by protocol",
+ 98: "address already in use",
+ 99: "cannot assign requested address",
+ 100: "network is down",
+ 101: "network is unreachable",
+ 102: "network dropped connection on reset",
+ 103: "software caused connection abort",
+ 104: "connection reset by peer",
+ 105: "no buffer space available",
+ 106: "transport endpoint is already connected",
+ 107: "transport endpoint is not connected",
+ 108: "cannot send after transport endpoint shutdown",
109: "too many references: cannot splice",
- 26: "text file busy",
+ 110: "connection timed out",
+ 111: "connection refused",
+ 112: "host is down",
+ 113: "no route to host",
+ 114: "operation already in progress",
+ 115: "operation now in progress",
+ 116: "stale NFS file handle",
117: "structure needs cleaning",
- 49: "protocol driver not attached",
- 87: "too many users",
- 18: "invalid cross-device link",
- 54: "exchange full",
+ 118: "not a XENIX named type file",
+ 119: "no XENIX semaphores available",
+ 120: "is a named type file",
+ 121: "remote I/O error",
+ 122: "disk quota exceeded",
+ 123: "no medium found",
+ 124: "wrong medium type",
+ 125: "operation canceled",
+ 126: "required key not available",
+ 127: "key has expired",
+ 128: "key has been revoked",
+ 129: "key was rejected by service",
+ 130: "owner died",
+ 131: "state not recoverable",
+ 132: "unknown error 132",
}
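
The regenerated error table above is now laid out in ascending errno order, one entry per slot of a sparse array. As a minimal, illustrative sketch (not part of the generated file), this is how such an errno-indexed table is typically consumed, with a fallback for gaps and out-of-range values; only a few entries are reproduced here:

package main

import "fmt"

// A sparse errno-indexed table in the same shape as the generated one above
// (only a few entries reproduced for illustration).
var errors = [...]string{
	1: "operation not permitted",
	2: "no such file or directory",
	7: "argument list too long",
}

// errstr shows how such a table is consumed: index by errno and fall back
// to a generic message when the slot is empty or out of range.
func errstr(errno int) string {
	if errno < 0 || errno >= len(errors) || errors[errno] == "" {
		return fmt.Sprintf("errno %d", errno)
	}
	return errors[errno]
}

func main() {
	fmt.Println(errstr(2)) // no such file or directory
	fmt.Println(errstr(6)) // errno 6 (gap in the sparse table)
}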
diff --git a/src/pkg/syscall/zsyscall_linux_386.go b/src/pkg/syscall/zsyscall_linux_386.go
index 4f331aa22..8df29f136 100644
--- a/src/pkg/syscall/zsyscall_linux_386.go
+++ b/src/pkg/syscall/zsyscall_linux_386.go
@@ -971,6 +971,15 @@ func Pwrite(fd int, p []byte, offset int64) (n int, errno int) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ errno = int(e1)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setfsgid(gid int) (errno int) {
_, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0)
errno = int(e1)
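
The hunk above adds a generated wrapper for sendfile(2) on 386 (via SYS_SENDFILE64), with matching wrappers for amd64 and arm further down. A rough usage sketch against the signature shown here, assuming the errno-returning Open/Close conventions used elsewhere in this package version; the output descriptor is a placeholder, so the call is expected to fail:

package main

import (
	"fmt"
	"syscall"
)

// copyChunk pushes up to count bytes from infd to outfd with the new
// syscall.Sendfile wrapper. offset tracks the read position in infd, and
// outfd is assumed to be a socket descriptor.
func copyChunk(outfd, infd int, offset *int64, count int) (int, int) {
	return syscall.Sendfile(outfd, infd, offset, count)
}

func main() {
	infd, errno := syscall.Open("/etc/hosts", syscall.O_RDONLY, 0)
	if errno != 0 {
		fmt.Println("open failed, errno", errno)
		return
	}
	defer syscall.Close(infd)

	// outfd would normally come from an accepted or dialed socket; -1 is a
	// placeholder, so sendfile should report EBADF here.
	var off int64
	n, errno := copyChunk(-1, infd, &off, 4096)
	fmt.Println("sendfile:", n, "errno:", errno)
}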
diff --git a/src/pkg/syscall/zsyscall_linux_amd64.go b/src/pkg/syscall/zsyscall_linux_amd64.go
index 19501dbfa..d6e287967 100644
--- a/src/pkg/syscall/zsyscall_linux_amd64.go
+++ b/src/pkg/syscall/zsyscall_linux_amd64.go
@@ -1005,6 +1005,15 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ errno = int(e1)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setfsgid(gid int) (errno int) {
_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
errno = int(e1)
diff --git a/src/pkg/syscall/zsyscall_linux_arm.go b/src/pkg/syscall/zsyscall_linux_arm.go
index db49b6482..af5f7c50c 100644
--- a/src/pkg/syscall/zsyscall_linux_arm.go
+++ b/src/pkg/syscall/zsyscall_linux_arm.go
@@ -1070,6 +1070,15 @@ func Lstat(path string, stat *Stat_t) (errno int) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, errno int) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ errno = int(e1)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, errno int) {
r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
n = int(r0)
diff --git a/src/pkg/syscall/zsyscall_windows_386.go b/src/pkg/syscall/zsyscall_windows_386.go
index f4cfdeed8..ce36ab6c0 100644
--- a/src/pkg/syscall/zsyscall_windows_386.go
+++ b/src/pkg/syscall/zsyscall_windows_386.go
@@ -70,6 +70,12 @@ var (
procSetHandleInformation = getSysProcAddr(modkernel32, "SetHandleInformation")
procFlushFileBuffers = getSysProcAddr(modkernel32, "FlushFileBuffers")
procGetFullPathNameW = getSysProcAddr(modkernel32, "GetFullPathNameW")
+ procCreateFileMappingW = getSysProcAddr(modkernel32, "CreateFileMappingW")
+ procMapViewOfFile = getSysProcAddr(modkernel32, "MapViewOfFile")
+ procUnmapViewOfFile = getSysProcAddr(modkernel32, "UnmapViewOfFile")
+ procFlushViewOfFile = getSysProcAddr(modkernel32, "FlushViewOfFile")
+ procVirtualLock = getSysProcAddr(modkernel32, "VirtualLock")
+ procVirtualUnlock = getSysProcAddr(modkernel32, "VirtualUnlock")
procWSAStartup = getSysProcAddr(modwsock32, "WSAStartup")
procWSACleanup = getSysProcAddr(modwsock32, "WSACleanup")
procsocket = getSysProcAddr(modwsock32, "socket")
@@ -901,6 +907,92 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (
return
}
+func CreateFileMapping(fhandle int32, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle int32, errno int) {
+ r0, _, e1 := Syscall6(procCreateFileMappingW, 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+ handle = int32(r0)
+ if handle == 0 {
+ if e1 != 0 {
+ errno = int(e1)
+ } else {
+ errno = EINVAL
+ }
+ } else {
+ errno = 0
+ }
+ return
+}
+
+func MapViewOfFile(handle int32, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, errno int) {
+ r0, _, e1 := Syscall6(procMapViewOfFile, 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
+ addr = uintptr(r0)
+ if addr == 0 {
+ if e1 != 0 {
+ errno = int(e1)
+ } else {
+ errno = EINVAL
+ }
+ } else {
+ errno = 0
+ }
+ return
+}
+
+func UnmapViewOfFile(addr uintptr) (errno int) {
+ r1, _, e1 := Syscall(procUnmapViewOfFile, 1, uintptr(addr), 0, 0)
+ if int(r1) == 0 {
+ if e1 != 0 {
+ errno = int(e1)
+ } else {
+ errno = EINVAL
+ }
+ } else {
+ errno = 0
+ }
+ return
+}
+
+func FlushViewOfFile(addr uintptr, length uintptr) (errno int) {
+ r1, _, e1 := Syscall(procFlushViewOfFile, 2, uintptr(addr), uintptr(length), 0)
+ if int(r1) == 0 {
+ if e1 != 0 {
+ errno = int(e1)
+ } else {
+ errno = EINVAL
+ }
+ } else {
+ errno = 0
+ }
+ return
+}
+
+func VirtualLock(addr uintptr, length uintptr) (errno int) {
+ r1, _, e1 := Syscall(procVirtualLock, 2, uintptr(addr), uintptr(length), 0)
+ if int(r1) == 0 {
+ if e1 != 0 {
+ errno = int(e1)
+ } else {
+ errno = EINVAL
+ }
+ } else {
+ errno = 0
+ }
+ return
+}
+
+func VirtualUnlock(addr uintptr, length uintptr) (errno int) {
+ r1, _, e1 := Syscall(procVirtualUnlock, 2, uintptr(addr), uintptr(length), 0)
+ if int(r1) == 0 {
+ if e1 != 0 {
+ errno = int(e1)
+ } else {
+ errno = EINVAL
+ }
+ } else {
+ errno = 0
+ }
+ return
+}
+
func WSAStartup(verreq uint32, data *WSAData) (sockerrno int) {
r0, _, _ := Syscall(procWSAStartup, 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
sockerrno = int(r0)
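
The block above exposes the kernel32 file-mapping and virtual-lock entry points. Below is a sketch of mapping a file read-only using only these wrappers plus the PAGE_READONLY and FILE_MAP_READ constants added later in this patch (ztypes_windows_386.go); the bare int32 handle, the package name, and the unsafe slice conversion are illustrative, and the fragment is meant to live in a Windows-only file:

package mmapdemo

import (
	"syscall"
	"unsafe"
)

// mapReadOnly maps length bytes of the file behind fhandle read-only and
// returns the bytes together with the view address for later unmapping.
func mapReadOnly(fhandle int32, length int) (data []byte, addr uintptr, errno int) {
	mh, errno := syscall.CreateFileMapping(fhandle, nil, syscall.PAGE_READONLY, 0, 0, nil)
	if errno != 0 {
		return nil, 0, errno
	}
	addr, errno = syscall.MapViewOfFile(mh, syscall.FILE_MAP_READ, 0, 0, uintptr(length))
	if errno != 0 {
		return nil, 0, errno
	}
	// Reinterpret the mapped view as a byte slice via a large fixed-size
	// array; closing the mapping handle is omitted for brevity.
	data = (*[1 << 30]byte)(unsafe.Pointer(addr))[:length]
	return data, addr, 0
}

// When finished, the caller drops the view again:
//	syscall.UnmapViewOfFile(addr)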
diff --git a/src/pkg/syscall/ztypes_darwin_386.go b/src/pkg/syscall/ztypes_darwin_386.go
index b3541778e..1f378427f 100644
--- a/src/pkg/syscall/ztypes_darwin_386.go
+++ b/src/pkg/syscall/ztypes_darwin_386.go
@@ -2,6 +2,10 @@
// MACHINE GENERATED - DO NOT EDIT.
+// Manual corrections: TODO: need to fix godefs (issue 1466)
+// change Msghdr field to Iov *Iovec (was uint32/64)
+// change BpfProgram field to Insns *BpfInsn (was uint32/64)
+
package syscall
// Constants
diff --git a/src/pkg/syscall/ztypes_darwin_amd64.go b/src/pkg/syscall/ztypes_darwin_amd64.go
index d61c8b8de..5fa27bdd7 100644
--- a/src/pkg/syscall/ztypes_darwin_amd64.go
+++ b/src/pkg/syscall/ztypes_darwin_amd64.go
@@ -2,6 +2,10 @@
// MACHINE GENERATED - DO NOT EDIT.
+// Manual corrections: TODO: need to fix godefs (issue 1466)
+// change Msghdr field to Iov *Iovec (was uint32/64)
+// change BpfProgram field to Insns *BpfInsn (was uint32/64)
+
package syscall
// Constants
diff --git a/src/pkg/syscall/ztypes_linux_386.go b/src/pkg/syscall/ztypes_linux_386.go
index d98d6af05..0409027de 100644
--- a/src/pkg/syscall/ztypes_linux_386.go
+++ b/src/pkg/syscall/ztypes_linux_386.go
@@ -17,11 +17,50 @@ const (
SizeofSockaddrAny = 0x70
SizeofSockaddrUnix = 0x6e
SizeofSockaddrLinklayer = 0x14
+ SizeofSockaddrNetlink = 0xc
SizeofLinger = 0x8
SizeofIpMreq = 0x8
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofUcred = 0xc
+ IFA_UNSPEC = 0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFLA_UNSPEC = 0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_MAX = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtmsg = 0xc
SizeofInotifyEvent = 0x10
)
@@ -193,6 +232,13 @@ type RawSockaddrLinklayer struct {
Addr [8]uint8
}
+type RawSockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -242,6 +288,62 @@ type Ucred struct {
Gid uint32
}
+type NlMsghdr struct {
+ Len uint32
+ Type uint16
+ Flags uint16
+ Seq uint32
+ Pid uint32
+}
+
+type NlMsgerr struct {
+ Error int32
+ Msg NlMsghdr
+}
+
+type RtGenmsg struct {
+ Family uint8
+}
+
+type NlAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type RtAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type IfInfomsg struct {
+ Family uint8
+ X__ifi_pad uint8
+ Type uint16
+ Index int32
+ Flags uint32
+ Change uint32
+}
+
+type IfAddrmsg struct {
+ Family uint8
+ Prefixlen uint8
+ Flags uint8
+ Scope uint8
+ Index uint32
+}
+
+type RtMsg struct {
+ Family uint8
+ Dst_len uint8
+ Src_len uint8
+ Tos uint8
+ Table uint8
+ Protocol uint8
+ Scope uint8
+ Type uint8
+ Flags uint32
+}
+
type InotifyEvent struct {
Wd int32
Mask uint32
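
The netlink additions above (NlMsghdr, RtGenmsg and their Sizeof constants) pair with the NETLINK_*, NLM_F_* and RTM_* constants added to zerrors_linux earlier in this patch. A sketch of how they map onto the wire, serializing a minimal RTM_GETLINK dump request by hand; sending it over an AF_NETLINK socket is left out, and little-endian (host order on 386/amd64) is assumed:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"syscall"
)

// buildGetLinkRequest serializes an NlMsghdr followed by an RtGenmsg, the
// minimal payload for an RTM_GETLINK dump, using the types and constants
// introduced in this patch.
func buildGetLinkRequest(seq uint32) []byte {
	h := syscall.NlMsghdr{
		Len:   uint32(syscall.NLMSG_HDRLEN + syscall.SizeofRtGenmsg),
		Type:  syscall.RTM_GETLINK,
		Flags: syscall.NLM_F_DUMP | syscall.NLM_F_REQUEST,
		Seq:   seq,
	}
	g := syscall.RtGenmsg{Family: syscall.AF_UNSPEC}

	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, h)
	binary.Write(&buf, binary.LittleEndian, g)
	return buf.Bytes()
}

func main() {
	b := buildGetLinkRequest(1)
	fmt.Printf("%d bytes: % x\n", len(b), b)
}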
diff --git a/src/pkg/syscall/ztypes_linux_amd64.go b/src/pkg/syscall/ztypes_linux_amd64.go
index db5c32cae..1d375e0d4 100644
--- a/src/pkg/syscall/ztypes_linux_amd64.go
+++ b/src/pkg/syscall/ztypes_linux_amd64.go
@@ -17,11 +17,50 @@ const (
SizeofSockaddrAny = 0x70
SizeofSockaddrUnix = 0x6e
SizeofSockaddrLinklayer = 0x14
+ SizeofSockaddrNetlink = 0xc
SizeofLinger = 0x8
SizeofIpMreq = 0x8
SizeofMsghdr = 0x38
SizeofCmsghdr = 0x10
SizeofUcred = 0xc
+ IFA_UNSPEC = 0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFLA_UNSPEC = 0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_MAX = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtmsg = 0xc
SizeofInotifyEvent = 0x10
)
@@ -193,6 +232,13 @@ type RawSockaddrLinklayer struct {
Addr [8]uint8
}
+type RawSockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -244,6 +290,62 @@ type Ucred struct {
Gid uint32
}
+type NlMsghdr struct {
+ Len uint32
+ Type uint16
+ Flags uint16
+ Seq uint32
+ Pid uint32
+}
+
+type NlMsgerr struct {
+ Error int32
+ Msg NlMsghdr
+}
+
+type RtGenmsg struct {
+ Family uint8
+}
+
+type NlAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type RtAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type IfInfomsg struct {
+ Family uint8
+ X__ifi_pad uint8
+ Type uint16
+ Index int32
+ Flags uint32
+ Change uint32
+}
+
+type IfAddrmsg struct {
+ Family uint8
+ Prefixlen uint8
+ Flags uint8
+ Scope uint8
+ Index uint32
+}
+
+type RtMsg struct {
+ Family uint8
+ Dst_len uint8
+ Src_len uint8
+ Tos uint8
+ Table uint8
+ Protocol uint8
+ Scope uint8
+ Type uint8
+ Flags uint32
+}
+
type InotifyEvent struct {
Wd int32
Mask uint32
diff --git a/src/pkg/syscall/ztypes_linux_arm.go b/src/pkg/syscall/ztypes_linux_arm.go
index 236155b20..5f2cde3cb 100644
--- a/src/pkg/syscall/ztypes_linux_arm.go
+++ b/src/pkg/syscall/ztypes_linux_arm.go
@@ -5,7 +5,7 @@
// Manual corrections: TODO(rsc): need to fix godefs
// remove duplicate PtraceRegs type
// change RawSockaddrUnix field to Path [108]int8 (was uint8)
-// add padding to EpollEvent
+// add padding to EpollEvent
package syscall
@@ -22,11 +22,50 @@ const (
SizeofSockaddrAny = 0x70
SizeofSockaddrUnix = 0x6e
SizeofSockaddrLinklayer = 0x14
+ SizeofSockaddrNetlink = 0xc
SizeofLinger = 0x8
SizeofIpMreq = 0x8
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofUcred = 0xc
+ IFA_UNSPEC = 0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFLA_UNSPEC = 0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_MAX = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtmsg = 0xc
SizeofInotifyEvent = 0x10
)
@@ -51,37 +90,37 @@ type Timeval struct {
}
type Timex struct {
- Modes uint32
- Offset int32
- Freq int32
- Maxerror int32
- Esterror int32
- Status int32
- Constant int32
- Precision int32
- Tolerance int32
- Time Timeval
- Tick int32
- Ppsfreq int32
- Jitter int32
- Shift int32
- Stabil int32
- Jitcnt int32
- Calcnt int32
- Errcnt int32
- Stbcnt int32
- Tai int32
- Pad0 int32
- Pad1 int32
- Pad2 int32
- Pad3 int32
- Pad4 int32
- Pad5 int32
- Pad6 int32
- Pad7 int32
- Pad8 int32
- Pad9 int32
- Pad10 int32
+ Modes uint32
+ Offset int32
+ Freq int32
+ Maxerror int32
+ Esterror int32
+ Status int32
+ Constant int32
+ Precision int32
+ Tolerance int32
+ Time Timeval
+ Tick int32
+ Ppsfreq int32
+ Jitter int32
+ Shift int32
+ Stabil int32
+ Jitcnt int32
+ Calcnt int32
+ Errcnt int32
+ Stbcnt int32
+ Tai int32
+ Pad_godefs_0 int32
+ Pad_godefs_1 int32
+ Pad_godefs_2 int32
+ Pad_godefs_3 int32
+ Pad_godefs_4 int32
+ Pad_godefs_5 int32
+ Pad_godefs_6 int32
+ Pad_godefs_7 int32
+ Pad_godefs_8 int32
+ Pad_godefs_9 int32
+ Pad_godefs_10 int32
}
type Time_t int32
@@ -125,49 +164,49 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
- Dev uint64
- X__pad1 uint16
- Pad0 [2]byte
- X__st_ino uint32
- Mode uint32
- Nlink uint32
- Uid uint32
- Gid uint32
- Rdev uint64
- X__pad2 uint16
- Pad1 [6]byte
- Size int64
- Blksize int32
- Pad2 [4]byte
- Blocks int64
- Atim Timespec
- Mtim Timespec
- Ctim Timespec
- Ino uint64
+ Dev uint64
+ X__pad1 uint16
+ Pad_godefs_0 [2]byte
+ X__st_ino uint32
+ Mode uint32
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev uint64
+ X__pad2 uint16
+ Pad_godefs_1 [6]byte
+ Size int64
+ Blksize int32
+ Pad_godefs_2 [4]byte
+ Blocks int64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Ino uint64
}
type Statfs_t struct {
- Type int32
- Bsize int32
- Blocks uint64
- Bfree uint64
- Bavail uint64
- Files uint64
- Ffree uint64
- Fsid [8]byte /* __fsid_t */
- Namelen int32
- Frsize int32
- Spare [5]int32
- Pad0 [4]byte
+ Type int32
+ Bsize int32
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid [8]byte /* __fsid_t */
+ Namelen int32
+ Frsize int32
+ Spare [5]int32
+ Pad_godefs_0 [4]byte
}
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]uint8
- Pad0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]uint8
+ Pad_godefs_0 [5]byte
}
type RawSockaddrInet4 struct {
@@ -200,6 +239,13 @@ type RawSockaddrLinklayer struct {
Addr [8]uint8
}
+type RawSockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -249,6 +295,62 @@ type Ucred struct {
Gid uint32
}
+type NlMsghdr struct {
+ Len uint32
+ Type uint16
+ Flags uint16
+ Seq uint32
+ Pid uint32
+}
+
+type NlMsgerr struct {
+ Error int32
+ Msg NlMsghdr
+}
+
+type RtGenmsg struct {
+ Family uint8
+}
+
+type NlAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type RtAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type IfInfomsg struct {
+ Family uint8
+ X__ifi_pad uint8
+ Type uint16
+ Index int32
+ Flags uint32
+ Change uint32
+}
+
+type IfAddrmsg struct {
+ Family uint8
+ Prefixlen uint8
+ Flags uint8
+ Scope uint8
+ Index uint32
+}
+
+type RtMsg struct {
+ Family uint8
+ Dst_len uint8
+ Src_len uint8
+ Tos uint8
+ Table uint8
+ Protocol uint8
+ Scope uint8
+ Type uint8
+ Flags uint32
+}
+
type InotifyEvent struct {
Wd int32
Mask uint32
diff --git a/src/pkg/syscall/ztypes_windows_386.go b/src/pkg/syscall/ztypes_windows_386.go
index 3a50be14c..7b15ea404 100644
--- a/src/pkg/syscall/ztypes_windows_386.go
+++ b/src/pkg/syscall/ztypes_windows_386.go
@@ -119,6 +119,18 @@ const (
STANDARD_RIGHTS_READ = 0x00020000
PROCESS_QUERY_INFORMATION = 0x00000400
SYNCHRONIZE = 0x00100000
+
+ PAGE_READONLY = 0x02
+ PAGE_READWRITE = 0x04
+ PAGE_WRITECOPY = 0x08
+ PAGE_EXECUTE_READ = 0x20
+ PAGE_EXECUTE_READWRITE = 0x40
+ PAGE_EXECUTE_WRITECOPY = 0x80
+
+ FILE_MAP_COPY = 0x01
+ FILE_MAP_WRITE = 0x02
+ FILE_MAP_READ = 0x04
+ FILE_MAP_EXECUTE = 0x20
)
const (
@@ -186,7 +198,7 @@ type Overlapped struct {
InternalHigh uint32
Offset uint32
OffsetHigh uint32
- HEvent *byte
+ HEvent int32
}
type Filetime struct {
diff --git a/src/pkg/syslog/syslog_test.go b/src/pkg/syslog/syslog_test.go
index 4816ddf2a..5c0b3e0c4 100644
--- a/src/pkg/syslog/syslog_test.go
+++ b/src/pkg/syslog/syslog_test.go
@@ -35,7 +35,19 @@ func startServer(done chan<- string) {
go runSyslog(c, done)
}
+func skipNetTest(t *testing.T) bool {
+ if testing.Short() {
+ // Depends on syslog daemon running, and sometimes it's not.
+ t.Logf("skipping syslog test during -short")
+ return true
+ }
+ return false
+}
+
func TestNew(t *testing.T) {
+ if skipNetTest(t) {
+ return
+ }
s, err := New(LOG_INFO, "")
if err != nil {
t.Fatalf("New() failed: %s", err)
@@ -45,6 +57,9 @@ func TestNew(t *testing.T) {
}
func TestNewLogger(t *testing.T) {
+ if skipNetTest(t) {
+ return
+ }
f := NewLogger(LOG_INFO, 0)
if f == nil {
t.Error("NewLogger() failed")
@@ -52,9 +67,8 @@ func TestNewLogger(t *testing.T) {
}
func TestDial(t *testing.T) {
- if testing.Short() {
- // Depends on syslog daemon running, and sometimes it's not.
- t.Logf("skipping syslog test during -short")
+ if skipNetTest(t) {
+ return
}
l, err := Dial("", "", LOG_ERR, "syslog_test")
if err != nil {
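
The change above factors the -short skip into a shared skipNetTest helper, so every test that depends on a running syslog daemon guards itself the same way. Any further test in this file would follow the same pattern; the test name below is hypothetical and the fragment belongs in the same _test.go file:

func TestSomethingNeedingSyslogd(t *testing.T) {
	if skipNetTest(t) {
		return
	}
	// exercise daemon-dependent behaviour here
}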
diff --git a/src/pkg/template/template.go b/src/pkg/template/template.go
index 253207852..c00f72ac9 100644
--- a/src/pkg/template/template.go
+++ b/src/pkg/template/template.go
@@ -76,6 +76,10 @@
executed sequentially, with each formatter receiving the bytes
emitted by the one to its left.
+ As well as field names, one may use literals with Go syntax.
+ Integer, floating-point, and string literals are supported.
+ Raw strings may not span newlines.
+
The delimiter strings get their default value, "{" and "}", from
JSON-template. They may be set to any non-empty, space-free
string using the SetDelims method. Their value can be printed
@@ -91,6 +95,7 @@ import (
"io/ioutil"
"os"
"reflect"
+ "strconv"
"strings"
"unicode"
"utf8"
@@ -151,10 +156,13 @@ type literalElement struct {
// A variable invocation to be evaluated
type variableElement struct {
linenum int
- word []string // The fields in the invocation.
- fmts []string // Names of formatters to apply. len(fmts) > 0
+ args []interface{} // The fields and literals in the invocation.
+ fmts []string // Names of formatters to apply. len(fmts) > 0
}
+// A variableElement arg to be evaluated as a field name
+type fieldName string
+
// A .section block, possibly with a .or
type sectionElement struct {
linenum int // of .section itself
@@ -245,6 +253,31 @@ func equal(s []byte, n int, t []byte) bool {
return true
}
+// isQuote returns true if c is a string- or character-delimiting quote character.
+func isQuote(c byte) bool {
+ return c == '"' || c == '`' || c == '\''
+}
+
+// endQuote returns the end quote index for the quoted string that
+// starts at n, or -1 if no matching end quote is found before the end
+// of the line.
+func endQuote(s []byte, n int) int {
+ quote := s[n]
+ for n++; n < len(s); n++ {
+ switch s[n] {
+ case '\\':
+ if quote == '"' || quote == '\'' {
+ n++
+ }
+ case '\n':
+ return -1
+ case quote:
+ return n
+ }
+ }
+ return -1
+}
+
// nextItem returns the next item from the input buffer. If the returned
// item is empty, we are at EOF. The item will be either a
// delimited string or a non-empty string between delimited
@@ -282,6 +315,14 @@ func (t *Template) nextItem() []byte {
if t.buf[i] == '\n' {
break
}
+ if isQuote(t.buf[i]) {
+ i = endQuote(t.buf, i)
+ if i == -1 {
+ t.parseError("unmatched quote")
+ return nil
+ }
+ continue
+ }
if equal(t.buf, i, t.rdelim) {
i += len(t.rdelim)
right = i
@@ -333,23 +374,33 @@ func (t *Template) nextItem() []byte {
return item
}
-// Turn a byte array into a white-space-split array of strings.
+// Turn a byte array into a white-space-split array of strings,
+// taking into account quoted strings.
func words(buf []byte) []string {
s := make([]string, 0, 5)
- p := 0 // position in buf
- // one word per loop
- for i := 0; ; i++ {
- // skip white space
- for ; p < len(buf) && white(buf[p]); p++ {
- }
- // grab word
- start := p
- for ; p < len(buf) && !white(buf[p]); p++ {
+ for i := 0; i < len(buf); {
+ // One word per loop
+ for i < len(buf) && white(buf[i]) {
+ i++
}
- if start == p { // no text left
+ if i == len(buf) {
break
}
- s = append(s, string(buf[start:p]))
+ // Got a word
+ start := i
+ if isQuote(buf[i]) {
+ i = endQuote(buf, i)
+ if i < 0 {
+ i = len(buf)
+ } else {
+ i++
+ }
+ } else {
+ for i < len(buf) && !white(buf[i]) {
+ i++
+ }
+ }
+ s = append(s, string(buf[start:i]))
}
return s
}
@@ -381,11 +432,17 @@ func (t *Template) analyze(item []byte) (tok int, w []string) {
t.parseError("empty directive")
return
}
- if len(w) > 0 && w[0][0] != '.' {
+ first := w[0]
+ if first[0] != '.' {
tok = tokVariable
return
}
- switch w[0] {
+ if len(first) > 1 && first[1] >= '0' && first[1] <= '9' {
+ // Must be a float.
+ tok = tokVariable
+ return
+ }
+ switch first {
case ".meta-left", ".meta-right", ".space", ".tab":
tok = tokLiteral
return
@@ -447,6 +504,37 @@ func (t *Template) newVariable(words []string) *variableElement {
formatters = strings.Split(lastWord[bar+1:], "|", -1)
}
+ args := make([]interface{}, len(words))
+
+ // Build argument list, processing any literals
+ for i, word := range words {
+ var lerr os.Error
+ switch word[0] {
+ case '"', '`', '\'':
+ v, err := strconv.Unquote(word)
+ if err == nil && word[0] == '\'' {
+ args[i] = []int(v)[0]
+ } else {
+ args[i], lerr = v, err
+ }
+
+ case '.', '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ v, err := strconv.Btoi64(word, 0)
+ if err == nil {
+ args[i] = v
+ } else {
+ v, err := strconv.Atof64(word)
+ args[i], lerr = v, err
+ }
+
+ default:
+ args[i] = fieldName(word)
+ }
+ if lerr != nil {
+ t.parseError("invalid literal: %q: %s", word, lerr)
+ }
+ }
+
// We could remember the function address here and avoid the lookup later,
// but it's more dynamic to let the user change the map contents underfoot.
// We do require the name to be present, though.
@@ -457,7 +545,8 @@ func (t *Template) newVariable(words []string) *variableElement {
t.parseError("unknown formatter: %q", f)
}
}
- return &variableElement{t.linenum, words, formatters}
+
+ return &variableElement{t.linenum, args, formatters}
}
// Grab the next item. If it's simple, just append it to the template.
@@ -753,7 +842,7 @@ func (t *Template) varValue(name string, st *state) reflect.Value {
func (t *Template) format(wr io.Writer, fmt string, val []interface{}, v *variableElement, st *state) {
fn := t.formatter(fmt)
if fn == nil {
- t.execError(st, v.linenum, "missing formatter %s for variable %s", fmt, v.word[0])
+ t.execError(st, v.linenum, "missing formatter %s for variable", fmt)
}
fn(wr, fmt, val...)
}
@@ -761,12 +850,15 @@ func (t *Template) format(wr io.Writer, fmt string, val []interface{}, v *variab
// Evaluate a variable, looking up through the parent if necessary.
// If it has a formatter attached ({var|formatter}) run that too.
func (t *Template) writeVariable(v *variableElement, st *state) {
- // Turn the words of the invocation into values.
- val := make([]interface{}, len(v.word))
- for i, word := range v.word {
- val[i] = t.varValue(word, st).Interface()
+ // Resolve field names
+ val := make([]interface{}, len(v.args))
+ for i, arg := range v.args {
+ if name, ok := arg.(fieldName); ok {
+ val[i] = t.varValue(string(name), st).Interface()
+ } else {
+ val[i] = arg
+ }
}
-
for i, fmt := range v.fmts[:len(v.fmts)-1] {
b := &st.buf[i&1]
b.Reset()
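
The parser changes above teach nextItem and words to treat quoted and raw strings as single tokens, and newVariable then converts those tokens into typed literal arguments. The following is a standalone re-implementation of the quote-aware splitting, separate from the package internals, to show the tokenization rule the patch introduces: backslash escapes count only inside " and ' quotes, a newline ends the scan, and a quoted token is kept whole:

package main

import "fmt"

func isQuote(c byte) bool { return c == '"' || c == '`' || c == '\'' }

// endQuote mirrors the helper added above: it returns the index of the
// closing quote for the string starting at n, or -1 at a newline/EOL.
func endQuote(s []byte, n int) int {
	quote := s[n]
	for n++; n < len(s); n++ {
		switch s[n] {
		case '\\':
			if quote == '"' || quote == '\'' {
				n++
			}
		case '\n':
			return -1
		case quote:
			return n
		}
	}
	return -1
}

// splitWords is a simplified, standalone version of the new words():
// whitespace-separated fields, except that quoted strings stay whole.
func splitWords(buf []byte) []string {
	var out []string
	for i := 0; i < len(buf); {
		for i < len(buf) && (buf[i] == ' ' || buf[i] == '\t') {
			i++
		}
		if i == len(buf) {
			break
		}
		start := i
		if isQuote(buf[i]) {
			if j := endQuote(buf, i); j < 0 {
				i = len(buf)
			} else {
				i = j + 1
			}
		} else {
			for i < len(buf) && buf[i] != ' ' && buf[i] != '\t' {
				i++
			}
		}
		out = append(out, string(buf[start:i]))
	}
	return out
}

func main() {
	fmt.Printf("%q\n", splitWords([]byte(`"a string with spaces" Field 42 'x'`)))
	// ["\"a string with spaces\"" "Field" "42" "'x'"]
}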
diff --git a/src/pkg/template/template_test.go b/src/pkg/template/template_test.go
index d21a5397a..a5e6a4ecc 100644
--- a/src/pkg/template/template_test.go
+++ b/src/pkg/template/template_test.go
@@ -94,10 +94,15 @@ func multiword(w io.Writer, format string, value ...interface{}) {
}
}
+func printf(w io.Writer, format string, v ...interface{}) {
+ io.WriteString(w, fmt.Sprintf(v[0].(string), v[1:]...))
+}
+
var formatters = FormatterMap{
"uppercase": writer(uppercase),
"+1": writer(plus1),
"multiword": multiword,
+ "printf": printf,
}
var tests = []*Test{
@@ -138,6 +143,36 @@ var tests = []*Test{
out: "nil pointer: <nil>=77\n",
},
+ &Test{
+ in: `{"Strings" ":"} {""} {"\t\u0123 \x23\\"} {"\"}{\\"}`,
+
+ out: "Strings: \t\u0123 \x23\\ \"}{\\",
+ },
+
+ &Test{
+ in: "{`Raw strings` `:`} {``} {`\\t\\u0123 \\x23\\`} {`}{\\`}",
+
+ out: "Raw strings: \\t\\u0123 \\x23\\ }{\\",
+ },
+
+ &Test{
+ in: "Characters: {'a'} {'\\u0123'} {' '} {'}'} {'{'}",
+
+ out: "Characters: 97 291 32 125 123",
+ },
+
+ &Test{
+ in: "Integers: {1} {-2} {+42} {0777} {0x0a}",
+
+ out: "Integers: 1 -2 42 511 10",
+ },
+
+ &Test{
+ in: "Floats: {.5} {-.5} {1.1} {-2.2} {+42.1} {1e10} {1.2e-3} {1.2e3} {-1.2e3}",
+
+ out: "Floats: 0.5 -0.5 1.1 -2.2 42.1 1e+10 0.0012 1200 -1200",
+ },
+
// Method at top level
&Test{
in: "ptrmethod={PointerMethod}\n",
@@ -723,6 +758,10 @@ var formatterTests = []Test{
in: "{Integer|||||}", // empty string is a valid formatter
out: "77",
},
+ {
+ in: `{"%.02f 0x%02X" 1.1 10|printf}`,
+ out: "1.10 0x0A",
+ },
}
func TestFormatters(t *testing.T) {
diff --git a/src/pkg/unicode/Makefile b/src/pkg/unicode/Makefile
index 53f7229e7..26e6e501f 100644
--- a/src/pkg/unicode/Makefile
+++ b/src/pkg/unicode/Makefile
@@ -26,6 +26,7 @@ tables: maketables
# Build (but do not run) maketables during testing,
# just to make sure it still compiles.
test: maketables
+testshort: maketables
# Downloads from www.unicode.org, so not part
# of standard test scripts.
diff --git a/src/pkg/unicode/maketables.go b/src/pkg/unicode/maketables.go
index 0c367673e..33a826862 100644
--- a/src/pkg/unicode/maketables.go
+++ b/src/pkg/unicode/maketables.go
@@ -258,7 +258,7 @@ func loadChars() {
if *dataURL == "" {
flag.Set("data", *url+"UnicodeData.txt")
}
- resp, _, err := http.Get(*dataURL)
+ resp, err := http.Get(*dataURL)
if err != nil {
logger.Fatal(err)
}
@@ -549,7 +549,7 @@ func printScriptOrProperty(doProps bool) {
return
}
var err os.Error
- resp, _, err := http.Get(*url + file)
+ resp, err := http.Get(*url + file)
if err != nil {
logger.Fatal(err)
}
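
The two hunks above, like the websocket and http test changes below, track the new http.Get signature, which no longer returns the final URL as a separate result. A minimal sketch of the updated calling convention; the URL is a placeholder:

package main

import (
	"fmt"
	"http"
	"io/ioutil"
)

func main() {
	// http.Get now returns just (resp, err); the separate finalURL result is gone.
	resp, err := http.Get("http://example.com/UnicodeData.txt")
	if err != nil {
		fmt.Println("Get:", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("ReadAll:", err)
		return
	}
	fmt.Println("fetched", len(body), "bytes")
}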
diff --git a/src/pkg/websocket/client.go b/src/pkg/websocket/client.go
index 78c8b7f57..3712c2d1b 100644
--- a/src/pkg/websocket/client.go
+++ b/src/pkg/websocket/client.go
@@ -235,7 +235,7 @@ func handshake(resourceName, host, origin, location, protocol string, br *bufio.
}
// Step 28-29, 32-40. read response from server.
- resp, err := http.ReadResponse(br, "GET")
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
if err != nil {
return err
}
@@ -297,7 +297,7 @@ func draft75handshake(resourceName, host, origin, location, protocol string, br
}
bw.WriteString("\r\n")
bw.Flush()
- resp, err := http.ReadResponse(br, "GET")
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
if err != nil {
return
}
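
http.ReadResponse now takes the originating *http.Request instead of a bare method string, as the two call sites above show. A small sketch of the new form, parsing a canned response held in memory; the raw response text is made up for illustration:

package main

import (
	"bufio"
	"fmt"
	"http"
	"strings"
)

const rawResponse = "HTTP/1.1 200 OK\r\n" +
	"Content-Length: 2\r\n" +
	"\r\n" +
	"ok"

func main() {
	br := bufio.NewReader(strings.NewReader(rawResponse))
	// The second argument is now the request being answered,
	// rather than just its method string.
	resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
	if err != nil {
		fmt.Println("ReadResponse:", err)
		return
	}
	fmt.Println(resp.Status)
}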
diff --git a/src/pkg/websocket/websocket_test.go b/src/pkg/websocket/websocket_test.go
index 10f88dfd1..84788b416 100644
--- a/src/pkg/websocket/websocket_test.go
+++ b/src/pkg/websocket/websocket_test.go
@@ -150,7 +150,7 @@ func TestHTTP(t *testing.T) {
// If the client did not send a handshake that matches the protocol
// specification, the server should abort the WebSocket connection.
- _, _, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr))
+ _, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr))
if err == nil {
t.Error("Get: unexpected success")
return
@@ -169,7 +169,7 @@ func TestHTTP(t *testing.T) {
func TestHTTPDraft75(t *testing.T) {
once.Do(startServer)
- r, _, err := http.Get(fmt.Sprintf("http://%s/echoDraft75", serverAddr))
+ r, err := http.Get(fmt.Sprintf("http://%s/echoDraft75", serverAddr))
if err != nil {
t.Errorf("Get: error %#v", err)
return
diff --git a/src/pkg/xml/read.go b/src/pkg/xml/read.go
index 554b2a61b..e2b349c3f 100644
--- a/src/pkg/xml/read.go
+++ b/src/pkg/xml/read.go
@@ -220,13 +220,10 @@ func (p *Parser) unmarshal(val reflect.Value, start *StartElement) os.Error {
}
if pv := val; pv.Kind() == reflect.Ptr {
- if pv.Pointer() == 0 {
- zv := reflect.Zero(pv.Type().Elem())
- pv.Set(zv.Addr())
- val = zv
- } else {
- val = pv.Elem()
+ if pv.IsNil() {
+ pv.Set(reflect.New(pv.Type().Elem()))
}
+ val = pv.Elem()
}
var (
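
The rewrite above allocates a fresh value only when the pointer is nil (pv.IsNil()) and then always unmarshals into pv.Elem(), instead of special-casing the zero pointer. The same reflect pattern in isolation, applied to a nil *string such as the new PtrString test field, is sketched below; the helper name is illustrative:

package main

import (
	"fmt"
	"reflect"
)

// ensureAlloc mirrors the fix above: if v is a nil pointer, point it at a
// freshly allocated zero value, then return the element to fill in.
func ensureAlloc(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return v
}

func main() {
	var s *string
	elem := ensureAlloc(reflect.ValueOf(&s).Elem())
	elem.SetString("16")
	fmt.Println(*s) // 16
}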
diff --git a/src/pkg/xml/xml_test.go b/src/pkg/xml/xml_test.go
index a99c1919e..4e51cd53a 100644
--- a/src/pkg/xml/xml_test.go
+++ b/src/pkg/xml/xml_test.go
@@ -329,46 +329,50 @@ func TestSyntax(t *testing.T) {
}
type allScalars struct {
- True1 bool
- True2 bool
- False1 bool
- False2 bool
- Int int
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- Uint int
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
- Uintptr uintptr
- Float32 float32
- Float64 float64
- String string
+ True1 bool
+ True2 bool
+ False1 bool
+ False2 bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint int
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+ String string
+ PtrString *string
}
var all = allScalars{
- True1: true,
- True2: true,
- False1: false,
- False2: false,
- Int: 1,
- Int8: -2,
- Int16: 3,
- Int32: -4,
- Int64: 5,
- Uint: 6,
- Uint8: 7,
- Uint16: 8,
- Uint32: 9,
- Uint64: 10,
- Uintptr: 11,
- Float32: 13.0,
- Float64: 14.0,
- String: "15",
-}
+ True1: true,
+ True2: true,
+ False1: false,
+ False2: false,
+ Int: 1,
+ Int8: -2,
+ Int16: 3,
+ Int32: -4,
+ Int64: 5,
+ Uint: 6,
+ Uint8: 7,
+ Uint16: 8,
+ Uint32: 9,
+ Uint64: 10,
+ Uintptr: 11,
+ Float32: 13.0,
+ Float64: 14.0,
+ String: "15",
+ PtrString: &sixteen,
+}
+
+var sixteen = "16"
const testScalarsInput = `<allscalars>
<true1>true</true1>
@@ -390,6 +394,7 @@ const testScalarsInput = `<allscalars>
<float32>13.0</float32>
<float64>14.0</float64>
<string>15</string>
+ <ptrstring>16</ptrstring>
</allscalars>`
func TestAllScalars(t *testing.T) {
@@ -401,7 +406,7 @@ func TestAllScalars(t *testing.T) {
t.Fatal(err)
}
if !reflect.DeepEqual(a, all) {
- t.Errorf("expected %+v got %+v", all, a)
+ t.Errorf("have %+v want %+v", a, all)
}
}