Diffstat (limited to 'src/pkg/reflect')
-rw-r--r--  src/pkg/reflect/all_test.go     | 1279
-rw-r--r--  src/pkg/reflect/asm_386.s       |   13
-rw-r--r--  src/pkg/reflect/asm_amd64.s     |   13
-rw-r--r--  src/pkg/reflect/asm_arm.s       |   13
-rw-r--r--  src/pkg/reflect/deepequal.go    |    9
-rw-r--r--  src/pkg/reflect/example_test.go |   52
-rw-r--r--  src/pkg/reflect/export_test.go  |   18
-rw-r--r--  src/pkg/reflect/makefunc.go     |   67
-rw-r--r--  src/pkg/reflect/set_test.go     |    8
-rw-r--r--  src/pkg/reflect/type.go         |  808
-rw-r--r--  src/pkg/reflect/value.go        |  716
11 files changed, 2661 insertions, 335 deletions
diff --git a/src/pkg/reflect/all_test.go b/src/pkg/reflect/all_test.go
index e33140563..6f006db18 100644
--- a/src/pkg/reflect/all_test.go
+++ b/src/pkg/reflect/all_test.go
@@ -7,12 +7,15 @@ package reflect_test
import (
"bytes"
"encoding/base64"
+ "flag"
"fmt"
"io"
+ "math/rand"
"os"
. "reflect"
- "runtime"
+ "sync"
"testing"
+ "time"
"unsafe"
)
@@ -1053,26 +1056,393 @@ func TestChan(t *testing.T) {
if l, m := cv.Len(), cv.Cap(); l != len(c) || m != cap(c) {
t.Errorf("Len/Cap = %d/%d want %d/%d", l, m, len(c), cap(c))
}
+}
+// caseInfo describes a single case in a select test.
+type caseInfo struct {
+ desc string
+ canSelect bool
+ recv Value
+ closed bool
+ helper func()
+ panic bool
}
+var allselect = flag.Bool("allselect", false, "exhaustive select test")
+
+func TestSelect(t *testing.T) {
+ selectWatch.once.Do(func() { go selectWatcher() })
+
+ var x exhaustive
+ nch := 0
+ newop := func(n int, cap int) (ch, val Value) {
+ nch++
+ if nch%101%2 == 1 {
+ c := make(chan int, cap)
+ ch = ValueOf(c)
+ val = ValueOf(n)
+ } else {
+ c := make(chan string, cap)
+ ch = ValueOf(c)
+ val = ValueOf(fmt.Sprint(n))
+ }
+ return
+ }
+
+ for n := 0; x.Next(); n++ {
+ if testing.Short() && n >= 1000 {
+ break
+ }
+ if n >= 100000 && !*allselect {
+ break
+ }
+ if n%100000 == 0 && testing.Verbose() {
+ println("TestSelect", n)
+ }
+ var cases []SelectCase
+ var info []caseInfo
+
+ // Ready send.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 1)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ch,
+ Send: val,
+ })
+ info = append(info, caseInfo{desc: "ready send", canSelect: true})
+ }
+
+ // Ready recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 1)
+ ch.Send(val)
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ info = append(info, caseInfo{desc: "ready recv", canSelect: true, recv: val})
+ }
+
+ // Blocking send.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ch,
+ Send: val,
+ })
+ // Let it execute?
+ if x.Maybe() {
+ f := func() { ch.Recv() }
+ info = append(info, caseInfo{desc: "blocking send", helper: f})
+ } else {
+ info = append(info, caseInfo{desc: "blocking send"})
+ }
+ }
+
+ // Blocking recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ // Let it execute?
+ if x.Maybe() {
+ f := func() { ch.Send(val) }
+ info = append(info, caseInfo{desc: "blocking recv", recv: val, helper: f})
+ } else {
+ info = append(info, caseInfo{desc: "blocking recv"})
+ }
+ }
+
+ // Zero Chan send.
+ if x.Maybe() {
+ // Maybe include value to send.
+ var val Value
+ if x.Maybe() {
+ val = ValueOf(100)
+ }
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Send: val,
+ })
+ info = append(info, caseInfo{desc: "zero Chan send"})
+ }
+
+ // Zero Chan receive.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ })
+ info = append(info, caseInfo{desc: "zero Chan recv"})
+ }
+
+ // nil Chan send.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ValueOf((chan int)(nil)),
+ Send: ValueOf(101),
+ })
+ info = append(info, caseInfo{desc: "nil Chan send"})
+ }
+
+ // nil Chan recv.
+ if x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ValueOf((chan int)(nil)),
+ })
+ info = append(info, caseInfo{desc: "nil Chan recv"})
+ }
+
+ // closed Chan send.
+ if x.Maybe() {
+ ch := make(chan int)
+ close(ch)
+ cases = append(cases, SelectCase{
+ Dir: SelectSend,
+ Chan: ValueOf(ch),
+ Send: ValueOf(101),
+ })
+ info = append(info, caseInfo{desc: "closed Chan send", canSelect: true, panic: true})
+ }
+
+ // closed Chan recv.
+ if x.Maybe() {
+ ch, val := newop(len(cases), 0)
+ ch.Close()
+ val = Zero(val.Type())
+ cases = append(cases, SelectCase{
+ Dir: SelectRecv,
+ Chan: ch,
+ })
+ info = append(info, caseInfo{desc: "closed Chan recv", canSelect: true, closed: true, recv: val})
+ }
+
+ var helper func() // goroutine to help the select complete
+
+ // Add default? Must be last case here, but will permute.
+ // Add the default if the select would otherwise
+ // block forever, and maybe add it anyway.
+ numCanSelect := 0
+ canProceed := false
+ canBlock := true
+ canPanic := false
+ helpers := []int{}
+ for i, c := range info {
+ if c.canSelect {
+ canProceed = true
+ canBlock = false
+ numCanSelect++
+ if c.panic {
+ canPanic = true
+ }
+ } else if c.helper != nil {
+ canProceed = true
+ helpers = append(helpers, i)
+ }
+ }
+ if !canProceed || x.Maybe() {
+ cases = append(cases, SelectCase{
+ Dir: SelectDefault,
+ })
+ info = append(info, caseInfo{desc: "default", canSelect: canBlock})
+ numCanSelect++
+ } else if canBlock {
+ // Select needs to communicate with another goroutine.
+ cas := &info[helpers[x.Choose(len(helpers))]]
+ helper = cas.helper
+ cas.canSelect = true
+ numCanSelect++
+ }
+
+ // Permute cases and case info.
+ // Doing too much here makes the exhaustive loop
+ // too exhausting, so just do two swaps.
+ for loop := 0; loop < 2; loop++ {
+ i := x.Choose(len(cases))
+ j := x.Choose(len(cases))
+ cases[i], cases[j] = cases[j], cases[i]
+ info[i], info[j] = info[j], info[i]
+ }
+
+ if helper != nil {
+ // We wait before kicking off a goroutine to satisfy a blocked select.
+ // The pause needs to be big enough to let the select block before
+ // we run the helper, but if we lose that race once in a while it's okay: the
+ // select will just proceed immediately. Not a big deal.
+ // For short tests we can grow the timeout a bit without fear of taking too long.
+ pause := 10 * time.Microsecond
+ if testing.Short() {
+ pause = 100 * time.Microsecond
+ }
+ time.AfterFunc(pause, helper)
+ }
+
+ // Run select.
+ i, recv, recvOK, panicErr := runSelect(cases, info)
+ if panicErr != nil && !canPanic {
+ t.Fatalf("%s\npanicked unexpectedly: %v", fmtSelect(info), panicErr)
+ }
+ if panicErr == nil && canPanic && numCanSelect == 1 {
+ t.Fatalf("%s\nselected #%d incorrectly (should panic)", fmtSelect(info), i)
+ }
+ if panicErr != nil {
+ continue
+ }
+
+ cas := info[i]
+ if !cas.canSelect {
+ recvStr := ""
+ if recv.IsValid() {
+ recvStr = fmt.Sprintf(", received %v, %v", recv.Interface(), recvOK)
+ }
+ t.Fatalf("%s\nselected #%d incorrectly%s", fmtSelect(info), i, recvStr)
+ continue
+ }
+ if cas.panic {
+ t.Fatalf("%s\nselected #%d incorrectly (case should panic)", fmtSelect(info), i)
+ continue
+ }
+
+ if cases[i].Dir == SelectRecv {
+ if !recv.IsValid() {
+ t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, cas.recv.Interface(), !cas.closed)
+ }
+ if !cas.recv.IsValid() {
+ t.Fatalf("%s\nselected #%d but internal error: missing recv value", fmtSelect(info), i)
+ }
+ if recv.Interface() != cas.recv.Interface() || recvOK != !cas.closed {
+ if recv.Interface() == cas.recv.Interface() && recvOK == !cas.closed {
+ t.Fatalf("%s\nselected #%d, got %#v, %v, and DeepEqual is broken on %T", fmtSelect(info), i, recv.Interface(), recvOK, recv.Interface())
+ }
+ t.Fatalf("%s\nselected #%d but got %#v, %v, want %#v, %v", fmtSelect(info), i, recv.Interface(), recvOK, cas.recv.Interface(), !cas.closed)
+ }
+ } else {
+ if recv.IsValid() || recvOK {
+ t.Fatalf("%s\nselected #%d but got %v, %v, want %v, %v", fmtSelect(info), i, recv, recvOK, Value{}, false)
+ }
+ }
+ }
+}
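
A minimal sketch of the Select API that TestSelect exercises above; this is not part of the patch, it assumes the dot-imported reflect package used throughout this test file, and the helper and channel names are illustrative:

func sketchSelect() {
	// Mirror a select statement with one send, one receive, and a default.
	c1 := make(chan int, 1)
	c2 := make(chan string, 1)
	c2 <- "hello"
	cases := []SelectCase{
		{Dir: SelectSend, Chan: ValueOf(c1), Send: ValueOf(42)},
		{Dir: SelectRecv, Chan: ValueOf(c2)},
		{Dir: SelectDefault},
	}
	// Select picks a case that can proceed (falling back to the default
	// if none can) and returns its index; recv and recvOK are meaningful
	// only when the chosen case is a receive.
	chosen, recv, recvOK := Select(cases)
	if cases[chosen].Dir == SelectRecv {
		fmt.Println(chosen, recv.Interface(), recvOK)
	} else {
		fmt.Println(chosen)
	}
}
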
+
+// selectWatch and the selectWatcher are a watchdog mechanism for running Select.
+// If the selectWatcher notices that the select has been blocked for >1 second, it prints
+// an error describing the select and panics the entire test binary.
+var selectWatch struct {
+ sync.Mutex
+ once sync.Once
+ now time.Time
+ info []caseInfo
+}
+
+func selectWatcher() {
+ for {
+ time.Sleep(1 * time.Second)
+ selectWatch.Lock()
+ if selectWatch.info != nil && time.Since(selectWatch.now) > 1*time.Second {
+ fmt.Fprintf(os.Stderr, "TestSelect:\n%s blocked indefinitely\n", fmtSelect(selectWatch.info))
+ panic("select stuck")
+ }
+ selectWatch.Unlock()
+ }
+}
+
+// runSelect runs a single select test.
+// It returns the values returned by Select but also returns
+// a panic value if the Select panics.
+func runSelect(cases []SelectCase, info []caseInfo) (chosen int, recv Value, recvOK bool, panicErr interface{}) {
+ defer func() {
+ panicErr = recover()
+
+ selectWatch.Lock()
+ selectWatch.info = nil
+ selectWatch.Unlock()
+ }()
+
+ selectWatch.Lock()
+ selectWatch.now = time.Now()
+ selectWatch.info = info
+ selectWatch.Unlock()
+
+ chosen, recv, recvOK = Select(cases)
+ return
+}
+
+// fmtSelect formats the information about a single select test.
+func fmtSelect(info []caseInfo) string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "\nselect {\n")
+ for i, cas := range info {
+ fmt.Fprintf(&buf, "%d: %s", i, cas.desc)
+ if cas.recv.IsValid() {
+ fmt.Fprintf(&buf, " val=%#v", cas.recv.Interface())
+ }
+ if cas.canSelect {
+ fmt.Fprintf(&buf, " canselect")
+ }
+ if cas.panic {
+ fmt.Fprintf(&buf, " panic")
+ }
+ fmt.Fprintf(&buf, "\n")
+ }
+ fmt.Fprintf(&buf, "}")
+ return buf.String()
+}
+
+type two [2]uintptr
+
// Difficult test for function call because of
// implicit padding between arguments.
-func dummy(b byte, c int, d byte) (i byte, j int, k byte) {
- return b, c, d
+func dummy(b byte, c int, d byte, e two, f byte, g float32, h byte) (i byte, j int, k byte, l two, m byte, n float32, o byte) {
+ return b, c, d, e, f, g, h
}
func TestFunc(t *testing.T) {
- ret := ValueOf(dummy).Call([]Value{ValueOf(byte(10)), ValueOf(20), ValueOf(byte(30))})
- if len(ret) != 3 {
- t.Fatalf("Call returned %d values, want 3", len(ret))
+ ret := ValueOf(dummy).Call([]Value{
+ ValueOf(byte(10)),
+ ValueOf(20),
+ ValueOf(byte(30)),
+ ValueOf(two{40, 50}),
+ ValueOf(byte(60)),
+ ValueOf(float32(70)),
+ ValueOf(byte(80)),
+ })
+ if len(ret) != 7 {
+ t.Fatalf("Call returned %d values, want 7", len(ret))
}
i := byte(ret[0].Uint())
j := int(ret[1].Int())
k := byte(ret[2].Uint())
- if i != 10 || j != 20 || k != 30 {
- t.Errorf("Call returned %d, %d, %d; want 10, 20, 30", i, j, k)
+ l := ret[3].Interface().(two)
+ m := byte(ret[4].Uint())
+ n := float32(ret[5].Float())
+ o := byte(ret[6].Uint())
+
+ if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
+ t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
+ }
+}
+
+func TestMakeFunc(t *testing.T) {
+ f := dummy
+ fv := MakeFunc(TypeOf(f), func(in []Value) []Value { return in })
+ ValueOf(&f).Elem().Set(fv)
+
+ // Call g with small arguments so that there is
+ // something predictable (and different from the
+ // correct results) in those positions on the stack.
+ g := dummy
+ g(1, 2, 3, two{4, 5}, 6, 7, 8)
+
+ // Call constructed function f.
+ i, j, k, l, m, n, o := f(10, 20, 30, two{40, 50}, 60, 70, 80)
+ if i != 10 || j != 20 || k != 30 || l != (two{40, 50}) || m != 60 || n != 70 || o != 80 {
+ t.Errorf("Call returned %d, %d, %d, %v, %d, %g, %d; want 10, 20, 30, [40, 50], 60, 70, 80", i, j, k, l, m, n, o)
}
}
@@ -1123,7 +1493,7 @@ func TestMethod(t *testing.T) {
}
// Curried method of value.
- tfunc := TypeOf(func(int) int(nil))
+ tfunc := TypeOf((func(int) int)(nil))
v := ValueOf(p).Method(1)
if tt := v.Type(); tt != tfunc {
t.Errorf("Value Method Type is %s; want %s", tt, tfunc)
@@ -1225,7 +1595,7 @@ func TestAnonymousFields(t *testing.T) {
var t1 T1
type1 := TypeOf(t1)
if field, ok = type1.FieldByName("int"); !ok {
- t.Error("no field 'int'")
+ t.Fatal("no field 'int'")
}
if field.Index[0] != 1 {
t.Error("field index should be 1; is", field.Index)
@@ -1282,6 +1652,61 @@ type S4 struct {
A int
}
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+// The X in S15.S11.S6 and S16.S11.S6 annihilate.
+type S14 struct {
+ S15
+ S16
+}
+
+type S15 struct {
+ S11
+}
+
+type S16 struct {
+ S11
+}
+
var fieldTests = []FTest{
{struct{}{}, "", nil, 0},
{struct{}{}, "Foo", nil, 0},
@@ -1303,6 +1728,11 @@ var fieldTests = []FTest{
{S3{E: 'e'}, "E", []int{3}, 'e'},
{S4{A: 'a'}, "A", []int{1}, 'a'},
{S4{}, "B", nil, 0},
+ {S5{}, "X", nil, 0},
+ {S5{}, "Y", []int{2, 0, 1}, 0},
+ {S10{}, "X", nil, 0},
+ {S10{}, "Y", []int{2, 0, 0, 1}, 0},
+ {S14{}, "X", nil, 0},
}
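
The new table entries for S5, S10, and S14 encode the annihilation rules described in the comments above; the following is a minimal sketch of what those entries assert, not part of the patch (the helper name is illustrative):

func sketchAmbiguousField() {
	// S6 and S7 each contribute an X at depth 1, so the two cancel out
	// and also hide the deeper X in S8.S9: the lookup reports failure.
	if _, ok := TypeOf(S5{}).FieldByName("X"); ok {
		panic("ambiguous X should not be found")
	}
	// Y lives only in S8.S9, so it is still reachable, at index [2 0 1].
	if f, ok := TypeOf(S5{}).FieldByName("Y"); ok {
		fmt.Println(f.Index) // [2 0 1]
	}
}
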
func TestFieldByIndex(t *testing.T) {
@@ -1346,7 +1776,7 @@ func TestFieldByName(t *testing.T) {
if test.index != nil {
// Verify field depth and index.
if len(f.Index) != len(test.index) {
- t.Errorf("%s.%s depth %d; want %d", s.Name(), test.name, len(f.Index), len(test.index))
+ t.Errorf("%s.%s depth %d; want %d: %v vs %v", s.Name(), test.name, len(f.Index), len(test.index), f.Index, test.index)
} else {
for i, x := range f.Index {
if x != test.index[i] {
@@ -1384,7 +1814,30 @@ func TestImportPath(t *testing.T) {
path string
}{
{TypeOf(&base64.Encoding{}).Elem(), "encoding/base64"},
+ {TypeOf(int(0)), ""},
+ {TypeOf(int8(0)), ""},
+ {TypeOf(int16(0)), ""},
+ {TypeOf(int32(0)), ""},
+ {TypeOf(int64(0)), ""},
{TypeOf(uint(0)), ""},
+ {TypeOf(uint8(0)), ""},
+ {TypeOf(uint16(0)), ""},
+ {TypeOf(uint32(0)), ""},
+ {TypeOf(uint64(0)), ""},
+ {TypeOf(uintptr(0)), ""},
+ {TypeOf(float32(0)), ""},
+ {TypeOf(float64(0)), ""},
+ {TypeOf(complex64(0)), ""},
+ {TypeOf(complex128(0)), ""},
+ {TypeOf(byte(0)), ""},
+ {TypeOf(rune(0)), ""},
+ {TypeOf([]byte(nil)), ""},
+ {TypeOf([]rune(nil)), ""},
+ {TypeOf(string("")), ""},
+ {TypeOf((*interface{})(nil)).Elem(), ""},
+ {TypeOf((*byte)(nil)), ""},
+ {TypeOf((*rune)(nil)), ""},
+ {TypeOf((*int64)(nil)), ""},
{TypeOf(map[string]int{}), ""},
{TypeOf((*error)(nil)).Elem(), ""},
}
@@ -1558,21 +2011,13 @@ func TestAddr(t *testing.T) {
}
func noAlloc(t *testing.T, n int, f func(int)) {
- // once to prime everything
- f(-1)
- memstats := new(runtime.MemStats)
- runtime.ReadMemStats(memstats)
- oldmallocs := memstats.Mallocs
-
- for j := 0; j < n; j++ {
- f(j)
- }
- // A few allocs may happen in the testing package when GOMAXPROCS > 1, so don't
- // require zero mallocs.
- runtime.ReadMemStats(memstats)
- mallocs := memstats.Mallocs - oldmallocs
- if mallocs > 5 {
- t.Fatalf("%d mallocs after %d iterations", mallocs, n)
+ i := -1
+ allocs := testing.AllocsPerRun(n, func() {
+ f(i)
+ i++
+ })
+ if allocs > 0 {
+ t.Errorf("%d iterations: got %v mallocs, want 0", n, allocs)
}
}
@@ -1596,6 +2041,24 @@ func TestSmallNegativeInt(t *testing.T) {
}
}
+func TestIndex(t *testing.T) {
+ xs := []byte{1, 2, 3, 4, 5, 6, 7, 8}
+ v := ValueOf(xs).Index(3).Interface().(byte)
+ if v != xs[3] {
+ t.Errorf("xs.Index(3) = %v; expected %v", v, xs[3])
+ }
+ xa := [8]byte{10, 20, 30, 40, 50, 60, 70, 80}
+ v = ValueOf(xa).Index(2).Interface().(byte)
+ if v != xa[2] {
+ t.Errorf("xa.Index(2) = %v; expected %v", v, xa[2])
+ }
+ s := "0123456789"
+ v = ValueOf(s).Index(3).Interface().(byte)
+ if v != s[3] {
+ t.Errorf("s.Index(3) = %v; expected %v", v, s[3])
+ }
+}
+
func TestSlice(t *testing.T) {
xs := []int{1, 2, 3, 4, 5, 6, 7, 8}
v := ValueOf(xs).Slice(3, 5).Interface().([]int)
@@ -1608,7 +2071,6 @@ func TestSlice(t *testing.T) {
if !DeepEqual(v[0:5], xs[3:]) {
t.Errorf("xs.Slice(3, 5)[0:5] = %v", v[0:5])
}
-
xa := [8]int{10, 20, 30, 40, 50, 60, 70, 80}
v = ValueOf(&xa).Elem().Slice(2, 5).Interface().([]int)
if len(v) != 3 {
@@ -1620,6 +2082,11 @@ func TestSlice(t *testing.T) {
if !DeepEqual(v[0:6], xa[2:]) {
t.Errorf("xs.Slice(2, 5)[0:6] = %v", v[0:6])
}
+ s := "0123456789"
+ vs := ValueOf(s).Slice(3, 5).Interface().(string)
+ if vs != s[3:5] {
+ t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5])
+ }
}
func TestVariadic(t *testing.T) {
@@ -1760,3 +2227,761 @@ func TestAlias(t *testing.T) {
t.Errorf("aliasing: old=%q new=%q, want hello, world", oldvalue, newvalue)
}
}
+
+var V = ValueOf
+
+func EmptyInterfaceV(x interface{}) Value {
+ return ValueOf(&x).Elem()
+}
+
+func ReaderV(x io.Reader) Value {
+ return ValueOf(&x).Elem()
+}
+
+func ReadWriterV(x io.ReadWriter) Value {
+ return ValueOf(&x).Elem()
+}
+
+type Empty struct{}
+type MyString string
+type MyBytes []byte
+type MyRunes []int32
+type MyFunc func()
+type MyByte byte
+
+var convertTests = []struct {
+ in Value
+ out Value
+}{
+ // numbers
+ /*
+ Edit .+1,/\*\//-1>cat >/tmp/x.go && go run /tmp/x.go
+
+ package main
+
+ import "fmt"
+
+ var numbers = []string{
+ "int8", "uint8", "int16", "uint16",
+ "int32", "uint32", "int64", "uint64",
+ "int", "uint", "uintptr",
+ "float32", "float64",
+ }
+
+ func main() {
+ // all pairs but in an unusual order,
+ // to emit all the int8, uint8 cases
+ // before n grows too big.
+ n := 1
+ for i, f := range numbers {
+ for _, g := range numbers[i:] {
+ fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", f, n, g, n)
+ n++
+ if f != g {
+ fmt.Printf("\t{V(%s(%d)), V(%s(%d))},\n", g, n, f, n)
+ n++
+ }
+ }
+ }
+ }
+ */
+ {V(int8(1)), V(int8(1))},
+ {V(int8(2)), V(uint8(2))},
+ {V(uint8(3)), V(int8(3))},
+ {V(int8(4)), V(int16(4))},
+ {V(int16(5)), V(int8(5))},
+ {V(int8(6)), V(uint16(6))},
+ {V(uint16(7)), V(int8(7))},
+ {V(int8(8)), V(int32(8))},
+ {V(int32(9)), V(int8(9))},
+ {V(int8(10)), V(uint32(10))},
+ {V(uint32(11)), V(int8(11))},
+ {V(int8(12)), V(int64(12))},
+ {V(int64(13)), V(int8(13))},
+ {V(int8(14)), V(uint64(14))},
+ {V(uint64(15)), V(int8(15))},
+ {V(int8(16)), V(int(16))},
+ {V(int(17)), V(int8(17))},
+ {V(int8(18)), V(uint(18))},
+ {V(uint(19)), V(int8(19))},
+ {V(int8(20)), V(uintptr(20))},
+ {V(uintptr(21)), V(int8(21))},
+ {V(int8(22)), V(float32(22))},
+ {V(float32(23)), V(int8(23))},
+ {V(int8(24)), V(float64(24))},
+ {V(float64(25)), V(int8(25))},
+ {V(uint8(26)), V(uint8(26))},
+ {V(uint8(27)), V(int16(27))},
+ {V(int16(28)), V(uint8(28))},
+ {V(uint8(29)), V(uint16(29))},
+ {V(uint16(30)), V(uint8(30))},
+ {V(uint8(31)), V(int32(31))},
+ {V(int32(32)), V(uint8(32))},
+ {V(uint8(33)), V(uint32(33))},
+ {V(uint32(34)), V(uint8(34))},
+ {V(uint8(35)), V(int64(35))},
+ {V(int64(36)), V(uint8(36))},
+ {V(uint8(37)), V(uint64(37))},
+ {V(uint64(38)), V(uint8(38))},
+ {V(uint8(39)), V(int(39))},
+ {V(int(40)), V(uint8(40))},
+ {V(uint8(41)), V(uint(41))},
+ {V(uint(42)), V(uint8(42))},
+ {V(uint8(43)), V(uintptr(43))},
+ {V(uintptr(44)), V(uint8(44))},
+ {V(uint8(45)), V(float32(45))},
+ {V(float32(46)), V(uint8(46))},
+ {V(uint8(47)), V(float64(47))},
+ {V(float64(48)), V(uint8(48))},
+ {V(int16(49)), V(int16(49))},
+ {V(int16(50)), V(uint16(50))},
+ {V(uint16(51)), V(int16(51))},
+ {V(int16(52)), V(int32(52))},
+ {V(int32(53)), V(int16(53))},
+ {V(int16(54)), V(uint32(54))},
+ {V(uint32(55)), V(int16(55))},
+ {V(int16(56)), V(int64(56))},
+ {V(int64(57)), V(int16(57))},
+ {V(int16(58)), V(uint64(58))},
+ {V(uint64(59)), V(int16(59))},
+ {V(int16(60)), V(int(60))},
+ {V(int(61)), V(int16(61))},
+ {V(int16(62)), V(uint(62))},
+ {V(uint(63)), V(int16(63))},
+ {V(int16(64)), V(uintptr(64))},
+ {V(uintptr(65)), V(int16(65))},
+ {V(int16(66)), V(float32(66))},
+ {V(float32(67)), V(int16(67))},
+ {V(int16(68)), V(float64(68))},
+ {V(float64(69)), V(int16(69))},
+ {V(uint16(70)), V(uint16(70))},
+ {V(uint16(71)), V(int32(71))},
+ {V(int32(72)), V(uint16(72))},
+ {V(uint16(73)), V(uint32(73))},
+ {V(uint32(74)), V(uint16(74))},
+ {V(uint16(75)), V(int64(75))},
+ {V(int64(76)), V(uint16(76))},
+ {V(uint16(77)), V(uint64(77))},
+ {V(uint64(78)), V(uint16(78))},
+ {V(uint16(79)), V(int(79))},
+ {V(int(80)), V(uint16(80))},
+ {V(uint16(81)), V(uint(81))},
+ {V(uint(82)), V(uint16(82))},
+ {V(uint16(83)), V(uintptr(83))},
+ {V(uintptr(84)), V(uint16(84))},
+ {V(uint16(85)), V(float32(85))},
+ {V(float32(86)), V(uint16(86))},
+ {V(uint16(87)), V(float64(87))},
+ {V(float64(88)), V(uint16(88))},
+ {V(int32(89)), V(int32(89))},
+ {V(int32(90)), V(uint32(90))},
+ {V(uint32(91)), V(int32(91))},
+ {V(int32(92)), V(int64(92))},
+ {V(int64(93)), V(int32(93))},
+ {V(int32(94)), V(uint64(94))},
+ {V(uint64(95)), V(int32(95))},
+ {V(int32(96)), V(int(96))},
+ {V(int(97)), V(int32(97))},
+ {V(int32(98)), V(uint(98))},
+ {V(uint(99)), V(int32(99))},
+ {V(int32(100)), V(uintptr(100))},
+ {V(uintptr(101)), V(int32(101))},
+ {V(int32(102)), V(float32(102))},
+ {V(float32(103)), V(int32(103))},
+ {V(int32(104)), V(float64(104))},
+ {V(float64(105)), V(int32(105))},
+ {V(uint32(106)), V(uint32(106))},
+ {V(uint32(107)), V(int64(107))},
+ {V(int64(108)), V(uint32(108))},
+ {V(uint32(109)), V(uint64(109))},
+ {V(uint64(110)), V(uint32(110))},
+ {V(uint32(111)), V(int(111))},
+ {V(int(112)), V(uint32(112))},
+ {V(uint32(113)), V(uint(113))},
+ {V(uint(114)), V(uint32(114))},
+ {V(uint32(115)), V(uintptr(115))},
+ {V(uintptr(116)), V(uint32(116))},
+ {V(uint32(117)), V(float32(117))},
+ {V(float32(118)), V(uint32(118))},
+ {V(uint32(119)), V(float64(119))},
+ {V(float64(120)), V(uint32(120))},
+ {V(int64(121)), V(int64(121))},
+ {V(int64(122)), V(uint64(122))},
+ {V(uint64(123)), V(int64(123))},
+ {V(int64(124)), V(int(124))},
+ {V(int(125)), V(int64(125))},
+ {V(int64(126)), V(uint(126))},
+ {V(uint(127)), V(int64(127))},
+ {V(int64(128)), V(uintptr(128))},
+ {V(uintptr(129)), V(int64(129))},
+ {V(int64(130)), V(float32(130))},
+ {V(float32(131)), V(int64(131))},
+ {V(int64(132)), V(float64(132))},
+ {V(float64(133)), V(int64(133))},
+ {V(uint64(134)), V(uint64(134))},
+ {V(uint64(135)), V(int(135))},
+ {V(int(136)), V(uint64(136))},
+ {V(uint64(137)), V(uint(137))},
+ {V(uint(138)), V(uint64(138))},
+ {V(uint64(139)), V(uintptr(139))},
+ {V(uintptr(140)), V(uint64(140))},
+ {V(uint64(141)), V(float32(141))},
+ {V(float32(142)), V(uint64(142))},
+ {V(uint64(143)), V(float64(143))},
+ {V(float64(144)), V(uint64(144))},
+ {V(int(145)), V(int(145))},
+ {V(int(146)), V(uint(146))},
+ {V(uint(147)), V(int(147))},
+ {V(int(148)), V(uintptr(148))},
+ {V(uintptr(149)), V(int(149))},
+ {V(int(150)), V(float32(150))},
+ {V(float32(151)), V(int(151))},
+ {V(int(152)), V(float64(152))},
+ {V(float64(153)), V(int(153))},
+ {V(uint(154)), V(uint(154))},
+ {V(uint(155)), V(uintptr(155))},
+ {V(uintptr(156)), V(uint(156))},
+ {V(uint(157)), V(float32(157))},
+ {V(float32(158)), V(uint(158))},
+ {V(uint(159)), V(float64(159))},
+ {V(float64(160)), V(uint(160))},
+ {V(uintptr(161)), V(uintptr(161))},
+ {V(uintptr(162)), V(float32(162))},
+ {V(float32(163)), V(uintptr(163))},
+ {V(uintptr(164)), V(float64(164))},
+ {V(float64(165)), V(uintptr(165))},
+ {V(float32(166)), V(float32(166))},
+ {V(float32(167)), V(float64(167))},
+ {V(float64(168)), V(float32(168))},
+ {V(float64(169)), V(float64(169))},
+
+ // truncation
+ {V(float64(1.5)), V(int(1))},
+
+ // complex
+ {V(complex64(1i)), V(complex64(1i))},
+ {V(complex64(2i)), V(complex128(2i))},
+ {V(complex128(3i)), V(complex64(3i))},
+ {V(complex128(4i)), V(complex128(4i))},
+
+ // string
+ {V(string("hello")), V(string("hello"))},
+ {V(string("bytes1")), V([]byte("bytes1"))},
+ {V([]byte("bytes2")), V(string("bytes2"))},
+ {V([]byte("bytes3")), V([]byte("bytes3"))},
+ {V(string("runes♝")), V([]rune("runes♝"))},
+ {V([]rune("runes♕")), V(string("runes♕"))},
+ {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V(int('a')), V(string("a"))},
+ {V(int8('a')), V(string("a"))},
+ {V(int16('a')), V(string("a"))},
+ {V(int32('a')), V(string("a"))},
+ {V(int64('a')), V(string("a"))},
+ {V(uint('a')), V(string("a"))},
+ {V(uint8('a')), V(string("a"))},
+ {V(uint16('a')), V(string("a"))},
+ {V(uint32('a')), V(string("a"))},
+ {V(uint64('a')), V(string("a"))},
+ {V(uintptr('a')), V(string("a"))},
+ {V(int(-1)), V(string("\uFFFD"))},
+ {V(int8(-2)), V(string("\uFFFD"))},
+ {V(int16(-3)), V(string("\uFFFD"))},
+ {V(int32(-4)), V(string("\uFFFD"))},
+ {V(int64(-5)), V(string("\uFFFD"))},
+ {V(uint(0x110001)), V(string("\uFFFD"))},
+ {V(uint32(0x110002)), V(string("\uFFFD"))},
+ {V(uint64(0x110003)), V(string("\uFFFD"))},
+ {V(uintptr(0x110004)), V(string("\uFFFD"))},
+
+ // named string
+ {V(MyString("hello")), V(string("hello"))},
+ {V(string("hello")), V(MyString("hello"))},
+ {V(string("hello")), V(string("hello"))},
+ {V(MyString("hello")), V(MyString("hello"))},
+ {V(MyString("bytes1")), V([]byte("bytes1"))},
+ {V([]byte("bytes2")), V(MyString("bytes2"))},
+ {V([]byte("bytes3")), V([]byte("bytes3"))},
+ {V(MyString("runes♝")), V([]rune("runes♝"))},
+ {V([]rune("runes♕")), V(MyString("runes♕"))},
+ {V([]rune("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V([]rune("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
+ {V(MyRunes("runes🙈🙉🙊")), V([]rune("runes🙈🙉🙊"))},
+ {V(int('a')), V(MyString("a"))},
+ {V(int8('a')), V(MyString("a"))},
+ {V(int16('a')), V(MyString("a"))},
+ {V(int32('a')), V(MyString("a"))},
+ {V(int64('a')), V(MyString("a"))},
+ {V(uint('a')), V(MyString("a"))},
+ {V(uint8('a')), V(MyString("a"))},
+ {V(uint16('a')), V(MyString("a"))},
+ {V(uint32('a')), V(MyString("a"))},
+ {V(uint64('a')), V(MyString("a"))},
+ {V(uintptr('a')), V(MyString("a"))},
+ {V(int(-1)), V(MyString("\uFFFD"))},
+ {V(int8(-2)), V(MyString("\uFFFD"))},
+ {V(int16(-3)), V(MyString("\uFFFD"))},
+ {V(int32(-4)), V(MyString("\uFFFD"))},
+ {V(int64(-5)), V(MyString("\uFFFD"))},
+ {V(uint(0x110001)), V(MyString("\uFFFD"))},
+ {V(uint32(0x110002)), V(MyString("\uFFFD"))},
+ {V(uint64(0x110003)), V(MyString("\uFFFD"))},
+ {V(uintptr(0x110004)), V(MyString("\uFFFD"))},
+
+ // named []byte
+ {V(string("bytes1")), V(MyBytes("bytes1"))},
+ {V(MyBytes("bytes2")), V(string("bytes2"))},
+ {V(MyBytes("bytes3")), V(MyBytes("bytes3"))},
+ {V(MyString("bytes1")), V(MyBytes("bytes1"))},
+ {V(MyBytes("bytes2")), V(MyString("bytes2"))},
+
+ // named []rune
+ {V(string("runes♝")), V(MyRunes("runes♝"))},
+ {V(MyRunes("runes♕")), V(string("runes♕"))},
+ {V(MyRunes("runes🙈🙉🙊")), V(MyRunes("runes🙈🙉🙊"))},
+ {V(MyString("runes♝")), V(MyRunes("runes♝"))},
+ {V(MyRunes("runes♕")), V(MyString("runes♕"))},
+
+ // named types and equal underlying types
+ {V(new(int)), V(new(integer))},
+ {V(new(integer)), V(new(int))},
+ {V(Empty{}), V(struct{}{})},
+ {V(new(Empty)), V(new(struct{}))},
+ {V(struct{}{}), V(Empty{})},
+ {V(new(struct{})), V(new(Empty))},
+ {V(Empty{}), V(Empty{})},
+ {V(MyBytes{}), V([]byte{})},
+ {V([]byte{}), V(MyBytes{})},
+ {V((func())(nil)), V(MyFunc(nil))},
+ {V((MyFunc)(nil)), V((func())(nil))},
+
+ // can convert *byte and *MyByte
+ {V((*byte)(nil)), V((*MyByte)(nil))},
+ {V((*MyByte)(nil)), V((*byte)(nil))},
+
+ // cannot convert mismatched array sizes
+ {V([2]byte{}), V([2]byte{})},
+ {V([3]byte{}), V([3]byte{})},
+
+ // cannot convert other instances
+ {V((**byte)(nil)), V((**byte)(nil))},
+ {V((**MyByte)(nil)), V((**MyByte)(nil))},
+ {V((chan byte)(nil)), V((chan byte)(nil))},
+ {V((chan MyByte)(nil)), V((chan MyByte)(nil))},
+ {V(([]byte)(nil)), V(([]byte)(nil))},
+ {V(([]MyByte)(nil)), V(([]MyByte)(nil))},
+ {V((map[int]byte)(nil)), V((map[int]byte)(nil))},
+ {V((map[int]MyByte)(nil)), V((map[int]MyByte)(nil))},
+ {V((map[byte]int)(nil)), V((map[byte]int)(nil))},
+ {V((map[MyByte]int)(nil)), V((map[MyByte]int)(nil))},
+ {V([2]byte{}), V([2]byte{})},
+ {V([2]MyByte{}), V([2]MyByte{})},
+
+ // other
+ {V((***int)(nil)), V((***int)(nil))},
+ {V((***byte)(nil)), V((***byte)(nil))},
+ {V((***int32)(nil)), V((***int32)(nil))},
+ {V((***int64)(nil)), V((***int64)(nil))},
+ {V((chan int)(nil)), V((<-chan int)(nil))},
+ {V((chan int)(nil)), V((chan<- int)(nil))},
+ {V((chan string)(nil)), V((<-chan string)(nil))},
+ {V((chan string)(nil)), V((chan<- string)(nil))},
+ {V((chan byte)(nil)), V((chan byte)(nil))},
+ {V((chan MyByte)(nil)), V((chan MyByte)(nil))},
+ {V((map[int]bool)(nil)), V((map[int]bool)(nil))},
+ {V((map[int]byte)(nil)), V((map[int]byte)(nil))},
+ {V((map[uint]bool)(nil)), V((map[uint]bool)(nil))},
+ {V([]uint(nil)), V([]uint(nil))},
+ {V([]int(nil)), V([]int(nil))},
+ {V(new(interface{})), V(new(interface{}))},
+ {V(new(io.Reader)), V(new(io.Reader))},
+ {V(new(io.Writer)), V(new(io.Writer))},
+
+ // interfaces
+ {V(int(1)), EmptyInterfaceV(int(1))},
+ {V(string("hello")), EmptyInterfaceV(string("hello"))},
+ {V(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
+ {ReadWriterV(new(bytes.Buffer)), ReaderV(new(bytes.Buffer))},
+ {V(new(bytes.Buffer)), ReadWriterV(new(bytes.Buffer))},
+}
+
+func TestConvert(t *testing.T) {
+ canConvert := map[[2]Type]bool{}
+ all := map[Type]bool{}
+
+ for _, tt := range convertTests {
+ t1 := tt.in.Type()
+ if !t1.ConvertibleTo(t1) {
+ t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t1)
+ continue
+ }
+
+ t2 := tt.out.Type()
+ if !t1.ConvertibleTo(t2) {
+ t.Errorf("(%s).ConvertibleTo(%s) = false, want true", t1, t2)
+ continue
+ }
+
+ all[t1] = true
+ all[t2] = true
+ canConvert[[2]Type{t1, t2}] = true
+
+ v1 := tt.in
+ vout1 := v1.Convert(t1)
+ out1 := vout1.Interface()
+ if vout1.Type() != tt.in.Type() || !DeepEqual(out1, tt.in.Interface()) {
+ t.Errorf("ValueOf(%T(%v)).Convert(%s) = %T(%v), want %T(%v)", tt.in.Interface(), tt.in.Interface(), t1, out1, out1, tt.in.Interface(), tt.in.Interface())
+ }
+
+ vout := v1.Convert(t2)
+ out := vout.Interface()
+ if vout.Type() != tt.out.Type() || !DeepEqual(out, tt.out.Interface()) {
+ t.Errorf("ValueOf(%T(%v)).Convert(%s) = %T(%v), want %T(%v)", tt.in.Interface(), tt.in.Interface(), t2, out, out, tt.out.Interface(), tt.out.Interface())
+ }
+
+ if IsRO(v1) {
+ t.Errorf("table entry %v is RO, should not be", v1)
+ }
+ if IsRO(vout1) {
+ t.Errorf("self-conversion output %v is RO, should not be", vout1)
+ }
+ if IsRO(vout) {
+ t.Errorf("conversion output %v is RO, should not be", vout)
+ }
+ if !IsRO(MakeRO(v1).Convert(t1)) {
+ t.Errorf("RO self-conversion output %v is not RO, should be", v1)
+ }
+ if !IsRO(MakeRO(v1).Convert(t2)) {
+ t.Errorf("RO conversion output %v is not RO, should be", v1)
+ }
+ }
+
+ // Assume that of all the types we saw during the tests,
+ // if there wasn't an explicit entry for a conversion between
+ // a pair of types, then it's not to be allowed. This checks for
+ // things like 'int64' converting to '*int'.
+ for t1 := range all {
+ for t2 := range all {
+ expectOK := t1 == t2 || canConvert[[2]Type{t1, t2}] || t2.Kind() == Interface && t2.NumMethod() == 0
+ if ok := t1.ConvertibleTo(t2); ok != expectOK {
+ t.Errorf("(%s).ConvertibleTo(%s) = %v, want %v", t1, t2, ok, expectOK)
+ }
+ }
+ }
+}
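
A minimal sketch of the Convert and ConvertibleTo calls that TestConvert drives from the table above; this is not part of the patch, and the helper name and example types are illustrative:

func sketchConvert() {
	// Type-level query: int is convertible to float64.
	if TypeOf(int(0)).ConvertibleTo(TypeOf(float64(0))) {
		// Value-level conversion, the reflect analogue of float64(3).
		v := ValueOf(3).Convert(TypeOf(float64(0)))
		fmt.Println(v.Float()) // 3
	}
	// Conversions the language forbids are rejected here too, e.g. int64 to *int.
	fmt.Println(TypeOf(int64(0)).ConvertibleTo(TypeOf((*int)(nil)))) // false
}
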
+
+func TestOverflow(t *testing.T) {
+ if ovf := V(float64(0)).OverflowFloat(1e300); ovf {
+ t.Errorf("%v wrongly overflows float64", 1e300)
+ }
+
+ maxFloat32 := float64((1<<24 - 1) << (127 - 23))
+ if ovf := V(float32(0)).OverflowFloat(maxFloat32); ovf {
+ t.Errorf("%v wrongly overflows float32", maxFloat32)
+ }
+ ovfFloat32 := float64((1<<24-1)<<(127-23) + 1<<(127-52))
+ if ovf := V(float32(0)).OverflowFloat(ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", ovfFloat32)
+ }
+ if ovf := V(float32(0)).OverflowFloat(-ovfFloat32); !ovf {
+ t.Errorf("%v should overflow float32", -ovfFloat32)
+ }
+
+ maxInt32 := int64(0x7fffffff)
+ if ovf := V(int32(0)).OverflowInt(maxInt32); ovf {
+ t.Errorf("%v wrongly overflows int32", maxInt32)
+ }
+ if ovf := V(int32(0)).OverflowInt(-1 << 31); ovf {
+ t.Errorf("%v wrongly overflows int32", -int64(1)<<31)
+ }
+ ovfInt32 := int64(1 << 31)
+ if ovf := V(int32(0)).OverflowInt(ovfInt32); !ovf {
+ t.Errorf("%v should overflow int32", ovfInt32)
+ }
+
+ maxUint32 := uint64(0xffffffff)
+ if ovf := V(uint32(0)).OverflowUint(maxUint32); ovf {
+ t.Errorf("%v wrongly overflows uint32", maxUint32)
+ }
+ ovfUint32 := uint64(1 << 32)
+ if ovf := V(uint32(0)).OverflowUint(ovfUint32); !ovf {
+ t.Errorf("%v should overflow uint32", ovfUint32)
+ }
+}
+
+func checkSameType(t *testing.T, x, y interface{}) {
+ if TypeOf(x) != TypeOf(y) {
+ t.Errorf("did not find preexisting type for %s (vs %s)", TypeOf(x), TypeOf(y))
+ }
+}
+
+func TestArrayOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T int
+ at := ArrayOf(10, TypeOf(T(1)))
+ v := New(at).Elem()
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(T(i)))
+ }
+ s := fmt.Sprint(v.Interface())
+ want := "[0 1 2 3 4 5 6 7 8 9]"
+ if s != want {
+ t.Errorf("constructed array = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ checkSameType(t, Zero(ArrayOf(5, TypeOf(T(1)))).Interface(), [5]T{})
+}
+
+func TestSliceOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T int
+ st := SliceOf(TypeOf(T(1)))
+ v := MakeSlice(st, 10, 10)
+ for i := 0; i < v.Len(); i++ {
+ v.Index(i).Set(ValueOf(T(i)))
+ }
+ s := fmt.Sprint(v.Interface())
+ want := "[0 1 2 3 4 5 6 7 8 9]"
+ if s != want {
+ t.Errorf("constructed slice = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, Zero(SliceOf(TypeOf(T1(1)))).Interface(), []T1{})
+}
+
+func TestChanOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type T string
+ ct := ChanOf(BothDir, TypeOf(T("")))
+ v := MakeChan(ct, 2)
+ v.Send(ValueOf(T("hello")))
+ v.Send(ValueOf(T("world")))
+
+ sv1, _ := v.Recv()
+ sv2, _ := v.Recv()
+ s1 := sv1.String()
+ s2 := sv2.String()
+ if s1 != "hello" || s2 != "world" {
+ t.Errorf("constructed chan: have %q, %q, want %q, %q", s1, s2, "hello", "world")
+ }
+
+ // check that type already in binary is found
+ type T1 int
+ checkSameType(t, Zero(ChanOf(BothDir, TypeOf(T1(1)))).Interface(), (chan T1)(nil))
+}
+
+func TestMapOf(t *testing.T) {
+ // check construction and use of type not in binary
+ type K string
+ type V float64
+
+ v := MakeMap(MapOf(TypeOf(K("")), TypeOf(V(0))))
+ v.SetMapIndex(ValueOf(K("a")), ValueOf(V(1)))
+
+ s := fmt.Sprint(v.Interface())
+ want := "map[a:1]"
+ if s != want {
+ t.Errorf("constructed map = %s, want %s", s, want)
+ }
+
+ // check that type already in binary is found
+ checkSameType(t, Zero(MapOf(TypeOf(V(0)), TypeOf(K("")))).Interface(), map[V]K(nil))
+}
+
+type B1 struct {
+ X int
+ Y int
+ Z int
+}
+
+func BenchmarkFieldByName1(b *testing.B) {
+ t := TypeOf(B1{})
+ for i := 0; i < b.N; i++ {
+ t.FieldByName("Z")
+ }
+}
+
+func BenchmarkFieldByName2(b *testing.B) {
+ t := TypeOf(S3{})
+ for i := 0; i < b.N; i++ {
+ t.FieldByName("B")
+ }
+}
+
+type R0 struct {
+ *R1
+ *R2
+ *R3
+ *R4
+}
+
+type R1 struct {
+ *R5
+ *R6
+ *R7
+ *R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+ *R9
+ *R10
+ *R11
+ *R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+ *R13
+ *R14
+ *R15
+ *R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+ *R17
+ *R18
+ *R19
+ *R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+ *R21
+ *R22
+ *R23
+ *R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+ X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+func TestEmbed(t *testing.T) {
+ typ := TypeOf(R0{})
+ f, ok := typ.FieldByName("X")
+ if ok {
+ t.Fatalf(`FieldByName("X") should fail, returned %v`, f.Index)
+ }
+}
+
+func BenchmarkFieldByName3(b *testing.B) {
+ t := TypeOf(R0{})
+ for i := 0; i < b.N; i++ {
+ t.FieldByName("X")
+ }
+}
+
+// An exhaustive is a mechanism for writing exhaustive or stochastic tests.
+// The basic usage is:
+//
+// for x.Next() {
+// ... code using x.Maybe() or x.Choose(n) to create test cases ...
+// }
+//
+// Each iteration of the loop returns a different set of results, until all
+// possible result sets have been explored. It is okay for different code paths
+// to make different method call sequences on x, but there must be no
+// other source of non-determinism in the call sequences.
+//
+// When faced with a new decision, x chooses randomly. Future explorations
+// of that path will choose successive values for the result. Thus, stopping
+// the loop after a fixed number of iterations gives somewhat stochastic
+// testing.
+//
+// Example:
+//
+// for x.Next() {
+// v := make([]bool, x.Choose(4))
+// for i := range v {
+// v[i] = x.Maybe()
+// }
+// fmt.Println(v)
+// }
+//
+// prints (in some order):
+//
+// []
+// [false]
+// [true]
+// [false false]
+// [false true]
+// ...
+// [true true]
+// [false false false]
+// ...
+// [true true true]
+// [false false false false]
+// ...
+// [true true true true]
+//
+type exhaustive struct {
+ r *rand.Rand
+ pos int
+ last []choice
+}
+
+type choice struct {
+ off int
+ n int
+ max int
+}
+
+func (x *exhaustive) Next() bool {
+ if x.r == nil {
+ x.r = rand.New(rand.NewSource(time.Now().UnixNano()))
+ }
+ x.pos = 0
+ if x.last == nil {
+ x.last = []choice{}
+ return true
+ }
+ for i := len(x.last) - 1; i >= 0; i-- {
+ c := &x.last[i]
+ if c.n+1 < c.max {
+ c.n++
+ x.last = x.last[:i+1]
+ return true
+ }
+ }
+ return false
+}
+
+func (x *exhaustive) Choose(max int) int {
+ if x.pos >= len(x.last) {
+ x.last = append(x.last, choice{x.r.Intn(max), 0, max})
+ }
+ c := &x.last[x.pos]
+ x.pos++
+ if c.max != max {
+ panic("inconsistent use of exhaustive tester")
+ }
+ return (c.n + c.off) % max
+}
+
+func (x *exhaustive) Maybe() bool {
+ return x.Choose(2) == 1
+}
diff --git a/src/pkg/reflect/asm_386.s b/src/pkg/reflect/asm_386.s
new file mode 100644
index 000000000..27d3fa21d
--- /dev/null
+++ b/src/pkg/reflect/asm_386.s
@@ -0,0 +1,13 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in value.go
+// for more details.
+TEXT ·makeFuncStub(SB),7,$8
+ MOVL DX, 0(SP)
+ LEAL arg+0(FP), CX
+ MOVL CX, 4(SP)
+ CALL ·callReflect(SB)
+ RET
diff --git a/src/pkg/reflect/asm_amd64.s b/src/pkg/reflect/asm_amd64.s
new file mode 100644
index 000000000..d51d982a9
--- /dev/null
+++ b/src/pkg/reflect/asm_amd64.s
@@ -0,0 +1,13 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in value.go
+// for more details.
+TEXT ·makeFuncStub(SB),7,$16
+ MOVQ DX, 0(SP)
+ LEAQ arg+0(FP), CX
+ MOVQ CX, 8(SP)
+ CALL ·callReflect(SB)
+ RET
diff --git a/src/pkg/reflect/asm_arm.s b/src/pkg/reflect/asm_arm.s
new file mode 100644
index 000000000..db487f8a5
--- /dev/null
+++ b/src/pkg/reflect/asm_arm.s
@@ -0,0 +1,13 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// makeFuncStub is jumped to by the code generated by MakeFunc.
+// See the comment on the declaration of makeFuncStub in value.go
+// for more details.
+TEXT ·makeFuncStub(SB),7,$8
+ MOVW R7, 4(R13)
+ MOVW $arg+0(FP), R1
+ MOVW R1, 8(R13)
+ BL ·callReflect(SB)
+ RET
diff --git a/src/pkg/reflect/deepequal.go b/src/pkg/reflect/deepequal.go
index c12e90f36..db047963e 100644
--- a/src/pkg/reflect/deepequal.go
+++ b/src/pkg/reflect/deepequal.go
@@ -122,9 +122,12 @@ func deepValueEqual(v1, v2 Value, visited map[uintptr]*visit, depth int) (b bool
panic("Not reached")
}
-// DeepEqual tests for deep equality. It uses normal == equality where possible
-// but will scan members of arrays, slices, maps, and fields of structs. It correctly
-// handles recursive types. Functions are equal only if they are both nil.
+// DeepEqual tests for deep equality. It uses normal == equality where
+// possible but will scan elements of arrays, slices, maps, and fields of
+// structs. In maps, keys are compared with == but elements use deep
+// equality. DeepEqual correctly handles recursive types. Functions are equal
+// only if they are both nil.
+// An empty slice is not equal to a nil slice.
func DeepEqual(a1, a2 interface{}) bool {
if a1 == nil || a2 == nil {
return a1 == a2
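
A minimal, self-contained sketch of the behaviors the revised DeepEqual comment spells out; not part of the patch:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Map keys are compared with ==, elements with deep equality.
	a := map[string][]int{"x": {1, 2}}
	b := map[string][]int{"x": {1, 2}}
	fmt.Println(reflect.DeepEqual(a, b)) // true

	// An empty slice is not deeply equal to a nil slice.
	fmt.Println(reflect.DeepEqual([]int{}, []int(nil))) // false

	// Functions are deeply equal only if both are nil.
	f := func() {}
	fmt.Println(reflect.DeepEqual(f, f)) // false: f is non-nil
	fmt.Println(reflect.DeepEqual((func())(nil), (func())(nil))) // true
}
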
diff --git a/src/pkg/reflect/example_test.go b/src/pkg/reflect/example_test.go
new file mode 100644
index 000000000..62455c00a
--- /dev/null
+++ b/src/pkg/reflect/example_test.go
@@ -0,0 +1,52 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func ExampleMakeFunc() {
+ // swap is the implementation passed to MakeFunc.
+ // It must work in terms of reflect.Values so that it is possible
+ // to write code without knowing beforehand what the types
+ // will be.
+ swap := func(in []reflect.Value) []reflect.Value {
+ return []reflect.Value{in[1], in[0]}
+ }
+
+ // makeSwap expects fptr to be a pointer to a nil function.
+ // It sets that pointer to a new function created with MakeFunc.
+ // When the function is invoked, reflect turns the arguments
+ // into Values, calls swap, and then turns swap's result slice
+ // into the values returned by the new function.
+ makeSwap := func(fptr interface{}) {
+ // fptr is a pointer to a function.
+ // Obtain the function value itself (likely nil) as a reflect.Value
+ // so that we can query its type and then set the value.
+ fn := reflect.ValueOf(fptr).Elem()
+
+ // Make a function of the right type.
+ v := reflect.MakeFunc(fn.Type(), swap)
+
+ // Assign it to the value fn represents.
+ fn.Set(v)
+ }
+
+ // Make and call a swap function for ints.
+ var intSwap func(int, int) (int, int)
+ makeSwap(&intSwap)
+ fmt.Println(intSwap(0, 1))
+
+ // Make and call a swap function for float64s.
+ var floatSwap func(float64, float64) (float64, float64)
+ makeSwap(&floatSwap)
+ fmt.Println(floatSwap(2.72, 3.14))
+
+ // Output:
+ // 1 0
+ // 3.14 2.72
+}
diff --git a/src/pkg/reflect/export_test.go b/src/pkg/reflect/export_test.go
new file mode 100644
index 000000000..cd8cf2cf2
--- /dev/null
+++ b/src/pkg/reflect/export_test.go
@@ -0,0 +1,18 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+// MakeRO returns a copy of v with the read-only flag set.
+func MakeRO(v Value) Value {
+ v.flag |= flagRO
+ return v
+}
+
+// IsRO reports whether v's read-only flag is set.
+func IsRO(v Value) bool {
+ return v.flag&flagRO != 0
+}
+
+var ArrayOf = arrayOf
diff --git a/src/pkg/reflect/makefunc.go b/src/pkg/reflect/makefunc.go
new file mode 100644
index 000000000..024f938f1
--- /dev/null
+++ b/src/pkg/reflect/makefunc.go
@@ -0,0 +1,67 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MakeFunc implementation.
+
+package reflect
+
+import (
+ "unsafe"
+)
+
+// makeFuncImpl is the closure value implementing the function
+// returned by MakeFunc.
+type makeFuncImpl struct {
+ code uintptr
+ typ *funcType
+ fn func([]Value) []Value
+}
+
+// MakeFunc returns a new function of the given Type
+// that wraps the function fn. When called, that new function
+// does the following:
+//
+// - converts its arguments to a list of Values args.
+// - runs results := fn(args).
+// - returns the results as a slice of Values, one per formal result.
+//
+// The implementation fn can assume that the argument Value slice
+// has the number and type of arguments given by typ.
+// If typ describes a variadic function, the final Value is itself
+// a slice representing the variadic arguments, as in the
+// body of a variadic function. The result Value slice returned by fn
+// must have the number and type of results given by typ.
+//
+// The Value.Call method allows the caller to invoke a typed function
+// in terms of Values; in contrast, MakeFunc allows the caller to implement
+// a typed function in terms of Values.
+//
+// The Examples section of the documentation includes an illustration
+// of how to use MakeFunc to build a swap function for different types.
+//
+func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
+ if typ.Kind() != Func {
+ panic("reflect: call of MakeFunc with non-Func type")
+ }
+
+ t := typ.common()
+ ftyp := (*funcType)(unsafe.Pointer(t))
+
+ // indirect Go func value (dummy) to obtain
+ // actual code address. (A Go func is a pointer
+ // to a C function pointer. http://golang.org/s/go11func.)
+ dummy := makeFuncStub
+ code := **(**uintptr)(unsafe.Pointer(&dummy))
+
+ impl := &makeFuncImpl{code: code, typ: ftyp, fn: fn}
+
+ return Value{t, unsafe.Pointer(impl), flag(Func) << flagKindShift}
+}
+
+// makeFuncStub is an assembly function that is the code half of
+// the function returned from MakeFunc. It expects a *callReflectFunc
+// as its context register, and its job is to invoke callReflect(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func makeFuncStub()
diff --git a/src/pkg/reflect/set_test.go b/src/pkg/reflect/set_test.go
index 8135a4cd1..85dc55e68 100644
--- a/src/pkg/reflect/set_test.go
+++ b/src/pkg/reflect/set_test.go
@@ -81,11 +81,11 @@ func TestImplicitMapConversion(t *testing.T) {
t.Errorf("#5 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m)
}
if p := mv.MapIndex(ValueOf(b1)).Elem().Pointer(); p != uintptr(unsafe.Pointer(b2)) {
- t.Errorf("#5 MapIndex(b1) = %p want %p", p, b2)
+ t.Errorf("#5 MapIndex(b1) = %#x want %p", p, b2)
}
}
{
- // convert channel direction
+ // convert channel direction
m := make(map[<-chan int]chan int)
mv := ValueOf(m)
c1 := make(chan int)
@@ -96,7 +96,7 @@ func TestImplicitMapConversion(t *testing.T) {
t.Errorf("#6 after SetMapIndex(c1, c2): %p (!= %p), %t (map=%v)", x, c2, ok, m)
}
if p := mv.MapIndex(ValueOf(c1)).Pointer(); p != ValueOf(c2).Pointer() {
- t.Errorf("#6 MapIndex(c1) = %p want %p", p, c2)
+ t.Errorf("#6 MapIndex(c1) = %#x want %p", p, c2)
}
}
{
@@ -115,7 +115,7 @@ func TestImplicitMapConversion(t *testing.T) {
t.Errorf("#7 after SetMapIndex(b1, b2): %p (!= %p), %t (map=%v)", x, b2, ok, m)
}
if p := mv.MapIndex(ValueOf(b1)).Pointer(); p != uintptr(unsafe.Pointer(b2)) {
- t.Errorf("#7 MapIndex(b1) = %p want %p", p, b2)
+ t.Errorf("#7 MapIndex(b1) = %#x want %p", p, b2)
}
}
diff --git a/src/pkg/reflect/type.go b/src/pkg/reflect/type.go
index 060bde3af..94a7521a7 100644
--- a/src/pkg/reflect/type.go
+++ b/src/pkg/reflect/type.go
@@ -92,6 +92,9 @@ type Type interface {
// AssignableTo returns true if a value of the type is assignable to type u.
AssignableTo(u Type) bool
+ // ConvertibleTo returns true if a value of the type is convertible to type u.
+ ConvertibleTo(u Type) bool
+
// Methods applicable only to some types, depending on Kind.
// The methods allowed for each kind are:
//
@@ -181,11 +184,16 @@ type Type interface {
// It panics if i is not in the range [0, NumOut()).
Out(i int) Type
- runtimeType() *runtimeType
- common() *commonType
+ common() *rtype
uncommon() *uncommonType
}
+/*
+ * These data structures are known to the compiler (../../cmd/gc/reflect.c).
+ * A few are known to ../runtime/type.go to convey to debuggers.
+ * They are also known to ../runtime/type.h.
+ */
+
// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint
@@ -220,41 +228,30 @@ const (
UnsafePointer
)
-/*
- * These data structures are known to the compiler (../../cmd/gc/reflect.c).
- * A few are known to ../runtime/type.go to convey to debuggers.
- */
-
-// The compiler can only construct empty interface values at
-// compile time; non-empty interface values get created
-// during initialization. Type is an empty interface
-// so that the compiler can lay out references as data.
-// The underlying type is *reflect.ArrayType and so on.
-type runtimeType interface{}
-
-// commonType is the common implementation of most values.
+// rtype is the common implementation of most values.
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
-type commonType struct {
- size uintptr // size in bytes
- hash uint32 // hash of type; avoids computation in hash tables
- _ uint8 // unused/padding
- align uint8 // alignment of variable with this type
- fieldAlign uint8 // alignment of struct field with this type
- kind uint8 // enumeration for C
- alg *uintptr // algorithm table (../runtime/runtime.h:/Alg)
- string *string // string form; unnecessary but undeniably useful
- *uncommonType // (relatively) uncommon fields
- ptrToThis *runtimeType // pointer to this type, if used in binary or has methods
+type rtype struct {
+ size uintptr // size in bytes
+ hash uint32 // hash of type; avoids computation in hash tables
+ _ uint8 // unused/padding
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ kind uint8 // enumeration for C
+ alg *uintptr // algorithm table (../runtime/runtime.h:/Alg)
+ gc uintptr // garbage collection data
+ string *string // string form; unnecessary but undeniably useful
+ *uncommonType // (relatively) uncommon fields
+ ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
}
// Method on non-interface type
type method struct {
name *string // name of method
pkgPath *string // nil for exported Names; otherwise import path
- mtyp *runtimeType // method type (without receiver)
- typ *runtimeType // .(*FuncType) underneath (with receiver)
+ mtyp *rtype // method type (without receiver)
+ typ *rtype // .(*FuncType) underneath (with receiver)
ifn unsafe.Pointer // fn used in interface call (one-word receiver)
tfn unsafe.Pointer // fn used for normal method call
}
@@ -280,72 +277,72 @@ const (
// arrayType represents a fixed array type.
type arrayType struct {
- commonType `reflect:"array"`
- elem *runtimeType // array element type
- slice *runtimeType // slice type
- len uintptr
+ rtype `reflect:"array"`
+ elem *rtype // array element type
+ slice *rtype // slice type
+ len uintptr
}
// chanType represents a channel type.
type chanType struct {
- commonType `reflect:"chan"`
- elem *runtimeType // channel element type
- dir uintptr // channel direction (ChanDir)
+ rtype `reflect:"chan"`
+ elem *rtype // channel element type
+ dir uintptr // channel direction (ChanDir)
}
// funcType represents a function type.
type funcType struct {
- commonType `reflect:"func"`
- dotdotdot bool // last input parameter is ...
- in []*runtimeType // input parameter types
- out []*runtimeType // output parameter types
+ rtype `reflect:"func"`
+ dotdotdot bool // last input parameter is ...
+ in []*rtype // input parameter types
+ out []*rtype // output parameter types
}
// imethod represents a method on an interface type
type imethod struct {
- name *string // name of method
- pkgPath *string // nil for exported Names; otherwise import path
- typ *runtimeType // .(*FuncType) underneath
+ name *string // name of method
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *rtype // .(*FuncType) underneath
}
// interfaceType represents an interface type.
type interfaceType struct {
- commonType `reflect:"interface"`
- methods []imethod // sorted by hash
+ rtype `reflect:"interface"`
+ methods []imethod // sorted by hash
}
// mapType represents a map type.
type mapType struct {
- commonType `reflect:"map"`
- key *runtimeType // map key type
- elem *runtimeType // map element (value) type
+ rtype `reflect:"map"`
+ key *rtype // map key type
+ elem *rtype // map element (value) type
}
// ptrType represents a pointer type.
type ptrType struct {
- commonType `reflect:"ptr"`
- elem *runtimeType // pointer element (pointed at) type
+ rtype `reflect:"ptr"`
+ elem *rtype // pointer element (pointed at) type
}
// sliceType represents a slice type.
type sliceType struct {
- commonType `reflect:"slice"`
- elem *runtimeType // slice element type
+ rtype `reflect:"slice"`
+ elem *rtype // slice element type
}
// Struct field
type structField struct {
- name *string // nil for embedded fields
- pkgPath *string // nil for exported Names; otherwise import path
- typ *runtimeType // type of field
- tag *string // nil if no tag
- offset uintptr // byte offset of field within struct
+ name *string // nil for embedded fields
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *rtype // type of field
+ tag *string // nil if no tag
+ offset uintptr // byte offset of field within struct
}
// structType represents a struct type.
type structType struct {
- commonType `reflect:"struct"`
- fields []structField // sorted by offset
+ rtype `reflect:"struct"`
+ fields []structField // sorted by offset
}
/*
@@ -359,7 +356,7 @@ type Method struct {
// PkgPath is the package path that qualifies a lower case (unexported)
// method name. It is empty for upper case (exported) method names.
// The combination of PkgPath and Name uniquely identifies a method
- // in a method set.
+ // in a method set.
// See http://golang.org/ref/spec#Uniqueness_of_identifiers
Name string
PkgPath string
@@ -428,18 +425,11 @@ func (t *uncommonType) Name() string {
return *t.name
}
-func (t *commonType) toType() Type {
- if t == nil {
- return nil
- }
- return t
-}
-
-func (t *commonType) String() string { return *t.string }
+func (t *rtype) String() string { return *t.string }
-func (t *commonType) Size() uintptr { return t.size }
+func (t *rtype) Size() uintptr { return t.size }
-func (t *commonType) Bits() int {
+func (t *rtype) Bits() int {
if t == nil {
panic("reflect: Bits of nil Type")
}
@@ -450,13 +440,13 @@ func (t *commonType) Bits() int {
return int(t.size) * 8
}
-func (t *commonType) Align() int { return int(t.align) }
+func (t *rtype) Align() int { return int(t.align) }
-func (t *commonType) FieldAlign() int { return int(t.fieldAlign) }
+func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
-func (t *commonType) Kind() Kind { return Kind(t.kind & kindMask) }
+func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
-func (t *commonType) common() *commonType { return t }
+func (t *rtype) common() *rtype { return t }
func (t *uncommonType) Method(i int) (m Method) {
if t == nil || i < 0 || i >= len(t.methods) {
@@ -471,9 +461,9 @@ func (t *uncommonType) Method(i int) (m Method) {
m.PkgPath = *p.pkgPath
fl |= flagRO
}
- mt := toCommonType(p.typ)
+ mt := p.typ
m.Type = mt
- fn := p.tfn
+ fn := unsafe.Pointer(&p.tfn)
m.Func = Value{mt, fn, fl}
m.Index = i
return
@@ -502,8 +492,8 @@ func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
// TODO(rsc): 6g supplies these, but they are not
// as efficient as they could be: they have commonType
-// as the receiver instead of *commonType.
-func (t *commonType) NumMethod() int {
+// as the receiver instead of *rtype.
+func (t *rtype) NumMethod() int {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.NumMethod()
@@ -511,7 +501,7 @@ func (t *commonType) NumMethod() int {
return t.uncommonType.NumMethod()
}
-func (t *commonType) Method(i int) (m Method) {
+func (t *rtype) Method(i int) (m Method) {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.Method(i)
@@ -519,7 +509,7 @@ func (t *commonType) Method(i int) (m Method) {
return t.uncommonType.Method(i)
}
-func (t *commonType) MethodByName(name string) (m Method, ok bool) {
+func (t *rtype) MethodByName(name string) (m Method, ok bool) {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.MethodByName(name)
@@ -527,15 +517,15 @@ func (t *commonType) MethodByName(name string) (m Method, ok bool) {
return t.uncommonType.MethodByName(name)
}
-func (t *commonType) PkgPath() string {
+func (t *rtype) PkgPath() string {
return t.uncommonType.PkgPath()
}
-func (t *commonType) Name() string {
+func (t *rtype) Name() string {
return t.uncommonType.Name()
}
-func (t *commonType) ChanDir() ChanDir {
+func (t *rtype) ChanDir() ChanDir {
if t.Kind() != Chan {
panic("reflect: ChanDir of non-chan type")
}
@@ -543,7 +533,7 @@ func (t *commonType) ChanDir() ChanDir {
return ChanDir(tt.dir)
}
-func (t *commonType) IsVariadic() bool {
+func (t *rtype) IsVariadic() bool {
if t.Kind() != Func {
panic("reflect: IsVariadic of non-func type")
}
@@ -551,7 +541,7 @@ func (t *commonType) IsVariadic() bool {
return tt.dotdotdot
}
-func (t *commonType) Elem() Type {
+func (t *rtype) Elem() Type {
switch t.Kind() {
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
@@ -572,7 +562,7 @@ func (t *commonType) Elem() Type {
panic("reflect: Elem of invalid type")
}
-func (t *commonType) Field(i int) StructField {
+func (t *rtype) Field(i int) StructField {
if t.Kind() != Struct {
panic("reflect: Field of non-struct type")
}
@@ -580,7 +570,7 @@ func (t *commonType) Field(i int) StructField {
return tt.Field(i)
}
-func (t *commonType) FieldByIndex(index []int) StructField {
+func (t *rtype) FieldByIndex(index []int) StructField {
if t.Kind() != Struct {
panic("reflect: FieldByIndex of non-struct type")
}
@@ -588,7 +578,7 @@ func (t *commonType) FieldByIndex(index []int) StructField {
return tt.FieldByIndex(index)
}
-func (t *commonType) FieldByName(name string) (StructField, bool) {
+func (t *rtype) FieldByName(name string) (StructField, bool) {
if t.Kind() != Struct {
panic("reflect: FieldByName of non-struct type")
}
@@ -596,7 +586,7 @@ func (t *commonType) FieldByName(name string) (StructField, bool) {
return tt.FieldByName(name)
}
-func (t *commonType) FieldByNameFunc(match func(string) bool) (StructField, bool) {
+func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
if t.Kind() != Struct {
panic("reflect: FieldByNameFunc of non-struct type")
}
@@ -604,7 +594,7 @@ func (t *commonType) FieldByNameFunc(match func(string) bool) (StructField, bool
return tt.FieldByNameFunc(match)
}
-func (t *commonType) In(i int) Type {
+func (t *rtype) In(i int) Type {
if t.Kind() != Func {
panic("reflect: In of non-func type")
}
@@ -612,7 +602,7 @@ func (t *commonType) In(i int) Type {
return toType(tt.in[i])
}
-func (t *commonType) Key() Type {
+func (t *rtype) Key() Type {
if t.Kind() != Map {
panic("reflect: Key of non-map type")
}
@@ -620,7 +610,7 @@ func (t *commonType) Key() Type {
return toType(tt.key)
}
-func (t *commonType) Len() int {
+func (t *rtype) Len() int {
if t.Kind() != Array {
panic("reflect: Len of non-array type")
}
@@ -628,7 +618,7 @@ func (t *commonType) Len() int {
return int(tt.len)
}
-func (t *commonType) NumField() int {
+func (t *rtype) NumField() int {
if t.Kind() != Struct {
panic("reflect: NumField of non-struct type")
}
@@ -636,7 +626,7 @@ func (t *commonType) NumField() int {
return len(tt.fields)
}
-func (t *commonType) NumIn() int {
+func (t *rtype) NumIn() int {
if t.Kind() != Func {
panic("reflect: NumIn of non-func type")
}
@@ -644,7 +634,7 @@ func (t *commonType) NumIn() int {
return len(tt.in)
}
-func (t *commonType) NumOut() int {
+func (t *rtype) NumOut() int {
if t.Kind() != Func {
panic("reflect: NumOut of non-func type")
}
@@ -652,7 +642,7 @@ func (t *commonType) NumOut() int {
return len(tt.out)
}
-func (t *commonType) Out(i int) Type {
+func (t *rtype) Out(i int) Type {
if t.Kind() != Func {
panic("reflect: Out of non-func type")
}
@@ -718,7 +708,7 @@ type StructField struct {
Tag StructTag // field tag string
Offset uintptr // offset within struct, in bytes
Index []int // index sequence for Type.FieldByIndex
- Anonymous bool // is an anonymous field
+ Anonymous bool // is an embedded field
}
// A StructTag is the tag string in a struct field.
@@ -822,7 +812,7 @@ func (t *structType) Field(i int) (f StructField) {
// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
- f.Type = Type(t.toType())
+ f.Type = toType(&t.rtype)
for i, x := range index {
if i > 0 {
ft := f.Type
@@ -836,107 +826,143 @@ func (t *structType) FieldByIndex(index []int) (f StructField) {
return
}
-const inf = 1 << 30 // infinity - no struct has that many nesting levels
-
-func (t *structType) fieldByNameFunc(match func(string) bool, mark map[*structType]bool, depth int) (ff StructField, fd int) {
- fd = inf // field depth
+// A fieldScan represents an item on the fieldByNameFunc scan work list.
+type fieldScan struct {
+ typ *structType
+ index []int
+}
- if mark[t] {
- // Struct already seen.
- return
- }
- mark[t] = true
-
- var fi int // field index
- n := 0 // number of matching fields at depth fd
-L:
- for i := range t.fields {
- f := t.Field(i)
- d := inf
- switch {
- case match(f.Name):
- // Matching top-level field.
- d = depth
- case f.Anonymous:
- ft := f.Type
- if ft.Kind() == Ptr {
- ft = ft.Elem()
+// FieldByNameFunc returns the struct field with a name that satisfies the
+// match function and a boolean to indicate if the field was found.
+func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
+ // This uses the same condition that the Go language does: there must be a unique instance
+ // of the match at a given depth level. If there are multiple instances of a match at the
+ // same depth, they annihilate each other and inhibit any possible match at a lower level.
+ // The algorithm is breadth first search, one depth level at a time.
+
+ // The current and next slices are work queues:
+ // current lists the fields to visit on this depth level,
+ // and next lists the fields on the next lower level.
+ current := []fieldScan{}
+ next := []fieldScan{{typ: t}}
+
+ // nextCount records the number of times an embedded type has been
+ // encountered and considered for queueing in the 'next' slice.
+ // We only queue the first one, but we increment the count on each.
+ // If a struct type T can be reached more than once at a given depth level,
+ // then it annihilates itself and need not be considered at all when we
+ // process that next depth level.
+ var nextCount map[*structType]int
+
+ // visited records the structs that have been considered already.
+ // Embedded pointer fields can create cycles in the graph of
+ // reachable embedded types; visited avoids following those cycles.
+ // It also avoids duplicated effort: if we didn't find the field in an
+ // embedded type T at level 2, we won't find it in one at level 4 either.
+ visited := map[*structType]bool{}
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count := nextCount
+ nextCount = nil
+
+ // Process all the fields at this depth, now listed in 'current'.
+ // The loop queues embedded fields found in 'next', for processing during the next
+ // iteration. The multiplicity of the 'current' field counts is recorded
+ // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
+ for _, scan := range current {
+ t := scan.typ
+ if visited[t] {
+ // We've looked through this type before, at a higher level.
+ // That higher level would shadow the lower level we're now at,
+ // so this one can't be useful to us. Ignore it.
+ continue
}
- switch {
- case match(ft.Name()):
- // Matching anonymous top-level field.
- d = depth
- case fd > depth:
- // No top-level field yet; look inside nested structs.
- if ft.Kind() == Struct {
- st := (*structType)(unsafe.Pointer(ft.(*commonType)))
- f, d = st.fieldByNameFunc(match, mark, depth+1)
+ visited[t] = true
+ for i := range t.fields {
+ f := &t.fields[i]
+ // Find name and type for field f.
+ var fname string
+ var ntyp *rtype
+ if f.name != nil {
+ fname = *f.name
+ } else {
+ // Anonymous field of type T or *T.
+ // Name taken from type.
+ ntyp = f.typ
+ if ntyp.Kind() == Ptr {
+ ntyp = ntyp.Elem().common()
+ }
+ fname = ntyp.Name()
}
- }
- }
- switch {
- case d < fd:
- // Found field at shallower depth.
- ff, fi, fd = f, i, d
- n = 1
- case d == fd:
- // More than one matching field at the same depth (or d, fd == inf).
- // Same as no field found at this depth.
- n++
- if d == depth {
- // Impossible to find a field at lower depth.
- break L
- }
- }
- }
+ // Does it match?
+ if match(fname) {
+ // Potential match
+ if count[t] > 1 || ok {
+ // Name appeared multiple times at this level: annihilate.
+ return StructField{}, false
+ }
+ result = t.Field(i)
+ result.Index = nil
+ result.Index = append(result.Index, scan.index...)
+ result.Index = append(result.Index, i)
+ ok = true
+ continue
+ }
- if n == 1 {
- // Found matching field.
- if depth >= len(ff.Index) {
- ff.Index = make([]int, depth+1)
+ // Queue embedded struct fields for processing with next level,
+ // but only if we haven't seen a match yet at this level and only
+ // if the embedded types haven't already been queued.
+ if ok || ntyp == nil || ntyp.Kind() != Struct {
+ continue
+ }
+ styp := (*structType)(unsafe.Pointer(ntyp))
+ if nextCount[styp] > 0 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ continue
+ }
+ if nextCount == nil {
+ nextCount = map[*structType]int{}
+ }
+ nextCount[styp] = 1
+ if count[t] > 1 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ }
+ var index []int
+ index = append(index, scan.index...)
+ index = append(index, i)
+ next = append(next, fieldScan{styp, index})
+ }
}
- if len(ff.Index) > 1 {
- ff.Index[depth] = fi
+ if ok {
+ break
}
- } else {
- // None or more than one matching field found.
- fd = inf
}
-
- delete(mark, t)
return
}
// FieldByName returns the struct field with the given name
// and a boolean to indicate if the field was found.
func (t *structType) FieldByName(name string) (f StructField, present bool) {
- return t.FieldByNameFunc(func(s string) bool { return s == name })
-}
-
-// FieldByNameFunc returns the struct field with a name that satisfies the
-// match function and a boolean to indicate if the field was found.
-func (t *structType) FieldByNameFunc(match func(string) bool) (f StructField, present bool) {
- if ff, fd := t.fieldByNameFunc(match, make(map[*structType]bool), 0); fd < inf {
- ff.Index = ff.Index[0 : fd+1]
- f, present = ff, true
- }
- return
-}
-
-// Convert runtime type to reflect type.
-func toCommonType(p *runtimeType) *commonType {
- if p == nil {
- return nil
+ // Quick check for top-level name, or struct without anonymous fields.
+ hasAnon := false
+ if name != "" {
+ for i := range t.fields {
+ tf := &t.fields[i]
+ if tf.name == nil {
+ hasAnon = true
+ continue
+ }
+ if *tf.name == name {
+ return t.Field(i), true
+ }
+ }
}
- return (*p).(*commonType)
-}
-
-func toType(p *runtimeType) Type {
- if p == nil {
- return nil
+ if !hasAnon {
+ return
}
- return (*p).(*commonType)
+ return t.FieldByNameFunc(func(s string) bool { return s == name })
}
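
To illustrate the annihilation rule implemented above, here is a minimal self-contained sketch (the types A, B, and T are made up for illustration): two fields named X at the same embedding depth cancel each other, while the embedded field itself is still found.

    package main

    import (
        "fmt"
        "reflect"
    )

    type A struct{ X int }
    type B struct{ X int }

    // T embeds A and B, so the name X appears twice at depth 1.
    type T struct {
        A
        B
    }

    func main() {
        t := reflect.TypeOf(T{})

        // The two X fields at the same depth annihilate each other.
        _, ok := t.FieldByName("X")
        fmt.Println(ok) // false

        // The embedded field A itself is found at depth 0.
        f, ok := t.FieldByName("A")
        fmt.Println(f.Index, ok) // [0] true
    }
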
// TypeOf returns the reflection Type of the value in the interface{}.
@@ -949,28 +975,18 @@ func TypeOf(i interface{}) Type {
// ptrMap is the cache for PtrTo.
var ptrMap struct {
sync.RWMutex
- m map[*commonType]*ptrType
-}
-
-func (t *commonType) runtimeType() *runtimeType {
- // The runtimeType always precedes the commonType in memory.
- // Adjust pointer to find it.
- var rt struct {
- i runtimeType
- ct commonType
- }
- return (*runtimeType)(unsafe.Pointer(uintptr(unsafe.Pointer(t)) - unsafe.Offsetof(rt.ct)))
+ m map[*rtype]*ptrType
}
// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
func PtrTo(t Type) Type {
- return t.(*commonType).ptrTo()
+ return t.(*rtype).ptrTo()
}
-func (ct *commonType) ptrTo() *commonType {
- if p := ct.ptrToThis; p != nil {
- return toCommonType(p)
+func (t *rtype) ptrTo() *rtype {
+ if p := t.ptrToThis; p != nil {
+ return p
}
// Otherwise, synthesize one.
@@ -980,36 +996,31 @@ func (ct *commonType) ptrTo() *commonType {
// the type structures in read-only memory.
ptrMap.RLock()
if m := ptrMap.m; m != nil {
- if p := m[ct]; p != nil {
+ if p := m[t]; p != nil {
ptrMap.RUnlock()
- return &p.commonType
+ return &p.rtype
}
}
ptrMap.RUnlock()
ptrMap.Lock()
if ptrMap.m == nil {
- ptrMap.m = make(map[*commonType]*ptrType)
+ ptrMap.m = make(map[*rtype]*ptrType)
}
- p := ptrMap.m[ct]
+ p := ptrMap.m[t]
if p != nil {
// some other goroutine won the race and created it
ptrMap.Unlock()
- return &p.commonType
+ return &p.rtype
}
- var rt struct {
- i runtimeType
- ptrType
- }
- rt.i = &rt.commonType
-
- // initialize p using *byte's ptrType as a prototype.
- p = &rt.ptrType
- var ibyte interface{} = (*byte)(nil)
- bp := (*ptrType)(unsafe.Pointer((**(**runtimeType)(unsafe.Pointer(&ibyte))).(*commonType)))
- *p = *bp
+ // Create a new ptrType starting with the description
+ // of an *unsafe.Pointer.
+ p = new(ptrType)
+ var iptr interface{} = (*unsafe.Pointer)(nil)
+ prototype := *(**ptrType)(unsafe.Pointer(&iptr))
+ *p = *prototype
- s := "*" + *ct.string
+ s := "*" + *t.string
p.string = &s
// For the type structures linked into the binary, the
@@ -1017,37 +1028,53 @@ func (ct *commonType) ptrTo() *commonType {
// Create a good hash for the new string by using
// the FNV-1 hash's mixing function to combine the
// old hash and the new "*".
- p.hash = ct.hash*16777619 ^ '*'
+ p.hash = fnv1(t.hash, '*')
p.uncommonType = nil
p.ptrToThis = nil
- p.elem = (*runtimeType)(unsafe.Pointer(uintptr(unsafe.Pointer(ct)) - unsafe.Offsetof(rt.ptrType)))
+ p.elem = t
- ptrMap.m[ct] = p
+ ptrMap.m[t] = p
ptrMap.Unlock()
- return &p.commonType
+ return &p.rtype
+}
+
+// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
+func fnv1(x uint32, list ...byte) uint32 {
+ for _, b := range list {
+ x = x*16777619 ^ uint32(b)
+ }
+ return x
}
-func (t *commonType) Implements(u Type) bool {
+func (t *rtype) Implements(u Type) bool {
if u == nil {
panic("reflect: nil type passed to Type.Implements")
}
if u.Kind() != Interface {
panic("reflect: non-interface type passed to Type.Implements")
}
- return implements(u.(*commonType), t)
+ return implements(u.(*rtype), t)
}
-func (t *commonType) AssignableTo(u Type) bool {
+func (t *rtype) AssignableTo(u Type) bool {
if u == nil {
panic("reflect: nil type passed to Type.AssignableTo")
}
- uu := u.(*commonType)
+ uu := u.(*rtype)
return directlyAssignable(uu, t) || implements(uu, t)
}
+func (t *rtype) ConvertibleTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.ConvertibleTo")
+ }
+ uu := u.(*rtype)
+ return convertOp(uu, t) != nil
+}
+
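
A small usage sketch of AssignableTo and the new ConvertibleTo, assuming two hypothetical named types: a named type that shares an underlying type with a predeclared type allows only conversion, while an unnamed composite type remains assignable.

    package main

    import (
        "fmt"
        "reflect"
    )

    type Celsius float64
    type IntSlice []int

    func main() {
        c := reflect.TypeOf(Celsius(0))
        f := reflect.TypeOf(float64(0))
        // Both types are named, so assignment is not allowed,
        // but an explicit conversion is.
        fmt.Println(c.AssignableTo(f), c.ConvertibleTo(f)) // false true

        // []int is unnamed and has the same underlying type as IntSlice,
        // so assignment is allowed.
        s := reflect.TypeOf(IntSlice(nil))
        fmt.Println(s.AssignableTo(reflect.TypeOf([]int(nil)))) // true
    }
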
// implements returns true if the type V implements the interface type T.
-func implements(T, V *commonType) bool {
+func implements(T, V *rtype) bool {
if T.Kind() != Interface {
return false
}
@@ -1105,7 +1132,7 @@ func implements(T, V *commonType) bool {
// http://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
-func directlyAssignable(T, V *commonType) bool {
+func directlyAssignable(T, V *rtype) bool {
// x's type V is identical to T?
if T == V {
return true
@@ -1117,10 +1144,28 @@ func directlyAssignable(T, V *commonType) bool {
return false
}
- // x's type T and V have identical underlying types.
- // Since at least one is unnamed, only the composite types
- // need to be considered.
- switch T.Kind() {
+ // x's type T and V must have identical underlying types.
+ return haveIdenticalUnderlyingType(T, V)
+}
+
+func haveIdenticalUnderlyingType(T, V *rtype) bool {
+ if T == V {
+ return true
+ }
+
+ kind := T.Kind()
+ if kind != V.Kind() {
+ return false
+ }
+
+ // Non-composite types of equal kind have same underlying type
+ // (the predefined instance of the type).
+ if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
+ return true
+ }
+
+ // Composite types.
+ switch kind {
case Array:
return T.Elem() == V.Elem() && T.Len() == V.Len()
@@ -1178,8 +1223,19 @@ func directlyAssignable(T, V *commonType) bool {
for i := range t.fields {
tf := &t.fields[i]
vf := &v.fields[i]
- if tf.name != vf.name || tf.pkgPath != vf.pkgPath ||
- tf.typ != vf.typ || tf.tag != vf.tag || tf.offset != vf.offset {
+ if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
+ return false
+ }
+ if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
+ return false
+ }
+ if tf.typ != vf.typ {
+ return false
+ }
+ if tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
+ return false
+ }
+ if tf.offset != vf.offset {
return false
}
}
@@ -1188,3 +1244,295 @@ func directlyAssignable(T, V *commonType) bool {
return false
}
+
+// typelinks is implemented in package runtime.
+// It returns a slice of all the 'typelink' information in the binary,
+// which is to say a slice of known types, sorted by string.
+// Note that strings are not unique identifiers for types:
+// there can be more than one with a given string.
+// Only types we might want to look up are included:
+// channels, maps, slices, and arrays.
+func typelinks() []*rtype
+
+// typesByString returns the subslice of typelinks() whose elements have
+// the given string representation.
+// It may be empty (no known types with that string) or may have
+// multiple elements (multiple types with that string).
+func typesByString(s string) []*rtype {
+ typ := typelinks()
+
+ // We are looking for the first index i where the string becomes >= s.
+ // This is a copy of sort.Search, with f(h) replaced by (*typ[h].string >= s).
+ i, j := 0, len(typ)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if !(*typ[h].string >= s) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+
+ // Having found the first, linear scan forward to find the last.
+ // We could do a second binary search, but the caller is going
+ // to do a linear scan anyway.
+ j = i
+ for j < len(typ) && *typ[j].string == s {
+ j++
+ }
+
+ // This slice will be empty if the string is not found.
+ return typ[i:j]
+}
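
The binary search above is the sort.Search pattern specialized to the typelink table; a minimal standalone sketch of the same first-index binary search plus last-index linear scan, written over a plain sorted []string (the data is made up):

    package main

    import (
        "fmt"
        "sort"
    )

    // rangeOf returns the half-open range [i, j) of entries equal to s
    // in a sorted slice: a binary search for the first index, then a
    // linear scan forward for the last, just like typesByString.
    func rangeOf(sorted []string, s string) (i, j int) {
        i = sort.Search(len(sorted), func(h int) bool { return sorted[h] >= s })
        j = i
        for j < len(sorted) && sorted[j] == s {
            j++
        }
        return i, j
    }

    func main() {
        names := []string{"[]int", "chan int", "chan int", "map[int]string"}
        fmt.Println(rangeOf(names, "chan int")) // 1 3
    }
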
+
+// The lookupCache caches ChanOf, MapOf, and SliceOf lookups.
+var lookupCache struct {
+ sync.RWMutex
+ m map[cacheKey]*rtype
+}
+
+// A cacheKey is the key for use in the lookupCache.
+// Four values describe any of the types we are looking for:
+// type kind, one or two subtypes, and an extra integer.
+type cacheKey struct {
+ kind Kind
+ t1 *rtype
+ t2 *rtype
+ extra uintptr
+}
+
+// cacheGet looks for a type under the key k in the lookupCache.
+// If it finds one, it returns that type.
+// If not, it returns nil with the cache locked.
+// The caller is expected to use cachePut to unlock the cache.
+func cacheGet(k cacheKey) Type {
+ lookupCache.RLock()
+ t := lookupCache.m[k]
+ lookupCache.RUnlock()
+ if t != nil {
+ return t
+ }
+
+ lookupCache.Lock()
+ t = lookupCache.m[k]
+ if t != nil {
+ lookupCache.Unlock()
+ return t
+ }
+
+ if lookupCache.m == nil {
+ lookupCache.m = make(map[cacheKey]*rtype)
+ }
+
+ return nil
+}
+
+// cachePut stores the given type in the cache, unlocks the cache,
+// and returns the type. It is expected that the cache is locked
+// because cacheGet returned nil.
+func cachePut(k cacheKey, t *rtype) Type {
+ lookupCache.m[k] = t
+ lookupCache.Unlock()
+ return t
+}
+
+// ChanOf returns the channel type with the given direction and element type.
+// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
+//
+// The gc runtime imposes a limit of 64 kB on channel element types.
+// If t's size is equal to or exceeds this limit, ChanOf panics.
+func ChanOf(dir ChanDir, t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
+ if ch := cacheGet(ckey); ch != nil {
+ return ch
+ }
+
+ // This restriction is imposed by the gc compiler and the runtime.
+ if typ.size >= 1<<16 {
+ lookupCache.Unlock()
+ panic("reflect.ChanOf: element size too large")
+ }
+
+ // Look in known types.
+ // TODO: Precedence when constructing string.
+ var s string
+ switch dir {
+ default:
+ lookupCache.Unlock()
+ panic("reflect.ChanOf: invalid dir")
+ case SendDir:
+ s = "chan<- " + *typ.string
+ case RecvDir:
+ s = "<-chan " + *typ.string
+ case BothDir:
+ s = "chan " + *typ.string
+ }
+ for _, tt := range typesByString(s) {
+ ch := (*chanType)(unsafe.Pointer(tt))
+ if ch.elem == typ && ch.dir == uintptr(dir) {
+ return cachePut(ckey, tt)
+ }
+ }
+
+ // Make a channel type.
+ var ichan interface{} = (chan unsafe.Pointer)(nil)
+ prototype := *(**chanType)(unsafe.Pointer(&ichan))
+ ch := new(chanType)
+ *ch = *prototype
+ ch.string = &s
+ ch.hash = fnv1(typ.hash, 'c', byte(dir))
+ ch.elem = typ
+ ch.uncommonType = nil
+ ch.ptrToThis = nil
+
+ return cachePut(ckey, &ch.rtype)
+}
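
A short usage sketch of ChanOf together with MakeChan (the element type and values are made up):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        // Build the type <-chan int at run time.
        recv := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0))
        fmt.Println(recv) // <-chan int

        // MakeChan needs a bidirectional channel type.
        both := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(0))
        ch := reflect.MakeChan(both, 1)
        ch.Send(reflect.ValueOf(42))
        v, _ := ch.Recv()
        fmt.Println(v.Int()) // 42
    }
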
+
+// MapOf returns the map type with the given key and element types.
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics. TODO(rsc).
+func MapOf(key, elem Type) Type {
+ ktyp := key.(*rtype)
+ etyp := elem.(*rtype)
+
+ // TODO: Check for invalid key types.
+
+ // Look in cache.
+ ckey := cacheKey{Map, ktyp, etyp, 0}
+ if mt := cacheGet(ckey); mt != nil {
+ return mt
+ }
+
+ // Look in known types.
+ s := "map[" + *ktyp.string + "]" + *etyp.string
+ for _, tt := range typesByString(s) {
+ mt := (*mapType)(unsafe.Pointer(tt))
+ if mt.key == ktyp && mt.elem == etyp {
+ return cachePut(ckey, tt)
+ }
+ }
+
+ // Make a map type.
+ var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+ prototype := *(**mapType)(unsafe.Pointer(&imap))
+ mt := new(mapType)
+ *mt = *prototype
+ mt.string = &s
+ mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
+ mt.key = ktyp
+ mt.elem = etyp
+ mt.uncommonType = nil
+ mt.ptrToThis = nil
+
+ return cachePut(ckey, &mt.rtype)
+}
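
A short usage sketch of MapOf (key, element, and values are made up); the type found or built is the canonical one, so it compares equal to the statically known map type:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
        fmt.Println(mt)                                     // map[string]int
        fmt.Println(mt == reflect.TypeOf(map[string]int{})) // true

        m := reflect.MakeMap(mt)
        m.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
        fmt.Println(m.MapIndex(reflect.ValueOf("answer")).Int()) // 42
    }
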
+
+// SliceOf returns the slice type with element type t.
+// For example, if t represents int, SliceOf(t) represents []int.
+func SliceOf(t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Slice, typ, nil, 0}
+ if slice := cacheGet(ckey); slice != nil {
+ return slice
+ }
+
+ // Look in known types.
+ s := "[]" + *typ.string
+ for _, tt := range typesByString(s) {
+ slice := (*sliceType)(unsafe.Pointer(tt))
+ if slice.elem == typ {
+ return cachePut(ckey, tt)
+ }
+ }
+
+ // Make a slice type.
+ var islice interface{} = ([]unsafe.Pointer)(nil)
+ prototype := *(**sliceType)(unsafe.Pointer(&islice))
+ slice := new(sliceType)
+ *slice = *prototype
+ slice.string = &s
+ slice.hash = fnv1(typ.hash, '[')
+ slice.elem = typ
+ slice.uncommonType = nil
+ slice.ptrToThis = nil
+
+ return cachePut(ckey, &slice.rtype)
+}
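
A short usage sketch of SliceOf with MakeSlice and Append (element type and values are made up):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        // Construct []int64 at run time, then build a value of that type.
        st := reflect.SliceOf(reflect.TypeOf(int64(0)))
        fmt.Println(st) // []int64

        s := reflect.MakeSlice(st, 0, 4)
        s = reflect.Append(s, reflect.ValueOf(int64(7)))
        fmt.Println(s.Len(), s.Index(0).Int()) // 1 7
    }
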
+
+// ArrayOf returns the array type with the given count and element type.
+// For example, if t represents int, ArrayOf(5, t) represents [5]int.
+//
+// If the resulting type would be larger than the available address space,
+// ArrayOf panics.
+//
+// TODO(rsc): Unexported for now. Export once the alg field is set correctly
+// for the type. This may require significant work.
+func arrayOf(count int, elem Type) Type {
+ typ := elem.(*rtype)
+ slice := SliceOf(elem)
+
+ // Look in cache.
+ ckey := cacheKey{Array, typ, nil, uintptr(count)}
+ if slice := cacheGet(ckey); slice != nil {
+ return slice
+ }
+
+ // Look in known types.
+ s := "[" + strconv.Itoa(count) + "]" + *typ.string
+ for _, tt := range typesByString(s) {
+ slice := (*sliceType)(unsafe.Pointer(tt))
+ if slice.elem == typ {
+ return cachePut(ckey, tt)
+ }
+ }
+
+ // Make an array type.
+ var iarray interface{} = [1]unsafe.Pointer{}
+ prototype := *(**arrayType)(unsafe.Pointer(&iarray))
+ array := new(arrayType)
+ *array = *prototype
+ array.string = &s
+ array.hash = fnv1(typ.hash, '[')
+ for n := uint32(count); n > 0; n >>= 8 {
+ array.hash = fnv1(array.hash, byte(n))
+ }
+ array.hash = fnv1(array.hash, ']')
+ array.elem = typ
+ max := ^uintptr(0) / typ.size
+ if uintptr(count) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
+ array.size = typ.size * uintptr(count)
+ array.align = typ.align
+ array.fieldAlign = typ.fieldAlign
+ // TODO: array.alg
+ // TODO: array.gc
+ array.uncommonType = nil
+ array.ptrToThis = nil
+ array.len = uintptr(count)
+ array.slice = slice.(*rtype)
+
+ return cachePut(ckey, &array.rtype)
+}
+
+// toType converts from a *rtype to a Type that can be returned
+// to the client of package reflect. In gc, the only concern is that
+// a nil *rtype must be replaced by a nil Type, but in gccgo this
+// function takes care of ensuring that multiple *rtype for the same
+// type are coalesced into a single Type.
+func toType(t *rtype) Type {
+ if t == nil {
+ return nil
+ }
+ return t
+}
diff --git a/src/pkg/reflect/value.go b/src/pkg/reflect/value.go
index 79476ad22..c87812c46 100644
--- a/src/pkg/reflect/value.go
+++ b/src/pkg/reflect/value.go
@@ -60,7 +60,7 @@ func memmove(adst, asrc unsafe.Pointer, n uintptr) {
// direct operations.
type Value struct {
// typ holds the type of the value represented by a Value.
- typ *commonType
+ typ *rtype
// val holds the 1-word representation of the value.
// If flag's flagIndir bit is set, then val is a pointer to the data.
@@ -211,7 +211,7 @@ func storeIword(p unsafe.Pointer, w iword, n uintptr) {
// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
- typ *runtimeType
+ typ *rtype
word iword
}
@@ -219,8 +219,8 @@ type emptyInterface struct {
type nonEmptyInterface struct {
// see ../runtime/iface.c:/Itab
itab *struct {
- ityp *runtimeType // static interface type
- typ *runtimeType // dynamic concrete type
+ ityp *rtype // static interface type
+ typ *rtype // dynamic concrete type
link unsafe.Pointer
bad int32
unused int32
@@ -302,6 +302,17 @@ func (v Value) Bytes() []byte {
return *(*[]byte)(v.val)
}
+// runes returns v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) runes() []rune {
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.Bytes of non-rune slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]rune)(v.val)
+}
+
// CanAddr returns true if the value's address can be obtained with Addr.
// Such values are called addressable. A value is addressable if it is
// an element of a slice, an element of an addressable array,
@@ -335,7 +346,7 @@ func (v Value) Call(in []Value) []Value {
}
// CallSlice calls the variadic function v with the input arguments in,
-// assigning the slice in[len(in)-1] to v's final variadic argument.
+// assigning the slice in[len(in)-1] to v's final variadic argument.
// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]...).
// Call panics if v's Kind is not Func or if v is not variadic.
// It returns the output results as Values.
@@ -365,12 +376,12 @@ func (v Value) call(method string, in []Value) []Value {
if m.pkgPath != nil {
panic(method + " of unexported method")
}
- t = toCommonType(m.typ)
+ t = m.typ
iface := (*nonEmptyInterface)(v.val)
if iface.itab == nil {
panic(method + " of method on nil interface value")
}
- fn = iface.itab.fun[i]
+ fn = unsafe.Pointer(&iface.itab.fun[i])
rcvr = iface.word
} else {
ut := v.typ.uncommon()
@@ -381,8 +392,8 @@ func (v Value) call(method string, in []Value) []Value {
if m.pkgPath != nil {
panic(method + " of unexported method")
}
- fn = m.ifn
- t = toCommonType(m.mtyp)
+ fn = unsafe.Pointer(&m.ifn)
+ t = m.mtyp
rcvr = v.iword()
}
} else if v.flag&flagIndir != 0 {
@@ -490,9 +501,9 @@ func (v Value) call(method string, in []Value) []Value {
// TODO(rsc): revisit when reference counting happens.
// The values are holding up the in references for us,
// but something must be done for the out references.
- // For now make everything look like a pointer by pretending
- // to allocate a []*int.
- args := make([]*int, size/ptrSize)
+ // For now make everything look like a pointer by allocating
+ // a []unsafe.Pointer.
+ args := make([]unsafe.Pointer, size/ptrSize)
ptr := uintptr(unsafe.Pointer(&args[0]))
off := uintptr(0)
if v.flag&flagMethod != 0 {
@@ -502,7 +513,7 @@ func (v Value) call(method string, in []Value) []Value {
}
for i, v := range in {
v.mustBeExported()
- targ := t.In(i).(*commonType)
+ targ := t.In(i).(*rtype)
a := uintptr(targ.align)
off = (off + a - 1) &^ (a - 1)
n := targ.size
@@ -536,6 +547,85 @@ func (v Value) call(method string, in []Value) []Value {
return ret
}
+// callReflect is the call implementation used by a function
+// returned by MakeFunc. In many ways it is the opposite of the
+// method Value.call above. The method above converts a call using Values
+// into a call of a function with a concrete argument frame, while
+// callReflect converts a call of a function with a concrete argument
+// frame into a call using Values.
+// It is in this file so that it can be next to the call method above.
+// The remainder of the MakeFunc implementation is in makefunc.go.
+func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
+ ftyp := ctxt.typ
+ f := ctxt.fn
+
+ // Copy argument frame into Values.
+ ptr := frame
+ off := uintptr(0)
+ in := make([]Value, 0, len(ftyp.in))
+ for _, arg := range ftyp.in {
+ typ := arg
+ off += -off & uintptr(typ.align-1)
+ v := Value{typ, nil, flag(typ.Kind()) << flagKindShift}
+ if typ.size <= ptrSize {
+ // value fits in word.
+ v.val = unsafe.Pointer(loadIword(unsafe.Pointer(uintptr(ptr)+off), typ.size))
+ } else {
+ // value does not fit in word.
+ // Must make a copy, because f might keep a reference to it,
+ // and we cannot let f keep a reference to the stack frame
+ // after this function returns, not even a read-only reference.
+ v.val = unsafe_New(typ)
+ memmove(v.val, unsafe.Pointer(uintptr(ptr)+off), typ.size)
+ v.flag |= flagIndir
+ }
+ in = append(in, v)
+ off += typ.size
+ }
+
+ // Call underlying function.
+ out := f(in)
+ if len(out) != len(ftyp.out) {
+ panic("reflect: wrong return count from function created by MakeFunc")
+ }
+
+ // Copy results back into argument frame.
+ if len(ftyp.out) > 0 {
+ off += -off & (ptrSize - 1)
+ for i, arg := range ftyp.out {
+ typ := arg
+ v := out[i]
+ if v.typ != typ {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned wrong type: have " +
+ out[i].typ.String() + " for " + typ.String())
+ }
+ if v.flag&flagRO != 0 {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned value obtained from unexported field")
+ }
+ off += -off & uintptr(typ.align-1)
+ addr := unsafe.Pointer(uintptr(ptr) + off)
+ if v.flag&flagIndir == 0 {
+ storeIword(addr, iword(v.val), typ.size)
+ } else {
+ memmove(addr, v.val, typ.size)
+ }
+ off += typ.size
+ }
+ }
+}
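
Seen from the caller, the frame-to-Values bridge above is what a function returned by MakeFunc runs through. A minimal sketch of MakeFunc in use (the swap function is made up for illustration):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        // swap operates on reflect.Values; MakeFunc wraps it in a typed
        // function, and callReflect bridges the two calling conventions
        // at run time.
        swap := func(in []reflect.Value) []reflect.Value {
            return []reflect.Value{in[1], in[0]}
        }

        var intSwap func(int, int) (int, int)
        fn := reflect.MakeFunc(reflect.TypeOf(intSwap), swap)
        reflect.ValueOf(&intSwap).Elem().Set(fn)

        a, b := intSwap(1, 2)
        fmt.Println(a, b) // 2 1
    }
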
+
+// funcName returns the name of f, for use in error messages.
+func funcName(f func([]Value) []Value) string {
+ pc := *(*uintptr)(unsafe.Pointer(&f))
+ rf := runtime.FuncForPC(pc)
+ if rf != nil {
+ return rf.Name()
+ }
+ return "closure"
+}
+
// Cap returns v's capacity.
// It panics if v's Kind is not Array, Chan, or Slice.
func (v Value) Cap() int {
@@ -586,7 +676,7 @@ func (v Value) Elem() Value {
switch k {
case Interface:
var (
- typ *commonType
+ typ *rtype
val unsafe.Pointer
)
if v.typ.NumMethod() == 0 {
@@ -595,7 +685,7 @@ func (v Value) Elem() Value {
// nil interface value
return Value{}
}
- typ = toCommonType(eface.typ)
+ typ = eface.typ
val = unsafe.Pointer(eface.word)
} else {
iface := (*nonEmptyInterface)(v.val)
@@ -603,7 +693,7 @@ func (v Value) Elem() Value {
// nil interface value
return Value{}
}
- typ = toCommonType(iface.itab.typ)
+ typ = iface.itab.typ
val = unsafe.Pointer(iface.word)
}
fl := v.flag & flagRO
@@ -623,7 +713,7 @@ func (v Value) Elem() Value {
return Value{}
}
tt := (*ptrType)(unsafe.Pointer(v.typ))
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := v.flag&flagRO | flagIndir | flagAddr
fl |= flag(typ.Kind() << flagKindShift)
return Value{typ, val, fl}
@@ -640,7 +730,7 @@ func (v Value) Field(i int) Value {
panic("reflect: Field index out of range")
}
field := &tt.fields[i]
- typ := toCommonType(field.typ)
+ typ := field.typ
// Inherit permission bits from v.
fl := v.flag & (flagRO | flagIndir | flagAddr)
@@ -723,8 +813,10 @@ func (v Value) Float() float64 {
panic(&ValueError{"reflect.Value.Float", k})
}
+var uint8Type = TypeOf(uint8(0)).(*rtype)
+
// Index returns v's i'th element.
-// It panics if v's Kind is not Array or Slice or i is out of range.
+// It panics if v's Kind is not Array, Slice, or String or i is out of range.
func (v Value) Index(i int) Value {
k := v.kind()
switch k {
@@ -733,7 +825,7 @@ func (v Value) Index(i int) Value {
if i < 0 || i > int(tt.len) {
panic("reflect: array index out of range")
}
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := v.flag & (flagRO | flagIndir | flagAddr) // bits same as overall array
fl |= flag(typ.Kind()) << flagKindShift
offset := uintptr(i) * typ.size
@@ -761,10 +853,19 @@ func (v Value) Index(i int) Value {
panic("reflect: slice index out of range")
}
tt := (*sliceType)(unsafe.Pointer(v.typ))
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl |= flag(typ.Kind()) << flagKindShift
val := unsafe.Pointer(s.Data + uintptr(i)*typ.size)
return Value{typ, val, fl}
+
+ case String:
+ fl := v.flag&flagRO | flag(Uint8<<flagKindShift)
+ s := (*StringHeader)(v.val)
+ if i < 0 || i >= s.Len {
+ panic("reflect: string index out of range")
+ }
+ val := *(*byte)(unsafe.Pointer(s.Data + uintptr(i)))
+ return Value{uint8Type, unsafe.Pointer(uintptr(val)), fl}
}
panic(&ValueError{"reflect.Value.Index", k})
}
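
With the new String case, indexing a string Value yields its i'th byte as a uint8 Value; a short sketch:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        v := reflect.ValueOf("hello")
        b := v.Index(1)                 // the byte 'e'
        fmt.Println(b.Kind(), b.Uint()) // uint8 101
    }
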
@@ -826,7 +927,7 @@ func valueInterface(v Value, safe bool) interface{} {
if safe && v.flag&flagRO != 0 {
// Do not allow access to unexported values via Interface,
- // because they might be pointers that should not be
+ // because they might be pointers that should not be
// writable or methods or function that should not be callable.
panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
}
@@ -846,7 +947,7 @@ func valueInterface(v Value, safe bool) interface{} {
// Non-interface value.
var eface emptyInterface
- eface.typ = v.typ.runtimeType()
+ eface.typ = v.typ
eface.word = v.iword()
if v.flag&flagIndir != 0 && v.typ.size > ptrSize {
@@ -919,9 +1020,9 @@ func (v Value) Len() int {
tt := (*arrayType)(unsafe.Pointer(v.typ))
return int(tt.len)
case Chan:
- return int(chanlen(v.iword()))
+ return chanlen(v.iword())
case Map:
- return int(maplen(v.iword()))
+ return maplen(v.iword())
case Slice:
// Slice is bigger than a word; assume flagIndir.
return (*SliceHeader)(v.val).Len
@@ -947,13 +1048,13 @@ func (v Value) MapIndex(key Value) Value {
// considered unexported. This is consistent with the
// behavior for structs, which allow read but not write
// of unexported fields.
- key = key.assignTo("reflect.Value.MapIndex", toCommonType(tt.key), nil)
+ key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
- word, ok := mapaccess(v.typ.runtimeType(), v.iword(), key.iword())
+ word, ok := mapaccess(v.typ, v.iword(), key.iword())
if !ok {
return Value{}
}
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := (v.flag | key.flag) & flagRO
if typ.size > ptrSize {
fl |= flagIndir
@@ -969,7 +1070,7 @@ func (v Value) MapIndex(key Value) Value {
func (v Value) MapKeys() []Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ))
- keyType := toCommonType(tt.key)
+ keyType := tt.key
fl := v.flag & flagRO
fl |= flag(keyType.Kind()) << flagKindShift
@@ -978,11 +1079,11 @@ func (v Value) MapKeys() []Value {
}
m := v.iword()
- mlen := int32(0)
+ mlen := int(0)
if m != nil {
mlen = maplen(m)
}
- it := mapiterinit(v.typ.runtimeType(), m)
+ it := mapiterinit(v.typ, m)
a := make([]Value, mlen)
var i int
for i = 0; i < len(a); i++ {
@@ -1081,7 +1182,7 @@ func overflowFloat32(x float64) bool {
if x < 0 {
x = -x
}
- return math.MaxFloat32 <= x && x <= math.MaxFloat64
+ return math.MaxFloat32 < x && x <= math.MaxFloat64
}
// OverflowInt returns true if the int64 x cannot be represented by v's type.
@@ -1115,18 +1216,35 @@ func (v Value) OverflowUint(x uint64) bool {
// code using reflect cannot obtain unsafe.Pointers
// without importing the unsafe package explicitly.
// It panics if v's Kind is not Chan, Func, Map, Ptr, Slice, or UnsafePointer.
+//
+// If v's Kind is Func, the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
func (v Value) Pointer() uintptr {
k := v.kind()
switch k {
- case Chan, Func, Map, Ptr, UnsafePointer:
- if k == Func && v.flag&flagMethod != 0 {
+ case Chan, Map, Ptr, UnsafePointer:
+ p := v.val
+ if v.flag&flagIndir != 0 {
+ p = *(*unsafe.Pointer)(p)
+ }
+ return uintptr(p)
+ case Func:
+ if v.flag&flagMethod != 0 {
panic("reflect.Value.Pointer of method Value")
}
p := v.val
if v.flag&flagIndir != 0 {
p = *(*unsafe.Pointer)(p)
}
+ // Non-nil func value points at data block.
+ // First word of data block is actual code.
+ if p != nil {
+ p = *(*unsafe.Pointer)(p)
+ }
return uintptr(p)
+
case Slice:
return (*SliceHeader)(v.val).Data
}
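
Because the Func case now follows the data block to the code pointer, the result of Pointer for a non-nil func can be handed to runtime.FuncForPC; a small sketch:

    package main

    import (
        "fmt"
        "reflect"
        "runtime"
    )

    func main() {
        pc := reflect.ValueOf(fmt.Println).Pointer()
        fmt.Println(pc != 0)                      // true
        fmt.Println(runtime.FuncForPC(pc).Name()) // fmt.Println

        var nilFn func()
        fmt.Println(reflect.ValueOf(nilFn).Pointer()) // 0
    }
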
@@ -1151,9 +1269,9 @@ func (v Value) recv(nb bool) (val Value, ok bool) {
if ChanDir(tt.dir)&RecvDir == 0 {
panic("recv on send-only channel")
}
- word, selected, ok := chanrecv(v.typ.runtimeType(), v.iword(), nb)
+ word, selected, ok := chanrecv(v.typ, v.iword(), nb)
if selected {
- typ := toCommonType(tt.elem)
+ typ := tt.elem
fl := flag(typ.Kind()) << flagKindShift
if typ.size > ptrSize {
fl |= flagIndir
@@ -1180,8 +1298,8 @@ func (v Value) send(x Value, nb bool) (selected bool) {
panic("send on recv-only channel")
}
x.mustBeExported()
- x = x.assignTo("reflect.Value.Send", toCommonType(tt.elem), nil)
- return chansend(v.typ.runtimeType(), v.iword(), x.iword(), nb)
+ x = x.assignTo("reflect.Value.Send", tt.elem, nil)
+ return chansend(v.typ, v.iword(), x.iword(), nb)
}
// Set assigns x to the value v.
@@ -1221,6 +1339,17 @@ func (v Value) SetBytes(x []byte) {
*(*[]byte)(v.val) = x
}
+// setRunes sets v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) setRunes(x []rune) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.setRunes of non-rune slice")
+ }
+ *(*[]rune)(v.val) = x
+}
+
// SetComplex sets v's underlying value to x.
// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
func (v Value) SetComplex(x complex128) {
@@ -1292,12 +1421,12 @@ func (v Value) SetMapIndex(key, val Value) {
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ))
- key = key.assignTo("reflect.Value.SetMapIndex", toCommonType(tt.key), nil)
+ key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
if val.typ != nil {
val.mustBeExported()
- val = val.assignTo("reflect.Value.SetMapIndex", toCommonType(tt.elem), nil)
+ val = val.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
}
- mapassign(v.typ.runtimeType(), v.iword(), key.iword(), val.iword(), val.typ != nil)
+ mapassign(v.typ, v.iword(), key.iword(), val.iword(), val.typ != nil)
}
// SetUint sets v's underlying value to x.
@@ -1339,7 +1468,7 @@ func (v Value) SetString(x string) {
}
// Slice returns a slice of v.
-// It panics if v's Kind is not Array or Slice.
+// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array.
func (v Value) Slice(beg, end int) Value {
var (
cap int
@@ -1349,31 +1478,44 @@ func (v Value) Slice(beg, end int) Value {
switch k := v.kind(); k {
default:
panic(&ValueError{"reflect.Value.Slice", k})
+
case Array:
if v.flag&flagAddr == 0 {
panic("reflect.Value.Slice: slice of unaddressable array")
}
tt := (*arrayType)(unsafe.Pointer(v.typ))
cap = int(tt.len)
- typ = (*sliceType)(unsafe.Pointer(toCommonType(tt.slice)))
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
base = v.val
+
case Slice:
typ = (*sliceType)(unsafe.Pointer(v.typ))
s := (*SliceHeader)(v.val)
base = unsafe.Pointer(s.Data)
cap = s.Cap
+ case String:
+ s := (*StringHeader)(v.val)
+ if beg < 0 || end < beg || end > s.Len {
+ panic("reflect.Value.Slice: string slice index out of bounds")
+ }
+ var x string
+ val := (*StringHeader)(unsafe.Pointer(&x))
+ val.Data = s.Data + uintptr(beg)
+ val.Len = end - beg
+ return Value{v.typ, unsafe.Pointer(&x), v.flag}
}
+
if beg < 0 || end < beg || end > cap {
panic("reflect.Value.Slice: slice index out of bounds")
}
// Declare slice so that gc can see the base pointer in it.
- var x []byte
+ var x []unsafe.Pointer
// Reinterpret as *SliceHeader to edit.
s := (*SliceHeader)(unsafe.Pointer(&x))
- s.Data = uintptr(base) + uintptr(beg)*toCommonType(typ.elem).Size()
+ s.Data = uintptr(base) + uintptr(beg)*typ.elem.Size()
s.Len = end - beg
s.Cap = cap - beg
@@ -1439,7 +1581,7 @@ func (v Value) Type() Type {
panic("reflect: broken Value")
}
m := &tt.methods[i]
- return toCommonType(m.typ)
+ return m.typ
}
// Method on concrete type.
ut := v.typ.uncommon()
@@ -1447,7 +1589,7 @@ func (v Value) Type() Type {
panic("reflect: broken Value")
}
m := &ut.methods[i]
- return toCommonType(m.mtyp)
+ return m.mtyp
}
// Uint returns v's underlying value, as a uint64.
@@ -1618,13 +1760,148 @@ func Copy(dst, src Value) int {
return n
}
+// A runtimeSelect is a single case passed to rselect.
+// This must match ../runtime/chan.c:/runtimeSelect
+type runtimeSelect struct {
+ dir uintptr // 0, SendDir, or RecvDir
+ typ *rtype // channel type
+ ch iword // interface word for channel
+ val iword // interface word for value (for SendDir)
+}
+
+// rselect runs a select. It returns the index of the chosen case,
+// and if the case was a receive, the interface word of the received
+// value and the conventional OK bool to indicate whether the receive
+// corresponds to a sent value.
+func rselect([]runtimeSelect) (chosen int, recv iword, recvOK bool)
+
+// A SelectDir describes the communication direction of a select case.
+type SelectDir int
+
+// NOTE: These values must match ../runtime/chan.c:/SelectDir.
+
+const (
+ _ SelectDir = iota
+ SelectSend // case Chan <- Send
+ SelectRecv // case <-Chan:
+ SelectDefault // default
+)
+
+// A SelectCase describes a single case in a select operation.
+// The kind of case depends on Dir, the communication direction.
+//
+// If Dir is SelectDefault, the case represents a default case.
+// Chan and Send must be zero Values.
+//
+// If Dir is SelectSend, the case represents a send operation.
+// Normally Chan's underlying value must be a channel, and Send's underlying value must be
+// assignable to the channel's element type. As a special case, if Chan is a zero Value,
+// then the case is ignored, and the field Send will also be ignored and may be either zero
+// or non-zero.
+//
+// If Dir is SelectRecv, the case represents a receive operation.
+// Normally Chan's underlying value must be a channel and Send must be a zero Value.
+// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value.
+// When a receive operation is selected, the received Value is returned by Select.
+//
+type SelectCase struct {
+ Dir SelectDir // direction of case
+ Chan Value // channel to use (for send or receive)
+ Send Value // value to send (for send)
+}
+
+// Select executes a select operation described by the list of cases.
+// Like the Go select statement, it blocks until at least one of the cases
+// can proceed, makes a uniform pseudo-random choice,
+// and then executes that case. It returns the index of the chosen case
+// and, if that case was a receive operation, the value received and a
+// boolean indicating whether the value corresponds to a send on the channel
+// (as opposed to a zero value received because the channel is closed).
+func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
+ // NOTE: Do not trust that caller is not modifying cases data underfoot.
+ // The range is safe because the caller cannot modify our copy of the len
+ // and each iteration makes its own copy of the value c.
+ runcases := make([]runtimeSelect, len(cases))
+ haveDefault := false
+ for i, c := range cases {
+ rc := &runcases[i]
+ rc.dir = uintptr(c.Dir)
+ switch c.Dir {
+ default:
+ panic("reflect.Select: invalid Dir")
+
+ case SelectDefault: // default
+ if haveDefault {
+ panic("reflect.Select: multiple default cases")
+ }
+ haveDefault = true
+ if c.Chan.IsValid() {
+ panic("reflect.Select: default case has Chan value")
+ }
+ if c.Send.IsValid() {
+ panic("reflect.Select: default case has Send value")
+ }
+
+ case SelectSend:
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ if ChanDir(tt.dir)&SendDir == 0 {
+ panic("reflect.Select: SendDir case using recv-only channel")
+ }
+ rc.ch = ch.iword()
+ rc.typ = &tt.rtype
+ v := c.Send
+ if !v.IsValid() {
+ panic("reflect.Select: SendDir case missing Send value")
+ }
+ v.mustBeExported()
+ v = v.assignTo("reflect.Select", tt.elem, nil)
+ rc.val = v.iword()
+
+ case SelectRecv:
+ if c.Send.IsValid() {
+ panic("reflect.Select: RecvDir case has Send value")
+ }
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ rc.typ = &tt.rtype
+ if ChanDir(tt.dir)&RecvDir == 0 {
+ panic("reflect.Select: RecvDir case using send-only channel")
+ }
+ rc.ch = ch.iword()
+ }
+ }
+
+ chosen, word, recvOK := rselect(runcases)
+ if runcases[chosen].dir == uintptr(SelectRecv) {
+ tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
+ typ := tt.elem
+ fl := flag(typ.Kind()) << flagKindShift
+ if typ.size > ptrSize {
+ fl |= flagIndir
+ }
+ recv = Value{typ, unsafe.Pointer(word), fl}
+ }
+ return chosen, recv, recvOK
+}
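
A brief sketch of Select driven from ordinary code (channel contents are made up); with a ready receive and a default case, the ready communication is always chosen:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        ch := make(chan int, 1)
        ch <- 42

        cases := []reflect.SelectCase{
            {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)},
            {Dir: reflect.SelectDefault},
        }
        chosen, recv, recvOK := reflect.Select(cases)
        fmt.Println(chosen, recv.Int(), recvOK) // 0 42 true
    }
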
+
/*
* constructors
*/
// implemented in package runtime
-func unsafe_New(Type) unsafe.Pointer
-func unsafe_NewArray(Type, int) unsafe.Pointer
+func unsafe_New(*rtype) unsafe.Pointer
+func unsafe_NewArray(*rtype, int) unsafe.Pointer
// MakeSlice creates a new zero-initialized slice value
// for the specified slice type, length, and capacity.
@@ -1643,11 +1920,11 @@ func MakeSlice(typ Type, len, cap int) Value {
}
// Declare slice so that gc can see the base pointer in it.
- var x []byte
+ var x []unsafe.Pointer
// Reinterpret as *SliceHeader to edit.
s := (*SliceHeader)(unsafe.Pointer(&x))
- s.Data = uintptr(unsafe_NewArray(typ.Elem(), cap))
+ s.Data = uintptr(unsafe_NewArray(typ.Elem().(*rtype), cap))
s.Len = len
s.Cap = cap
@@ -1665,7 +1942,7 @@ func MakeChan(typ Type, buffer int) Value {
if typ.ChanDir() != BothDir {
panic("reflect.MakeChan: unidirectional channel type")
}
- ch := makechan(typ.runtimeType(), uint32(buffer))
+ ch := makechan(typ.(*rtype), uint64(buffer))
return Value{typ.common(), unsafe.Pointer(ch), flag(Chan) << flagKindShift}
}
@@ -1674,7 +1951,7 @@ func MakeMap(typ Type) Value {
if typ.Kind() != Map {
panic("reflect.MakeMap of non-map type")
}
- m := makemap(typ.runtimeType())
+ m := makemap(typ.(*rtype))
return Value{typ.common(), unsafe.Pointer(m), flag(Map) << flagKindShift}
}
@@ -1705,7 +1982,7 @@ func ValueOf(i interface{}) Value {
// For an interface value with the noAddr bit set,
// the representation is identical to an empty interface.
eface := *(*emptyInterface)(unsafe.Pointer(&i))
- typ := toCommonType(eface.typ)
+ typ := eface.typ
fl := flag(typ.Kind()) << flagKindShift
if typ.size > ptrSize {
fl |= flagIndir
@@ -1713,10 +1990,11 @@ func ValueOf(i interface{}) Value {
return Value{typ, unsafe.Pointer(eface.word), fl}
}
-// Zero returns a Value representing a zero value for the specified type.
+// Zero returns a Value representing the zero value for the specified type.
// The result is different from the zero value of the Value struct,
// which represents no value at all.
// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
+// The returned value is neither addressable nor settable.
func Zero(typ Type) Value {
if typ == nil {
panic("reflect: Zero(nil)")
@@ -1726,7 +2004,7 @@ func Zero(typ Type) Value {
if t.size <= ptrSize {
return Value{t, nil, fl}
}
- return Value{t, unsafe_New(typ), fl | flagIndir}
+ return Value{t, unsafe_New(typ.(*rtype)), fl | flagIndir}
}
// New returns a Value representing a pointer to a new zero value
@@ -1735,7 +2013,7 @@ func New(typ Type) Value {
if typ == nil {
panic("reflect: New(nil)")
}
- ptr := unsafe_New(typ)
+ ptr := unsafe_New(typ.(*rtype))
fl := flag(Ptr) << flagKindShift
return Value{typ.common().ptrTo(), ptr, fl}
}
@@ -1750,7 +2028,7 @@ func NewAt(typ Type, p unsafe.Pointer) Value {
// assignTo returns a value v that can be assigned directly to typ.
// It panics if v is not assignable to typ.
// For a conversion to an interface type, target is a suggested scratch space to use.
-func (v Value) assignTo(context string, dst *commonType, target *interface{}) Value {
+func (v Value) assignTo(context string, dst *rtype, target *interface{}) Value {
if v.flag&flagMethod != 0 {
panic(context + ": cannot assign method value to type " + dst.String())
}
@@ -1772,7 +2050,7 @@ func (v Value) assignTo(context string, dst *commonType, target *interface{}) Va
if dst.NumMethod() == 0 {
*target = x
} else {
- ifaceE2I(dst.runtimeType(), x, unsafe.Pointer(target))
+ ifaceE2I(dst, x, unsafe.Pointer(target))
}
return Value{dst, unsafe.Pointer(target), flagIndir | flag(Interface)<<flagKindShift}
}
@@ -1781,24 +2059,320 @@ func (v Value) assignTo(context string, dst *commonType, target *interface{}) Va
panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
}
+// Convert returns the value v converted to type t.
+// If the usual Go conversion rules do not allow conversion
+// of the value v to type t, Convert panics.
+func (v Value) Convert(t Type) Value {
+ if v.flag&flagMethod != 0 {
+ panic("reflect.Value.Convert: cannot convert method values")
+ }
+ op := convertOp(t.common(), v.typ)
+ if op == nil {
+ panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
+ }
+ return op(v, t)
+}
+
+// convertOp returns the function to convert a value of type src
+// to a value of type dst. If the conversion is illegal, convertOp returns nil.
+func convertOp(dst, src *rtype) func(Value, Type) Value {
+ switch src.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtInt
+ case Float32, Float64:
+ return cvtIntFloat
+ case String:
+ return cvtIntString
+ }
+
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtUint
+ case Float32, Float64:
+ return cvtUintFloat
+ case String:
+ return cvtUintString
+ }
+
+ case Float32, Float64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return cvtFloatInt
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtFloatUint
+ case Float32, Float64:
+ return cvtFloat
+ }
+
+ case Complex64, Complex128:
+ switch dst.Kind() {
+ case Complex64, Complex128:
+ return cvtComplex
+ }
+
+ case String:
+ if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
+ switch dst.Elem().Kind() {
+ case Uint8:
+ return cvtStringBytes
+ case Int32:
+ return cvtStringRunes
+ }
+ }
+
+ case Slice:
+ if dst.Kind() == String && src.Elem().PkgPath() == "" {
+ switch src.Elem().Kind() {
+ case Uint8:
+ return cvtBytesString
+ case Int32:
+ return cvtRunesString
+ }
+ }
+ }
+
+ // dst and src have same underlying type.
+ if haveIdenticalUnderlyingType(dst, src) {
+ return cvtDirect
+ }
+
+ // dst and src are unnamed pointer types with same underlying base type.
+ if dst.Kind() == Ptr && dst.Name() == "" &&
+ src.Kind() == Ptr && src.Name() == "" &&
+ haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common()) {
+ return cvtDirect
+ }
+
+ if implements(dst, src) {
+ if src.Kind() == Interface {
+ return cvtI2I
+ }
+ return cvtT2I
+ }
+
+ return nil
+}
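
A short sketch of Convert exercising a few of the conversion classes handled by convertOp (values are made up):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        v := reflect.ValueOf(42)
        f := v.Convert(reflect.TypeOf(float64(0)))
        fmt.Println(f.Float()) // 42

        b := reflect.ValueOf("hi").Convert(reflect.TypeOf([]byte(nil)))
        fmt.Println(b.Bytes()) // [104 105]

        // Illegal conversions panic; check first with ConvertibleTo.
        fmt.Println(reflect.TypeOf(42).ConvertibleTo(reflect.TypeOf([]byte(nil)))) // false
    }
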
+
+// makeInt returns a Value of type t equal to bits (possibly truncated),
+// where t is a signed or unsigned int type.
+func makeInt(f flag, bits uint64, t Type) Value {
+ typ := t.common()
+ if typ.size > ptrSize {
+ // Assume ptrSize >= 4, so this must be uint64.
+ ptr := unsafe_New(typ)
+ *(*uint64)(unsafe.Pointer(ptr)) = bits
+ return Value{typ, ptr, f | flag(typ.Kind())<<flagKindShift}
+ }
+ var w iword
+ switch typ.size {
+ case 1:
+ *(*uint8)(unsafe.Pointer(&w)) = uint8(bits)
+ case 2:
+ *(*uint16)(unsafe.Pointer(&w)) = uint16(bits)
+ case 4:
+ *(*uint32)(unsafe.Pointer(&w)) = uint32(bits)
+ case 8:
+ *(*uint64)(unsafe.Pointer(&w)) = uint64(bits)
+ }
+ return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
+}
+
+// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
+// where t is a float32 or float64 type.
+func makeFloat(f flag, v float64, t Type) Value {
+ typ := t.common()
+ if typ.size > ptrSize {
+ // Assume ptrSize >= 4, so this must be float64.
+ ptr := unsafe_New(typ)
+ *(*float64)(unsafe.Pointer(ptr)) = v
+ return Value{typ, ptr, f | flag(typ.Kind())<<flagKindShift}
+ }
+
+ var w iword
+ switch typ.size {
+ case 4:
+ *(*float32)(unsafe.Pointer(&w)) = float32(v)
+ case 8:
+ *(*float64)(unsafe.Pointer(&w)) = v
+ }
+ return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
+}
+
+// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
+// where t is a complex64 or complex128 type.
+func makeComplex(f flag, v complex128, t Type) Value {
+ typ := t.common()
+ if typ.size > ptrSize {
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 8:
+ *(*complex64)(unsafe.Pointer(ptr)) = complex64(v)
+ case 16:
+ *(*complex128)(unsafe.Pointer(ptr)) = v
+ }
+ return Value{typ, ptr, f | flag(typ.Kind())<<flagKindShift}
+ }
+
+ // Assume ptrSize <= 8 so this must be complex64.
+ var w iword
+ *(*complex64)(unsafe.Pointer(&w)) = complex64(v)
+ return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
+}
+
+func makeString(f flag, v string, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetString(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeBytes(f flag, v []byte, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetBytes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeRunes(f flag, v []rune, t Type) Value {
+ ret := New(t).Elem()
+ ret.setRunes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+// These conversion functions are returned by convertOp
+// for classes of conversions. For example, the first function, cvtInt,
+// takes any value v of signed int type and returns the value converted
+// to type t, where t is any signed or unsigned int type.
+
+// convertOp: intXX -> [u]intXX
+func cvtInt(v Value, t Type) Value {
+ return makeInt(v.flag&flagRO, uint64(v.Int()), t)
+}
+
+// convertOp: uintXX -> [u]intXX
+func cvtUint(v Value, t Type) Value {
+ return makeInt(v.flag&flagRO, v.Uint(), t)
+}
+
+// convertOp: floatXX -> intXX
+func cvtFloatInt(v Value, t Type) Value {
+ return makeInt(v.flag&flagRO, uint64(int64(v.Float())), t)
+}
+
+// convertOp: floatXX -> uintXX
+func cvtFloatUint(v Value, t Type) Value {
+ return makeInt(v.flag&flagRO, uint64(v.Float()), t)
+}
+
+// convertOp: intXX -> floatXX
+func cvtIntFloat(v Value, t Type) Value {
+ return makeFloat(v.flag&flagRO, float64(v.Int()), t)
+}
+
+// convertOp: uintXX -> floatXX
+func cvtUintFloat(v Value, t Type) Value {
+ return makeFloat(v.flag&flagRO, float64(v.Uint()), t)
+}
+
+// convertOp: floatXX -> floatXX
+func cvtFloat(v Value, t Type) Value {
+ return makeFloat(v.flag&flagRO, v.Float(), t)
+}
+
+// convertOp: complexXX -> complexXX
+func cvtComplex(v Value, t Type) Value {
+ return makeComplex(v.flag&flagRO, v.Complex(), t)
+}
+
+// convertOp: intXX -> string
+func cvtIntString(v Value, t Type) Value {
+ return makeString(v.flag&flagRO, string(v.Int()), t)
+}
+
+// convertOp: uintXX -> string
+func cvtUintString(v Value, t Type) Value {
+ return makeString(v.flag&flagRO, string(v.Uint()), t)
+}
+
+// convertOp: []byte -> string
+func cvtBytesString(v Value, t Type) Value {
+ return makeString(v.flag&flagRO, string(v.Bytes()), t)
+}
+
+// convertOp: string -> []byte
+func cvtStringBytes(v Value, t Type) Value {
+ return makeBytes(v.flag&flagRO, []byte(v.String()), t)
+}
+
+// convertOp: []rune -> string
+func cvtRunesString(v Value, t Type) Value {
+ return makeString(v.flag&flagRO, string(v.runes()), t)
+}
+
+// convertOp: string -> []rune
+func cvtStringRunes(v Value, t Type) Value {
+ return makeRunes(v.flag&flagRO, []rune(v.String()), t)
+}
+
+// convertOp: direct copy
+func cvtDirect(v Value, typ Type) Value {
+ f := v.flag
+ t := typ.common()
+ val := v.val
+ if f&flagAddr != 0 {
+ // indirect, mutable word - make a copy
+ ptr := unsafe_New(t)
+ memmove(ptr, val, t.size)
+ val = ptr
+ f &^= flagAddr
+ }
+ return Value{t, val, v.flag&flagRO | f}
+}
+
+// convertOp: concrete -> interface
+func cvtT2I(v Value, typ Type) Value {
+ target := new(interface{})
+ x := valueInterface(v, false)
+ if typ.NumMethod() == 0 {
+ *target = x
+ } else {
+ ifaceE2I(typ.(*rtype), x, unsafe.Pointer(target))
+ }
+ return Value{typ.common(), unsafe.Pointer(target), v.flag&flagRO | flagIndir | flag(Interface)<<flagKindShift}
+}
+
+// convertOp: interface -> interface
+func cvtI2I(v Value, typ Type) Value {
+ if v.IsNil() {
+ ret := Zero(typ)
+ ret.flag |= v.flag & flagRO
+ return ret
+ }
+ return cvtT2I(v.Elem(), typ)
+}
+
// implemented in ../pkg/runtime
-func chancap(ch iword) int32
+func chancap(ch iword) int
func chanclose(ch iword)
-func chanlen(ch iword) int32
-func chanrecv(t *runtimeType, ch iword, nb bool) (val iword, selected, received bool)
-func chansend(t *runtimeType, ch iword, val iword, nb bool) bool
-
-func makechan(typ *runtimeType, size uint32) (ch iword)
-func makemap(t *runtimeType) (m iword)
-func mapaccess(t *runtimeType, m iword, key iword) (val iword, ok bool)
-func mapassign(t *runtimeType, m iword, key, val iword, ok bool)
-func mapiterinit(t *runtimeType, m iword) *byte
+func chanlen(ch iword) int
+func chanrecv(t *rtype, ch iword, nb bool) (val iword, selected, received bool)
+func chansend(t *rtype, ch iword, val iword, nb bool) bool
+
+func makechan(typ *rtype, size uint64) (ch iword)
+func makemap(t *rtype) (m iword)
+func mapaccess(t *rtype, m iword, key iword) (val iword, ok bool)
+func mapassign(t *rtype, m iword, key, val iword, ok bool)
+func mapiterinit(t *rtype, m iword) *byte
func mapiterkey(it *byte) (key iword, ok bool)
func mapiternext(it *byte)
-func maplen(m iword) int32
+func maplen(m iword) int
func call(fn, arg unsafe.Pointer, n uint32)
-func ifaceE2I(t *runtimeType, src interface{}, dst unsafe.Pointer)
+func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that