author    Ondřej Surý <ondrej@sury.org>  2012-04-06 15:14:11 +0200
committer Ondřej Surý <ondrej@sury.org>  2012-04-06 15:14:11 +0200
commit    505c19580e0f43fe5224431459cacb7c21edd93d (patch)
tree      79e2634c253d60afc0cc0b2f510dc7dcbb48497b /src/pkg/sync
parent    1336a7c91e596c423a49d1194ea42d98bca0d958 (diff)
Imported Upstream version 1 (tag: upstream/1)
Diffstat (limited to 'src/pkg/sync')
-rw-r--r--  src/pkg/sync/Makefile                 15
-rw-r--r--  src/pkg/sync/atomic/Makefile          18
-rw-r--r--  src/pkg/sync/atomic/asm_386.s         55
-rw-r--r--  src/pkg/sync/atomic/asm_amd64.s       47
-rw-r--r--  src/pkg/sync/atomic/asm_arm.s         24
-rw-r--r--  src/pkg/sync/atomic/asm_linux_arm.s   39
-rw-r--r--  src/pkg/sync/atomic/atomic_test.go   639
-rw-r--r--  src/pkg/sync/atomic/doc.go            39
-rw-r--r--  src/pkg/sync/cond.go                  15
-rw-r--r--  src/pkg/sync/example_test.go          54
-rw-r--r--  src/pkg/sync/export_test.go            9
-rw-r--r--  src/pkg/sync/mutex.go                 11
-rw-r--r--  src/pkg/sync/mutex_test.go             6
-rw-r--r--  src/pkg/sync/once_test.go              2
-rw-r--r--  src/pkg/sync/runtime.go               18
-rw-r--r--  src/pkg/sync/runtime_sema_test.go    101
-rw-r--r--  src/pkg/sync/rwmutex.go               13
-rw-r--r--  src/pkg/sync/rwmutex_test.go           2
-rw-r--r--  src/pkg/sync/waitgroup.go             24
19 files changed, 1006 insertions(+), 125 deletions(-)
diff --git a/src/pkg/sync/Makefile b/src/pkg/sync/Makefile
deleted file mode 100644
index e8a766226..000000000
--- a/src/pkg/sync/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../Make.inc
-
-TARG=sync
-GOFILES=\
- cond.go\
- mutex.go\
- once.go \
- rwmutex.go\
- waitgroup.go\
-
-include ../../Make.pkg
diff --git a/src/pkg/sync/atomic/Makefile b/src/pkg/sync/atomic/Makefile
deleted file mode 100644
index 38d8998c0..000000000
--- a/src/pkg/sync/atomic/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2011 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=sync/atomic
-GOFILES=\
- doc.go\
-
-OFILES=\
- asm_$(GOARCH).$O\
-
-ifeq ($(GOARCH),arm)
-OFILES+=asm_$(GOOS)_$(GOARCH).$O
-endif
-
-include ../../../Make.pkg
diff --git a/src/pkg/sync/atomic/asm_386.s b/src/pkg/sync/atomic/asm_386.s
index 914d2feeb..a406852f4 100644
--- a/src/pkg/sync/atomic/asm_386.s
+++ b/src/pkg/sync/atomic/asm_386.s
@@ -18,6 +18,9 @@ TEXT ·CompareAndSwapUint32(SB),7,$0
TEXT ·CompareAndSwapUintptr(SB),7,$0
JMP ·CompareAndSwapUint32(SB)
+TEXT ·CompareAndSwapPointer(SB),7,$0
+ JMP ·CompareAndSwapUint32(SB)
+
TEXT ·CompareAndSwapInt64(SB),7,$0
JMP ·CompareAndSwapUint64(SB)
@@ -94,3 +97,55 @@ TEXT ·LoadUint32(SB),7,$0
MOVL 0(AX), AX
MOVL AX, ret+4(FP)
RET
+
+TEXT ·LoadInt64(SB),7,$0
+ JMP ·LoadUint64(SB)
+
+TEXT ·LoadUint64(SB),7,$0
+ MOVL addrptr+0(FP), AX
+ // MOVQ and EMMS were introduced on the Pentium MMX.
+ // MOVQ (%EAX), %MM0
+ BYTE $0x0f; BYTE $0x6f; BYTE $0x00
+ // MOVQ %MM0, 0x8(%ESP)
+ BYTE $0x0f; BYTE $0x7f; BYTE $0x44; BYTE $0x24; BYTE $0x08
+ EMMS
+ RET
+
+TEXT ·LoadUintptr(SB),7,$0
+ JMP ·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),7,$0
+ JMP ·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),7,$0
+ JMP ·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),7,$0
+ MOVL addrptr+0(FP), BP
+ MOVL val+4(FP), AX
+ XCHGL AX, 0(BP)
+ RET
+
+TEXT ·StoreInt64(SB),7,$0
+ JMP ·StoreUint64(SB)
+
+TEXT ·StoreUint64(SB),7,$0
+ MOVL addrptr+0(FP), AX
+ // MOVQ and EMMS were introduced on the Pentium MMX.
+ // MOVQ 0x8(%ESP), %MM0
+ BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
+ // MOVQ %MM0, (%EAX)
+ BYTE $0x0f; BYTE $0x7f; BYTE $0x00
+ EMMS
+ // This is essentially a no-op, but it provides required memory fencing.
+ // It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
+ XORL AX, AX
+ LOCK
+ XADDL AX, (SP)
+ RET
+
+TEXT ·StoreUintptr(SB),7,$0
+ JMP ·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),7,$0
+ JMP ·StoreUint32(SB)
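
On x86-32, the 64-bit Load and Store above each compile to a single MMX MOVQ, with a locked add serving as the store fence, so callers get untorn 64-bit values through the ordinary portable API. A minimal sketch of that guarantee (illustrative Go, not part of the patch):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

func main() {
	var v uint64
	done := make(chan bool)
	go func() {
		for i := uint64(1); i <= 1000; i++ {
			// One 8-byte store, even on a 32-bit CPU; readers
			// can never observe a half-written value.
			atomic.StoreUint64(&v, i<<32|i)
		}
		done <- true
	}()
	for {
		x := atomic.LoadUint64(&v) // one 8-byte load
		if uint32(x>>32) != uint32(x) {
			panic("torn read") // impossible with atomic load/store
		}
		if uint32(x) == 1000 {
			break
		}
		runtime.Gosched() // let the writer run
	}
	<-done
	fmt.Println("no torn reads observed")
}
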
diff --git a/src/pkg/sync/atomic/asm_amd64.s b/src/pkg/sync/atomic/asm_amd64.s
index 428295063..6f8bde068 100644
--- a/src/pkg/sync/atomic/asm_amd64.s
+++ b/src/pkg/sync/atomic/asm_amd64.s
@@ -17,6 +17,9 @@ TEXT ·CompareAndSwapUint32(SB),7,$0
TEXT ·CompareAndSwapUintptr(SB),7,$0
JMP ·CompareAndSwapUint64(SB)
+TEXT ·CompareAndSwapPointer(SB),7,$0
+ JMP ·CompareAndSwapUint64(SB)
+
TEXT ·CompareAndSwapInt64(SB),7,$0
JMP ·CompareAndSwapUint64(SB)
@@ -67,3 +70,47 @@ TEXT ·LoadUint32(SB),7,$0
MOVL AX, ret+8(FP)
RET
+TEXT ·LoadInt64(SB),7,$0
+ JMP ·LoadUint64(SB)
+
+TEXT ·LoadUint64(SB),7,$0
+ MOVQ addrptr+0(FP), AX
+ MOVQ 0(AX), AX
+ MOVQ AX, ret+8(FP)
+ RET
+
+TEXT ·LoadUintptr(SB),7,$0
+ JMP ·LoadPointer(SB)
+
+TEXT ·LoadPointer(SB),7,$0
+ MOVQ addrptr+0(FP), AX
+ MOVQ 0(AX), AX
+ MOVQ AX, ret+8(FP)
+ RET
+
+TEXT ·StoreInt32(SB),7,$0
+ JMP ·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),7,$0
+ MOVQ addrptr+0(FP), BP
+ MOVL val+8(FP), AX
+ XCHGL AX, 0(BP)
+ RET
+
+TEXT ·StoreInt64(SB),7,$0
+ JMP ·StoreUint64(SB)
+
+TEXT ·StoreUint64(SB),7,$0
+ MOVQ addrptr+0(FP), BP
+ MOVQ val+8(FP), AX
+ XCHGQ AX, 0(BP)
+ RET
+
+TEXT ·StoreUintptr(SB),7,$0
+ JMP ·StorePointer(SB)
+
+TEXT ·StorePointer(SB),7,$0
+ MOVQ addrptr+0(FP), BP
+ MOVQ val+8(FP), AX
+ XCHGQ AX, 0(BP)
+ RET
diff --git a/src/pkg/sync/atomic/asm_arm.s b/src/pkg/sync/atomic/asm_arm.s
index 95e2f5be4..2d10a922b 100644
--- a/src/pkg/sync/atomic/asm_arm.s
+++ b/src/pkg/sync/atomic/asm_arm.s
@@ -79,6 +79,30 @@ add64loop:
MOVW R5, rethi+16(FP)
RET
+TEXT ·armLoadUint64(SB),7,$0
+ BL fastCheck64<>(SB)
+ MOVW addrptr+0(FP), R1
+load64loop:
+ LDREXD (R1), R2 // loads R2 and R3
+ STREXD R2, (R1), R0 // stores R2 and R3
+ CMP $0, R0
+ BNE load64loop
+ MOVW R2, vallo+4(FP)
+ MOVW R3, valhi+8(FP)
+ RET
+
+TEXT ·armStoreUint64(SB),7,$0
+ BL fastCheck64<>(SB)
+ MOVW addrptr+0(FP), R1
+ MOVW vallo+4(FP), R2
+ MOVW valhi+8(FP), R3
+store64loop:
+ LDREXD (R1), R4 // loads R4 and R5
+ STREXD R2, (R1), R0 // stores R2 and R3
+ CMP $0, R0
+ BNE store64loop
+ RET
+
// Check for broken 64-bit LDREXD as found in QEMU.
// LDREXD followed by immediate STREXD should succeed.
// If it fails, try a few times just to be sure (maybe our thread got
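
Both 64-bit helpers above are LDREXD/STREXD retry loops: the exclusive store fails whenever another CPU has touched the doubleword, and the code loops until it succeeds. The same retry shape, written portably against the sync/atomic API (a sketch, not code from the patch):

package main

import (
	"fmt"
	"sync/atomic"
)

// addUint64 emulates an atomic add with a compare-and-swap retry loop,
// the portable analogue of the LDREXD/STREXD loops above.
func addUint64(addr *uint64, delta uint64) uint64 {
	for {
		old := atomic.LoadUint64(addr)
		if atomic.CompareAndSwapUint64(addr, old, old+delta) {
			return old + delta
		}
		// CAS failed: another goroutine updated *addr; reload and retry.
	}
}

func main() {
	var n uint64
	done := make(chan bool)
	for i := 0; i < 4; i++ {
		go func() {
			for j := 0; j < 1000; j++ {
				addUint64(&n, 1)
			}
			done <- true
		}()
	}
	for i := 0; i < 4; i++ {
		<-done
	}
	fmt.Println(n) // always 4000
}
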
diff --git a/src/pkg/sync/atomic/asm_linux_arm.s b/src/pkg/sync/atomic/asm_linux_arm.s
index 9ac411944..25dc85804 100644
--- a/src/pkg/sync/atomic/asm_linux_arm.s
+++ b/src/pkg/sync/atomic/asm_linux_arm.s
@@ -50,6 +50,9 @@ cascheck:
TEXT ·CompareAndSwapUintptr(SB),7,$0
B ·CompareAndSwapUint32(SB)
+TEXT ·CompareAndSwapPointer(SB),7,$0
+ B ·CompareAndSwapUint32(SB)
+
TEXT ·AddInt32(SB),7,$0
B ·AddUint32(SB)
@@ -96,3 +99,39 @@ loadloop1:
BCC loadloop1
MOVW R1, val+4(FP)
RET
+
+TEXT ·LoadInt64(SB),7,$0
+ B ·armLoadUint64(SB)
+
+TEXT ·LoadUint64(SB),7,$0
+ B ·armLoadUint64(SB)
+
+TEXT ·LoadUintptr(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),7,$0
+ B ·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),7,$0
+ MOVW addrptr+0(FP), R2
+ MOVW val+4(FP), R1
+storeloop1:
+ MOVW 0(R2), R0
+ BL cas<>(SB)
+ BCC storeloop1
+ RET
+
+TEXT ·StoreInt64(SB),7,$0
+ B ·armStoreUint64(SB)
+
+TEXT ·StoreUint64(SB),7,$0
+ B ·armStoreUint64(SB)
+
+TEXT ·StoreUintptr(SB),7,$0
+ B ·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),7,$0
+ B ·StoreUint32(SB)
diff --git a/src/pkg/sync/atomic/atomic_test.go b/src/pkg/sync/atomic/atomic_test.go
index 2229e58d0..f60d997ce 100644
--- a/src/pkg/sync/atomic/atomic_test.go
+++ b/src/pkg/sync/atomic/atomic_test.go
@@ -164,17 +164,17 @@ func TestCompareAndSwapInt32(t *testing.T) {
for val := int32(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapInt32(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapInt32(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic32 || x.after != magic32 {
@@ -193,17 +193,17 @@ func TestCompareAndSwapUint32(t *testing.T) {
for val := uint32(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUint32(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUint32(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic32 || x.after != magic32 {
@@ -226,17 +226,17 @@ func TestCompareAndSwapInt64(t *testing.T) {
for val := int64(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapInt64(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapInt64(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic64 || x.after != magic64 {
@@ -259,17 +259,17 @@ func TestCompareAndSwapUint64(t *testing.T) {
for val := uint64(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUint64(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUint64(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic64 || x.after != magic64 {
@@ -290,17 +290,48 @@ func TestCompareAndSwapUintptr(t *testing.T) {
for val := uintptr(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUintptr(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUintptr(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for val := uintptr(1); val+val > val; val += val {
+ x.i = unsafe.Pointer(val)
+ if !CompareAndSwapPointer(&x.i, unsafe.Pointer(val), unsafe.Pointer(val+1)) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != unsafe.Pointer(val+1) {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = unsafe.Pointer(val + 1)
+ if CompareAndSwapPointer(&x.i, unsafe.Pointer(val), unsafe.Pointer(val+2)) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != unsafe.Pointer(val+1) {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magicptr || x.after != magicptr {
@@ -348,6 +379,236 @@ func TestLoadUint32(t *testing.T) {
}
}
+func TestLoadInt64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ x.before = magic64
+ x.after = magic64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := LoadInt64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestLoadUint64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ x.before = magic64
+ x.after = magic64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := LoadUint64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestLoadUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := LoadUintptr(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := LoadPointer(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i = unsafe.Pointer(uintptr(x.i) + delta)
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStoreInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ StoreInt32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ StoreUint32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreInt64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ x.before = magic64
+ x.after = magic64
+ v := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ StoreInt64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestStoreUint64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ x.before = magic64
+ x.after = magic64
+ v := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ StoreUint64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestStoreUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ StoreUintptr(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStorePointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := unsafe.Pointer(uintptr(0))
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ StorePointer(&x.i, unsafe.Pointer(v))
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v = unsafe.Pointer(uintptr(v) + delta)
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
// Tests of correct behavior, with contention.
// (Is the function atomic?)
//
@@ -366,6 +627,7 @@ var hammer32 = []struct {
{"CompareAndSwapInt32", hammerCompareAndSwapInt32},
{"CompareAndSwapUint32", hammerCompareAndSwapUint32},
{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr32},
+ {"CompareAndSwapPointer", hammerCompareAndSwapPointer32},
}
func init() {
@@ -374,6 +636,7 @@ func init() {
// 64-bit system; clear uintptr tests
hammer32[2].f = nil
hammer32[5].f = nil
+ hammer32[6].f = nil
}
}
@@ -436,6 +699,20 @@ func hammerCompareAndSwapUintptr32(uval *uint32, count int) {
}
}
+func hammerCompareAndSwapPointer32(uval *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ val := (*unsafe.Pointer)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapPointer(val, v, unsafe.Pointer(uintptr(v)+1)) {
+ break
+ }
+ }
+ }
+}
+
func TestHammer32(t *testing.T) {
const p = 4
n := 100000
@@ -460,7 +737,7 @@ func TestHammer32(t *testing.T) {
<-c
}
if val != uint32(n)*p {
- t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+ t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
}
}
}
@@ -475,6 +752,7 @@ var hammer64 = []struct {
{"CompareAndSwapInt64", hammerCompareAndSwapInt64},
{"CompareAndSwapUint64", hammerCompareAndSwapUint64},
{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr64},
+ {"CompareAndSwapPointer", hammerCompareAndSwapPointer64},
}
func init() {
@@ -483,6 +761,7 @@ func init() {
// 32-bit system; clear uintptr tests
hammer64[2].f = nil
hammer64[5].f = nil
+ hammer64[6].f = nil
}
}
@@ -545,6 +824,20 @@ func hammerCompareAndSwapUintptr64(uval *uint64, count int) {
}
}
+func hammerCompareAndSwapPointer64(uval *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ val := (*unsafe.Pointer)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapPointer(val, v, unsafe.Pointer(uintptr(v)+1)) {
+ break
+ }
+ }
+ }
+}
+
func TestHammer64(t *testing.T) {
if test64err != nil {
t.Logf("Skipping 64-bit tests: %v", test64err)
@@ -573,63 +866,141 @@ func TestHammer64(t *testing.T) {
<-c
}
if val != uint64(n)*p {
- t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+ t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
}
}
}
-func hammerLoadInt32(t *testing.T, uval *uint32) {
- val := (*int32)(unsafe.Pointer(uval))
- for {
- v := LoadInt32(val)
+func hammerStoreLoadInt32(t *testing.T, valp unsafe.Pointer) {
+ val := (*int32)(valp)
+ v := LoadInt32(val)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Int32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreInt32(val, new)
+}
+
+func hammerStoreLoadUint32(t *testing.T, valp unsafe.Pointer) {
+ val := (*uint32)(valp)
+ v := LoadUint32(val)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreUint32(val, new)
+}
+
+func hammerStoreLoadInt64(t *testing.T, valp unsafe.Pointer) {
+ val := (*int64)(valp)
+ v := LoadInt64(val)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Int64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreInt64(val, new)
+}
+
+func hammerStoreLoadUint64(t *testing.T, valp unsafe.Pointer) {
+ val := (*uint64)(valp)
+ v := LoadUint64(val)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreUint64(val, new)
+}
+
+func hammerStoreLoadUintptr(t *testing.T, valp unsafe.Pointer) {
+ val := (*uintptr)(valp)
+ var test64 uint64 = 1 << 50
+ arch32 := uintptr(test64) == 0
+ v := LoadUintptr(val)
+ new := v
+ if arch32 {
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
- t.Fatalf("LoadInt32: %#x != %#x", vlo, vhi)
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
}
- new := v + 1 + 1<<16
+ new = v + 1 + 1<<16
if vlo == 1e4 {
new = 0
}
- if CompareAndSwapInt32(val, v, new) {
- break
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
}
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
}
+ StoreUintptr(val, new)
}
-func hammerLoadUint32(t *testing.T, val *uint32) {
- for {
- v := LoadUint32(val)
+func hammerStoreLoadPointer(t *testing.T, valp unsafe.Pointer) {
+ val := (*unsafe.Pointer)(valp)
+ var test64 uint64 = 1 << 50
+ arch32 := uintptr(test64) == 0
+ v := uintptr(LoadPointer(val))
+ new := v
+ if arch32 {
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
- t.Fatalf("LoadUint32: %#x != %#x", vlo, vhi)
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
}
- new := v + 1 + 1<<16
+ new = v + 1 + 1<<16
if vlo == 1e4 {
new = 0
}
- if CompareAndSwapUint32(val, v, new) {
- break
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
}
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
}
+ StorePointer(val, unsafe.Pointer(new))
}
-func TestHammerLoad(t *testing.T) {
- tests := [...]func(*testing.T, *uint32){hammerLoadInt32, hammerLoadUint32}
- n := 100000
+func TestHammerStoreLoad(t *testing.T) {
+ var tests []func(*testing.T, unsafe.Pointer)
+ tests = append(tests, hammerStoreLoadInt32, hammerStoreLoadUint32,
+ hammerStoreLoadUintptr, hammerStoreLoadPointer)
+ if test64err == nil {
+ tests = append(tests, hammerStoreLoadInt64, hammerStoreLoadUint64)
+ }
+ n := int(1e6)
if testing.Short() {
- n = 10000
+ n = int(1e4)
}
const procs = 8
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
for _, tt := range tests {
c := make(chan int)
- var val uint32
+ var val uint64
for p := 0; p < procs; p++ {
go func() {
for i := 0; i < n; i++ {
- tt(t, &val)
+ tt(t, unsafe.Pointer(&val))
}
c <- 1
}()
@@ -639,3 +1010,185 @@ func TestHammerLoad(t *testing.T) {
}
}
}
+
+func TestStoreLoadSeqCst32(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
+ return
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int32{}
+ ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int32(1); i < N; i++ {
+ StoreInt32(&X[me], i)
+ my := LoadInt32(&X[he])
+ StoreInt32(&ack[me][i%3], my)
+ for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt32(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+ }
+ if my != i && his != i {
+ t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ }
+ StoreInt32(&ack[me][(i-1)%3], -1)
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadSeqCst64(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
+ return
+ }
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int64{}
+ ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int64(1); i < N; i++ {
+ StoreInt64(&X[me], i)
+ my := LoadInt64(&X[he])
+ StoreInt64(&ack[me][i%3], my)
+ for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt64(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+ }
+ if my != i && his != i {
+ t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ }
+ StoreInt64(&ack[me][(i-1)%3], -1)
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadRelAcq32(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
+ return
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int32
+ pad1 [128]int8
+ data1 int32
+ pad2 [128]int8
+ data2 float32
+ }
+ var X Data
+ for p := int32(0); p < 2; p++ {
+ go func(p int32) {
+ for i := int32(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float32(i)
+ StoreInt32(&X.signal, i)
+ } else {
+ for w := 1; LoadInt32(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float32(i) {
+ t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadRelAcq64(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
+ return
+ }
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int64
+ pad1 [128]int8
+ data1 int64
+ pad2 [128]int8
+ data2 float64
+ }
+ var X Data
+ for p := int64(0); p < 2; p++ {
+ go func(p int64) {
+ for i := int64(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float64(i)
+ StoreInt64(&X.signal, i)
+ } else {
+ for w := 1; LoadInt64(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float64(i) {
+ t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
diff --git a/src/pkg/sync/atomic/doc.go b/src/pkg/sync/atomic/doc.go
index b35eb539c..ecb4808ce 100644
--- a/src/pkg/sync/atomic/doc.go
+++ b/src/pkg/sync/atomic/doc.go
@@ -22,9 +22,13 @@
//
package atomic
+import (
+ "unsafe"
+)
+
// BUG(rsc): On ARM, the 64-bit functions use instructions unavailable before ARM 11.
//
-// On x86-32, the 64-bit functions use instructions unavailable before the Pentium.
+// On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
func CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)
@@ -41,6 +45,9 @@ func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)
// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)
+// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
+func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
+
// AddInt32 atomically adds delta to *val and returns the new value.
func AddInt32(val *int32, delta int32) (new int32)
@@ -59,9 +66,39 @@ func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
// LoadInt32 atomically loads *addr.
func LoadInt32(addr *int32) (val int32)
+// LoadInt64 atomically loads *addr.
+func LoadInt64(addr *int64) (val int64)
+
// LoadUint32 atomically loads *addr.
func LoadUint32(addr *uint32) (val uint32)
+// LoadUint64 atomically loads *addr.
+func LoadUint64(addr *uint64) (val uint64)
+
+// LoadUintptr atomically loads *addr.
+func LoadUintptr(addr *uintptr) (val uintptr)
+
+// LoadPointer atomically loads *addr.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+// StoreInt32 atomically stores val into *addr.
+func StoreInt32(addr *int32, val int32)
+
+// StoreInt64 atomically stores val into *addr.
+func StoreInt64(addr *int64, val int64)
+
+// StoreUint32 atomically stores val into *addr.
+func StoreUint32(addr *uint32, val uint32)
+
+// StoreUint64 atomically stores val into *addr.
+func StoreUint64(addr *uint64, val uint64)
+
+// StoreUintptr atomically stores val into *addr.
+func StoreUintptr(addr *uintptr, val uintptr)
+
+// StorePointer atomically stores val into *addr.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
+
// Helper for ARM. Linker will discard on other systems
func panic64() {
panic("sync/atomic: broken 64-bit atomic operations (buggy QEMU)")
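
The new Pointer functions exist to publish a pointer from one goroutine and read it from others without a lock. A sketch of that intended use (the config type and helper names are invented for illustration):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type config struct{ addr string }

var current unsafe.Pointer // holds a *config; accessed only via sync/atomic

func store(c *config) { atomic.StorePointer(&current, unsafe.Pointer(c)) }
func load() *config   { return (*config)(atomic.LoadPointer(&current)) }

func main() {
	store(&config{addr: "10.0.0.1:80"})
	go store(&config{addr: "10.0.0.2:80"}) // a writer may run concurrently
	fmt.Println(load().addr)               // readers see one value or the other, never a torn pointer
}
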
diff --git a/src/pkg/sync/cond.go b/src/pkg/sync/cond.go
index 75494b535..1fc3deaf1 100644
--- a/src/pkg/sync/cond.go
+++ b/src/pkg/sync/cond.go
@@ -4,8 +4,6 @@
package sync
-import "runtime"
-
// Cond implements a condition variable, a rendezvous point
// for goroutines waiting for or announcing the occurrence
// of an event.
@@ -43,9 +41,10 @@ func NewCond(l Locker) *Cond {
// Wait atomically unlocks c.L and suspends execution
// of the calling goroutine. After later resuming execution,
-// Wait locks c.L before returning.
+// Wait locks c.L before returning. Unlike in other systems,
+// Wait cannot return unless awoken by Broadcast or Signal.
//
-// Because L is not locked when Wait first resumes, the caller
+// Because c.L is not locked when Wait first resumes, the caller
// typically cannot assume that the condition is true when
// Wait returns. Instead, the caller should Wait in a loop:
//
@@ -65,7 +64,7 @@ func (c *Cond) Wait() {
c.newWaiters++
c.m.Unlock()
c.L.Unlock()
- runtime.Semacquire(s)
+ runtime_Semacquire(s)
c.L.Lock()
}
@@ -84,7 +83,7 @@ func (c *Cond) Signal() {
}
if c.oldWaiters > 0 {
c.oldWaiters--
- runtime.Semrelease(c.oldSema)
+ runtime_Semrelease(c.oldSema)
}
c.m.Unlock()
}
@@ -98,13 +97,13 @@ func (c *Cond) Broadcast() {
// Wake both generations.
if c.oldWaiters > 0 {
for i := 0; i < c.oldWaiters; i++ {
- runtime.Semrelease(c.oldSema)
+ runtime_Semrelease(c.oldSema)
}
c.oldWaiters = 0
}
if c.newWaiters > 0 {
for i := 0; i < c.newWaiters; i++ {
- runtime.Semrelease(c.newSema)
+ runtime_Semrelease(c.newSema)
}
c.newWaiters = 0
c.newSema = nil
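
The hunks above only rename the semaphore hooks, but the doc-comment change is worth restating: Wait releases c.L while asleep, so the condition must be re-checked in a loop. A small runnable illustration (a sketch, not from the patch):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	ready := false

	go func() {
		mu.Lock()
		ready = true
		mu.Unlock()
		c.Signal() // wake one waiter; it must still re-check the condition
	}()

	mu.Lock()
	for !ready { // loop: c.L is unlocked while Wait sleeps
		c.Wait()
	}
	mu.Unlock()
	fmt.Println("condition observed")
}
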
diff --git a/src/pkg/sync/example_test.go b/src/pkg/sync/example_test.go
new file mode 100644
index 000000000..156492400
--- /dev/null
+++ b/src/pkg/sync/example_test.go
@@ -0,0 +1,54 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+)
+
+// This example fetches several URLs concurrently,
+// using a WaitGroup to block until all the fetches are complete.
+func ExampleWaitGroup() {
+ var wg sync.WaitGroup
+ var urls = []string{
+ "http://www.golang.org/",
+ "http://www.google.com/",
+ "http://www.somestupidname.com/",
+ }
+ for _, url := range urls {
+ // Increment the WaitGroup counter.
+ wg.Add(1)
+ // Launch a goroutine to fetch the URL.
+ go func(url string) {
+ // Fetch the URL.
+ http.Get(url)
+ // Decrement the counter.
+ wg.Done()
+ }(url)
+ }
+ // Wait for all HTTP fetches to complete.
+ wg.Wait()
+}
+
+func ExampleOnce() {
+ var once sync.Once
+ onceBody := func() {
+ fmt.Printf("Only once\n")
+ }
+ done := make(chan bool)
+ for i := 0; i < 10; i++ {
+ go func() {
+ once.Do(onceBody)
+ done <- true
+ }()
+ }
+ for i := 0; i < 10; i++ {
+ <-done
+ }
+ // Output:
+ // Only once
+}
diff --git a/src/pkg/sync/export_test.go b/src/pkg/sync/export_test.go
new file mode 100644
index 000000000..fa5983a2d
--- /dev/null
+++ b/src/pkg/sync/export_test.go
@@ -0,0 +1,9 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+// Export for testing.
+var Runtime_Semacquire = runtime_Semacquire
+var Runtime_Semrelease = runtime_Semrelease
diff --git a/src/pkg/sync/mutex.go b/src/pkg/sync/mutex.go
index 2d46c8994..9494cc3f8 100644
--- a/src/pkg/sync/mutex.go
+++ b/src/pkg/sync/mutex.go
@@ -6,12 +6,11 @@
// exclusion locks. Other than the Once and WaitGroup types, most are intended
// for use by low-level library routines. Higher-level synchronization is
// better done via channels and communication.
+//
+// Values containing the types defined in this package should not be copied.
package sync
-import (
- "runtime"
- "sync/atomic"
-)
+import "sync/atomic"
// A Mutex is a mutual exclusion lock.
// Mutexes can be created as part of other structures;
@@ -58,7 +57,7 @@ func (m *Mutex) Lock() {
if old&mutexLocked == 0 {
break
}
- runtime.Semacquire(&m.sema)
+ runtime_Semacquire(&m.sema)
awoke = true
}
}
@@ -87,7 +86,7 @@ func (m *Mutex) Unlock() {
// Grab the right to wake someone.
new = (old - 1<<mutexWaiterShift) | mutexWoken
if atomic.CompareAndSwapInt32(&m.state, old, new) {
- runtime.Semrelease(&m.sema)
+ runtime_Semrelease(&m.sema)
return
}
old = m.state
diff --git a/src/pkg/sync/mutex_test.go b/src/pkg/sync/mutex_test.go
index 47758844f..bf78c6f60 100644
--- a/src/pkg/sync/mutex_test.go
+++ b/src/pkg/sync/mutex_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// GOMAXPROCS=10 gotest
+// GOMAXPROCS=10 go test
package sync_test
@@ -15,8 +15,8 @@ import (
func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
for i := 0; i < loops; i++ {
- runtime.Semacquire(s)
- runtime.Semrelease(s)
+ Runtime_Semacquire(s)
+ Runtime_Semrelease(s)
}
cdone <- true
}
diff --git a/src/pkg/sync/once_test.go b/src/pkg/sync/once_test.go
index 157a3667a..37075af17 100644
--- a/src/pkg/sync/once_test.go
+++ b/src/pkg/sync/once_test.go
@@ -5,9 +5,9 @@
package sync_test
import (
+ "runtime"
. "sync"
"sync/atomic"
- "runtime"
"testing"
)
diff --git a/src/pkg/sync/runtime.go b/src/pkg/sync/runtime.go
new file mode 100644
index 000000000..e99599c11
--- /dev/null
+++ b/src/pkg/sync/runtime.go
@@ -0,0 +1,18 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+// defined in package runtime
+
+// Semacquire waits until *s > 0 and then atomically decrements it.
+// It is intended as a simple sleep primitive for use by the synchronization
+// library and should not be used directly.
+func runtime_Semacquire(s *uint32)
+
+// Semrelease atomically increments *s and notifies a waiting goroutine
+// if one is blocked in Semacquire.
+// It is intended as a simple wakeup primitive for use by the synchronization
+// library and should not be used directly.
+func runtime_Semrelease(s *uint32)
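
runtime_Semacquire and runtime_Semrelease are implemented in the runtime, which parks and unparks goroutines in the scheduler. As a rough model of the contract only (not the real implementation), the same semantics can be sketched with a Mutex and a Cond:

package main

import "sync"

// sema is a toy model of the runtime semaphore's contract:
// Acquire blocks until the count is positive, then decrements it;
// Release increments the count and wakes one blocked Acquire.
type sema struct {
	mu    sync.Mutex
	cond  *sync.Cond
	count uint32
}

func newSema() *sema {
	s := &sema{}
	s.cond = sync.NewCond(&s.mu)
	return s
}

func (s *sema) Acquire() {
	s.mu.Lock()
	for s.count == 0 {
		s.cond.Wait()
	}
	s.count--
	s.mu.Unlock()
}

func (s *sema) Release() {
	s.mu.Lock()
	s.count++
	s.mu.Unlock()
	s.cond.Signal()
}

func main() {
	s := newSema()
	done := make(chan bool)
	go func() {
		s.Acquire() // blocks until the Release below
		done <- true
	}()
	s.Release()
	<-done
}
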
diff --git a/src/pkg/sync/runtime_sema_test.go b/src/pkg/sync/runtime_sema_test.go
new file mode 100644
index 000000000..57a8dbee7
--- /dev/null
+++ b/src/pkg/sync/runtime_sema_test.go
@@ -0,0 +1,101 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "runtime"
+ . "sync"
+ "sync/atomic"
+ "testing"
+)
+
+func BenchmarkSemaUncontended(b *testing.B) {
+ type PaddedSem struct {
+ sem uint32
+ pad [32]uint32
+ }
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ sem := new(PaddedSem)
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ Runtime_Semrelease(&sem.sem)
+ Runtime_Semacquire(&sem.sem)
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func benchmarkSema(b *testing.B, block, work bool) {
+ const CallsPerSched = 1000
+ const LocalWork = 100
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ c2 := make(chan bool, procs/2)
+ sem := uint32(0)
+ if block {
+ for p := 0; p < procs/2; p++ {
+ go func() {
+ Runtime_Semacquire(&sem)
+ c2 <- true
+ }()
+ }
+ }
+ for p := 0; p < procs; p++ {
+ go func() {
+ foo := 0
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ Runtime_Semrelease(&sem)
+ if work {
+ for i := 0; i < LocalWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ Runtime_Semacquire(&sem)
+ }
+ }
+ c <- foo == 42
+ Runtime_Semrelease(&sem)
+ }()
+ }
+ if block {
+ for p := 0; p < procs/2; p++ {
+ <-c2
+ }
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSemaSyntNonblock(b *testing.B) {
+ benchmarkSema(b, false, false)
+}
+
+func BenchmarkSemaSyntBlock(b *testing.B) {
+ benchmarkSema(b, true, false)
+}
+
+func BenchmarkSemaWorkNonblock(b *testing.B) {
+ benchmarkSema(b, false, true)
+}
+
+func BenchmarkSemaWorkBlock(b *testing.B) {
+ benchmarkSema(b, true, true)
+}
diff --git a/src/pkg/sync/rwmutex.go b/src/pkg/sync/rwmutex.go
index cb1a47720..782a9c319 100644
--- a/src/pkg/sync/rwmutex.go
+++ b/src/pkg/sync/rwmutex.go
@@ -4,10 +4,7 @@
package sync
-import (
- "runtime"
- "sync/atomic"
-)
+import "sync/atomic"
// An RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers
@@ -29,7 +26,7 @@ const rwmutexMaxReaders = 1 << 30
func (rw *RWMutex) RLock() {
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it.
- runtime.Semacquire(&rw.readerSem)
+ runtime_Semacquire(&rw.readerSem)
}
}
@@ -42,7 +39,7 @@ func (rw *RWMutex) RUnlock() {
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
// The last reader unblocks the writer.
- runtime.Semrelease(&rw.writerSem)
+ runtime_Semrelease(&rw.writerSem)
}
}
}
@@ -60,7 +57,7 @@ func (rw *RWMutex) Lock() {
r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
// Wait for active readers.
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
- runtime.Semacquire(&rw.writerSem)
+ runtime_Semacquire(&rw.writerSem)
}
}
@@ -75,7 +72,7 @@ func (rw *RWMutex) Unlock() {
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
- runtime.Semrelease(&rw.readerSem)
+ runtime_Semrelease(&rw.readerSem)
}
// Allow other writers to proceed.
rw.w.Unlock()
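
Only the semaphore calls change here, but the surrounding logic repays a close read: Lock subtracts rwmutexMaxReaders from readerCount, driving it negative so that every later RLock sees "writer pending" from a single atomic add. From the outside it remains the usual many-readers/one-writer lock (usage sketch, not from the patch):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.RWMutex
	m := map[string]int{"a": 1}
	done := make(chan bool)

	for i := 0; i < 3; i++ {
		go func() {
			mu.RLock() // many readers may hold the lock together
			_ = m["a"]
			mu.RUnlock()
			done <- true
		}()
	}

	mu.Lock() // the writer waits for active readers and blocks new ones
	m["a"] = 2
	mu.Unlock()

	for i := 0; i < 3; i++ {
		<-done
	}
	fmt.Println(m["a"])
}
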
diff --git a/src/pkg/sync/rwmutex_test.go b/src/pkg/sync/rwmutex_test.go
index dc8ce9653..39d5d6540 100644
--- a/src/pkg/sync/rwmutex_test.go
+++ b/src/pkg/sync/rwmutex_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// GOMAXPROCS=10 gotest
+// GOMAXPROCS=10 go test
package sync_test
diff --git a/src/pkg/sync/waitgroup.go b/src/pkg/sync/waitgroup.go
index a4c9b7e43..0165b1ffb 100644
--- a/src/pkg/sync/waitgroup.go
+++ b/src/pkg/sync/waitgroup.go
@@ -4,31 +4,13 @@
package sync
-import (
- "runtime"
- "sync/atomic"
-)
+import "sync/atomic"
// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
// goroutines to wait for. Then each of the goroutines
// runs and calls Done when finished. At the same time,
// Wait can be used to block until all goroutines have finished.
-//
-// For example:
-//
-// for i := 0; i < n; i++ {
-// if !condition(i) {
-// continue
-// }
-// wg.Add(1)
-// go func() {
-// // Do something.
-// wg.Done()
-// }()
-// }
-// wg.Wait()
-//
type WaitGroup struct {
m Mutex
counter int32
@@ -60,7 +42,7 @@ func (wg *WaitGroup) Add(delta int) {
}
wg.m.Lock()
for i := int32(0); i < wg.waiters; i++ {
- runtime.Semrelease(wg.sema)
+ runtime_Semrelease(wg.sema)
}
wg.waiters = 0
wg.sema = nil
@@ -93,5 +75,5 @@ func (wg *WaitGroup) Wait() {
}
s := wg.sema
wg.m.Unlock()
- runtime.Semacquire(s)
+ runtime_Semacquire(s)
}