path: root/src/pkg/sync
author    Michael Stapelberg <stapelberg@debian.org>  2013-03-04 21:27:36 +0100
committer Michael Stapelberg <michael@stapelberg.de>  2013-03-04 21:27:36 +0100
commit    04b08da9af0c450d645ab7389d1467308cfc2db8 (patch)
tree      db247935fa4f2f94408edc3acd5d0d4f997aa0d8 /src/pkg/sync
parent    917c5fb8ec48e22459d77e3849e6d388f93d3260 (diff)
download  golang-04b08da9af0c450d645ab7389d1467308cfc2db8.tar.gz

Imported Upstream version 1.1~hg20130304 (tag: upstream/1.1_hg20130304)
Diffstat (limited to 'src/pkg/sync')
-rw-r--r--  src/pkg/sync/atomic/64bit_arm.go       |  36
-rw-r--r--  src/pkg/sync/atomic/asm_386.s          |  52
-rw-r--r--  src/pkg/sync/atomic/asm_amd64.s        |  36
-rw-r--r--  src/pkg/sync/atomic/asm_arm.s          |  14
-rw-r--r--  src/pkg/sync/atomic/asm_freebsd_arm.s  |  89
-rw-r--r--  src/pkg/sync/atomic/asm_linux_arm.s    |  87
-rw-r--r--  src/pkg/sync/atomic/asm_netbsd_arm.s   |  89
-rw-r--r--  src/pkg/sync/atomic/atomic_test.go     | 189
-rw-r--r--  src/pkg/sync/atomic/doc.go             |  55
-rw-r--r--  src/pkg/sync/atomic/race.go            | 213
-rw-r--r--  src/pkg/sync/cond.go                   |  18
-rw-r--r--  src/pkg/sync/example_test.go           |   6
-rw-r--r--  src/pkg/sync/mutex.go                  |  16
-rw-r--r--  src/pkg/sync/once.go                   |   2
-rw-r--r--  src/pkg/sync/once_test.go              |  29
-rw-r--r--  src/pkg/sync/race.go                   |  34
-rw-r--r--  src/pkg/sync/race0.go                  |  28
-rw-r--r--  src/pkg/sync/rwmutex.go                |  36
-rw-r--r--  src/pkg/sync/waitgroup.go              |  39
-rw-r--r--  src/pkg/sync/waitgroup_test.go         |   2
20 files changed, 869 insertions(+), 201 deletions(-)
diff --git a/src/pkg/sync/atomic/64bit_arm.go b/src/pkg/sync/atomic/64bit_arm.go
new file mode 100644
index 000000000..f070e78bd
--- /dev/null
+++ b/src/pkg/sync/atomic/64bit_arm.go
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+func loadUint64(addr *uint64) (val uint64) {
+ for {
+ val = *addr
+ if CompareAndSwapUint64(addr, val, val) {
+ break
+ }
+ }
+ return
+}
+
+func storeUint64(addr *uint64, val uint64) {
+ for {
+ old := *addr
+ if CompareAndSwapUint64(addr, old, val) {
+ break
+ }
+ }
+ return
+}
+
+func addUint64(val *uint64, delta uint64) (new uint64) {
+ for {
+ old := *val
+ new = old + delta
+ if CompareAndSwapUint64(val, old, new) {
+ break
+ }
+ }
+ return
+}
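
The new file above emulates the 64-bit atomics on older ARM chips by spinning on CompareAndSwapUint64; even the load is a CAS of the value against itself, so it can never return a half-written word. The same loop shape works at user level for any read-modify-write the package does not provide. A minimal sketch using only the public sync/atomic API (storeMaxUint64 is a hypothetical helper, not part of the package):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// storeMaxUint64 atomically raises *addr to val if val is larger.
	func storeMaxUint64(addr *uint64, val uint64) {
		for {
			old := atomic.LoadUint64(addr)
			if old >= val {
				return // already at least val
			}
			if atomic.CompareAndSwapUint64(addr, old, val) {
				return // our update won
			}
			// Lost a race with another writer; reload and retry.
		}
	}

	func main() {
		var x uint64
		storeMaxUint64(&x, 42)
		storeMaxUint64(&x, 7)
		fmt.Println(x) // 42
	}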
diff --git a/src/pkg/sync/atomic/asm_386.s b/src/pkg/sync/atomic/asm_386.s
index a406852f4..7a98a61d8 100644
--- a/src/pkg/sync/atomic/asm_386.s
+++ b/src/pkg/sync/atomic/asm_386.s
@@ -2,17 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !race
+
TEXT ·CompareAndSwapInt32(SB),7,$0
JMP ·CompareAndSwapUint32(SB)
TEXT ·CompareAndSwapUint32(SB),7,$0
- MOVL valptr+0(FP), BP
+ MOVL addr+0(FP), BP
MOVL old+4(FP), AX
MOVL new+8(FP), CX
// CMPXCHGL was introduced on the 486.
LOCK
CMPXCHGL CX, 0(BP)
- SETEQ ret+12(FP)
+ SETEQ swapped+12(FP)
RET
TEXT ·CompareAndSwapUintptr(SB),7,$0
@@ -25,29 +27,29 @@ TEXT ·CompareAndSwapInt64(SB),7,$0
JMP ·CompareAndSwapUint64(SB)
TEXT ·CompareAndSwapUint64(SB),7,$0
- MOVL valptr+0(FP), BP
- MOVL oldlo+4(FP), AX
- MOVL oldhi+8(FP), DX
- MOVL newlo+12(FP), BX
- MOVL newhi+16(FP), CX
+ MOVL addr+0(FP), BP
+ MOVL old+4(FP), AX
+ MOVL old+8(FP), DX
+ MOVL new+12(FP), BX
+ MOVL new+16(FP), CX
// CMPXCHG8B was introduced on the Pentium.
LOCK
CMPXCHG8B 0(BP)
- SETEQ ret+20(FP)
+ SETEQ swapped+20(FP)
RET
TEXT ·AddInt32(SB),7,$0
JMP ·AddUint32(SB)
TEXT ·AddUint32(SB),7,$0
- MOVL valptr+0(FP), BP
+ MOVL addr+0(FP), BP
MOVL delta+4(FP), AX
MOVL AX, CX
// XADD was introduced on the 486.
LOCK
XADDL AX, 0(BP)
ADDL AX, CX
- MOVL CX, ret+8(FP)
+ MOVL CX, new+8(FP)
RET
TEXT ·AddUintptr(SB),7,$0
@@ -58,24 +60,24 @@ TEXT ·AddInt64(SB),7,$0
TEXT ·AddUint64(SB),7,$0
// no XADDQ so use CMPXCHG8B loop
- MOVL valptr+0(FP), BP
+ MOVL addr+0(FP), BP
// DI:SI = delta
- MOVL deltalo+4(FP), SI
- MOVL deltahi+8(FP), DI
- // DX:AX = *valptr
+ MOVL delta+4(FP), SI
+ MOVL delta+8(FP), DI
+ // DX:AX = *addr
MOVL 0(BP), AX
MOVL 4(BP), DX
addloop:
- // CX:BX = DX:AX (*valptr) + DI:SI (delta)
+ // CX:BX = DX:AX (*addr) + DI:SI (delta)
MOVL AX, BX
MOVL DX, CX
ADDL SI, BX
ADCL DI, CX
- // if *valptr == DX:AX {
- // *valptr = CX:BX
+ // if *addr == DX:AX {
+ // *addr = CX:BX
// } else {
- // DX:AX = *valptr
+ // DX:AX = *addr
// }
// all in one instruction
LOCK
@@ -85,24 +87,24 @@ addloop:
// success
// return CX:BX
- MOVL BX, retlo+12(FP)
- MOVL CX, rethi+16(FP)
+ MOVL BX, new+12(FP)
+ MOVL CX, new+16(FP)
RET
TEXT ·LoadInt32(SB),7,$0
JMP ·LoadUint32(SB)
TEXT ·LoadUint32(SB),7,$0
- MOVL addrptr+0(FP), AX
+ MOVL addr+0(FP), AX
MOVL 0(AX), AX
- MOVL AX, ret+4(FP)
+ MOVL AX, val+4(FP)
RET
TEXT ·LoadInt64(SB),7,$0
JMP ·LoadUint64(SB)
TEXT ·LoadUint64(SB),7,$0
- MOVL addrptr+0(FP), AX
+ MOVL addr+0(FP), AX
// MOVQ and EMMS were introduced on the Pentium MMX.
// MOVQ (%EAX), %MM0
BYTE $0x0f; BYTE $0x6f; BYTE $0x00
@@ -121,7 +123,7 @@ TEXT ·StoreInt32(SB),7,$0
JMP ·StoreUint32(SB)
TEXT ·StoreUint32(SB),7,$0
- MOVL addrptr+0(FP), BP
+ MOVL addr+0(FP), BP
MOVL val+4(FP), AX
XCHGL AX, 0(BP)
RET
@@ -130,7 +132,7 @@ TEXT ·StoreInt64(SB),7,$0
JMP ·StoreUint64(SB)
TEXT ·StoreUint64(SB),7,$0
- MOVL addrptr+0(FP), AX
+ MOVL addr+0(FP), AX
// MOVQ and EMMS were introduced on the Pentium MMX.
// MOVQ 0x8(%ESP), %MM0
BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
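
The BYTE runs in the 64-bit load and store above hand-encode MOVQ and EMMS, presumably because the assembler of this era had no MMX mnemonics; the accompanying comments record the intended instruction for each encoding, and the Pentium MMX requirement is what the doc.go BUG note further down refers to.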
diff --git a/src/pkg/sync/atomic/asm_amd64.s b/src/pkg/sync/atomic/asm_amd64.s
index 6f8bde068..58bda9e4f 100644
--- a/src/pkg/sync/atomic/asm_amd64.s
+++ b/src/pkg/sync/atomic/asm_amd64.s
@@ -2,16 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !race
+
TEXT ·CompareAndSwapInt32(SB),7,$0
JMP ·CompareAndSwapUint32(SB)
TEXT ·CompareAndSwapUint32(SB),7,$0
- MOVQ valptr+0(FP), BP
+ MOVQ addr+0(FP), BP
MOVL old+8(FP), AX
MOVL new+12(FP), CX
LOCK
CMPXCHGL CX, 0(BP)
- SETEQ ret+16(FP)
+ SETEQ swapped+16(FP)
RET
TEXT ·CompareAndSwapUintptr(SB),7,$0
@@ -24,25 +26,25 @@ TEXT ·CompareAndSwapInt64(SB),7,$0
JMP ·CompareAndSwapUint64(SB)
TEXT ·CompareAndSwapUint64(SB),7,$0
- MOVQ valptr+0(FP), BP
+ MOVQ addr+0(FP), BP
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BP)
- SETEQ ret+24(FP)
+ SETEQ swapped+24(FP)
RET
TEXT ·AddInt32(SB),7,$0
JMP ·AddUint32(SB)
TEXT ·AddUint32(SB),7,$0
- MOVQ valptr+0(FP), BP
+ MOVQ addr+0(FP), BP
MOVL delta+8(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BP)
ADDL AX, CX
- MOVL CX, ret+16(FP)
+ MOVL CX, new+16(FP)
RET
TEXT ·AddUintptr(SB),7,$0
@@ -52,47 +54,47 @@ TEXT ·AddInt64(SB),7,$0
JMP ·AddUint64(SB)
TEXT ·AddUint64(SB),7,$0
- MOVQ valptr+0(FP), BP
+ MOVQ addr+0(FP), BP
MOVQ delta+8(FP), AX
MOVQ AX, CX
LOCK
XADDQ AX, 0(BP)
ADDQ AX, CX
- MOVQ CX, ret+16(FP)
+ MOVQ CX, new+16(FP)
RET
TEXT ·LoadInt32(SB),7,$0
JMP ·LoadUint32(SB)
TEXT ·LoadUint32(SB),7,$0
- MOVQ addrptr+0(FP), AX
+ MOVQ addr+0(FP), AX
MOVL 0(AX), AX
- MOVL AX, ret+8(FP)
+ MOVL AX, val+8(FP)
RET
TEXT ·LoadInt64(SB),7,$0
JMP ·LoadUint64(SB)
TEXT ·LoadUint64(SB),7,$0
- MOVQ addrptr+0(FP), AX
+ MOVQ addr+0(FP), AX
MOVQ 0(AX), AX
- MOVQ AX, ret+8(FP)
+ MOVQ AX, val+8(FP)
RET
TEXT ·LoadUintptr(SB),7,$0
JMP ·LoadPointer(SB)
TEXT ·LoadPointer(SB),7,$0
- MOVQ addrptr+0(FP), AX
+ MOVQ addr+0(FP), AX
MOVQ 0(AX), AX
- MOVQ AX, ret+8(FP)
+ MOVQ AX, val+8(FP)
RET
TEXT ·StoreInt32(SB),7,$0
JMP ·StoreUint32(SB)
TEXT ·StoreUint32(SB),7,$0
- MOVQ addrptr+0(FP), BP
+ MOVQ addr+0(FP), BP
MOVL val+8(FP), AX
XCHGL AX, 0(BP)
RET
@@ -101,7 +103,7 @@ TEXT ·StoreInt64(SB),7,$0
JMP ·StoreUint64(SB)
TEXT ·StoreUint64(SB),7,$0
- MOVQ addrptr+0(FP), BP
+ MOVQ addr+0(FP), BP
MOVQ val+8(FP), AX
XCHGQ AX, 0(BP)
RET
@@ -110,7 +112,7 @@ TEXT ·StoreUintptr(SB),7,$0
JMP ·StorePointer(SB)
TEXT ·StorePointer(SB),7,$0
- MOVQ addrptr+0(FP), BP
+ MOVQ addr+0(FP), BP
MOVQ val+8(FP), AX
XCHGQ AX, 0(BP)
RET
diff --git a/src/pkg/sync/atomic/asm_arm.s b/src/pkg/sync/atomic/asm_arm.s
index 2d10a922b..4faf5b5d9 100644
--- a/src/pkg/sync/atomic/asm_arm.s
+++ b/src/pkg/sync/atomic/asm_arm.s
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !race
+
// ARM atomic operations, for use by asm_$(GOOS)_arm.s.
TEXT ·armCompareAndSwapUint32(SB),7,$0
- MOVW valptr+0(FP), R1
+ MOVW addr+0(FP), R1
MOVW old+4(FP), R2
MOVW new+8(FP), R3
casloop:
@@ -26,7 +28,7 @@ casfail:
TEXT ·armCompareAndSwapUint64(SB),7,$0
BL fastCheck64<>(SB)
- MOVW valptr+0(FP), R1
+ MOVW addr+0(FP), R1
MOVW oldlo+4(FP), R2
MOVW oldhi+8(FP), R3
MOVW newlo+12(FP), R4
@@ -50,7 +52,7 @@ cas64fail:
RET
TEXT ·armAddUint32(SB),7,$0
- MOVW valptr+0(FP), R1
+ MOVW addr+0(FP), R1
MOVW delta+4(FP), R2
addloop:
// LDREX and STREX were introduced in ARM 6.
@@ -64,7 +66,7 @@ addloop:
TEXT ·armAddUint64(SB),7,$0
BL fastCheck64<>(SB)
- MOVW valptr+0(FP), R1
+ MOVW addr+0(FP), R1
MOVW deltalo+4(FP), R2
MOVW deltahi+8(FP), R3
add64loop:
@@ -81,7 +83,7 @@ add64loop:
TEXT ·armLoadUint64(SB),7,$0
BL fastCheck64<>(SB)
- MOVW addrptr+0(FP), R1
+ MOVW addr+0(FP), R1
load64loop:
LDREXD (R1), R2 // loads R2 and R3
STREXD R2, (R1), R0 // stores R2 and R3
@@ -93,7 +95,7 @@ load64loop:
TEXT ·armStoreUint64(SB),7,$0
BL fastCheck64<>(SB)
- MOVW addrptr+0(FP), R1
+ MOVW addr+0(FP), R1
MOVW vallo+4(FP), R2
MOVW valhi+8(FP), R3
store64loop:
diff --git a/src/pkg/sync/atomic/asm_freebsd_arm.s b/src/pkg/sync/atomic/asm_freebsd_arm.s
new file mode 100644
index 000000000..6590921b0
--- /dev/null
+++ b/src/pkg/sync/atomic/asm_freebsd_arm.s
@@ -0,0 +1,89 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// FreeBSD/ARM atomic operations.
+// TODO(minux): this only supports ARMv6K or higher.
+
+TEXT ·CompareAndSwapInt32(SB),7,$0
+ B ·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),7,$0
+ B ·armCompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),7,$0
+ B ·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),7,$0
+ B ·CompareAndSwapUint32(SB)
+
+TEXT ·AddInt32(SB),7,$0
+ B ·AddUint32(SB)
+
+TEXT ·AddUint32(SB),7,$0
+ B ·armAddUint32(SB)
+
+TEXT ·AddUintptr(SB),7,$0
+ B ·AddUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),7,$0
+ B ·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),7,$-4
+ B ·armCompareAndSwapUint64(SB)
+
+TEXT ·AddInt64(SB),7,$0
+ B ·addUint64(SB)
+
+TEXT ·AddUint64(SB),7,$0
+ B ·addUint64(SB)
+
+TEXT ·LoadInt32(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),7,$0
+ MOVW addr+0(FP), R1
+load32loop:
+ LDREX (R1), R2 // loads R2
+ STREX R2, (R1), R0 // stores R2
+ CMP $0, R0
+ BNE load32loop
+ MOVW R2, val+4(FP)
+ RET
+
+TEXT ·LoadInt64(SB),7,$0
+ B ·loadUint64(SB)
+
+TEXT ·LoadUint64(SB),7,$0
+ B ·loadUint64(SB)
+
+TEXT ·LoadUintptr(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),7,$0
+ B ·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),7,$0
+ MOVW addr+0(FP), R1
+ MOVW val+4(FP), R2
+storeloop:
+ LDREX (R1), R4 // loads R4
+ STREX R2, (R1), R0 // stores R2
+ CMP $0, R0
+ BNE storeloop
+ RET
+
+TEXT ·StoreInt64(SB),7,$0
+ B ·storeUint64(SB)
+
+TEXT ·StoreUint64(SB),7,$0
+ B ·storeUint64(SB)
+
+TEXT ·StoreUintptr(SB),7,$0
+ B ·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),7,$0
+ B ·StoreUint32(SB)
diff --git a/src/pkg/sync/atomic/asm_linux_arm.s b/src/pkg/sync/atomic/asm_linux_arm.s
index 25dc85804..098acf35b 100644
--- a/src/pkg/sync/atomic/asm_linux_arm.s
+++ b/src/pkg/sync/atomic/asm_linux_arm.s
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !race
+
// Linux/ARM atomic operations.
// Because there is so much variation in ARM devices,
@@ -9,7 +11,7 @@
// implementation at address 0xffff0fc0. Caller sets:
// R0 = old value
// R1 = new value
-// R2 = valptr
+// R2 = addr
// LR = return address
// The function returns with CS true if the swap happened.
// http://lxr.linux.no/linux+v2.6.37.2/arch/arm/kernel/entry-armv.S#L850
@@ -27,7 +29,7 @@ TEXT ·CompareAndSwapInt32(SB),7,$0
// Implement using kernel cas for portability.
TEXT ·CompareAndSwapUint32(SB),7,$0
- MOVW valptr+0(FP), R2
+ MOVW addr+0(FP), R2
MOVW old+4(FP), R0
casagain:
MOVW new+8(FP), R1
@@ -39,7 +41,7 @@ casret:
RET
cascheck:
// Kernel lies; double-check.
- MOVW valptr+0(FP), R2
+ MOVW addr+0(FP), R2
MOVW old+4(FP), R0
MOVW 0(R2), R3
CMP R0, R3
@@ -58,7 +60,7 @@ TEXT ·AddInt32(SB),7,$0
// Implement using kernel cas for portability.
TEXT ·AddUint32(SB),7,$0
- MOVW valptr+0(FP), R2
+ MOVW addr+0(FP), R2
MOVW delta+4(FP), R4
addloop1:
MOVW 0(R2), R0
@@ -72,26 +74,75 @@ addloop1:
TEXT ·AddUintptr(SB),7,$0
B ·AddUint32(SB)
-// The kernel provides no 64-bit compare-and-swap,
-// so use native ARM instructions, which will only work on
-// ARM 11 and later devices.
+TEXT cas64<>(SB),7,$0
+ MOVW $0xffff0f60, PC // __kuser_cmpxchg64: Linux-3.1 and above
+
+TEXT kernelCAS64<>(SB),7,$0
+ // int (*__kuser_cmpxchg64_t)(const int64_t *oldval, const int64_t *newval, volatile int64_t *ptr);
+ MOVW addr+0(FP), R2 // ptr
+ MOVW $4(FP), R0 // oldval
+ MOVW $12(FP), R1 // newval
+ BL cas64<>(SB)
+ MOVW.CS $1, R0 // C is set if the kernel has changed *ptr
+ MOVW.CC $0, R0
+ MOVW R0, 20(FP)
+ RET
+
+TEXT generalCAS64<>(SB),7,$20
+ // bool runtime·cas64(uint64 volatile *addr, uint64 *old, uint64 new)
+ MOVW addr+0(FP), R0
+ MOVW R0, 4(R13)
+ MOVW $4(FP), R1 // oldval
+ MOVW R1, 8(R13)
+ MOVW newlo+12(FP), R2
+ MOVW R2, 12(R13)
+ MOVW newhi+16(FP), R3
+ MOVW R3, 16(R13)
+ BL runtime·cas64(SB)
+ MOVW R0, 20(FP)
+ RET
+
+GLOBL armCAS64(SB), $4
+
+TEXT setupAndCallCAS64<>(SB),7,$-4
+ MOVW $0xffff0ffc, R0 // __kuser_helper_version
+ MOVW (R0), R0
+ // __kuser_cmpxchg64 only present if helper version >= 5
+ CMP $5, R0
+ MOVW.CS $kernelCAS64<>(SB), R1
+ MOVW.CS R1, armCAS64(SB)
+ MOVW.CS R1, PC
+ MOVB runtime·armArch(SB), R0
+ // LDREXD, STREXD only present on ARMv6K or higher
+ CMP $6, R0 // TODO(minux): how to differentiate ARMv6 with ARMv6K?
+ MOVW.CS $·armCompareAndSwapUint64(SB), R1
+ MOVW.CS R1, armCAS64(SB)
+ MOVW.CS R1, PC
+ // we are out of luck, can only use runtime's emulated 64-bit cas
+ MOVW $generalCAS64<>(SB), R1
+ MOVW R1, armCAS64(SB)
+ MOVW R1, PC
+
TEXT ·CompareAndSwapInt64(SB),7,$0
- B ·armCompareAndSwapUint64(SB)
+ B ·CompareAndSwapUint64(SB)
-TEXT ·CompareAndSwapUint64(SB),7,$0
- B ·armCompareAndSwapUint64(SB)
+TEXT ·CompareAndSwapUint64(SB),7,$-4
+ MOVW armCAS64(SB), R0
+ CMP $0, R0
+ MOVW.NE R0, PC
+ B setupAndCallCAS64<>(SB)
TEXT ·AddInt64(SB),7,$0
- B ·armAddUint64(SB)
+ B ·addUint64(SB)
TEXT ·AddUint64(SB),7,$0
- B ·armAddUint64(SB)
+ B ·addUint64(SB)
TEXT ·LoadInt32(SB),7,$0
B ·LoadUint32(SB)
TEXT ·LoadUint32(SB),7,$0
- MOVW addrptr+0(FP), R2
+ MOVW addr+0(FP), R2
loadloop1:
MOVW 0(R2), R0
MOVW R0, R1
@@ -101,10 +152,10 @@ loadloop1:
RET
TEXT ·LoadInt64(SB),7,$0
- B ·armLoadUint64(SB)
+ B ·loadUint64(SB)
TEXT ·LoadUint64(SB),7,$0
- B ·armLoadUint64(SB)
+ B ·loadUint64(SB)
TEXT ·LoadUintptr(SB),7,$0
B ·LoadUint32(SB)
@@ -116,7 +167,7 @@ TEXT ·StoreInt32(SB),7,$0
B ·StoreUint32(SB)
TEXT ·StoreUint32(SB),7,$0
- MOVW addrptr+0(FP), R2
+ MOVW addr+0(FP), R2
MOVW val+4(FP), R1
storeloop1:
MOVW 0(R2), R0
@@ -125,10 +176,10 @@ storeloop1:
RET
TEXT ·StoreInt64(SB),7,$0
- B ·armStoreUint64(SB)
+ B ·storeUint64(SB)
TEXT ·StoreUint64(SB),7,$0
- B ·armStoreUint64(SB)
+ B ·storeUint64(SB)
TEXT ·StoreUintptr(SB),7,$0
B ·StoreUint32(SB)
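
The interesting part of this file is the new 64-bit CAS dispatch: the first call to CompareAndSwapUint64 reads the kernel's __kuser_helper_version word, prefers the __kuser_cmpxchg64 helper when the version is at least 5 (Linux 3.1 and above), falls back to the native LDREXD/STREXD routine on ARMv6K or newer, and otherwise uses the runtime's emulated cas64; the winner is cached in armCAS64 so later calls branch straight to it. A rough Go rendering of that control flow, with hypothetical probe functions and a deliberately non-atomic stand-in body (the real selection and routines are assembly):

	package main

	import "fmt"

	type cas64 func(addr *uint64, old, new uint64) bool

	var armCAS64 cas64 // chosen once, reused by every later call

	// plainCAS64 stands in for all three real implementations (kernel
	// helper, LDREXD/STREXD, runtime emulation). It is NOT atomic.
	func plainCAS64(addr *uint64, old, new uint64) bool {
		if *addr == old {
			*addr = new
			return true
		}
		return false
	}

	func compareAndSwapUint64(addr *uint64, old, new uint64) bool {
		if armCAS64 == nil {
			switch {
			case kuserHelperVersion() >= 5: // __kuser_cmpxchg64 present
				armCAS64 = plainCAS64 // really: kernelCAS64
			case armArch() >= 6: // ARMv6K or higher
				armCAS64 = plainCAS64 // really: armCompareAndSwapUint64
			default:
				armCAS64 = plainCAS64 // really: generalCAS64 via the runtime's cas64
			}
		}
		return armCAS64(addr, old, new)
	}

	func kuserHelperVersion() int { return 5 } // placeholder probe
	func armArch() int            { return 7 } // placeholder probe

	func main() {
		var x uint64
		fmt.Println(compareAndSwapUint64(&x, 0, 1), x) // true 1
	}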
diff --git a/src/pkg/sync/atomic/asm_netbsd_arm.s b/src/pkg/sync/atomic/asm_netbsd_arm.s
new file mode 100644
index 000000000..677f3daaa
--- /dev/null
+++ b/src/pkg/sync/atomic/asm_netbsd_arm.s
@@ -0,0 +1,89 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NetBSD/ARM atomic operations.
+// TODO(minux): this only supports ARMv6K or higher.
+
+TEXT ·CompareAndSwapInt32(SB),7,$0
+ B ·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),7,$0
+ B ·armCompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),7,$0
+ B ·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),7,$0
+ B ·CompareAndSwapUint32(SB)
+
+TEXT ·AddInt32(SB),7,$0
+ B ·AddUint32(SB)
+
+TEXT ·AddUint32(SB),7,$0
+ B ·armAddUint32(SB)
+
+TEXT ·AddUintptr(SB),7,$0
+ B ·AddUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),7,$0
+ B ·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),7,$-4
+ B ·armCompareAndSwapUint64(SB)
+
+TEXT ·AddInt64(SB),7,$0
+ B ·addUint64(SB)
+
+TEXT ·AddUint64(SB),7,$0
+ B ·addUint64(SB)
+
+TEXT ·LoadInt32(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),7,$0
+ MOVW addr+0(FP), R1
+load32loop:
+ LDREX (R1), R2 // loads R2
+ STREX R2, (R1), R0 // stores R2
+ CMP $0, R0
+ BNE load32loop
+ MOVW R2, val+4(FP)
+ RET
+
+TEXT ·LoadInt64(SB),7,$0
+ B ·loadUint64(SB)
+
+TEXT ·LoadUint64(SB),7,$0
+ B ·loadUint64(SB)
+
+TEXT ·LoadUintptr(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),7,$0
+ B ·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),7,$0
+ B ·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),7,$0
+ MOVW addr+0(FP), R1
+ MOVW val+4(FP), R2
+storeloop:
+ LDREX (R1), R4 // loads R4
+ STREX R2, (R1), R0 // stores R2
+ CMP $0, R0
+ BNE storeloop
+ RET
+
+TEXT ·StoreInt64(SB),7,$0
+ B ·storeUint64(SB)
+
+TEXT ·StoreUint64(SB),7,$0
+ B ·storeUint64(SB)
+
+TEXT ·StoreUintptr(SB),7,$0
+ B ·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),7,$0
+ B ·StoreUint32(SB)
diff --git a/src/pkg/sync/atomic/atomic_test.go b/src/pkg/sync/atomic/atomic_test.go
index f60d997ce..3e105561c 100644
--- a/src/pkg/sync/atomic/atomic_test.go
+++ b/src/pkg/sync/atomic/atomic_test.go
@@ -82,8 +82,7 @@ func TestAddUint32(t *testing.T) {
func TestAddInt64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before int64
@@ -107,8 +106,7 @@ func TestAddInt64(t *testing.T) {
func TestAddUint64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before uint64
@@ -213,8 +211,7 @@ func TestCompareAndSwapUint32(t *testing.T) {
func TestCompareAndSwapInt64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before int64
@@ -246,8 +243,7 @@ func TestCompareAndSwapInt64(t *testing.T) {
func TestCompareAndSwapUint64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before uint64
@@ -381,8 +377,7 @@ func TestLoadUint32(t *testing.T) {
func TestLoadInt64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before int64
@@ -405,8 +400,7 @@ func TestLoadInt64(t *testing.T) {
func TestLoadUint64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before uint64
@@ -515,8 +509,7 @@ func TestStoreUint32(t *testing.T) {
func TestStoreInt64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before int64
@@ -540,8 +533,7 @@ func TestStoreInt64(t *testing.T) {
func TestStoreUint64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
var x struct {
before uint64
@@ -640,73 +632,73 @@ func init() {
}
}
-func hammerAddInt32(uval *uint32, count int) {
- val := (*int32)(unsafe.Pointer(uval))
+func hammerAddInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
- AddInt32(val, 1)
+ AddInt32(addr, 1)
}
}
-func hammerAddUint32(val *uint32, count int) {
+func hammerAddUint32(addr *uint32, count int) {
for i := 0; i < count; i++ {
- AddUint32(val, 1)
+ AddUint32(addr, 1)
}
}
-func hammerAddUintptr32(uval *uint32, count int) {
+func hammerAddUintptr32(uaddr *uint32, count int) {
// only safe when uintptr is 32-bit.
// not called on 64-bit systems.
- val := (*uintptr)(unsafe.Pointer(uval))
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
- AddUintptr(val, 1)
+ AddUintptr(addr, 1)
}
}
-func hammerCompareAndSwapInt32(uval *uint32, count int) {
- val := (*int32)(unsafe.Pointer(uval))
+func hammerCompareAndSwapInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapInt32(val, v, v+1) {
+ v := *addr
+ if CompareAndSwapInt32(addr, v, v+1) {
break
}
}
}
}
-func hammerCompareAndSwapUint32(val *uint32, count int) {
+func hammerCompareAndSwapUint32(addr *uint32, count int) {
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapUint32(val, v, v+1) {
+ v := *addr
+ if CompareAndSwapUint32(addr, v, v+1) {
break
}
}
}
}
-func hammerCompareAndSwapUintptr32(uval *uint32, count int) {
+func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) {
// only safe when uintptr is 32-bit.
// not called on 64-bit systems.
- val := (*uintptr)(unsafe.Pointer(uval))
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapUintptr(val, v, v+1) {
+ v := *addr
+ if CompareAndSwapUintptr(addr, v, v+1) {
break
}
}
}
}
-func hammerCompareAndSwapPointer32(uval *uint32, count int) {
+func hammerCompareAndSwapPointer32(uaddr *uint32, count int) {
// only safe when uintptr is 32-bit.
// not called on 64-bit systems.
- val := (*unsafe.Pointer)(unsafe.Pointer(uval))
+ addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapPointer(val, v, unsafe.Pointer(uintptr(v)+1)) {
+ v := *addr
+ if CompareAndSwapPointer(addr, v, unsafe.Pointer(uintptr(v)+1)) {
break
}
}
@@ -765,73 +757,73 @@ func init() {
}
}
-func hammerAddInt64(uval *uint64, count int) {
- val := (*int64)(unsafe.Pointer(uval))
+func hammerAddInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
- AddInt64(val, 1)
+ AddInt64(addr, 1)
}
}
-func hammerAddUint64(val *uint64, count int) {
+func hammerAddUint64(addr *uint64, count int) {
for i := 0; i < count; i++ {
- AddUint64(val, 1)
+ AddUint64(addr, 1)
}
}
-func hammerAddUintptr64(uval *uint64, count int) {
+func hammerAddUintptr64(uaddr *uint64, count int) {
// only safe when uintptr is 64-bit.
// not called on 32-bit systems.
- val := (*uintptr)(unsafe.Pointer(uval))
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
- AddUintptr(val, 1)
+ AddUintptr(addr, 1)
}
}
-func hammerCompareAndSwapInt64(uval *uint64, count int) {
- val := (*int64)(unsafe.Pointer(uval))
+func hammerCompareAndSwapInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapInt64(val, v, v+1) {
+ v := *addr
+ if CompareAndSwapInt64(addr, v, v+1) {
break
}
}
}
}
-func hammerCompareAndSwapUint64(val *uint64, count int) {
+func hammerCompareAndSwapUint64(addr *uint64, count int) {
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapUint64(val, v, v+1) {
+ v := *addr
+ if CompareAndSwapUint64(addr, v, v+1) {
break
}
}
}
}
-func hammerCompareAndSwapUintptr64(uval *uint64, count int) {
+func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) {
// only safe when uintptr is 64-bit.
// not called on 32-bit systems.
- val := (*uintptr)(unsafe.Pointer(uval))
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapUintptr(val, v, v+1) {
+ v := *addr
+ if CompareAndSwapUintptr(addr, v, v+1) {
break
}
}
}
}
-func hammerCompareAndSwapPointer64(uval *uint64, count int) {
+func hammerCompareAndSwapPointer64(uaddr *uint64, count int) {
// only safe when uintptr is 64-bit.
// not called on 32-bit systems.
- val := (*unsafe.Pointer)(unsafe.Pointer(uval))
+ addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
for i := 0; i < count; i++ {
for {
- v := *val
- if CompareAndSwapPointer(val, v, unsafe.Pointer(uintptr(v)+1)) {
+ v := *addr
+ if CompareAndSwapPointer(addr, v, unsafe.Pointer(uintptr(v)+1)) {
break
}
}
@@ -840,8 +832,7 @@ func hammerCompareAndSwapPointer64(uval *uint64, count int) {
func TestHammer64(t *testing.T) {
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
const p = 4
n := 100000
@@ -871,9 +862,9 @@ func TestHammer64(t *testing.T) {
}
}
-func hammerStoreLoadInt32(t *testing.T, valp unsafe.Pointer) {
- val := (*int32)(valp)
- v := LoadInt32(val)
+func hammerStoreLoadInt32(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*int32)(paddr)
+ v := LoadInt32(addr)
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
@@ -883,12 +874,12 @@ func hammerStoreLoadInt32(t *testing.T, valp unsafe.Pointer) {
if vlo == 1e4 {
new = 0
}
- StoreInt32(val, new)
+ StoreInt32(addr, new)
}
-func hammerStoreLoadUint32(t *testing.T, valp unsafe.Pointer) {
- val := (*uint32)(valp)
- v := LoadUint32(val)
+func hammerStoreLoadUint32(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uint32)(paddr)
+ v := LoadUint32(addr)
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
@@ -898,38 +889,38 @@ func hammerStoreLoadUint32(t *testing.T, valp unsafe.Pointer) {
if vlo == 1e4 {
new = 0
}
- StoreUint32(val, new)
+ StoreUint32(addr, new)
}
-func hammerStoreLoadInt64(t *testing.T, valp unsafe.Pointer) {
- val := (*int64)(valp)
- v := LoadInt64(val)
+func hammerStoreLoadInt64(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*int64)(paddr)
+ v := LoadInt64(addr)
vlo := v & ((1 << 32) - 1)
vhi := v >> 32
if vlo != vhi {
t.Fatalf("Int64: %#x != %#x", vlo, vhi)
}
new := v + 1 + 1<<32
- StoreInt64(val, new)
+ StoreInt64(addr, new)
}
-func hammerStoreLoadUint64(t *testing.T, valp unsafe.Pointer) {
- val := (*uint64)(valp)
- v := LoadUint64(val)
+func hammerStoreLoadUint64(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uint64)(paddr)
+ v := LoadUint64(addr)
vlo := v & ((1 << 32) - 1)
vhi := v >> 32
if vlo != vhi {
t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
}
new := v + 1 + 1<<32
- StoreUint64(val, new)
+ StoreUint64(addr, new)
}
-func hammerStoreLoadUintptr(t *testing.T, valp unsafe.Pointer) {
- val := (*uintptr)(valp)
+func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uintptr)(paddr)
var test64 uint64 = 1 << 50
arch32 := uintptr(test64) == 0
- v := LoadUintptr(val)
+ v := LoadUintptr(addr)
new := v
if arch32 {
vlo := v & ((1 << 16) - 1)
@@ -950,14 +941,14 @@ func hammerStoreLoadUintptr(t *testing.T, valp unsafe.Pointer) {
inc := uint64(1 + 1<<32)
new = v + uintptr(inc)
}
- StoreUintptr(val, new)
+ StoreUintptr(addr, new)
}
-func hammerStoreLoadPointer(t *testing.T, valp unsafe.Pointer) {
- val := (*unsafe.Pointer)(valp)
+func hammerStoreLoadPointer(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*unsafe.Pointer)(paddr)
var test64 uint64 = 1 << 50
arch32 := uintptr(test64) == 0
- v := uintptr(LoadPointer(val))
+ v := uintptr(LoadPointer(addr))
new := v
if arch32 {
vlo := v & ((1 << 16) - 1)
@@ -978,7 +969,7 @@ func hammerStoreLoadPointer(t *testing.T, valp unsafe.Pointer) {
inc := uint64(1 + 1<<32)
new = v + uintptr(inc)
}
- StorePointer(val, unsafe.Pointer(new))
+ StorePointer(addr, unsafe.Pointer(new))
}
func TestHammerStoreLoad(t *testing.T) {
@@ -1013,8 +1004,7 @@ func TestHammerStoreLoad(t *testing.T) {
func TestStoreLoadSeqCst32(t *testing.T) {
if runtime.NumCPU() == 1 {
- t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
- return
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
N := int32(1e3)
@@ -1054,12 +1044,10 @@ func TestStoreLoadSeqCst32(t *testing.T) {
func TestStoreLoadSeqCst64(t *testing.T) {
if runtime.NumCPU() == 1 {
- t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
- return
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
}
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
N := int64(1e3)
@@ -1099,8 +1087,7 @@ func TestStoreLoadSeqCst64(t *testing.T) {
func TestStoreLoadRelAcq32(t *testing.T) {
if runtime.NumCPU() == 1 {
- t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
- return
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
N := int32(1e3)
@@ -1132,7 +1119,7 @@ func TestStoreLoadRelAcq32(t *testing.T) {
d1 := X.data1
d2 := X.data2
if d1 != i || d2 != float32(i) {
- t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
}
}
}
@@ -1145,12 +1132,10 @@ func TestStoreLoadRelAcq32(t *testing.T) {
func TestStoreLoadRelAcq64(t *testing.T) {
if runtime.NumCPU() == 1 {
- t.Logf("Skipping test on %v processor machine", runtime.NumCPU())
- return
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
}
if test64err != nil {
- t.Logf("Skipping 64-bit tests: %v", test64err)
- return
+ t.Skipf("Skipping 64-bit tests: %v", test64err)
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
N := int64(1e3)
@@ -1182,7 +1167,7 @@ func TestStoreLoadRelAcq64(t *testing.T) {
d1 := X.data1
d2 := X.data2
if d1 != i || d2 != float64(i) {
- t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
}
}
}
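
Throughout atomic_test.go, the pattern "t.Logf(...); return" becomes "t.Skipf(...)". Skipf, new in the Go 1.1 testing package, logs the message and stops the test while marking it skipped, so environment-dependent tests no longer show up as silent passes. Schematically (TestNeeds64Bit is a made-up name; test64err is the file's real probe variable):

	package atomic_test

	import "testing"

	// test64err mirrors the file's init-time probe of 64-bit atomics.
	var test64err error

	func TestNeeds64Bit(t *testing.T) {
		if test64err != nil {
			t.Skipf("Skipping 64-bit tests: %v", test64err) // reported as a skip, not a pass
		}
		// ... assertions that need working 64-bit atomics ...
	}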
diff --git a/src/pkg/sync/atomic/doc.go b/src/pkg/sync/atomic/doc.go
index ecb4808ce..27a12c984 100644
--- a/src/pkg/sync/atomic/doc.go
+++ b/src/pkg/sync/atomic/doc.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !race
+
// Package atomic provides low-level atomic memory primitives
// useful for implementing synchronization algorithms.
//
@@ -14,54 +16,67 @@
// The compare-and-swap operation, implemented by the CompareAndSwapT
// functions, is the atomic equivalent of:
//
-// if *val == old {
-// *val = new
+// if *addr == old {
+// *addr = new
// return true
// }
// return false
//
+// The add operation, implemented by the AddT functions, is the atomic
+// equivalent of:
+//
+// *addr += delta
+// return *addr
+//
+// The load and store operations, implemented by the LoadT and StoreT
+// functions, are the atomic equivalents of "return *addr" and
+// "*addr = val".
+//
package atomic
import (
"unsafe"
)
-// BUG(rsc): On ARM, the 64-bit functions use instructions unavailable before ARM 11.
+// BUG(rsc): On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
//
-// On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
+// On both ARM and x86-32, it is the caller's responsibility to arrange for 64-bit
+// alignment of 64-bit words accessed atomically. The first word in a global
+// variable or in an allocated struct or slice can be relied upon to be
+// 64-bit aligned.
// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
-func CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)
+func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
-func CompareAndSwapInt64(val *int64, old, new int64) (swapped bool)
+func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
-func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool)
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
-func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
-func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)
+func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)
// CompareAndSwapPointer executes the compare-and-swap operation for a unsafe.Pointer value.
-func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
+func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
-// AddInt32 atomically adds delta to *val and returns the new value.
-func AddInt32(val *int32, delta int32) (new int32)
+// AddInt32 atomically adds delta to *addr and returns the new value.
+func AddInt32(addr *int32, delta int32) (new int32)
-// AddUint32 atomically adds delta to *val and returns the new value.
-func AddUint32(val *uint32, delta uint32) (new uint32)
+// AddUint32 atomically adds delta to *addr and returns the new value.
+func AddUint32(addr *uint32, delta uint32) (new uint32)
-// AddInt64 atomically adds delta to *val and returns the new value.
-func AddInt64(val *int64, delta int64) (new int64)
+// AddInt64 atomically adds delta to *addr and returns the new value.
+func AddInt64(addr *int64, delta int64) (new int64)
-// AddUint64 atomically adds delta to *val and returns the new value.
-func AddUint64(val *uint64, delta uint64) (new uint64)
+// AddUint64 atomically adds delta to *addr and returns the new value.
+func AddUint64(addr *uint64, delta uint64) (new uint64)
-// AddUintptr atomically adds delta to *val and returns the new value.
-func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
+// AddUintptr atomically adds delta to *addr and returns the new value.
+func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
// LoadInt32 atomically loads *addr.
func LoadInt32(addr *int32) (val int32)
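
The rewritten BUG note above trades one ARM caveat for another: the 64-bit functions no longer demand an ARM 11 core, but on both ARM and x86-32 the caller must now keep atomically accessed 64-bit words 8-byte aligned, with the stated guarantee that the first word of a global or of an allocated struct or slice qualifies. In struct layouts that usually means putting the uint64 fields first; a minimal sketch:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type counters struct {
		hits  uint64 // first word of an allocated struct: 64-bit aligned
		flags uint32 // keep narrower fields after the 64-bit ones
	}

	func main() {
		c := new(counters) // heap-allocated, so c.hits is 64-bit aligned
		atomic.AddUint64(&c.hits, 1)
		fmt.Println(atomic.LoadUint64(&c.hits)) // 1
	}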
diff --git a/src/pkg/sync/atomic/race.go b/src/pkg/sync/atomic/race.go
new file mode 100644
index 000000000..242bbf298
--- /dev/null
+++ b/src/pkg/sync/atomic/race.go
@@ -0,0 +1,213 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package atomic
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// We use runtime.RaceRead() inside of atomic operations to catch races
+// between atomic and non-atomic operations. It will also catch races
+// between Mutex.Lock() and mutex overwrite (mu = Mutex{}). Since we use
+// only RaceRead() we won't catch races with non-atomic loads.
+// Otherwise (if we use RaceWrite()) we will report races
+// between atomic operations (false positives).
+
+var mtx uint32 = 1 // same for all
+
+func CompareAndSwapInt32(val *int32, old, new int32) bool {
+ return CompareAndSwapUint32((*uint32)(unsafe.Pointer(val)), uint32(old), uint32(new))
+}
+
+func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool) {
+ swapped = false
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(val))
+ runtime.RaceAcquire(unsafe.Pointer(val))
+ if *val == old {
+ *val = new
+ swapped = true
+ runtime.RaceReleaseMerge(unsafe.Pointer(val))
+ }
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func CompareAndSwapInt64(val *int64, old, new int64) bool {
+ return CompareAndSwapUint64((*uint64)(unsafe.Pointer(val)), uint64(old), uint64(new))
+}
+
+func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) {
+ swapped = false
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(val))
+ runtime.RaceAcquire(unsafe.Pointer(val))
+ if *val == old {
+ *val = new
+ swapped = true
+ runtime.RaceReleaseMerge(unsafe.Pointer(val))
+ }
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) {
+ swapped = false
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(val))
+ runtime.RaceAcquire(unsafe.Pointer(val))
+ if *val == old {
+ *val = new
+ swapped = true
+ runtime.RaceReleaseMerge(unsafe.Pointer(val))
+ }
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool) {
+ swapped = false
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(val))
+ runtime.RaceAcquire(unsafe.Pointer(val))
+ if *val == old {
+ *val = new
+ swapped = true
+ runtime.RaceReleaseMerge(unsafe.Pointer(val))
+ }
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func AddInt32(val *int32, delta int32) int32 {
+ return int32(AddUint32((*uint32)(unsafe.Pointer(val)), uint32(delta)))
+}
+
+func AddUint32(val *uint32, delta uint32) (new uint32) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(val))
+ runtime.RaceAcquire(unsafe.Pointer(val))
+ *val = *val + delta
+ new = *val
+ runtime.RaceReleaseMerge(unsafe.Pointer(val))
+ runtime.RaceSemrelease(&mtx)
+
+ return
+}
+
+func AddInt64(val *int64, delta int64) int64 {
+ return int64(AddUint64((*uint64)(unsafe.Pointer(val)), uint64(delta)))
+}
+
+func AddUint64(val *uint64, delta uint64) (new uint64) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(val))
+ runtime.RaceAcquire(unsafe.Pointer(val))
+ *val = *val + delta
+ new = *val
+ runtime.RaceReleaseMerge(unsafe.Pointer(val))
+ runtime.RaceSemrelease(&mtx)
+
+ return
+}
+
+func AddUintptr(val *uintptr, delta uintptr) (new uintptr) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(val))
+ runtime.RaceAcquire(unsafe.Pointer(val))
+ *val = *val + delta
+ new = *val
+ runtime.RaceReleaseMerge(unsafe.Pointer(val))
+ runtime.RaceSemrelease(&mtx)
+
+ return
+}
+
+func LoadInt32(addr *int32) int32 {
+ return int32(LoadUint32((*uint32)(unsafe.Pointer(addr))))
+}
+
+func LoadUint32(addr *uint32) (val uint32) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ runtime.RaceAcquire(unsafe.Pointer(addr))
+ val = *addr
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func LoadInt64(addr *int64) int64 {
+ return int64(LoadUint64((*uint64)(unsafe.Pointer(addr))))
+}
+
+func LoadUint64(addr *uint64) (val uint64) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ runtime.RaceAcquire(unsafe.Pointer(addr))
+ val = *addr
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ runtime.RaceAcquire(unsafe.Pointer(addr))
+ val = *addr
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func LoadUintptr(addr *uintptr) (val uintptr) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ runtime.RaceAcquire(unsafe.Pointer(addr))
+ val = *addr
+ runtime.RaceSemrelease(&mtx)
+ return
+}
+
+func StoreInt32(addr *int32, val int32) {
+ StoreUint32((*uint32)(unsafe.Pointer(addr)), uint32(val))
+}
+
+func StoreUint32(addr *uint32, val uint32) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ *addr = val
+ runtime.RaceRelease(unsafe.Pointer(addr))
+ runtime.RaceSemrelease(&mtx)
+}
+
+func StoreInt64(addr *int64, val int64) {
+ StoreUint64((*uint64)(unsafe.Pointer(addr)), uint64(val))
+}
+
+func StoreUint64(addr *uint64, val uint64) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ *addr = val
+ runtime.RaceRelease(unsafe.Pointer(addr))
+ runtime.RaceSemrelease(&mtx)
+}
+
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ *addr = val
+ runtime.RaceRelease(unsafe.Pointer(addr))
+ runtime.RaceSemrelease(&mtx)
+}
+
+func StoreUintptr(addr *uintptr, val uintptr) {
+ runtime.RaceSemacquire(&mtx)
+ runtime.RaceRead(unsafe.Pointer(addr))
+ *addr = val
+ runtime.RaceRelease(unsafe.Pointer(addr))
+ runtime.RaceSemrelease(&mtx)
+}
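
As the header comment explains, every operation in this file issues a RaceRead on the target word: that is enough to flag atomic operations racing with plain writes (including wholesale mutex overwrite), while staying silent about plain loads so that two atomic operations are never reported against each other. A small program of the kind this instrumentation exists to catch, assuming a race-enabled build (go run -race):

	package main

	import (
		"sync/atomic"
		"time"
	)

	var n uint32

	func main() {
		go func() {
			atomic.AddUint32(&n, 1) // instrumented as, among other things, a read of n
		}()
		n = 0 // plain write racing the atomic op; the -race build reports this pair
		time.Sleep(10 * time.Millisecond)
	}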
diff --git a/src/pkg/sync/cond.go b/src/pkg/sync/cond.go
index 1fc3deaf1..491b98569 100644
--- a/src/pkg/sync/cond.go
+++ b/src/pkg/sync/cond.go
@@ -56,6 +56,9 @@ func NewCond(l Locker) *Cond {
// c.L.Unlock()
//
func (c *Cond) Wait() {
+ if raceenabled {
+ raceDisable()
+ }
c.m.Lock()
if c.newSema == nil {
c.newSema = new(uint32)
@@ -63,6 +66,9 @@ func (c *Cond) Wait() {
s := c.newSema
c.newWaiters++
c.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
c.L.Unlock()
runtime_Semacquire(s)
c.L.Lock()
@@ -73,6 +79,9 @@ func (c *Cond) Wait() {
// It is allowed but not required for the caller to hold c.L
// during the call.
func (c *Cond) Signal() {
+ if raceenabled {
+ raceDisable()
+ }
c.m.Lock()
if c.oldWaiters == 0 && c.newWaiters > 0 {
// Retire old generation; rename new to old.
@@ -86,6 +95,9 @@ func (c *Cond) Signal() {
runtime_Semrelease(c.oldSema)
}
c.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
}
// Broadcast wakes all goroutines waiting on c.
@@ -93,6 +105,9 @@ func (c *Cond) Signal() {
// It is allowed but not required for the caller to hold c.L
// during the call.
func (c *Cond) Broadcast() {
+ if raceenabled {
+ raceDisable()
+ }
c.m.Lock()
// Wake both generations.
if c.oldWaiters > 0 {
@@ -109,4 +124,7 @@ func (c *Cond) Broadcast() {
c.newSema = nil
}
c.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
}
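
The raceDisable/raceEnable bracketing added to Wait, Signal, and Broadcast hides the Cond's own traffic on c.m and the semaphore fields from the detector: users are not shown reports about sync internals, and no happens-before edges are derived from the implementation's private mutex that the user's code did not actually establish.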
diff --git a/src/pkg/sync/example_test.go b/src/pkg/sync/example_test.go
index 156492400..031c87f03 100644
--- a/src/pkg/sync/example_test.go
+++ b/src/pkg/sync/example_test.go
@@ -24,10 +24,10 @@ func ExampleWaitGroup() {
wg.Add(1)
// Launch a goroutine to fetch the URL.
go func(url string) {
+ // Decrement the counter when the goroutine completes.
+ defer wg.Done()
// Fetch the URL.
http.Get(url)
- // Decrement the counter.
- wg.Done()
}(url)
}
// Wait for all HTTP fetches to complete.
@@ -37,7 +37,7 @@ func ExampleWaitGroup() {
func ExampleOnce() {
var once sync.Once
onceBody := func() {
- fmt.Printf("Only once\n")
+ fmt.Println("Only once")
}
done := make(chan bool)
for i := 0; i < 10; i++ {
diff --git a/src/pkg/sync/mutex.go b/src/pkg/sync/mutex.go
index 9494cc3f8..b4629ebca 100644
--- a/src/pkg/sync/mutex.go
+++ b/src/pkg/sync/mutex.go
@@ -10,7 +10,10 @@
// Values containing the types defined in this package should not be copied.
package sync
-import "sync/atomic"
+import (
+ "sync/atomic"
+ "unsafe"
+)
// A Mutex is a mutual exclusion lock.
// Mutexes can be created as part of other structures;
@@ -38,6 +41,9 @@ const (
func (m *Mutex) Lock() {
// Fast path: grab unlocked mutex.
if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
+ if raceenabled {
+ raceAcquire(unsafe.Pointer(m))
+ }
return
}
@@ -61,6 +67,10 @@ func (m *Mutex) Lock() {
awoke = true
}
}
+
+ if raceenabled {
+ raceAcquire(unsafe.Pointer(m))
+ }
}
// Unlock unlocks m.
@@ -70,6 +80,10 @@ func (m *Mutex) Lock() {
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
+ if raceenabled {
+ raceRelease(unsafe.Pointer(m))
+ }
+
// Fast path: drop lock bit.
new := atomic.AddInt32(&m.state, -mutexLocked)
if (new+mutexLocked)&mutexLocked == 0 {
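
The placement of the instrumentation is the point here: Lock calls raceAcquire only once the mutex is actually held (on both the fast and slow paths), and Unlock calls raceRelease before giving it up, which hands the race detector exactly the happens-before edge a mutex is supposed to provide.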
diff --git a/src/pkg/sync/once.go b/src/pkg/sync/once.go
index 04b714a3e..1699e86a9 100644
--- a/src/pkg/sync/once.go
+++ b/src/pkg/sync/once.go
@@ -38,6 +38,6 @@ func (o *Once) Do(f func()) {
defer o.m.Unlock()
if o.done == 0 {
f()
- atomic.CompareAndSwapUint32(&o.done, 0, 1)
+ atomic.StoreUint32(&o.done, 1)
}
}
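
The CAS here was doing more work than needed: the store runs with o.m held and only after o.done has been observed to be 0, so the compare half of CompareAndSwapUint32 can never fail, and a plain atomic StoreUint32 publishes the flag with less machinery.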
diff --git a/src/pkg/sync/once_test.go b/src/pkg/sync/once_test.go
index 37075af17..183069a1a 100644
--- a/src/pkg/sync/once_test.go
+++ b/src/pkg/sync/once_test.go
@@ -17,8 +17,11 @@ func (o *one) Increment() {
*o++
}
-func run(once *Once, o *one, c chan bool) {
+func run(t *testing.T, once *Once, o *one, c chan bool) {
once.Do(func() { o.Increment() })
+ if v := *o; v != 1 {
+ t.Errorf("once failed inside run: %d is not 1", v)
+ }
c <- true
}
@@ -28,14 +31,34 @@ func TestOnce(t *testing.T) {
c := make(chan bool)
const N = 10
for i := 0; i < N; i++ {
- go run(once, o, c)
+ go run(t, once, o, c)
}
for i := 0; i < N; i++ {
<-c
}
if *o != 1 {
- t.Errorf("once failed: %d is not 1", *o)
+ t.Errorf("once failed outside run: %d is not 1", *o)
+ }
+}
+
+func TestOncePanic(t *testing.T) {
+ once := new(Once)
+ for i := 0; i < 2; i++ {
+ func() {
+ defer func() {
+ if recover() == nil {
+ t.Fatalf("Once.Do() has not panic'ed")
+ }
+ }()
+ once.Do(func() {
+ panic("failed")
+ })
+ }()
}
+ once.Do(func() {})
+ once.Do(func() {
+ t.Fatalf("Once called twice")
+ })
}
func BenchmarkOnce(b *testing.B) {
diff --git a/src/pkg/sync/race.go b/src/pkg/sync/race.go
new file mode 100644
index 000000000..d9431af6f
--- /dev/null
+++ b/src/pkg/sync/race.go
@@ -0,0 +1,34 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package sync
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+const raceenabled = true
+
+func raceAcquire(addr unsafe.Pointer) {
+ runtime.RaceAcquire(addr)
+}
+
+func raceRelease(addr unsafe.Pointer) {
+ runtime.RaceRelease(addr)
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+ runtime.RaceReleaseMerge(addr)
+}
+
+func raceDisable() {
+ runtime.RaceDisable()
+}
+
+func raceEnable() {
+ runtime.RaceEnable()
+}
diff --git a/src/pkg/sync/race0.go b/src/pkg/sync/race0.go
new file mode 100644
index 000000000..bef14f974
--- /dev/null
+++ b/src/pkg/sync/race0.go
@@ -0,0 +1,28 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+package sync
+
+import (
+ "unsafe"
+)
+
+const raceenabled = false
+
+func raceAcquire(addr unsafe.Pointer) {
+}
+
+func raceRelease(addr unsafe.Pointer) {
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+}
+
+func raceDisable() {
+}
+
+func raceEnable() {
+}
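
Taken together, race.go and race0.go are the standard build-tag pattern for optional instrumentation: both files define the same names, exactly one is compiled into any given build, and because raceenabled is a constant, every "if raceenabled { ... }" in the package is eliminated by the compiler in ordinary builds. The same shape transfers to any opt-in hook; a hypothetical two-file sketch using a made-up "trace" tag:

	// trace_on.go (compiled only with: go build -tags trace)

	// +build trace

	package pkg

	const traceenabled = true

	func traceEvent(msg string) { println("trace:", msg) }

	// trace_off.go (the default build)

	// +build !trace

	package pkg

	const traceenabled = false

	func traceEvent(msg string) {} // no-op; guarded call sites compile away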
diff --git a/src/pkg/sync/rwmutex.go b/src/pkg/sync/rwmutex.go
index 782a9c319..b494c6435 100644
--- a/src/pkg/sync/rwmutex.go
+++ b/src/pkg/sync/rwmutex.go
@@ -4,7 +4,10 @@
package sync
-import "sync/atomic"
+import (
+ "sync/atomic"
+ "unsafe"
+)
// An RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers
@@ -24,10 +27,17 @@ const rwmutexMaxReaders = 1 << 30
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
+ if raceenabled {
+ raceDisable()
+ }
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it.
runtime_Semacquire(&rw.readerSem)
}
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(&rw.readerSem))
+ }
}
// RUnlock undoes a single RLock call;
@@ -35,6 +45,10 @@ func (rw *RWMutex) RLock() {
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ raceDisable()
+ }
if atomic.AddInt32(&rw.readerCount, -1) < 0 {
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
@@ -42,6 +56,9 @@ func (rw *RWMutex) RUnlock() {
runtime_Semrelease(&rw.writerSem)
}
}
+ if raceenabled {
+ raceEnable()
+ }
}
// Lock locks rw for writing.
@@ -51,6 +68,9 @@ func (rw *RWMutex) RUnlock() {
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
+ if raceenabled {
+ raceDisable()
+ }
// First, resolve competition with other writers.
rw.w.Lock()
// Announce to readers there is a pending writer.
@@ -59,6 +79,11 @@ func (rw *RWMutex) Lock() {
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
runtime_Semacquire(&rw.writerSem)
}
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(&rw.readerSem))
+ raceAcquire(unsafe.Pointer(&rw.writerSem))
+ }
}
// Unlock unlocks rw for writing. It is a run-time error if rw is
@@ -68,6 +93,12 @@ func (rw *RWMutex) Lock() {
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
+ if raceenabled {
+ raceRelease(unsafe.Pointer(&rw.readerSem))
+ raceRelease(unsafe.Pointer(&rw.writerSem))
+ raceDisable()
+ }
+
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
// Unblock blocked readers, if any.
@@ -76,6 +107,9 @@ func (rw *RWMutex) Unlock() {
}
// Allow other writers to proceed.
rw.w.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
}
// RLocker returns a Locker interface that implements
diff --git a/src/pkg/sync/waitgroup.go b/src/pkg/sync/waitgroup.go
index 0165b1ffb..1277f1c6d 100644
--- a/src/pkg/sync/waitgroup.go
+++ b/src/pkg/sync/waitgroup.go
@@ -4,7 +4,10 @@
package sync
-import "sync/atomic"
+import (
+ "sync/atomic"
+ "unsafe"
+)
// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
@@ -31,11 +34,22 @@ type WaitGroup struct {
// G3: Wait() // G1 still hasn't run, G3 finds sema == 1, unblocked! Bug.
// Add adds delta, which may be negative, to the WaitGroup counter.
-// If the counter becomes zero, all goroutines blocked on Wait() are released.
+// If the counter becomes zero, all goroutines blocked on Wait are released.
+// If the counter goes negative, Add panics.
+//
+// Note that calls with positive delta must happen before the call to Wait,
+// or else Wait may wait for too small a group. Typically this means the calls
+// to Add should execute before the statement creating the goroutine or
+// other event to be waited for. See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(wg))
+ raceDisable()
+ defer raceEnable()
+ }
v := atomic.AddInt32(&wg.counter, int32(delta))
if v < 0 {
- panic("sync: negative WaitGroup count")
+ panic("sync: negative WaitGroup counter")
}
if v > 0 || atomic.LoadInt32(&wg.waiters) == 0 {
return
@@ -56,7 +70,14 @@ func (wg *WaitGroup) Done() {
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
+ if raceenabled {
+ raceDisable()
+ }
if atomic.LoadInt32(&wg.counter) == 0 {
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ }
return
}
wg.m.Lock()
@@ -67,7 +88,15 @@ func (wg *WaitGroup) Wait() {
// to avoid missing an Add.
if atomic.LoadInt32(&wg.counter) == 0 {
atomic.AddInt32(&wg.waiters, -1)
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ raceDisable()
+ }
wg.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
return
}
if wg.sema == nil {
@@ -76,4 +105,8 @@ func (wg *WaitGroup) Wait() {
s := wg.sema
wg.m.Unlock()
runtime_Semacquire(s)
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ }
}
diff --git a/src/pkg/sync/waitgroup_test.go b/src/pkg/sync/waitgroup_test.go
index 34430fc21..84c4cfc37 100644
--- a/src/pkg/sync/waitgroup_test.go
+++ b/src/pkg/sync/waitgroup_test.go
@@ -50,7 +50,7 @@ func TestWaitGroup(t *testing.T) {
func TestWaitGroupMisuse(t *testing.T) {
defer func() {
err := recover()
- if err != "sync: negative WaitGroup count" {
+ if err != "sync: negative WaitGroup counter" {
t.Fatalf("Unexpected panic: %#v", err)
}
}()