Diffstat (limited to 'src/sync/atomic')
-rw-r--r-- | src/sync/atomic/64bit_arm.go | 58
-rw-r--r-- | src/sync/atomic/asm_386.s | 214
-rw-r--r-- | src/sync/atomic/asm_amd64.s | 146
-rw-r--r-- | src/sync/atomic/asm_amd64p32.s | 159
-rw-r--r-- | src/sync/atomic/asm_arm.s | 197
-rw-r--r-- | src/sync/atomic/asm_freebsd_arm.s | 109
-rw-r--r-- | src/sync/atomic/asm_linux_arm.s | 216
-rw-r--r-- | src/sync/atomic/asm_nacl_arm.s | 109
-rw-r--r-- | src/sync/atomic/asm_netbsd_arm.s | 109
-rw-r--r-- | src/sync/atomic/atomic_linux_arm_test.go | 14
-rw-r--r-- | src/sync/atomic/atomic_test.go | 1509
-rw-r--r-- | src/sync/atomic/doc.go | 149
-rw-r--r-- | src/sync/atomic/export_linux_arm_test.go | 7
-rw-r--r-- | src/sync/atomic/race.s | 8
-rw-r--r-- | src/sync/atomic/value.go | 85
-rw-r--r-- | src/sync/atomic/value_test.go | 195
16 files changed, 3284 insertions, 0 deletions
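
The patch below adds the public sync/atomic API (Add, CompareAndSwap, Swap, Load, and Store over 32/64-bit integers, uintptr, and unsafe.Pointer) plus the per-architecture assembly that backs it. As a quick orientation before the diff, here is a minimal usage sketch of the exported functions; this is not part of the patch, and names like counter are illustrative:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		var counter int64 // must be 64-bit aligned on 32-bit platforms
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := 0; j < 1000; j++ {
					atomic.AddInt64(&counter, 1) // a single LOCK XADDQ on amd64
				}
			}()
		}
		wg.Wait()
		fmt.Println(atomic.LoadInt64(&counter)) // always 4000
	}
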
diff --git a/src/sync/atomic/64bit_arm.go b/src/sync/atomic/64bit_arm.go
new file mode 100644
index 000000000..b98e60827
--- /dev/null
+++ b/src/sync/atomic/64bit_arm.go
@@ -0,0 +1,58 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+func loadUint64(addr *uint64) (val uint64) {
+	for {
+		val = *addr
+		if CompareAndSwapUint64(addr, val, val) {
+			break
+		}
+	}
+	return
+}
+
+func storeUint64(addr *uint64, val uint64) {
+	for {
+		old := *addr
+		if CompareAndSwapUint64(addr, old, val) {
+			break
+		}
+	}
+	return
+}
+
+func addUint64(val *uint64, delta uint64) (new uint64) {
+	for {
+		old := *val
+		new = old + delta
+		if CompareAndSwapUint64(val, old, new) {
+			break
+		}
+	}
+	return
+}
+
+func swapUint64(addr *uint64, new uint64) (old uint64) {
+	for {
+		old = *addr
+		if CompareAndSwapUint64(addr, old, new) {
+			break
+		}
+	}
+	return
+}
+
+// Additional ARM-specific assembly routines.
+// Declaration here to give assembly routines correct stack maps for arguments.
+func armCompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
+func armCompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
+func generalCAS64(addr *uint64, old, new uint64) (swapped bool)
+func armAddUint32(addr *uint32, delta uint32) (new uint32)
+func armAddUint64(addr *uint64, delta uint64) (new uint64)
+func armSwapUint32(addr *uint32, new uint32) (old uint32)
+func armSwapUint64(addr *uint64, new uint64) (old uint64)
+func armLoadUint64(addr *uint64) (val uint64)
+func armStoreUint64(addr *uint64, val uint64)
diff --git a/src/sync/atomic/asm_386.s b/src/sync/atomic/asm_386.s
new file mode 100644
index 000000000..740dfe76b
--- /dev/null
+++ b/src/sync/atomic/asm_386.s
@@ -0,0 +1,214 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+#include "textflag.h"
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0-12
+	JMP	·SwapUint32(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), BP
+	MOVL	new+4(FP), AX
+	XCHGL	AX, 0(BP)
+	MOVL	AX, old+8(FP)
+	RET
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0-20
+	JMP	·SwapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0-20
+	// no XCHGQ so use CMPXCHG8B loop
+	MOVL	addr+0(FP), BP
+	TESTL	$7, BP
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
+	// CX:BX = new
+	MOVL	new_lo+4(FP), BX
+	MOVL	new_hi+8(FP), CX
+	// DX:AX = *addr
+	MOVL	0(BP), AX
+	MOVL	4(BP), DX
+swaploop:
+	// if *addr == DX:AX
+	//	*addr = CX:BX
+	// else
+	//	DX:AX = *addr
+	// all in one instruction
+	LOCK
+	CMPXCHG8B	0(BP)
+	JNZ	swaploop
+
+	// success
+	// return DX:AX
+	MOVL	AX, old_lo+12(FP)
+	MOVL	DX, old_hi+16(FP)
+	RET
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0-12
+	JMP	·SwapUint32(SB)
+
+TEXT ·SwapPointer(SB),NOSPLIT,$0-12
+	JMP	·SwapUint32(SB)
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0-13
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-13
+	MOVL	addr+0(FP), BP
+	MOVL	old+4(FP), AX
+	MOVL	new+8(FP), CX
+	// CMPXCHGL was introduced on the 486.
+	LOCK
+	CMPXCHGL	CX, 0(BP)
+	SETEQ	swapped+12(FP)
+	RET
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0-13
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),NOSPLIT,$0-13
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0-21
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-21
+	MOVL	addr+0(FP), BP
+	TESTL	$7, BP
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
+	MOVL	old_lo+4(FP), AX
+	MOVL	old_hi+8(FP), DX
+	MOVL	new_lo+12(FP), BX
+	MOVL	new_hi+16(FP), CX
+	// CMPXCHG8B was introduced on the Pentium.
+	LOCK
+	CMPXCHG8B	0(BP)
+	SETEQ	swapped+20(FP)
+	RET
+
+TEXT ·AddInt32(SB),NOSPLIT,$0-12
+	JMP	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), BP
+	MOVL	delta+4(FP), AX
+	MOVL	AX, CX
+	// XADD was introduced on the 486.
+	LOCK
+	XADDL	AX, 0(BP)
+	ADDL	AX, CX
+	MOVL	CX, new+8(FP)
+	RET
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0-12
+	JMP	·AddUint32(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0-20
+	JMP	·AddUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0-20
+	// no XADDQ so use CMPXCHG8B loop
+	MOVL	addr+0(FP), BP
+	TESTL	$7, BP
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
+	// DI:SI = delta
+	MOVL	delta_lo+4(FP), SI
+	MOVL	delta_hi+8(FP), DI
+	// DX:AX = *addr
+	MOVL	0(BP), AX
+	MOVL	4(BP), DX
+addloop:
+	// CX:BX = DX:AX (*addr) + DI:SI (delta)
+	MOVL	AX, BX
+	MOVL	DX, CX
+	ADDL	SI, BX
+	ADCL	DI, CX
+
+	// if *addr == DX:AX {
+	//	*addr = CX:BX
+	// } else {
+	//	DX:AX = *addr
+	// }
+	// all in one instruction
+	LOCK
+	CMPXCHG8B	0(BP)
+
+	JNZ	addloop
+
+	// success
+	// return CX:BX
+	MOVL	BX, new_lo+12(FP)
+	MOVL	CX, new_hi+16(FP)
+	RET
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0-8
+	JMP	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-8
+	MOVL	addr+0(FP), AX
+	MOVL	0(AX), AX
+	MOVL	AX, val+4(FP)
+	RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0-12
+	JMP	·LoadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), AX
+	TESTL	$7, AX
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
+	// MOVQ and EMMS were introduced on the Pentium MMX.
+	// MOVQ (%EAX), %MM0
+	BYTE $0x0f; BYTE $0x6f; BYTE $0x00
+	// MOVQ %MM0, 0x8(%ESP)
+	BYTE $0x0f; BYTE $0x7f; BYTE $0x44; BYTE $0x24; BYTE $0x08
+	EMMS
+	RET
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0-8
+	JMP	·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0-8
+	JMP	·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0-8
+	JMP	·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-8
+	MOVL	addr+0(FP), BP
+	MOVL	val+4(FP), AX
+	XCHGL	AX, 0(BP)
+	RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0-12
+	JMP	·StoreUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), AX
+	TESTL	$7, AX
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
+	// MOVQ and EMMS were introduced on the Pentium MMX.
+	// MOVQ 0x8(%ESP), %MM0
+	BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
+	// MOVQ %MM0, (%EAX)
+	BYTE $0x0f; BYTE $0x7f; BYTE $0x00
+	EMMS
+	// This is essentially a no-op, but it provides required memory fencing.
+	// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
+	XORL	AX, AX
+	LOCK
+	XADDL	AX, (SP)
+	RET
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0-8
+	JMP	·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),NOSPLIT,$0-8
+	JMP	·StoreUint32(SB)
diff --git a/src/sync/atomic/asm_amd64.s b/src/sync/atomic/asm_amd64.s
new file mode 100644
index 000000000..6e53ebedd
--- /dev/null
+++ b/src/sync/atomic/asm_amd64.s
@@ -0,0 +1,146 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+#include "textflag.h"
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0-20
+	JMP	·SwapUint32(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0-20
+	MOVQ	addr+0(FP), BP
+	MOVL	new+8(FP), AX
+	XCHGL	AX, 0(BP)
+	MOVL	AX, old+16(FP)
+	RET
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0-24
+	JMP	·SwapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0-24
+	MOVQ	addr+0(FP), BP
+	MOVQ	new+8(FP), AX
+	XCHGQ	AX, 0(BP)
+	MOVQ	AX, old+16(FP)
+	RET
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0-24
+	JMP	·SwapUint64(SB)
+
+TEXT ·SwapPointer(SB),NOSPLIT,$0-24
+	JMP	·SwapUint64(SB)
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0-17
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-17
+	MOVQ	addr+0(FP), BP
+	MOVL	old+8(FP), AX
+	MOVL	new+12(FP), CX
+	LOCK
+	CMPXCHGL	CX, 0(BP)
+	SETEQ	swapped+16(FP)
+	RET
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0-25
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapPointer(SB),NOSPLIT,$0-25
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0-25
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-25
+	MOVQ	addr+0(FP), BP
+	MOVQ	old+8(FP), AX
+	MOVQ	new+16(FP), CX
+	LOCK
+	CMPXCHGQ	CX, 0(BP)
+	SETEQ	swapped+24(FP)
+	RET
+
+TEXT ·AddInt32(SB),NOSPLIT,$0-20
+	JMP	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0-20
+	MOVQ	addr+0(FP), BP
+	MOVL	delta+8(FP), AX
+	MOVL	AX, CX
+	LOCK
+	XADDL	AX, 0(BP)
+	ADDL	AX, CX
+	MOVL	CX, new+16(FP)
+	RET
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0-24
+	JMP	·AddUint64(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0-24
+	JMP	·AddUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0-24
+	MOVQ	addr+0(FP), BP
+	MOVQ	delta+8(FP), AX
+	MOVQ	AX, CX
+	LOCK
+	XADDQ	AX, 0(BP)
+	ADDQ	AX, CX
+	MOVQ	CX, new+16(FP)
+	RET
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0-12
+	JMP	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-12
+	MOVQ	addr+0(FP), AX
+	MOVL	0(AX), AX
+	MOVL	AX, val+8(FP)
+	RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0-16
+	JMP	·LoadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0-16
+	MOVQ	addr+0(FP), AX
+	MOVQ	0(AX), AX
+	MOVQ	AX, val+8(FP)
+	RET
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0-16
+	JMP	·LoadPointer(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0-16
+	MOVQ	addr+0(FP), AX
+	MOVQ	0(AX), AX
+	MOVQ	AX, val+8(FP)
+	RET
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0-12
+	JMP	·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-12
+	MOVQ	addr+0(FP), BP
+	MOVL	val+8(FP), AX
+	XCHGL	AX, 0(BP)
+	RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0-16
+	JMP	·StoreUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0-16
+	MOVQ	addr+0(FP), BP
+	MOVQ	val+8(FP), AX
+	XCHGQ	AX, 0(BP)
+	RET
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0-16
+	JMP	·StorePointer(SB)
+
+TEXT ·StorePointer(SB),NOSPLIT,$0-16
+	MOVQ	addr+0(FP), BP
+	MOVQ	val+8(FP), AX
+	XCHGQ	AX, 0(BP)
+	RET
diff --git a/src/sync/atomic/asm_amd64p32.s b/src/sync/atomic/asm_amd64p32.s
new file mode 100644
index 000000000..d77cc2c08
--- /dev/null
+++ b/src/sync/atomic/asm_amd64p32.s
@@ -0,0 +1,159 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0-12
+	JMP	·SwapUint32(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), BX
+	MOVL	new+4(FP), AX
+	XCHGL	AX, 0(BX)
+	MOVL	AX, old+8(FP)
+	RET
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0-24
+	JMP	·SwapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0-24
+	MOVL	addr+0(FP), BX
+	TESTL	$7, BX
+	JZ	2(PC)
+	MOVL	0, BX // crash with nil ptr deref
+	MOVQ	new+8(FP), AX
+	XCHGQ	AX, 0(BX)
+	MOVQ	AX, old+16(FP)
+	RET
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0-12
+	JMP	·SwapUint32(SB)
+
+TEXT ·SwapPointer(SB),NOSPLIT,$0-12
+	JMP	·SwapUint32(SB)
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0-17
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-17
+	MOVL	addr+0(FP), BX
+	MOVL	old+4(FP), AX
+	MOVL	new+8(FP), CX
+	LOCK
+	CMPXCHGL	CX, 0(BX)
+	SETEQ	swapped+16(FP)
+	RET
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0-17
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),NOSPLIT,$0-17
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0-25
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-25
+	MOVL	addr+0(FP), BX
+	TESTL	$7, BX
+	JZ	2(PC)
+	MOVL	0, BX // crash with nil ptr deref
+	MOVQ	old+8(FP), AX
+	MOVQ	new+16(FP), CX
+	LOCK
+	CMPXCHGQ	CX, 0(BX)
+	SETEQ	swapped+24(FP)
+	RET
+
+TEXT ·AddInt32(SB),NOSPLIT,$0-12
+	JMP	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), BX
+	MOVL	delta+4(FP), AX
+	MOVL	AX, CX
+	LOCK
+	XADDL	AX, 0(BX)
+	ADDL	AX, CX
+	MOVL	CX, new+8(FP)
+	RET
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0-12
+	JMP	·AddUint32(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0-24
+	JMP	·AddUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0-24
+	MOVL	addr+0(FP), BX
+	TESTL	$7, BX
+	JZ	2(PC)
+	MOVL	0, BX // crash with nil ptr deref
+	MOVQ	delta+8(FP), AX
+	MOVQ	AX, CX
+	LOCK
+	XADDQ	AX, 0(BX)
+	ADDQ	AX, CX
+	MOVQ	CX, new+16(FP)
+	RET
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0-12
+	JMP	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), AX
+	MOVL	0(AX), AX
+	MOVL	AX, val+8(FP)
+	RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0-16
+	JMP	·LoadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0-16
+	MOVL	addr+0(FP), AX
+	TESTL	$7, AX
+	JZ	2(PC)
+	MOVL	0, AX // crash with nil ptr deref
+	MOVQ	0(AX), AX
+	MOVQ	AX, val+8(FP)
+	RET
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0-12
+	JMP	·LoadPointer(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0-12
+	MOVL	addr+0(FP), AX
+	MOVL	0(AX), AX
+	MOVL	AX, val+8(FP)
+	RET
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0-8
+	JMP	·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-8
+	MOVL	addr+0(FP), BX
+	MOVL	val+4(FP), AX
+	XCHGL	AX, 0(BX)
+	RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0-16
+	JMP	·StoreUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0-16
+	MOVL	addr+0(FP), BX
+	TESTL	$7, BX
+	JZ	2(PC)
+	MOVL	0, BX // crash with nil ptr deref
+	MOVQ	val+8(FP), AX
+	XCHGQ	AX, 0(BX)
+	RET
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0-8
+	JMP	·StorePointer(SB)
+
+TEXT ·StorePointer(SB),NOSPLIT,$0-8
+	MOVL	addr+0(FP), BX
+	MOVL	val+4(FP), AX
+	XCHGL	AX, 0(BX)
+	RET
diff --git a/src/sync/atomic/asm_arm.s b/src/sync/atomic/asm_arm.s
new file mode 100644
index 000000000..8a85273da
--- /dev/null
+++ b/src/sync/atomic/asm_arm.s
@@ -0,0 +1,197 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+#include "textflag.h"
+
+// ARM atomic operations, for use by asm_$(GOOS)_arm.s.
+
+TEXT ·armCompareAndSwapUint32(SB),NOSPLIT,$0-13
+	MOVW	addr+0(FP), R1
+	MOVW	old+4(FP), R2
+	MOVW	new+8(FP), R3
+casloop:
+	// LDREX and STREX were introduced in ARMv6.
+	LDREX	(R1), R0
+	CMP	R0, R2
+	BNE	casfail
+	STREX	R3, (R1), R0
+	CMP	$0, R0
+	BNE	casloop
+	MOVW	$1, R0
+	MOVBU	R0, ret+12(FP)
+	RET
+casfail:
+	MOVW	$0, R0
+	MOVBU	R0, ret+12(FP)
+	RET
+
+TEXT ·armCompareAndSwapUint64(SB),NOSPLIT,$0-21
+	BL	fastCheck64<>(SB)
+	MOVW	addr+0(FP), R1
+	// make unaligned atomic access panic
+	AND.S	$7, R1, R2
+	BEQ	2(PC)
+	MOVW	R2, (R2)
+	MOVW	oldlo+4(FP), R2
+	MOVW	oldhi+8(FP), R3
+	MOVW	newlo+12(FP), R4
+	MOVW	newhi+16(FP), R5
+cas64loop:
+	// LDREXD and STREXD were introduced in ARMv6k.
+	LDREXD	(R1), R6 // loads R6 and R7
+	CMP	R2, R6
+	BNE	cas64fail
+	CMP	R3, R7
+	BNE	cas64fail
+	STREXD	R4, (R1), R0 // stores R4 and R5
+	CMP	$0, R0
+	BNE	cas64loop
+	MOVW	$1, R0
+	MOVBU	R0, ret+20(FP)
+	RET
+cas64fail:
+	MOVW	$0, R0
+	MOVBU	R0, ret+20(FP)
+	RET
+
+TEXT ·armAddUint32(SB),NOSPLIT,$0-12
+	MOVW	addr+0(FP), R1
+	MOVW	delta+4(FP), R2
+addloop:
+	// LDREX and STREX were introduced in ARMv6.
+	LDREX	(R1), R3
+	ADD	R2, R3
+	STREX	R3, (R1), R0
+	CMP	$0, R0
+	BNE	addloop
+	MOVW	R3, ret+8(FP)
+	RET
+
+TEXT ·armAddUint64(SB),NOSPLIT,$0-20
+	BL	fastCheck64<>(SB)
+	MOVW	addr+0(FP), R1
+	// make unaligned atomic access panic
+	AND.S	$7, R1, R2
+	BEQ	2(PC)
+	MOVW	R2, (R2)
+	MOVW	deltalo+4(FP), R2
+	MOVW	deltahi+8(FP), R3
+add64loop:
+	// LDREXD and STREXD were introduced in ARMv6k.
+	LDREXD	(R1), R4 // loads R4 and R5
+	ADD.S	R2, R4
+	ADC	R3, R5
+	STREXD	R4, (R1), R0 // stores R4 and R5
+	CMP	$0, R0
+	BNE	add64loop
+	MOVW	R4, retlo+12(FP)
+	MOVW	R5, rethi+16(FP)
+	RET
+
+TEXT ·armSwapUint32(SB),NOSPLIT,$0-12
+	MOVW	addr+0(FP), R1
+	MOVW	new+4(FP), R2
+swaploop:
+	// LDREX and STREX were introduced in ARMv6.
+	LDREX	(R1), R3
+	STREX	R2, (R1), R0
+	CMP	$0, R0
+	BNE	swaploop
+	MOVW	R3, old+8(FP)
+	RET
+
+TEXT ·armSwapUint64(SB),NOSPLIT,$0-20
+	BL	fastCheck64<>(SB)
+	MOVW	addr+0(FP), R1
+	// make unaligned atomic access panic
+	AND.S	$7, R1, R2
+	BEQ	2(PC)
+	MOVW	R2, (R2)
+	MOVW	newlo+4(FP), R2
+	MOVW	newhi+8(FP), R3
+swap64loop:
+	// LDREXD and STREXD were introduced in ARMv6k.
+	LDREXD	(R1), R4 // loads R4 and R5
+	STREXD	R2, (R1), R0 // stores R2 and R3
+	CMP	$0, R0
+	BNE	swap64loop
+	MOVW	R4, oldlo+12(FP)
+	MOVW	R5, oldhi+16(FP)
+	RET
+
+TEXT ·armLoadUint64(SB),NOSPLIT,$0-12
+	BL	fastCheck64<>(SB)
+	MOVW	addr+0(FP), R1
+	// make unaligned atomic access panic
+	AND.S	$7, R1, R2
+	BEQ	2(PC)
+	MOVW	R2, (R2)
+load64loop:
+	LDREXD	(R1), R2 // loads R2 and R3
+	STREXD	R2, (R1), R0 // stores R2 and R3
+	CMP	$0, R0
+	BNE	load64loop
+	MOVW	R2, vallo+4(FP)
+	MOVW	R3, valhi+8(FP)
+	RET
+
+TEXT ·armStoreUint64(SB),NOSPLIT,$0-12
+	BL	fastCheck64<>(SB)
+	MOVW	addr+0(FP), R1
+	// make unaligned atomic access panic
+	AND.S	$7, R1, R2
+	BEQ	2(PC)
+	MOVW	R2, (R2)
+	MOVW	vallo+4(FP), R2
+	MOVW	valhi+8(FP), R3
+store64loop:
+	LDREXD	(R1), R4 // loads R4 and R5
+	STREXD	R2, (R1), R0 // stores R2 and R3
+	CMP	$0, R0
+	BNE	store64loop
+	RET
+
+// Check for broken 64-bit LDREXD as found in QEMU.
+// LDREXD followed by immediate STREXD should succeed.
+// If it fails, try a few times just to be sure (maybe our thread got
+// rescheduled between the two instructions) and then panic.
+// A bug in some copies of QEMU makes STREXD never succeed,
+// which will make uses of the 64-bit atomic operations loop forever.
+// If things are working, set okLDREXD to avoid future checks.
+// https://bugs.launchpad.net/qemu/+bug/670883.
+TEXT check64<>(SB),NOSPLIT,$16-0
+	MOVW	$10, R1
+	// 8-aligned stack address scratch space.
+	MOVW	$8(R13), R5
+	AND	$~7, R5
+loop:
+	LDREXD	(R5), R2
+	STREXD	R2, (R5), R0
+	CMP	$0, R0
+	BEQ	ok
+	SUB	$1, R1
+	CMP	$0, R1
+	BNE	loop
+	// Must be buggy QEMU.
+	BL	·panic64(SB)
ok:
+	RET
+
+// Fast, cached version of check. No frame, just MOVW CMP RET after first time.
+TEXT fastCheck64<>(SB),NOSPLIT,$-4
+	MOVW	ok64<>(SB), R0
+	CMP	$0, R0 // have we been here before?
+	RET.NE
+	B	slowCheck64<>(SB)
+
+TEXT slowCheck64<>(SB),NOSPLIT,$0-0
+	BL	check64<>(SB)
+	// Still here, must be okay.
+	MOVW	$1, R0
+	MOVW	R0, ok64<>(SB)
+	RET
+
+GLOBL ok64<>(SB), NOPTR, $4
diff --git a/src/sync/atomic/asm_freebsd_arm.s b/src/sync/atomic/asm_freebsd_arm.s
new file mode 100644
index 000000000..06b975e89
--- /dev/null
+++ b/src/sync/atomic/asm_freebsd_arm.s
@@ -0,0 +1,109 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// FreeBSD/ARM atomic operations.
+// TODO(minux): this only supports ARMv6K or higher.
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0
+	B	·armCompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·AddInt32(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0
+	B	·armAddUint32(SB)
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0
+	B	·armSwapUint32(SB)
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·SwapPointer(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$-4
+	B	·armCompareAndSwapUint64(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R1
+load32loop:
+	LDREX	(R1), R2 // loads R2
+	STREX	R2, (R1), R0 // stores R2
+	CMP	$0, R0
+	BNE	load32loop
+	MOVW	R2, val+4(FP)
+	RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R1
+	MOVW	val+4(FP), R2
+storeloop:
+	LDREX	(R1), R4 // loads R4
+	STREX	R2, (R1), R0 // stores R2
+	CMP	$0, R0
+	BNE	storeloop
+	RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
diff --git a/src/sync/atomic/asm_linux_arm.s b/src/sync/atomic/asm_linux_arm.s
new file mode 100644
index 000000000..944758441
--- /dev/null
+++ b/src/sync/atomic/asm_linux_arm.s
@@ -0,0 +1,216 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+#include "textflag.h"
+
+// Linux/ARM atomic operations.
+
+// Because there is so much variation in ARM devices,
+// the Linux kernel provides an appropriate compare-and-swap
+// implementation at address 0xffff0fc0. Caller sets:
+//	R0 = old value
+//	R1 = new value
+//	R2 = addr
+//	LR = return address
+// The function returns with CS true if the swap happened.
+// http://lxr.linux.no/linux+v2.6.37.2/arch/arm/kernel/entry-armv.S#L850
+// On older kernels (before 2.6.24) the function can incorrectly
+// report a conflict, so we have to double-check the compare ourselves
+// and retry if necessary.
+//
+// http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b49c0f24cf6744a3f4fd09289fe7cade349dead5
+//
+TEXT cas<>(SB),NOSPLIT,$0
+	MOVW	$0xffff0fc0, PC
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+// Implement using kernel cas for portability.
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-13
+	MOVW	addr+0(FP), R2
+	// trigger potential paging fault here,
+	// because we don't know how to traceback through __kuser_cmpxchg
+	MOVW	(R2), R0
+	MOVW	old+4(FP), R0
casagain:
+	MOVW	new+8(FP), R1
+	BL	cas<>(SB)
+	BCC	cascheck
+	MOVW	$1, R0
casret:
+	MOVB	R0, swapped+12(FP)
+	RET
cascheck:
+	// Kernel lies; double-check.
+	MOVW	addr+0(FP), R2
+	MOVW	old+4(FP), R0
+	MOVW	0(R2), R3
+	CMP	R0, R3
+	BEQ	casagain
+	MOVW	$0, R0
+	B	casret
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·AddInt32(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+// Implement using kernel cas for portability.
+TEXT ·AddUint32(SB),NOSPLIT,$0-12
+	MOVW	addr+0(FP), R2
+	MOVW	delta+4(FP), R4
addloop1:
+	MOVW	0(R2), R0
+	MOVW	R0, R1
+	ADD	R4, R1
+	BL	cas<>(SB)
+	BCC	addloop1
+	MOVW	R1, new+8(FP)
+	RET
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+// Implement using kernel cas for portability.
+TEXT ·SwapUint32(SB),NOSPLIT,$0-12
+	MOVW	addr+0(FP), R2
+	MOVW	new+4(FP), R1
swaploop1:
+	MOVW	0(R2), R0
+	MOVW	R0, R4 // cas smashes R0
+	BL	cas<>(SB)
+	BCC	swaploop1
+	MOVW	R4, old+8(FP)
+	RET
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·SwapPointer(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT cas64<>(SB),NOSPLIT,$0
+	MOVW	$0xffff0f60, PC // __kuser_cmpxchg64: Linux-3.1 and above
+
+TEXT kernelCAS64<>(SB),NOSPLIT,$0-21
+	// int (*__kuser_cmpxchg64_t)(const int64_t *oldval, const int64_t *newval, volatile int64_t *ptr);
+	MOVW	addr+0(FP), R2 // ptr
+	// trigger potential paging fault here,
+	// because we don't know how to traceback through __kuser_cmpxchg64
+	MOVW	(R2), R0
+	// make unaligned atomic access panic
+	AND.S	$7, R2, R1
+	BEQ	2(PC)
+	MOVW	R1, (R1)
+	MOVW	$4(FP), R0 // oldval
+	MOVW	$12(FP), R1 // newval
+	BL	cas64<>(SB)
+	MOVW.CS	$1, R0 // C is set if the kernel has changed *ptr
+	MOVW.CC	$0, R0
+	MOVW	R0, 20(FP)
+	RET
+
+TEXT ·generalCAS64(SB),NOSPLIT,$0-21
+	B	runtime·cas64(SB)
+
+GLOBL armCAS64(SB), NOPTR, $4
+
+TEXT setupAndCallCAS64<>(SB),NOSPLIT,$-4-21
+	MOVW	$0xffff0ffc, R0 // __kuser_helper_version
+	MOVW	(R0), R0
+	// __kuser_cmpxchg64 only present if helper version >= 5
+	CMP	$5, R0
+	MOVW.CS	$kernelCAS64<>(SB), R1
+	MOVW.CS	R1, armCAS64(SB)
+	MOVW.CS	R1, PC
+	MOVB	runtime·armArch(SB), R0
+	// LDREXD, STREXD only present on ARMv6K or higher
+	CMP	$6, R0 // TODO(minux): how to differentiate ARMv6 with ARMv6K?
+	MOVW.CS	$·armCompareAndSwapUint64(SB), R1
+	MOVW.CS	R1, armCAS64(SB)
+	MOVW.CS	R1, PC
+	// we are out of luck, can only use runtime's emulated 64-bit cas
+	MOVW	$·generalCAS64(SB), R1
+	MOVW	R1, armCAS64(SB)
+	MOVW	R1, PC
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$-4-21
+	MOVW	armCAS64(SB), R0
+	CMP	$0, R0
+	MOVW.NE	R0, PC
+	B	setupAndCallCAS64<>(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R2
loadloop1:
+	MOVW	0(R2), R0
+	MOVW	R0, R1
+	BL	cas<>(SB)
+	BCC	loadloop1
+	MOVW	R1, val+4(FP)
+	RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R2
+	MOVW	val+4(FP), R1
storeloop1:
+	MOVW	0(R2), R0
+	BL	cas<>(SB)
+	BCC	storeloop1
+	RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
diff --git a/src/sync/atomic/asm_nacl_arm.s b/src/sync/atomic/asm_nacl_arm.s
new file mode 100644
index 000000000..76f623336
--- /dev/null
+++ b/src/sync/atomic/asm_nacl_arm.s
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// NaCl/ARM atomic operations.
+// NaCl/ARM explicitly targets ARMv7A.
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0
+	B	·armCompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·AddInt32(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0
+	B	·armAddUint32(SB)
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0
+	B	·armSwapUint32(SB)
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·SwapPointer(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$-4
+	B	·armCompareAndSwapUint64(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R1
load32loop:
+	LDREX	(R1), R2 // loads R2
+	STREX	R2, (R1), R0 // stores R2
+	CMP	$0, R0
+	BNE	load32loop
+	MOVW	R2, val+4(FP)
+	RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R1
+	MOVW	val+4(FP), R2
storeloop:
+	LDREX	(R1), R4 // loads R4
+	STREX	R2, (R1), R0 // stores R2
+	CMP	$0, R0
+	BNE	storeloop
+	RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
diff --git a/src/sync/atomic/asm_netbsd_arm.s b/src/sync/atomic/asm_netbsd_arm.s
new file mode 100644
index 000000000..dbe80898f
--- /dev/null
+++ b/src/sync/atomic/asm_netbsd_arm.s
@@ -0,0 +1,109 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// NetBSD/ARM atomic operations.
+// TODO(minux): this only supports ARMv6K or higher.
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0
+	B	·armCompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapPointer(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·AddInt32(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0
+	B	·armAddUint32(SB)
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0
+	B	·AddUint32(SB)
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0
+	B	·armSwapUint32(SB)
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·SwapPointer(SB),NOSPLIT,$0
+	B	·SwapUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
+	B	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$-4
+	B	·armCompareAndSwapUint64(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0
+	B	·addUint64(SB)
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0
+	B	·swapUint64(SB)
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R1
load32loop:
+	LDREX	(R1), R2 // loads R2
+	STREX	R2, (R1), R0 // stores R2
+	CMP	$0, R0
+	BNE	load32loop
+	MOVW	R2, val+4(FP)
+	RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0
+	B	·loadUint64(SB)
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0
+	B	·LoadUint32(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-8
+	MOVW	addr+0(FP), R1
+	MOVW	val+4(FP), R2
storeloop:
+	LDREX	(R1), R4 // loads R4
+	STREX	R2, (R1), R0 // stores R2
+	CMP	$0, R0
+	BNE	storeloop
+	RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0
+	B	·storeUint64(SB)
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
+
+TEXT ·StorePointer(SB),NOSPLIT,$0
+	B	·StoreUint32(SB)
diff --git a/src/sync/atomic/atomic_linux_arm_test.go b/src/sync/atomic/atomic_linux_arm_test.go
new file mode 100644
index 000000000..b6965b99b
--- /dev/null
+++ b/src/sync/atomic/atomic_linux_arm_test.go
@@ -0,0 +1,14 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+	. "sync/atomic"
+	"testing"
+)
+
+func TestGeneralCAS64(t *testing.T) {
+	testCompareAndSwapUint64(t, GeneralCAS64)
+}
diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go
new file mode 100644
index 000000000..9f13af48b
--- /dev/null
+++ b/src/sync/atomic/atomic_test.go
@@ -0,0 +1,1509 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+	. "sync/atomic"
+	"testing"
+	"unsafe"
+)
+
+// Tests of correct behavior, without contention.
+// (Does the function work as advertised?)
+//
+// Test that the Add functions add correctly.
+// Test that the CompareAndSwap functions actually
+// do the comparison and the swap correctly.
+//
+// The loop over power-of-two values is meant to
+// ensure that the operations apply to the full word size.
+// The struct fields x.before and x.after check that the
+// operations do not extend past the full word size.
+
+const (
+	magic32 = 0xdedbeef
+	magic64 = 0xdeddeadbeefbeef
+)
+
+// Do the 64-bit functions panic? If so, don't bother testing.
+var test64err = func() (err interface{}) {
+	defer func() {
+		err = recover()
+	}()
+	var x int64
+	AddInt64(&x, 1)
+	return nil
+}()
+
+func TestSwapInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j int32
+	for delta := int32(1); delta+delta > delta; delta += delta {
+		k := SwapInt32(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestSwapUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j uint32
+	for delta := uint32(1); delta+delta > delta; delta += delta {
+		k := SwapUint32(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestSwapInt64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j int64
+	for delta := int64(1); delta+delta > delta; delta += delta {
+		k := SwapInt64(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestSwapUint64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j uint64
+	for delta := uint64(1); delta+delta > delta; delta += delta {
+		k := SwapUint64(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestSwapUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	var j uintptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := SwapUintptr(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestSwapPointer(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      unsafe.Pointer
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	var j uintptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := SwapPointer(&x.i, unsafe.Pointer(delta))
+		if uintptr(x.i) != delta || uintptr(k) != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestAddInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j int32
+	for delta := int32(1); delta+delta > delta; delta += delta {
+		k := AddInt32(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestAddUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j uint32
+	for delta := uint32(1); delta+delta > delta; delta += delta {
+		k := AddUint32(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestAddInt64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j int64
+	for delta := int64(1); delta+delta > delta; delta += delta {
+		k := AddInt64(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, int64(magic64), int64(magic64))
+	}
+}
+
+func TestAddUint64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j uint64
+	for delta := uint64(1); delta+delta > delta; delta += delta {
+		k := AddUint64(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestAddUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	var j uintptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := AddUintptr(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestCompareAndSwapInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	for val := int32(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapInt32(&x.i, val, val+1) {
+			t.Fatalf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapInt32(&x.i, val, val+2) {
+			t.Fatalf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestCompareAndSwapUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	for val := uint32(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapUint32(&x.i, val, val+1) {
+			t.Fatalf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapUint32(&x.i, val, val+2) {
+			t.Fatalf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestCompareAndSwapInt64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	for val := int64(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapInt64(&x.i, val, val+1) {
+			t.Fatalf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapInt64(&x.i, val, val+2) {
+			t.Fatalf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func testCompareAndSwapUint64(t *testing.T, cas func(*uint64, uint64, uint64) bool) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	for val := uint64(1); val+val > val; val += val {
+		x.i = val
+		if !cas(&x.i, val, val+1) {
+			t.Fatalf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if cas(&x.i, val, val+2) {
+			t.Fatalf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestCompareAndSwapUint64(t *testing.T) {
+	testCompareAndSwapUint64(t, CompareAndSwapUint64)
+}
+
+func TestCompareAndSwapUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	for val := uintptr(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapUintptr(&x.i, val, val+1) {
+			t.Fatalf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapUintptr(&x.i, val, val+2) {
+			t.Fatalf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestCompareAndSwapPointer(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      unsafe.Pointer
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	for val := uintptr(1); val+val > val; val += val {
+		x.i = unsafe.Pointer(val)
+		if !CompareAndSwapPointer(&x.i, unsafe.Pointer(val), unsafe.Pointer(val+1)) {
+			t.Fatalf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != unsafe.Pointer(val+1) {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = unsafe.Pointer(val + 1)
+		if CompareAndSwapPointer(&x.i, unsafe.Pointer(val), unsafe.Pointer(val+2)) {
+			t.Fatalf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != unsafe.Pointer(val+1) {
+			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestLoadInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	for delta := int32(1); delta+delta > delta; delta += delta {
+		k := LoadInt32(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i += delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestLoadUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	for delta := uint32(1); delta+delta > delta; delta += delta {
+		k := LoadUint32(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i += delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestLoadInt64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	for delta := int64(1); delta+delta > delta; delta += delta {
+		k := LoadInt64(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i += delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestLoadUint64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	for delta := uint64(1); delta+delta > delta; delta += delta {
+		k := LoadUint64(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i += delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestLoadUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := LoadUintptr(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i += delta
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestLoadPointer(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      unsafe.Pointer
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := LoadPointer(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i = unsafe.Pointer(uintptr(x.i) + delta)
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestStoreInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	v := int32(0)
+	for delta := int32(1); delta+delta > delta; delta += delta {
+		StoreInt32(&x.i, v)
+		if x.i != v {
+			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+		}
+		v += delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestStoreUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	v := uint32(0)
+	for delta := uint32(1); delta+delta > delta; delta += delta {
+		StoreUint32(&x.i, v)
+		if x.i != v {
+			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+		}
+		v += delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestStoreInt64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	v := int64(0)
+	for delta := int64(1); delta+delta > delta; delta += delta {
+		StoreInt64(&x.i, v)
+		if x.i != v {
+			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+		}
+		v += delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestStoreUint64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	v := uint64(0)
+	for delta := uint64(1); delta+delta > delta; delta += delta {
+		StoreUint64(&x.i, v)
+		if x.i != v {
+			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+		}
+		v += delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestStoreUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	v := uintptr(0)
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		StoreUintptr(&x.i, v)
+		if x.i != v {
+			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+		}
+		v += delta
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestStorePointer(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      unsafe.Pointer
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	v := unsafe.Pointer(uintptr(0))
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		StorePointer(&x.i, unsafe.Pointer(v))
+		if x.i != v {
+			t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+		}
+		v = unsafe.Pointer(uintptr(v) + delta)
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+// Tests of correct behavior, with contention.
+// (Is the function atomic?)
+//
+// For each function, we write a "hammer" function that repeatedly
+// uses the atomic operation to add 1 to a value. After running
+// multiple hammers in parallel, check that we end with the correct
+// total.
+// Swap can't add 1, so it uses a different scheme.
+// The functions repeatedly generate a pseudo-random number such that
+// low bits are equal to high bits, swap, check that the old value
+// has low and high bits equal.
+
+var hammer32 = map[string]func(*uint32, int){
+	"SwapInt32":             hammerSwapInt32,
+	"SwapUint32":            hammerSwapUint32,
+	"SwapUintptr":           hammerSwapUintptr32,
+	"SwapPointer":           hammerSwapPointer32,
+	"AddInt32":              hammerAddInt32,
+	"AddUint32":             hammerAddUint32,
+	"AddUintptr":            hammerAddUintptr32,
+	"CompareAndSwapInt32":   hammerCompareAndSwapInt32,
+	"CompareAndSwapUint32":  hammerCompareAndSwapUint32,
+	"CompareAndSwapUintptr": hammerCompareAndSwapUintptr32,
+	"CompareAndSwapPointer": hammerCompareAndSwapPointer32,
+}
+
+func init() {
+	var v uint64 = 1 << 50
+	if uintptr(v) != 0 {
+		// 64-bit system; clear uintptr tests
+		delete(hammer32, "SwapUintptr")
+		delete(hammer32, "SwapPointer")
+		delete(hammer32, "AddUintptr")
+		delete(hammer32, "CompareAndSwapUintptr")
+		delete(hammer32, "CompareAndSwapPointer")
+	}
+}
+
+func hammerSwapInt32(uaddr *uint32, count int) {
+	addr := (*int32)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+		old := uint32(SwapInt32(addr, int32(new)))
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUint32(addr *uint32, count int) {
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+		old := SwapUint32(addr, new)
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUintptr32(uaddr *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+		old := SwapUintptr(addr, new)
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapUintptr is not atomic: %#08x", old))
+		}
+	}
+}
+
+func hammerSwapPointer32(uaddr *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+		old := uintptr(SwapPointer(addr, unsafe.Pointer(new)))
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapPointer is not atomic: %#08x", old))
+		}
+	}
+}
+
+func hammerAddInt32(uaddr *uint32, count int) {
+	addr := (*int32)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		AddInt32(addr, 1)
+	}
+}
+
+func hammerAddUint32(addr *uint32, count int) {
+	for i := 0; i < count; i++ {
+		AddUint32(addr, 1)
+	}
+}
+
+func hammerAddUintptr32(uaddr *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		AddUintptr(addr, 1)
+	}
+}
+
+func hammerCompareAndSwapInt32(uaddr *uint32, count int) {
+	addr := (*int32)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadInt32(addr)
+			if CompareAndSwapInt32(addr, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapUint32(addr *uint32, count int) {
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadUint32(addr)
+			if CompareAndSwapUint32(addr, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadUintptr(addr)
+			if CompareAndSwapUintptr(addr, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapPointer32(uaddr *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadPointer(addr)
+			if CompareAndSwapPointer(addr, v, unsafe.Pointer(uintptr(v)+1)) {
+				break
+			}
+		}
+	}
+}
+
+func TestHammer32(t *testing.T) {
+	const p = 4
+	n := 100000
+	if testing.Short() {
+		n = 1000
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+	for name, testf := range hammer32 {
+		c := make(chan int)
+		var val uint32
+		for i := 0; i < p; i++ {
+			go func() {
+				defer func() {
+					if err := recover(); err != nil {
+						t.Error(err.(string))
+					}
+					c <- 1
+				}()
+				testf(&val, n)
+			}()
+		}
+		for i := 0; i < p; i++ {
+			<-c
+		}
+		if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p {
+			t.Fatalf("%s: val=%d want %d", name, val, n*p)
+		}
+	}
+}
+
+var hammer64 = map[string]func(*uint64, int){
+	"SwapInt64":             hammerSwapInt64,
+	"SwapUint64":            hammerSwapUint64,
+	"SwapUintptr":           hammerSwapUintptr64,
+	"SwapPointer":           hammerSwapPointer64,
+	"AddInt64":              hammerAddInt64,
+	"AddUint64":             hammerAddUint64,
+	"AddUintptr":            hammerAddUintptr64,
+	"CompareAndSwapInt64":   hammerCompareAndSwapInt64,
+	"CompareAndSwapUint64":  hammerCompareAndSwapUint64,
+	"CompareAndSwapUintptr": hammerCompareAndSwapUintptr64,
+	"CompareAndSwapPointer": hammerCompareAndSwapPointer64,
+}
+
+func init() {
+	var v uint64 = 1 << 50
+	if uintptr(v) == 0 {
+		// 32-bit system; clear uintptr tests
+		delete(hammer64, "SwapUintptr")
+		delete(hammer64, "SwapPointer")
+		delete(hammer64, "AddUintptr")
+		delete(hammer64, "CompareAndSwapUintptr")
+		delete(hammer64, "CompareAndSwapPointer")
+	}
+}
+
+func hammerSwapInt64(uaddr *uint64, count int) {
+	addr := (*int64)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+		old := uint64(SwapInt64(addr, int64(new)))
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUint64(addr *uint64, count int) {
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+		old := SwapUint64(addr, new)
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUintptr64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+func hammerSwapUintptr64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+		old := SwapUintptr(addr, new)
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapPointer64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+		old := uintptr(SwapPointer(addr, unsafe.Pointer(new)))
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapPointer is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerAddInt64(uaddr *uint64, count int) {
+	addr := (*int64)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		AddInt64(addr, 1)
+	}
+}
+
+func hammerAddUint64(addr *uint64, count int) {
+	for i := 0; i < count; i++ {
+		AddUint64(addr, 1)
+	}
+}
+
+func hammerAddUintptr64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		AddUintptr(addr, 1)
+	}
+}
+
+func hammerCompareAndSwapInt64(uaddr *uint64, count int) {
+	addr := (*int64)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadInt64(addr)
+			if CompareAndSwapInt64(addr, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapUint64(addr *uint64, count int) {
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadUint64(addr)
+			if CompareAndSwapUint64(addr, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadUintptr(addr)
+			if CompareAndSwapUintptr(addr, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapPointer64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
+	for i := 0; i < count; i++ {
+		for {
+			v := LoadPointer(addr)
+			if CompareAndSwapPointer(addr, v, unsafe.Pointer(uintptr(v)+1)) {
+				break
+			}
+		}
+	}
+}
+
+func TestHammer64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	const p = 4
+	n := 100000
+	if testing.Short() {
+		n = 1000
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+	for name, testf := range hammer64 {
+		c := make(chan int)
+		var val uint64
+		for i := 0; i < p; i++ {
+			go func() {
+				defer func() {
+					if err := recover(); err != nil {
+						t.Error(err.(string))
+					}
+					c <- 1
+				}()
+				testf(&val, n)
+			}()
+		}
+		for i := 0; i < p; i++ {
+			<-c
+		}
+		if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p {
+			t.Fatalf("%s: val=%d want %d", name, val, n*p)
+		}
+	}
+}
+
+func hammerStoreLoadInt32(t *testing.T, paddr unsafe.Pointer) {
+	addr := (*int32)(paddr)
+	v := LoadInt32(addr)
+	vlo := v & ((1 << 16) - 1)
+	vhi := v >> 16
+	if vlo != vhi {
+		t.Fatalf("Int32: %#x != %#x", vlo, vhi)
+	}
+	new := v + 1 + 1<<16
+	if vlo == 1e4 {
+		new = 0
+	}
+	StoreInt32(addr, new)
+}
+
+func hammerStoreLoadUint32(t *testing.T, paddr unsafe.Pointer) {
+	addr := (*uint32)(paddr)
+	v := LoadUint32(addr)
+	vlo := v & ((1 << 16) - 1)
+	vhi := v >> 16
+	if vlo != vhi {
+		t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
+	}
+	new := v + 1 + 1<<16
+	if vlo == 1e4 {
+		new = 0
+	}
+	StoreUint32(addr, new)
+}
+
+func hammerStoreLoadInt64(t *testing.T, paddr unsafe.Pointer) {
+	addr := (*int64)(paddr)
+	v := LoadInt64(addr)
+	vlo := v & ((1 << 32) - 1)
+	vhi := v >> 32
+	if vlo != vhi {
+		t.Fatalf("Int64: %#x != %#x", vlo, vhi)
+	}
+	new := v + 1 + 1<<32
+	StoreInt64(addr, new)
+}
+
+func hammerStoreLoadUint64(t *testing.T, paddr unsafe.Pointer) {
+	addr := (*uint64)(paddr)
+	v := LoadUint64(addr)
+	vlo := v & ((1 << 32) - 1)
+	vhi := v >> 32
+	if vlo != vhi {
+		t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
+	}
+	new := v + 1 + 1<<32
+	StoreUint64(addr, new)
+}
+
+func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) {
+	addr := (*uintptr)(paddr)
+	var test64 uint64 = 1 << 50
+	arch32 := uintptr(test64) == 0
+	v := LoadUintptr(addr)
+	new := v
+	if arch32 {
+		vlo := v & ((1 << 16) - 1)
+		vhi := v >> 16
+		if vlo != vhi {
+			t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+		}
+		new = v + 1 + 1<<16
+		if vlo == 1e4 {
+			new = 0
+		}
+	} else {
+		vlo := v & ((1 << 32) - 1)
+		vhi := v >> 32
+		if vlo != vhi {
+			t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+		}
+		inc := uint64(1 + 1<<32)
+		new = v + uintptr(inc)
+	}
+	StoreUintptr(addr, new)
+}
+
+func hammerStoreLoadPointer(t *testing.T, paddr unsafe.Pointer) {
+	addr := (*unsafe.Pointer)(paddr)
+	var test64 uint64 = 1 << 50
+	arch32 := uintptr(test64) == 0
+	v := uintptr(LoadPointer(addr))
+	new := v
+	if arch32 {
+		vlo := v & ((1 << 16) - 1)
+		vhi := v >> 16
+		if vlo != vhi {
+			t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+		}
+		new = v + 1 + 1<<16
+		if vlo == 1e4 {
+			new = 0
+		}
+	} else {
+		vlo := v & ((1 << 32) - 1)
+		vhi := v >> 32
+		if vlo != vhi {
+			t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+		}
+		inc := uint64(1 + 1<<32)
+		new = v + uintptr(inc)
+	}
+	StorePointer(addr, unsafe.Pointer(new))
+}
+
+func TestHammerStoreLoad(t *testing.T) {
+	var tests []func(*testing.T, unsafe.Pointer)
+	tests = append(tests, hammerStoreLoadInt32, hammerStoreLoadUint32,
+		hammerStoreLoadUintptr, hammerStoreLoadPointer)
+	if test64err == nil {
+		tests = append(tests, hammerStoreLoadInt64, hammerStoreLoadUint64)
+	}
+	n := int(1e6)
+	if testing.Short() {
+		n = int(1e4)
+	}
+	const procs = 8
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
+	for _, tt := range tests {
+		c := make(chan int)
+		var val uint64
+		for p := 0; p < procs; p++ {
+			go func() {
+				for i := 0; i < n; i++ {
+					tt(t, unsafe.Pointer(&val))
+				}
+				c <- 1
+			}()
+		}
+		for p := 0; p < procs; p++ {
+			<-c
+		}
+	}
+}
+
+func TestStoreLoadSeqCst32(t *testing.T) {
+	if runtime.NumCPU() == 1 {
+		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+	N := int32(1e3)
+	if testing.Short() {
+		N = int32(1e2)
+	}
+	c := make(chan bool, 2)
+	X := [2]int32{}
+	ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}}
+	for p := 0; p < 2; p++ {
+		go func(me int) {
+			he := 1 - me
+			for i := int32(1); i < N; i++ {
+				StoreInt32(&X[me], i)
+				my := LoadInt32(&X[he])
+				StoreInt32(&ack[me][i%3], my)
+				for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ {
+					if w%1000 == 0 {
+						runtime.Gosched()
+					}
+				}
+				his := LoadInt32(&ack[he][i%3])
+				if (my != i && my != i-1) || (his != i && his != i-1) {
+					t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+				}
+				if my != i && his != i {
+					t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+				}
+				StoreInt32(&ack[me][(i-1)%3], -1)
+			}
+			c <- true
+		}(p)
+	}
+	<-c
+	<-c
+}
+
+func TestStoreLoadSeqCst64(t *testing.T) {
+	if runtime.NumCPU() == 1 {
+		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+	}
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+	N := int64(1e3)
+	if testing.Short() {
+		N = int64(1e2)
+	}
+	c := make(chan bool, 2)
+	X := [2]int64{}
+	ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}}
+	for p := 0; p < 2; p++ {
+		go func(me int) {
+			he := 1 - me
+			for i := int64(1); i < N; i++ {
+				StoreInt64(&X[me], i)
+				my := LoadInt64(&X[he])
+				StoreInt64(&ack[me][i%3], my)
+				for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ {
+					if w%1000 == 0 {
+						runtime.Gosched()
+					}
+				}
+				his := LoadInt64(&ack[he][i%3])
+				if (my != i && my != i-1) || (his != i && his != i-1) {
+					t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+				}
+				if my != i && his != i {
+					t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+				}
+				StoreInt64(&ack[me][(i-1)%3], -1)
+			}
+			c <- true
+		}(p)
+	}
+	<-c
+	<-c
+}
+
+func TestStoreLoadRelAcq32(t *testing.T) {
+	if runtime.NumCPU() == 1 {
+		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+	N := int32(1e3)
+	if testing.Short() {
+		N = int32(1e2)
+	}
+	c := make(chan bool, 2)
+	type Data struct {
+		signal int32
+		pad1   [128]int8
+		data1  int32
+		pad2   [128]int8
+		data2  float32
+	}
+	var X Data
+	for p := int32(0); p < 2; p++ {
+		go func(p int32) {
+			for i := int32(1); i < N; i++ {
+				if (i+p)%2 == 0 {
+					X.data1 = i
+					X.data2 = float32(i)
+					StoreInt32(&X.signal, i)
+				} else {
+					for w := 1; LoadInt32(&X.signal) != i; w++ {
+						if w%1000 == 0 {
+							runtime.Gosched()
+						}
+					}
+					d1 := X.data1
+					d2 := X.data2
+					if d1 != i || d2 != float32(i) {
+						t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
+					}
+				}
+			}
+			c <- true
+		}(p)
+	}
+	<-c
+	<-c
+}
+
+func TestStoreLoadRelAcq64(t *testing.T) {
+	if runtime.NumCPU() == 1 {
+		t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+	}
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+	N := int64(1e3)
+	if testing.Short() {
+		N = int64(1e2)
+	}
+	c := make(chan bool, 2)
+	type Data struct {
+		signal int64
+		pad1   [128]int8
+		data1  int64
+		pad2   [128]int8
+		data2  float64
+	}
+	var X Data
+	for p := int64(0); p < 2; p++ {
+		go func(p int64) {
+			for i := int64(1); i < N; i++ {
+				if (i+p)%2 == 0 {
+					X.data1 = i
+					X.data2 = float64(i)
+					StoreInt64(&X.signal, i)
+				} else {
+					for w := 1; LoadInt64(&X.signal) != i; w++ {
+						if w%1000 == 0 {
+							runtime.Gosched()
+						}
+					}
+					d1 := X.data1
+					d2 := X.data2
+					if d1 != i || d2 != float64(i) {
+						t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
+					}
+				}
+			}
+			c <- true
+		}(p)
+	}
+	<-c
+	<-c
+}
+
+func shouldPanic(t *testing.T, name string, f func()) {
+	defer func() {
+		if recover() == nil {
+			t.Errorf("%s did not panic", name)
+		}
+	}()
+	f()
+}
+
+func TestUnaligned64(t *testing.T) {
+	// Unaligned 64-bit atomics on 32-bit systems are
+	// a continual source of pain. Test that on 32-bit systems they crash
+	// instead of failing silently.
+	if unsafe.Sizeof(int(0)) != 4 {
+		t.Skip("test only runs on 32-bit systems")
+	}
+
+	x := make([]uint32, 4)
+	p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
+
+	shouldPanic(t, "LoadUint64", func() { LoadUint64(p) })
+	shouldPanic(t, "StoreUint64", func() { StoreUint64(p, 1) })
+	shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) })
+	shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) })
+}
+
+func TestNilDeref(t *testing.T) {
+	if p := runtime.GOOS + "/" + runtime.GOARCH; p == "freebsd/arm" || p == "netbsd/arm" {
+		t.Skipf("issue 7338: skipping test on %q", p)
+	}
+	funcs := [...]func(){
+		func() { CompareAndSwapInt32(nil, 0, 0) },
+		func() { CompareAndSwapInt64(nil, 0, 0) },
+		func() { CompareAndSwapUint32(nil, 0, 0) },
+		func() { CompareAndSwapUint64(nil, 0, 0) },
+		func() { CompareAndSwapUintptr(nil, 0, 0) },
+		func() { CompareAndSwapPointer(nil, nil, nil) },
+		func() { SwapInt32(nil, 0) },
+		func() { SwapUint32(nil, 0) },
+		func() { SwapInt64(nil, 0) },
+		func() { SwapUint64(nil, 0) },
+		func() { SwapUintptr(nil, 0) },
+		func() { SwapPointer(nil, nil) },
+		func() { AddInt32(nil, 0) },
+		func() { AddUint32(nil, 0) },
+		func() { AddInt64(nil, 0) },
+		func() { AddUint64(nil, 0) },
+		func() { AddUintptr(nil, 0) },
+		func() { LoadInt32(nil) },
+		func() { LoadInt64(nil) },
+		func() { LoadUint32(nil) },
+		func() { LoadUint64(nil) },
+		func() { LoadUintptr(nil) },
+		func() { LoadPointer(nil) },
+		func() { StoreInt32(nil, 0) },
+		func() { StoreInt64(nil, 0) },
+		func() { StoreUint32(nil, 0) },
+		func() { StoreUint64(nil, 0) },
+		func() { StoreUintptr(nil, 0) },
+		func() { StorePointer(nil, nil) },
+	}
+	for _, f := range funcs {
+		func() {
+			defer func() {
+				runtime.GC()
+				recover()
+			}()
+			f()
+		}()
+	}
+}
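TestUnaligned64 above and the alignment bug note in doc.go below are two sides of the same constraint: on 32-bit platforms the caller must arrange 64-bit alignment of atomically accessed 64-bit words. A minimal sketch of the usual fix, relying only on the documented guarantee that the first word of an allocated struct is 64-bit aligned (stats is an illustrative type):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// Only the first word of an allocated struct is guaranteed to be
	// 64-bit aligned on 386 and ARM, so the atomic counter goes first.
	type stats struct {
		ops  uint64 // keep first for 64-bit alignment
		name string
	}

	func main() {
		s := new(stats) // heap-allocated: s.ops is 64-bit aligned
		atomic.AddUint64(&s.ops, 1)
		fmt.Println(atomic.LoadUint64(&s.ops)) // 1
	}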
diff --git a/src/sync/atomic/doc.go b/src/sync/atomic/doc.go
new file mode 100644
index 000000000..10fb8c917
--- /dev/null
+++ b/src/sync/atomic/doc.go
@@ -0,0 +1,149 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atomic provides low-level atomic memory primitives
+// useful for implementing synchronization algorithms.
+//
+// These functions require great care to be used correctly.
+// Except for special, low-level applications, synchronization is better
+// done with channels or the facilities of the sync package.
+// Share memory by communicating;
+// don't communicate by sharing memory.
+//
+// The swap operation, implemented by the SwapT functions, is the atomic
+// equivalent of:
+//
+//	old = *addr
+//	*addr = new
+//	return old
+//
+// The compare-and-swap operation, implemented by the CompareAndSwapT
+// functions, is the atomic equivalent of:
+//
+//	if *addr == old {
+//		*addr = new
+//		return true
+//	}
+//	return false
+//
+// The add operation, implemented by the AddT functions, is the atomic
+// equivalent of:
+//
+//	*addr += delta
+//	return *addr
+//
+// The load and store operations, implemented by the LoadT and StoreT
+// functions, are the atomic equivalents of "return *addr" and
+// "*addr = val".
+//
+package atomic
+
+import (
+	"unsafe"
+)
+
+// BUG(rsc): On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
+//
+// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
+//
+// On both ARM and x86-32, it is the caller's responsibility to arrange for 64-bit
+// alignment of 64-bit words accessed atomically. The first word in a global
+// variable or in an allocated struct or slice can be relied upon to be
+// 64-bit aligned.
+
+// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt32(addr *int32, new int32) (old int32)
+
+// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt64(addr *int64, new int64) (old int64)
+
+// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint32(addr *uint32, new uint32) (old uint32)
+
+// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint64(addr *uint64, new uint64) (old uint64)
+
+// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
+func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
+
+// SwapPointer atomically stores new into *addr and returns the previous *addr value.
+func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
+
+// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
+func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
+
+// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
+func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
+
+// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
+
+// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
+
+// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
+func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)
+
+// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
+func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
+
+// AddInt32 atomically adds delta to *addr and returns the new value.
+func AddInt32(addr *int32, delta int32) (new int32)
+
+// AddUint32 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
+// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
+func AddUint32(addr *uint32, delta uint32) (new uint32)
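The ^uint32(c-1) subtraction trick above is plain two's-complement arithmetic: for a positive constant c, ^uint32(c-1) equals -c modulo 2^32, so adding it subtracts c. A quick illustrative check:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var x uint32 = 10
		atomic.AddUint32(&x, ^uint32(0))   // decrement: x = 9
		atomic.AddUint32(&x, ^uint32(3-1)) // subtract 3: x = 6
		fmt.Println(x)                     // 6
	}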
+// AddInt64 atomically adds delta to *addr and returns the new value.
+func AddInt64(addr *int64, delta int64) (new int64)
+
+// AddUint64 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
+// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
+func AddUint64(addr *uint64, delta uint64) (new uint64)
+
+// AddUintptr atomically adds delta to *addr and returns the new value.
+func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+
+// LoadInt32 atomically loads *addr.
+func LoadInt32(addr *int32) (val int32)
+
+// LoadInt64 atomically loads *addr.
+func LoadInt64(addr *int64) (val int64)
+
+// LoadUint32 atomically loads *addr.
+func LoadUint32(addr *uint32) (val uint32)
+
+// LoadUint64 atomically loads *addr.
+func LoadUint64(addr *uint64) (val uint64)
+
+// LoadUintptr atomically loads *addr.
+func LoadUintptr(addr *uintptr) (val uintptr)
+
+// LoadPointer atomically loads *addr.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+// StoreInt32 atomically stores val into *addr.
+func StoreInt32(addr *int32, val int32)
+
+// StoreInt64 atomically stores val into *addr.
+func StoreInt64(addr *int64, val int64)
+
+// StoreUint32 atomically stores val into *addr.
+func StoreUint32(addr *uint32, val uint32)
+
+// StoreUint64 atomically stores val into *addr.
+func StoreUint64(addr *uint64, val uint64)
+
+// StoreUintptr atomically stores val into *addr.
+func StoreUintptr(addr *uintptr, val uintptr)
+
+// StorePointer atomically stores val into *addr.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
+
+// Helper for ARM. The linker will discard it on other systems.
+func panic64() {
+	panic("sync/atomic: broken 64-bit atomic operations (buggy QEMU)")
+}
diff --git a/src/sync/atomic/export_linux_arm_test.go b/src/sync/atomic/export_linux_arm_test.go
new file mode 100644
index 000000000..9f0c856a7
--- /dev/null
+++ b/src/sync/atomic/export_linux_arm_test.go
@@ -0,0 +1,7 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+var GeneralCAS64 = generalCAS64
diff --git a/src/sync/atomic/race.s b/src/sync/atomic/race.s
new file mode 100644
index 000000000..bdce7668b
--- /dev/null
+++ b/src/sync/atomic/race.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+// This file is here only to allow external functions.
+// The operations are implemented in src/runtime/race_amd64.s
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
new file mode 100644
index 000000000..ab3aa1128
--- /dev/null
+++ b/src/sync/atomic/value.go
@@ -0,0 +1,85 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import (
+	"unsafe"
+)
+
+// A Value provides an atomic load and store of a consistently typed value.
+// Values can be created as part of other data structures.
+// The zero value for a Value returns nil from Load.
+// Once Store has been called, a Value must not be copied.
+type Value struct {
+	v interface{}
+}
+
+// ifaceWords is the internal representation of interface{}.
+type ifaceWords struct {
+	typ  unsafe.Pointer
+	data unsafe.Pointer
+}
+
+// Load returns the value set by the most recent Store.
+// It returns nil if there has been no call to Store for this Value.
+func (v *Value) Load() (x interface{}) {
+	vp := (*ifaceWords)(unsafe.Pointer(v))
+	typ := LoadPointer(&vp.typ)
+	if typ == nil || uintptr(typ) == ^uintptr(0) {
+		// First store not yet completed.
+		return nil
+	}
+	data := LoadPointer(&vp.data)
+	xp := (*ifaceWords)(unsafe.Pointer(&x))
+	xp.typ = typ
+	xp.data = data
+	return
+}
+
+// Store sets the value of the Value to x.
+// All calls to Store for a given Value must use values of the same concrete type.
+// Store of an inconsistent type panics, as does Store(nil).
+func (v *Value) Store(x interface{}) {
+	if x == nil {
+		panic("sync/atomic: store of nil value into Value")
+	}
+	vp := (*ifaceWords)(unsafe.Pointer(v))
+	xp := (*ifaceWords)(unsafe.Pointer(&x))
+	for {
+		typ := LoadPointer(&vp.typ)
+		if typ == nil {
+			// Attempt to start first store.
+			// Disable preemption so that other goroutines can use
+			// active spinning to wait for completion, and so that
+			// GC does not see the fake type accidentally.
+			runtime_procPin()
+			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(^uintptr(0))) {
+				runtime_procUnpin()
+				continue
+			}
+			// Complete first store.
+			StorePointer(&vp.data, xp.data)
+			StorePointer(&vp.typ, xp.typ)
+			runtime_procUnpin()
+			return
+		}
+		if uintptr(typ) == ^uintptr(0) {
+			// First store in progress. Wait.
+			// Since we disable preemption around the first store,
+			// we can wait with active spinning.
+			continue
+		}
+		// First store completed. Check type and overwrite data.
+		if typ != xp.typ {
+			panic("sync/atomic: store of inconsistently typed value into Value")
+		}
+		StorePointer(&vp.data, xp.data)
+		return
+	}
+}
+
+// Disable/enable preemption, implemented in runtime.
+func runtime_procPin()
+func runtime_procUnpin()
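The two-phase Store above is forced by the shape of interface values: an interface{} is two machine words, and no single atomic instruction can write both. A standalone sketch of that layout, mirroring the unexported ifaceWords type (implementation-dependent, illustration only; words is an invented name):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// words mirrors ifaceWords: an interface{} is a (type pointer,
	// data pointer) pair. Store publishes the data word first and the
	// type word last, using all-ones as an "in progress" sentinel.
	type words struct {
		typ  unsafe.Pointer
		data unsafe.Pointer
	}

	func main() {
		var x interface{} = 42
		w := (*words)(unsafe.Pointer(&x))
		fmt.Printf("typ=%p data=%p\n", w.typ, w.data) // two separate words
	}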
"sync/atomic" + "testing" + "time" +) + +func TestValue(t *testing.T) { + var v Value + if v.Load() != nil { + t.Fatal("initial Value is not nil") + } + v.Store(42) + x := v.Load() + if xx, ok := x.(int); !ok || xx != 42 { + t.Fatalf("wrong value: got %+v, want 42", x) + } + v.Store(84) + x = v.Load() + if xx, ok := x.(int); !ok || xx != 84 { + t.Fatalf("wrong value: got %+v, want 84", x) + } +} + +func TestValueLarge(t *testing.T) { + var v Value + v.Store("foo") + x := v.Load() + if xx, ok := x.(string); !ok || xx != "foo" { + t.Fatalf("wrong value: got %+v, want foo", x) + } + v.Store("barbaz") + x = v.Load() + if xx, ok := x.(string); !ok || xx != "barbaz" { + t.Fatalf("wrong value: got %+v, want barbaz", x) + } +} + +func TestValuePanic(t *testing.T) { + const nilErr = "sync/atomic: store of nil value into Value" + const badErr = "sync/atomic: store of inconsistently typed value into Value" + var v Value + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() + v.Store(42) + func() { + defer func() { + err := recover() + if err != badErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr) + } + }() + v.Store("foo") + }() + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() +} + +func TestValueConcurrent(t *testing.T) { + tests := [][]interface{}{ + {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)}, + {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)}, + {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)}, + {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)}, + } + p := 4 * runtime.GOMAXPROCS(0) + for _, test := range tests { + var v Value + done := make(chan bool) + for i := 0; i < p; i++ { + go func() { + r := rand.New(rand.NewSource(rand.Int63())) + loop: + for j := 0; j < 1e5; j++ { + x := test[r.Intn(len(test))] + v.Store(x) + x = v.Load() + for _, x1 := range test { + if x == x1 { + continue loop + } + } + t.Logf("loaded unexpected value %+v, want %+v", x, test) + done <- false + } + done <- true + }() + } + for i := 0; i < p; i++ { + if !<-done { + t.FailNow() + } + } + } +} + +func BenchmarkValueRead(b *testing.B) { + var v Value + v.Store(new(int)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + x := v.Load().(*int) + if *x != 0 { + b.Fatalf("wrong value: got %v, want 0", *x) + } + } + }) +} + +// The following example shows how to use Value for periodic program config updates +// and propagation of the changes to worker goroutines. +func ExampleValue_config() { + var config Value // holds current server configuration + // Create initial config value and store into config. + config.Store(loadConfig()) + go func() { + // Reload config every 10 seconds + // and update config value with the new version. + for { + time.Sleep(10 * time.Second) + config.Store(loadConfig()) + } + }() + // Create worker goroutines that handle incoming requests + // using the latest config value. + for i := 0; i < 10; i++ { + go func() { + for r := range requests() { + c := config.Load() + // Handle request r using config c. 
+				_, _ = r, c
+			}
+		}()
+	}
+}
+
+func loadConfig() map[string]string {
+	return make(map[string]string)
+}
+
+func requests() chan int {
+	return make(chan int)
+}
+
+// The following example shows how to maintain a scalable, frequently read
+// but infrequently updated data structure using the copy-on-write idiom.
+func ExampleValue_readMostly() {
+	type Map map[string]string
+	var m Value
+	m.Store(make(Map))
+	var mu sync.Mutex // used only by writers
+	// read function can be used to read the data without further synchronization
+	read := func(key string) (val string) {
+		m1 := m.Load().(Map)
+		return m1[key]
+	}
+	// insert function can be used to update the data without further synchronization
+	insert := func(key, val string) {
+		mu.Lock() // synchronize with other potential writers
+		defer mu.Unlock()
+		m1 := m.Load().(Map) // load current value of the data structure
+		m2 := make(Map)      // create a new value
+		for k, v := range m1 {
+			m2[k] = v // copy all data from the current object to the new one
+		}
+		m2[key] = val // do the update that we need
+		m.Store(m2)   // atomically replace the current object with the new one
+		// At this point all new readers start working with the new version.
+		// The old version will be garbage collected once the existing readers
+		// (if any) are done with it.
+	}
+	_, _ = read, insert
+}