| author | Ondřej Surý <ondrej@sury.org> | 2011-09-13 13:13:40 +0200 |
|---|---|---|
| committer | Ondřej Surý <ondrej@sury.org> | 2011-09-13 13:13:40 +0200 |
| commit | 5ff4c17907d5b19510a62e08fd8d3b11e62b431d (patch) | |
| tree | c0650497e988f47be9c6f2324fa692a52dea82e1 /src/pkg/sync/atomic | |
| parent | 80f18fc933cf3f3e829c5455a1023d69f7b86e52 (diff) | |
| download | golang-5ff4c17907d5b19510a62e08fd8d3b11e62b431d.tar.gz | |
Imported Upstream version 60 (tag: upstream/60)
Diffstat (limited to 'src/pkg/sync/atomic')
| -rw-r--r-- | src/pkg/sync/atomic/Makefile | 18 |
|---|---|---|
| -rw-r--r-- | src/pkg/sync/atomic/asm_386.s | 96 |
| -rw-r--r-- | src/pkg/sync/atomic/asm_amd64.s | 69 |
| -rw-r--r-- | src/pkg/sync/atomic/asm_arm.s | 122 |
| -rw-r--r-- | src/pkg/sync/atomic/asm_linux_arm.s | 98 |
| -rw-r--r-- | src/pkg/sync/atomic/atomic_test.go | 641 |
| -rw-r--r-- | src/pkg/sync/atomic/doc.go | 68 |
7 files changed, 1112 insertions, 0 deletions
diff --git a/src/pkg/sync/atomic/Makefile b/src/pkg/sync/atomic/Makefile
new file mode 100644
index 000000000..38d8998c0
--- /dev/null
+++ b/src/pkg/sync/atomic/Makefile
@@ -0,0 +1,18 @@
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=sync/atomic
+GOFILES=\
+	doc.go\
+
+OFILES=\
+	asm_$(GOARCH).$O\
+
+ifeq ($(GOARCH),arm)
+OFILES+=asm_$(GOOS)_$(GOARCH).$O
+endif
+
+include ../../../Make.pkg
diff --git a/src/pkg/sync/atomic/asm_386.s b/src/pkg/sync/atomic/asm_386.s
new file mode 100644
index 000000000..914d2feeb
--- /dev/null
+++ b/src/pkg/sync/atomic/asm_386.s
@@ -0,0 +1,96 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT ·CompareAndSwapInt32(SB),7,$0
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),7,$0
+	MOVL	valptr+0(FP), BP
+	MOVL	old+4(FP), AX
+	MOVL	new+8(FP), CX
+	// CMPXCHGL was introduced on the 486.
+	LOCK
+	CMPXCHGL	CX, 0(BP)
+	SETEQ	ret+12(FP)
+	RET
+
+TEXT ·CompareAndSwapUintptr(SB),7,$0
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapInt64(SB),7,$0
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),7,$0
+	MOVL	valptr+0(FP), BP
+	MOVL	oldlo+4(FP), AX
+	MOVL	oldhi+8(FP), DX
+	MOVL	newlo+12(FP), BX
+	MOVL	newhi+16(FP), CX
+	// CMPXCHG8B was introduced on the Pentium.
+	LOCK
+	CMPXCHG8B	0(BP)
+	SETEQ	ret+20(FP)
+	RET
+
+TEXT ·AddInt32(SB),7,$0
+	JMP	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),7,$0
+	MOVL	valptr+0(FP), BP
+	MOVL	delta+4(FP), AX
+	MOVL	AX, CX
+	// XADD was introduced on the 486.
+	LOCK
+	XADDL	AX, 0(BP)
+	ADDL	AX, CX
+	MOVL	CX, ret+8(FP)
+	RET
+
+TEXT ·AddUintptr(SB),7,$0
+	JMP	·AddUint32(SB)
+
+TEXT ·AddInt64(SB),7,$0
+	JMP	·AddUint64(SB)
+
+TEXT ·AddUint64(SB),7,$0
+	// no XADDQ so use CMPXCHG8B loop
+	MOVL	valptr+0(FP), BP
+	// DI:SI = delta
+	MOVL	deltalo+4(FP), SI
+	MOVL	deltahi+8(FP), DI
+	// DX:AX = *valptr
+	MOVL	0(BP), AX
+	MOVL	4(BP), DX
+addloop:
+	// CX:BX = DX:AX (*valptr) + DI:SI (delta)
+	MOVL	AX, BX
+	MOVL	DX, CX
+	ADDL	SI, BX
+	ADCL	DI, CX
+
+	// if *valptr == DX:AX {
+	//	*valptr = CX:BX
+	// } else {
+	//	DX:AX = *valptr
+	// }
+	// all in one instruction
+	LOCK
+	CMPXCHG8B	0(BP)
+
+	JNZ	addloop
+
+	// success
+	// return CX:BX
+	MOVL	BX, retlo+12(FP)
+	MOVL	CX, rethi+16(FP)
+	RET
+
+TEXT ·LoadInt32(SB),7,$0
+	JMP	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),7,$0
+	MOVL	addrptr+0(FP), AX
+	MOVL	0(AX), AX
+	MOVL	AX, ret+4(FP)
+	RET
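The 386 port has no 64-bit XADD, so AddUint64 above loops on LOCK CMPXCHG8B: compute old+delta, then publish it only if *valptr still holds old. A Go-level sketch of the same retry shape (illustrative only; the function names are this package's, but the real 386 implementation is the assembly above):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// addUint64ViaCAS mirrors the CMPXCHG8B loop in asm_386.s: a torn or
	// stale read of *val merely makes the CAS fail, and the loop retries.
	func addUint64ViaCAS(val *uint64, delta uint64) (new uint64) {
		for {
			old := *val       // DX:AX = *valptr (two 32-bit loads in the asm)
			new = old + delta // CX:BX = old + delta (ADDL/ADCL)
			if atomic.CompareAndSwapUint64(val, old, new) { // LOCK CMPXCHG8B
				return new
			}
			// Another writer got in first; reload and retry (JNZ addloop).
		}
	}

	func main() {
		var x uint64 = 40
		fmt.Println(addUint64ViaCAS(&x, 2)) // 42
	}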
diff --git a/src/pkg/sync/atomic/asm_amd64.s b/src/pkg/sync/atomic/asm_amd64.s
new file mode 100644
index 000000000..428295063
--- /dev/null
+++ b/src/pkg/sync/atomic/asm_amd64.s
@@ -0,0 +1,69 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT ·CompareAndSwapInt32(SB),7,$0
+	JMP	·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),7,$0
+	MOVQ	valptr+0(FP), BP
+	MOVL	old+8(FP), AX
+	MOVL	new+12(FP), CX
+	LOCK
+	CMPXCHGL	CX, 0(BP)
+	SETEQ	ret+16(FP)
+	RET
+
+TEXT ·CompareAndSwapUintptr(SB),7,$0
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapInt64(SB),7,$0
+	JMP	·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),7,$0
+	MOVQ	valptr+0(FP), BP
+	MOVQ	old+8(FP), AX
+	MOVQ	new+16(FP), CX
+	LOCK
+	CMPXCHGQ	CX, 0(BP)
+	SETEQ	ret+24(FP)
+	RET
+
+TEXT ·AddInt32(SB),7,$0
+	JMP	·AddUint32(SB)
+
+TEXT ·AddUint32(SB),7,$0
+	MOVQ	valptr+0(FP), BP
+	MOVL	delta+8(FP), AX
+	MOVL	AX, CX
+	LOCK
+	XADDL	AX, 0(BP)
+	ADDL	AX, CX
+	MOVL	CX, ret+16(FP)
+	RET
+
+TEXT ·AddUintptr(SB),7,$0
+	JMP	·AddUint64(SB)
+
+TEXT ·AddInt64(SB),7,$0
+	JMP	·AddUint64(SB)
+
+TEXT ·AddUint64(SB),7,$0
+	MOVQ	valptr+0(FP), BP
+	MOVQ	delta+8(FP), AX
+	MOVQ	AX, CX
+	LOCK
+	XADDQ	AX, 0(BP)
+	ADDQ	AX, CX
+	MOVQ	CX, ret+16(FP)
+	RET
+
+TEXT ·LoadInt32(SB),7,$0
+	JMP	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),7,$0
+	MOVQ	addrptr+0(FP), AX
+	MOVL	0(AX), AX
+	MOVL	AX, ret+8(FP)
+	RET
+
diff --git a/src/pkg/sync/atomic/asm_arm.s b/src/pkg/sync/atomic/asm_arm.s
new file mode 100644
index 000000000..95e2f5be4
--- /dev/null
+++ b/src/pkg/sync/atomic/asm_arm.s
@@ -0,0 +1,122 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// ARM atomic operations, for use by asm_$(GOOS)_arm.s.
+
+TEXT ·armCompareAndSwapUint32(SB),7,$0
+	MOVW	valptr+0(FP), R1
+	MOVW	old+4(FP), R2
+	MOVW	new+8(FP), R3
+casloop:
+	// LDREX and STREX were introduced in ARM 6.
+	LDREX	(R1), R0
+	CMP	R0, R2
+	BNE	casfail
+	STREX	R3, (R1), R0
+	CMP	$0, R0
+	BNE	casloop
+	MOVW	$1, R0
+	MOVBU	R0, ret+12(FP)
+	RET
+casfail:
+	MOVW	$0, R0
+	MOVBU	R0, ret+12(FP)
+	RET
+
+TEXT ·armCompareAndSwapUint64(SB),7,$0
+	BL	fastCheck64<>(SB)
+	MOVW	valptr+0(FP), R1
+	MOVW	oldlo+4(FP), R2
+	MOVW	oldhi+8(FP), R3
+	MOVW	newlo+12(FP), R4
+	MOVW	newhi+16(FP), R5
+cas64loop:
+	// LDREXD and STREXD were introduced in ARM 11.
+	LDREXD	(R1), R6	// loads R6 and R7
+	CMP	R2, R6
+	BNE	cas64fail
+	CMP	R3, R7
+	BNE	cas64fail
+	STREXD	R4, (R1), R0	// stores R4 and R5
+	CMP	$0, R0
+	BNE	cas64loop
+	MOVW	$1, R0
+	MOVBU	R0, ret+20(FP)
+	RET
+cas64fail:
+	MOVW	$0, R0
+	MOVBU	R0, ret+20(FP)
+	RET
+
+TEXT ·armAddUint32(SB),7,$0
+	MOVW	valptr+0(FP), R1
+	MOVW	delta+4(FP), R2
+addloop:
+	// LDREX and STREX were introduced in ARM 6.
+	LDREX	(R1), R3
+	ADD	R2, R3
+	STREX	R3, (R1), R0
+	CMP	$0, R0
+	BNE	addloop
+	MOVW	R3, ret+8(FP)
+	RET
+
+TEXT ·armAddUint64(SB),7,$0
+	BL	fastCheck64<>(SB)
+	MOVW	valptr+0(FP), R1
+	MOVW	deltalo+4(FP), R2
+	MOVW	deltahi+8(FP), R3
add64loop:
+	// LDREXD and STREXD were introduced in ARM 11.
+	LDREXD	(R1), R4	// loads R4 and R5
+	ADD.S	R2, R4
+	ADC	R3, R5
+	STREXD	R4, (R1), R0	// stores R4 and R5
+	CMP	$0, R0
+	BNE	add64loop
+	MOVW	R4, retlo+12(FP)
+	MOVW	R5, rethi+16(FP)
+	RET
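In armAddUint64's add64loop, the 64-bit addition is done in two 32-bit halves: ADD.S adds the low words and sets the carry flag, and ADC adds the high words plus that carry. A runnable sketch of that carry propagation (the helper name is ours, for illustration):

	package main

	import "fmt"

	// add64by32 adds (dhi:dlo) to (hi:lo) the way add64loop does:
	// low words first, then high words plus the carry out of the low add.
	func add64by32(lo, hi, dlo, dhi uint32) (rlo, rhi uint32) {
		rlo = lo + dlo // ADD.S: sets carry on unsigned overflow
		var carry uint32
		if rlo < lo { // carry out of the low word
			carry = 1
		}
		rhi = hi + dhi + carry // ADC: add with carry
		return
	}

	func main() {
		lo, hi := add64by32(0xffffffff, 0, 1, 0)
		fmt.Printf("%#x %#x\n", hi, lo) // 0x1 0x0: the carry reached the high word
	}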
+
+// Check for broken 64-bit LDREXD as found in QEMU.
+// LDREXD followed by immediate STREXD should succeed.
+// If it fails, try a few times just to be sure (maybe our thread got
+// rescheduled between the two instructions) and then panic.
+// A bug in some copies of QEMU makes STREXD never succeed,
+// which will make uses of the 64-bit atomic operations loop forever.
+// If things are working, set okLDREXD to avoid future checks.
+// https://bugs.launchpad.net/qemu/+bug/670883.
+TEXT check64<>(SB),7,$16
+	MOVW	$10, R1
+	// 8-aligned stack address scratch space.
+	MOVW	$8(R13), R5
+	AND	$~7, R5
loop:
+	LDREXD	(R5), R2
+	STREXD	R2, (R5), R0
+	CMP	$0, R0
+	BEQ	ok
+	SUB	$1, R1
+	CMP	$0, R1
+	BNE	loop
+	// Must be buggy QEMU.
+	BL	·panic64(SB)
ok:
+	RET
+
+// Fast, cached version of check. No frame, just MOVW CMP RET after first time.
+TEXT fastCheck64<>(SB),7,$-4
+	MOVW	ok64<>(SB), R0
+	CMP	$0, R0	// have we been here before?
+	RET.NE
+	B	slowCheck64<>(SB)
+
+TEXT slowCheck64<>(SB),7,$0
+	BL	check64<>(SB)
+	// Still here, must be okay.
+	MOVW	$1, R0
+	MOVW	R0, ok64<>(SB)
+	RET
+
+GLOBL ok64<>(SB), $4
diff --git a/src/pkg/sync/atomic/asm_linux_arm.s b/src/pkg/sync/atomic/asm_linux_arm.s
new file mode 100644
index 000000000..9ac411944
--- /dev/null
+++ b/src/pkg/sync/atomic/asm_linux_arm.s
@@ -0,0 +1,98 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Linux/ARM atomic operations.
+
+// Because there is so much variation in ARM devices,
+// the Linux kernel provides an appropriate compare-and-swap
+// implementation at address 0xffff0fc0. Caller sets:
+//	R0 = old value
+//	R1 = new value
+//	R2 = valptr
+//	LR = return address
+// The function returns with CS true if the swap happened.
+// http://lxr.linux.no/linux+v2.6.37.2/arch/arm/kernel/entry-armv.S#L850
+// On older kernels (before 2.6.24) the function can incorrectly
+// report a conflict, so we have to double-check the compare ourselves
+// and retry if necessary.
+//
+// http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b49c0f24cf6744a3f4fd09289fe7cade349dead5
+//
+TEXT cas<>(SB),7,$0
+	MOVW	$0xffff0fc0, PC
+
+TEXT ·CompareAndSwapInt32(SB),7,$0
+	B	·CompareAndSwapUint32(SB)
+
+// Implement using kernel cas for portability.
+TEXT ·CompareAndSwapUint32(SB),7,$0
+	MOVW	valptr+0(FP), R2
+	MOVW	old+4(FP), R0
casagain:
+	MOVW	new+8(FP), R1
+	BL	cas<>(SB)
+	BCC	cascheck
+	MOVW	$1, R0
casret:
+	MOVW	R0, ret+12(FP)
+	RET
cascheck:
+	// Kernel lies; double-check.
+	MOVW	valptr+0(FP), R2
+	MOVW	old+4(FP), R0
+	MOVW	0(R2), R3
+	CMP	R0, R3
+	BEQ	casagain
+	MOVW	$0, R0
+	B	casret
+
+TEXT ·CompareAndSwapUintptr(SB),7,$0
+	B	·CompareAndSwapUint32(SB)
+
+TEXT ·AddInt32(SB),7,$0
+	B	·AddUint32(SB)
+
+// Implement using kernel cas for portability.
+TEXT ·AddUint32(SB),7,$0
+	MOVW	valptr+0(FP), R2
+	MOVW	delta+4(FP), R4
addloop1:
+	MOVW	0(R2), R0
+	MOVW	R0, R1
+	ADD	R4, R1
+	BL	cas<>(SB)
+	BCC	addloop1
+	MOVW	R1, ret+8(FP)
+	RET
+
+TEXT ·AddUintptr(SB),7,$0
+	B	·AddUint32(SB)
+
+// The kernel provides no 64-bit compare-and-swap,
+// so use native ARM instructions, which will only work on
+// ARM 11 and later devices.
+TEXT ·CompareAndSwapInt64(SB),7,$0
+	B	·armCompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),7,$0
+	B	·armCompareAndSwapUint64(SB)
+
+TEXT ·AddInt64(SB),7,$0
+	B	·armAddUint64(SB)
+
+TEXT ·AddUint64(SB),7,$0
+	B	·armAddUint64(SB)
+
+TEXT ·LoadInt32(SB),7,$0
+	B	·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),7,$0
+	MOVW	addrptr+0(FP), R2
loadloop1:
+	MOVW	0(R2), R0
+	MOVW	R0, R1
+	BL	cas<>(SB)
+	BCC	loadloop1
+	MOVW	R1, val+4(FP)
+	RET
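LoadUint32 above is worth a note: the kernel helper provides only compare-and-swap, so the atomic load is built from an idempotent CAS — read the word, then swap it for itself; if the CAS succeeds, the value read was current at that instant. The same trick in plain Go (a sketch, assuming only a CAS primitive is available):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// loadViaCAS emulates an atomic load using only compare-and-swap,
	// mirroring loadloop1 in asm_linux_arm.s.
	func loadViaCAS(addr *uint32) uint32 {
		for {
			v := *addr // plain read; may already be stale
			// CAS v -> v succeeds only if *addr still holds v,
			// which makes v a consistent atomic snapshot.
			if atomic.CompareAndSwapUint32(addr, v, v) {
				return v
			}
		}
	}

	func main() {
		x := uint32(42)
		fmt.Println(loadViaCAS(&x)) // 42
	}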
diff --git a/src/pkg/sync/atomic/atomic_test.go b/src/pkg/sync/atomic/atomic_test.go
new file mode 100644
index 000000000..2229e58d0
--- /dev/null
+++ b/src/pkg/sync/atomic/atomic_test.go
@@ -0,0 +1,641 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+	"runtime"
+	. "sync/atomic"
+	"testing"
+	"unsafe"
+)
+
+// Tests of correct behavior, without contention.
+// (Does the function work as advertised?)
+//
+// Test that the Add functions add correctly.
+// Test that the CompareAndSwap functions actually
+// do the comparison and the swap correctly.
+//
+// The loop over power-of-two values is meant to
+// ensure that the operations apply to the full word size.
+// The struct fields x.before and x.after check that the
+// operations do not extend past the full word size.
+
+const (
+	magic32 = 0xdedbeef
+	magic64 = 0xdeddeadbeefbeef
+)
+
+// Do the 64-bit functions panic? If so, don't bother testing.
+var test64err = func() (err interface{}) {
+	defer func() {
+		err = recover()
+	}()
+	var x int64
+	AddInt64(&x, 1)
+	return nil
+}()
+
+func TestAddInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j int32
+	for delta := int32(1); delta+delta > delta; delta += delta {
+		k := AddInt32(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestAddUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j uint32
+	for delta := uint32(1); delta+delta > delta; delta += delta {
+		k := AddUint32(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestAddInt64(t *testing.T) {
+	if test64err != nil {
+		t.Logf("Skipping 64-bit tests: %v", test64err)
+		return
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j int64
+	for delta := int64(1); delta+delta > delta; delta += delta {
+		k := AddInt64(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, int64(magic64), int64(magic64))
+	}
+}
+
+func TestAddUint64(t *testing.T) {
+	if test64err != nil {
+		t.Logf("Skipping 64-bit tests: %v", test64err)
+		return
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j uint64
+	for delta := uint64(1); delta+delta > delta; delta += delta {
+		k := AddUint64(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
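The loop header `for delta := uint32(1); delta+delta > delta; delta += delta` used throughout these tests walks delta through every power of two and stops exactly when the doubling overflows: for uint32, 1<<31 + 1<<31 wraps to 0; for int32, 1<<30 + 1<<30 overflows to a negative value. A quick check of how many values each loop visits:

	package main

	import "fmt"

	func main() {
		nu := 0
		for delta := uint32(1); delta+delta > delta; delta += delta {
			nu++ // delta takes the values 1<<0 through 1<<30
		}
		ni := 0
		for delta := int32(1); delta+delta > delta; delta += delta {
			ni++ // stops one power earlier: 1<<30 + 1<<30 is negative
		}
		fmt.Println(nu, ni) // 31 30
	}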
+
+func TestAddUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	var j uintptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := AddUintptr(&x.i, delta)
+		j += delta
+		if x.i != j || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestCompareAndSwapInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	for val := int32(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapInt32(&x.i, val, val+1) {
+			t.Errorf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapInt32(&x.i, val, val+2) {
+			t.Errorf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestCompareAndSwapUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	for val := uint32(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapUint32(&x.i, val, val+1) {
+			t.Errorf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapUint32(&x.i, val, val+2) {
+			t.Errorf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestCompareAndSwapInt64(t *testing.T) {
+	if test64err != nil {
+		t.Logf("Skipping 64-bit tests: %v", test64err)
+		return
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	for val := int64(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapInt64(&x.i, val, val+1) {
+			t.Errorf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapInt64(&x.i, val, val+2) {
+			t.Errorf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestCompareAndSwapUint64(t *testing.T) {
+	if test64err != nil {
+		t.Logf("Skipping 64-bit tests: %v", test64err)
+		return
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	for val := uint64(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapUint64(&x.i, val, val+1) {
+			t.Errorf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapUint64(&x.i, val, val+2) {
+			t.Errorf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
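The success/failure contract these CompareAndSwap tests pin down is what makes arbitrary read-modify-write loops safe to build in user code. For example (an illustrative helper of ours, not part of the package), an atomic "raise to at least x" on top of CAS:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// maxInt32 atomically ensures *val >= x, retrying if another
	// goroutine changes *val between the load and the CAS.
	func maxInt32(val *int32, x int32) {
		for {
			old := atomic.LoadInt32(val)
			if old >= x {
				return // current value already at least x
			}
			if atomic.CompareAndSwapInt32(val, old, x) {
				return // we installed x
			}
			// lost a race; re-read and retry
		}
	}

	func main() {
		var v int32 = 3
		maxInt32(&v, 7)
		fmt.Println(v) // 7
	}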
+
+func TestCompareAndSwapUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	for val := uintptr(1); val+val > val; val += val {
+		x.i = val
+		if !CompareAndSwapUintptr(&x.i, val, val+1) {
+			t.Errorf("should have swapped %#x %#x", val, val+1)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+		x.i = val + 1
+		if CompareAndSwapUintptr(&x.i, val, val+2) {
+			t.Errorf("should not have swapped %#x %#x", val, val+2)
+		}
+		if x.i != val+1 {
+			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+		}
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestLoadInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	for delta := int32(1); delta+delta > delta; delta += delta {
+		k := LoadInt32(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i += delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestLoadUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	for delta := uint32(1); delta+delta > delta; delta += delta {
+		k := LoadUint32(&x.i)
+		if k != x.i {
+			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+		}
+		x.i += delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+// Tests of correct behavior, with contention.
+// (Is the function atomic?)
+//
+// For each function, we write a "hammer" function that repeatedly
+// uses the atomic operation to add 1 to a value. After running
+// multiple hammers in parallel, check that we end with the correct
+// total.
+
+var hammer32 = []struct {
+	name string
+	f    func(*uint32, int)
+}{
+	{"AddInt32", hammerAddInt32},
+	{"AddUint32", hammerAddUint32},
+	{"AddUintptr", hammerAddUintptr32},
+	{"CompareAndSwapInt32", hammerCompareAndSwapInt32},
+	{"CompareAndSwapUint32", hammerCompareAndSwapUint32},
+	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr32},
+}
+
+func init() {
+	var v uint64 = 1 << 50
+	if uintptr(v) != 0 {
+		// 64-bit system; clear uintptr tests
+		hammer32[2].f = nil
+		hammer32[5].f = nil
+	}
+}
+
+func hammerAddInt32(uval *uint32, count int) {
+	val := (*int32)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		AddInt32(val, 1)
+	}
+}
+
+func hammerAddUint32(val *uint32, count int) {
+	for i := 0; i < count; i++ {
+		AddUint32(val, 1)
+	}
+}
+
+func hammerAddUintptr32(uval *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	val := (*uintptr)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		AddUintptr(val, 1)
+	}
+}
+
+func hammerCompareAndSwapInt32(uval *uint32, count int) {
+	val := (*int32)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		for {
+			v := *val
+			if CompareAndSwapInt32(val, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapUint32(val *uint32, count int) {
+	for i := 0; i < count; i++ {
+		for {
+			v := *val
+			if CompareAndSwapUint32(val, v, v+1) {
+				break
+			}
+		}
+	}
+}
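The init function above detects the width of uintptr by truncation: converting 1<<50 keeps the bit on 64-bit systems and drops it on 32-bit ones, and the matching hammer entries are cleared so the unsafe.Pointer casts never outgrow the word being hammered. The check in isolation:

	package main

	import "fmt"

	func main() {
		var v uint64 = 1 << 50
		if uintptr(v) != 0 {
			fmt.Println("64-bit uintptr: the bit survived the conversion")
		} else {
			fmt.Println("32-bit uintptr: 1<<50 truncated to 0")
		}
	}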
+
+func hammerCompareAndSwapUintptr32(uval *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	val := (*uintptr)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		for {
+			v := *val
+			if CompareAndSwapUintptr(val, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func TestHammer32(t *testing.T) {
+	const p = 4
+	n := 100000
+	if testing.Short() {
+		n = 1000
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+	for _, tt := range hammer32 {
+		if tt.f == nil {
+			continue
+		}
+		c := make(chan int)
+		var val uint32
+		for i := 0; i < p; i++ {
+			go func() {
+				tt.f(&val, n)
+				c <- 1
+			}()
+		}
+		for i := 0; i < p; i++ {
+			<-c
+		}
+		if val != uint32(n)*p {
+			t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+		}
+	}
+}
+
+var hammer64 = []struct {
+	name string
+	f    func(*uint64, int)
+}{
+	{"AddInt64", hammerAddInt64},
+	{"AddUint64", hammerAddUint64},
+	{"AddUintptr", hammerAddUintptr64},
+	{"CompareAndSwapInt64", hammerCompareAndSwapInt64},
+	{"CompareAndSwapUint64", hammerCompareAndSwapUint64},
+	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr64},
+}
+
+func init() {
+	var v uint64 = 1 << 50
+	if uintptr(v) == 0 {
+		// 32-bit system; clear uintptr tests
+		hammer64[2].f = nil
+		hammer64[5].f = nil
+	}
+}
+
+func hammerAddInt64(uval *uint64, count int) {
+	val := (*int64)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		AddInt64(val, 1)
+	}
+}
+
+func hammerAddUint64(val *uint64, count int) {
+	for i := 0; i < count; i++ {
+		AddUint64(val, 1)
+	}
+}
+
+func hammerAddUintptr64(uval *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	val := (*uintptr)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		AddUintptr(val, 1)
+	}
+}
+
+func hammerCompareAndSwapInt64(uval *uint64, count int) {
+	val := (*int64)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		for {
+			v := *val
+			if CompareAndSwapInt64(val, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func hammerCompareAndSwapUint64(val *uint64, count int) {
+	for i := 0; i < count; i++ {
+		for {
+			v := *val
+			if CompareAndSwapUint64(val, v, v+1) {
+				break
+			}
+		}
+	}
+}
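TestHammer32 relies on a compact idiom: `defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))`. Deferred arguments are evaluated immediately, so the inner call sets the new value and returns the previous one, which the deferred outer call restores when the test returns. The same idiom as a reusable sketch (the helper name is ours):

	package main

	import (
		"fmt"
		"runtime"
	)

	// withProcs runs f with GOMAXPROCS set to p, restoring the previous
	// value afterwards — the save/restore trick the hammer tests use.
	func withProcs(p int, f func()) {
		defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
		f()
	}

	func main() {
		withProcs(4, func() {
			// GOMAXPROCS(0) queries without changing the setting.
			fmt.Println("GOMAXPROCS while running:", runtime.GOMAXPROCS(0))
		})
		fmt.Println("GOMAXPROCS after:", runtime.GOMAXPROCS(0))
	}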
+
+func hammerCompareAndSwapUintptr64(uval *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	val := (*uintptr)(unsafe.Pointer(uval))
+	for i := 0; i < count; i++ {
+		for {
+			v := *val
+			if CompareAndSwapUintptr(val, v, v+1) {
+				break
+			}
+		}
+	}
+}
+
+func TestHammer64(t *testing.T) {
+	if test64err != nil {
+		t.Logf("Skipping 64-bit tests: %v", test64err)
+		return
+	}
+	const p = 4
+	n := 100000
+	if testing.Short() {
+		n = 1000
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+	for _, tt := range hammer64 {
+		if tt.f == nil {
+			continue
+		}
+		c := make(chan int)
+		var val uint64
+		for i := 0; i < p; i++ {
+			go func() {
+				tt.f(&val, n)
+				c <- 1
+			}()
+		}
+		for i := 0; i < p; i++ {
+			<-c
+		}
+		if val != uint64(n)*p {
+			t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+		}
+	}
+}
+
+func hammerLoadInt32(t *testing.T, uval *uint32) {
+	val := (*int32)(unsafe.Pointer(uval))
+	for {
+		v := LoadInt32(val)
+		vlo := v & ((1 << 16) - 1)
+		vhi := v >> 16
+		if vlo != vhi {
+			t.Fatalf("LoadInt32: %#x != %#x", vlo, vhi)
+		}
+		new := v + 1 + 1<<16
+		if vlo == 1e4 {
+			new = 0
+		}
+		if CompareAndSwapInt32(val, v, new) {
+			break
+		}
+	}
+}
+
+func hammerLoadUint32(t *testing.T, val *uint32) {
+	for {
+		v := LoadUint32(val)
+		vlo := v & ((1 << 16) - 1)
+		vhi := v >> 16
+		if vlo != vhi {
+			t.Fatalf("LoadUint32: %#x != %#x", vlo, vhi)
+		}
+		new := v + 1 + 1<<16
+		if vlo == 1e4 {
+			new = 0
+		}
+		if CompareAndSwapUint32(val, v, new) {
+			break
+		}
+	}
+}
+
+func TestHammerLoad(t *testing.T) {
+	tests := [...]func(*testing.T, *uint32){hammerLoadInt32, hammerLoadUint32}
+	n := 100000
+	if testing.Short() {
+		n = 10000
+	}
+	const procs = 8
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
+	for _, tt := range tests {
+		c := make(chan int)
+		var val uint32
+		for p := 0; p < procs; p++ {
+			go func() {
+				for i := 0; i < n; i++ {
+					tt(t, &val)
+				}
+				c <- 1
+			}()
+		}
+		for p := 0; p < procs; p++ {
+			<-c
+		}
+	}
+}
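The hammerLoad functions deserve a note: every successful CAS adds 1 to both 16-bit halves of the word (v + 1 + 1<<16), so the halves advance in lockstep and any value returned by a correct Load must satisfy vlo == vhi; a Load that returned a stale or inconsistent snapshot could surface a mismatch under the eight-goroutine hammering. The half-extraction arithmetic in isolation:

	package main

	import "fmt"

	func main() {
		v := uint32(7 | 7<<16) // both halves hold 7: the invariant the test maintains
		v = v + 1 + 1<<16      // the update hammerLoadUint32 applies via CAS
		vlo := v & ((1 << 16) - 1)
		vhi := v >> 16
		fmt.Println(vlo, vhi, vlo == vhi) // 8 8 true
	}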
diff --git a/src/pkg/sync/atomic/doc.go b/src/pkg/sync/atomic/doc.go
new file mode 100644
index 000000000..b35eb539c
--- /dev/null
+++ b/src/pkg/sync/atomic/doc.go
@@ -0,0 +1,68 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atomic provides low-level atomic memory primitives
+// useful for implementing synchronization algorithms.
+//
+// These functions require great care to be used correctly.
+// Except for special, low-level applications, synchronization is better
+// done with channels or the facilities of the sync package.
+// Share memory by communicating;
+// don't communicate by sharing memory.
+//
+// The compare-and-swap operation, implemented by the CompareAndSwapT
+// functions, is the atomic equivalent of:
+//
+//	if *val == old {
+//		*val = new
+//		return true
+//	}
+//	return false
+//
+package atomic

+// BUG(rsc): On ARM, the 64-bit functions use instructions unavailable before ARM 11.
+//
+// On x86-32, the 64-bit functions use instructions unavailable before the Pentium.
+
+// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
+func CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)
+
+// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
+func CompareAndSwapInt64(val *int64, old, new int64) (swapped bool)
+
+// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
+func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool)
+
+// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
+func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)
+
+// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
+func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)
+
+// AddInt32 atomically adds delta to *val and returns the new value.
+func AddInt32(val *int32, delta int32) (new int32)
+
+// AddUint32 atomically adds delta to *val and returns the new value.
+func AddUint32(val *uint32, delta uint32) (new uint32)
+
+// AddInt64 atomically adds delta to *val and returns the new value.
+func AddInt64(val *int64, delta int64) (new int64)
+
+// AddUint64 atomically adds delta to *val and returns the new value.
+func AddUint64(val *uint64, delta uint64) (new uint64)
+
+// AddUintptr atomically adds delta to *val and returns the new value.
+func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
+
+// LoadInt32 atomically loads *addr.
+func LoadInt32(addr *int32) (val int32)
+
+// LoadUint32 atomically loads *addr.
+func LoadUint32(addr *uint32) (val uint32)
+
+// Helper for ARM. Linker will discard on other systems
+func panic64() {
+	panic("sync/atomic: broken 64-bit atomic operations (buggy QEMU)")
+}
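Taken together, the API declared above is small — Add, CompareAndSwap, and the two 32-bit Loads; this version of the package has no Store functions and no 64-bit loads. A minimal concurrent-counter usage sketch against that API:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		var counter int32
		var wg sync.WaitGroup
		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				atomic.AddInt32(&counter, 1) // atomic increment; no mutex needed
			}()
		}
		wg.Wait()
		fmt.Println(atomic.LoadInt32(&counter)) // 10
	}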