Diffstat (limited to 'src/pkg/sync')
-rw-r--r--  src/pkg/sync/Makefile               |  15
-rw-r--r--  src/pkg/sync/atomic/Makefile        |  18
-rw-r--r--  src/pkg/sync/atomic/asm_386.s       |  87
-rw-r--r--  src/pkg/sync/atomic/asm_amd64.s     |  59
-rw-r--r--  src/pkg/sync/atomic/asm_arm.s       | 122
-rw-r--r--  src/pkg/sync/atomic/asm_linux_arm.s |  85
-rw-r--r--  src/pkg/sync/atomic/atomic_test.go  | 539
-rw-r--r--  src/pkg/sync/atomic/doc.go          |  62
-rw-r--r--  src/pkg/sync/cond.go                | 113
-rw-r--r--  src/pkg/sync/cond_test.go           | 126
-rw-r--r--  src/pkg/sync/mutex.go               |  95
-rw-r--r--  src/pkg/sync/mutex_test.go          | 167
-rw-r--r--  src/pkg/sync/once.go                |  43
-rw-r--r--  src/pkg/sync/once_test.go           |  62
-rw-r--r--  src/pkg/sync/rwmutex.go             |  86
-rw-r--r--  src/pkg/sync/rwmutex_test.go        | 156
-rw-r--r--  src/pkg/sync/waitgroup.go           |  86
-rw-r--r--  src/pkg/sync/waitgroup_test.go      |  60
18 files changed, 0 insertions(+), 1981 deletions(-)
diff --git a/src/pkg/sync/Makefile b/src/pkg/sync/Makefile
deleted file mode 100644
index e8a766226..000000000
--- a/src/pkg/sync/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../Make.inc
-
-TARG=sync
-GOFILES=\
-	cond.go\
-	mutex.go\
-	once.go \
-	rwmutex.go\
-	waitgroup.go\
-
-include ../../Make.pkg
diff --git a/src/pkg/sync/atomic/Makefile b/src/pkg/sync/atomic/Makefile
deleted file mode 100644
index 38d8998c0..000000000
--- a/src/pkg/sync/atomic/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2011 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=sync/atomic
-GOFILES=\
-	doc.go\
-
-OFILES=\
-	asm_$(GOARCH).$O\
-
-ifeq ($(GOARCH),arm)
-OFILES+=asm_$(GOOS)_$(GOARCH).$O
-endif
-
-include ../../../Make.pkg
diff --git a/src/pkg/sync/atomic/asm_386.s b/src/pkg/sync/atomic/asm_386.s
deleted file mode 100644
index a9360efae..000000000
--- a/src/pkg/sync/atomic/asm_386.s
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-TEXT ·CompareAndSwapInt32(SB),7,$0
-	JMP ·CompareAndSwapUint32(SB)
-
-TEXT ·CompareAndSwapUint32(SB),7,$0
-	MOVL valptr+0(FP), BP
-	MOVL old+4(FP), AX
-	MOVL new+8(FP), CX
-	// CMPXCHGL was introduced on the 486.
-	LOCK
-	CMPXCHGL CX, 0(BP)
-	SETEQ ret+12(FP)
-	RET
-
-TEXT ·CompareAndSwapUintptr(SB),7,$0
-	JMP ·CompareAndSwapUint32(SB)
-
-TEXT ·CompareAndSwapInt64(SB),7,$0
-	JMP ·CompareAndSwapUint64(SB)
-
-TEXT ·CompareAndSwapUint64(SB),7,$0
-	MOVL valptr+0(FP), BP
-	MOVL oldlo+4(FP), AX
-	MOVL oldhi+8(FP), DX
-	MOVL newlo+12(FP), BX
-	MOVL newhi+16(FP), CX
-	// CMPXCHG8B was introduced on the Pentium.
-	LOCK
-	CMPXCHG8B 0(BP)
-	SETEQ ret+20(FP)
-	RET
-
-TEXT ·AddInt32(SB),7,$0
-	JMP ·AddUint32(SB)
-
-TEXT ·AddUint32(SB),7,$0
-	MOVL valptr+0(FP), BP
-	MOVL delta+4(FP), AX
-	MOVL AX, CX
-	// XADD was introduced on the 486.
-	LOCK
-	XADDL AX, 0(BP)
-	ADDL AX, CX
-	MOVL CX, ret+8(FP)
-	RET
-
-TEXT ·AddUintptr(SB),7,$0
-	JMP ·AddUint32(SB)
-
-TEXT ·AddInt64(SB),7,$0
-	JMP ·AddUint64(SB)
-
-TEXT ·AddUint64(SB),7,$0
-	// no XADDQ so use CMPXCHG8B loop
-	MOVL valptr+0(FP), BP
-	// DI:SI = delta
-	MOVL deltalo+4(FP), SI
-	MOVL deltahi+8(FP), DI
-	// DX:AX = *valptr
-	MOVL 0(BP), AX
-	MOVL 4(BP), DX
-addloop:
-	// CX:BX = DX:AX (*valptr) + DI:SI (delta)
-	MOVL AX, BX
-	MOVL DX, CX
-	ADDL SI, BX
-	ADCL DI, CX
-
-	// if *valptr == DX:AX {
-	//	*valptr = CX:BX
-	// } else {
-	//	DX:AX = *valptr
-	// }
-	// all in one instruction
-	LOCK
-	CMPXCHG8B 0(BP)
-
-	JNZ addloop
-
-	// success
-	// return CX:BX
-	MOVL BX, retlo+12(FP)
-	MOVL CX, rethi+16(FP)
-	RET
diff --git a/src/pkg/sync/atomic/asm_amd64.s b/src/pkg/sync/atomic/asm_amd64.s
deleted file mode 100644
index a260902a7..000000000
--- a/src/pkg/sync/atomic/asm_amd64.s
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-TEXT ·CompareAndSwapInt32(SB),7,$0
-	JMP ·CompareAndSwapUint32(SB)
-
-TEXT ·CompareAndSwapUint32(SB),7,$0
-	MOVQ valptr+0(FP), BP
-	MOVL old+8(FP), AX
-	MOVL new+12(FP), CX
-	LOCK
-	CMPXCHGL CX, 0(BP)
-	SETEQ ret+16(FP)
-	RET
-
-TEXT ·CompareAndSwapUintptr(SB),7,$0
-	JMP ·CompareAndSwapUint64(SB)
-
-TEXT ·CompareAndSwapInt64(SB),7,$0
-	JMP ·CompareAndSwapUint64(SB)
-
-TEXT ·CompareAndSwapUint64(SB),7,$0
-	MOVQ valptr+0(FP), BP
-	MOVQ old+8(FP), AX
-	MOVQ new+16(FP), CX
-	LOCK
-	CMPXCHGQ CX, 0(BP)
-	SETEQ ret+24(FP)
-	RET
-
-TEXT ·AddInt32(SB),7,$0
-	JMP ·AddUint32(SB)
-
-TEXT ·AddUint32(SB),7,$0
-	MOVQ valptr+0(FP), BP
-	MOVL delta+8(FP), AX
-	MOVL AX, CX
-	LOCK
-	XADDL AX, 0(BP)
-	ADDL AX, CX
-	MOVL CX, ret+16(FP)
-	RET
-
-TEXT ·AddUintptr(SB),7,$0
-	JMP ·AddUint64(SB)
-
-TEXT ·AddInt64(SB),7,$0
-	JMP ·AddUint64(SB)
-
-TEXT ·AddUint64(SB),7,$0
-	MOVQ valptr+0(FP), BP
-	MOVQ delta+8(FP), AX
-	MOVQ AX, CX
-	LOCK
-	XADDQ AX, 0(BP)
-	ADDQ AX, CX
-	MOVQ CX, ret+16(FP)
-	RET
diff --git a/src/pkg/sync/atomic/asm_arm.s b/src/pkg/sync/atomic/asm_arm.s
deleted file mode 100644
index 95e2f5be4..000000000
--- a/src/pkg/sync/atomic/asm_arm.s
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// ARM atomic operations, for use by asm_$(GOOS)_arm.s.
-
-TEXT ·armCompareAndSwapUint32(SB),7,$0
-	MOVW valptr+0(FP), R1
-	MOVW old+4(FP), R2
-	MOVW new+8(FP), R3
-casloop:
-	// LDREX and STREX were introduced in ARM 6.
-	LDREX (R1), R0
-	CMP R0, R2
-	BNE casfail
-	STREX R3, (R1), R0
-	CMP $0, R0
-	BNE casloop
-	MOVW $1, R0
-	MOVBU R0, ret+12(FP)
-	RET
-casfail:
-	MOVW $0, R0
-	MOVBU R0, ret+12(FP)
-	RET
-
-TEXT ·armCompareAndSwapUint64(SB),7,$0
-	BL fastCheck64<>(SB)
-	MOVW valptr+0(FP), R1
-	MOVW oldlo+4(FP), R2
-	MOVW oldhi+8(FP), R3
-	MOVW newlo+12(FP), R4
-	MOVW newhi+16(FP), R5
-cas64loop:
-	// LDREXD and STREXD were introduced in ARM 11.
-	LDREXD (R1), R6 // loads R6 and R7
-	CMP R2, R6
-	BNE cas64fail
-	CMP R3, R7
-	BNE cas64fail
-	STREXD R4, (R1), R0 // stores R4 and R5
-	CMP $0, R0
-	BNE cas64loop
-	MOVW $1, R0
-	MOVBU R0, ret+20(FP)
-	RET
-cas64fail:
-	MOVW $0, R0
-	MOVBU R0, ret+20(FP)
-	RET
-
-TEXT ·armAddUint32(SB),7,$0
-	MOVW valptr+0(FP), R1
-	MOVW delta+4(FP), R2
-addloop:
-	// LDREX and STREX were introduced in ARM 6.
-	LDREX (R1), R3
-	ADD R2, R3
-	STREX R3, (R1), R0
-	CMP $0, R0
-	BNE addloop
-	MOVW R3, ret+8(FP)
-	RET
-
-TEXT ·armAddUint64(SB),7,$0
-	BL fastCheck64<>(SB)
-	MOVW valptr+0(FP), R1
-	MOVW deltalo+4(FP), R2
-	MOVW deltahi+8(FP), R3
-add64loop:
-	// LDREXD and STREXD were introduced in ARM 11.
-	LDREXD (R1), R4 // loads R4 and R5
-	ADD.S R2, R4
-	ADC R3, R5
-	STREXD R4, (R1), R0 // stores R4 and R5
-	CMP $0, R0
-	BNE add64loop
-	MOVW R4, retlo+12(FP)
-	MOVW R5, rethi+16(FP)
-	RET
-
-// Check for broken 64-bit LDREXD as found in QEMU.
-// LDREXD followed by immediate STREXD should succeed.
-// If it fails, try a few times just to be sure (maybe our thread got
-// rescheduled between the two instructions) and then panic.
-// A bug in some copies of QEMU makes STREXD never succeed,
-// which will make uses of the 64-bit atomic operations loop forever.
-// If things are working, set okLDREXD to avoid future checks.
-// https://bugs.launchpad.net/qemu/+bug/670883.
-TEXT check64<>(SB),7,$16
-	MOVW $10, R1
-	// 8-aligned stack address scratch space.
-	MOVW $8(R13), R5
-	AND $~7, R5
-loop:
-	LDREXD (R5), R2
-	STREXD R2, (R5), R0
-	CMP $0, R0
-	BEQ ok
-	SUB $1, R1
-	CMP $0, R1
-	BNE loop
-	// Must be buggy QEMU.
-	BL ·panic64(SB)
-ok:
-	RET
-
-// Fast, cached version of check. No frame, just MOVW CMP RET after first time.
-TEXT fastCheck64<>(SB),7,$-4
-	MOVW ok64<>(SB), R0
-	CMP $0, R0 // have we been here before?
-	RET.NE
-	B slowCheck64<>(SB)
-
-TEXT slowCheck64<>(SB),7,$0
-	BL check64<>(SB)
-	// Still here, must be okay.
-	MOVW $1, R0
-	MOVW R0, ok64<>(SB)
-	RET
-
-GLOBL ok64<>(SB), $4
diff --git a/src/pkg/sync/atomic/asm_linux_arm.s b/src/pkg/sync/atomic/asm_linux_arm.s
deleted file mode 100644
index 72f8d746b..000000000
--- a/src/pkg/sync/atomic/asm_linux_arm.s
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Linux/ARM atomic operations.
-
-// Because there is so much variation in ARM devices,
-// the Linux kernel provides an appropriate compare-and-swap
-// implementation at address 0xffff0fc0. Caller sets:
-//	R0 = old value
-//	R1 = new value
-//	R2 = valptr
-//	LR = return address
-// The function returns with CS true if the swap happened.
-// http://lxr.linux.no/linux+v2.6.37.2/arch/arm/kernel/entry-armv.S#L850
-// On older kernels (before 2.6.24) the function can incorrectly
-// report a conflict, so we have to double-check the compare ourselves
-// and retry if necessary.
-//
-// http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b49c0f24cf6744a3f4fd09289fe7cade349dead5
-//
-TEXT cas<>(SB),7,$0
-	MOVW $0xffff0fc0, PC
-
-TEXT ·CompareAndSwapInt32(SB),7,$0
-	B ·CompareAndSwapUint32(SB)
-
-// Implement using kernel cas for portability.
-TEXT ·CompareAndSwapUint32(SB),7,$0
-	MOVW valptr+0(FP), R2
-	MOVW old+4(FP), R0
-casagain:
-	MOVW new+8(FP), R1
-	BL cas<>(SB)
-	BCC cascheck
-	MOVW $1, R0
-casret:
-	MOVW R0, ret+12(FP)
-	RET
-cascheck:
-	// Kernel lies; double-check.
-	MOVW valptr+0(FP), R2
-	MOVW old+4(FP), R0
-	MOVW 0(R2), R3
-	CMP R0, R3
-	BEQ casagain
-	MOVW $0, R0
-	B casret
-
-TEXT ·CompareAndSwapUintptr(SB),7,$0
-	B ·CompareAndSwapUint32(SB)
-
-TEXT ·AddInt32(SB),7,$0
-	B ·AddUint32(SB)
-
-// Implement using kernel cas for portability.
-TEXT ·AddUint32(SB),7,$0
-	MOVW valptr+0(FP), R2
-	MOVW delta+4(FP), R4
-addloop1:
-	MOVW 0(R2), R0
-	MOVW R0, R1
-	ADD R4, R1
-	BL cas<>(SB)
-	BCC addloop1
-	MOVW R1, ret+8(FP)
-	RET
-
-TEXT ·AddUintptr(SB),7,$0
-	B ·AddUint32(SB)
-
-// The kernel provides no 64-bit compare-and-swap,
-// so use native ARM instructions, which will only work on
-// ARM 11 and later devices.
-TEXT ·CompareAndSwapInt64(SB),7,$0
-	B ·armCompareAndSwapUint64(SB)
-
-TEXT ·CompareAndSwapUint64(SB),7,$0
-	B ·armCompareAndSwapUint64(SB)
-
-TEXT ·AddInt64(SB),7,$0
-	B ·armAddUint64(SB)
-
-TEXT ·AddUint64(SB),7,$0
-	B ·armAddUint64(SB)
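For reference, the Linux/ARM AddUint32 above is built entirely out of the kernel cas helper: read the current value, add, and retry until the compare-and-swap succeeds. The same loop can be sketched against the package's public Go API (addViaCAS is an illustrative name, not part of the deleted sources; the plain read mirrors the asm's MOVW 0(R2), R0 and the hammer tests further down):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// addViaCAS emulates AddUint32 using only CompareAndSwapUint32,
	// mirroring the retry loop in asm_linux_arm.s.
	func addViaCAS(val *uint32, delta uint32) uint32 {
		for {
			old := *val // plain read; the CAS below detects races
			if atomic.CompareAndSwapUint32(val, old, old+delta) {
				return old + delta
			}
		}
	}

	func main() {
		var n uint32
		fmt.Println(addViaCAS(&n, 5)) // 5
	}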
diff --git a/src/pkg/sync/atomic/atomic_test.go b/src/pkg/sync/atomic/atomic_test.go
deleted file mode 100644
index 119ad0036..000000000
--- a/src/pkg/sync/atomic/atomic_test.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package atomic_test
-
-import (
-	"runtime"
-	. "sync/atomic"
-	"testing"
-	"unsafe"
-)
-
-// Tests of correct behavior, without contention.
-// (Does the function work as advertised?)
-//
-// Test that the Add functions add correctly.
-// Test that the CompareAndSwap functions actually
-// do the comparison and the swap correctly.
-//
-// The loop over power-of-two values is meant to
-// ensure that the operations apply to the full word size.
-// The struct fields x.before and x.after check that the
-// operations do not extend past the full word size.
-
-const (
-	magic32 = 0xdedbeef
-	magic64 = 0xdeddeadbeefbeef
-)
-
-// Do the 64-bit functions panic? If so, don't bother testing.
-var test64err = func() (err interface{}) {
-	defer func() {
-		err = recover()
-	}()
-	var x int64
-	AddInt64(&x, 1)
-	return nil
-}()
-
-func TestAddInt32(t *testing.T) {
-	var x struct {
-		before int32
-		i      int32
-		after  int32
-	}
-	x.before = magic32
-	x.after = magic32
-	var j int32
-	for delta := int32(1); delta+delta > delta; delta += delta {
-		k := AddInt32(&x.i, delta)
-		j += delta
-		if x.i != j || k != j {
-			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
-		}
-	}
-	if x.before != magic32 || x.after != magic32 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
-	}
-}
-
-func TestAddUint32(t *testing.T) {
-	var x struct {
-		before uint32
-		i      uint32
-		after  uint32
-	}
-	x.before = magic32
-	x.after = magic32
-	var j uint32
-	for delta := uint32(1); delta+delta > delta; delta += delta {
-		k := AddUint32(&x.i, delta)
-		j += delta
-		if x.i != j || k != j {
-			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
-		}
-	}
-	if x.before != magic32 || x.after != magic32 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
-	}
-}
-
-func TestAddInt64(t *testing.T) {
-	if test64err != nil {
-		t.Logf("Skipping 64-bit tests: %v", test64err)
-		return
-	}
-	var x struct {
-		before int64
-		i      int64
-		after  int64
-	}
-	x.before = magic64
-	x.after = magic64
-	var j int64
-	for delta := int64(1); delta+delta > delta; delta += delta {
-		k := AddInt64(&x.i, delta)
-		j += delta
-		if x.i != j || k != j {
-			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
-		}
-	}
-	if x.before != magic64 || x.after != magic64 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, int64(magic64), int64(magic64))
-	}
-}
-
-func TestAddUint64(t *testing.T) {
-	if test64err != nil {
-		t.Logf("Skipping 64-bit tests: %v", test64err)
-		return
-	}
-	var x struct {
-		before uint64
-		i      uint64
-		after  uint64
-	}
-	x.before = magic64
-	x.after = magic64
-	var j uint64
-	for delta := uint64(1); delta+delta > delta; delta += delta {
-		k := AddUint64(&x.i, delta)
-		j += delta
-		if x.i != j || k != j {
-			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
-		}
-	}
-	if x.before != magic64 || x.after != magic64 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
-	}
-}
-
-func TestAddUintptr(t *testing.T) {
-	var x struct {
-		before uintptr
-		i      uintptr
-		after  uintptr
-	}
-	var m uint64 = magic64
-	magicptr := uintptr(m)
-	x.before = magicptr
-	x.after = magicptr
-	var j uintptr
-	for delta := uintptr(1); delta+delta > delta; delta += delta {
-		k := AddUintptr(&x.i, delta)
-		j += delta
-		if x.i != j || k != j {
-			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
-		}
-	}
-	if x.before != magicptr || x.after != magicptr {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
-	}
-}
-
-func TestCompareAndSwapInt32(t *testing.T) {
-	var x struct {
-		before int32
-		i      int32
-		after  int32
-	}
-	x.before = magic32
-	x.after = magic32
-	for val := int32(1); val+val > val; val += val {
-		x.i = val
-		if !CompareAndSwapInt32(&x.i, val, val+1) {
-			t.Errorf("should have swapped %#x %#x", val, val+1)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-		x.i = val + 1
-		if CompareAndSwapInt32(&x.i, val, val+2) {
-			t.Errorf("should not have swapped %#x %#x", val, val+2)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-	}
-	if x.before != magic32 || x.after != magic32 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
-	}
-}
-
-func TestCompareAndSwapUint32(t *testing.T) {
-	var x struct {
-		before uint32
-		i      uint32
-		after  uint32
-	}
-	x.before = magic32
-	x.after = magic32
-	for val := uint32(1); val+val > val; val += val {
-		x.i = val
-		if !CompareAndSwapUint32(&x.i, val, val+1) {
-			t.Errorf("should have swapped %#x %#x", val, val+1)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-		x.i = val + 1
-		if CompareAndSwapUint32(&x.i, val, val+2) {
-			t.Errorf("should not have swapped %#x %#x", val, val+2)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-	}
-	if x.before != magic32 || x.after != magic32 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
-	}
-}
-
-func TestCompareAndSwapInt64(t *testing.T) {
-	if test64err != nil {
-		t.Logf("Skipping 64-bit tests: %v", test64err)
-		return
-	}
-	var x struct {
-		before int64
-		i      int64
-		after  int64
-	}
-	x.before = magic64
-	x.after = magic64
-	for val := int64(1); val+val > val; val += val {
-		x.i = val
-		if !CompareAndSwapInt64(&x.i, val, val+1) {
-			t.Errorf("should have swapped %#x %#x", val, val+1)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-		x.i = val + 1
-		if CompareAndSwapInt64(&x.i, val, val+2) {
-			t.Errorf("should not have swapped %#x %#x", val, val+2)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-	}
-	if x.before != magic64 || x.after != magic64 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
-	}
-}
-
-func TestCompareAndSwapUint64(t *testing.T) {
-	if test64err != nil {
-		t.Logf("Skipping 64-bit tests: %v", test64err)
-		return
-	}
-	var x struct {
-		before uint64
-		i      uint64
-		after  uint64
-	}
-	x.before = magic64
-	x.after = magic64
-	for val := uint64(1); val+val > val; val += val {
-		x.i = val
-		if !CompareAndSwapUint64(&x.i, val, val+1) {
-			t.Errorf("should have swapped %#x %#x", val, val+1)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-		x.i = val + 1
-		if CompareAndSwapUint64(&x.i, val, val+2) {
-			t.Errorf("should not have swapped %#x %#x", val, val+2)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-	}
-	if x.before != magic64 || x.after != magic64 {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
-	}
-}
-
-func TestCompareAndSwapUintptr(t *testing.T) {
-	var x struct {
-		before uintptr
-		i      uintptr
-		after  uintptr
-	}
-	var m uint64 = magic64
-	magicptr := uintptr(m)
-	x.before = magicptr
-	x.after = magicptr
-	for val := uintptr(1); val+val > val; val += val {
-		x.i = val
-		if !CompareAndSwapUintptr(&x.i, val, val+1) {
-			t.Errorf("should have swapped %#x %#x", val, val+1)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-		x.i = val + 1
-		if CompareAndSwapUintptr(&x.i, val, val+2) {
-			t.Errorf("should not have swapped %#x %#x", val, val+2)
-		}
-		if x.i != val+1 {
-			t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
-		}
-	}
-	if x.before != magicptr || x.after != magicptr {
-		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
-	}
-}
-
-// Tests of correct behavior, with contention.
-// (Is the function atomic?)
-//
-// For each function, we write a "hammer" function that repeatedly
-// uses the atomic operation to add 1 to a value. After running
-// multiple hammers in parallel, check that we end with the correct
-// total.
-
-var hammer32 = []struct {
-	name string
-	f    func(*uint32, int)
-}{
-	{"AddInt32", hammerAddInt32},
-	{"AddUint32", hammerAddUint32},
-	{"AddUintptr", hammerAddUintptr32},
-	{"CompareAndSwapInt32", hammerCompareAndSwapInt32},
-	{"CompareAndSwapUint32", hammerCompareAndSwapUint32},
-	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr32},
-}
-
-func init() {
-	var v uint64 = 1 << 50
-	if uintptr(v) != 0 {
-		// 64-bit system; clear uintptr tests
-		hammer32[2].f = nil
-		hammer32[5].f = nil
-	}
-}
-
-func hammerAddInt32(uval *uint32, count int) {
-	val := (*int32)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		AddInt32(val, 1)
-	}
-}
-
-func hammerAddUint32(val *uint32, count int) {
-	for i := 0; i < count; i++ {
-		AddUint32(val, 1)
-	}
-}
-
-func hammerAddUintptr32(uval *uint32, count int) {
-	// only safe when uintptr is 32-bit.
-	// not called on 64-bit systems.
-	val := (*uintptr)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		AddUintptr(val, 1)
-	}
-}
-
-func hammerCompareAndSwapInt32(uval *uint32, count int) {
-	val := (*int32)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		for {
-			v := *val
-			if CompareAndSwapInt32(val, v, v+1) {
-				break
-			}
-		}
-	}
-}
-
-func hammerCompareAndSwapUint32(val *uint32, count int) {
-	for i := 0; i < count; i++ {
-		for {
-			v := *val
-			if CompareAndSwapUint32(val, v, v+1) {
-				break
-			}
-		}
-	}
-}
-
-func hammerCompareAndSwapUintptr32(uval *uint32, count int) {
-	// only safe when uintptr is 32-bit.
-	// not called on 64-bit systems.
-	val := (*uintptr)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		for {
-			v := *val
-			if CompareAndSwapUintptr(val, v, v+1) {
-				break
-			}
-		}
-	}
-}
-
-func TestHammer32(t *testing.T) {
-	const p = 4
-	n := 100000
-	if testing.Short() {
-		n = 1000
-	}
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
-
-	for _, tt := range hammer32 {
-		if tt.f == nil {
-			continue
-		}
-		c := make(chan int)
-		var val uint32
-		for i := 0; i < p; i++ {
-			go func() {
-				tt.f(&val, n)
-				c <- 1
-			}()
-		}
-		for i := 0; i < p; i++ {
-			<-c
-		}
-		if val != uint32(n)*p {
-			t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
-		}
-	}
-}
-
-var hammer64 = []struct {
-	name string
-	f    func(*uint64, int)
-}{
-	{"AddInt64", hammerAddInt64},
-	{"AddUint64", hammerAddUint64},
-	{"AddUintptr", hammerAddUintptr64},
-	{"CompareAndSwapInt64", hammerCompareAndSwapInt64},
-	{"CompareAndSwapUint64", hammerCompareAndSwapUint64},
-	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr64},
-}
-
-func init() {
-	var v uint64 = 1 << 50
-	if uintptr(v) == 0 {
-		// 32-bit system; clear uintptr tests
-		hammer64[2].f = nil
-		hammer64[5].f = nil
-	}
-}
-
-func hammerAddInt64(uval *uint64, count int) {
-	val := (*int64)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		AddInt64(val, 1)
-	}
-}
-
-func hammerAddUint64(val *uint64, count int) {
-	for i := 0; i < count; i++ {
-		AddUint64(val, 1)
-	}
-}
-
-func hammerAddUintptr64(uval *uint64, count int) {
-	// only safe when uintptr is 64-bit.
-	// not called on 32-bit systems.
-	val := (*uintptr)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		AddUintptr(val, 1)
-	}
-}
-
-func hammerCompareAndSwapInt64(uval *uint64, count int) {
-	val := (*int64)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		for {
-			v := *val
-			if CompareAndSwapInt64(val, v, v+1) {
-				break
-			}
-		}
-	}
-}
-
-func hammerCompareAndSwapUint64(val *uint64, count int) {
-	for i := 0; i < count; i++ {
-		for {
-			v := *val
-			if CompareAndSwapUint64(val, v, v+1) {
-				break
-			}
-		}
-	}
-}
-
-func hammerCompareAndSwapUintptr64(uval *uint64, count int) {
-	// only safe when uintptr is 64-bit.
-	// not called on 32-bit systems.
-	val := (*uintptr)(unsafe.Pointer(uval))
-	for i := 0; i < count; i++ {
-		for {
-			v := *val
-			if CompareAndSwapUintptr(val, v, v+1) {
-				break
-			}
-		}
-	}
-}
-
-func TestHammer64(t *testing.T) {
-	if test64err != nil {
-		t.Logf("Skipping 64-bit tests: %v", test64err)
-		return
-	}
-	const p = 4
-	n := 100000
-	if testing.Short() {
-		n = 1000
-	}
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
-
-	for _, tt := range hammer64 {
-		if tt.f == nil {
-			continue
-		}
-		c := make(chan int)
-		var val uint64
-		for i := 0; i < p; i++ {
-			go func() {
-				tt.f(&val, n)
-				c <- 1
-			}()
-		}
-		for i := 0; i < p; i++ {
-			<-c
-		}
-		if val != uint64(n)*p {
-			t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
-		}
-	}
-}
diff --git a/src/pkg/sync/atomic/doc.go b/src/pkg/sync/atomic/doc.go
deleted file mode 100644
index ec5a0d33a..000000000
--- a/src/pkg/sync/atomic/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package atomic provides low-level atomic memory primitives
-// useful for implementing synchronization algorithms.
-//
-// These functions require great care to be used correctly.
-// Except for special, low-level applications, synchronization is better
-// done with channels or the facilities of the sync package.
-// Share memory by communicating;
-// don't communicate by sharing memory.
-//
-// The compare-and-swap operation, implemented by the CompareAndSwapT
-// functions, is the atomic equivalent of:
-//
-//	if *val == old {
-//		*val = new
-//		return true
-//	}
-//	return false
-//
-package atomic
-
-// BUG(rsc): On ARM, the 64-bit functions use instructions unavailable before ARM 11.
-//
-// On x86-32, the 64-bit functions use instructions unavailable before the Pentium.
-
-// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
-func CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)
-
-// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
-func CompareAndSwapInt64(val *int64, old, new int64) (swapped bool)
-
-// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
-func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool)
-
-// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
-func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)
-
-// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
-func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)
-
-// AddInt32 atomically adds delta to *val and returns the new value.
-func AddInt32(val *int32, delta int32) (new int32)
-
-// AddUint32 atomically adds delta to *val and returns the new value.
-func AddUint32(val *uint32, delta uint32) (new uint32)
-
-// AddInt64 atomically adds delta to *val and returns the new value.
-func AddInt64(val *int64, delta int64) (new int64)
-
-// AddUint64 atomically adds delta to *val and returns the new value.
-func AddUint64(val *uint64, delta uint64) (new uint64)
-
-// AddUintptr atomically adds delta to *val and returns the new value.
-func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
-
-// Helper for ARM. Linker will discard on other systems
-func panic64() {
-	panic("sync/atomic: broken 64-bit atomic operations (buggy QEMU)")
-}
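The CompareAndSwapT contract documented above is easiest to see in a short sketch (illustrative, not from the deleted sources): the first call finds the expected old value and swaps; the second fails because *val has changed:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		flag := int32(0)
		// Atomic equivalent of: if *val == old { *val = new; return true }.
		fmt.Println(atomic.CompareAndSwapInt32(&flag, 0, 1)) // true: flag was 0
		fmt.Println(atomic.CompareAndSwapInt32(&flag, 0, 1)) // false: flag is now 1
	}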
diff --git a/src/pkg/sync/cond.go b/src/pkg/sync/cond.go
deleted file mode 100644
index 75494b535..000000000
--- a/src/pkg/sync/cond.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import "runtime"
-
-// Cond implements a condition variable, a rendezvous point
-// for goroutines waiting for or announcing the occurrence
-// of an event.
-//
-// Each Cond has an associated Locker L (often a *Mutex or *RWMutex),
-// which must be held when changing the condition and
-// when calling the Wait method.
-type Cond struct {
-	L Locker // held while observing or changing the condition
-	m Mutex  // held to avoid internal races
-
-	// We must be careful to make sure that when Signal
-	// releases a semaphore, the corresponding acquire is
-	// executed by a goroutine that was already waiting at
-	// the time of the call to Signal, not one that arrived later.
-	// To ensure this, we segment waiting goroutines into
-	// generations punctuated by calls to Signal. Each call to
-	// Signal begins another generation if there are no goroutines
-	// left in older generations for it to wake. Because of this
-	// optimization (only begin another generation if there
-	// are no older goroutines left), we only need to keep track
-	// of the two most recent generations, which we call old
-	// and new.
-	oldWaiters int     // number of waiters in old generation...
-	oldSema    *uint32 // ... waiting on this semaphore
-
-	newWaiters int     // number of waiters in new generation...
-	newSema    *uint32 // ... waiting on this semaphore
-}
-
-// NewCond returns a new Cond with Locker l.
-func NewCond(l Locker) *Cond {
-	return &Cond{L: l}
-}
-
-// Wait atomically unlocks c.L and suspends execution
-// of the calling goroutine. After later resuming execution,
-// Wait locks c.L before returning.
-//
-// Because L is not locked when Wait first resumes, the caller
-// typically cannot assume that the condition is true when
-// Wait returns. Instead, the caller should Wait in a loop:
-//
-//	c.L.Lock()
-//	for !condition() {
-//		c.Wait()
-//	}
-//	... make use of condition ...
-//	c.L.Unlock()
-//
-func (c *Cond) Wait() {
-	c.m.Lock()
-	if c.newSema == nil {
-		c.newSema = new(uint32)
-	}
-	s := c.newSema
-	c.newWaiters++
-	c.m.Unlock()
-	c.L.Unlock()
-	runtime.Semacquire(s)
-	c.L.Lock()
-}
-
-// Signal wakes one goroutine waiting on c, if there is any.
-//
-// It is allowed but not required for the caller to hold c.L
-// during the call.
-func (c *Cond) Signal() {
-	c.m.Lock()
-	if c.oldWaiters == 0 && c.newWaiters > 0 {
-		// Retire old generation; rename new to old.
-		c.oldWaiters = c.newWaiters
-		c.oldSema = c.newSema
-		c.newWaiters = 0
-		c.newSema = nil
-	}
-	if c.oldWaiters > 0 {
-		c.oldWaiters--
-		runtime.Semrelease(c.oldSema)
-	}
-	c.m.Unlock()
-}
-
-// Broadcast wakes all goroutines waiting on c.
-//
-// It is allowed but not required for the caller to hold c.L
-// during the call.
-func (c *Cond) Broadcast() {
-	c.m.Lock()
-	// Wake both generations.
-	if c.oldWaiters > 0 {
-		for i := 0; i < c.oldWaiters; i++ {
-			runtime.Semrelease(c.oldSema)
-		}
-		c.oldWaiters = 0
-	}
-	if c.newWaiters > 0 {
-		for i := 0; i < c.newWaiters; i++ {
-			runtime.Semrelease(c.newSema)
-		}
-		c.newWaiters = 0
-		c.newSema = nil
-	}
-	c.m.Unlock()
-}
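The Wait-in-a-loop idiom from the Cond documentation above, as a runnable sketch (the queue type is illustrative only):

	package main

	import "sync"

	type queue struct {
		mu    sync.Mutex
		cond  *sync.Cond
		items []int
	}

	func newQueue() *queue {
		q := new(queue)
		q.cond = sync.NewCond(&q.mu)
		return q
	}

	func (q *queue) put(v int) {
		q.mu.Lock()
		q.items = append(q.items, v)
		q.mu.Unlock()
		q.cond.Signal() // wake one waiter, if any
	}

	func (q *queue) get() int {
		q.mu.Lock()
		defer q.mu.Unlock()
		for len(q.items) == 0 {
			q.cond.Wait() // atomically unlocks q.mu and suspends
		}
		v := q.items[0]
		q.items = q.items[1:]
		return v
	}

	func main() {
		q := newQueue()
		go q.put(42)
		_ = q.get() // blocks until put signals
	}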
diff --git a/src/pkg/sync/cond_test.go b/src/pkg/sync/cond_test.go
deleted file mode 100644
index cefacb184..000000000
--- a/src/pkg/sync/cond_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package sync_test
-
-import (
-	. "sync"
-	"testing"
-)
-
-func TestCondSignal(t *testing.T) {
-	var m Mutex
-	c := NewCond(&m)
-	n := 2
-	running := make(chan bool, n)
-	awake := make(chan bool, n)
-	for i := 0; i < n; i++ {
-		go func() {
-			m.Lock()
-			running <- true
-			c.Wait()
-			awake <- true
-			m.Unlock()
-		}()
-	}
-	for i := 0; i < n; i++ {
-		<-running // Wait for everyone to run.
-	}
-	for n > 0 {
-		select {
-		case <-awake:
-			t.Fatal("goroutine not asleep")
-		default:
-		}
-		m.Lock()
-		c.Signal()
-		m.Unlock()
-		<-awake // Will deadlock if no goroutine wakes up
-		select {
-		case <-awake:
-			t.Fatal("too many goroutines awake")
-		default:
-		}
-		n--
-	}
-	c.Signal()
-}
-
-func TestCondSignalGenerations(t *testing.T) {
-	var m Mutex
-	c := NewCond(&m)
-	n := 100
-	running := make(chan bool, n)
-	awake := make(chan int, n)
-	for i := 0; i < n; i++ {
-		go func(i int) {
-			m.Lock()
-			running <- true
-			c.Wait()
-			awake <- i
-			m.Unlock()
-		}(i)
-		if i > 0 {
-			a := <-awake
-			if a != i-1 {
-				t.Fatalf("wrong goroutine woke up: want %d, got %d", i-1, a)
-			}
-		}
-		<-running
-		m.Lock()
-		c.Signal()
-		m.Unlock()
-	}
-}
-
-func TestCondBroadcast(t *testing.T) {
-	var m Mutex
-	c := NewCond(&m)
-	n := 200
-	running := make(chan int, n)
-	awake := make(chan int, n)
-	exit := false
-	for i := 0; i < n; i++ {
-		go func(g int) {
-			m.Lock()
-			for !exit {
-				running <- g
-				c.Wait()
-				awake <- g
-			}
-			m.Unlock()
-		}(i)
-	}
-	for i := 0; i < n; i++ {
-		for i := 0; i < n; i++ {
-			<-running // Will deadlock unless n are running.
-		}
-		if i == n-1 {
-			m.Lock()
-			exit = true
-			m.Unlock()
-		}
-		select {
-		case <-awake:
-			t.Fatal("goroutine not asleep")
-		default:
-		}
-		m.Lock()
-		c.Broadcast()
-		m.Unlock()
-		seen := make([]bool, n)
-		for i := 0; i < n; i++ {
-			g := <-awake
-			if seen[g] {
-				t.Fatal("goroutine woke up twice")
-			}
-			seen[g] = true
-		}
-	}
-	select {
-	case <-running:
-		t.Fatal("goroutine did not exit")
-	default:
-	}
-	c.Broadcast()
-}
diff --git a/src/pkg/sync/mutex.go b/src/pkg/sync/mutex.go
deleted file mode 100644
index 2d46c8994..000000000
--- a/src/pkg/sync/mutex.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sync provides basic synchronization primitives such as mutual
-// exclusion locks. Other than the Once and WaitGroup types, most are intended
-// for use by low-level library routines. Higher-level synchronization is
-// better done via channels and communication.
-package sync
-
-import (
-	"runtime"
-	"sync/atomic"
-)
-
-// A Mutex is a mutual exclusion lock.
-// Mutexes can be created as part of other structures;
-// the zero value for a Mutex is an unlocked mutex.
-type Mutex struct {
-	state int32
-	sema  uint32
-}
-
-// A Locker represents an object that can be locked and unlocked.
-type Locker interface {
-	Lock()
-	Unlock()
-}
-
-const (
-	mutexLocked = 1 << iota // mutex is locked
-	mutexWoken
-	mutexWaiterShift = iota
-)
-
-// Lock locks m.
-// If the lock is already in use, the calling goroutine
-// blocks until the mutex is available.
-func (m *Mutex) Lock() {
-	// Fast path: grab unlocked mutex.
-	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
-		return
-	}
-
-	awoke := false
-	for {
-		old := m.state
-		new := old | mutexLocked
-		if old&mutexLocked != 0 {
-			new = old + 1<<mutexWaiterShift
-		}
-		if awoke {
-			// The goroutine has been woken from sleep,
-			// so we need to reset the flag in either case.
-			new &^= mutexWoken
-		}
-		if atomic.CompareAndSwapInt32(&m.state, old, new) {
-			if old&mutexLocked == 0 {
-				break
-			}
-			runtime.Semacquire(&m.sema)
-			awoke = true
-		}
-	}
-}
-
-// Unlock unlocks m.
-// It is a run-time error if m is not locked on entry to Unlock.
-//
-// A locked Mutex is not associated with a particular goroutine.
-// It is allowed for one goroutine to lock a Mutex and then
-// arrange for another goroutine to unlock it.
-func (m *Mutex) Unlock() {
-	// Fast path: drop lock bit.
-	new := atomic.AddInt32(&m.state, -mutexLocked)
-	if (new+mutexLocked)&mutexLocked == 0 {
-		panic("sync: unlock of unlocked mutex")
-	}
-
-	old := new
-	for {
-		// If there are no waiters or a goroutine has already
-		// been woken or grabbed the lock, no need to wake anyone.
-		if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken) != 0 {
-			return
-		}
-		// Grab the right to wake someone.
-		new = (old - 1<<mutexWaiterShift) | mutexWoken
-		if atomic.CompareAndSwapInt32(&m.state, old, new) {
-			runtime.Semrelease(&m.sema)
-			return
-		}
-		old = m.state
-	}
-}
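As the Unlock comment notes, a locked Mutex is not tied to the goroutine that locked it, so it can serve as a simple binary semaphore; a minimal sketch (illustrative only):

	package main

	import "sync"

	func main() {
		var mu sync.Mutex
		done := make(chan bool)
		mu.Lock()
		go func() {
			mu.Unlock() // legal: a different goroutine releases the lock
			done <- true
		}()
		<-done
		mu.Lock() // reacquire now that the helper has released it
		mu.Unlock()
	}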
diff --git a/src/pkg/sync/mutex_test.go b/src/pkg/sync/mutex_test.go
deleted file mode 100644
index d5ada8567..000000000
--- a/src/pkg/sync/mutex_test.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// GOMAXPROCS=10 gotest
-
-package sync_test
-
-import (
-	"runtime"
-	. "sync"
-	"sync/atomic"
-	"testing"
-)
-
-func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
-	for i := 0; i < loops; i++ {
-		runtime.Semacquire(s)
-		runtime.Semrelease(s)
-	}
-	cdone <- true
-}
-
-func TestSemaphore(t *testing.T) {
-	s := new(uint32)
-	*s = 1
-	c := make(chan bool)
-	for i := 0; i < 10; i++ {
-		go HammerSemaphore(s, 1000, c)
-	}
-	for i := 0; i < 10; i++ {
-		<-c
-	}
-}
-
-func BenchmarkUncontendedSemaphore(b *testing.B) {
-	s := new(uint32)
-	*s = 1
-	HammerSemaphore(s, b.N, make(chan bool, 2))
-}
-
-func BenchmarkContendedSemaphore(b *testing.B) {
-	b.StopTimer()
-	s := new(uint32)
-	*s = 1
-	c := make(chan bool)
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
-	b.StartTimer()
-
-	go HammerSemaphore(s, b.N/2, c)
-	go HammerSemaphore(s, b.N/2, c)
-	<-c
-	<-c
-}
-
-
-func HammerMutex(m *Mutex, loops int, cdone chan bool) {
-	for i := 0; i < loops; i++ {
-		m.Lock()
-		m.Unlock()
-	}
-	cdone <- true
-}
-
-func TestMutex(t *testing.T) {
-	m := new(Mutex)
-	c := make(chan bool)
-	for i := 0; i < 10; i++ {
-		go HammerMutex(m, 1000, c)
-	}
-	for i := 0; i < 10; i++ {
-		<-c
-	}
-}
-
-func TestMutexPanic(t *testing.T) {
-	defer func() {
-		if recover() == nil {
-			t.Fatalf("unlock of unlocked mutex did not panic")
-		}
-	}()
-
-	var mu Mutex
-	mu.Lock()
-	mu.Unlock()
-	mu.Unlock()
-}
-
-func BenchmarkMutexUncontended(b *testing.B) {
-	type PaddedMutex struct {
-		Mutex
-		pad [128]uint8
-	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			var mu PaddedMutex
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					mu.Lock()
-					mu.Unlock()
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
-}
-
-func benchmarkMutex(b *testing.B, slack, work bool) {
-	const (
-		CallsPerSched  = 1000
-		LocalWork      = 100
-		GoroutineSlack = 10
-	)
-	procs := runtime.GOMAXPROCS(-1)
-	if slack {
-		procs *= GoroutineSlack
-	}
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	var mu Mutex
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					mu.Lock()
-					mu.Unlock()
-					if work {
-						for i := 0; i < LocalWork; i++ {
-							foo *= 2
-							foo /= 2
-						}
-					}
-				}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
-}
-
-func BenchmarkMutex(b *testing.B) {
-	benchmarkMutex(b, false, false)
-}
-
-func BenchmarkMutexSlack(b *testing.B) {
-	benchmarkMutex(b, true, false)
-}
-
-func BenchmarkMutexWork(b *testing.B) {
-	benchmarkMutex(b, false, true)
-}
-
-func BenchmarkMutexWorkSlack(b *testing.B) {
-	benchmarkMutex(b, true, true)
-}
diff --git a/src/pkg/sync/once.go b/src/pkg/sync/once.go
deleted file mode 100644
index 447b71dcb..000000000
--- a/src/pkg/sync/once.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
-	"sync/atomic"
-)
-
-// Once is an object that will perform exactly one action.
-type Once struct {
-	m    Mutex
-	done int32
-}
-
-// Do calls the function f if and only if the method is being called for the
-// first time with this receiver. In other words, given
-//	var once Once
-// if once.Do(f) is called multiple times, only the first call will invoke f,
-// even if f has a different value in each invocation. A new instance of
-// Once is required for each function to execute.
-//
-// Do is intended for initialization that must be run exactly once. Since f
-// is niladic, it may be necessary to use a function literal to capture the
-// arguments to a function to be invoked by Do:
-//	config.once.Do(func() { config.init(filename) })
-//
-// Because no call to Do returns until the one call to f returns, if f causes
-// Do to be called, it will deadlock.
-//
-func (o *Once) Do(f func()) {
-	if atomic.AddInt32(&o.done, 0) == 1 {
-		return
-	}
-	// Slow-path.
-	o.m.Lock()
-	defer o.m.Unlock()
-	if o.done == 0 {
-		f()
-		atomic.CompareAndSwapInt32(&o.done, 0, 1)
-	}
-}
diff --git a/src/pkg/sync/once_test.go b/src/pkg/sync/once_test.go
deleted file mode 100644
index 157a3667a..000000000
--- a/src/pkg/sync/once_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync_test
-
-import (
-	. "sync"
-	"sync/atomic"
-	"runtime"
-	"testing"
-)
-
-type one int
-
-func (o *one) Increment() {
-	*o++
-}
-
-func run(once *Once, o *one, c chan bool) {
-	once.Do(func() { o.Increment() })
-	c <- true
-}
-
-func TestOnce(t *testing.T) {
-	o := new(one)
-	once := new(Once)
-	c := make(chan bool)
-	const N = 10
-	for i := 0; i < N; i++ {
-		go run(once, o, c)
-	}
-	for i := 0; i < N; i++ {
-		<-c
-	}
-	if *o != 1 {
-		t.Errorf("once failed: %d is not 1", *o)
-	}
-}
-
-func BenchmarkOnce(b *testing.B) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	var once Once
-	f := func() {}
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					once.Do(f)
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
-}
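The config.once.Do(func() { ... }) idiom from the Once documentation above, sketched with a hypothetical loadConfig initializer:

	package main

	import (
		"fmt"
		"sync"
	)

	var (
		once   sync.Once
		config string
	)

	func loadConfig() { config = "loaded" } // hypothetical one-time setup

	func getConfig() string {
		once.Do(loadConfig) // runs loadConfig at most once, even when racing
		return config
	}

	func main() {
		for i := 0; i < 3; i++ {
			fmt.Println(getConfig()) // loadConfig executed only on the first call
		}
	}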
diff --git a/src/pkg/sync/rwmutex.go b/src/pkg/sync/rwmutex.go
deleted file mode 100644
index 9248b4b03..000000000
--- a/src/pkg/sync/rwmutex.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import "sync/atomic"
-
-// An RWMutex is a reader/writer mutual exclusion lock.
-// The lock can be held by an arbitrary number of readers
-// or a single writer.
-// RWMutexes can be created as part of other
-// structures; the zero value for a RWMutex is
-// an unlocked mutex.
-//
-// Writers take priority over Readers: no new RLocks
-// are granted while a blocked Lock call is waiting.
-type RWMutex struct {
-	w           Mutex // held if there are pending readers or writers
-	r           Mutex // held if the w is being rd
-	readerCount int32 // number of pending readers
-}
-
-// RLock locks rw for reading.
-// If the lock is already locked for writing or there is a writer already waiting
-// to release the lock, RLock blocks until the writer has released the lock.
-func (rw *RWMutex) RLock() {
-	// Use rw.r.Lock() to block granting the RLock if a goroutine
-	// is waiting for its Lock. This is to prevent starvation of W in
-	// this situation:
-	//   A: rw.RLock() // granted
-	//   W: rw.Lock()  // waiting for rw.w().Lock()
-	//   B: rw.RLock() // granted
-	//   C: rw.RLock() // granted
-	//   B: rw.RUnlock()
-	//   ... (new readers come and go indefinitely, W is starving)
-	rw.r.Lock()
-	if atomic.AddInt32(&rw.readerCount, 1) == 1 {
-		// The first reader locks rw.w, so writers will be blocked
-		// while the readers have the RLock.
-		rw.w.Lock()
-	}
-	rw.r.Unlock()
-}
-
-// RUnlock undoes a single RLock call;
-// it does not affect other simultaneous readers.
-// It is a run-time error if rw is not locked for reading
-// on entry to RUnlock.
-func (rw *RWMutex) RUnlock() {
-	if atomic.AddInt32(&rw.readerCount, -1) == 0 {
-		// last reader finished, enable writers
-		rw.w.Unlock()
-	}
-}
-
-// Lock locks rw for writing.
-// If the lock is already locked for reading or writing,
-// Lock blocks until the lock is available.
-// To ensure that the lock eventually becomes available,
-// a blocked Lock call excludes new readers from acquiring
-// the lock.
-func (rw *RWMutex) Lock() {
-	rw.r.Lock()
-	rw.w.Lock()
-	rw.r.Unlock()
-}
-
-// Unlock unlocks rw for writing. It is a run-time error if rw is
-// not locked for writing on entry to Unlock.
-//
-// As with Mutexes, a locked RWMutex is not associated with a particular
-// goroutine. One goroutine may RLock (Lock) an RWMutex and then
-// arrange for another goroutine to RUnlock (Unlock) it.
-func (rw *RWMutex) Unlock() { rw.w.Unlock() }
-
-// RLocker returns a Locker interface that implements
-// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
-func (rw *RWMutex) RLocker() Locker {
	return (*rlocker)(rw)
-}
-
-type rlocker RWMutex
-
-func (r *rlocker) Lock()   { (*RWMutex)(r).RLock() }
-func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
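A minimal sketch of the reader/writer contract described above (the counter type is illustrative, not from the deleted sources):

	package main

	import "sync"

	type counter struct {
		mu sync.RWMutex
		n  int
	}

	func (c *counter) get() int {
		c.mu.RLock() // shared: any number of concurrent readers
		defer c.mu.RUnlock()
		return c.n
	}

	func (c *counter) inc() {
		c.mu.Lock() // exclusive: waits for readers to drain, blocks new ones
		defer c.mu.Unlock()
		c.n++
	}

	func main() {
		var c counter
		c.inc()
		_ = c.get()
	}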
"sync" - "sync/atomic" - "testing" -) - -func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) { - m.RLock() - clocked <- true - <-cunlock - m.RUnlock() - cdone <- true -} - -func doTestParallelReaders(numReaders, gomaxprocs int) { - runtime.GOMAXPROCS(gomaxprocs) - var m RWMutex - clocked := make(chan bool) - cunlock := make(chan bool) - cdone := make(chan bool) - for i := 0; i < numReaders; i++ { - go parallelReader(&m, clocked, cunlock, cdone) - } - // Wait for all parallel RLock()s to succeed. - for i := 0; i < numReaders; i++ { - <-clocked - } - for i := 0; i < numReaders; i++ { - cunlock <- true - } - // Wait for the goroutines to finish. - for i := 0; i < numReaders; i++ { - <-cdone - } -} - -func TestParallelReaders(t *testing.T) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) - doTestParallelReaders(1, 4) - doTestParallelReaders(3, 4) - doTestParallelReaders(4, 2) -} - -func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) { - for i := 0; i < num_iterations; i++ { - rwm.RLock() - n := atomic.AddInt32(activity, 1) - if n < 1 || n >= 10000 { - panic(fmt.Sprintf("wlock(%d)\n", n)) - } - for i := 0; i < 100; i++ { - } - atomic.AddInt32(activity, -1) - rwm.RUnlock() - } - cdone <- true -} - -func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) { - for i := 0; i < num_iterations; i++ { - rwm.Lock() - n := atomic.AddInt32(activity, 10000) - if n != 10000 { - panic(fmt.Sprintf("wlock(%d)\n", n)) - } - for i := 0; i < 100; i++ { - } - atomic.AddInt32(activity, -10000) - rwm.Unlock() - } - cdone <- true -} - -func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) { - runtime.GOMAXPROCS(gomaxprocs) - // Number of active readers + 10000 * number of active writers. - var activity int32 - var rwm RWMutex - cdone := make(chan bool) - go writer(&rwm, num_iterations, &activity, cdone) - var i int - for i = 0; i < numReaders/2; i++ { - go reader(&rwm, num_iterations, &activity, cdone) - } - go writer(&rwm, num_iterations, &activity, cdone) - for ; i < numReaders; i++ { - go reader(&rwm, num_iterations, &activity, cdone) - } - // Wait for the 2 writers and all readers to finish. - for i := 0; i < 2+numReaders; i++ { - <-cdone - } -} - -func TestRWMutex(t *testing.T) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) - n := 1000 - if testing.Short() { - n = 5 - } - HammerRWMutex(1, 1, n) - HammerRWMutex(1, 3, n) - HammerRWMutex(1, 10, n) - HammerRWMutex(4, 1, n) - HammerRWMutex(4, 3, n) - HammerRWMutex(4, 10, n) - HammerRWMutex(10, 1, n) - HammerRWMutex(10, 3, n) - HammerRWMutex(10, 10, n) - HammerRWMutex(10, 5, n) -} - -func TestRLocker(t *testing.T) { - var wl RWMutex - var rl Locker - wlocked := make(chan bool, 1) - rlocked := make(chan bool, 1) - rl = wl.RLocker() - n := 10 - go func() { - for i := 0; i < n; i++ { - rl.Lock() - rl.Lock() - rlocked <- true - wl.Lock() - wlocked <- true - } - }() - for i := 0; i < n; i++ { - <-rlocked - rl.Unlock() - select { - case <-wlocked: - t.Fatal("RLocker() didn't read-lock it") - default: - } - rl.Unlock() - <-wlocked - select { - case <-rlocked: - t.Fatal("RLocker() didn't respect the write lock") - default: - } - wl.Unlock() - } -} diff --git a/src/pkg/sync/waitgroup.go b/src/pkg/sync/waitgroup.go deleted file mode 100644 index 05478c630..000000000 --- a/src/pkg/sync/waitgroup.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
diff --git a/src/pkg/sync/waitgroup.go b/src/pkg/sync/waitgroup.go
deleted file mode 100644
index 05478c630..000000000
--- a/src/pkg/sync/waitgroup.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import "runtime"
-
-// A WaitGroup waits for a collection of goroutines to finish.
-// The main goroutine calls Add to set the number of
-// goroutines to wait for. Then each of the goroutines
-// runs and calls Done when finished. At the same time,
-// Wait can be used to block until all goroutines have finished.
-//
-// For example:
-//
-//	for i := 0; i < n; i++ {
-//		if !condition(i) {
-//			continue
-//		}
-//		wg.Add(1)
-//		go func() {
-//			// Do something.
-//			wg.Done()
-//		}()
-//	}
-//	wg.Wait()
-//
type WaitGroup struct {
-	m       Mutex
-	counter int
-	waiters int
-	sema    *uint32
-}
-
-// WaitGroup creates a new semaphore each time the old semaphore
-// is released. This is to avoid the following race:
-//
-// G1: Add(1)
-// G1: go G2()
-// G1: Wait() // Context switch after Unlock() and before Semacquire().
-// G2: Done() // Release semaphore: sema == 1, waiters == 0. G1 doesn't run yet.
-// G3: Wait() // Finds counter == 0, waiters == 0, doesn't block.
-// G3: Add(1) // Makes counter == 1, waiters == 0.
-// G3: go G4()
-// G3: Wait() // G1 still hasn't run, G3 finds sema == 1, unblocked! Bug.

-// Add adds delta, which may be negative, to the WaitGroup counter.
-// If the counter becomes zero, all goroutines blocked on Wait() are released.
-func (wg *WaitGroup) Add(delta int) {
-	wg.m.Lock()
-	if delta < -wg.counter {
-		wg.m.Unlock()
-		panic("sync: negative WaitGroup count")
-	}
-	wg.counter += delta
-	if wg.counter == 0 && wg.waiters > 0 {
-		for i := 0; i < wg.waiters; i++ {
-			runtime.Semrelease(wg.sema)
-		}
-		wg.waiters = 0
-		wg.sema = nil
-	}
-	wg.m.Unlock()
-}
-
-// Done decrements the WaitGroup counter.
-func (wg *WaitGroup) Done() {
-	wg.Add(-1)
-}
-
-// Wait blocks until the WaitGroup counter is zero.
-func (wg *WaitGroup) Wait() {
-	wg.m.Lock()
-	if wg.counter == 0 {
-		wg.m.Unlock()
-		return
-	}
-	wg.waiters++
-	if wg.sema == nil {
-		wg.sema = new(uint32)
-	}
-	s := wg.sema
-	wg.m.Unlock()
-	runtime.Semacquire(s)
-}
diff --git a/src/pkg/sync/waitgroup_test.go b/src/pkg/sync/waitgroup_test.go
deleted file mode 100644
index fe35732e7..000000000
--- a/src/pkg/sync/waitgroup_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync_test
-
-import (
-	. "sync"
-	"testing"
-)
-
-func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
-	n := 16
-	wg1.Add(n)
-	wg2.Add(n)
-	exited := make(chan bool, n)
-	for i := 0; i != n; i++ {
-		go func(i int) {
-			wg1.Done()
-			wg2.Wait()
-			exited <- true
-		}(i)
-	}
-	wg1.Wait()
-	for i := 0; i != n; i++ {
-		select {
-		case <-exited:
-			t.Fatal("WaitGroup released group too soon")
-		default:
-		}
-		wg2.Done()
-	}
-	for i := 0; i != n; i++ {
-		<-exited // Will block if barrier fails to unlock someone.
-	}
-}
-
-func TestWaitGroup(t *testing.T) {
-	wg1 := &WaitGroup{}
-	wg2 := &WaitGroup{}
-
-	// Run the same test a few times to ensure barrier is in a proper state.
-	for i := 0; i != 8; i++ {
-		testWaitGroup(t, wg1, wg2)
-	}
-}
-
-func TestWaitGroupMisuse(t *testing.T) {
-	defer func() {
-		err := recover()
-		if err != "sync: negative WaitGroup count" {
-			t.Fatalf("Unexpected panic: %#v", err)
-		}
-	}()
-	wg := &WaitGroup{}
-	wg.Add(1)
-	wg.Done()
-	wg.Done()
-	t.Fatal("Should panic")
-}
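The Add/Done/Wait pattern from the WaitGroup documentation above, written out as a runnable sketch (illustrative only):

	package main

	import "sync"

	func main() {
		var wg sync.WaitGroup
		results := make([]int, 5)
		for i := 0; i < 5; i++ {
			wg.Add(1) // before starting the goroutine
			go func(i int) {
				defer wg.Done()
				results[i] = i * i
			}(i)
		}
		wg.Wait() // blocks until all five goroutines have called Done
	}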