Diffstat (limited to 'src/pkg/sync')
-rw-r--r--  src/pkg/sync/mutex.go        | 14
-rw-r--r--  src/pkg/sync/mutex_test.go   | 76
-rw-r--r--  src/pkg/sync/rwmutex.go      | 18
-rw-r--r--  src/pkg/sync/rwmutex_test.go | 88
4 files changed, 98 insertions, 98 deletions
diff --git a/src/pkg/sync/mutex.go b/src/pkg/sync/mutex.go
index 9ba628824..b170370bc 100644
--- a/src/pkg/sync/mutex.go
+++ b/src/pkg/sync/mutex.go
@@ -16,8 +16,8 @@ func cas(val *uint32, old, new uint32) bool
 // Mutexes can be created as part of other structures;
 // the zero value for a Mutex is an unlocked mutex.
 type Mutex struct {
-	key uint32;
-	sema uint32;
+	key uint32
+	sema uint32
 }
 
 // Add delta to *val, and return the new *val in a thread-safe way. If multiple
@@ -25,13 +25,13 @@ type Mutex struct {
 // serialized, and all the deltas will be added in an undefined order.
 func xadd(val *uint32, delta int32) (new uint32) {
 	for {
-		v := *val;
-		nv := v + uint32(delta);
+		v := *val
+		nv := v + uint32(delta)
 		if cas(val, v, nv) {
 			return nv
 		}
 	}
-	panic("unreached");
+	panic("unreached")
 }
 
 // Lock locks m.
@@ -42,7 +42,7 @@ func (m *Mutex) Lock() {
 		// changed from 0 to 1; we hold lock
 		return
 	}
-	runtime.Semacquire(&m.sema);
+	runtime.Semacquire(&m.sema)
 }
 
 // Unlock unlocks m.
@@ -56,5 +56,5 @@ func (m *Mutex) Unlock() {
 		// changed from 1 to 0; no contention
 		return
 	}
-	runtime.Semrelease(&m.sema);
+	runtime.Semrelease(&m.sema)
 }
diff --git a/src/pkg/sync/mutex_test.go b/src/pkg/sync/mutex_test.go
index 05fef786a..d0e048ed7 100644
--- a/src/pkg/sync/mutex_test.go
+++ b/src/pkg/sync/mutex_test.go
@@ -7,23 +7,23 @@
 package sync_test
 
 import (
-	"runtime";
-	. "sync";
-	"testing";
+	"runtime"
+	. "sync"
+	"testing"
 )
 
 func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
 	for i := 0; i < loops; i++ {
-		runtime.Semacquire(s);
-		runtime.Semrelease(s);
+		runtime.Semacquire(s)
+		runtime.Semrelease(s)
 	}
-	cdone <- true;
+	cdone <- true
 }
 
 func TestSemaphore(t *testing.T) {
-	s := new(uint32);
-	*s = 1;
-	c := make(chan bool);
+	s := new(uint32)
+	*s = 1
+	c := make(chan bool)
 	for i := 0; i < 10; i++ {
 		go HammerSemaphore(s, 1000, c)
 	}
@@ -33,37 +33,37 @@ func TestSemaphore(t *testing.T) {
 }
 
 func BenchmarkUncontendedSemaphore(b *testing.B) {
-	s := new(uint32);
-	*s = 1;
-	HammerSemaphore(s, b.N, make(chan bool, 2));
+	s := new(uint32)
+	*s = 1
+	HammerSemaphore(s, b.N, make(chan bool, 2))
 }
 
 func BenchmarkContendedSemaphore(b *testing.B) {
-	b.StopTimer();
-	s := new(uint32);
-	*s = 1;
-	c := make(chan bool);
-	runtime.GOMAXPROCS(2);
-	b.StartTimer();
+	b.StopTimer()
+	s := new(uint32)
+	*s = 1
+	c := make(chan bool)
+	runtime.GOMAXPROCS(2)
+	b.StartTimer()
 
-	go HammerSemaphore(s, b.N/2, c);
-	go HammerSemaphore(s, b.N/2, c);
-	<-c;
-	<-c;
+	go HammerSemaphore(s, b.N/2, c)
+	go HammerSemaphore(s, b.N/2, c)
+	<-c
+	<-c
 }
 
 func HammerMutex(m *Mutex, loops int, cdone chan bool) {
 	for i := 0; i < loops; i++ {
-		m.Lock();
-		m.Unlock();
+		m.Lock()
+		m.Unlock()
 	}
-	cdone <- true;
+	cdone <- true
 }
 
 func TestMutex(t *testing.T) {
-	m := new(Mutex);
-	c := make(chan bool);
+	m := new(Mutex)
+	c := make(chan bool)
 	for i := 0; i < 10; i++ {
 		go HammerMutex(m, 1000, c)
 	}
@@ -73,19 +73,19 @@ func TestMutex(t *testing.T) {
 }
 
 func BenchmarkUncontendedMutex(b *testing.B) {
-	m := new(Mutex);
-	HammerMutex(m, b.N, make(chan bool, 2));
+	m := new(Mutex)
+	HammerMutex(m, b.N, make(chan bool, 2))
 }
 
 func BenchmarkContendedMutex(b *testing.B) {
-	b.StopTimer();
-	m := new(Mutex);
-	c := make(chan bool);
-	runtime.GOMAXPROCS(2);
-	b.StartTimer();
+	b.StopTimer()
+	m := new(Mutex)
+	c := make(chan bool)
+	runtime.GOMAXPROCS(2)
+	b.StartTimer()
 
-	go HammerMutex(m, b.N/2, c);
-	go HammerMutex(m, b.N/2, c);
-	<-c;
-	<-c;
+	go HammerMutex(m, b.N/2, c)
+	go HammerMutex(m, b.N/2, c)
+	<-c
+	<-c
 }
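For context only, not part of this change: the files above implement and test the public Mutex API, whose Lock/Unlock pair serializes access to shared state. A minimal, self-contained usage sketch against the current public sync package, in the spirit of HammerMutex above, might look like the following; the goroutine and iteration counts are illustrative.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex // the zero value is an unlocked mutex, as documented above
	var wg sync.WaitGroup
	counter := 0

	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				mu.Lock() // only one goroutine may hold the lock at a time
				counter++
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println(counter) // always 10000: every increment was serialized by the mutex
}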
diff --git a/src/pkg/sync/rwmutex.go b/src/pkg/sync/rwmutex.go
index b5e2b55c0..0058cf252 100644
--- a/src/pkg/sync/rwmutex.go
+++ b/src/pkg/sync/rwmutex.go
@@ -14,9 +14,9 @@ package sync
 // Writers take priority over Readers: no new RLocks
 // are granted while a blocked Lock call is waiting.
 type RWMutex struct {
-	w Mutex;            // held if there are pending readers or writers
-	r Mutex;            // held if the w is being rd
-	readerCount uint32; // number of pending readers
+	w Mutex             // held if there are pending readers or writers
+	r Mutex             // held if the w is being rd
+	readerCount uint32  // number of pending readers
 }
 
 // RLock locks rw for reading.
@@ -32,13 +32,13 @@ func (rw *RWMutex) RLock() {
 	// C: rw.RLock() // granted
 	// B: rw.RUnlock()
 	// ... (new readers come and go indefinitely, W is starving)
-	rw.r.Lock();
+	rw.r.Lock()
 	if xadd(&rw.readerCount, 1) == 1 {
 		// The first reader locks rw.w, so writers will be blocked
 		// while the readers have the RLock.
 		rw.w.Lock()
 	}
-	rw.r.Unlock();
+	rw.r.Unlock()
 }
 
 // RUnlock undoes a single RLock call;
@@ -59,9 +59,9 @@ func (rw *RWMutex) RUnlock() {
 // a blocked Lock call excludes new readers from acquiring
 // the lock.
 func (rw *RWMutex) Lock() {
-	rw.r.Lock();
-	rw.w.Lock();
-	rw.r.Unlock();
+	rw.r.Lock()
+	rw.w.Lock()
+	rw.r.Unlock()
 }
 
 // Unlock unlocks rw for writing.
@@ -72,4 +72,4 @@ func (rw *RWMutex) Lock() {
 // a locked RWMutex is not associated with a particular goroutine.
 // It is allowed for one goroutine to RLock (Lock) an RWMutex and then
 // arrange for another goroutine to RUnlock (Unlock) it.
-func (rw *RWMutex) Unlock() { rw.w.Unlock() }
+func (rw *RWMutex) Unlock() { rw.w.Unlock() }
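For context only, not part of this change: RWMutex allows many concurrent readers while giving a writer exclusive access, and as the comments above note, a waiting writer blocks new RLocks so readers cannot starve it. A minimal sketch against the current public sync API (the map and values below are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var rw sync.RWMutex
	var wg sync.WaitGroup
	data := map[string]int{"x": 1}

	// Several readers may hold the read lock at the same time.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rw.RLock()
			_ = data["x"]
			rw.RUnlock()
		}()
	}

	// A writer takes the exclusive lock; no reader runs while it holds it.
	wg.Add(1)
	go func() {
		defer wg.Done()
		rw.Lock()
		data["x"] = 2
		rw.Unlock()
	}()

	wg.Wait()
	rw.RLock()
	fmt.Println(data["x"]) // prints 2 once the writer has run
	rw.RUnlock()
}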
"sync" + "testing" ) func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) { - m.RLock(); - clocked <- true; - <-cunlock; - m.RUnlock(); - cdone <- true; + m.RLock() + clocked <- true + <-cunlock + m.RUnlock() + cdone <- true } func doTestParallelReaders(numReaders, gomaxprocs int) { - runtime.GOMAXPROCS(gomaxprocs); - var m RWMutex; - clocked := make(chan bool); - cunlock := make(chan bool); - cdone := make(chan bool); + runtime.GOMAXPROCS(gomaxprocs) + var m RWMutex + clocked := make(chan bool) + cunlock := make(chan bool) + cdone := make(chan bool) for i := 0; i < numReaders; i++ { go parallelReader(&m, clocked, cunlock, cdone) } @@ -44,53 +44,53 @@ func doTestParallelReaders(numReaders, gomaxprocs int) { } func TestParallelReaders(t *testing.T) { - doTestParallelReaders(1, 4); - doTestParallelReaders(3, 4); - doTestParallelReaders(4, 2); + doTestParallelReaders(1, 4) + doTestParallelReaders(3, 4) + doTestParallelReaders(4, 2) } func reader(rwm *RWMutex, num_iterations int, activity *uint32, cdone chan bool) { for i := 0; i < num_iterations; i++ { - rwm.RLock(); - n := Xadd(activity, 1); + rwm.RLock() + n := Xadd(activity, 1) if n < 1 || n >= 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } for i := 0; i < 100; i++ { } - Xadd(activity, -1); - rwm.RUnlock(); + Xadd(activity, -1) + rwm.RUnlock() } - cdone <- true; + cdone <- true } func writer(rwm *RWMutex, num_iterations int, activity *uint32, cdone chan bool) { for i := 0; i < num_iterations; i++ { - rwm.Lock(); - n := Xadd(activity, 10000); + rwm.Lock() + n := Xadd(activity, 10000) if n != 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } for i := 0; i < 100; i++ { } - Xadd(activity, -10000); - rwm.Unlock(); + Xadd(activity, -10000) + rwm.Unlock() } - cdone <- true; + cdone <- true } func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) { - runtime.GOMAXPROCS(gomaxprocs); + runtime.GOMAXPROCS(gomaxprocs) // Number of active readers + 10000 * number of active writers. - var activity uint32; - var rwm RWMutex; - cdone := make(chan bool); - go writer(&rwm, num_iterations, &activity, cdone); - var i int; + var activity uint32 + var rwm RWMutex + cdone := make(chan bool) + go writer(&rwm, num_iterations, &activity, cdone) + var i int for i = 0; i < numReaders/2; i++ { go reader(&rwm, num_iterations, &activity, cdone) } - go writer(&rwm, num_iterations, &activity, cdone); + go writer(&rwm, num_iterations, &activity, cdone) for ; i < numReaders; i++ { go reader(&rwm, num_iterations, &activity, cdone) } @@ -101,14 +101,14 @@ func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) { } func TestRWMutex(t *testing.T) { - HammerRWMutex(1, 1, 1000); - HammerRWMutex(1, 3, 1000); - HammerRWMutex(1, 10, 1000); - HammerRWMutex(4, 1, 1000); - HammerRWMutex(4, 3, 1000); - HammerRWMutex(4, 10, 1000); - HammerRWMutex(10, 1, 1000); - HammerRWMutex(10, 3, 1000); - HammerRWMutex(10, 10, 1000); - HammerRWMutex(10, 5, 10000); + HammerRWMutex(1, 1, 1000) + HammerRWMutex(1, 3, 1000) + HammerRWMutex(1, 10, 1000) + HammerRWMutex(4, 1, 1000) + HammerRWMutex(4, 3, 1000) + HammerRWMutex(4, 10, 1000) + HammerRWMutex(10, 1, 1000) + HammerRWMutex(10, 3, 1000) + HammerRWMutex(10, 10, 1000) + HammerRWMutex(10, 5, 10000) } |