| author | Michael Stapelberg <stapelberg@debian.org> | 2014-06-19 09:23:02 +0200 |
|---|---|---|
| committer | Michael Stapelberg <stapelberg@debian.org> | 2014-06-19 09:23:02 +0200 |
| commit | 8fcc691d6fa80c9ddf38bf0d34b803bab0e421d5 (patch) | |
| tree | ba71646a10b518372d110532d86fcf0b98edc14f /src/pkg/sync/pool.go | |
| parent | 3bb719bbf3cdb97b3901f3baaa2da9d02a5c3cdb (diff) | |
| parent | 8a39ee361feb9bf46d728ff1ba4f07ca1d9610b1 (diff) | |
| download | golang-8fcc691d6fa80c9ddf38bf0d34b803bab0e421d5.tar.gz | |
Merge tag 'upstream/1.3' into debian-sid
Upstream version 1.3
Diffstat (limited to 'src/pkg/sync/pool.go')
-rw-r--r-- | src/pkg/sync/pool.go | 223 |
1 file changed, 223 insertions, 0 deletions
```diff
diff --git a/src/pkg/sync/pool.go b/src/pkg/sync/pool.go
new file mode 100644
index 000000000..1f08707cd
--- /dev/null
+++ b/src/pkg/sync/pool.go
@@ -0,0 +1,223 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+	"runtime"
+	"sync/atomic"
+	"unsafe"
+)
+
+// A Pool is a set of temporary objects that may be individually saved and
+// retrieved.
+//
+// Any item stored in the Pool may be removed automatically at any time without
+// notification. If the Pool holds the only reference when this happens, the
+// item might be deallocated.
+//
+// A Pool is safe for use by multiple goroutines simultaneously.
+//
+// Pool's purpose is to cache allocated but unused items for later reuse,
+// relieving pressure on the garbage collector. That is, it makes it easy to
+// build efficient, thread-safe free lists. However, it is not suitable for all
+// free lists.
+//
+// An appropriate use of a Pool is to manage a group of temporary items
+// silently shared among and potentially reused by concurrent independent
+// clients of a package. Pool provides a way to amortize allocation overhead
+// across many clients.
+//
+// An example of good use of a Pool is in the fmt package, which maintains a
+// dynamically-sized store of temporary output buffers. The store scales under
+// load (when many goroutines are actively printing) and shrinks when
+// quiescent.
+//
+// On the other hand, a free list maintained as part of a short-lived object is
+// not a suitable use for a Pool, since the overhead does not amortize well in
+// that scenario. It is more efficient to have such objects implement their own
+// free list.
+//
+type Pool struct {
+	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
+	localSize uintptr        // size of the local array
+
+	// New optionally specifies a function to generate
+	// a value when Get would otherwise return nil.
+	// It may not be changed concurrently with calls to Get.
+	New func() interface{}
+}
+
+// Local per-P Pool appendix.
+type poolLocal struct {
+	private interface{}   // Can be used only by the respective P.
+	shared  []interface{} // Can be used by any P.
+	Mutex                 // Protects shared.
+	pad     [128]byte     // Prevents false sharing.
+}
+
+// Put adds x to the pool.
+func (p *Pool) Put(x interface{}) {
+	if raceenabled {
+		// Under race detector the Pool degenerates into no-op.
+		// It's conforming, simple and does not introduce excessive
+		// happens-before edges between unrelated goroutines.
+		return
+	}
+	if x == nil {
+		return
+	}
+	l := p.pin()
+	if l.private == nil {
+		l.private = x
+		x = nil
+	}
+	runtime_procUnpin()
+	if x == nil {
+		return
+	}
+	l.Lock()
+	l.shared = append(l.shared, x)
+	l.Unlock()
+}
+
+// Get selects an arbitrary item from the Pool, removes it from the
+// Pool, and returns it to the caller.
+// Get may choose to ignore the pool and treat it as empty.
+// Callers should not assume any relation between values passed to Put and
+// the values returned by Get.
+//
+// If Get would otherwise return nil and p.New is non-nil, Get returns
+// the result of calling p.New.
+func (p *Pool) Get() interface{} {
+	if raceenabled {
+		if p.New != nil {
+			return p.New()
+		}
+		return nil
+	}
+	l := p.pin()
+	x := l.private
+	l.private = nil
+	runtime_procUnpin()
+	if x != nil {
+		return x
+	}
+	l.Lock()
+	last := len(l.shared) - 1
+	if last >= 0 {
+		x = l.shared[last]
+		l.shared = l.shared[:last]
+	}
+	l.Unlock()
+	if x != nil {
+		return x
+	}
+	return p.getSlow()
+}
+
+func (p *Pool) getSlow() (x interface{}) {
+	// See the comment in pin regarding ordering of the loads.
+	size := atomic.LoadUintptr(&p.localSize) // load-acquire
+	local := p.local                         // load-consume
+	// Try to steal one element from other procs.
+	pid := runtime_procPin()
+	runtime_procUnpin()
+	for i := 0; i < int(size); i++ {
+		l := indexLocal(local, (pid+i+1)%int(size))
+		l.Lock()
+		last := len(l.shared) - 1
+		if last >= 0 {
+			x = l.shared[last]
+			l.shared = l.shared[:last]
+			l.Unlock()
+			break
+		}
+		l.Unlock()
+	}
+
+	if x == nil && p.New != nil {
+		x = p.New()
+	}
+	return x
+}
+
+// pin pins the current goroutine to P, disables preemption and returns poolLocal pool for the P.
+// Caller must call runtime_procUnpin() when done with the pool.
+func (p *Pool) pin() *poolLocal {
+	pid := runtime_procPin()
+	// In pinSlow we store to localSize and then to local, here we load in opposite order.
+	// Since we've disabled preemption, GC can not happen in between.
+	// Thus here we must observe local at least as large localSize.
+	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
+	s := atomic.LoadUintptr(&p.localSize) // load-acquire
+	l := p.local                          // load-consume
+	if uintptr(pid) < s {
+		return indexLocal(l, pid)
+	}
+	return p.pinSlow()
+}
+
+func (p *Pool) pinSlow() *poolLocal {
+	// Retry under the mutex.
+	// Can not lock the mutex while pinned.
+	runtime_procUnpin()
+	allPoolsMu.Lock()
+	defer allPoolsMu.Unlock()
+	pid := runtime_procPin()
+	// poolCleanup won't be called while we are pinned.
+	s := p.localSize
+	l := p.local
+	if uintptr(pid) < s {
+		return indexLocal(l, pid)
+	}
+	if p.local == nil {
+		allPools = append(allPools, p)
+	}
+	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
+	size := runtime.GOMAXPROCS(0)
+	local := make([]poolLocal, size)
+	atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release
+	atomic.StoreUintptr(&p.localSize, uintptr(size))                            // store-release
+	return &local[pid]
+}
+
+func poolCleanup() {
+	// This function is called with the world stopped, at the beginning of a garbage collection.
+	// It must not allocate and probably should not call any runtime functions.
+	// Defensively zero out everything, 2 reasons:
+	// 1. To prevent false retention of whole Pools.
+	// 2. If GC happens while a goroutine works with l.shared in Put/Get,
+	//    it will retain whole Pool. So next cycle memory consumption would be doubled.
+	for i, p := range allPools {
+		allPools[i] = nil
+		for i := 0; i < int(p.localSize); i++ {
+			l := indexLocal(p.local, i)
+			l.private = nil
+			for j := range l.shared {
+				l.shared[j] = nil
+			}
+			l.shared = nil
+		}
+	}
+	allPools = []*Pool{}
+}
+
+var (
+	allPoolsMu Mutex
+	allPools   []*Pool
+)
+
+func init() {
+	runtime_registerPoolCleanup(poolCleanup)
+}
+
+func indexLocal(l unsafe.Pointer, i int) *poolLocal {
+	return &(*[1000000]poolLocal)(l)[i]
+}
+
+// Implemented in runtime.
+func runtime_registerPoolCleanup(cleanup func())
+func runtime_procPin() int
+func runtime_procUnpin()
```
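For readers skimming this patch, here is a minimal usage sketch of the API it introduces. The pool variable, buffer type, and Reset call below are illustrative, not part of the patch:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool hands out reusable *bytes.Buffer values. New is invoked only
// when Get finds no pooled value, so allocation cost is amortized
// across many callers.
var bufPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // Get may return a previously used buffer; clear it first.
	buf.WriteString("hello, pool")
	fmt.Println(buf.String())
	bufPool.Put(buf) // make the buffer available for reuse by other goroutines
}
```

The unconditional Reset reflects the doc comment above: callers should not assume any relation between values passed to Put and the values returned by Get.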
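The steal loop in getSlow probes the per-P shared lists with the expression (pid+i+1) % size: it starts at the neighbour of the current P, wraps around, and ends on the current P itself. A standalone sketch of that index arithmetic, with hypothetical values for size and pid:

```go
package main

import "fmt"

func main() {
	const size, pid = 4, 1 // hypothetical GOMAXPROCS and current P id
	for i := 0; i < size; i++ {
		// Same expression as in getSlow: visit every P exactly once,
		// neighbours first, the current P last.
		fmt.Print((pid+i+1)%size, " ") // prints: 2 3 0 1
	}
	fmt.Println()
}
```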
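indexLocal relies on a standard unsafe idiom: reinterpret a raw pointer as a pointer to a very large array type, then index it. The 1000000 bound exists only in the type; nothing that big is allocated, and only indices below localSize are valid. A self-contained sketch of the same idiom, using an illustrative element type:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := []int{10, 20, 30}
	p := unsafe.Pointer(&s[0])
	// Treat p as a pointer to a huge array type. The bound is a cap on
	// the type, not an allocation; indexing past len(s) would be
	// undefined behavior.
	v := (*[1 << 20]int)(p)[2]
	fmt.Println(v) // 30
}
```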