Diffstat (limited to 'src/pkg/runtime/malloc.goc')
-rw-r--r--  src/pkg/runtime/malloc.goc | 111
1 file changed, 49 insertions(+), 62 deletions(-)
diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
index 49ab24df8..b9fe36db6 100644
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -18,21 +18,6 @@ extern MStats mstats; // defined in extern.go
extern volatile int32 runtime·MemProfileRate;
-// Same algorithm from chan.c, but a different
-// instance of the static uint32 x.
-// Not protected by a lock - let the threads use
-// the same random number if they like.
-static uint32
-fastrand1(void)
-{
- static uint32 x = 0x49f6428aUL;
-
- x += x;
- if(x & 0x80000000L)
- x ^= 0x88888eefUL;
- return x;
-}
-
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
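The deleted block above removes malloc's private copy of the runtime's fast random-number generator in favor of a shared runtime·fastrand1 (picked up again in the profiling hunk further down). As a standalone illustration, here is the same algorithm in plain C outside the runtime; only the state variable's name (rngstate) is mine:

#include <stdint.h>
#include <stdio.h>

static uint32_t rngstate = 0x49f6428aUL;

static uint32_t
fastrand1(void)
{
	uint32_t x = rngstate;
	x += x;                  /* shift left by one bit */
	if (x & 0x80000000UL)    /* high bit of the doubled value set? */
		x ^= 0x88888eefUL;   /* fold in the feedback constant */
	rngstate = x;
	return x;
}

int
main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%#x\n", (unsigned)fastrand1());
	return 0;
}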
@@ -53,18 +38,18 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
if(size == 0)
size = 1;
- mstats.nmalloc++;
+ c = m->mcache;
+ c->local_nmalloc++;
if(size <= MaxSmallSize) {
// Allocate from mcache free lists.
sizeclass = runtime·SizeToClass(size);
size = runtime·class_to_size[sizeclass];
- c = m->mcache;
v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
if(v == nil)
runtime·throw("out of memory");
- mstats.alloc += size;
- mstats.total_alloc += size;
- mstats.by_size[sizeclass].nmalloc++;
+ c->local_alloc += size;
+ c->local_total_alloc += size;
+ c->local_by_size[sizeclass].nmalloc++;
} else {
// TODO(rsc): Report tracebacks for very large allocations.
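For context on the small-object path in this hunk: runtime·SizeToClass rounds the request up to one of a fixed set of size classes, and runtime·class_to_size maps the class back to its byte size, so every free list holds blocks of exactly one size. A minimal sketch of that rounding, with a made-up toy table (the real runtime derives its table at startup):

#include <stdint.h>
#include <stdio.h>

/* Toy class table: class 0 is reserved for large objects, as in the
   runtime; the sizes themselves are invented for this example. */
static const uint32_t class_to_size[] = { 0, 8, 16, 32, 48, 64 };
enum { NumSizeClasses = sizeof class_to_size / sizeof class_to_size[0] };

static int
size_to_class(uint32_t size)
{
	for (int i = 1; i < NumSizeClasses; i++)
		if (size <= class_to_size[i])
			return i;
	return 0;	/* too big: take the large-object path */
}

int
main(void)
{
	uint32_t size = 20;
	int sizeclass = size_to_class(size);
	printf("size %u -> class %d (%u bytes)\n",
	       (unsigned)size, sizeclass, (unsigned)class_to_size[sizeclass]);
	return 0;
}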
@@ -76,8 +61,8 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
if(s == nil)
runtime·throw("out of memory");
size = npages<<PageShift;
- mstats.alloc += size;
- mstats.total_alloc += size;
+ c->local_alloc += size;
+ c->local_total_alloc += size;
v = (void*)(s->start << PageShift);
// setup for mark sweep
@@ -97,7 +82,7 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
// pick next profile time
if(rate > 0x3fffffff) // make 2*rate not overflow
rate = 0x3fffffff;
- m->mcache->next_sample = fastrand1() % (2*rate);
+ m->mcache->next_sample = runtime·fastrand1() % (2*rate);
profile:
runtime·setblockspecial(v);
runtime·MProf_Malloc(v, size);
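The profiling hunk only swaps in the shared generator, but the sampling trick around it is worth spelling out: drawing next_sample uniformly from [0, 2*rate) gives a mean of rate, so allocations get sampled about once per rate bytes on average while the exact trigger points stay unpredictable. A hedged sketch of that loop in plain C (rand() and the small RATE are stand-ins; the runtime uses fastrand1 and a much larger default rate):

#include <stdio.h>
#include <stdlib.h>

enum { RATE = 4096 };	/* sample ~once per 4 kB allocated */

static long next_sample;

static void
pick_next_sample(void)
{
	long rate = RATE;
	if (rate > 0x3fffffff)	/* make 2*rate not overflow, as in the diff */
		rate = 0x3fffffff;
	next_sample = rand() % (2 * rate);
}

static void
count_alloc(long size)
{
	next_sample -= size;
	if (next_sample < 0) {	/* crossed the random trigger point */
		printf("sampled an allocation of %ld bytes\n", size);
		pick_next_sample();
	}
}

int
main(void)
{
	pick_next_sample();
	for (int i = 0; i < 100; i++)
		count_alloc(1000);	/* ~100 kB allocated, ~25 samples expected */
	return 0;
}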
@@ -143,6 +128,7 @@ runtime·free(void *v)
// Find size class for v.
sizeclass = s->sizeclass;
+ c = m->mcache;
if(sizeclass == 0) {
// Large object.
size = s->npages<<PageShift;
@@ -154,7 +140,6 @@ runtime·free(void *v)
runtime·MHeap_Free(&runtime·mheap, s, 1);
} else {
// Small object.
- c = m->mcache;
size = runtime·class_to_size[sizeclass];
if(size > sizeof(uintptr))
((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
@@ -162,10 +147,10 @@ runtime·free(void *v)
// it might coalesce v and other blocks into a bigger span
// and change the bitmap further.
runtime·markfreed(v, size);
- mstats.by_size[sizeclass].nfree++;
+ c->local_by_size[sizeclass].nfree++;
runtime·MCache_Free(c, v, sizeclass, size);
}
- mstats.alloc -= size;
+ c->local_alloc -= size;
if(prof)
runtime·MProf_Free(v, size);
m->mallocing = 0;
@@ -178,7 +163,7 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
byte *p;
MSpan *s;
- mstats.nlookup++;
+ m->mcache->local_nlookup++;
s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
if(sp)
*sp = s;
@@ -207,9 +192,10 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
}
n = runtime·class_to_size[s->sizeclass];
- i = ((byte*)v - p)/n;
- if(base)
+ if(base) {
+ i = ((byte*)v - p)/n;
*base = p + i*n;
+ }
if(size)
*size = n;
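The mlookup change above is a small optimization: the object index i is only needed to round an interior pointer down to its object's base, and the division that computes it is the expensive part, so the new code skips it when the caller did not ask for *base. The arithmetic itself, as a self-contained sketch:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned char span[64];			/* pretend span of 16-byte objects */
	uintptr_t n = 16;			/* object size for this span's class */
	unsigned char *p = span;		/* span start */
	unsigned char *v = span + 37;		/* interior pointer being looked up */
	uintptr_t i = (uintptr_t)(v - p) / n;	/* object index: the costly divide */
	unsigned char *base = p + i*n;		/* object start */
	printf("object %lu, offset %ld into it\n",
	       (unsigned long)i, (long)(v - base));
	return 0;
}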
@@ -229,6 +215,29 @@ runtime·allocmcache(void)
return c;
}
+void
+runtime·purgecachedstats(M* m)
+{
+ MCache *c;
+
+ // Protected by either heap or GC lock.
+ c = m->mcache;
+ mstats.heap_alloc += c->local_cachealloc;
+ c->local_cachealloc = 0;
+ mstats.heap_objects += c->local_objects;
+ c->local_objects = 0;
+ mstats.nmalloc += c->local_nmalloc;
+ c->local_nmalloc = 0;
+ mstats.nfree += c->local_nfree;
+ c->local_nfree = 0;
+ mstats.nlookup += c->local_nlookup;
+ c->local_nlookup = 0;
+ mstats.alloc += c->local_alloc;
+ c->local_alloc = 0;
+ mstats.total_alloc += c->local_total_alloc;
+ c->local_total_alloc = 0;
+}
+
uintptr runtime·sizeof_C_MStats = sizeof(MStats);
#define MaxArena32 (2U<<30)
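runtime·purgecachedstats is the other half of the per-cache counters introduced earlier in the diff: the hot paths touch only the owning M's mcache, and this function folds those deltas into the global mstats while the heap or GC lock is held. A minimal sketch of the same pattern in plain C with pthreads, where an explicit mutex stands in for the heap/GC lock and all names are illustrative:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct MCacheSketch {
	uint64_t local_nmalloc;		/* hot path: touched by owner thread only */
	uint64_t local_alloc;
} MCacheSketch;

static pthread_mutex_t statslock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t nmalloc, alloc;	/* shared totals, updated under statslock */

static void
malloc_fastpath(MCacheSketch *c, uint64_t size)
{
	c->local_nmalloc++;		/* no lock, no shared cache-line traffic */
	c->local_alloc += size;
}

static void
purgecachedstats(MCacheSketch *c)
{
	pthread_mutex_lock(&statslock);
	nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
	alloc += c->local_alloc;
	c->local_alloc = 0;
	pthread_mutex_unlock(&statslock);
}

int
main(void)
{
	MCacheSketch c = {0, 0};
	for (int i = 0; i < 1000; i++)
		malloc_fastpath(&c, 16);
	purgecachedstats(&c);
	printf("nmalloc=%llu alloc=%llu\n",
	       (unsigned long long)nmalloc, (unsigned long long)alloc);
	return 0;
}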
@@ -373,46 +382,28 @@ func new(n uint32) (ret *uint8) {
ret = runtime·mal(n);
}
-// Stack allocator uses malloc/free most of the time,
-// but if we're in the middle of malloc and need stack,
-// we have to do something else to avoid deadlock.
-// In that case, we fall back on a fixed-size free-list
-// allocator, assuming that inside malloc all the stack
-// frames are small, so that all the stack allocations
-// will be a single size, the minimum (right now, 5k).
-static struct {
- Lock;
- FixAlloc;
-} stacks;
-
-enum {
- FixedStack = StackMin,
-};
-
void*
runtime·stackalloc(uint32 n)
{
- void *v;
-
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
// Doing so would cause a deadlock (issue 1547).
if(g != m->g0)
runtime·throw("stackalloc not on scheduler stack");
+ // Stack allocator uses malloc/free most of the time,
+ // but if we're in the middle of malloc and need stack,
+ // we have to do something else to avoid deadlock.
+ // In that case, we fall back on a fixed-size free-list
+ // allocator, assuming that inside malloc all the stack
+ // frames are small, so that all the stack allocations
+ // will be a single size, the minimum (right now, 5k).
if(m->mallocing || m->gcing || n == FixedStack) {
- runtime·lock(&stacks);
- if(stacks.size == 0)
- runtime·FixAlloc_Init(&stacks, n, runtime·SysAlloc, nil, nil);
- if(stacks.size != n) {
- runtime·printf("stackalloc: in malloc, size=%D want %d", (uint64)stacks.size, n);
+ if(n != FixedStack) {
+ runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
runtime·throw("stackalloc");
}
- v = runtime·FixAlloc_Alloc(&stacks);
- mstats.stacks_inuse = stacks.inuse;
- mstats.stacks_sys = stacks.sys;
- runtime·unlock(&stacks);
- return v;
+ return runtime·FixAlloc_Alloc(m->stackalloc);
}
return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
}
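The stackalloc rewrite drops the global, mutex-protected FixAlloc in favor of a per-M allocator (m->stackalloc), so the in-malloc stack path no longer takes a lock at all. FixAlloc itself is just a fixed-size free-list allocator; a minimal sketch of the idea in plain C, with malloc() standing in for the runtime's page-level allocator:

#include <stdio.h>
#include <stdlib.h>

enum { FIXED_STACK = 5 * 1024 };   /* the single supported block size */

typedef struct FixAllocSketch {
	void *freelist;                /* singly linked list of free blocks */
} FixAllocSketch;

static void *
fixalloc_alloc(FixAllocSketch *f)
{
	if (f->freelist != NULL) {
		void *v = f->freelist;
		f->freelist = *(void **)v; /* pop: a free block's first word links to the next */
		return v;
	}
	return malloc(FIXED_STACK);    /* grow from the system allocator */
}

static void
fixalloc_free(FixAllocSketch *f, void *v)
{
	*(void **)v = f->freelist;     /* push onto the free list */
	f->freelist = v;
}

int
main(void)
{
	FixAllocSketch f = { NULL };
	void *a = fixalloc_alloc(&f);
	fixalloc_free(&f, a);
	void *b = fixalloc_alloc(&f);  /* reuses the block just freed */
	printf("reused: %s\n", a == b ? "yes" : "no");
	return 0;
}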
@@ -421,11 +412,7 @@ void
runtime·stackfree(void *v, uintptr n)
{
if(m->mallocing || m->gcing || n == FixedStack) {
- runtime·lock(&stacks);
- runtime·FixAlloc_Free(&stacks, v);
- mstats.stacks_inuse = stacks.inuse;
- mstats.stacks_sys = stacks.sys;
- runtime·unlock(&stacks);
+ runtime·FixAlloc_Free(m->stackalloc, v);
return;
}
runtime·free(v);