Diffstat (limited to 'src/pkg/runtime/malloc.h')
-rw-r--r--	src/pkg/runtime/malloc.h	38
1 file changed, 19 insertions, 19 deletions
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index 5bc80f4df..a85e1af8c 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -120,6 +120,12 @@ enum
#else
MHeapMap_Bits = 20,
#endif
+
+ // Max number of threads to run garbage collection.
+ // 2, 3, and 4 are all plausible maximums depending
+ // on the hardware details of the machine. The garbage
+ // collector scales well to 4 cpus.
+ MaxGcproc = 4,
};
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
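The new MaxGcproc constant caps how many threads the runtime will throw at a single collection. A minimal sketch of how a scheduler could clamp its helper count against it; the function and variable names here are illustrative, not the runtime's actual code:

	// Sketch only: choose how many threads may run the garbage collector,
	// never more than the machine has CPUs, never more than GOMAXPROCS
	// allows, and never more than MaxGcproc, past which the collector
	// stops scaling.
	static int32
	gcprocs(int32 ncpu, int32 gomaxprocs)
	{
		int32 n;

		n = ncpu;
		if(n > gomaxprocs)
			n = gomaxprocs;
		if(n > MaxGcproc)
			n = MaxGcproc;
		return n;
	}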
@@ -192,7 +198,7 @@ struct MStats
uint64 nlookup; // number of pointer lookups
uint64 nmalloc; // number of mallocs
uint64 nfree; // number of frees
-
+
// Statistics about malloc heap.
// protected by mheap.Lock
uint64 heap_alloc; // bytes allocated and still in use
@@ -210,7 +216,7 @@ struct MStats
uint64 mcache_inuse; // MCache structures
uint64 mcache_sys;
uint64 buckhash_sys; // profiling bucket hash table
-
+
// Statistics about garbage collector.
// Protected by stopping the world during GC.
uint64 next_gc; // next GC (in heap_alloc time)
@@ -219,7 +225,7 @@ struct MStats
uint32 numgc;
bool enablegc;
bool debuggc;
-
+
// Statistics about allocation size classes.
struct {
uint32 size;
@@ -240,7 +246,7 @@ extern MStats mstats;
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
-// making new objects in class i
+// making new objects in class i
// class_to_transfercount[i] = number of objects to move when
// taking a bunch of objects out of the central lists
// and putting them in the thread free list.
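The three tables in the comment above drive the small-object allocator. A hedged sketch of how one request consults them; the array names follow the comment, the SizeToClass helper and the concrete sizes in the comments are assumptions for illustration:

	// Sketch only: map a request size to its class, then read the
	// per-class parameters from the tables described above.
	int32 sizeclass, elemsize, npages, nmove;

	sizeclass = SizeToClass(40);                   // e.g. a 40-byte request
	elemsize = class_to_size[sizeclass];           // largest size served by this class (say, 48)
	npages = class_to_allocnpages[sizeclass];      // pages fetched per central-list refill
	nmove = class_to_transfercount[sizeclass];     // objects moved central list <-> thread free list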
@@ -279,7 +285,7 @@ struct MCache
int64 nmalloc;
int64 nfree;
} local_by_size[NumSizeClasses];
-
+
};
void* runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
@@ -352,14 +358,14 @@ struct MHeap
byte *arena_start;
byte *arena_used;
byte *arena_end;
-
+
// central free lists for small size classes.
// the union makes sure that the MCentrals are
- // spaced 64 bytes apart, so that each MCentral.Lock
+ // spaced CacheLineSize bytes apart, so that each MCentral.Lock
// gets its own cache line.
union {
MCentral;
- byte pad[64];
+ byte pad[CacheLineSize];
} central[NumSizeClasses];
FixAlloc spanalloc; // allocator for Span*
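The switch from a literal 64 to CacheLineSize keeps the union's intent visible: each MCentral is padded out to a full cache line so that taking one class's MCentral.Lock never bounces the line holding a neighboring class. The same idiom in isolation, in the header's own style; the Counter type is made up, while Lock, byte, and CacheLineSize are the header's:

	// Sketch of the padding idiom: embed the real structure in a union
	// with a cache-line-sized byte array, so an array of these never
	// places two locks on the same cache line.
	typedef struct Counter Counter;
	struct Counter
	{
		Lock;
		uint64 n;
	};

	union
	{
		Counter;
		byte pad[CacheLineSize];
	} counters[NumSizeClasses];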
@@ -387,7 +393,7 @@ int32 runtime·checking;
void runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime·unmarkspan(void *v, uintptr size);
bool runtime·blockspecial(void*);
-void runtime·setblockspecial(void*);
+void runtime·setblockspecial(void*, bool);
void runtime·purgecachedstats(M*);
enum
@@ -400,6 +406,8 @@ enum
void runtime·MProf_Malloc(void*, uintptr);
void runtime·MProf_Free(void*, uintptr);
+int32 runtime·helpgc(bool*);
+void runtime·gchelper(void);
// Malloc profiling settings.
// Must match definition in extern.go.
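The two new declarations split collection work across threads: runtime·helpgc asks the scheduler for helper threads (bounded by MaxGcproc above), and each helper runs runtime·gchelper to do its share of the work. A sketch of the helper side only; gcwait() is a hypothetical stand-in for however the scheduler parks a thread until a collection starts, and the bool* out-parameter of runtime·helpgc is not modeled here:

	// Sketch only, not the runtime's scheduler code: a thread dedicated
	// to GC duty sleeps until a collection begins, assists it, and repeats.
	static void
	gchelperloop(void)
	{
		for(;;) {
			gcwait();		// hypothetical: block until a GC needs helpers
			runtime·gchelper();	// run this thread's share of the collection
		}
	}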
@@ -410,13 +418,5 @@ enum {
};
extern int32 runtime·malloc_profile;
-typedef struct Finalizer Finalizer;
-struct Finalizer
-{
- Finalizer *next; // for use by caller of getfinalizer
- void (*fn)(void*);
- void *arg;
- int32 nret;
-};
-
-Finalizer* runtime·getfinalizer(void*, bool);
+bool runtime·getfinalizer(void *p, bool del, void (**fn)(void*), int32 *nret);
+void runtime·walkfintab(void (*fn)(void*));
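Dropping the Finalizer struct changes runtime·getfinalizer from returning an allocated record to filling in out parameters: the bool result reports whether p had a finalizer, *fn receives the function, and *nret the size of its result area. A hedged sketch of a caller; reading the del argument as "remove the entry while looking it up" carries over from the old bool parameter and is an assumption here:

	// Sketch only: query (and, with del=true, presumably remove) the
	// finalizer registered for p, using the new out-parameter form.
	void (*fn)(void*);
	int32 nret;

	if(runtime·getfinalizer(p, true, &fn, &nret)) {
		// fn is the finalizer to invoke for p; nret is the size in
		// bytes of the space its results need.
	}

The companion runtime·walkfintab declaration suggests a way to visit every pointer that has a registered finalizer, for example so the collector can treat those blocks specially; that reading is likewise an inference from the prototype, not something this header states.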