# DP: Fix statically linked gccgo binaries on AArch64.
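# DP: Before this change, the 64-bit runtime guessed whether an address
# DP: range handed to runtime_SysMap/runtime_SysReserve had really been
# DP: reserved by looking at the address itself: anything at or above
# DP: 4GB was assumed to be the large arena, which is only probed (64K
# DP: of PROT_NONE) rather than reserved in full. That guess fails for
# DP: statically linked binaries on AArch64, so pass an explicit flag
# DP: from the call sites, which know the truth, and record it in MHeap
# DP: as is_reserved.

As an illustration only, the standalone sketch below shows the pattern
the patch adopts; sys_reserve and sys_map are hypothetical stand-ins
for runtime_SysReserve and runtime_SysMap, not the runtime's actual
implementation.

/* sketch.c: thread an explicit "really reserved?" flag from the
 * reservation site to the mapping site instead of deducing it from
 * the address.  Build with: cc -o sketch sketch.c */
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

/* Reserve n bytes near hint.  With do_reserve false, only probe 64K of
 * PROT_NONE at the hint (as the patch does for the huge 64-bit arena);
 * the rest of [hint, hint+n) stays unreserved. */
static void *
sys_reserve(void *hint, size_t n, bool do_reserve)
{
	size_t len = do_reserve ? n : (size_t)(64 << 10);
	void *p = mmap(hint, len, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);

	if(p == MAP_FAILED)
		return NULL;
	if(!do_reserve) {
		/* Probe only: demand the exact hint, keep nothing mapped. */
		munmap(p, len);
		return p == hint ? hint : NULL;
	}
	return p;
}

/* Make [v, v+n) readable and writable.  The caller says whether it
 * owns a reservation at v; only then is MAP_FIXED safe. */
static bool
sys_map(void *v, size_t n, bool is_reserved)
{
	int flags = MAP_ANON|MAP_PRIVATE;
	void *p;

	if(is_reserved)
		flags |= MAP_FIXED;
	p = mmap(v, n, PROT_READ|PROT_WRITE, flags, -1, 0);
	/* Without MAP_FIXED the kernel may place the mapping elsewhere. */
	return p == v;
}

int
main(void)
{
	size_t n = (size_t)1 << 20;	/* 1MB demo arena */
	void *v = sys_reserve(NULL, n, true);
	bool is_reserved = (v != NULL);	/* recorded like MHeap.is_reserved */

	if(is_reserved && sys_map(v, n, is_reserved))
		printf("mapped %zu bytes at %p\n", n, v);
	return 0;
}

The point the patch encodes: MAP_FIXED is safe only over address space
the process actually owns, and only the reservation site knows that; it
cannot be deduced from the pointer value.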
--- a/src/libgo/runtime/malloc.goc
+++ b/src/libgo/runtime/malloc.goc
@@ -339,12 +339,14 @@ runtime_mallocinit(void)
extern byte _end[];
byte *want;
uintptr limit;
+ bool is_reserved;
runtime_sizeof_C_MStats = sizeof(MStats);
p = nil;
arena_size = 0;
bitmap_size = 0;
+ is_reserved = false;
// for 64-bit build
USED(p);
@@ -389,7 +391,8 @@ runtime_mallocinit(void)
// If this fails we fall back to the 32 bit memory mechanism
arena_size = MaxMem;
bitmap_size = arena_size / (sizeof(void*)*8/4);
- p = runtime_SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
+ p = runtime_SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size, false);
+ is_reserved = false;
}
if (p == nil) {
// On a 32-bit machine, we can't typically get away
@@ -428,7 +431,8 @@ runtime_mallocinit(void)
want = (byte*)(((uintptr)_end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
if(0xffffffff - (uintptr)want <= bitmap_size + arena_size)
want = 0;
- p = runtime_SysReserve(want, bitmap_size + arena_size);
+ p = runtime_SysReserve(want, bitmap_size + arena_size, true);
+ is_reserved = true;
if(p == nil)
runtime_throw("runtime: cannot reserve arena virtual address space");
if((uintptr)p & (((uintptr)1<<PageShift)-1))
@@ -441,6 +445,7 @@ runtime_mallocinit(void)
runtime_mheap->arena_start = p + bitmap_size;
runtime_mheap->arena_used = runtime_mheap->arena_start;
runtime_mheap->arena_end = runtime_mheap->arena_start + arena_size;
+ runtime_mheap->is_reserved = is_reserved;
// Initialize the rest of the allocator.
runtime_MHeap_Init(runtime_mheap, runtime_SysAlloc);
@@ -462,12 +467,15 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
byte *new_end;
uintptr needed;
+ if (!h->is_reserved) {
+ runtime_throw("runtime: cannot allocate memory");
+ }
needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
// Round wanted arena size to a multiple of 256MB.
needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
new_end = h->arena_end + needed;
if(new_end <= h->arena_start + MaxArena32) {
- p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
+ p = runtime_SysReserve(h->arena_end, new_end - h->arena_end, true);
if(p == h->arena_end)
h->arena_end = new_end;
}
@@ -475,7 +483,7 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
if(n <= (uintptr)(h->arena_end - h->arena_used)) {
// Keep taking from our reservation.
p = h->arena_used;
- runtime_SysMap(p, n);
+ runtime_SysMap(p, n, h->is_reserved);
h->arena_used += n;
runtime_MHeap_MapBits(h);
if(raceenabled)
--- a/src/libgo/runtime/malloc.h
+++ b/src/libgo/runtime/malloc.h
@@ -177,8 +177,8 @@ struct MLink
void* runtime_SysAlloc(uintptr nbytes);
void runtime_SysFree(void *v, uintptr nbytes);
void runtime_SysUnused(void *v, uintptr nbytes);
-void runtime_SysMap(void *v, uintptr nbytes);
-void* runtime_SysReserve(void *v, uintptr nbytes);
+void runtime_SysMap(void *v, uintptr nbytes, bool is_reserved);
+void* runtime_SysReserve(void *v, uintptr nbytes, bool do_reserve);
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manages its
@@ -434,6 +434,7 @@ struct MHeap
FixAlloc spanalloc; // allocator for Span*
FixAlloc cachealloc; // allocator for MCache*
+ bool is_reserved; // is all the address space for this heap actually reserved by us?
};
extern MHeap *runtime_mheap;
--- a/src/libgo/runtime/mem.c
+++ b/src/libgo/runtime/mem.c
@@ -110,7 +110,7 @@ runtime_SysFree(void *v, uintptr n)
}
void*
-runtime_SysReserve(void *v, uintptr n)
+runtime_SysReserve(void *v, uintptr n, bool do_reserve)
{
int fd = -1;
void *p;
@@ -130,7 +130,7 @@ runtime_SysReserve(void *v, uintptr n)
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
- if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ if(!do_reserve) {
p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
if (p != v)
return nil;
@@ -149,7 +149,7 @@ runtime_SysReserve(void *v, uintptr n)
}
void
-runtime_SysMap(void *v, uintptr n)
+runtime_SysMap(void *v, uintptr n, bool is_reserved)
{
void *p;
int fd = -1;
@@ -168,7 +168,7 @@ runtime_SysMap(void *v, uintptr n)
#endif
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ if(!is_reserved) {
p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
if(p == MAP_FAILED && errno == ENOMEM)
runtime_throw("runtime: out of memory");
--- a/src/libgo/runtime/mgc0.c
+++ b/src/libgo/runtime/mgc0.c
@@ -2483,6 +2483,6 @@ runtime_MHeap_MapBits(MHeap *h)
page_size = getpagesize();
n = (n+page_size-1) & ~(page_size-1);
- runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped);
+ runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->is_reserved);
h->bitmap_mapped = n;
}