25
25
#include "libc/dce.h"
26
26
#include "libc/errno.h"
27
27
#include "libc/intrin/atomic.h"
28
+ #include "libc/intrin/describebacktrace.internal.h"
28
29
#include "libc/intrin/describeflags.internal.h"
29
30
#include "libc/intrin/directmap.internal.h"
30
31
#include "libc/intrin/dll.h"
32
+ #include "libc/intrin/kprintf.h"
31
33
#include "libc/intrin/maps.h"
32
34
#include "libc/intrin/strace.internal.h"
33
35
#include "libc/intrin/weaken.h"
44
46
#include "libc/sysv/errfuns.h"
45
47
#include "libc/thread/thread.h"
46
48
49
// Build-time switch for the memory-map consistency checks below.
// Keep at 0 for production: the checks walk every tracked mapping on
// each mutation, which is too slow for openbsd/windows.
#define MMDEBUG 0

#define WINBASE 0x100080040000 // TODO: Can we support Windows Vista again?
#define WINMAXX 0x200080000000

#define MAP_FIXED_NOREPLACE_linux 0x100000

// Rounds x up to the next multiple of `granularity`, which callers must
// have in scope and which is assumed to be a power of two.
#define PGUP(x) (((x) + granularity - 1) & -granularity)

// BUGFIX: the #if/#else branches were inverted — the expensive
// kprintf/backtrace assertion was compiled into MMDEBUG=0 builds and
// stubbed out in MMDEBUG=1 builds. Debug builds now get the fatal
// assertion (prints the failing expression, a backtrace, and the
// current maps, then exits 99); release builds get a no-op.
#if MMDEBUG
#define ASSERT(x)                                                         \
  do {                                                                    \
    if (!(x)) {                                                           \
      char bt[160];                                                       \
      struct StackFrame *bp = __builtin_frame_address(0);                 \
      kprintf("%!s:%d: assertion failed: %!s\n", __FILE__, __LINE__, #x); \
      kprintf("bt %!s\n", (DescribeBacktrace)(bt, bp));                   \
      __print_maps();                                                     \
      _Exit(99);                                                          \
    }                                                                     \
  } while (0)
#else
#define ASSERT(x) (void)0
#endif
54
73
// NOTE(review): presumably a round-robin/rolling counter used by the
// allocation path — TODO confirm against __maps_alloc (not visible here)
static atomic_ulong rollo ;
55
74
75
+ static bool overlaps_existing_map (const char * addr , size_t size ) {
76
+ int granularity = __granularity ();
77
+ for (struct Map * map = __maps .maps ; map ; map = map -> next )
78
+ if (MAX (addr , map -> addr ) <
79
+ MIN (addr + PGUP (size ), map -> addr + PGUP (map -> size )))
80
+ return true;
81
+ return false;
82
+ }
83
+
84
// Verifies the internal consistency of the memory-map bookkeeping:
// every tracked mapping is sane, the cached map/page counters agree
// with the list, and no two mappings overlap. Compiles to a no-op
// unless MMDEBUG is nonzero.
void __maps_check(void) {
#if MMDEBUG
  size_t maps = 0;
  size_t pages = 0;
  int granularity = getauxval(AT_PAGESZ);
  for (struct Map *map = __maps.maps; map; map = map->next) {
    ASSERT(map->addr != MAP_FAILED);
    ASSERT(map->size);
    pages += PGUP(map->size) / granularity;
    maps += 1;
  }
  // BUGFIX: was `ASSERT(maps = __maps.count)` — an assignment, which
  // made the check vacuously true whenever count was nonzero and
  // clobbered the local counter
  ASSERT(maps == __maps.count);
  ASSERT(pages == __maps.pages);
  // pairwise check that no two tracked mappings overlap
  for (struct Map *m1 = __maps.maps; m1; m1 = m1->next)
    for (struct Map *m2 = m1->next; m2; m2 = m2->next)
      ASSERT(MAX(m1->addr, m2->addr) >=
             MIN(m1->addr + PGUP(m1->size), m2->addr + PGUP(m2->size)));
#endif
}
103
+
56
104
void __maps_free (struct Map * map ) {
105
+ map -> next = 0 ;
106
+ map -> size = 0 ;
107
+ map -> addr = MAP_FAILED ;
108
+ ASSERT (dll_is_alone (& map -> elem ));
57
109
dll_make_last (& __maps .free , & map -> elem );
58
110
}
59
111
60
112
void __maps_insert (struct Map * map ) {
61
113
struct Map * last = __maps .maps ;
62
- if (last && //
114
+ int granularity = getauxval (AT_PAGESZ );
115
+ __maps .pages += PGUP (map -> size ) / granularity ;
116
+ if (last && !IsWindows () && //
63
117
map -> addr == last -> addr + last -> size && //
118
+ (map -> flags & MAP_ANONYMOUS ) && //
64
119
map -> flags == last -> flags && //
65
- map -> prot == last -> prot && //
66
- map -> off == last -> off && //
67
- map -> h == last -> h && //
68
- map -> off == -1 ) {
120
+ map -> prot == last -> prot ) {
121
+ last -> size += map -> size ;
122
+ dll_remove (& __maps .used , & last -> elem );
123
+ dll_make_first (& __maps .used , & last -> elem );
124
+ __maps_free (map );
125
+ } else if (last && !IsWindows () && //
126
+ map -> addr + map -> size == last -> addr && //
127
+ (map -> flags & MAP_ANONYMOUS ) && //
128
+ map -> flags == last -> flags && //
129
+ map -> prot == last -> prot ) {
130
+ last -> addr -= map -> size ;
69
131
last -> size += map -> size ;
70
132
dll_remove (& __maps .used , & last -> elem );
71
133
dll_make_first (& __maps .used , & last -> elem );
@@ -74,7 +136,9 @@ void __maps_insert(struct Map *map) {
74
136
dll_make_first (& __maps .used , & map -> elem );
75
137
map -> next = __maps .maps ;
76
138
__maps .maps = map ;
139
+ ++ __maps .count ;
77
140
}
141
+ __maps_check ();
78
142
}
79
143
80
144
struct Map * __maps_alloc (void ) {
@@ -104,21 +168,14 @@ struct Map *__maps_alloc(void) {
104
168
return map ;
105
169
}
106
170
107
- static bool __overlaps_existing_map (const char * addr , size_t size ) {
108
- for (struct Map * map = __maps .maps ; map ; map = map -> next ) {
109
- if (MAX (addr , map -> addr ) < MIN (addr + size , map -> addr + map -> size ))
110
- return true;
111
- }
112
- return false;
113
- }
114
-
115
171
// Thin wrapper around the raw munmap system call; returns its result
// unchanged (0 on success, -1 on failure).
static int __munmap_chunk(void *addr, size_t size) {
  int rc = sys_munmap(addr, size);
  return rc;
}
118
174
119
175
static int __munmap (char * addr , size_t size , bool untrack_only ) {
120
176
121
177
// validate arguments
178
+ int pagesz = getauxval (AT_PAGESZ );
122
179
int granularity = __granularity ();
123
180
if (((uintptr_t )addr & (granularity - 1 )) || //
124
181
!size || (uintptr_t )addr + size < size )
@@ -127,7 +184,6 @@ static int __munmap(char *addr, size_t size, bool untrack_only) {
127
184
// untrack and delete mapping
128
185
int rc = 0 ;
129
186
__maps_lock ();
130
- // we can't call strace, kprintf, or nothing
131
187
StartOver :;
132
188
struct Map * map = __maps .maps ;
133
189
_Atomic(struct Map * ) * prev = & __maps .maps ;
@@ -141,12 +197,16 @@ StartOver:;
141
197
// remove mapping completely
142
198
dll_remove (& __maps .used , & map -> elem );
143
199
* prev = next ;
144
- map -> size = 0 ;
145
- map -> addr = MAP_FAILED ;
200
+ __maps . pages -= ( map_size + pagesz - 1 ) / pagesz ;
201
+ __maps . count -= 1 ;
146
202
if (untrack_only ) {
147
203
__maps_free (map );
204
+ __maps_check ();
148
205
} else {
206
+ __maps_unlock ();
149
207
if (!IsWindows ()) {
208
+ ASSERT (addr <= map_addr );
209
+ ASSERT (map_addr + PGUP (map_size ) <= addr + PGUP (size ));
150
210
if (__munmap_chunk (map_addr , map_size ))
151
211
rc = -1 ;
152
212
} else {
@@ -155,7 +215,9 @@ StartOver:;
155
215
if (!CloseHandle (map -> h ))
156
216
rc = -1 ;
157
217
}
218
+ __maps_lock ();
158
219
__maps_free (map );
220
+ __maps_check ();
159
221
goto StartOver ;
160
222
}
161
223
map = next ;
@@ -167,25 +229,39 @@ StartOver:;
167
229
rc = einval ();
168
230
} else if (addr <= map_addr ) {
169
231
// shave off lefthand side of mapping
170
- size_t left = addr + size - map_addr ;
171
- size_t right = map_addr + map_size - (addr + size );
232
+ ASSERT (addr + size < map_addr + PGUP (map_size ));
233
+ size_t left = PGUP (addr + size - map_addr );
234
+ size_t right = map_size - left ;
235
+ ASSERT (right > 0 );
236
+ ASSERT (left > 0 );
172
237
map -> addr += left ;
173
238
map -> size = right ;
174
239
if (map -> off != -1 )
175
240
map -> off += left ;
241
+ __maps .pages -= (left + pagesz - 1 ) / pagesz ;
242
+ __maps_check ();
176
243
if (!untrack_only ) {
244
+ __maps_unlock ();
245
+ ASSERT (addr <= map_addr );
246
+ ASSERT (map_addr + PGUP (left ) <= addr + PGUP (size ));
177
247
if (__munmap_chunk (map_addr , left ) == -1 )
178
248
rc = -1 ;
249
+ __maps_lock ();
179
250
goto StartOver ;
180
251
}
181
252
} else if (addr + PGUP (size ) >= map_addr + PGUP (map_size )) {
182
253
// shave off righthand side of mapping
183
254
size_t left = addr - map_addr ;
184
255
size_t right = map_addr + map_size - addr ;
185
256
map -> size = left ;
257
+ __maps .pages -= (right + pagesz - 1 ) / pagesz ;
258
+ __maps_check ();
186
259
if (!untrack_only ) {
260
+ __maps_unlock ();
261
+ ASSERT (PGUP (right ) <= PGUP (size ));
187
262
if (__munmap_chunk (addr , right ) == -1 )
188
263
rc = -1 ;
264
+ __maps_lock ();
189
265
goto StartOver ;
190
266
}
191
267
} else {
@@ -207,9 +283,14 @@ StartOver:;
207
283
map -> off += left + middle ;
208
284
dll_make_first (& __maps .used , & leftmap -> elem );
209
285
* prev = leftmap ;
286
+ __maps .pages -= (middle + pagesz - 1 ) / pagesz ;
287
+ __maps .count += 1 ;
288
+ __maps_check ();
210
289
if (!untrack_only ) {
290
+ __maps_unlock ();
211
291
if (__munmap_chunk (addr , size ) == -1 )
212
292
rc = -1 ;
293
+ __maps_lock ();
213
294
goto StartOver ;
214
295
}
215
296
} else {
@@ -241,7 +322,7 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
241
322
sysflags |= MAP_FIXED_NOREPLACE_linux ;
242
323
} else if (IsFreebsd () || IsNetbsd ()) {
243
324
sysflags |= MAP_FIXED ;
244
- if (__overlaps_existing_map (addr , size ))
325
+ if (overlaps_existing_map (addr , size ))
245
326
return (void * )eexist ();
246
327
} else {
247
328
noreplace = true;
@@ -296,7 +377,7 @@ static void *__mmap_chunk(void *addr, size_t size, int prot, int flags, int fd,
296
377
297
378
// untrack mapping we blew away
298
379
if (should_untrack )
299
- __munmap (addr , size , true);
380
+ __munmap (res . addr , size , true);
300
381
301
382
// track Map object
302
383
map -> addr = res .addr ;
@@ -346,7 +427,7 @@ static void *__mmap_impl(char *addr, size_t size, int prot, int flags, int fd,
346
427
347
428
// so we create an separate map for each granule in the mapping
348
429
if (!(flags & MAP_FIXED )) {
349
- while (__overlaps_existing_map (addr , size )) {
430
+ while (overlaps_existing_map (addr , size )) {
350
431
if (flags & MAP_FIXED_NOREPLACE )
351
432
return (void * )eexist ();
352
433
addr += granularity ;
0 commit comments