File:        src/malloc/malloc.c
Location:    line 429, column 3
Description: Use of memory after it is freed
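
For orientation: in the listing below, line 429 is the `return CHUNK_TO_MEM(self);` statement in realloc(), reached immediately after `trim(self, n)` at line 428. trim() hands the split-off tail of the chunk back to free() (line 307), so the checker evidently models memory reachable through `self` as freed at that point and flags the subsequent use. Reading the code, only the remainder chunk `split` is actually released, so this is most plausibly a conservative false positive rather than a real use-after-free, but it is the path the finding refers to.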
  1 | #define _GNU_SOURCE
  2 | #include <stdlib.h>
  3 | #include <string.h>
  4 | #include <limits.h>
  5 | #include <stdint.h>
  6 | #include <errno.h>
  7 | #include <sys/mman.h>
  8 | #include "libc.h"
  9 | #include "atomic.h"
 10 | #include "pthread_impl.h"
 11 |
 12 | #if defined(__GNUC__) && defined(__PIC__)
 13 | #define inline inline __attribute__((always_inline))
 14 | #endif
 15 |
 16 | void *__mmap(void *, size_t, int, int, int, off_t);
 17 | int __munmap(void *, size_t);
 18 | void *__mremap(void *, size_t, size_t, int, ...);
 19 | int __madvise(void *, size_t, int);
 20 |
 21 | struct chunk {
 22 | 	size_t psize, csize;
 23 | 	struct chunk *next, *prev;
 24 | };
 25 |
 26 | struct bin {
 27 | 	volatile int lock[2];
 28 | 	struct chunk *head;
 29 | 	struct chunk *tail;
 30 | };
 31 |
 32 | static struct {
 33 | 	volatile uint64_t binmap;
 34 | 	struct bin bins[64];
 35 | 	volatile int free_lock[2];
 36 | } mal;
 37 |
 38 |
 39 | #define SIZE_ALIGN (4*sizeof(size_t))
 40 | #define SIZE_MASK (-SIZE_ALIGN)
 41 | #define OVERHEAD (2*sizeof(size_t))
 42 | #define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
 43 | #define DONTCARE 16
 44 | #define RECLAIM 163840
 45 |
 46 | #define CHUNK_SIZE(c) ((c)->csize & -2)
 47 | #define CHUNK_PSIZE(c) ((c)->psize & -2)
 48 | #define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
 49 | #define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
 50 | #define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
 51 | #define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
 52 | #define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))
 53 |
 54 | #define C_INUSE  ((size_t)1)
 55 |
 56 | #define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
 57 |
 58 |
 59 | /* Synchronization tools */
 60 |
 61 | static inline void lock(volatile int *lk)
 62 | {
 63 | 	if (libc.threads_minus_1)
 64 | 		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
 65 | }
 66 |
 67 | static inline void unlock(volatile int *lk)
 68 | {
 69 | 	if (lk[0]) {
 70 | 		a_store(lk, 0);
 71 | 		if (lk[1]) __wake(lk, 1, 1);
 72 | 	}
 73 | }
 74 |
 75 | static inline void lock_bin(int i)
 76 | {
 77 | 	lock(mal.bins[i].lock);
 78 | 	if (!mal.bins[i].head)
 79 | 		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
 80 | }
 81 |
 82 | static inline void unlock_bin(int i)
 83 | {
 84 | 	unlock(mal.bins[i].lock);
 85 | }
 86 |
 87 | static int first_set(uint64_t x)
 88 | {
 89 | #if 1
 90 | 	return a_ctz_64(x);
 91 | #else
 92 | 	static const char debruijn64[64] = {
 93 | 		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
 94 | 		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
 95 | 		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
 96 | 		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
 97 | 	};
 98 | 	static const char debruijn32[32] = {
 99 | 		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
100 | 		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
101 | 	};
102 | 	if (sizeof(long) < 8) {
103 | 		uint32_t y = x;
104 | 		if (!y) {
105 | 			y = x>>32;
106 | 			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
107 | 		}
108 | 		return debruijn32[(y&-y)*0x076be629 >> 27];
109 | 	}
110 | 	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
111 | #endif
112 | }
113 |
114 | static int bin_index(size_t x)
115 | {
116 | 	x = x / SIZE_ALIGN - 1;
117 | 	if (x <= 32) return x;
118 | 	if (x > 0x1c00) return 63;
119 | 	return ((union { float v; uint32_t r; }){(int)x}.r>>21) - 496;
120 | }
121 |
122 | static int bin_index_up(size_t x)
123 | {
124 | 	x = x / SIZE_ALIGN - 1;
125 | 	if (x <= 32) return x;
126 | 	return ((union { float v; uint32_t r; }){(int)x}.r+0x1fffff>>21) - 496;
127 | }
128 |
129 | #if 0
130 | void __dump_heap(int x)
131 | {
132 | 	struct chunk *c;
133 | 	int i;
134 | 	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
135 | 		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
136 | 			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
137 | 			c->csize & 15,
138 | 			NEXT_CHUNK(c)->psize & 15);
139 | 	for (i=0; i<64; i++) {
140 | 		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
141 | 			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
142 | 			if (!(mal.binmap & 1ULL<<i))
143 | 				fprintf(stderr, "missing from binmap!\n");
144 | 		} else if (mal.binmap & 1ULL<<i)
145 | 			fprintf(stderr, "binmap wrongly contains %d!\n", i);
146 | 	}
147 | }
148 | #endif
149 |
150 | void *__expand_heap(size_t *);
151 |
152 | static struct chunk *expand_heap(size_t n)
153 | {
154 | 	static int heap_lock[2];
155 | 	static void *end;
156 | 	void *p;
157 | 	struct chunk *w;
158 |
159 | 	/* The argument n already accounts for the caller's chunk
160 | 	 * overhead needs, but if the heap can't be extended in-place,
161 | 	 * we need room for an extra zero-sized sentinel chunk. */
162 | 	n += SIZE_ALIGN;
163 |
164 | 	lock(heap_lock);
165 |
166 | 	p = __expand_heap(&n);
167 | 	if (!p) {
168 | 		unlock(heap_lock);
169 | 		return 0;
170 | 	}
171 |
172 | 	/* If not just expanding existing space, we need to make a
173 | 	 * new sentinel chunk below the allocated space. */
174 | 	if (p != end) {
175 | 		/* Valid/safe because of the prologue increment. */
176 | 		n -= SIZE_ALIGN;
177 | 		p = (char *)p + SIZE_ALIGN;
178 | 		w = MEM_TO_CHUNK(p);
179 | 		w->psize = 0 | C_INUSE;
180 | 	}
181 |
182 | 	/* Record new heap end and fill in footer. */
183 | 	end = (char *)p + n;
184 | 	w = MEM_TO_CHUNK(end);
185 | 	w->psize = n | C_INUSE;
186 | 	w->csize = 0 | C_INUSE;
187 |
188 | 	/* Fill in header, which may be new or may be replacing a
189 | 	 * zero-size sentinel header at the old end-of-heap. */
190 | 	w = MEM_TO_CHUNK(p);
191 | 	w->csize = n | C_INUSE;
192 |
193 | 	unlock(heap_lock);
194 |
195 | 	return w;
196 | }
197 |
198 | static int adjust_size(size_t *n)
199 | {
200 | 	/* Result of pointer difference must fit in ptrdiff_t. */
201 | 	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
202 | 		if (*n) {
203 | 			errno = ENOMEM;
204 | 			return -1;
205 | 		} else {
206 | 			*n = SIZE_ALIGN;
207 | 			return 0;
208 | 		}
209 | 	}
210 | 	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
211 | 	return 0;
212 | }
213 |
214 | static void unbin(struct chunk *c, int i)
215 | {
216 | 	if (c->prev == c->next)
217 | 		a_and_64(&mal.binmap, ~(1ULL<<i));
218 | 	c->prev->next = c->next;
219 | 	c->next->prev = c->prev;
220 | 	c->csize |= C_INUSE;
221 | 	NEXT_CHUNK(c)->psize |= C_INUSE;
222 | }
223 |
224 | static int alloc_fwd(struct chunk *c)
225 | {
226 | 	int i;
227 | 	size_t k;
228 | 	while (!((k=c->csize) & C_INUSE)) {
229 | 		i = bin_index(k);
230 | 		lock_bin(i);
231 | 		if (c->csize == k) {
232 | 			unbin(c, i);
233 | 			unlock_bin(i);
234 | 			return 1;
235 | 		}
236 | 		unlock_bin(i);
237 | 	}
238 | 	return 0;
239 | }
240 |
241 | static int alloc_rev(struct chunk *c)
242 | {
243 | 	int i;
244 | 	size_t k;
245 | 	while (!((k=c->psize) & C_INUSE)) {
246 | 		i = bin_index(k);
247 | 		lock_bin(i);
248 | 		if (c->psize == k) {
249 | 			unbin(PREV_CHUNK(c), i);
250 | 			unlock_bin(i);
251 | 			return 1;
252 | 		}
253 | 		unlock_bin(i);
254 | 	}
255 | 	return 0;
256 | }
257 |
258 |
259 | /* pretrim - trims a chunk _prior_ to removing it from its bin.
260 |  * Must be called with i as the ideal bin for size n, j the bin
261 |  * for the _free_ chunk self, and bin j locked. */
262 | static int pretrim(struct chunk *self, size_t n, int i, int j)
263 | {
264 | 	size_t n1;
265 | 	struct chunk *next, *split;
266 |
267 | 	/* We cannot pretrim if it would require re-binning. */
268 | 	if (j < 40) return 0;
269 | 	if (j < i+3) {
270 | 		if (j != 63) return 0;
271 | 		n1 = CHUNK_SIZE(self);
272 | 		if (n1-n <= MMAP_THRESHOLD) return 0;
273 | 	} else {
274 | 		n1 = CHUNK_SIZE(self);
275 | 	}
276 | 	if (bin_index(n1-n) != j) return 0;
277 |
278 | 	next = NEXT_CHUNK(self);
279 | 	split = (void *)((char *)self + n);
280 |
281 | 	split->prev = self->prev;
282 | 	split->next = self->next;
283 | 	split->prev->next = split;
284 | 	split->next->prev = split;
285 | 	split->psize = n | C_INUSE;
286 | 	split->csize = n1-n;
287 | 	next->psize = n1-n;
288 | 	self->csize = n | C_INUSE;
289 | 	return 1;
290 | }
291 |
292 | static void trim(struct chunk *self, size_t n)
293 | {
294 | 	size_t n1 = CHUNK_SIZE(self);
295 | 	struct chunk *next, *split;
296 |
297 | 	if (n >= n1 - DONTCARE) return;
298 |
299 | 	next = NEXT_CHUNK(self);
300 | 	split = (void *)((char *)self + n);
301 |
302 | 	split->psize = n | C_INUSE;
303 | 	split->csize = n1-n | C_INUSE;
304 | 	next->psize = n1-n | C_INUSE;
305 | 	self->csize = n | C_INUSE;
306 |
307 | 	free(CHUNK_TO_MEM(split));
308 | }
309 |
310 | void *malloc(size_t n)
311 | {
312 | 	struct chunk *c;
313 | 	int i, j;
314 |
315 | 	if (adjust_size(&n) < 0) return 0;
316 |
317 | 	if (n > MMAP_THRESHOLD) {
318 | 		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
319 | 		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
320 | 			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
321 | 		if (base == (void *)-1) return 0;
322 | 		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
323 | 		c->csize = len - (SIZE_ALIGN - OVERHEAD);
324 | 		c->psize = SIZE_ALIGN - OVERHEAD;
325 | 		return CHUNK_TO_MEM(c);
326 | 	}
327 |
328 | 	i = bin_index_up(n);
329 | 	for (;;) {
330 | 		uint64_t mask = mal.binmap & -(1ULL<<i);
331 | 		if (!mask) {
332 | 			c = expand_heap(n);
333 | 			if (!c) return 0;
334 | 			if (alloc_rev(c)) {
335 | 				struct chunk *x = c;
336 | 				c = PREV_CHUNK(c);
337 | 				NEXT_CHUNK(x)->psize = c->csize =
338 | 					x->csize + CHUNK_SIZE(c);
339 | 			}
340 | 			break;
341 | 		}
342 | 		j = first_set(mask);
343 | 		lock_bin(j);
344 | 		c = mal.bins[j].head;
345 | 		if (c != BIN_TO_CHUNK(j)) {
346 | 			if (!pretrim(c, n, i, j)) unbin(c, j);
347 | 			unlock_bin(j);
348 | 			break;
349 | 		}
350 | 		unlock_bin(j);
351 | 	}
352 |
353 | 	/* Now patch up in case we over-allocated */
354 | 	trim(c, n);
355 |
356 | 	return CHUNK_TO_MEM(c);
357 | }
358 |
359 | void *__malloc0(size_t n)
360 | {
361 | 	void *p = malloc(n);
362 | 	if (p && !IS_MMAPPED(MEM_TO_CHUNK(p))) {
363 | 		size_t *z;
364 | 		n = (n + sizeof *z - 1)/sizeof *z;
365 | 		for (z=p; n; n--, z++) if (*z) *z=0;
366 | 	}
367 | 	return p;
368 | }
369 |
370 | void *realloc(void *p, size_t n)
371 | {
372 | 	struct chunk *self, *next;
373 | 	size_t n0, n1;
374 | 	void *new;
375 |
376 | 	if (!p) return malloc(n);
377 |
378 | 	if (adjust_size(&n) < 0) return 0;
379 |
380 | 	self = MEM_TO_CHUNK(p);
381 | 	n1 = n0 = CHUNK_SIZE(self);
382 |
383 | 	if (IS_MMAPPED(self)) {
384 | 		size_t extra = self->psize;
385 | 		char *base = (char *)self - extra;
386 | 		size_t oldlen = n0 + extra;
387 | 		size_t newlen = n + extra;
388 | 		/* Crash on realloc of freed chunk */
389 | 		if (extra & 1) a_crash();
390 | 		if (newlen < PAGE_SIZE && (new = malloc(n))) {
391 | 			memcpy(new, p, n-OVERHEAD);
392 | 			free(p);
393 | 			return new;
394 | 		}
395 | 		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
396 | 		if (oldlen == newlen) return p;
397 | 		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
398 | 		if (base == (void *)-1)
399 | 			return newlen < oldlen ? p : 0;
400 | 		self = (void *)(base + extra);
401 | 		self->csize = newlen - extra;
402 | 		return CHUNK_TO_MEM(self);
403 | 	}
404 |
405 | 	next = NEXT_CHUNK(self);
406 |
407 | 	/* Crash on corrupted footer (likely from buffer overflow) */
408 | 	if (next->psize != self->csize) a_crash();
409 |
410 | 	/* Merge adjacent chunks if we need more space. This is not
411 | 	 * a waste of time even if we fail to get enough space, because our
412 | 	 * subsequent call to free would otherwise have to do the merge. */
413 | 	if (n > n1 && alloc_fwd(next)) {
414 | 		n1 += CHUNK_SIZE(next);
415 | 		next = NEXT_CHUNK(next);
416 | 	}
417 | 	/* FIXME: find what's wrong here and reenable it..? */
418 | 	if (0 && n > n1 && alloc_rev(self)) {
419 | 		self = PREV_CHUNK(self);
420 | 		n1 += CHUNK_SIZE(self);
421 | 	}
422 | 	self->csize = n1 | C_INUSE;
423 | 	next->psize = n1 | C_INUSE;
424 |
425 | 	/* If we got enough space, split off the excess and return */
426 | 	if (n <= n1) {
427 | 		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
428 | 		trim(self, n);
429 | 		return CHUNK_TO_MEM(self);
430 | 	}
431 |
432 | 	/* As a last resort, allocate a new chunk and copy to it. */
433 | 	new = malloc(n-OVERHEAD);
434 | 	if (!new) return 0;
435 | 	memcpy(new, p, n0-OVERHEAD);
436 | 	free(CHUNK_TO_MEM(self));
437 | 	return new;
438 | }
439 |
440 | void free(void *p)
441 | {
442 | 	struct chunk *self = MEM_TO_CHUNK(p);
443 | 	struct chunk *next;
444 | 	size_t final_size, new_size, size;
445 | 	int reclaim=0;
446 | 	int i;
447 |
448 | 	if (!p) return;
449 |
450 | 	if (IS_MMAPPED(self)) {
451 | 		size_t extra = self->psize;
452 | 		char *base = (char *)self - extra;
453 | 		size_t len = CHUNK_SIZE(self) + extra;
454 | 		/* Crash on double free */
455 | 		if (extra & 1) a_crash();
456 | 		__munmap(base, len);
457 | 		return;
458 | 	}
459 |
460 | 	final_size = new_size = CHUNK_SIZE(self);
461 | 	next = NEXT_CHUNK(self);
462 |
463 | 	/* Crash on corrupted footer (likely from buffer overflow) */
464 | 	if (next->psize != self->csize) a_crash();
465 |
466 | 	for (;;) {
467 | 		if (self->psize & next->csize & C_INUSE) {
468 | 			self->csize = final_size | C_INUSE;
469 | 			next->psize = final_size | C_INUSE;
470 | 			i = bin_index(final_size);
471 | 			lock_bin(i);
472 | 			lock(mal.free_lock);
473 | 			if (self->psize & next->csize & C_INUSE)
474 | 				break;
475 | 			unlock(mal.free_lock);
476 | 			unlock_bin(i);
477 | 		}
478 |
479 | 		if (alloc_rev(self)) {
480 | 			self = PREV_CHUNK(self);
481 | 			size = CHUNK_SIZE(self);
482 | 			final_size += size;
483 | 			if (new_size+size > RECLAIM && (new_size+size^size) > size)
484 | 				reclaim = 1;
485 | 		}
486 |
487 | 		if (alloc_fwd(next)) {
488 | 			size = CHUNK_SIZE(next);
489 | 			final_size += size;
490 | 			if (new_size+size > RECLAIM && (new_size+size^size) > size)
491 | 				reclaim = 1;
492 | 			next = NEXT_CHUNK(next);
493 | 		}
494 | 	}
495 |
496 | 	if (!(mal.binmap & 1ULL<<i))
497 | 		a_or_64(&mal.binmap, 1ULL<<i);
498 |
499 | 	self->csize = final_size;
500 | 	next->psize = final_size;
501 | 	unlock(mal.free_lock);
502 |
503 | 	self->next = BIN_TO_CHUNK(i);
504 | 	self->prev = mal.bins[i].tail;
505 | 	self->next->prev = self;
506 | 	self->prev->next = self;
507 |
508 | 	/* Replace middle of large chunks with fresh zero pages */
509 | 	if (reclaim) {
510 | 		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
511 | 		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
512 | #if 1
513 | 		__madvise((void *)a, b-a, MADV_DONTNEED);
514 | #else
515 | 		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
516 | 			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
517 | #endif
518 | 	}
519 |
520 | 	unlock_bin(i);
521 | }
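
As a reading aid, here is a minimal standalone sketch (my own illustration, not musl code) of the boundary-tag layout the allocator manipulates: every chunk stores its own size in csize and its predecessor's size in psize, with the C_INUSE flag kept in the low bit, which is why sizes are always read with `& -2`. The names mirror the macros at the top of the listing.

#include <stdio.h>
#include <stddef.h>

struct chunk { size_t psize, csize; };

#define OVERHEAD (2*sizeof(size_t))        /* header words before user memory */
#define C_INUSE  ((size_t)1)               /* flag in the low bit of psize/csize */
#define MEM_TO_CHUNK(p) ((struct chunk *)((char *)(p) - OVERHEAD))
#define CHUNK_TO_MEM(c) ((void *)((char *)(c) + OVERHEAD))
#define CHUNK_SIZE(c)   ((c)->csize & -2)  /* mask off the in-use bit */

int main(void)
{
	size_t buf[8];                     /* aligned backing store for one fake chunk */
	struct chunk *c = (struct chunk *)buf;
	c->psize = 0 | C_INUSE;
	c->csize = 48 | C_INUSE;           /* chunk size 48 with the in-use flag set */

	void *mem = CHUNK_TO_MEM(c);
	printf("chunk size: %zu\n", CHUNK_SIZE(c));            /* 48: flag masked off */
	printf("round trip ok: %d\n", MEM_TO_CHUNK(mem) == c); /* 1 */
	return 0;
}

The same fixed-offset arithmetic is what makes `free(CHUNK_TO_MEM(split))` in trim() work: a user pointer and its chunk header are always exactly OVERHEAD apart, so either can be recovered from the other.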