Bug Summary

File: src/malloc/malloc.c
Warning: line 372, column 12
The left operand of '&' is a garbage value
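
The flagged expression is the IS_MMAPPED(MEM_TO_CHUNK(p)) test in __malloc0 (line 372): the checker believes the chunk header field csize read inside the macro was never written, so the left operand of '&' is treated as garbage. Every path through malloc() does store csize before returning (line 333 for mmapped chunks, line 201 in expand_heap for heap chunks), but those stores happen through pointers derived from __expand_heap's return value, which the analyzer apparently cannot track, so this looks like a false positive. A minimal sketch of the pattern the checker is reporting (a reduced hypothetical example, not code from malloc.c):

    #include <stddef.h>

    struct chunk { size_t psize, csize; };

    /* Same shape as IS_MMAPPED: csize is the left operand of '&'. */
    static int is_mmapped(struct chunk *c)
    {
        return !(c->csize & (size_t)1);
    }

    int repro(void)
    {
        struct chunk c;        /* csize never written on this path... */
        return is_mmapped(&c); /* ...so the '&' reads a garbage value */
    }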

Annotated Source Code

  1	#define _GNU_SOURCE
  2	#include <stdlib.h>
  3	#include <string.h>
  4	#include <limits.h>
  5	#include <stdint.h>
  6	#include <errno.h>
  7	#include <sys/mman.h>
  8	#include "libc.h"
  9	#include "atomic.h"
 10	#include "pthread_impl.h"
 11	
 12	#if defined(__GNUC__) && defined(__PIC__)
 13	#define inline inline __attribute__((always_inline))
 14	#endif
 15	
 16	void *__mmap(void *, size_t, int, int, int, off_t);
 17	int __munmap(void *, size_t);
 18	void *__mremap(void *, size_t, size_t, int, ...);
 19	int __madvise(void *, size_t, int);
 20	
 21	struct chunk {
 22		size_t psize, csize;
 23		struct chunk *next, *prev;
 24	};
 25	
 26	struct bin {
 27		volatile int lock[2];
 28		struct chunk *head;
 29		struct chunk *tail;
 30	};
 31	
 32	static struct {
 33		volatile uint64_t binmap;
 34		struct bin bins[64];
 35		volatile int free_lock[2];
 36	} mal;
 37	
 38	
 39	#define SIZE_ALIGN (4*sizeof(size_t))
 40	#define SIZE_MASK (-SIZE_ALIGN)
 41	#define OVERHEAD (2*sizeof(size_t))
 42	#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
 43	#define DONTCARE 16
 44	#define RECLAIM 163840
 45	
 46	#define CHUNK_SIZE(c) ((c)->csize & -2)
 47	#define CHUNK_PSIZE(c) ((c)->psize & -2)
 48	#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
 49	#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
 50	#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
 51	#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
 52	#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))
 53	
 54	#define C_INUSE ((size_t)1)
 55	
 56	#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
 57	
 58	
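The macros above fix the chunk layout: sizes carry the C_INUSE flag in their low bit, and user memory sits OVERHEAD bytes past the header. A self-contained sketch of the encoding, assuming a 64-bit target (so OVERHEAD is 16) and made-up sizes:

    #include <assert.h>
    #include <stddef.h>

    struct chunk { size_t psize, csize; struct chunk *next, *prev; };

    int main(void)
    {
        struct chunk c = { .psize = 32 | 1, .csize = 96 | 1 };
        assert((c.csize & -2) == 96); /* CHUNK_SIZE(): strip the flag bit */
        assert((c.csize & 1) == 1);   /* C_INUSE set: IS_MMAPPED() is 0 */

        /* CHUNK_TO_MEM and MEM_TO_CHUNK are inverse OVERHEAD offsets. */
        void *mem = (char *)&c + 2*sizeof(size_t);
        assert((struct chunk *)((char *)mem - 2*sizeof(size_t)) == &c);
        return 0;
    }
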
 59	/* Synchronization tools */
 60	
 61	static inline void lock(volatile int *lk)
 62	{
 63		if (libc.threads_minus_1)
 64			while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
 65	}
 66	
 67	static inline void unlock(volatile int *lk)
 68	{
 69		if (lk[0]) {
 70			a_store(lk, 0);
 71			if (lk[1]) __wake(lk, 1, 1);
 72		}
 73	}
 74	
 75	static inline void lock_bin(int i)
 76	{
 77		lock(mal.bins[i].lock);
 78		if (!mal.bins[i].head)
 79			mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
 80	}
 81	
 82	static inline void unlock_bin(int i)
 83	{
 84		unlock(mal.bins[i].lock);
 85	}
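
lock() takes the lock by atomically swapping 1 into lk[0] and, under contention, parks on a futex via __wait, with lk[1] counting waiters so unlock() can skip the wake when nobody sleeps. a_swap, a_store, __wait and __wake are musl internals; a rough analogue of just the lock-word protocol in portable C11 atomics, spinning where musl would sleep:

    #include <stdatomic.h>

    static void lock_sketch(atomic_int *lk)
    {
        while (atomic_exchange(lk, 1))
            ; /* musl parks on the futex via __wait() instead of spinning */
    }

    static void unlock_sketch(atomic_int *lk)
    {
        atomic_store(lk, 0); /* musl then __wake()s one waiter if lk[1] != 0 */
    }

    int main(void)
    {
        atomic_int lk = 0;
        lock_sketch(&lk);
        unlock_sketch(&lk);
        return 0;
    }
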
 86	
 87	static int first_set(uint64_t x)
 88	{
 89	#if 1
 90		return a_ctz_64(x);
 91	#else
 92		static const char debruijn64[64] = {
 93			0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
 94			62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
 95			63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
 96			51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
 97		};
 98		static const char debruijn32[32] = {
 99			0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
100			31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
101		};
102		if (sizeof(long) < 8) {
103			uint32_t y = x;
104			if (!y) {
105				y = x>>32;
106				return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
107			}
108			return debruijn32[(y&-y)*0x076be629 >> 27];
109		}
110		return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
111	#endif
112	}
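
malloc() pairs first_set() with the binmap: it masks off all bins below the ideal index and takes the lowest remaining set bit, i.e. the smallest non-empty bin that is large enough. A runnable sketch with a made-up binmap, using the GCC/Clang builtin in place of musl's a_ctz_64:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t binmap = (1ULL<<12) | (1ULL<<17) | (1ULL<<40);
        int i = 15;                          /* ideal bin for the request */
        uint64_t mask = binmap & -(1ULL<<i); /* keep only bins >= 15 */
        assert(__builtin_ctzll(mask) == 17); /* bin 12 too small; take 17 */
        return 0;
    }
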
113	
114	static const unsigned char bin_tab[60] = {
115		32,33,34,35,36,36,37,37,38,38,39,39,
116		40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
117		44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
118		46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
119	};
120	
121	static int bin_index(size_t x)
122	{
123		x = x / SIZE_ALIGN - 1;
124		if (x <= 32) return x;
125		if (x < 512) return bin_tab[x/8-4];
126		if (x > 0x1c00) return 63;
127		return bin_tab[x/128-4] + 16;
128	}
129	
130	static int bin_index_up(size_t x)
131	{
132		x = x / SIZE_ALIGN - 1;
133		if (x <= 32) return x;
134		x--;
135		if (x < 512) return bin_tab[x/8-4] + 1;
136		return bin_tab[x/128-4] + 17;
137	}
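
To see the mapping concretely, here is a standalone copy of bin_index() with SIZE_ALIGN fixed at 32 (a 64-bit target) and two hand-checked, hypothetical sizes; bin_index_up() differs only in rounding up, so a request never maps to a bin of smaller chunks:

    #include <assert.h>
    #include <stddef.h>

    static const unsigned char bin_tab[60] = {
        32,33,34,35,36,36,37,37,38,38,39,39,
        40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
        44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
        46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
    };

    static int bin_index_64(size_t x)
    {
        x = x / 32 - 1;            /* size in SIZE_ALIGN units, minus 1 */
        if (x <= 32) return x;     /* bins 0-32 hold exactly one size */
        if (x < 512) return bin_tab[x/8-4];
        if (x > 0x1c00) return 63; /* largest sizes share bin 63 */
        return bin_tab[x/128-4] + 16;
    }

    int main(void)
    {
        assert(bin_index_64(96) == 2);    /* 96/32-1 = 2: exact class */
        assert(bin_index_64(2048) == 35); /* 2048/32-1 = 63 -> bin_tab[3] */
        return 0;
    }
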
138	
139	#if 0
140	void __dump_heap(int x)
141	{
142		struct chunk *c;
143		int i;
144		for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
145			fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
146				c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
147				c->csize & 15,
148				NEXT_CHUNK(c)->psize & 15);
149		for (i=0; i<64; i++) {
150			if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
151			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
152				if (!(mal.binmap & 1ULL<<i))
153					fprintf(stderr, "missing from binmap!\n");
154			} else if (mal.binmap & 1ULL<<i)
155				fprintf(stderr, "binmap wrongly contains %d!\n", i);
156		}
157	}
158	#endif
159	
160	void *__expand_heap(size_t *);
161	
162	static struct chunk *expand_heap(size_t n)
163	{
164		static int heap_lock[2];
165		static void *end;
166		void *p;
167		struct chunk *w;
168	
169		/* The argument n already accounts for the caller's chunk
170		 * overhead needs, but if the heap can't be extended in-place,
171		 * we need room for an extra zero-sized sentinel chunk. */
172		n += SIZE_ALIGN;
173	
174		lock(heap_lock);
175	
176		p = __expand_heap(&n);
177		if (!p) {
178			unlock(heap_lock);
179			return 0;
180		}
181	
182		/* If not just expanding existing space, we need to make a
183		 * new sentinel chunk below the allocated space. */
184		if (p != end) {
185			/* Valid/safe because of the prologue increment. */
186			n -= SIZE_ALIGN;
187			p = (char *)p + SIZE_ALIGN;
188			w = MEM_TO_CHUNK(p);
189			w->psize = 0 | C_INUSE;
190		}
191	
192		/* Record new heap end and fill in footer. */
193		end = (char *)p + n;
194		w = MEM_TO_CHUNK(end);
195		w->psize = n | C_INUSE;
196		w->csize = 0 | C_INUSE;
197	
198		/* Fill in header, which may be new or may be replacing a
199		 * zero-size sentinel header at the old end-of-heap. */
200		w = MEM_TO_CHUNK(p);
201		w->csize = n | C_INUSE;
202	
203		unlock(heap_lock);
204	
205		return w;
206	}
207	
208	static int adjust_size(size_t *n)
209	{
210		/* Result of pointer difference must fit in ptrdiff_t. */
211		if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
212			if (*n) {
213				errno = ENOMEM;
214				return -1;
215			} else {
216				*n = SIZE_ALIGN;
217				return 0;
218			}
219		}
220		*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
221		return 0;
222	}
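
The last line of adjust_size() adds header OVERHEAD and rounds up to a multiple of SIZE_ALIGN. With 64-bit values (OVERHEAD 16, SIZE_ALIGN 32, SIZE_MASK -32) the arithmetic works out as in this sketch; the request sizes are hypothetical:

    #include <assert.h>
    #include <stddef.h>

    /* (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK, constants inlined */
    static size_t round_request(size_t n)
    {
        return (n + 16 + 32 - 1) & (size_t)-32;
    }

    int main(void)
    {
        assert(round_request(1)  == 32); /* minimum chunk, header included */
        assert(round_request(17) == 64); /* 17+16 = 33 > 32: next class */
        assert(round_request(48) == 64); /* 48+16 = 64 exactly */
        return 0;
    }
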
223	
224	static void unbin(struct chunk *c, int i)
225	{
226		if (c->prev == c->next)
227			a_and_64(&mal.binmap, ~(1ULL<<i));
228		c->prev->next = c->next;
229		c->next->prev = c->prev;
230		c->csize |= C_INUSE;
231		NEXT_CHUNK(c)->psize |= C_INUSE;
232	}
233	
234	static int alloc_fwd(struct chunk *c)
235	{
236		int i;
237		size_t k;
238		while (!((k=c->csize) & C_INUSE)) {
239			i = bin_index(k);
240			lock_bin(i);
241			if (c->csize == k) {
242				unbin(c, i);
243				unlock_bin(i);
244				return 1;
245			}
246			unlock_bin(i);
247		}
248		return 0;
249	}
250	
251	static int alloc_rev(struct chunk *c)
252	{
253		int i;
254		size_t k;
255		while (!((k=c->psize) & C_INUSE)) {
256			i = bin_index(k);
257			lock_bin(i);
258			if (c->psize == k) {
259				unbin(PREV_CHUNK(c), i);
260				unlock_bin(i);
261				return 1;
262			}
263			unlock_bin(i);
264		}
265		return 0;
266	}
267	
268	
269	/* pretrim - trims a chunk _prior_ to removing it from its bin.
270	 * Must be called with i as the ideal bin for size n, j the bin
271	 * for the _free_ chunk self, and bin j locked. */
272	static int pretrim(struct chunk *self, size_t n, int i, int j)
273	{
274		size_t n1;
275		struct chunk *next, *split;
276	
277		/* We cannot pretrim if it would require re-binning. */
278		if (j < 40) return 0;
279		if (j < i+3) {
280			if (j != 63) return 0;
281			n1 = CHUNK_SIZE(self);
282			if (n1-n <= MMAP_THRESHOLD) return 0;
283		} else {
284			n1 = CHUNK_SIZE(self);
285		}
286		if (bin_index(n1-n) != j) return 0;
287	
288		next = NEXT_CHUNK(self);
289		split = (void *)((char *)self + n);
290	
291		split->prev = self->prev;
292		split->next = self->next;
293		split->prev->next = split;
294		split->next->prev = split;
295		split->psize = n | C_INUSE;
296		split->csize = n1-n;
297		next->psize = n1-n;
298		self->csize = n | C_INUSE;
299		return 1;
300	}
301	
302	static void trim(struct chunk *self, size_t n)
303	{
304		size_t n1 = CHUNK_SIZE(self);
305		struct chunk *next, *split;
306	
307		if (n >= n1 - DONTCARE) return;
308	
309		next = NEXT_CHUNK(self);
310		split = (void *)((char *)self + n);
311	
312		split->psize = n | C_INUSE;
313		split->csize = n1-n | C_INUSE;
314		next->psize = n1-n | C_INUSE;
315		self->csize = n | C_INUSE;
316	
317		free(CHUNK_TO_MEM(split));
318	}
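
The header writes in trim() can be replayed on a plain buffer to see the resulting layout. A sketch assuming 64-bit size_t and hypothetical sizes: a 128-byte chunk is cut into a 64-byte in-use chunk plus a 64-byte remainder, and the remainder is marked C_INUSE only because trim() immediately hands it to free():

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t heap[32] = {0};
        size_t n1 = 128, n = 64;       /* chunk sizes in bytes */
        size_t *self  = heap;          /* word 0: psize, word 1: csize */
        size_t *split = heap + n/sizeof(size_t);
        size_t *next  = heap + n1/sizeof(size_t);

        split[0] = n | 1;              /* split->psize = n | C_INUSE */
        split[1] = (n1-n) | 1;         /* split->csize = n1-n | C_INUSE */
        next[0]  = (n1-n) | 1;         /* next->psize = n1-n | C_INUSE */
        self[1]  = n | 1;              /* self->csize = n | C_INUSE */

        assert((split[1] & -2) == 64); /* CHUNK_SIZE(split) */
        return 0;
    }
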
319	
320	void *malloc(size_t n)
321	{
322		struct chunk *c;
323		int i, j;
324	
325		if (adjust_size(&n) < 0) return 0;
326	
327		if (n > MMAP_THRESHOLD) {
328			size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
329			char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
330				MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
331			if (base == (void *)-1) return 0;
332			c = (void *)(base + SIZE_ALIGN - OVERHEAD);
333			c->csize = len - (SIZE_ALIGN - OVERHEAD);
334			c->psize = SIZE_ALIGN - OVERHEAD;
335			return CHUNK_TO_MEM(c);
336		}
337	
338		i = bin_index_up(n);
339		for (;;) {
340			uint64_t mask = mal.binmap & -(1ULL<<i);
341			if (!mask) {
342				c = expand_heap(n);
343				if (!c) return 0;
344				if (alloc_rev(c)) {
345					struct chunk *x = c;
346					c = PREV_CHUNK(c);
347					NEXT_CHUNK(x)->psize = c->csize =
348						x->csize + CHUNK_SIZE(c);
349				}
350				break;
351			}
352			j = first_set(mask);
353			lock_bin(j);
354			c = mal.bins[j].head;
355			if (c != BIN_TO_CHUNK(j)) {
356				if (!pretrim(c, n, i, j)) unbin(c, j);
357				unlock_bin(j);
358				break;
359			}
360			unlock_bin(j);
361		}
362	
363		/* Now patch up in case we over-allocated */
364		trim(c, n);
365	
366		return CHUNK_TO_MEM(c);
367	}
368	
369	void *__malloc0(size_t n)
370	{
371		void *p = malloc(n);
	[1] Uninitialized value stored to field 'csize'
372		if (p && !IS_MMAPPED(MEM_TO_CHUNK(p))) {
	[2] Assuming 'p' is non-null
	[3] Within the expansion of the macro 'IS_MMAPPED':
	    [a] The left operand of '&' is a garbage value
373			size_t *z;
374			n = (n + sizeof *z - 1)/sizeof *z;
375			for (z=p; n; n--, z++) if (*z) *z=0;
376		}
377		return p;
378	}
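
__malloc0 is the zero-filling backend for calloc: mmapped chunks are skipped because fresh anonymous pages are already zero, and the conditional store (if (*z) *z=0;) avoids dirtying pages that are zero already. A wrapper in the calloc style then only needs an overflow-checked multiply; a sketch of that shape, not musl's actual calloc.c:

    #include <errno.h>
    #include <stddef.h>

    void *__malloc0(size_t);

    void *calloc_sketch(size_t m, size_t n)
    {
        if (n && m > (size_t)-1/n) { /* m*n would overflow size_t */
            errno = ENOMEM;
            return 0;
        }
        return __malloc0(m*n);
    }
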
379	
380	void *realloc(void *p, size_t n)
381	{
382		struct chunk *self, *next;
383		size_t n0, n1;
384		void *new;
385	
386		if (!p) return malloc(n);
387	
388		if (adjust_size(&n) < 0) return 0;
389	
390		self = MEM_TO_CHUNK(p);
391		n1 = n0 = CHUNK_SIZE(self);
392	
393		if (IS_MMAPPED(self)) {
394			size_t extra = self->psize;
395			char *base = (char *)self - extra;
396			size_t oldlen = n0 + extra;
397			size_t newlen = n + extra;
398			/* Crash on realloc of freed chunk */
399			if (extra & 1) a_crash();
400			if (newlen < PAGE_SIZE && (new = malloc(n))) {
401				memcpy(new, p, n-OVERHEAD);
402				free(p);
403				return new;
404			}
405			newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
406			if (oldlen == newlen) return p;
407			base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
408			if (base == (void *)-1)
409				return newlen < oldlen ? p : 0;
410			self = (void *)(base + extra);
411			self->csize = newlen - extra;
412			return CHUNK_TO_MEM(self);
413		}
414	
415		next = NEXT_CHUNK(self);
416	
417		/* Crash on corrupted footer (likely from buffer overflow) */
418		if (next->psize != self->csize) a_crash();
419	
420		/* Merge adjacent chunks if we need more space. This is not
421		 * a waste of time even if we fail to get enough space, because our
422		 * subsequent call to free would otherwise have to do the merge. */
423		if (n > n1 && alloc_fwd(next)) {
424			n1 += CHUNK_SIZE(next);
425			next = NEXT_CHUNK(next);
426		}
427		/* FIXME: find what's wrong here and reenable it..? */
428		if (0 && n > n1 && alloc_rev(self)) {
429			self = PREV_CHUNK(self);
430			n1 += CHUNK_SIZE(self);
431		}
432		self->csize = n1 | C_INUSE;
433		next->psize = n1 | C_INUSE;
434	
435		/* If we got enough space, split off the excess and return */
436		if (n <= n1) {
437			//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
438			trim(self, n);
439			return CHUNK_TO_MEM(self);
440		}
441	
442		/* As a last resort, allocate a new chunk and copy to it. */
443		new = malloc(n-OVERHEAD);
444		if (!new) return 0;
445		memcpy(new, p, n0-OVERHEAD);
446		free(CHUNK_TO_MEM(self));
447		return new;
448	}
449	
450	void free(void *p)
451	{
452		struct chunk *self = MEM_TO_CHUNK(p);
453		struct chunk *next;
454		size_t final_size, new_size, size;
455		int reclaim=0;
456		int i;
457	
458		if (!p) return;
459	
460		if (IS_MMAPPED(self)) {
461			size_t extra = self->psize;
462			char *base = (char *)self - extra;
463			size_t len = CHUNK_SIZE(self) + extra;
464			/* Crash on double free */
465			if (extra & 1) a_crash();
466			__munmap(base, len);
467			return;
468		}
469	
470		final_size = new_size = CHUNK_SIZE(self);
471		next = NEXT_CHUNK(self);
472	
473		/* Crash on corrupted footer (likely from buffer overflow) */
474		if (next->psize != self->csize) a_crash();
475	
476		for (;;) {
477			if (self->psize & next->csize & C_INUSE) {
478				self->csize = final_size | C_INUSE;
479				next->psize = final_size | C_INUSE;
480				i = bin_index(final_size);
481				lock_bin(i);
482				lock(mal.free_lock);
483				if (self->psize & next->csize & C_INUSE)
484					break;
485				unlock(mal.free_lock);
486				unlock_bin(i);
487			}
488	
489			if (alloc_rev(self)) {
490				self = PREV_CHUNK(self);
491				size = CHUNK_SIZE(self);
492				final_size += size;
493				if (new_size+size > RECLAIM && (new_size+size^size) > size)
494					reclaim = 1;
495			}
496	
497			if (alloc_fwd(next)) {
498				size = CHUNK_SIZE(next);
499				final_size += size;
500				if (new_size+size > RECLAIM && (new_size+size^size) > size)
501					reclaim = 1;
502				next = NEXT_CHUNK(next);
503			}
504		}
505	
506		if (!(mal.binmap & 1ULL<<i))
507			a_or_64(&mal.binmap, 1ULL<<i);
508	
509		self->csize = final_size;
510		next->psize = final_size;
511		unlock(mal.free_lock);
512	
513		self->next = BIN_TO_CHUNK(i);
514		self->prev = mal.bins[i].tail;
515		self->next->prev = self;
516		self->prev->next = self;
517	
518		/* Replace middle of large chunks with fresh zero pages */
519		if (reclaim) {
520			uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
521			uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
522	#if 1
523			__madvise((void *)a, b-a, MADV_DONTNEED);
524	#else
525			__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
526				MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
527	#endif
528		}
529	
530		unlock_bin(i);
531	}