/* creates a new inputmapnode object */ inputmapnode_t* inputmapnode_create(const char* name) { inputmapnode_t* f = mallocx(sizeof *f); f->data = mallocx(sizeof *(f->data)); f->data->name = str_dup(name); /* defaults */ f->data->keyboard.enabled = FALSE; f->data->keyboard.scancode[IB_UP] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_RIGHT] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_DOWN] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_LEFT] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE1] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE2] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE3] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE4] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE5] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE6] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE7] = keycode_of("KEY_NONE"); f->data->keyboard.scancode[IB_FIRE8] = keycode_of("KEY_NONE"); f->data->joystick.enabled = FALSE; f->data->joystick.id = 0; f->data->joystick.button[IB_FIRE1] = INFINITY; f->data->joystick.button[IB_FIRE2] = INFINITY; f->data->joystick.button[IB_FIRE3] = INFINITY; f->data->joystick.button[IB_FIRE4] = INFINITY; f->data->joystick.button[IB_FIRE5] = INFINITY; f->data->joystick.button[IB_FIRE6] = INFINITY; f->data->joystick.button[IB_FIRE7] = INFINITY; f->data->joystick.button[IB_FIRE8] = INFINITY; return f; }
TEST_END

/* Verifies that mallocx() reports OOM instead of succeeding on
 * impossible requests (larger than half the address space). */
TEST_BEGIN(test_oom) {
	size_t hugemax, size, alignment;

	/* Largest huge size class supported by this configuration. */
	hugemax = get_huge_size(get_nhuge()-1);

	/*
	 * It should be impossible to allocate two objects that each consume
	 * more than half the virtual address space.
	 */
	{
		void *p;

		p = mallocx(hugemax, 0);
		if (p != NULL) {
			/* First allocation fit; a second must fail. */
			assert_ptr_null(mallocx(hugemax, 0),
			    "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
			dallocx(p, 0);
		}
	}

	/* size = alignment = half the address space for this pointer width
	 * (LG_SIZEOF_PTR == 3 means 8-byte pointers). */
#if LG_SIZEOF_PTR == 3
	size = ZU(0x8000000000000000);
	alignment = ZU(0x8000000000000000);
#else
	size = ZU(0x80000000);
	alignment = ZU(0x80000000);
#endif
	/* NOTE(review): message below is missing a closing ')' after
	 * MALLOCX_ALIGN(%#zx — cosmetic only. */
	assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)),
	    "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size,
	    alignment);
}
TEST_END TEST_BEGIN(test_basic) { #define MAXSZ (((size_t)1) << 26) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { size_t nsz, rsz; void *p; nsz = nallocx(sz, 0); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); rsz = sallocx(p, 0); assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); dallocx(p, 0); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); rsz = sallocx(p, 0); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); dallocx(p, 0); } #undef MAXSZ }
TEST_END TEST_BEGIN(huge_allocation) { unsigned arena1, arena2; void *ptr = mallocx(HUGE_SZ, 0); assert_ptr_not_null(ptr, "Fail to allocate huge size"); size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0"); dallocx(ptr, 0); ptr = mallocx(HUGE_SZ >> 1, 0); assert_ptr_not_null(ptr, "Fail to allocate half huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_ne(arena1, arena2, "Wrong arena used for half huge"); dallocx(ptr, 0); ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE); assert_ptr_not_null(ptr, "Fail to allocate small size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), 0, "Unexpected mallctl() failure"); assert_u_ne(arena1, arena2, "Huge and small should be from different arenas"); dallocx(ptr, 0); }
TEST_END TEST_BEGIN(huge_mallocx) { unsigned arena1, arena2; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0, "Failed to create arena"); void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1)); assert_ptr_not_null(huge, "Fail to allocate huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge, sizeof(huge)), 0, "Unexpected mallctl() failure"); assert_u_eq(arena1, arena2, "Wrong arena used for mallocx"); dallocx(huge, MALLOCX_ARENA(arena1)); void *huge2 = mallocx(HUGE_SZ, 0); assert_ptr_not_null(huge, "Fail to allocate huge size"); assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2, sizeof(huge2)), 0, "Unexpected mallctl() failure"); assert_u_ne(arena1, arena2, "Huge allocation should not come from the manual arena."); assert_u_ne(arena2, 0, "Huge allocation should not come from the arena 0."); dallocx(huge2, 0); }
/*
 * Allocates and frees a block of 'sz' bytes three times: twice through
 * the regular path (free) and once bypassing the thread cache
 * (MALLOCX_TCACHE_NONE + dallocx).
 */
static void
alloc_free_size(size_t sz) {
	/* Fix: honor the sz argument (allocations were hard-coded to 1,
	 * making the parameter dead and the test exercise only one size). */
	void *ptr = mallocx(sz, 0);
	free(ptr);
	ptr = mallocx(sz, 0);
	free(ptr);
	ptr = mallocx(sz, MALLOCX_TCACHE_NONE);
	dallocx(ptr, MALLOCX_TCACHE_NONE);
}
TEST_END

/*
 * Exercises the stats.arenas.0.* summary counters: allocate one object
 * per size category in arena 0, free them, force a purge, then read the
 * counters back (expected to fail with ENOENT when stats are disabled).
 */
TEST_BEGIN(test_stats_arenas_summary) {
	unsigned arena;
	void *little, *large, *huge;
	uint64_t epoch;
	size_t sz;
	/* Stats mallctls only exist when jemalloc was built with stats. */
	int expected = config_stats ? 0 : ENOENT;
	size_t mapped;
	uint64_t npurge, nmadvise, purged;

	/* Pin this thread to arena 0 so the allocations below land there. */
	arena = 0;
	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
	    sizeof(arena)), 0, "Unexpected mallctl() failure");

	/* One allocation per size category (small, large, huge). */
	little = mallocx(SMALL_MAXCLASS, 0);
	assert_ptr_not_null(little, "Unexpected mallocx() failure");
	large = mallocx(large_maxclass, 0);
	assert_ptr_not_null(large, "Unexpected mallocx() failure");
	huge = mallocx(chunksize, 0);
	assert_ptr_not_null(huge, "Unexpected mallocx() failure");
	dallocx(little, 0);
	dallocx(large, 0);
	dallocx(huge, 0);

	/* Force a purge so npurge/nmadvise/purged become non-trivial. */
	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	/* Advance the epoch so the stats reads see refreshed values. */
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(epoch)), 0, "Unexpected mallctl() failure");

	/* NOTE(review): "Unexepected" typo below is in the original
	 * messages; left untouched here. */
	sz = sizeof(size_t);
	assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz,
	    NULL, 0), expected, "Unexepected mallctl() result");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz,
	    NULL, 0), expected, "Unexepected mallctl() result");
	assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise,
	    &sz, NULL, 0), expected, "Unexepected mallctl() result");
	assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz,
	    NULL, 0), expected, "Unexepected mallctl() result");

	if (config_stats) {
		assert_u64_gt(npurge, 0,
		    "At least one purge should have occurred");
		assert_u64_le(nmadvise, purged,
		    "nmadvise should be no greater than purged");
	}
}
TEST_END TEST_BEGIN(test_size_extra_overflow) { size_t small0, hugemax; void *p; /* Get size classes. */ small0 = get_small_size(0); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, "Unexpected xallocx() behavior"); /* Test overflow such that hugemax-size underflows. */ assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, "Unexpected xallocx() behavior"); dallocx(p, 0); }
TEST_END

/*
 * Thread body that manually drives jemalloc's TSD through its cleanup /
 * reincarnation life cycle and checks the state transitions:
 * live -> (tsd_cleanup) purgatory -> (allocator use) reincarnated.
 */
static void *
thd_start_reincarnated(void *arg) {
	tsd_t *tsd = tsd_fetch();
	assert(tsd);

	void *p = malloc(1);
	assert_ptr_not_null(p, "Unexpected malloc() failure");

	/* Manually trigger reincarnation. */
	assert_ptr_not_null(tsd_arena_get(tsd),
	    "Should have tsd arena set.");
	tsd_cleanup((void *)tsd);
	/* Cleanup must clear the cached arena pointer... */
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared.");
	/* ...and park the TSD in purgatory until it is used again. */
	assert_u_eq(tsd->state, tsd_state_purgatory,
	    "TSD state should be purgatory\n");

	/* Any allocator activity from purgatory reincarnates the TSD. */
	free(p);
	assert_u_eq(tsd->state, tsd_state_reincarnated,
	    "TSD state should be reincarnated\n");
	/* Bypass tcache: a reincarnated TSD must not re-cache an arena. */
	p = mallocx(1, MALLOCX_TCACHE_NONE);
	assert_ptr_not_null(p, "Unexpected malloc() failure");
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "Should not have tsd arena set after reincarnation.");

	free(p);
	/* A second cleanup must leave the arena pointer cleared as well. */
	tsd_cleanup((void *)tsd);
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared after 2nd cleanup.");

	return NULL;
}
/*
 * TSD destructor for the test's data key.  On the first invocation it
 * checks the stored value; it then re-registers itself (by mutating
 * *data and allocating) for two extra rounds so that jemalloc's
 * internal TSD reinitialization is exercised during cleanup.
 */
void
data_cleanup(int *data) {
	if (data_cleanup_count == 0) {
		/* First cleanup round: value must be untouched. */
		assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
		    "Argument passed into cleanup function should match tsd "
		    "value");
	}
	++data_cleanup_count;

	/*
	 * Allocate during cleanup for two rounds, in order to assure that
	 * jemalloc's internal tsd reinitialization happens.
	 */
	bool reincarnate = false;
	switch (*data) {
	case MALLOC_TSD_TEST_DATA_INIT:
		*data = 1;
		reincarnate = true;
		break;
	case 1:
		*data = 2;
		reincarnate = true;
		break;
	case 2:
		/* Third round: stop the cycle. */
		return;
	default:
		not_reached();
	}

	if (reincarnate) {
		void *p = mallocx(1, 0);
		/* Fix: message typo "Unexpeced" -> "Unexpected". */
		assert_ptr_not_null(p, "Unexpected mallocx() failure");
		dallocx(p, 0);
	}
}
/*
 * load_sprite_images()
 * Loads the sprite by reading the spritesheet: carves the rectangle
 * (rect_x, rect_y, rect_w, rect_h) of the sheet into frame_count frames
 * of frame_w x frame_h pixels, scanning left-to-right, top-to-bottom.
 */
void load_sprite_images(spriteinfo_t *spr)
{
    int i, cur_x, cur_y;
    image_t *sheet;

    /* frames per row * frames per column (integer division: any
     * partial trailing row/column of the rect is ignored) */
    spr->frame_count = (spr->rect_w / spr->frame_w) * (spr->rect_h / spr->frame_h);
    spr->frame_data = mallocx(spr->frame_count * sizeof(*(spr->frame_data)));

    /* reading the images... */
    if(NULL == (sheet = image_load(spr->source_file)))
        fatal_error("FATAL ERROR: couldn't load spritesheet \"%s\"", spr->source_file);

    /* walk the rect, one frame at a time; wrap to the next row when the
     * cursor passes the right edge of the rect */
    cur_x = spr->rect_x;
    cur_y = spr->rect_y;
    for(i=0; i<spr->frame_count; i++) {
        /* shared sub-image: frames reference the sheet's pixels */
        spr->frame_data[i] = image_create_shared(sheet, cur_x, cur_y, spr->frame_w, spr->frame_h);
        cur_x += spr->frame_w;
        if(cur_x >= spr->rect_x+spr->rect_w) {
            cur_x = spr->rect_x;
            cur_y += spr->frame_h;
        }
    }

    /* NOTE(review): unref takes the resource *name*, not the image_t* —
     * presumably the resource manager tracks sheets by filename; confirm. */
    image_unref(spr->source_file);
}
/*
 * merge_sort_mix()
 * Merge step of merge sort: merges the two adjacent sorted runs
 * base[p..m] and base[m+1..q] (inclusive indices, elements of 'size'
 * bytes) back into base[p..q], using 'comparator' for ordering.
 * A temporary copy of the whole range is allocated and freed here.
 */
void merge_sort_mix(void *base, size_t size, int (*comparator)(const void*,const void*), int p, int q, int m)
{
    /* scratch copy of base[p..q]; i walks the left run, j the right run */
    uint8 *arr = mallocx((q-p+1) * size);
    uint8 *i = arr;
    uint8 *j = arr + (m+1-p) * size;   /* start of the right run in arr */
    int k = p;                          /* next write position in base */

    memcpy(arr, (uint8*)base + p * size, (q-p+1) * size);

    /* merge while both runs have elements; <= keeps the sort stable */
    while(i < arr + (m+1-p) * size && j <= arr + (q-p) * size) {
        if(comparator((const void*)i, (const void*)j) <= 0) {
            memcpy((uint8*)base + (k++) * size, i, size);
            i += size;
        }
        else {
            memcpy((uint8*)base + (k++) * size, j, size);
            j += size;
        }
    }

    /* drain whichever run still has elements left */
    while(i < arr + (m+1-p) * size) {
        memcpy((uint8*)base + (k++) * size, i, size);
        i += size;
    }

    while(j <= arr + (q-p) * size) {
        memcpy((uint8*)base + (k++) * size, j, size);
        j += size;
    }

    free(arr);
}
TEST_END TEST_BEGIN(test_align) { void *p, *q; size_t align; #define MAX_ALIGN (ZU(1) << 29) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { q = rallocx(p, 1, MALLOCX_ALIGN(align)); assert_ptr_not_null(q, "Unexpected rallocx() error for align=%zu", align); assert_ptr_null( (void *)((uintptr_t)q & (align-1)), "%p inadequately aligned for align=%zu", q, align); p = q; } dallocx(p, 0); #undef MAX_ALIGN }
/*
 * radix_sort()
 * LSD radix sort, base 10: sorts v[0..size-1] in ascending order using
 * a counting pass per decimal digit.  Stable within each pass.
 * NOTE(review): assumes all values are non-negative — for v[i] < 0,
 * (v[i]/exp)%10 is negative and would index bucket[] out of bounds;
 * confirm callers never pass negative values.
 */
void radix_sort(int* v, size_t size){
    int i;
    int* c;
    int bucket[10] = {0,0,0,0,0,0,0,0,0,0};
    int largest;
    int exp = 1;

    /* Fix: guard empty input (previously read v[0] out of bounds). */
    if (size == 0)
        return;

    c = mallocx(sizeof(int)*size);   /* scratch output of each pass */

    /* Find the maximum to know how many digit passes are needed. */
    largest = v[0];
    for (i = 0; i < size; i++) {
        if (v[i] > largest)
            largest = v[i];
    }

    /* One counting-sort pass per decimal digit, least significant first. */
    while (largest/exp > 0){
        memset(bucket,0,sizeof(int)*10);
        /* Histogram of the current digit. */
        for (i = 0; i < size; i++)
            bucket[(v[i] / exp) % 10]++;
        /* Prefix sums: bucket[d] = first index past digit d's slots. */
        for (i = 1; i < 10; i++)
            bucket[i] += bucket[i - 1];
        /* Backward scan keeps the pass stable. */
        for (i = size - 1; i >= 0; i--)
            c[--bucket[(v[i] / exp) % 10]] = v[i];
        /* Copy the pass result back. */
        for (i = 0; i < size; i++)
            v[i] = c[i];
        exp *= 10;
    }

    free(c);
}
/*
 * new_string()
 * Allocates and initializes an empty growable STRING whose buffer
 * expands in chunks of 'increments' bytes.  Starts as "" (a single
 * NUL terminator).  The caller owns the returned object.
 */
STRING* new_string(int increments)
{
    STRING *s = mallocx(sizeof *s); /* sizeof *s: stays correct if the type is renamed */
    bcreate(s, 1, increments);
    badd(s, "\0", 1);               /* seed with the NUL terminator */
    return s;
}
TEST_END

/*
 * Verifies that xallocx() on a minimum-size small allocation never
 * grows it in place: requests for the next size class, and size+extra
 * combinations that overflow, must all leave the size at small0.
 */
TEST_BEGIN(test_extra_small) {
	size_t small0, small1, hugemax;
	void *p;

	/* Get size classes. */
	small0 = get_small_size(0);
	small1 = get_small_size(1);
	hugemax = get_huge_size(get_nhuge()-1);

	p = mallocx(small0, 0);
	assert_ptr_not_null(p, "Unexpected mallocx() error");

	/* Growing into the next small class must fail (no in-place grow). */
	assert_zu_eq(xallocx(p, small1, 0, 0), small0,
	    "Unexpected xallocx() behavior");

	/* NOTE(review): identical to the previous assert — possibly a
	 * duplicated line; confirm against upstream before removing. */
	assert_zu_eq(xallocx(p, small1, 0, 0), small0,
	    "Unexpected xallocx() behavior");

	/* size + extra reaching into the next class must also fail. */
	assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
	    "Unexpected xallocx() behavior");

	/* Test size+extra overflow. */
	assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0,
	    "Unexpected xallocx() behavior");
	assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
	    "Unexpected xallocx() behavior");

	dallocx(p, 0);
}
TEST_END TEST_BEGIN(test_overflow) { size_t largemax; void *p; largemax = get_large_size(get_nlarge()-1); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_ptr_null(rallocx(p, largemax+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); dallocx(p, 0); }
/*
 * NO_novo()
 * Allocates and initializes a new tree node ("no") holding the given
 * key ("chave"); both children start as NULL.  The caller owns the
 * returned node.
 * NOTE(review): the parameter had no declared type (K&R implicit int,
 * invalid since C99); made it an explicit int — confirm the 'chave'
 * field is indeed int.
 */
no *NO_novo(int chave){
    no *p = mallocx(sizeof(no));
    p->esq = p->dir = NULL;  /* leaf: no left/right children yet */
    p->chave = chave;
    return p;
}
/* Builds a sensor state configured for left-wall mode. */
sensorstate_t* sensorstate_create_leftwallmode()
{
    sensorstate_t *state = mallocx(sizeof *state);

    /* wire up the left-wall-mode callbacks */
    state->check = check_leftwallmode;
    state->render = render_leftwallmode;

    return state;
}
/* Builds a sensor state configured for ceiling mode. */
sensorstate_t* sensorstate_create_ceilingmode()
{
    sensorstate_t *instance = mallocx(sizeof *instance);

    /* wire up the ceiling-mode callbacks */
    instance->check = check_ceilingmode;
    instance->render = render_ceilingmode;

    return instance;
}
/*
 * Prepends a new named child entry to 'list' and returns the new head.
 * The name is copied; 'data' is stored by reference.
 */
object_children_t* object_children_add(object_children_t* list, const char *name, enemy_t *data)
{
    object_children_t *node = mallocx(sizeof *node);

    node->next = list;           /* push onto the front of the list */
    node->data = data;
    node->name = str_dup(name);  /* node owns its copy of the name */

    return node;
}
/*
 * Prepends a new state-machine entry (named 'name', built for 'owner')
 * to 'list' and returns the new head.
 */
objectmachine_list_t* objectmachine_list_new(objectmachine_list_t* list, const char *name, enemy_t *owner)
{
    objectmachine_list_t *entry = mallocx(sizeof *entry);

    entry->next = list;                        /* push onto the front */
    entry->data = objectbasicmachine_new(owner);
    entry->name = str_dup(name);               /* entry owns the copy */

    return entry;
}
TEST_END

/*
 * Sweeps alignments (8 .. MAXALIGN, powers of two) and a range of sizes
 * per alignment, allocating NITER zeroed aligned objects at each point
 * and checking nallocx()/sallocx() agreement and pointer alignment.
 * Total live memory is capped to bound the test's footprint.
 */
TEST_BEGIN(test_alignment_and_size) {
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
	size_t nsz, rsz, sz, alignment, total;
	unsigned i;
	void *ps[NITER];

	for (i = 0; i < NITER; i++)
		ps[i] = NULL;

	for (alignment = 8;
	    alignment <= MAXALIGN;
	    alignment <<= 1) {
		total = 0;
		/* Step chosen relative to the alignment so several sizes
		 * per class are probed without an excessive inner count. */
		for (sz = 1;
		    sz < 3 * alignment && sz < (1U << 31);
		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
			for (i = 0; i < NITER; i++) {
				/* Predicted size must be non-zero... */
				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_zu_ne(nsz, 0,
				    "nallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_ptr_not_null(ps[i],
				    "mallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				/* ...and must match the real usable size. */
				rsz = sallocx(ps[i], 0);
				assert_zu_ge(rsz, sz,
				    "Real size smaller than expected for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_zu_eq(nsz, rsz,
				    "nallocx()/sallocx() size mismatch for "
				    "alignment=%zu, size=%zu", alignment, sz);
				/* The low alignment bits must be clear. */
				assert_ptr_null(
				    (void *)((uintptr_t)ps[i] & (alignment-1)),
				    "%p inadequately aligned for"
				    " alignment=%zu, size=%zu", ps[i],
				    alignment, sz);
				total += rsz;
				/* Cap the live footprint of this sweep. */
				if (total >= (MAXALIGN << 1))
					break;
			}
			for (i = 0; i < NITER; i++) {
				if (ps[i] != NULL) {
					dallocx(ps[i], 0);
					ps[i] = NULL;
				}
			}
		}
	}
#undef MAXALIGN
#undef NITER
}
/*
 * Core junk-fill check: walks allocation sizes from sz_min to sz_max,
 * verifying that newly allocated bytes are junk-filled (0xa5) and that
 * deallocation junk-fills freed regions.  The dalloc-junk hooks are
 * intercepted so the most recently junked region can be observed.
 */
static void
test_junk(size_t sz_min, size_t sz_max)
{
	char *s;
	size_t sz_prev, sz, i;

	/* Install interceptors; restored at the end of the function. */
	arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
	arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
	arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
	arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
	huge_dalloc_junk_orig = huge_dalloc_junk;
	huge_dalloc_junk = huge_dalloc_junk_intercept;

	sz_prev = 0;
	s = (char *)mallocx(sz_min, 0);
	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

	/* Each iteration grows s to the next size class via rallocx(). */
	for (sz = sallocx(s, 0); sz <= sz_max;
	    sz_prev = sz, sz = sallocx(s, 0)) {
		if (sz_prev > 0) {
			/* Bytes written last round must survive the grow. */
			assert_c_eq(s[0], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    ZU(0), sz_prev);
			assert_c_eq(s[sz_prev-1], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    sz_prev-1, sz_prev);
		}

		/* Fresh bytes must carry the junk pattern; overwrite them
		 * so next round can detect corruption.
		 * NOTE(review): comparing char s[i] with 0xa5 presumably
		 * relies on assert_c_eq's char conversion — confirm. */
		for (i = sz_prev; i < sz; i++) {
			assert_c_eq(s[i], 0xa5,
			    "Newly allocated byte %zu/%zu isn't junk-filled",
			    i, sz);
			s[i] = 'a';
		}

		/* xallocx()==sz means in-place grow failed, so rallocx()
		 * will move the object and junk-fill the old region. */
		if (xallocx(s, sz+1, 0, 0) == sz) {
			void *junked = (void *)s;

			s = (char *)rallocx(s, sz+1, 0);
			assert_ptr_not_null((void *)s,
			    "Unexpected rallocx() failure");
			/* mremap-based moves don't junk the old region. */
			if (!config_mremap || sz+1 <= arena_maxclass) {
				assert_ptr_eq(most_recently_junked, junked,
				    "Expected region of size %zu to be "
				    "junk-filled", sz);
			}
		}
	}

	dallocx(s, 0);
	/* The final free must have junk-filled s itself. */
	assert_ptr_eq(most_recently_junked, (void *)s,
	    "Expected region of size %zu to be junk-filled", sz);

	/* Restore the original hooks. */
	arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
	arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
	huge_dalloc_junk = huge_dalloc_junk_orig;
}
/*
 * Creates the virtual machine that runs an object's state machines.
 * Starts with no states, no current state, an empty history stack and
 * a fresh symbol table.  'owner' is stored by reference.
 */
objectvm_t* objectvm_create(enemy_t* owner)
{
    objectvm_t *machine = mallocx(sizeof *machine);

    machine->owner = owner;
    machine->state_list = NULL;
    machine->reference_to_current_state = NULL;
    machine->history = objectmachine_stack_new();
    machine->symbol_table = symboltable_new();

    return machine;
}
/*
 * Creates the default background strategy for 'background'.
 * Returns a pointer to the embedded base struct, so callers use it
 * through the generic bgstrategy_t interface.
 */
bgstrategy_t *bgstrategy_default_new(background_t *background)
{
    bgstrategy_default_t *strategy = mallocx(sizeof *strategy);
    bgstrategy_t *base = (bgstrategy_t*)strategy; /* base is first member */

    base->background = background;
    base->update = bgstrategy_default_update;

    return base;
}
/* Allocates one byte with mallocx() and releases it with free(),
 * failing the test if the allocation itself fails. */
static void
mallocx_free(void) {
	void *ptr = mallocx(1, 0);

	if (ptr != NULL) {
		free(ptr);
	} else {
		test_fail("Unexpected mallocx() failure");
	}
}
/*
 * Allocates a "big" object with at least `bytes` usable bytes, tagged
 * with `kind`.  A BigNode header is prepended for heap bookkeeping; the
 * returned MemBlock points just past that header.
 */
MemBlock BigHeap::allocBig(size_t bytes, HeaderKind kind) {
#ifdef USE_JEMALLOC
  // jemalloc path: request the padded size, then read back the actual
  // usable size (sallocx may report more than requested).
  auto n = static_cast<BigNode*>(mallocx(bytes + sizeof(BigNode), 0));
  auto cap = sallocx(n, 0);
#else
  // fallback path: capacity is exactly what we asked for
  auto cap = bytes + sizeof(BigNode);
  auto n = static_cast<BigNode*>(safe_malloc(cap));
#endif
  // register the node so the heap can enumerate/release it later
  enlist(n, kind, cap);
  // usable memory starts immediately after the BigNode header
  return {n + 1, cap - sizeof(BigNode)};
}
/* creates a new checkpoint orb item (vtable wired to the
 * checkpointorb_* callbacks; init/release manage the item's data) */
item_t* checkpointorb_create()
{
    /* allocate the derived struct, return it as the base item_t* */
    item_t *item = mallocx(sizeof(checkpointorb_t));

    item->init = checkpointorb_init;
    item->release = checkpointorb_release;
    item->update = checkpointorb_update;
    item->render = checkpointorb_render;

    return item;
}
/* public methods */

/* creates a new big ring item: allocates the derived bigring_t and
 * wires the generic item_t vtable to the bigring_* callbacks */
item_t* bigring_create()
{
    item_t *obj = mallocx(sizeof(bigring_t));

    obj->init = bigring_init;
    obj->release = bigring_release;
    obj->update = bigring_update;
    obj->render = bigring_render;

    return obj;
}