/* * Allocate a 2^n chunk of pages, aligned at 2^n. This is currently * for the benefit of thread stack allocation, and should be going * away in some time when the migration to TLS is complete. */ static void * alignedpgalloc(int shift) { struct stackcache *sc; int align = 1<<shift; size_t alignedoff; void *rv; if (shift == BMK_THREAD_STACK_PAGE_ORDER && (sc = LIST_FIRST(&cacheofstacks)) != NULL) { LIST_REMOVE(sc, sc_entries); return sc; } if (align > MAXPAGEALIGN) align = MAXPAGEALIGN; /* need to leave this much space until the next aligned alloc */ alignedoff = (bmk_membase + currentpg*PAGE_SIZE) % (align*PAGE_SIZE); if (alignedoff) currentpg += align - (alignedoff>>PAGE_SHIFT); rv = bmk_allocpg(1<<shift); if (((unsigned long)rv & (align*PAGE_SIZE-1)) != 0) { bmk_printf("wanted %d aligned, got memory at %p\n", align, rv); bmk_platform_halt("fail"); } return rv; }
/*
 * Obtain 2^shift pages of backing memory for the allocator.
 * Under MEMALLOC_TESTING the memory comes from the host malloc();
 * otherwise it comes from the bmk page allocator.  The result is
 * returned unchecked -- the caller is responsible for handling a
 * failed (NULL) allocation.
 */
static void *
corealloc(int shift)
{
#ifdef MEMALLOC_TESTING
	return malloc((1<<shift) * pagesz);
#else
	return bmk_allocpg(1<<shift);
#endif
}