/*
 * Allocate one trampoline-sized chunk of executable memory.
 * A simple bump allocator over an RWX page obtained with mmap: when the
 * current page cannot hold another trampoline, a fresh page is mapped
 * and any unused tail of the old page is abandoned.
 * Serialized by callbacks_lock.
 * NOTE(review): PROT_EXEC|PROT_WRITE mappings may be rejected on
 * W^X-hardened systems — confirm target platforms allow this.
 */
SML_PRIMITIVE void *
sml_alloc_code()
{
	void *chunk;
	size_t pagesize;

	mutex_lock(&callbacks_lock);

	/* refill with a fresh executable page if the current one is exhausted */
	if (trampoline_heap.end - trampoline_heap.base < TRAMPOLINE_SIZE) {
		pagesize = getpagesize();
		chunk = mmap(NULL, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC,
			     MAP_ANON | MAP_PRIVATE, -1, 0);
		if (chunk == MAP_FAILED)
			sml_sysfatal("mmap");
		trampoline_heap.base = chunk;
		trampoline_heap.end = (char*)chunk + pagesize;
	}

	/* hand out the next chunk and bump the allocation pointer */
	chunk = trampoline_heap.base;
	trampoline_heap.base += TRAMPOLINE_SIZE;

	mutex_unlock(&callbacks_lock);
	return chunk;
}
/*
 * realloc() wrapper that never returns NULL: on allocation failure the
 * process is aborted via sml_sysfatal, so callers need no error check.
 */
void *
xrealloc(void *p, size_t size)
{
	void *q = realloc(p, size);

	if (q == NULL)
		sml_sysfatal("realloc");
	return q;
}
/*
 * malloc() wrapper that never returns NULL: on allocation failure the
 * process is aborted via sml_sysfatal, so callers need no error check.
 */
void *
xmalloc(size_t size)
{
	void *result = malloc(size);

	if (result == NULL)
		sml_sysfatal("malloc");
	return result;
}
/*
 * Called on entry from C into SML code: set up, or re-link to, the
 * per-thread sml_control structure that tracks the SML frame stack.
 *
 * If a control already exists (re-entry), the previous frame stack top
 * is stored into the first root slot of the caller's frame (located via
 * the stack layout table) and the caller's frame becomes the new top,
 * chaining the frame-stack segments together — presumably so the GC can
 * walk them as roots; confirm against the collector code.
 *
 * Otherwise a fresh control is allocated, initialized, published via
 * SET_CONTROL, and registered with attach_control.
 */
SML_PRIMITIVE void
sml_control_start()
{
	struct sml_control *control = CONTROL();
	void **frame_end = CALLER_FRAME_END_ADDRESS();
	short *layout;

	if (control != NULL) {
		/* re-entry: resume the existing control */
		control_resume(control);
		layout = lookup_stack_layout(FRAME_CODE_ADDRESS(frame_end));
		ASSERT(layout != NULL);
		ASSERT(NUM_ROOTS(layout) > 0);
		/* save the old stack top in the caller frame's first root
		 * slot, then make the caller's frame the new top */
		frame_end[ROOTS(layout)[0]] = control->frame_stack_top;
		control->frame_stack_top = frame_end;
		return;
	}

	/* first entry on this thread: build a new control structure
	 * (xmalloc aborts on failure, so no NULL check is needed) */
	control = xmalloc(sizeof(struct sml_control));
	control->state = RUN;
#ifdef MULTITHREAD
	if (pthread_mutex_init(&control->state_lock, NULL) != 0)
		sml_sysfatal("pthread_mutex_init failed");
	if (pthread_cond_init(&control->state_cond, NULL) != 0)
		sml_sysfatal("pthread_cond_init failed");
#ifdef CONCURRENT
	control->phase = ASYNC;
#endif /* CONCURRENT */
#endif /* MULTITHREAD */
	control->frame_stack_top_override = NULL;
	/* the frame stack initially consists of just the caller's frame */
	control->frame_stack_top = frame_end;
	control->frame_stack_bottom = frame_end;
	control->tmp_root[0] = NULL;
	control->tmp_root[1] = NULL;
	control->heap = sml_heap_thread_init();
	control->exn = sml_exn_init();
	SET_CONTROL(control);
	attach_control(control);
	DBG(("START THREAD %p", control));
}
/*
 * Debug allocator: place the block at the very end of a private mapping,
 * immediately followed by one inaccessible guard page, so any access past
 * the end of the block faults instantly. The requested size is recorded
 * at the start of the mapping (found again via PAGEHEAD).
 * Aborts via sml_sysfatal/sml_fatal on any failure.
 *
 * Fixes vs. previous version: compare mmap's result against MAP_FAILED
 * rather than the literal (void*)-1; use PROT_NONE instead of the bare 0
 * for the guard page; check the mprotect return value instead of ignoring
 * it; do pointer arithmetic on char* — arithmetic on void* is a GNU
 * extension, not standard C.
 *
 * NOTE(review): the returned pointer is guard_page - size, so its
 * alignment is only as good as `size` itself — confirm callers pass
 * suitably aligned sizes.
 */
void *
xmalloc(size_t size)
{
	char *page, *p;
	size_t allocsize;
	size_t pagesize = getpagesize();

	/* room for the size header, rounded up to whole pages,
	 * plus one extra page to serve as the guard */
	allocsize = ALIGNSIZE(size + sizeof(size_t), pagesize) + pagesize;
	page = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (page == MAP_FAILED)
		sml_sysfatal("mmap");
	/* make the trailing page inaccessible */
	if (mprotect(page + allocsize - pagesize, pagesize, PROT_NONE) != 0)
		sml_sysfatal("mprotect");
	/* place the block flush against the guard page */
	p = page + allocsize - pagesize - size;
	if (PAGEHEAD(p) != (void*)page)
		sml_fatal(0, "xmalloc");
	*(size_t*)(void*)page = size;
	/* sml_debug("xmalloc: %p (%p:%u)\n", p, page, (unsigned int)size); */
	return p;
}
static void heap_space_alloc(struct heap_space *heap, size_t size) { size_t pagesize = getpagesize(); size_t allocsize; void *page; allocsize = ALIGNSIZE(size, pagesize); #if defined(DEBUG) && defined(DEBUG_USE_MMAP) { static void *base = (void*)0x2000000; page = mmap(base, allocsize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); base = (char*)base + 0x2000000; if (page == (void*)-1) sml_sysfatal("mmap"); } #else page = xmalloc(allocsize); #endif /* DEBUG && DEBUG_USE_MMAP */ heap->base = page; heap->limit = page + allocsize; }