/* Allocates memory for one coroutine stack and returns a pointer to its
   top (stacks grow downwards, so the returned address is one past the end
   of the allocated region). Returns NULL and sets errno on failure. */
static void *mill_allocstackmem(void) {
    void *bottom;
#if defined HAVE_POSIX_MEMALIGN && HAVE_MPROTECT
    /* The allocation must start on a page boundary so that the guard page
       below can be installed with mprotect(). */
    int rc = posix_memalign(&bottom, mill_page_size(), mill_get_stack_size());
    if(mill_slow(rc != 0)) {
        errno = rc;
        return NULL;
    }
    /* Turn the bottom page into a guard page: overflowing the stack then
       segfaults immediately instead of silently corrupting the heap. */
    if(mill_slow(mprotect(bottom, mill_page_size(), PROT_NONE) != 0)) {
        int saved_errno = errno;
        free(bottom);
        errno = saved_errno;
        return NULL;
    }
#else
    /* No way to set up a guard page; fall back to a plain allocation. */
    bottom = malloc(mill_get_stack_size());
    if(mill_slow(!bottom)) {
        errno = ENOMEM;
        return NULL;
    }
#endif
    return (void*)(((char*)bottom) + mill_get_stack_size());
}
void mill_preparestacks(int count, size_t stack_size) { /* Purge the cached stacks. */ while(1) { struct mill_slist_item *item = mill_slist_pop(&mill_cached_stacks); if(!item) break; free(((char*)(item + 1)) - mill_get_stack_size()); } /* Now that there are no stacks allocated, we can adjust the stack size. */ size_t old_stack_size = mill_stack_size; size_t old_sanitised_stack_size = mill_sanitised_stack_size; mill_stack_size = stack_size; mill_sanitised_stack_size = 0; /* Allocate the new stacks. */ int i; for(i = 0; i != count; ++i) { void *ptr = mill_allocstackmem(); if(!ptr) goto error; struct mill_slist_item *item = ((struct mill_slist_item*)ptr) - 1; mill_slist_push_back(&mill_cached_stacks, item); } mill_num_cached_stacks = count; /* Make sure that the stacks won't get deallocated even if they aren't used at the moment. */ mill_max_cached_stacks = count; errno = 0; return; error: /* If we can't allocate all the stacks, allocate none, restore state and return error. */ while(1) { struct mill_slist_item *item = mill_slist_pop(&mill_cached_stacks); if(!item) break; free(((char*)(item + 1)) - mill_get_stack_size()); } mill_num_cached_stacks = 0; mill_stack_size = old_stack_size; mill_sanitised_stack_size = old_sanitised_stack_size; errno = ENOMEM; }
/* Returns a top-of-stack pointer for a new coroutine, reusing a cached
   stack when one is available and allocating fresh memory otherwise.
   If 'stack_size' is not NULL, the usable stack size is stored there on
   BOTH paths (the original code left it unwritten when serving from the
   cache; all cached stacks have the current mill_get_stack_size(), since
   the cache is purged whenever the size changes, so setting it here is
   always correct). Panics if memory cannot be allocated. */
void *mill_allocstack(size_t *stack_size) {
    void *top;
    if(!mill_slist_empty(&mill_cached_stacks)) {
        /* Fast path: reuse a previously freed stack. */
        --mill_num_cached_stacks;
        top = (void*)(mill_slist_pop(&mill_cached_stacks) + 1);
    }
    else {
        top = mill_allocstackmem();
        if(!top)
            mill_panic("not enough memory to allocate coroutine stack");
    }
    if(stack_size)
        *stack_size = mill_get_stack_size();
    return top;
}
/* Returns a coroutine stack to the cache; if the cache is already at its
   limit, frees the oldest cached stack instead. 'stack' is a top-of-stack
   pointer as returned by mill_allocstack(). */
void mill_freestack(void *stack) {
    /* Put the stack to the list of cached stacks. */
    struct mill_slist_item *item = ((struct mill_slist_item*)stack) - 1;
    mill_slist_push_back(&mill_cached_stacks, item);
    if(mill_num_cached_stacks < mill_max_cached_stacks) {
        ++mill_num_cached_stacks;
        return;
    }
    /* We can't deallocate the stack we are running on at the moment.
       Standard C free() is not required to work when it deallocates its
       own stack from underneath itself. Instead, we'll deallocate one of
       the unused cached stacks (the pop takes the oldest entry, the push
       above added the current stack at the back). */
    item = mill_slist_pop(&mill_cached_stacks);
    void *ptr = ((char*)(item + 1)) - mill_get_stack_size();
#if defined HAVE_POSIX_MEMALIGN && HAVE_MPROTECT
    /* This condition must match the one in mill_allocstackmem() exactly
       (it previously read '#if HAVE_POSIX_MEMALIGN && ...', which diverges
       when HAVE_POSIX_MEMALIGN is defined to 0): whenever a guard page was
       installed at allocation time, it must be made accessible again
       before the memory is freed. */
    int rc = mprotect(ptr, mill_page_size(), PROT_READ|PROT_WRITE);
    mill_assert(rc == 0);
#endif
    free(ptr);
}