int mill_suspend(void) { /* Even if process never gets idle, we have to process external events once in a while. The external signal may very well be a deadline or a user-issued command that cancels the CPU intensive operation. */ static int counter = 0; if(counter >= 103) { mill_wait(0); counter = 0; } /* Store the context of the current coroutine, if any. */ if(mill_running && sigsetjmp(mill_running->ctx, 0)) return mill_running->result; while(1) { /* If there's a coroutine ready to be executed go for it. */ if(!mill_slist_empty(&mill_ready)) { ++counter; struct mill_slist_item *it = mill_slist_pop(&mill_ready); mill_running = mill_cont(it, struct mill_cr, ready); siglongjmp(mill_running->ctx, 1); } /* Otherwise, we are going to wait for sleeping coroutines and for external events. */ mill_wait(1); mill_assert(!mill_slist_empty(&mill_ready)); counter = 0; }
/* Allocates a coroutine stack and returns a pointer to its top.
   Stacks grow downwards, so the caller treats the returned pointer as
   the initial stack pointer. */
void *mill_allocstack(void) {
    /* Reuse a cached stack if available. The slist header is stored
       just below the top of the stack (see mill_freestack), so the
       usable top sits immediately after it. */
    if(!mill_slist_empty(&mill_cached_stacks)) {
        --mill_num_cached_stacks;
        return (void*)(mill_slist_pop(&mill_cached_stacks) + 1);
    }
    char *ptr = malloc(MILL_STACK_SIZE);
    /* assert() compiles out under NDEBUG, which would turn an
       allocation failure into undefined behaviour on the pointer
       arithmetic below. Check explicitly so release builds fail
       loudly instead. */
    if(!ptr) {
        assert(ptr);
        abort();
    }
    return ptr + MILL_STACK_SIZE;
}
void *mill_allocstack(void) { if(!mill_slist_empty(&mill_cached_stacks)) { --mill_num_cached_stacks; return (void*)(mill_slist_pop(&mill_cached_stacks) + 1); } void *ptr = mill_allocstackmem(); if(!ptr) mill_panic("not enough memory to allocate coroutine stack"); return ptr; }
void mill_preparestacks(int count, size_t stack_size) { /* Purge the cached stacks. */ while(1) { struct mill_slist_item *item = mill_slist_pop(&mill_cached_stacks); if(!item) break; free(((char*)(item + 1)) - mill_get_stack_size()); } /* Now that there are no stacks allocated, we can adjust the stack size. */ size_t old_stack_size = mill_stack_size; size_t old_sanitised_stack_size = mill_sanitised_stack_size; mill_stack_size = stack_size; mill_sanitised_stack_size = 0; /* Allocate the new stacks. */ int i; for(i = 0; i != count; ++i) { void *ptr = mill_allocstackmem(); if(!ptr) goto error; struct mill_slist_item *item = ((struct mill_slist_item*)ptr) - 1; mill_slist_push_back(&mill_cached_stacks, item); } mill_num_cached_stacks = count; /* Make sure that the stacks won't get deallocated even if they aren't used at the moment. */ mill_max_cached_stacks = count; errno = 0; return; error: /* If we can't allocate all the stacks, allocate none, restore state and return error. */ while(1) { struct mill_slist_item *item = mill_slist_pop(&mill_cached_stacks); if(!item) break; free(((char*)(item + 1)) - mill_get_stack_size()); } mill_num_cached_stacks = 0; mill_stack_size = old_stack_size; mill_sanitised_stack_size = old_sanitised_stack_size; errno = ENOMEM; }
/* Returns a coroutine stack to the cache, evicting the oldest cached
   stack when the cache is already at capacity. */
void mill_freestack(void *stack) {
    /* Stash the list header just below the stack pointer and put the
       stack on the cache list. */
    struct mill_slist_item *item = ((struct mill_slist_item*)stack) - 1;
    mill_slist_push_back(&mill_cached_stacks, item);
    if(mill_num_cached_stacks >= MILL_MAX_CACHED_STACKS) {
        /* Cache is full. We can't deallocate the stack we are running
           on at the moment: standard C free() is not required to work
           when it deallocates its own stack from underneath itself.
           Instead, release one of the unused cached stacks. */
        struct mill_slist_item *victim = mill_slist_pop(&mill_cached_stacks);
        free(((char*)(victim + 1)) - MILL_STACK_SIZE);
        return;
    }
    ++mill_num_cached_stacks;
}
/* Removes 'item' from singly-linked list 'self'. Does nothing if the
   list is empty or the item is not on the list. */
void mill_slist_remove(struct mill_slist *self, struct mill_slist_item *item) {
    if(!self->first)
        return;
    /* Removing the head is just a pop. */
    if(self->first == item) {
        (void)mill_slist_pop(self);
        return;
    }
    /* Otherwise walk the list looking for the predecessor of 'item'
       and splice the item out. */
    struct mill_slist_item *prev;
    for(prev = self->first; prev; prev = prev->next) {
        if(prev->next != item)
            continue;
        prev->next = item->next;
        /* If we removed the tail, the predecessor becomes the new tail. */
        if(!prev->next)
            self->last = prev;
        mill_slist_set_detached(item);
        return;
    }
}
/* Returns a coroutine stack to the cache. When the cache is at
   capacity, the oldest cached stack is released back to the system. */
void mill_freestack(void *stack) {
    /* The list header lives just below the stack pointer; hook the
       stack onto the cache list. */
    struct mill_slist_item *item = ((struct mill_slist_item*)stack) - 1;
    mill_slist_push_back(&mill_cached_stacks, item);
    if(mill_num_cached_stacks < mill_max_cached_stacks) {
        ++mill_num_cached_stacks;
        return;
    }
    /* Cache is full. We can't deallocate the stack we are running on
       at the moment: standard C free() is not required to work when it
       deallocates its own stack from underneath itself. Instead,
       release one of the unused cached stacks. */
    struct mill_slist_item *victim = mill_slist_pop(&mill_cached_stacks);
    void *ptr = ((char*)(victim + 1)) - mill_get_stack_size();
#if HAVE_POSIX_MEMALIGN && HAVE_MPROTECT
    /* Re-enable access to the first page -- presumably a guard page
       set up at allocation time (confirm in mill_allocstackmem) --
       before handing the memory back to the allocator. */
    int rc = mprotect(ptr, mill_page_size(), PROT_READ|PROT_WRITE);
    mill_assert(rc == 0);
#endif
    free(ptr);
}