int dill_wait(void) { struct dill_ctx_cr *ctx = &dill_getctx->cr; /* Store the context of the current coroutine, if any. */ if(dill_setjmp(ctx->r->ctx)) { /* We get here once the coroutine is resumed. */ dill_slist_init(&ctx->r->clauses); errno = ctx->r->err; return ctx->r->id; } /* For performance reasons, we want to avoid excessive checking of current time, so we cache the value here. It will be recomputed only after a blocking call. */ int64_t nw = dill_now(); /* Wait for timeouts and external events. However, if there are ready coroutines there's no need to poll for external events every time. Still, we'll do it at least once a second. The external signal may very well be a deadline or a user-issued command that cancels the CPU intensive operation. */ if(dill_qlist_empty(&ctx->ready) || nw > ctx->last_poll + 1000) { int block = dill_qlist_empty(&ctx->ready); while(1) { /* Compute the timeout for the subsequent poll. */ int timeout = 0; if(block) { if(dill_rbtree_empty(&ctx->timers)) timeout = -1; else { int64_t deadline = dill_cont( dill_rbtree_first(&ctx->timers), struct dill_tmclause, item)->item.val; timeout = (int) (nw >= deadline ? 0 : deadline - nw); } } /* Wait for events. */ int fired = dill_pollset_poll(timeout); if(timeout != 0) nw = dill_now(); if(dill_slow(fired < 0)) continue; /* Fire all expired timers. */ if(!dill_rbtree_empty(&ctx->timers)) { while(!dill_rbtree_empty(&ctx->timers)) { struct dill_tmclause *tmcl = dill_cont( dill_rbtree_first(&ctx->timers), struct dill_tmclause, item); if(tmcl->item.val > nw) break; dill_trigger(&tmcl->cl, ETIMEDOUT); fired = 1; } } /* Never retry the poll when in non-blocking mode. */ if(!block || fired) break; /* If the timeout was hit but there were no expired timers, do the poll again. It can happen if the timers were canceled in the meantime. */ } ctx->last_poll = nw; }
/* Allocate a stack for a new coroutine.
   If 'stack_size' is not NULL, it is filled in with the usable size of the
   returned stack (always dill_stack_size bytes).
   Returns a pointer to the TOP of the stack — stacks grow downwards, so the
   caller positions the coroutine's stack pointer at the returned address.
   On failure returns NULL and sets errno (ENOMEM, or the posix_memalign /
   mprotect error code). */
void *dill_allocstack(size_t *stack_size) {
    struct dill_ctx_stack *ctx = &dill_getctx->stack;
    if(stack_size) *stack_size = dill_stack_size;
    /* If there's a cached stack, reuse it instead of allocating a new one.
       The cache list node is stored at the stack's top; the usable area
       starts just past it, hence the '+ 1'. */
    if(!dill_qlist_empty(&ctx->cache)) {
        --ctx->count;
        return (void*)(dill_qlist_pop(&ctx->cache) + 1);
    }
    /* Allocate a new stack. */
    uint8_t *top;
/* NOTE: the original used bitwise '&' here; '&&' is the intended logical
   operator for a boolean preprocessor condition (it only worked because
   both operands evaluate to 0 or 1). */
#if (HAVE_POSIX_MEMALIGN && HAVE_MPROTECT) && !defined DILL_NOGUARD
    /* Allocate the stack so that it's memory-page-aligned. Add one extra
       page to serve as a stack-overflow guard. */
    size_t sz = dill_align(dill_stack_size, dill_page_size()) +
        dill_page_size();
    uint8_t *ptr;
    int rc = posix_memalign((void**)&ptr, dill_page_size(), sz);
    if(dill_slow(rc != 0)) {
        /* posix_memalign returns the error code rather than setting errno. */
        errno = rc;
        return NULL;
    }
    /* The bottom page is used as a stack guard. This way a stack overflow
       causes a segfault instead of silently overwriting the heap. */
    rc = mprotect(ptr, dill_page_size(), PROT_NONE);
    if(dill_slow(rc != 0)) {
        /* Save errno across free(), which may clobber it. */
        int err = errno;
        free(ptr);
        errno = err;
        return NULL;
    }
    top = ptr + dill_page_size() + dill_stack_size;
#else
    /* Simple allocation without a guard page. */
    uint8_t *ptr = malloc(dill_stack_size);
    if(dill_slow(!ptr)) {
        /* malloc isn't guaranteed by ISO C to set errno; set it explicitly. */
        errno = ENOMEM;
        return NULL;
    }
    top = ptr + dill_stack_size;
#endif
    return top;
}