/* Register a send ("out") clause of a choose statement on channel 'ch'.
   'clause' is caller-provided storage for the clause bookkeeping, 'val'/'sz'
   are the value to send and its size, and 'idx' is the clause's index within
   the choose statement, reported back when this clause fires.
   NOTE(review): relies on project globals 'mill_running' (current coroutine)
   and 'mill_choose_seqnum' (per-choose sequence number) — semantics assumed
   from usage here; confirm against the choose implementation. */
void mill_choose_out(void *clause, chan ch, void *val, size_t sz, int idx) {
    /* Sanity checks on the channel and the element type. */
    if(mill_slow(!ch))
        mill_panic("null channel used");
    if(mill_slow(ch->done))
        mill_panic("send to done-with channel");
    if(mill_slow(ch->sz != sz))
        mill_panic("send of a type not matching the channel");
    /* Find out whether the clause is immediately available: either a
       receiver is already waiting or there's free space in the buffer. */
    int available = !mill_list_empty(&ch->receiver.clauses) ||
        ch->items < ch->bufsz ? 1 : 0;
    if(available)
        ++mill_running->u_choose.available;
    /* If there are available clauses don't bother with non-available ones. */
    if(!available && mill_running->u_choose.available)
        return;
    /* Fill in the clause entry. */
    struct mill_clause *cl = (struct mill_clause*) clause;
    cl->cr = mill_running;
    cl->ep = &ch->sender;
    cl->val = val;
    cl->available = available;
    cl->idx = idx;
    cl->used = 1;
    /* Link the clause into the current coroutine's choose list. */
    mill_slist_push_back(&mill_running->u_choose.clauses, &cl->chitem);
    /* If this endpoint was already registered by the current choose
       statement, just bump its reference count. */
    if(cl->ep->seqnum == mill_choose_seqnum) {
        ++cl->ep->refs;
        return;
    }
    /* First clause of this choose statement touching this endpoint:
       stamp it with the current sequence number and reset its state. */
    cl->ep->seqnum = mill_choose_seqnum;
    cl->ep->refs = 1;
    cl->ep->tmp = -1;
}
/* Return a coroutine stack to the cache of reusable stacks, evicting an
   older cached stack if the cache is already full.
   NOTE(review): this looks like an older revision of mill_freestack — a
   second definition using mill_max_cached_stacks/mill_get_stack_size()
   appears later in the file; two definitions of the same symbol cannot
   coexist in one translation unit. Confirm which one is current. */
void mill_freestack(void *stack) {
    /* The slist node lives immediately below the stack's top address. */
    struct mill_slist_item *node = ((struct mill_slist_item*)stack) - 1;
    mill_slist_push_back(&mill_cached_stacks, node);
    if(mill_num_cached_stacks >= MILL_MAX_CACHED_STACKS) {
        /* Cache is full. We can't deallocate the stack we are running on
           at the moment — standard C free() is not required to work when
           it deallocates its own stack from underneath itself — so free
           the oldest unused cached stack instead. */
        struct mill_slist_item *oldest = mill_slist_pop(&mill_cached_stacks);
        free(((char*)(oldest + 1)) - MILL_STACK_SIZE);
        return;
    }
    ++mill_num_cached_stacks;
}
/* Return a coroutine stack to the cache of reusable stacks, evicting an
   older cached stack if the cache has reached mill_max_cached_stacks. */
void mill_freestack(void *stack) {
    /* The slist node lives immediately below the stack's top address. */
    struct mill_slist_item *node = ((struct mill_slist_item*)stack) - 1;
    mill_slist_push_back(&mill_cached_stacks, node);
    if(mill_num_cached_stacks >= mill_max_cached_stacks) {
        /* Cache is full. We can't deallocate the stack we are running on
           at the moment — standard C free() is not required to work when
           it deallocates its own stack from underneath itself — so free
           the oldest unused cached stack instead. */
        node = mill_slist_pop(&mill_cached_stacks);
        void *base = ((char*)(node + 1)) - mill_get_stack_size();
#if HAVE_POSIX_MEMALIGN && HAVE_MPROTECT
        /* Make the guard page at the stack's base accessible again before
           handing the memory back to the allocator. */
        int rc = mprotect(base, mill_page_size(), PROT_READ|PROT_WRITE);
        mill_assert(rc == 0);
#endif
        free(base);
        return;
    }
    ++mill_num_cached_stacks;
}
void mill_preparestacks(int count, size_t stack_size) { /* Purge the cached stacks. */ while(1) { struct mill_slist_item *item = mill_slist_pop(&mill_cached_stacks); if(!item) break; free(((char*)(item + 1)) - mill_get_stack_size()); } /* Now that there are no stacks allocated, we can adjust the stack size. */ size_t old_stack_size = mill_stack_size; size_t old_sanitised_stack_size = mill_sanitised_stack_size; mill_stack_size = stack_size; mill_sanitised_stack_size = 0; /* Allocate the new stacks. */ int i; for(i = 0; i != count; ++i) { void *ptr = mill_allocstackmem(); if(!ptr) goto error; struct mill_slist_item *item = ((struct mill_slist_item*)ptr) - 1; mill_slist_push_back(&mill_cached_stacks, item); } mill_num_cached_stacks = count; /* Make sure that the stacks won't get deallocated even if they aren't used at the moment. */ mill_max_cached_stacks = count; errno = 0; return; error: /* If we can't allocate all the stacks, allocate none, restore state and return error. */ while(1) { struct mill_slist_item *item = mill_slist_pop(&mill_cached_stacks); if(!item) break; free(((char*)(item + 1)) - mill_get_stack_size()); } mill_num_cached_stacks = 0; mill_stack_size = old_stack_size; mill_sanitised_stack_size = old_sanitised_stack_size; errno = ENOMEM; }