/* Allocate memory for one coroutine stack and return a pointer to its TOP
   (one byte past the end of the allocation) -- the caller treats the stack
   as growing downwards. Returns NULL and sets errno on failure. */
static void *mill_allocstackmem(void) {
    void *ptr;
#if defined HAVE_POSIX_MEMALIGN && HAVE_MPROTECT
    /* Allocate the stack so that it's memory-page-aligned. */
    int rc = posix_memalign(&ptr, mill_page_size(), mill_get_stack_size());
    if(mill_slow(rc != 0)) {
        /* posix_memalign returns the error code rather than setting errno. */
        errno = rc;
        return NULL;
    }
    /* The bottom page is used as a stack guard. This way stack overflow will
       cause segfault rather than randomly overwrite the heap. */
    rc = mprotect(ptr, mill_page_size(), PROT_NONE);
    if(mill_slow(rc != 0)) {
        /* Preserve mprotect's errno across free(), which may clobber it. */
        int err = errno;
        free(ptr);
        errno = err;
        return NULL;
    }
#else
    /* Fallback: plain malloc, no guard page, no alignment guarantee. */
    ptr = malloc(mill_get_stack_size());
    if(mill_slow(!ptr)) {
        errno = ENOMEM;
        return NULL;
    }
#endif
    return (void*)(((char*)ptr) + mill_get_stack_size());
}
/* Register a send clause with the choose statement currently being built by
   the running coroutine. 'clause' is caller-provided storage for the clause,
   'val'/'sz' the value to send, 'idx' the clause's index within the choose. */
void mill_choose_out(void *clause, chan ch, void *val, size_t sz, int idx) {
    if(mill_slow(!ch))
        mill_panic("null channel used");
    if(mill_slow(ch->done))
        mill_panic("send to done-with channel");
    if(mill_slow(ch->sz != sz))
        mill_panic("send of a type not matching the channel");
    /* Find out whether the clause is immediately available: either a receiver
       is already blocked on the channel or there's room in the buffer. */
    int available = !mill_list_empty(&ch->receiver.clauses) ||
        ch->items < ch->bufsz ? 1 : 0;
    if(available)
        ++mill_running->u_choose.available;
    /* If there are available clauses don't bother with non-available ones. */
    if(!available && mill_running->u_choose.available)
        return;
    /* Fill in the clause entry. */
    struct mill_clause *cl = (struct mill_clause*) clause;
    cl->cr = mill_running;
    cl->ep = &ch->sender;
    cl->val = val;
    cl->available = available;
    cl->idx = idx;
    cl->used = 1;
    mill_slist_push_back(&mill_running->u_choose.clauses, &cl->chitem);
    /* If this endpoint was already registered during the current choose round
       (its seqnum matches), just bump the reference count; otherwise
       initialise its per-round bookkeeping. */
    if(cl->ep->seqnum == mill_choose_seqnum) {
        ++cl->ep->refs;
        return;
    }
    cl->ep->seqnum = mill_choose_seqnum;
    cl->ep->refs = 1;
    cl->ep->tmp = -1;
}
/* Link 'item' into the list immediately before 'it'. Passing NULL as 'it'
   appends the item at the tail of the list. */
void mill_list_insert(struct mill_list *self, struct mill_list_item *item,
      struct mill_list_item *it) {
    struct mill_list_item *before = it ? it->prev : self->last;
    item->prev = before;
    item->next = it;
    if(mill_fast(before))
        before->next = item;
    if(mill_fast(it))
        it->prev = item;
    /* 'item' becomes the new head if the list was empty or 'it' was first. */
    if(mill_slow(self->first == it || !self->first))
        self->first = item;
    /* Appending at the tail makes 'item' the new last element. */
    if(mill_slow(!it))
        self->last = item;
}
/* Tune the library before the first coroutine is launched: preallocate
   'count' stacks of 'stack_size' bytes, each also holding a value buffer of
   (aligned) 'val_size' bytes plus the coroutine bookkeeping structure.
   On failure sets errno (EAGAIN if coroutines already exist, ENOMEM on
   allocation failure) and returns without effect. */
void goprepare(int count, size_t stack_size, size_t val_size) {
    /* Too late to tune once any coroutine has been launched. */
    if(mill_slow(mill_hascrs())) {errno = EAGAIN; return;}
    /* Allocate any resources needed by the polling mechanism. */
    mill_poller_init();
    if(mill_slow(errno != 0)) return;
    /* If needed, make val size slightly bigger to align properly:
       round up to a multiple of 16 bytes. */
    mill_valbuf_size = (val_size + 15) & ~((size_t)0xf);
    /* Preallocate the valbuf for the main coroutine. */
    if(mill_slow(!mill_getvalbuf(&mill_main, mill_valbuf_size))) {
        errno = ENOMEM; return;}
    /* Allocate the stacks. */
    mill_preparestacks(count, stack_size + mill_valbuf_size +
        sizeof(struct mill_cr));
}
/* Pop one value from the channel into 'val'. The caller guarantees that a
   value is available: either the buffer is non-empty, a sender is blocked,
   or chdone() was called on the channel. */
static void mill_dequeue(chan ch, void *val) {
    /* Get a blocked sender, if any. */
    struct mill_clause *cl = mill_cont(
        mill_list_begin(&ch->sender.clauses), struct mill_clause, epitem);
    if(!ch->items) {
        /* If chdone was already called we can return the value immediately.
           There are no senders waiting to send. */
        if(mill_slow(ch->done)) {
            mill_assert(!cl);
            /* The termination value is stored just past the buffer proper. */
            memcpy(val, ((char*)(ch + 1)) + (ch->bufsz * ch->sz), ch->sz);
            return;
        }
        /* Otherwise there must be a sender waiting to send. */
        mill_assert(cl);
        memcpy(val, cl->val, ch->sz);
        mill_choose_unblock(cl);
        return;
    }
    /* If there's a value in the buffer start by retrieving it. */
    memcpy(val, ((char*)(ch + 1)) + (ch->first * ch->sz), ch->sz);
    ch->first = (ch->first + 1) % ch->bufsz;
    --ch->items;
    /* And if there was a sender waiting, unblock it by moving its value into
       the slot we just freed. */
    if(cl) {
        /* Was plain assert(); use mill_assert for consistency with the rest
           of this function and file. */
        mill_assert(ch->items < ch->bufsz);
        size_t pos = (ch->first + ch->items) % ch->bufsz;
        memcpy(((char*)(ch + 1)) + (pos * ch->sz) , cl->val, ch->sz);
        ++ch->items;
        mill_choose_unblock(cl);
    }
}
/* Return the current time in milliseconds. On x86 with GCC/Clang the CPU
   timestamp counter is used as a fast path: the (comparatively) expensive
   system time query is repeated only when the TSC indicates that roughly
   half a millisecond may have elapsed since the last measurement. */
int64_t now(void) {
#if (defined __GNUC__ || defined __clang__) && \
      (defined __i386__ || defined __x86_64__)
    /* Get the timestamp counter. This is time since startup, expressed in CPU
       cycles. Unlike gettimeofday() or similar function, it's extremely
       fast - it takes only few CPU cycles to evaluate. */
    uint32_t low;
    uint32_t high;
    __asm__ volatile("rdtsc" : "=a" (low), "=d" (high));
    int64_t tsc = (int64_t)((uint64_t)high << 32 | low);
    /* These global variables are used to hold the last seen timestamp counter
       and last seen time measurement. We'll initialise them the first time
       this function is called.
       NOTE(review): the statics are unsynchronised -- looks single-threaded
       by design; confirm before calling from multiple threads. */
    static int64_t last_tsc = -1;
    static int64_t last_now = -1;
    if(mill_slow(last_tsc < 0)) {
        last_tsc = tsc;
        last_now = mill_now();
    }
    /* If TSC haven't jumped back or progressed more than 1/2 ms, we can use
       the cached time value. */
    if(mill_fast(tsc - last_tsc <= (MILL_CLOCK_PRECISION / 2) &&
          tsc >= last_tsc))
        return last_now;
    /* It's more than 1/2 ms since we've last measured the time.
       We'll do a new measurement now. */
    last_tsc = tsc;
    last_now = mill_now();
    return last_now;
#else
    return mill_now();
#endif
}
/* Convert literal IPv4 or IPv6 address to a binary one. */ static ipaddr mill_ipliteral(const char *addr, int port, int mode) { ipaddr raddr; struct sockaddr *sa = (struct sockaddr*)&raddr; if(mill_slow(!addr || port < 0 || port > 0xffff)) { sa->sa_family = AF_UNSPEC; errno = EINVAL; return raddr; } switch(mode) { case IPADDR_IPV4: return mill_ipv4_literal(addr, port); case IPADDR_IPV6: return mill_ipv6_literal(addr, port); case 0: case IPADDR_PREF_IPV4: raddr = mill_ipv4_literal(addr, port); if(errno == 0) return raddr; return mill_ipv6_literal(addr, port); case IPADDR_PREF_IPV6: raddr = mill_ipv6_literal(addr, port); if(errno == 0) return raddr; return mill_ipv4_literal(addr, port); default: mill_assert(0); } }
mfile mill_mferr(void) { static struct mill_file f = {-1, 0, 0, 0}; if(mill_slow(f.fd < 0)) { mill_filetune(STDERR_FILENO); f.fd = STDERR_FILENO; } return &f; }
/* Send one value through the pipe. Returns 0 on success; if the pipe has
   been closed, returns -1 and sets errno to EPIPE. */
int mill_pipesend(struct mill_pipe_s *mp, void *ptr) {
    int rc = pipe_write(mp, ptr);
    if (mill_slow(rc == -1)) {
        /* Sending to a closed pipe is reported as a recoverable error rather
           than a fatal programming mistake (panicking here was considered
           and rejected). */
        errno = EPIPE;
        return -1;
    }
    return 0;
}
/* Receive one value from the pipe. Returns a pointer to the received value,
   stored in the running coroutine's value buffer (valid until the next
   receive). Sets *done to 1 when the pipe is closed and drained, else 0.
   NOTE(review): a read error currently panics with the raw strerror() text;
   consider propagating the error to the caller instead. */
void *mill_piperecv(struct mill_pipe_s *mp, int *done) {
    void *ptr = mill_valbuf(mill->running, mp->sz);
    mill_assert(done);
    int rc = pipe_read(mp, ptr);
    if (mill_slow(rc == -1))
        mill_panic(strerror(errno));
    /* rc == 0 signals end-of-stream (writer closed, no pending data). */
    *done = (rc == 0);
    return ptr;
}
/* Drop one reference to the channel. When the last reference is dropped the
   channel is deallocated; at that point no clause may still be blocked on
   it, otherwise the function panics. */
void mill_chclose(chan ch) {
    if(mill_slow(!ch))
        mill_panic("null channel used");
    /* Was plain assert(); use mill_assert for consistency with the rest of
       the file. */
    mill_assert(ch->refcount > 0);
    --ch->refcount;
    if(ch->refcount)
        return;
    /* Last reference gone: the channel must be idle before it's freed. */
    if(!mill_list_empty(&ch->sender.clauses) ||
          !mill_list_empty(&ch->receiver.clauses))
        mill_panic("attempt to close a channel while it is still being used");
    free(ch);
}
/* Return the current monotonic-ish time in milliseconds, using the best
   clock available on the platform: mach_absolute_time on OSX,
   CLOCK_MONOTONIC where available, gettimeofday as a last resort. */
int64_t now(void) {
#if defined __APPLE__
    /* Lazily fetch the tick-to-nanosecond conversion ratio. */
    if (mill_slow(!mill_mtid.denom))
        mach_timebase_info(&mill_mtid);
    uint64_t ticks = mach_absolute_time();
    return (int64_t)(ticks * mill_mtid.numer / mill_mtid.denom / 1000000);
#elif defined CLOCK_MONOTONIC
    struct timespec ts;
    int rc = clock_gettime(CLOCK_MONOTONIC, &ts);
    mill_assert (rc == 0);
    return ((int64_t)ts.tv_sec) * 1000 + (((int64_t)ts.tv_nsec) / 1000000);
#else
    /* Fallback: wall-clock time; may jump if the system clock is adjusted. */
    struct timeval tv;
    int rc = gettimeofday(&tv, NULL);
    /* Was plain assert(); use mill_assert for consistency with the branch
       above and the rest of the file. */
    mill_assert(rc == 0);
    return ((int64_t)tv.tv_sec) * 1000 + (((int64_t)tv.tv_usec) / 1000);
#endif
}
/* Build a wildcard ("any interface") address on the given port. IPv4 is
   chosen for mode 0, IPADDR_IPV4 and IPADDR_PREF_IPV4; IPv6 otherwise.
   An out-of-range port yields an AF_UNSPEC address with errno == EINVAL. */
static ipaddr mill_ipany(int port, int mode) {
    ipaddr addr;
    /* Ports must fit into 16 bits. */
    if(mill_slow(port < 0 || port > 0xffff)) {
        ((struct sockaddr*)&addr)->sa_family = AF_UNSPEC;
        errno = EINVAL;
        return addr;
    }
    int use_ipv4 = mode == 0 || mode == IPADDR_IPV4 ||
        mode == IPADDR_PREF_IPV4;
    if (use_ipv4) {
        struct sockaddr_in *sin = (struct sockaddr_in*)&addr;
        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = htonl(INADDR_ANY);
        sin->sin_port = htons((uint16_t)port);
    }
    else {
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)&addr;
        sin6->sin6_family = AF_INET6;
        memcpy(&sin6->sin6_addr, &in6addr_any, sizeof(in6addr_any));
        sin6->sin6_port = htons((uint16_t)port);
    }
    errno = 0;
    return addr;
}
/* Read exactly mp->sz bytes from the pipe into 'ptr'. Returns the number of
   bytes read (== mp->sz) on success, 0 when the write end was closed with no
   pending data, or -1 on a read error. Waits via mill_fdevent, blocking the
   calling coroutine rather than the OS thread. */
static int pipe_read(struct mill_pipe_s *mp, void *ptr) {
    unsigned size = mp->sz;
    int n, total = 0;
    while (1) {
        if (trylock(mp)) {
again:
            n = (int) read(mp->fd[0], (char *) ptr + total, size - total);
            if (mill_slow(n == 0)) {
                /* done: EOF, writer closed. A partial message here would
                   indicate a framing bug, hence the assertion. */
                mill_assert(total == 0);
                unlock(mp);
                return 0;
            }
            if (n > 0) {
                total += n;
                if (mill_fast(total == size)) {
                    unlock(mp);
                    return total;
                }
                /* Short read: keep going until the whole message arrives. */
                goto again;
            }
            /* n == -1 */
            if (errno == EINTR)
                goto again;
            if (errno == EAGAIN) {
                /* Pipe empty: park this coroutine until it's readable. */
                mill_fdevent(mp->fd[0], FDW_IN, -1);
                goto again;
            }
            /* Unrecoverable read error. */
            unlock(mp);
            break;
        }
        mill_fdevent(mp->fd[0], FDW_IN, -1);
        /* Multiple threads may receive notification. Race for the lock. */
    }
    return -1;
}
/* Create an additional handle to the channel. The channel is deallocated
   only after every handle is released via chclose(). */
chan mill_chdup(chan ch) {
    if(mill_slow(!ch))
        mill_panic("null channel used");
    ch->refcount++;
    return ch;
}
/* Resolve 'name' (a literal address or a DNS name) to an IP address, with
   'deadline' bounding the DNS query. 'mode' selects/prefers the IP family.
   On non-Linux platforms only literal addresses are supported. On failure
   the returned address has sa_family == AF_UNSPEC and errno is set. */
ipaddr ipremote(const char *name, int port, int mode, int64_t deadline) {
    ipaddr addr = mill_ipliteral(name, port, mode);
#if !defined __linux__
    return addr;
#else
    /* If 'name' parsed as a literal address we're done. */
    if(errno == 0)
       return addr;
    /* Let's do asynchronous DNS query here. The eventfd is signalled by the
       completion callback once the query finishes. */
    int efd = eventfd(0, 0);
    if(mill_slow(efd < 0))
        return addr;
    struct addrinfo request;
    memset(&request, 0, sizeof(request));
    request.ai_family = AF_UNSPEC;
    request.ai_socktype = SOCK_STREAM;
    struct gaicb gcb;
    memset(&gcb, 0, sizeof(gcb));
    gcb.ar_name = name;
    gcb.ar_service = NULL;
    gcb.ar_request = &request;
    gcb.ar_result = NULL;
    struct sigevent sev;
    memset(&sev, 0, sizeof(sev));
    /* The event will be delivered using a new thread rather than by a signal
       running of one of the coroutines' stack and possibly breaking it. */
    sev.sigev_notify = SIGEV_THREAD;
    sev.sigev_notify_function = mill_getaddrinfo_a_done;
    sev.sigev_value.sival_int = efd;
    struct gaicb *pgcb = &gcb;
    int rc = getaddrinfo_a(GAI_NOWAIT, &pgcb, 1, &sev);
    if(mill_slow(rc != 0)) {
        if(rc == EAI_AGAIN || rc == EAI_MEMORY) {
            close(efd);
            errno = ENOMEM;
            return addr;
        }
        mill_assert(0);
    }
    rc = fdwait(efd, FDW_IN, deadline);
    if(rc == 0) {
        /* Deadline expired: cancel the query, then wait (without deadline)
           for the completion notification before tearing anything down. */
        gai_cancel(&gcb);
        rc = fdwait(efd, FDW_IN, -1);
    }
    mill_assert(rc == FDW_IN);
    /* NOTE(review): efd was passed to fdwait but fdclean(efd) is not called
       before close(efd); the sibling resolver mill_ipremote_ documents why
       fdclean is needed in that situation -- confirm whether it's needed
       here too. */
    close(efd);
    rc = gai_error(&gcb);
    if(rc != 0) {
        errno = EINVAL;
        return addr;
    }
    /* Pick the first IPv4 and the first IPv6 result, if any. */
    struct addrinfo *ipv4 = NULL;
    struct addrinfo *ipv6 = NULL;
    struct addrinfo *it = gcb.ar_result;
    while(it) {
        if(!ipv4 && it->ai_family == AF_INET)
            ipv4 = it;
        if(!ipv6 && it->ai_family == AF_INET6)
            ipv6 = it;
        if(ipv4 && ipv6)
            break;
        it = it->ai_next;
    }
    /* Apply the requested family selection/preference. */
    switch(mode) {
    case IPADDR_IPV4:
        ipv6 = NULL;
        break;
    case IPADDR_IPV6:
        ipv4 = NULL;
        break;
    case 0:
    case IPADDR_PREF_IPV4:
        if(ipv4)
           ipv6 = NULL;
        break;
    case IPADDR_PREF_IPV6:
        if(ipv6)
           ipv4 = NULL;
        break;
    default:
        mill_assert(0);
    }
    if(ipv4) {
        struct sockaddr_in *inaddr = (struct sockaddr_in*)&addr;
        memcpy(inaddr, ipv4->ai_addr, sizeof (struct sockaddr_in));
        inaddr->sin_port = htons(port);
    }
    if(ipv6) {
        struct sockaddr_in6 *inaddr = (struct sockaddr_in6*)&addr;
        memcpy(inaddr, ipv6->ai_addr, sizeof (struct sockaddr_in6));
        inaddr->sin6_port = htons(port);
    }
    freeaddrinfo(gcb.ar_result);
    errno = 0;
    return addr;
#endif
}
/* Resolve 'name' to an IP address using the bundled dns.c resolver, with
   'deadline' bounding the query and 'mode' selecting/preferring the IP
   family. On failure the returned address has sa_family == AF_UNSPEC and
   errno is set (EINVAL, ETIMEDOUT or EADDRNOTAVAIL). */
ipaddr mill_ipremote_(const char *name, int port, int mode, int64_t deadline) {
    int rc;
    ipaddr addr = mill_ipliteral(name, port, mode);
    /* If 'name' parsed as a literal address we're done. */
    if(errno == 0)
        return addr;
    /* Load DNS config files, unless they are already cached. */
    if(mill_slow(!mill_dns_conf)) {
        /* TODO: Maybe re-read the configuration once in a while? */
        mill_dns_conf = dns_resconf_local(&rc);
        mill_assert(mill_dns_conf);
        mill_dns_hosts = dns_hosts_local(&rc);
        mill_assert(mill_dns_hosts);
        mill_dns_hints = dns_hints_local(mill_dns_conf, &rc);
        mill_assert(mill_dns_hints);
    }
    /* Let's do asynchronous DNS query here. */
    struct dns_resolver *resolver = dns_res_open(mill_dns_conf, mill_dns_hosts,
        mill_dns_hints, NULL, dns_opts(), &rc);
    mill_assert(resolver);
    mill_assert(port >= 0 && port <= 0xffff);
    char portstr[8];
    snprintf(portstr, sizeof(portstr), "%d", port);
    struct addrinfo hints;
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = PF_UNSPEC;
    struct dns_addrinfo *ai = dns_ai_open(name, portstr, DNS_T_A, &hints,
        resolver, &rc);
    mill_assert(ai);
    dns_res_close(resolver);
    /* Iterate the results, keeping the first IPv4 and first IPv6 entry;
       every other entry is freed immediately (the iterator hands over
       ownership of each entry). */
    struct addrinfo *ipv4 = NULL;
    struct addrinfo *ipv6 = NULL;
    struct addrinfo *it = NULL;
    while(1) {
        rc = dns_ai_nextent(&it, ai);
        if(rc == EAGAIN) {
            /* The resolver needs more network I/O; wait for its socket. */
            int fd = dns_ai_pollfd(ai);
            mill_assert(fd >= 0);
            int events = fdwait(fd, FDW_IN, deadline);
            /* There's no guarantee that the file descriptor will be reused
               in next iteration. We have to clean the fdwait cache here
               to be on the safe side. */
            fdclean(fd);
            if(mill_slow(!events)) {
                errno = ETIMEDOUT;
                return addr;
            }
            mill_assert(events == FDW_IN);
            continue;
        }
        if(rc == ENOENT)
            break;
        if(!ipv4 && it && it->ai_family == AF_INET) {
            ipv4 = it;
        }
        else if(!ipv6 && it && it->ai_family == AF_INET6) {
            ipv6 = it;
        }
        else {
            free(it);
        }
        if(ipv4 && ipv6)
            break;
    }
    /* Apply the requested family selection/preference, freeing the entry
       that is discarded. */
    switch(mode) {
    case IPADDR_IPV4:
        if(ipv6) {
            free(ipv6);
            ipv6 = NULL;
        }
        break;
    case IPADDR_IPV6:
        if(ipv4) {
            free(ipv4);
            ipv4 = NULL;
        }
        break;
    case 0:
    case IPADDR_PREF_IPV4:
        if(ipv4 && ipv6) {
            free(ipv6);
            ipv6 = NULL;
        }
        break;
    case IPADDR_PREF_IPV6:
        if(ipv6 && ipv4) {
            free(ipv4);
            ipv4 = NULL;
        }
        break;
    default:
        mill_assert(0);
    }
    if(ipv4) {
        struct sockaddr_in *inaddr = (struct sockaddr_in*)&addr;
        memcpy(inaddr, ipv4->ai_addr, sizeof (struct sockaddr_in));
        inaddr->sin_port = htons(port);
        dns_ai_close(ai);
        free(ipv4);
        errno = 0;
        return addr;
    }
    if(ipv6) {
        struct sockaddr_in6 *inaddr = (struct sockaddr_in6*)&addr;
        memcpy(inaddr, ipv6->ai_addr, sizeof (struct sockaddr_in6));
        inaddr->sin6_port = htons(port);
        dns_ai_close(ai);
        free(ipv6);
        errno = 0;
        return addr;
    }
    /* No usable result. */
    dns_ai_close(ai);
    ((struct sockaddr*)&addr)->sa_family = AF_UNSPEC;
    errno = EADDRNOTAVAIL;
    return addr;
}
/* Register the 'otherwise' (default) clause of the choose statement being
   built by the running coroutine. Panics if one is already registered. */
void mill_choose_otherwise(void) {
    if(mill_slow(mill_running->u_choose.othws))
        mill_panic("multiple 'otherwise' clauses in a choose statement");
    mill_running->u_choose.othws = 1;
}