struct poolworker_state *
poolworker_create(int numThreads)
{
    struct poolworker_state *pw;
    int i;

    pw = safe_calloc(1, sizeof *pw);
    pw->p = safe_calloc(numThreads, sizeof *pw->p);
    pw->jobs_req = NULL;
    pw->jobs_active = NULL;

    pthread_mutex_init(&pw->lock, NULL);
    pthread_cond_init(&pw->cond_req, NULL);
    pthread_cond_init(&pw->cond_cmp, NULL);

    atomic_write(&pw->numRunning, 0);
    atomic_write(&pw->exit, 0);

    Log(LGPFX " creating %d threads\n", numThreads);

    for (i = 0; i < numThreads; i++) {
        pthread_create(&pw->p[i].tid, NULL, poolworker_main, pw);
        /* wait until the new worker has announced itself */
        while (atomic_read(&pw->numRunning) != i + 1) {
            sched_yield();
        }
    }

    return pw;
}
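/*
 * A minimal sketch of the worker entry point referenced above; the real
 * poolworker_main() is not shown in this section, so the body below is an
 * assumption based on how poolworker_create() synchronizes on pw->numRunning
 * and pw->exit.  The helper atomic_inc() and the job-queue handling are
 * hypothetical placeholders.
 */
static void *
poolworker_main(void *arg)
{
    struct poolworker_state *pw = arg;

    /* announce this worker so the creator's handshake loop can proceed */
    atomic_inc(&pw->numRunning);

    pthread_mutex_lock(&pw->lock);
    while (!atomic_read(&pw->exit)) {
        if (pw->jobs_req == NULL) {
            /* no work queued: sleep until a request arrives */
            pthread_cond_wait(&pw->cond_req, &pw->lock);
            continue;
        }
        /* ... move a job from jobs_req to jobs_active and run it ... */
    }
    pthread_mutex_unlock(&pw->lock);

    return NULL;
}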
static void
endfs_lb (wctx_t *ctx)
{
    alg_global_t       *sm = ctx->global;

    // announce that this worker is done
    atomic_write (&sm->done, 1);

    size_t              workers[W];
    int                 idle_count = W - 1;

    // every other worker may still be busy
    for (size_t i = 0; i < ((size_t)W); i++)
        workers[i] = (i == ctx->id ? 0 : 1);

    // help the remaining workers by recursing into their current work
    while (0 != idle_count) {
        for (size_t i = 0; i < W; i++) {
            if (0 == workers[i])
                continue;
            alg_global_t *remote = ctx->run->contexts[i]->global;
            if (1 == atomic_read (&remote->done)) {
                workers[i] = 0;
                idle_count--;
                continue;
            }
            ref_t work = atomic_read (&remote->work);
            if (SIZE_MAX == work)
                continue;
            rec_ndfs_call (ctx, work);
        }
    }
}
int
create_act(char *n, day_t day, tm_t *date)
{
    int dfd;

    if (date) {
        dfd = opendate(date);
    } else {
        dfd = openday(day);
    }

    int adfd = openacts(dfd);

    /* an activity with this name must not exist yet */
    int afd = openat(adfd, n, O_RDWR, ALLRWX);
    if (afd != -1) {
        close(afd);
        close(adfd);
        close(dfd);
        return (CREATE_EEXIST);
    }

    size_t dval = 0;
    char yval = 1;
    int tval = -1;

    afd = openat(adfd, n, O_CREAT | O_RDWR, ALLRWX);

    /* create the extended attributes holding the activity metadata */
    int time_xattr = openat(afd, "time", O_XATTR | O_CREAT | O_RDWR, ALLRWX);
    int dur_xattr = openat(afd, "dur", O_XATTR | O_CREAT | O_RDWR, ALLRWX);
    int dyn_xattr = openat(afd, "dyn", O_XATTR | O_CREAT | O_RDWR, ALLRWX);

    atomic_write(time_xattr, &tval, sizeof (int));
    atomic_write(dur_xattr, &dval, sizeof (size_t));
    atomic_write(dyn_xattr, &yval, sizeof (char));

    close(dfd);
    close(adfd);
    close(afd);
    close(time_xattr);
    close(dur_xattr);
    close(dyn_xattr);

    return (0);
}
static bool
uf_lock_uf (const uf_t *uf, ref_t a)
{
    if (atomic_read (&uf->array[a].uf_status) == UF_LIVE) {
        if (cas (&uf->array[a].uf_status, UF_LIVE, UF_LOCK)) {
            // successfully locked
            // ensure that we actually locked the representative
            if (atomic_read (&uf->array[a].parent) == 0)
                return 1;

            // otherwise unlock and try again
            atomic_write (&uf->array[a].uf_status, UF_LIVE);
        }
    }
    return 0;
}
/**
 * returns the representative for the UF set
 */
ref_t
uf_find (const uf_t *uf, ref_t state)
{
    //HREassert (state != 0);

    // recursively find and update the parent (path compression)
    ref_t parent = atomic_read (&uf->array[state].parent);
    ref_t root;

    if (parent == 0)
        return state;

    root = uf_find (uf, parent);
    if (root != parent)
        atomic_write (&uf->array[state].parent, root);

    return root;
}
/**
 * returns the representative for the UF set
 */
ref_t
uf_find (const uf_t *uf, ref_t state)
{
    // iteratively find and update the parent (path compression)
    ref_t x = state;
    ref_t parent = atomic_read (&uf->array[x].parent);
    ref_t y;

    while (parent != 0) {
        y = parent;
        parent = atomic_read (&uf->array[y].parent);
        if (parent == 0) {
            return y;
        }
        atomic_write (&uf->array[x].parent, parent);
        x = parent;
        parent = atomic_read (&uf->array[x].parent);
    }
    return x;
}
int
create_todo(char *n, int day, tm_t *date)
{
    int dfd;

    if (date) {
        dfd = opendate(date);
    } else {
        dfd = openday(day);
    }

    int tdfd = opentodos(dfd);

    /* a todo with this name must not exist yet */
    int tfd = openat(tdfd, n, O_RDWR, ALLRWX);
    if (tfd != -1) {
        close(tfd);
        close(tdfd);
        close(dfd);
        return (CREATE_TD_EEXIST);
    }

    int tval = 0;

    tfd = openat(tdfd, n, O_CREAT | O_RDWR, ALLRWX);
    if (tfd == -1) {
        perror("ctodo");
        exit(1);
    }

    /* the "time" extended attribute holds the todo's timestamp */
    int time_xattr = openat(tfd, "time", O_XATTR | O_CREAT | O_RDWR, ALLRWX);
    if (time_xattr == -1) {
        perror("ctodo");
        exit(1);
    }

    atomic_write(time_xattr, &tval, sizeof (int));

    close(dfd);
    close(tdfd);
    close(tfd);
    close(time_xattr);

    return (0);
}
static void
uf_unlock_list (const uf_t *uf, ref_t a_l)
{
    // HREassert (atomic_read (&uf->array[a_l].list_status) == LIST_LOCK);
    atomic_write (&uf->array[a_l].list_status, LIST_LIVE);
}
static void
uf_unlock_uf (const uf_t *uf, ref_t a)
{
    // HREassert (atomic_read (&uf->array[a].uf_status) == UF_LOCK);
    atomic_write (&uf->array[a].uf_status, UF_LIVE);
}
/**
 * unites two sets and ensures that their cyclic lists are combined to one list
 */
bool
uf_union (const uf_t *uf, ref_t a, ref_t b)
{
    ref_t a_r, b_r, a_l, b_l, a_n, b_n, r, q;
    sz_w  q_w, r_w;

    while ( 1 ) {
        // find the representatives
        a_r = uf_find (uf, a);
        b_r = uf_find (uf, b);
        if (a_r == b_r) {
            return 0;
        }

        // decide on the new root (deterministically)
        // take the highest index as root
        r = a_r;
        q = b_r;
        if (a_r < b_r) {
            r = b_r;
            q = a_r;
        }

        // lock the non-root
        if ( !uf_lock_uf (uf, q) )
            continue;
        break;
    }

    // lock the list entries
    if ( !uf_lock_list (uf, a, &a_l) ) {
        // HREassert ( uf_is_dead(uf, a) && uf_sameset(uf, a, b) );
        return 0;
    }
    if ( !uf_lock_list (uf, b, &b_l) ) {
        // HREassert ( uf_is_dead(uf, b) && uf_sameset(uf, a, b) );
        uf_unlock_list (uf, a_l);
        return 0;
    }

    // swap the list entries
    a_n = atomic_read (&uf->array[a_l].list_next);
    b_n = atomic_read (&uf->array[b_l].list_next);

    if (a_n == 0) // singleton
        a_n = a_l;
    if (b_n == 0) // singleton
        b_n = b_l;

    atomic_write (&uf->array[a_l].list_next, b_n);
    atomic_write (&uf->array[b_l].list_next, a_n);

    // update parent
    atomic_write (&uf->array[q].parent, r);

    // only update worker set for r if q adds workers
    q_w = atomic_read (&uf->array[q].p_set);
    r_w = atomic_read (&uf->array[r].p_set);
    if ( (q_w | r_w) != r_w) {
        // update!
        fetch_or (&uf->array[r].p_set, q_w);
        while (atomic_read (&uf->array[r].parent) != 0) {
            r = uf_find (uf, r);
            fetch_or (&uf->array[r].p_set, q_w);
        }
    }

    // unlock
    uf_unlock_list (uf, a_l);
    uf_unlock_list (uf, b_l);
    uf_unlock_uf (uf, q);

    return 1;
}
/* This must write the full size_t count if it can, and therefore we use
 * atomic_write() */
ssize_t uade_ipc_write(void *f, const void *buf, size_t count)
{
    int fd = (intptr_t) f;

    return atomic_write(fd, buf, count);
}
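/*
 * Minimal sketch of the "write the full count" behaviour that the wrapper
 * above relies on, assuming atomic_write() is the usual retry loop around
 * write(2).  The actual implementation used by uade may differ; full_write()
 * below is only an illustrative stand-in.
 */
#include <errno.h>
#include <unistd.h>

static ssize_t
full_write(int fd, const void *buf, size_t count)
{
    const char *p = buf;
    size_t left = count;

    while (left > 0) {
        ssize_t n = write(fd, p, left);
        if (n < 0) {
            if (errno == EINTR)
                continue;               /* interrupted: retry */
            return -1;                  /* real error */
        }
        p += n;                         /* partial write: advance and retry */
        left -= (size_t) n;
    }
    return (ssize_t) count;
}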
void
child_continue(child_t *child)
{
    assert(NULL != child);

    /* wake the child by writing a single byte to its barrier */
    atomic_write(child->barrier[1], "X", 1, NULL);
}
void
client_mode(const char *client_port)
{
    struct sockaddr_in dst;
    uint32_t build_info_len;
    ssize_t recv_bytes, sent_bytes;
    char *build_info;
    int fd;

    if (parse_sockaddr_in(client_port, &dst))
        errx(1, "Could not parse addr/port");

    fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (fd == -1)
        err(1, "Could not create socket");
    if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == -1)
        err(1, "Could not connect socket");

loop:
    /* ask the master for work */
    sent_bytes = atomic_write(fd, "G", 1);
    if (sent_bytes == -1)
        err(1, "Could not write to socket");
    if (sent_bytes == 0)
        exit(0);
    if (sent_bytes != 1)
        errx(1, "Premature end of stream while writing to socket");

    /* read the length of the build info, then the build info itself */
    recv_bytes = atomic_read(fd, &build_info_len, 4);
    if (recv_bytes == 0 || (recv_bytes == -1 && errno == ECONNRESET))
        exit(0);
    if (recv_bytes == -1)
        err(1, "Could not read from socket");
    if (recv_bytes != 4)
        errx(1, "Premature end while reading build info from socket");

    build_info_len = ntohl(build_info_len);
    if (build_info_len < 10 || build_info_len > 0xffffff)
        errx(1, "Invalid build info length from master");

    build_info = xmalloc(build_info_len + 1);
    build_info[build_info_len] = '\0';

    recv_bytes = atomic_read(fd, build_info, build_info_len);
    if (recv_bytes == -1)
        err(1, "Could not read from socket");
    if ((uint32_t)recv_bytes != build_info_len ||
        strlen(build_info) != build_info_len)
        errx(1, "Premature end of stream while reading path from socket");

    if (verbosity > 0) {
        const char *begin, *end;

        if (strncmp(build_info, "PKGNAME=", 8) != 0)
            err(1, "Inconsistent build info from server");
        begin = build_info + 8;
        if ((end = strchr(begin, '\n')) == NULL)
            err(1, "Inconsistent build info from server");
        printf("Building package %.*s\n", (int)(end - begin), begin);
        fflush(stdout);
    }

    /* build the package and report the result: "D" done, "F" failed */
    if (build_package(build_info, build_info_len) == 0)
        sent_bytes = atomic_write(fd, "D", 1);
    else
        sent_bytes = atomic_write(fd, "F", 1);
    if (sent_bytes == -1)
        err(1, "Could not write to socket");
    if (sent_bytes != 1)
        errx(1, "Premature end of stream while writing to socket");

    free(build_info);
    goto loop;
}
/**
 * searches the first LIVE state in the cyclic list, starting from state:
 * - if all elements in the list are TOMB, we mark the SCC DEAD
 * - if three consecutive items a -> b -> c with a == TOMB and b == TOMB:
 *   we try to update a -> c (and thereby reducing the size of the cyclic list)
 */
pick_e
uf_pick_from_list (const uf_t *uf, ref_t state, ref_t *ret)
{
    // invariant: every consecutive non-LOCK state is in the same set
    ref_t a, b, c;
    list_status a_status, b_status;

    a = state;

    while ( 1 ) {
        // HREassert ( a != 0 );

        // if we exit this loop, a.status == TOMB or we returned a LIVE state
        while ( 1 ) {
            a_status = atomic_read (&uf->array[a].list_status);

            // return directly if a is LIVE
            if (a_status == LIST_LIVE) {
                *ret = a;
                return PICK_SUCCESS;
            }

            // otherwise wait until a is TOMB (it might be LOCK now)
            else if (a_status == LIST_TOMB)
                break;
        }

        // find next state: a --> b
        b = atomic_read (&uf->array[a].list_next);

        // if a is TOMB and only element, then the SCC is DEAD
        if (a == b || b == 0) {
            if ( uf_mark_dead (uf, a) )
                return PICK_MARK_DEAD;
            return PICK_DEAD;
        }

        // if we exit this loop, b.status == TOMB or we returned a LIVE state
        while ( 1 ) {
            b_status = atomic_read (&uf->array[b].list_status);

            // return directly if b is LIVE
            if (b_status == LIST_LIVE) {
                *ret = b;
                return PICK_SUCCESS;
            }

            // otherwise wait until b is TOMB (it might be LOCK now)
            else if (b_status == LIST_TOMB)
                break;
        }

        // a --> b --> c
        c = atomic_read (&uf->array[b].list_next);
        // HREassert ( c != 0 );

        // make the list shorter (a --> c)
        //cas (&uf->array[a].list_next, b, c);
        atomic_write (&uf->array[a].list_next, c);

        a = c; // continue searching from c
    }
}