/*===========================================================================*
 *				worker_stop_by_endpt			     *
 *===========================================================================*/
PUBLIC void worker_stop_by_endpt(endpoint_t proc_e)
{
/* Abort every worker thread -- the system worker, the deadlock-resolver
 * worker, and any thread in the regular pool -- that is currently blocked
 * waiting for a reply from the given endpoint.
 */
  struct worker_thread *wp;
  int slot;

  if (proc_e == NONE) return;

  if (worker_waiting_for(&sys_worker, proc_e))
	worker_stop(&sys_worker);

  if (worker_waiting_for(&dl_worker, proc_e))
	worker_stop(&dl_worker);

  for (slot = 0; slot < NR_WTHREADS; slot++) {
	wp = &workers[slot];
	if (worker_waiting_for(wp, proc_e))
		worker_stop(wp);
  }
}
/*
 * libev write watcher for a worker's input pipe: flush the pending write
 * queue into the pipe.  Disarms the watcher once the queue is drained,
 * and reports pipe errors to the owning session.
 */
static inline void worker_write_input_cb(EV_P_ ev_io *w, int revents,
					 struct worker *worker,
					 struct writeq *writeq,
					 enum msg_type type)
{
	int n;

	if (worker->session == NULL) {
		printf("ERROR: worker without session!\n");
		worker_stop(EV_A_ worker);
		return;
	}

	if (!(revents & EV_ERROR)) {
		n = writeq_write(writeq, w->fd);
		if (n > 0) {
			/* Queue fully drained: disarm write events. */
			ev_io_stop(EV_A_ w);
			return;
		}
		if (n == 0)
			return;	/* Partial write; stay armed for more. */
	}

	/* EV_ERROR or a failed write: tear this pipe down. */
	DEBUGF(0, "WARNING: worker pid %d input pipe fd=%d error\n",
	       worker_pid(worker), w->fd);
	ev_io_stop(EV_A_ w);
	session_on_worker_pipe_err_cb(EV_A_ worker->session, type);
}
/* Device close entry point: log the call and shut down the worker task. */
void AXB_REG_FUNC UserDevClose(AXB_REG(struct IOStdReq *ior, a1),
			       AXB_REG(struct DevBase *db, a6))
{
	struct ExBase *exbase = (struct ExBase *)db;

	D(("UserDevClose\n"));

	worker_stop(db, exbase->eb_WorkerPort);
}
/* Grow or shrink the worker pool until it matches new_count.
 *
 * Fix: a negative new_count could never be reached (the pool size cannot
 * drop below zero), so the loop would spin forever calling worker_stop();
 * clamp the target to zero.  The redundant second comparison is dropped:
 * inside the loop worker_count != new_count holds, so "not less" implies
 * "greater".
 */
void workers_adjust (int new_count)
{
    INFO1 ("requested worker count %d", new_count);
    if (new_count < 0)
        new_count = 0;
    while (worker_count != new_count)
    {
        if (worker_count < new_count)
            worker_start ();
        else
            worker_stop ();
    }
}
/*===========================================================================*
 *				fs_cancel				     *
 *===========================================================================*/
void fs_cancel(struct vmnt *vmp)
{
/* Cancel all pending requests for this vmp: unlink each queued worker from
 * the mount's request queue, drop the global sending count, and stop it.
 */
  struct worker_thread *wp;

  for (wp = vmp->m_comm.c_req_queue; wp != NULL;
		wp = vmp->m_comm.c_req_queue) {
	vmp->m_comm.c_req_queue = wp->w_next;
	wp->w_next = NULL;
	sending--;
	worker_stop(wp);
  }
}
/*===========================================================================*
 *				dmap_endpt_up				     *
 *===========================================================================*/
void dmap_endpt_up(endpoint_t proc_e, int is_blk)
{
/* A device driver with endpoint proc_e has been restarted. Go tell everyone
 * that might be blocking on it that this device is 'up'.
 */
  devmajor_t major;
  struct dmap *dp;
  struct worker_thread *worker;

  if (proc_e == NONE) return;

  for (major = 0; major < NR_DEVICES; major++) {
	dp = get_dmap_by_major(major);
	if (dp == NULL || dp->dmap_driver != proc_e)
		continue;	/* Not this driver's major. */

	if (!is_blk) {
		/* Character device: abort the servicing thread, if any,
		 * and invalidate open file pointers on this major.
		 */
		if (dp->dmap_servicing != INVALID_THREAD) {
			worker = worker_get(dp->dmap_servicing);
			worker_stop(worker);
		}
		invalidate_filp_by_char_major(major);
		continue;
	}

	/* Block device. A restart while we were already recovering means
	 * recovery failed: abort the servicing thread and clear the flag.
	 */
	if (dp->dmap_recovering) {
		printf("VFS: driver recovery failure for"
			" major %d\n", major);
		if (dp->dmap_servicing != INVALID_THREAD) {
			worker = worker_get(dp->dmap_servicing);
			worker_stop(worker);
		}
		dp->dmap_recovering = 0;
		continue;
	}

	dp->dmap_recovering = 1;
	bdev_up(major);
	dp->dmap_recovering = 0;
  }
}
/* Grow or shrink the worker pool until it matches new_count; once the pool
 * is empty, flush pending commits and close both ends of the logger socket
 * pair.
 *
 * Fix: a negative new_count could never be reached (the pool size cannot
 * drop below zero), so the loop would spin forever calling worker_stop();
 * clamp the target to zero.  The redundant second comparison is dropped:
 * inside the loop worker_count != new_count holds, so "not less" implies
 * "greater".
 */
void workers_adjust (int new_count)
{
    INFO1 ("requested worker count %d", new_count);
    if (new_count < 0)
        new_count = 0;
    while (worker_count != new_count)
    {
        if (worker_count < new_count)
            worker_start ();
        else
            worker_stop ();
    }
    if (worker_count == 0)
    {
        logger_commits(0);
        sock_close (logger_fd[1]);
        sock_close (logger_fd[0]);
    }
}
/*
 * libev read watcher for a worker's output pipe: pump readable chunks
 * through worker_rbuf_read_cb into the owning session.  Disarms the
 * watcher on EOF and reports pipe errors to the session.
 */
static inline void worker_read_output_cb(EV_P_ ev_io *w, int revents,
					 struct worker *worker,
					 enum msg_type type)
{
	struct worker_read_ctx ctx = {
		.session = worker->session,
		.type = type,
	};
	int n;

	if (worker->session == NULL) {
		printf("ERROR: worker without session!\n");
		worker_stop(EV_A_ worker);
		return;
	}

	if (!(revents & EV_ERROR)) {
		n = rbuf_read_chunks(EV_A_ w->fd, worker_rbuf_read_cb, &ctx);
		if (n >= 0) {
			if (n > 0) {
				/* EOF - disarm reads. */
				ev_io_stop(EV_A_ w);
			}
			return;
		}
	}

	/* EV_ERROR or a failed read: tear this pipe down. */
	DEBUGF(0, "WARNING: worker pid %d output pipe fd=%d error\n",
	       worker_pid(worker), w->fd);
	ev_io_stop(EV_A_ w);
	session_on_worker_pipe_err_cb(EV_A_ worker->session, type);
}
/* Run one Paxos consensus round across g_num_nodes worker threads.
 * With duelling_proposers nonzero, nodes 0 and 1 also propose in addition
 * to node 2; otherwise node 2 is the sole proposer.
 * Returns the result of check_leaders().  Aborts on any setup failure.
 */
static int run_paxos(int duelling_proposers)
{
	int i;
	struct timespec ts;

	/* Posted once per node when that node accepts a leader. */
	if (sem_init(&g_sem_accept_leader, 0, 0))
		abort();
	/* Lock/cond pair gates the simultaneous start of all nodes. */
	if (pthread_mutex_init(&g_start_lock, 0))
		abort();
	if (pthread_cond_init(&g_start_cond, 0))
		abort();
	g_start = 0;
	memset(g_nodes, 0, sizeof(g_nodes));
	memset(g_node_data, 0, sizeof(g_node_data));
	for (i = 0; i < g_num_nodes; ++i) {
		char name[WORKER_NAME_MAX];
		/* NOTE(review): "%3d" space-pads the id; "%03d" may have
		 * been intended for zero-padded names -- confirm. */
		snprintf(name, WORKER_NAME_MAX, "node_%3d", i);
		g_node_data[i].id = i;
		g_node_data[i].state = NODE_STATE_INIT;
		reset_remotes(g_node_data[i].remotes);
		g_node_data[i].seen_pseq = 0;
		g_node_data[i].prop_pseq = 0;
		g_node_data[i].prop_leader = -1;	/* -1 == no leader yet */
		g_node_data[i].leader = -1;
		g_nodes[i] = worker_start(name, paxos_handle_msg, NULL,
				&g_node_data[i]);
		if (!g_nodes[i]) {
			fprintf(stderr, "failed to allocate node %d\n", i);
			abort();
		}
	}
	/* Paxos needs at least 3 nodes for a meaningful majority. */
	if (g_num_nodes < 3)
		abort();
	send_do_propose(2);
	if (duelling_proposers) {
		send_do_propose(0);
		send_do_propose(1);
	}
	/* start acceptors */
	pthread_mutex_lock(&g_start_lock);
	g_start = 1;
	pthread_cond_broadcast(&g_start_cond);
	pthread_mutex_unlock(&g_start_lock);
	/* Wait for consensus.
	 * We only actually need more than half the nodes.  However, to make
	 * debugging a little nicer, we'll wait 10 seconds for all the remaining
	 * nodes rather than exiting immediately after we get half.
	 */
	for (i = 0; i < 1 + (g_num_nodes / 2); ++i) {
		TEMP_FAILURE_RETRY(sem_wait(&g_sem_accept_leader));
	}
	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
		abort();
	ts.tv_sec += 10;
	/* Best effort: a sem_timedwait timeout here is deliberately ignored;
	 * check_leaders() below decides whether consensus was reached. */
	for (; i < g_num_nodes; ++i) {
		TEMP_FAILURE_RETRY(sem_timedwait(&g_sem_accept_leader, &ts));
	}
	/* cleanup */
	for (i = 0; i < g_num_nodes; ++i) {
		worker_stop(g_nodes[i]);
	}
	for (i = 0; i < g_num_nodes; ++i) {
		worker_join(g_nodes[i]);
	}
	pthread_cond_destroy(&g_start_cond);
	g_start = 0;
	pthread_mutex_destroy(&g_start_lock);
	sem_destroy(&g_sem_accept_leader);
	return check_leaders();
}