/*
 * Tear down the NFS node subsystem: release both item pools and the
 * silly-rename workqueue set up during node initialisation.
 */
void
nfs_node_done(void)
{
	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	workqueue_destroy(nfs_sillyworkq);
}
/*
 * Wakeup-fd read callback driving the workqueue.
 *
 * On error (shutdown path) it tears the workqueue down: the wakeup fd is
 * destroyed, the grpc_fd wrapper is orphaned, and the workqueue memory is
 * freed once the state counter has drained to zero.
 *
 * On success it consumes the wakeup, pops at most one queued closure, and
 * re-arms itself.  NOTE(review): `state` appears to pack a pending-work
 * count (in units of 2) together with an "unorphaned" bit in the low bit —
 * see the case labels below and workqueue_orphan(); confirm against the
 * workqueue header before relying on this.
 */
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  GPR_TIMER_BEGIN("workqueue.on_readable", 0);
  grpc_workqueue *workqueue = arg;
  if (error != GRPC_ERROR_NONE) {
    /* HACK: let wakeup_fd code know that we stole the fd */
    workqueue->wakeup_fd.read_fd = 0;
    grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
    grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
    /* all pending work must already be drained before final teardown */
    GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
    gpr_free(workqueue);
  } else {
    error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
    /* pop one item; NULL can mean the producer has not finished linking */
    gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
    if (error == GRPC_ERROR_NONE) {
      grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
                             &workqueue->read_closure);
    } else {
      /* recurse to get error handling */
      on_readable(exec_ctx, arg, error);
    }
    if (n == NULL) {
      /* try again - queue in an inconsistent state */
      wakeup(exec_ctx, workqueue);
    } else {
      /* drop one unit of pending work (the item we just popped) */
      switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
        case 3:  // had one count, one unorphaned --> done, unorphaned
          break;
        case 2:  // had one count, one orphaned --> done, orphaned
          workqueue_destroy(exec_ctx, workqueue);
          break;
        case 1:
        case 0:
          // these values are illegal - representing an already done or
          // deleted workqueue
          GPR_UNREACHABLE_CODE(break);
        default:
          // schedule a wakeup since there's more to do
          wakeup(exec_ctx, workqueue);
      }
      /* run the popped closure outside the atomic bookkeeping */
      grpc_closure *cl = (grpc_closure *)n;
      grpc_error *clerr = cl->error;
      cl->cb(exec_ctx, cl->cb_arg, clerr);
      GRPC_ERROR_UNREF(clerr);
    }
  }
  GPR_TIMER_END("workqueue.on_readable", 0);
}
/*
 * Detach the IR receiver part of the emdtv device.
 *
 * Teardown order matters: stop the work queue first so no new IR work is
 * scheduled, then abort/close the USB interrupt pipe, drain the IR mutex,
 * and finally detach the child cir(4) device.
 */
void
emdtv_ir_detach(struct emdtv_softc *sc, int flags)
{
	if (sc->sc_ir_wq != NULL)
		workqueue_destroy(sc->sc_ir_wq);
	if (sc->sc_intr_pipe != NULL) {
		usbd_abort_pipe(sc->sc_intr_pipe);
		usbd_close_pipe(sc->sc_intr_pipe);
		sc->sc_intr_pipe = NULL;
	}
	/*
	 * Acquire and release the IR mutex once so any thread still
	 * inside a critical section has left before the mutex dies.
	 */
	mutex_enter(&sc->sc_ir_mutex);
	mutex_exit(&sc->sc_ir_mutex);
	mutex_destroy(&sc->sc_ir_mutex);
	if (sc->sc_cirdev != NULL)
		config_detach(sc->sc_cirdev, flags);
}
/*
 * Smoke test: create a workqueue, submit a single job, and wait (under
 * the queue lock) until the queue reports idle before destroying it.
 */
int
main(int argc, char **argv)
{
	workqueue_t wq;
	int err;

	err = workqueue_init(&wq, "thread");
	assert(err == 0);

	err = workqueue_submit(&wq, hello, NULL);
	assert(err == 0);

	/* Block until all submitted work has drained. */
	workqueue_lock(&wq);
	while (!workqueue_idle(&wq))
		workqueue_wait(&wq, 0);
	workqueue_unlock(&wq);

	workqueue_destroy(&wq);
	return 0;
}
void destroy_workqueue(struct workqueue_struct *wq) { /* * Cancel all delayed work. */ for (;;) { struct delayed_work *dw; mutex_enter(&wq->wq_lock); if (TAILQ_EMPTY(&wq->wq_delayed)) { dw = NULL; } else { dw = TAILQ_FIRST(&wq->wq_delayed); TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry); } mutex_exit(&wq->wq_lock); if (dw == NULL) break; cancel_delayed_work_sync(dw); } /* * workqueue_destroy empties the queue; we need not wait for * completion explicitly. However, we can't destroy the * condvar or mutex until this is done. */ workqueue_destroy(wq->wq_workqueue); KASSERT(wq->wq_current_work == NULL); wq->wq_workqueue = NULL; cv_destroy(&wq->wq_cv); mutex_destroy(&wq->wq_lock); kmem_free(wq, sizeof(*wq)); }
/*
 * Exercise the workqueue add/dequeue API: queue a batch of jobs, inspect
 * their queued/running state, dequeue them, requeue, dequeue again, then
 * poll until the queue drains before destroying it.
 *
 * Fixes: the dequeue loops used the comma operator in place of a
 * statement separator (`ret = ..., printf(...)`) — legal but accidental;
 * rewritten as two statements.  Also checks workqueue_init() for failure
 * before the context is dereferenced.
 */
int main(int argc, char *argv[])
{
	struct prg_ctx prg;
	int i;
	int num_jobs = 3;
	int ret;

	printf("starting\n");
	signal(SIGTERM, sighandler_func);

	prg.counter = 0;
	prg.ctx = workqueue_init(32, 1, NULL);
	if (prg.ctx == NULL) {
		printf("Error initializing workqueue\n");
		return 1;
	}

	/* Queue the initial batch at priority 2. */
	for (i = 0; i < num_jobs; i++) {
		ret = workqueue_add_work(prg.ctx, 2, 0, callback_func, &prg);
		if (ret >= 0) {
			printf("Added job %d \n", ret);
		} else {
			printf("Error adding job err=%d\n", ret);
		}
	}
	workqueue_show_status(prg.ctx, stdout);

	for (i = 0; i < num_jobs; i++) {
		printf("job %d is queued=%d running=%d queued_or_running=%d\n",
		    i, workqueue_job_queued(prg.ctx, i),
		    workqueue_job_running(prg.ctx, i),
		    workqueue_job_queued_or_running(prg.ctx, i));
	}
	workqueue_show_status(prg.ctx, stdout);

	/* Pull the first batch back off the queue. */
	for (i = 0; i < num_jobs; i++) {
		ret = workqueue_dequeue(prg.ctx, i);
		printf(" dequeue job %d ret=%d\n", i, ret);
	}
	workqueue_show_status(prg.ctx, stdout);

	/* Queue a second batch ... */
	for (i = 0; i < num_jobs; i++) {
		ret = workqueue_add_work(prg.ctx, 2, 0, callback_func, &prg);
		if (ret >= 0) {
			printf("Added job %d \n", ret);
		} else {
			printf("Error adding job err=%d\n", ret);
		}
	}
	/* ... and try to dequeue both batches' job ids. */
	for (i = 0; i < num_jobs * 2; i++) {
		ret = workqueue_dequeue(prg.ctx, i);
		printf(" dequeue job %d ret=%d\n", i, ret);
	}

	/* Poll (up to 20s) until the queue reports empty. */
	for (i = 20; i && (ret = workqueue_get_queue_len(prg.ctx)); i--) {
		printf("waiting for %d jobs \n", ret);
		sleep(1);
	}

	/*
	 * NOTE(review): the loop above only waits for the queue to empty;
	 * a job may still be *running* here — confirm whether destroy
	 * should be preceded by workqueue_empty_wait().
	 */
	workqueue_destroy(prg.ctx);
#ifdef WINDOWS
	system("pause");
#endif
	return 0;
}
/*
 * Exercise workqueue priorities: queue three batches of jobs at
 * priorities 2, 5 and 1, report their state, then wait for the queue to
 * mostly drain and for the running job to finish before destroying it.
 */
int main(int argc, char *argv[])
{
	struct prg_ctx prg;
	int job;
	int num_jobs = 6;
	int rc;

	printf("starting\n");

	prg.counter = 0;
	prg.ctx = workqueue_init(32, 1, NULL);

	/* First batch: num_jobs jobs at priority 2. */
	for (job = 0; job < num_jobs; job++) {
		rc = workqueue_add_work(prg.ctx, 2, 0, callback_func, &prg);
		if (rc < 0)
			printf("Error adding job err=%d\n", rc);
		else
			printf("Added job %d \n", rc);
	}
	workqueue_show_status(prg.ctx, stdout);

	for (job = 0; job < num_jobs; job++) {
		printf("job %d is queued=%d running=%d queued_or_running=%d\n",
		    job,
		    workqueue_job_queued(prg.ctx, job),
		    workqueue_job_running(prg.ctx, job),
		    workqueue_job_queued_or_running(prg.ctx, job));
	}

	/* Second batch: half as many jobs at priority 5. */
	for (job = 0; job < num_jobs / 2; job++) {
		rc = workqueue_add_work(prg.ctx, 5, 0, callback_func, &prg);
		if (rc < 0)
			printf("Error adding job err=%d\n", rc);
		else
			printf("Added job %d \n", rc);
	}
	workqueue_show_status(prg.ctx, stdout);

	/* Third batch: half as many jobs at priority 1. */
	for (job = 0; job < num_jobs / 2; job++) {
		rc = workqueue_add_work(prg.ctx, 1, 0, callback_func, &prg);
		if (rc < 0)
			printf("Error adding job err=%d\n", rc);
		else
			printf("Added job %d \n", rc);
	}
	workqueue_show_status(prg.ctx, stdout);

	/* Poll (up to 20s) until no more than 5 jobs remain queued. */
	for (job = 20; job && (rc = workqueue_get_queue_len(prg.ctx)) > 5;
	    job--) {
		printf("waiting for %d jobs \n", rc);
		sleep(1);
	}

	/* empty out remaining jobs and wait for running job to finish */
	workqueue_empty_wait(prg.ctx);
	workqueue_destroy(prg.ctx);
#ifdef WINDOWS
	system("pause");
#endif
	return 0;
}
/*
 * Drop the owner's reference on the workqueue.  If this was the last
 * unit of state (no pending work remains), destroy it immediately;
 * otherwise on_readable() will finish teardown when the queue drains.
 */
static void workqueue_orphan(grpc_exec_ctx *exec_ctx,
                             grpc_workqueue *workqueue) {
  if (gpr_atm_full_fetch_add(&workqueue->state, -1) != 1) {
    return;
  }
  workqueue_destroy(exec_ctx, workqueue);
}