/*
 * cleanup_io_work_queue - tear down a device's aio work queue.
 * @dev: device whose work queue is being destroyed
 * @cancel: if true, stop and reap the worker threads first
 *
 * Destroys the queue lock/condvar and releases the thread array.
 * Safe to call when no work queue was ever set up (nr_threads == 0),
 * and idempotent: the thread array pointer is cleared after free so a
 * repeated call is a no-op.
 */
void cleanup_io_work_queue(struct tcmu_device *dev, bool cancel)
{
	/* NOTE(review): setup_io_work_queue uses tcmu_dev_get_private() for
	 * the same private data; use the same accessor here for consistency.
	 */
	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
	struct tcmu_io_queue *io_wq = &rdev->work_queue;
	int ret;

	/* No queue was set up (handler requested zero threads). */
	if (!io_wq->io_wq_threads) {
		return;
	}

	if (cancel) {
		cleanup_io_work_queue_threads(dev);
	}

	/*
	 * Note that there's no need to drain ->io_queue at this point
	 * as it _should_ be empty (target layer would call this path
	 * when no commands are running - thanks Mike).
	 *
	 * Out of tree handlers which do not use the aio code are not
	 * supported in this path.
	 */

	ret = pthread_mutex_destroy(&io_wq->io_lock);
	if (ret != 0) {
		tcmu_err("failed to destroy io workqueue lock\n");
	}

	ret = pthread_cond_destroy(&io_wq->io_cond);
	if (ret != 0) {
		tcmu_err("failed to destroy io workqueue cond\n");
	}

	free(io_wq->io_wq_threads);
	/* Clear the pointer so a second cleanup call bails out early
	 * instead of double-freeing / re-destroying the primitives. */
	io_wq->io_wq_threads = NULL;
}
int setup_io_work_queue(struct tcmu_device *dev) { struct tcmur_handler *r_handler = tcmu_get_runner_handler(dev); struct tcmur_device *rdev = tcmu_dev_get_private(dev); struct tcmu_io_queue *io_wq = &rdev->work_queue; int ret, i, nr_threads = r_handler->nr_threads; if (!nr_threads) return 0; list_head_init(&io_wq->io_queue); ret = pthread_mutex_init(&io_wq->io_lock, NULL); if (ret != 0) { goto out; } ret = pthread_cond_init(&io_wq->io_cond, NULL); if (ret != 0) { goto cleanup_lock; } /* TODO: Allow user to override device defaults */ io_wq->io_wq_threads = calloc(nr_threads, sizeof(pthread_t)); if (!io_wq->io_wq_threads) { ret = ENOMEM; goto cleanup_cond; } for (i = 0; i < nr_threads; i++) { ret = pthread_create(&io_wq->io_wq_threads[i], NULL, io_work_queue, dev); if (ret != 0) { goto cleanup_threads; } } return 0; cleanup_threads: cleanup_io_work_queue_threads(dev); free(io_wq->io_wq_threads); cleanup_cond: pthread_cond_destroy(&io_wq->io_cond); cleanup_lock: pthread_mutex_destroy(&io_wq->io_lock); out: return -ret; }