static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct perf_ctx *perf = filp->private_data;
        int node, i;
        DECLARE_WAIT_QUEUE_HEAD(wq);

        /* Block until the NTB link is up; bail out if interrupted. */
        if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
                return -ENOLINK;

        if (perf->perf_threads == 0)
                return -EINVAL;

        /* Only one test run at a time. */
        if (!mutex_trylock(&perf->run_mutex))
                return -EBUSY;

        perf_clear_thread_status(perf);

        if (perf->perf_threads > MAX_THREADS) {
                perf->perf_threads = MAX_THREADS;
                pr_info("Reset total threads to: %u\n", MAX_THREADS);
        }

        /* no greater than 1M */
        if (seg_order > MAX_SEG_ORDER) {
                seg_order = MAX_SEG_ORDER;
                pr_info("Fix seg_order to %u\n", seg_order);
        }

        if (run_order < seg_order) {
                run_order = seg_order;
                pr_info("Fix run_order to %u\n", run_order);
        }

        node = dev_to_node(&perf->ntb->pdev->dev);
        atomic_set(&perf->tdone, 0);

        /* launch kernel thread */
        for (i = 0; i < perf->perf_threads; i++) {
                struct pthr_ctx *pctx;

                pctx = &perf->pthr_ctx[i];
                atomic_set(&pctx->dma_sync, 0);
                pctx->perf = perf;
                pctx->wq = &wq;
                pctx->thread = kthread_create_on_node(ntb_perf_thread,
                                                      (void *)pctx, node,
                                                      "ntb_perf %d", i);
                if (IS_ERR(pctx->thread)) {
                        pctx->thread = NULL;
                        goto err;
                } else {
                        wake_up_process(pctx->thread);
                }
        }

        /* Wait until every launched thread has reported completion. */
        wait_event_interruptible(wq,
                atomic_read(&perf->tdone) == perf->perf_threads);

        threads_cleanup(perf);
        mutex_unlock(&perf->run_mutex);
        return count;

err:
        threads_cleanup(perf);
        mutex_unlock(&perf->run_mutex);
        return -ENXIO;
}
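/*
 * For context: the writer above parks on an on-stack wait queue until
 * perf->tdone reaches perf->perf_threads. Below is a minimal sketch of
 * the worker-side half of that handshake. The body of ntb_perf_thread
 * is not part of this excerpt, so everything other than the tdone
 * counter and the pctx->wq pointer (both visible above) is an
 * assumption about how the counterpart would signal completion; the
 * sketch also relies on the driver's struct pthr_ctx / struct perf_ctx
 * definitions.
 */
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int ntb_perf_thread(void *data)
{
        struct pthr_ctx *pctx = data;
        struct perf_ctx *perf = pctx->perf;

        /* ... run the memcpy/DMA benchmark loop here (not shown) ... */

        atomic_inc(&perf->tdone);       /* count this worker as finished */
        wake_up(pctx->wq);              /* re-evaluate the writer's condition */

        /* Park until threads_cleanup() reaps us with kthread_stop(). */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}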
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct perf_ctx *perf = filp->private_data;
        int node, i;

        if (!perf->link_is_up)
                return 0;

        if (perf->perf_threads == 0)
                return 0;

        if (atomic_read(&perf->tsync) == 0)
                perf->run = false;

        /* A write while a test is already running tears the threads down. */
        if (perf->run)
                threads_cleanup(perf);
        else {
                perf->run = true;

                if (perf->perf_threads > MAX_THREADS) {
                        perf->perf_threads = MAX_THREADS;
                        pr_info("Reset total threads to: %u\n", MAX_THREADS);
                }

                /* no greater than 1M */
                if (seg_order > MAX_SEG_ORDER) {
                        seg_order = MAX_SEG_ORDER;
                        pr_info("Fix seg_order to %u\n", seg_order);
                }

                if (run_order < seg_order) {
                        run_order = seg_order;
                        pr_info("Fix run_order to %u\n", run_order);
                }

                node = dev_to_node(&perf->ntb->pdev->dev);

                /* launch kernel thread */
                for (i = 0; i < perf->perf_threads; i++) {
                        struct pthr_ctx *pctx;

                        pctx = &perf->pthr_ctx[i];
                        atomic_set(&pctx->dma_sync, 0);
                        pctx->perf = perf;
                        pctx->thread = kthread_create_on_node(ntb_perf_thread,
                                                              (void *)pctx,
                                                              node,
                                                              "ntb_perf %d",
                                                              i);
                        if (IS_ERR(pctx->thread)) {
                                pctx->thread = NULL;
                                goto err;
                        } else
                                wake_up_process(pctx->thread);

                        /* Bail out if the run was cancelled mid-launch. */
                        if (!perf->run)
                                return -ENXIO;
                }
        }

        return count;

err:
        threads_cleanup(perf);
        return -ENXIO;
}
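/*
 * Both variants are reached through the same debugfs plumbing. The
 * sketch below shows how such a handler is typically wired up; the
 * fops struct name, the "run" file name, and perf_setup_debugfs() are
 * illustrative assumptions, not taken from the excerpt above. The one
 * load-bearing detail is simple_open(), which copies inode->i_private
 * into filp->private_data -- that is how both versions of
 * debugfs_run_write() recover their struct perf_ctx.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations ntb_perf_debugfs_run = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .write = debugfs_run_write,
};

static void perf_setup_debugfs(struct perf_ctx *perf, struct dentry *parent)
{
        /* A write to the "run" file lands in debugfs_run_write(). */
        debugfs_create_file("run", 0600, parent, perf,
                            &ntb_perf_debugfs_run);
}

/*
 * From userspace a test run would then be kicked off with something
 * like: echo 1 > /sys/kernel/debug/ntb_perf/<device>/run
 * (exact path depends on how the driver names its debugfs directory).
 */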