Example 1
static ssize_t tool_link_write(struct file *filep, const char __user *ubuf,
			       size_t size, loff_t *offp)
{
	struct tool_ctx *tc = filep->private_data;
	char buf[32];
	size_t buf_size;
	bool val;
	int rc;

	buf_size = min(size, (sizeof(buf) - 1));
	if (copy_from_user(buf, ubuf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	rc = strtobool(buf, &val);
	if (rc)
		return rc;

	if (val)
		rc = ntb_link_enable(tc->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	else
		rc = ntb_link_disable(tc->ntb);

	if (rc)
		return rc;

	return size;
}
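
This write handler, apparently from the Linux ntb_tool test driver, parses a boolean from user space and enables or disables the NTB link accordingly. A minimal sketch of how such a handler is typically wired into debugfs follows; the "link" file name and the fops variable are illustrative assumptions, not taken from the excerpt:

/* Registration sketch (names are assumptions, not from the excerpt). */
static const struct file_operations tool_link_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,	/* copies inode->i_private into filep->private_data */
	.write = tool_link_write,
};

/* e.g. in the probe path, under the driver's debugfs directory: */
debugfs_create_file("link", 0200, tc->dbgfs, tc, &tool_link_fops);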
Example 2
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	if (ntb_spad_count(ntb) < MAX_SPAD) {
		dev_err(&ntb->dev, "Not enough scratch pad registers for %s\n",
			DRIVER_NAME);
		return -EIO;
	}

	node = dev_to_node(&pdev->dev);

	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	mutex_init(&perf->run_mutex);
	spin_lock_init(&perf->db_lock);
	perf_setup_mw(ntb, perf);
	init_waitqueue_head(&perf->link_wq);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	perf_clear_thread_status(perf);

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	kfree(perf);
err_perf:
	return rc;
}
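
In this probe, apparently from a version of the Linux ntb_perf test driver, the error path unwinds in reverse order: the link work is cancelled before the context is freed, because ntb_set_ctx() may already have armed callbacks that schedule it. A hedged sketch of the matching remove path, which is not part of the excerpt (perf_debugfs_teardown is an assumed counterpart of perf_debugfs_setup):

static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf = ntb->ctx;

	ntb_clear_ctx(ntb);		/* stop link/doorbell callbacks */
	ntb_link_disable(ntb);		/* undo the probe-time enable */
	cancel_delayed_work_sync(&perf->link_work);
	perf_debugfs_teardown(perf);	/* assumed counterpart of _setup */
	kfree(perf);
}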
Example 3
static int pp_setup_ctx(struct pp_ctx *pp)
{
	int ret;

	ret = ntb_set_ctx(pp->ntb, pp, &pp_ops);
	if (ret)
		return ret;

	ntb_link_enable(pp->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
	ntb_link_event(pp->ntb);

	return 0;
}
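
ntb_set_ctx() installs the driver's callback table, and the ntb_link_event() call right after ntb_link_enable() replays the current link state so a link that is already up is not missed (which is why the comment hedges that it might not be necessary). A sketch of what an ops table like pp_ops plausibly contains; the handler names are assumptions:

static const struct ntb_ctx_ops pp_ops = {
	.link_event = pp_link_event,	/* assumed link-change handler */
	.db_event = pp_db_event,	/* assumed doorbell handler */
};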
Example 4
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	node = dev_to_node(&pdev->dev);

	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	perf->run = false;
	spin_lock_init(&perf->db_lock);
	perf_setup_mw(ntb, perf);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
	INIT_WORK(&perf->link_cleanup, perf_link_cleanup);

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	cancel_work_sync(&perf->link_cleanup);
	kfree(perf);
err_perf:
	return rc;
}
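
This variant of perf_probe differs from Example 2 mainly in replacing the run_mutex with a plain run flag and in adding a dedicated link_cleanup work item, which the error path also cancels. A hedged sketch of what that work function might look like; the body is an assumption, only the INIT_WORK registration appears above:

static void perf_link_cleanup(struct work_struct *work)
{
	struct perf_ctx *perf = container_of(work, struct perf_ctx,
					     link_cleanup);

	/* If the link never came up, stop the pending link work. */
	if (!perf->link_is_up)
		cancel_delayed_work_sync(&perf->link_work);
}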
Example 5
static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
{
	struct tool_ctx *tc;
	int rc;
	int i;

	if (!ntb->ops->mw_set_trans) {
		dev_dbg(&ntb->dev, "need inbound MW based NTB API\n");
		rc = -EINVAL;
		goto err_tc;
	}

	if (ntb_spad_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "not enough scratchpads\n");
		rc = -EINVAL;
		goto err_tc;
	}

	if (ntb_db_is_unsafe(ntb))
		dev_dbg(&ntb->dev, "doorbell is unsafe\n");

	if (ntb_spad_is_unsafe(ntb))
		dev_dbg(&ntb->dev, "scratchpad is unsafe\n");

	if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
		dev_warn(&ntb->dev, "multi-port NTB is unsupported\n");

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		rc = -ENOMEM;
		goto err_tc;
	}

	tc->ntb = ntb;
	init_waitqueue_head(&tc->link_wq);

	tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS);
	for (i = 0; i < tc->mw_count; i++) {
		rc = tool_init_mw(tc, i);
		if (rc)
			goto err_ctx;
	}

	tool_setup_dbgfs(tc);

	rc = ntb_set_ctx(ntb, tc, &tool_ops);
	if (rc)
		goto err_ctx;

	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	return 0;

err_ctx:
	tool_free_mws(tc);
	debugfs_remove_recursive(tc->dbgfs);
	kfree(tc);
err_tc:
	return rc;
}
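
tool_probe validates the device's capabilities up front (an inbound memory-window API, at least one scratchpad) before allocating anything, and enables the link only once the context is registered. A sketch of how such a probe is typically hooked up as an NTB client; tool_remove is an assumed teardown counterpart:

static struct ntb_client tool_client = {
	.ops = {
		.probe = tool_probe,
		.remove = tool_remove,	/* assumed counterpart */
	},
};
module_ntb_client(tool_client);	/* register on load, unregister on unload */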
Example 6
static int
ntb_transport_probe(struct ntb_softc *ntb)
{
	struct ntb_transport_ctx *nt = &net_softc;
	struct ntb_transport_mw *mw;
	uint64_t qp_bitmap;
	int rc;
	unsigned i;

	nt->mw_count = ntb_mw_count(ntb);
	for (i = 0; i < nt->mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ntb, i, &mw->phys_addr, &mw->vbase,
		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
		    &mw->addr_limit);
		if (rc != 0)
			goto err;

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ntb);
	nt->qp_count = flsll(qp_bitmap);
	KASSERT(nt->qp_count != 0, ("bogus db bitmap"));
	nt->qp_count -= 1;

	if (max_num_clients != 0 && max_num_clients < nt->qp_count)
		nt->qp_count = max_num_clients;
	else if (nt->mw_count < nt->qp_count)
		nt->qp_count = nt->mw_count;
	KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count"));

	mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF);
	mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF);

	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_IF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < nt->qp_count; i++) {
		set_bit(i, &nt->qp_bitmap);
		set_bit(i, &nt->qp_bitmap_free);
		ntb_transport_init_queue(nt, i);
	}

	callout_init(&nt->link_work, 0);
	callout_init(&nt->link_watchdog, 0);
	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);

	rc = ntb_set_ctx(ntb, nt, &ntb_transport_ops);
	if (rc != 0)
		goto err;

	nt->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
	if (enable_xeon_watchdog != 0)
		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
	return (0);

err:
	free(nt->qp_vec, M_NTB_IF);
	nt->qp_vec = NULL;
	return (rc);
}
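
This FreeBSD variant follows the same set-ctx, link-enable, link-event sequence as the Linux examples. The queue-pair count is derived from the doorbell mask: flsll() returns the 1-based index of the highest set bit, and the subsequent decrement keeps one doorbell out of the pool (presumably reserved for link signalling). For example, with qp_bitmap = 0xFFFF, flsll() yields 16, so qp_count starts at 15 before being clamped by max_num_clients and mw_count.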