Example #1
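These examples show the NTB scratchpad API used for cross-host link negotiation. This first handler appears to come from an older revision of the Linux ntb_perf test driver: when the NTB link comes up, it publishes the local memory-window size and a protocol version to the peer's scratchpads, reads back what the peer wrote, and, if the versions disagree or the memory window cannot be configured, reschedules itself for as long as the link stays up.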
static void perf_link_work(struct work_struct *work)
{
	struct perf_ctx *perf =
		container_of(work, struct perf_ctx, link_work.work);
	struct ntb_dev *ndev = perf->ntb;
	struct pci_dev *pdev = ndev->pdev;
	u32 val;
	u64 size;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);

	size = perf->mw.phys_size;

	if (max_mw_size && size > max_mw_size)
		size = max_mw_size;

	ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
	ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
	ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);

	/* now read what peer wrote */
	val = ntb_spad_read(ndev, VERSION);
	if (val != PERF_VERSION) {
		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
		goto out;
	}

	val = ntb_spad_read(ndev, MW_SZ_HIGH);
	size = (u64)val << 32;

	val = ntb_spad_read(ndev, MW_SZ_LOW);
	size |= val;

	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);

	rc = perf_set_mw(perf, size);
	if (rc)
		goto out1;

	perf->link_is_up = true;
	wake_up(&perf->link_wq);

	return;

out1:
	perf_free_mw(perf);

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work,
				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}
Example #2
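This handler appears to come from the Linux ntb_pingpong test driver. On each pong it reads the peer's data from both a scratchpad and a message register (the in-code comment explains why the two may differ), clears the message status, re-masks and acknowledges the incoming doorbell, and arms a timer for the next round.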
static void pp_pong(struct pp_ctx *pp)
{
	u32 msg_data = -1, spad_data = -1;
	int pidx = 0;

	/* Read pong data */
	spad_data = ntb_spad_read(pp->ntb, 0);
	msg_data = ntb_msg_read(pp->ntb, &pidx, 0);
	ntb_msg_clear_sts(pp->ntb, -1);

	/*
	 * Scratchpad and message data may differ, since the message register
	 * can't be rewritten until its status is cleared. Additionally,
	 * either of them might be unsupported on a given device.
	 */
	dev_dbg(&pp->ntb->dev, "Pong spad %#x, msg %#x (port %d)\n",
		spad_data, msg_data, ntb_peer_port_number(pp->ntb, pidx));

	atomic_inc(&pp->count);

	ntb_db_set_mask(pp->ntb, pp->in_db);
	ntb_db_clear(pp->ntb, pp->in_db);

	hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}
Example #3
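The remaining examples use the FreeBSD NTB API, where ntb_spad_read() returns the value through an output pointer rather than as the return value. This per-queue link worker sets the local queue's ready bit in the peer's QP_LINKS scratchpad, flushes that write with a dummy read from the peer, and checks the value read earlier from the local scratchpad to see whether the remote side has already marked this queue ready; if not, it re-arms itself while the transport link remains up.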
static void
ntb_qp_link_work(void *arg)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_softc *ntb = qp->ntb;
	struct ntb_transport_ctx *nt = qp->transport;
	uint32_t val, dummy;

	ntb_spad_read(ntb, IF_NTB_QP_LINKS, &val);

	ntb_peer_spad_write(ntb, IF_NTB_QP_LINKS, val | (1ull << qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(ntb, IF_NTB_QP_LINKS, &dummy);

	/* See if the remote side is up */
	if ((val & (1ull << qp->qp_num)) != 0) {
		ntb_printf(2, "qp link up\n");
		qp->link_is_up = true;

		if (qp->event_handler != NULL)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);

		taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
	} else if (nt->link_is_up)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}
Example #4
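Teardown is the mirror image of the bring-up above: clear the queue's ready bit in the peer's QP_LINKS scratchpad, then either send an explicit link-down message (if the link made it up) or drain the still-pending link-work callout.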
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify the NTB transport layer of the client's desire to no longer receive
 * data on the specified transport queue.  It is the client's responsibility
 * to ensure that all entries on the queue are purged or otherwise handled
 * appropriately.
 */
static void
ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	uint32_t val;

	if (qp == NULL)
		return;

	qp->client_ready = false;

	ntb_spad_read(qp->ntb, IF_NTB_QP_LINKS, &val);

	ntb_peer_spad_write(qp->ntb, IF_NTB_QP_LINKS,
	    val & ~(1 << qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		callout_drain(&qp->link_work);
}
Example #5
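Finally, the transport-level worker performs the full handshake that the per-queue worker depends on: publish the local memory-window sizes, window and queue counts, and protocol version to the peer (version last and read back first, so a matching version implies the rest has been written), then read the peer's values back, verify each one, and size the memory windows, tearing everything down and retrying on any mismatch.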
/* Link bring up */
static void
ntb_transport_link_work(void *arg)
{
	struct ntb_transport_ctx *nt = arg;
	struct ntb_softc *ntb = nt->ntb;
	struct ntb_transport_qp *qp;
	uint64_t val64, size;
	uint32_t val;
	unsigned i;
	int rc;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size != 0 && size > max_mw_size)
			size = max_mw_size;

		ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2),
		    size >> 32);
		ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), size);
	}

	ntb_peer_spad_write(ntb, IF_NTB_NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ntb, IF_NTB_NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = 0;
	ntb_spad_read(ntb, IF_NTB_VERSION, &val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	ntb_spad_read(ntb, IF_NTB_NUM_QPS, &val);
	if (val != nt->qp_count)
		goto out;

	ntb_spad_read(ntb, IF_NTB_NUM_MWS, &val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		ntb_spad_read(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), &val);
		val64 = (uint64_t)val << 32;

		ntb_spad_read(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), &val);
		val64 |= val;

		rc = ntb_set_mw(nt, i, val64);
		if (rc != 0)
			goto free_mws;
	}

	nt->link_is_up = true;
	ntb_printf(1, "transport link up\n");

	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
	}

	return;

free_mws:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(ntb, NULL, NULL))
		callout_reset(&nt->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
}
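All five examples are variations on one scratchpad handshake: each side writes its parameters into the peer's scratchpads with the version last, then polls its own scratchpads for the peer's parameters, retrying until both sides agree. Below is a minimal user-space sketch of that pattern, not taken from either kernel: the register indices, the negotiate() helper, and the array-backed "registers" are illustrative stand-ins for the real hardware and APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register indices, modeled on the examples above. */
enum { SPAD_VERSION, SPAD_MW_SZ_HIGH, SPAD_MW_SZ_LOW, SPAD_COUNT };

#define PROTO_VERSION 1

/* Two arrays stand in for the two sides' scratchpad banks. */
static uint32_t spad_a[SPAD_COUNT];
static uint32_t spad_b[SPAD_COUNT];

/* A "peer" scratchpad write lands in the other side's bank. */
static void peer_spad_write(uint32_t *peer, int idx, uint32_t val)
{
	peer[idx] = val;
}

static uint32_t spad_read(const uint32_t *local, int idx)
{
	return local[idx];
}

/*
 * One side of the handshake: publish our memory-window size and version
 * to the peer, then read back and validate what the peer published to us.
 * Mirrors the size-first, version-last ordering used in the examples.
 */
static bool negotiate(const uint32_t *local, uint32_t *peer,
		      uint64_t mw_size, uint64_t *peer_mw_size)
{
	peer_spad_write(peer, SPAD_MW_SZ_HIGH, (uint32_t)(mw_size >> 32));
	peer_spad_write(peer, SPAD_MW_SZ_LOW, (uint32_t)mw_size);
	peer_spad_write(peer, SPAD_VERSION, PROTO_VERSION);

	/* Version is read first: if it matches, the sizes are valid too. */
	if (spad_read(local, SPAD_VERSION) != PROTO_VERSION)
		return false;	/* peer absent or incompatible; retry later */

	*peer_mw_size = (uint64_t)spad_read(local, SPAD_MW_SZ_HIGH) << 32;
	*peer_mw_size |= spad_read(local, SPAD_MW_SZ_LOW);
	return true;
}

int main(void)
{
	uint64_t a_sees = 0, b_sees = 0;

	/* Both sides run the same code against opposite banks. */
	negotiate(spad_a, spad_b, 0x100000, &a_sees);	/* A: peer not ready */
	negotiate(spad_b, spad_a, 0x200000, &b_sees);	/* B: sees A's data  */
	negotiate(spad_a, spad_b, 0x100000, &a_sees);	/* A: retry succeeds */

	printf("A sees peer MW size %#llx\n", (unsigned long long)a_sees);
	printf("B sees peer MW size %#llx\n", (unsigned long long)b_sees);
	return 0;
}

A's first call fails because the peer has published nothing yet, which is exactly why the kernel workers above reschedule themselves instead of giving up after one attempt.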