Example #1
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}
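Every teardown path on this page pulls the DMA handle back out of a driver-private struct rather than a local variable. Below is a minimal sketch of the three-macro pattern that makes this work, under the old (pre-dma_unmap_addr) API these examples use; the struct layout mirrors b44's ring_info, but the helper names map_rx_buffer/unmap_rx_buffer and the fixed buffer length are illustrative, not taken from any driver. DECLARE_PCI_UNMAP_ADDR reserves storage for the handle (and compiles away on platforms that need no unmap state), pci_unmap_addr_set stores the handle at map time, and pci_unmap_addr reads it back at unmap time, exactly as b44_free_rings() does above.

#include <linux/pci.h>
#include <linux/skbuff.h>

struct ring_info {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* dma_addr_t, or nothing at all */
};

/* Map one rx buffer and remember its DMA handle in the ring slot.
 * A real driver should also check pci_dma_mapping_error() here. */
static void map_rx_buffer(struct pci_dev *pdev, struct ring_info *rp,
			  struct sk_buff *skb, size_t len)
{
	dma_addr_t dma = pci_map_single(pdev, skb->data, len,
					PCI_DMA_FROMDEVICE);

	rp->skb = skb;
	pci_unmap_addr_set(rp, mapping, dma);
}

/* Tear the mapping down using the handle saved at map time. */
static void unmap_rx_buffer(struct pci_dev *pdev, struct ring_info *rp,
			    size_t len)
{
	pci_unmap_single(pdev, pci_unmap_addr(rp, mapping), len,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(rp->skb);
	rp->skb = NULL;
}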
Example #2
static void krping_free_buffers(struct krping_cb *cb)
{
	DEBUG_LOG(PFX "krping_free_buffers called on cb %p\n", cb);
	
#if 0
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, recv_mapping),
			 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, send_mapping),
			 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, rdma_mapping),
			 cb->size, DMA_BIDIRECTIONAL);
#endif
	contigfree(cb->rdma_buf, cb->size, M_DEVBUF);
	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
#if 0
		dma_unmap_single(cb->pd->device->dma_device,
				 pci_unmap_addr(cb, start_mapping),
				 cb->size, DMA_BIDIRECTIONAL);
#endif
		contigfree(cb->start_buf, cb->size, M_DEVBUF);
	}
	if (cb->use_dmamr)
		ib_dereg_mr(cb->dma_mr);
	else {
		ib_dereg_mr(cb->send_mr);
		ib_dereg_mr(cb->recv_mr);
		ib_dereg_mr(cb->rdma_mr);
		if (!cb->server)
			ib_dereg_mr(cb->start_mr);
	}
}
Example #3
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	int size;
	int i;

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock_irq(&dev->qp_table.lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
	if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);

	mthca_free_mr(dev, &qp->mr);

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else {
		for (i = 0; i < size / PAGE_SIZE; ++i) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    qp->queue.page_list[i].buf,
					    pci_unmap_addr(&qp->queue.page_list[i],
							   mapping));
		}
	}

	kfree(qp->wrid);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}
Example #4
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
Example #5
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	u32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;
	src_map->skb = NULL;

	pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}
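b44_recycle_rx() ends with pci_dma_sync_single_for_device() because the CPU wrote the rx header of a mapping that stays live; the matching pci_dma_sync_single_for_cpu() appears in b44_rx() (Example #16) before the CPU reads. A minimal sketch of that ownership hand-off, reusing the illustrative ring_info struct from the sketch under Example #1 (peek_rx_buffer is a hypothetical helper, not a b44 function):

#include <linux/pci.h>
#include <linux/string.h>

/* Copy out a device-written rx buffer without unmapping it, then
 * hand ownership of the buffer back to the device for reuse. */
static void peek_rx_buffer(struct pci_dev *pdev, struct ring_info *rp,
			   void *dst, size_t len)
{
	dma_addr_t dma = pci_unmap_addr(rp, mapping);

	/* Make the device's DMA writes visible to the CPU... */
	pci_dma_sync_single_for_cpu(pdev, dma, len, PCI_DMA_FROMDEVICE);
	memcpy(dst, rp->skb->data, len);
	/* ...then give the buffer back to the device. */
	pci_dma_sync_single_for_device(pdev, dma, len, PCI_DMA_FROMDEVICE);
}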
Example #6
void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
		    int is_direct, struct mthca_mr *mr)
{
	int i;

	if (mr)
		mthca_free_mr(dev, mr);

	if (is_direct)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  pci_unmap_addr(&buf->direct, mapping));
	else {
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  buf->page_list[i].buf,
					  pci_unmap_addr(&buf->page_list[i],
							 mapping));
		kfree(buf->page_list);
	}
}
Example #7
void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
{
	struct sp_chunk *next;

	while (root) {
		next = root->next;
		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
				  pci_unmap_addr(root, mapping));
		root = next;
	}
}
Example #8
/*
 * Called by c2_remove to cleanup the RNIC resources.
 */
void __devexit c2_rnic_term(struct c2_dev *c2dev)
{

	/* Close the open adapter instance */
	c2_rnic_close(c2dev);

	/* Send the TERM message to the adapter */
	c2_adapter_term(c2dev);

	/* Disable interrupts on the adapter */
	writel(1, c2dev->regs + C2_IDIS);

	/* Free the QP pool */
	c2_cleanup_qp_table(c2dev);

	/* Free the PD pool */
	c2_cleanup_pd_table(c2dev);

	/* Free the verbs request allocator */
	vq_term(c2dev);

	/* Free the asynchronous event queue */
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
			  c2dev->aeq.msg_pool.host,
			  pci_unmap_addr(&c2dev->aeq, mapping));

	/* Free the verbs reply queue */
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
			  c2dev->rep_vq.msg_pool.host,
			  pci_unmap_addr(&c2dev->rep_vq, mapping));

	/* Free the MQ shared pointer pool */
	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);

	/* Free the qptr_array */
	vfree(c2dev->qptr_array);

	return;
}
Example #9
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  pci_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
Example #10
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
Example #11
static void be_rx_q_clean(struct be_net_object *pnob)
{
	if (pnob->rx_ctxt) {
		int i;
		struct be_rx_page_info *rx_page_info;
		for (i = 0; i < pnob->rx_q_len; i++) {
			rx_page_info = &(pnob->rx_page_info[i]);
			if (!pnob->rx_pg_shared || rx_page_info->page_offset) {
				pci_unmap_page(pnob->adapter->pdev,
					       pci_unmap_addr(rx_page_info, bus),
					       pnob->rx_buf_size,
					       PCI_DMA_FROMDEVICE);
			}
			if (rx_page_info->page)
				put_page(rx_page_info->page);
			memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		}
		pnob->rx_pg_info_hd = 0;
	}
}
Example #12
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int i;
	int npages, shift;
	dma_addr_t t;
	u64 *dma_list = NULL;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	if (qp->transport == MLX)
		size += 2 * sizeof (struct mthca_data_seg);
	else if (qp->transport == UD)
		size += sizeof (struct mthca_ud_seg);
	else /* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);
	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
		qp->is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
				  size, shift);

		qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
		if (!qp->queue.direct.buf)
			goto err_out;

		pci_unmap_addr_set(&qp->queue.direct, mapping, t);

		memset(qp->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		qp->is_direct = 0;
		npages = size / PAGE_SIZE;
		shift = PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out;

		qp->queue.page_list = kmalloc(npages *
					      sizeof *qp->queue.page_list,
					      GFP_KERNEL);
		if (!qp->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i) {
			qp->queue.page_list[i].buf =
				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
			if (!qp->queue.page_list[i].buf)
				goto err_out_free;

			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);

			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
			dma_list[i] = t;
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
				  npages, 0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &qp->mr);
	if (err)
		goto err_out_free;

	kfree(dma_list);
	return 0;

 err_out_free:
	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else
		for (i = 0; i < npages; ++i) {
			if (qp->queue.page_list[i].buf)
				pci_free_consistent(dev->pdev, PAGE_SIZE,
						    qp->queue.page_list[i].buf,
						    pci_unmap_addr(&qp->queue.page_list[i],
								   mapping));

		}

 err_out:
	kfree(qp->wrid);
	kfree(dma_list);
	return err;
}
Example #13
static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}
Example #14
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (user) {
		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
			goto err5;
	} else
		if (alloc_host_sq(rdev, &wq->sq))
			goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  pci_unmap_addr(&wq->rq, mapping));
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}
Example #15
/*
 * Called by c2_probe to initialize the RNIC. This principally
 * involves initializing the various limits and resource pools that
 * comprise the RNIC instance.
 */
int __devinit c2_rnic_init(struct c2_dev *c2dev)
{
	int err;
	u32 qsize, msgsize;
	void *q1_pages;
	void *q2_pages;
	void __iomem *mmio_regs;

	/* Device capabilities */
	c2dev->device_cap_flags =
	    (IB_DEVICE_RESIZE_MAX_WR |
	     IB_DEVICE_CURR_QP_STATE_MOD |
	     IB_DEVICE_SYS_IMAGE_GUID |
	     IB_DEVICE_ZERO_STAG |
	     IB_DEVICE_MEM_WINDOW);

	/* Allocate the qptr_array */
	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
	if (!c2dev->qptr_array) {
		return -ENOMEM;
	}

	/* Initialize the qptr_array */
	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
	c2dev->qptr_array[2] = (void *) &c2dev->aeq;

	/* Initialize data structures */
	init_waitqueue_head(&c2dev->req_vq_wo);
	spin_lock_init(&c2dev->vqlock);
	spin_lock_init(&c2dev->lock);

	/* Allocate MQ shared pointer pool for kernel clients. User
	 * mode client pools are hung off the user context
	 */
	err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
	if (err) {
		goto bail0;
	}

	/* Allocate shared pointers for Q0, Q1, and Q2 from
	 * the shared pointer pool.
	 */

	c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->hint_count_dma,
					     GFP_KERNEL);
	c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->req_vq.shared_dma,
					     GFP_KERNEL);
	c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->rep_vq.shared_dma,
					     GFP_KERNEL);
	c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					  &c2dev->aeq.shared_dma, GFP_KERNEL);
	if (!c2dev->hint_count || !c2dev->req_vq.shared ||
	    !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	mmio_regs = c2dev->kva;
	/* Initialize the Verbs Request Queue */
	c2_mq_req_init(&c2dev->req_vq, 0,
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the Verbs Reply Queue */
	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
	if (!q1_pages) {
		err = -ENOMEM;
		goto bail1;
	}
	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
		 (unsigned long long) c2dev->rep_vq.host_dma);
	c2_mq_rep_init(&c2dev->rep_vq,
		   1,
		   qsize,
		   msgsize,
		   q1_pages,
		   mmio_regs +
		   be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
		   C2_MQ_HOST_TARGET);

	/* Initialize the Asynchronous Event Queue */
	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
				      &c2dev->aeq.host_dma, GFP_KERNEL);
	if (!q2_pages) {
		err = -ENOMEM;
		goto bail2;
	}
	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
		 (unsigned long long) c2dev->aeq.host_dma);
	c2_mq_rep_init(&c2dev->aeq,
		       2,
		       qsize,
		       msgsize,
		       q2_pages,
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
		       C2_MQ_HOST_TARGET);

	/* Initialize the verbs request allocator */
	err = vq_init(c2dev);
	if (err)
		goto bail3;

	/* Enable interrupts on the adapter */
	writel(0, c2dev->regs + C2_IDIS);

	/* create the WR init message */
	err = c2_adapter_init(c2dev);
	if (err)
		goto bail4;
	c2dev->init++;

	/* open an adapter instance */
	err = c2_rnic_open(c2dev);
	if (err)
		goto bail4;

	/* Initialize the cached adapter limits */
	if (c2_rnic_query(c2dev, &c2dev->props))
		goto bail5;

	/* Initialize the PD pool */
	err = c2_init_pd_table(c2dev);
	if (err)
		goto bail5;

	/* Initialize the QP pool */
	c2_init_qp_table(c2dev);
	return 0;

      bail5:
	c2_rnic_close(c2dev);
      bail4:
	vq_term(c2dev);
      bail3:
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
      bail2:
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
      bail1:
	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
      bail0:
	vfree(c2dev->qptr_array);

	return err;
}
Example #16
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data+bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
Example #17
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
Example #18
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
}