Example #1
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
Example #2
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}
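
Examples #1 and #2 form a matched pair: every buffer the allocation path maps with ib_dma_map_single() is later released by the unmap path with the same device, size, and direction. A minimal sketch of that invariant (not from either driver; buf, len, and ca are hypothetical):

static int example_map_then_unmap(struct ib_device *ca, void *buf, size_t len)
{
	u64 dma;

	/* map, and always check for a mapping error before using the address */
	dma = ib_dma_map_single(ca, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ca, dma))
		return -ENOMEM;

	/* ... post a receive targeting dma and wait for its completion ... */

	/* unmap with exactly the size and direction used when mapping */
	ib_dma_unmap_single(ca, dma, len, DMA_FROM_DEVICE);
	return 0;
}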
Example #3
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
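
The qp_max_recv_dtos_mask set above relies on cmds_max being a power of two: ring indices can then wrap with a single AND instead of a modulo. A one-line illustration (not from the original file, reusing the fields from Example #3):

	/* with cmds_max == 2^N, (i & (cmds_max - 1)) == i % cmds_max */
	iser_conn->rx_desc_head = (iser_conn->rx_desc_head + 1) &
				  iser_conn->qp_max_recv_dtos_mask;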
Example #4
/**
 * iscsi_iser_cleanup_task() - cleanup an iscsi-iser task
 * @task: iscsi task
 *
 * Notes: In case the RDMA device is already NULL (it might have
 *        been removed in a DEVICE_REMOVAL CM event), bail out
 *        without doing dma unmapping.
 */
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct iser_conn *iser_conn = task->conn->dd_data;
	struct iser_device *device = iser_conn->ib_conn.device;

	/* DEVICE_REMOVAL event might have already released the device */
	if (!device)
		return;

	if (likely(tx_desc->mapped)) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->mapped = false;
	}

	/* mgmt tasks do not need special cleanup */
	if (!task->sc)
		return;

	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
		iser_task->status = ISER_TASK_STATUS_COMPLETED;
		iser_task_rdma_finalize(iser_task);
	}
}
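
The tx_desc->mapped flag exists so this cleanup stays safe if it runs after the descriptor was already unmapped. On the map side (roughly what iser_initialize_task_headers() does upstream; simplified sketch, not verbatim), the flag is set only once the mapping has succeeded:

	tx_desc->dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
					      ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device, tx_desc->dma_addr))
		return -ENOMEM;
	tx_desc->mapped = true;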
Example #5
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	req->rc = c->rc;
	p9_client_cb(client, req);

	return;

 err_out:
	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		   req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	return;
}
Example #6
/**
 * Decrements the reference count for the
 * registered buffer and releases it when the count reaches zero.
 *
 * Returns 0 if released, 1 if the release was deferred.
 */
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
	struct ib_device *dev;

	if ((atomic_read(&regd_buf->ref_count) == 0) ||
	    atomic_dec_and_test(&regd_buf->ref_count)) {
		/* if we used the dma mr, unreg is just a NOP */
		if (regd_buf->reg.is_fmr)
			iser_unreg_mem(&regd_buf->reg);

		if (regd_buf->dma_addr) {
			dev = regd_buf->device->ib_device;
			ib_dma_unmap_single(dev,
					 regd_buf->dma_addr,
					 regd_buf->data_size,
					 regd_buf->direction);
		}
		/*
		 * else this regd buf is associated with a task which we
		 * dma_unmap_single/sg later
		 */
		return 0;
	} else {
		iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
		return 1;
	}
}
Example #7
static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
}
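
For context, the matching map on the send side (in rdma_request() in the same file; lightly simplified sketch) uses the request's actual size, which is why the unmap above passes c->req->tc->size rather than a fixed buffer length:

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto send_error;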
Example #8
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}
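
This destructor mirrors an allocation of roughly the following shape (a sketch of svc_rdma_recv_ctxt_alloc(), assuming sc_max_req_size is the receive buffer size; not verbatim upstream code):

	ctxt->rc_recv_buf = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!ctxt->rc_recv_buf)
		goto fail;

	addr = ib_dma_map_single(rdma->sc_pd->device, ctxt->rc_recv_buf,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail_free;

	ctxt->rc_recv_sge.addr   = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;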
Example #9
static inline void free_single_frag(struct vnic_rx_ring *ring, int e, int i)
{
	ib_dma_unmap_single(ring->port->dev->ca,
			    ring->rx_info[e].dma_addr[i],
			    ring->frag_info[i].frag_size,
			    DMA_FROM_DEVICE);
	ring->rx_info[e].dma_addr[i] = 0;
	put_page(ring->rx_info[e].frags[i].page.p);
}
Example #10
static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].length,
				    ctxt->direction);
	}
}
Example #11
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct iser_device *device = ib_conn->device;

	if (ib_conn->login_buf) {
		ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
			ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
		kfree(ib_conn->login_buf);
	}

	if (!ib_conn->rx_descs)
		return;

	rx_desc = ib_conn->rx_descs;
	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
}
Example #12
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	if (!desc->req)
		return;

	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(desc->req);
	kfree(desc->rsp);

	/* make sure we never redo any unmapping */
	desc->req = NULL;
	desc->rsp = NULL;
}
Example #13
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_device *device = ib_conn->device;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "dataout");

	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	kmem_cache_free(ig.desc_cache, desc);
}
Example #14
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	/* if called from the cma handler context, the caller arranges
	 * for the cma to destroy the id */
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	if (ib_conn->login_buf) {
		if (ib_conn->login_req_dma)
			ib_dma_unmap_single(ib_conn->device->ib_device,
				ib_conn->login_req_dma,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
		if (ib_conn->login_resp_dma)
			ib_dma_unmap_single(ib_conn->device->ib_device,
				ib_conn->login_resp_dma,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
		kfree(ib_conn->login_buf);
	}

	return 0;
}
Example #15
File: iser_verbs.c  Project: bond-os/linux
/**
 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	/* if called from the cma handler context, the caller arranges
	 * for the cma to destroy the id */
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	if (ib_conn->login_req_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
	if (ib_conn->login_resp_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->login_buf);
	ib_conn->login_buf = NULL;
	ib_conn->login_req_dma = ib_conn->login_resp_dma = 0;

	return 0;
}
Example #16
static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (wc->status != IB_WC_SUCCESS)
		goto err_out;

	c->rc.size = wc->byte_len;
	err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request.
	 */
	if (unlikely(req->rc.sdata)) {
		pr_err("Duplicate reply for request %d", tag);
		goto err_out;
	}

	req->rc.size = c->rc.size;
	req->rc.sdata = c->rc.sdata;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

 out:
	up(&rdma->rq_sem);
	kfree(c);
	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
			req, err, wc->status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	goto out;
}
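
The buffer unmapped at the top of recv_done() was mapped when the receive was posted; the post_recv() path in the same file does roughly this (simplified sketch), which is why the unmap length is client->msize:

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;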
Example #17
File: trans_rdma.c  Project: 020gzh/linux
static void
send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);

	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
	up(&rdma->sq_sem);
	kfree(c);
}
Example #18
static int vnic_fill_rx_buffer(struct vnic_rx_ring *ring)
{
	struct vnic_frag_data *frags_data = &ring->rx_info[0];
	struct sk_buff *skb;
	struct ib_device *ca = ring->port->dev->ca;
	int buf_ind, frag_num, buf_size = VNIC_BUF_SIZE(ring->port);
	u64 mapping;

	if (vnic_rx_linear) {
		for (buf_ind = 0; buf_ind < ring->size; buf_ind++) {
			skb = vnic_alloc_rx_skb(ring, buf_ind, GFP_KERNEL);
			if (!skb)
				goto err_linear;
		}

		return 0;
	}

	/* non linear buffers */
	for (buf_ind = 0; buf_ind < ring->size; buf_ind++, frags_data++) {
		for (frag_num = 0; frag_num < ring->num_frags; frag_num++) {
			if (vnic_alloc_frag(ring, frags_data, frag_num))
				goto err_frags;
		}
	}

	return 0;

err_linear:
	for (buf_ind = 0; buf_ind < ring->size; buf_ind++) {
		mapping = ring->rx_info[buf_ind].dma_addr[0];
		skb = ring->rx_info[buf_ind].skb;
		if (mapping)
			ib_dma_unmap_single(ca, mapping, buf_size, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb_any(skb);
	}

	return -ENOMEM;

err_frags:
	for (--frag_num; frag_num >= 0; frag_num--)
		free_single_frag(ring, buf_ind, frag_num);

	for (--buf_ind; buf_ind >= 0; buf_ind--)
		vnic_empty_rx_entry(ring, buf_ind);

	return -ENOMEM;
}
Example #19
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	if (!iser_conn->login_buf)
		return;

	if (iser_conn->login_req_dma)
		ib_dma_unmap_single(device->ib_device,
				    iser_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	if (iser_conn->login_resp_dma)
		ib_dma_unmap_single(device->ib_device,
				    iser_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(iser_conn->login_buf);

	/* make sure we never redo any unmapping */
	iser_conn->login_req_dma = 0;
	iser_conn->login_resp_dma = 0;
	iser_conn->login_buf = NULL;
}
Example #20
int vnic_unmap_and_replace_rx(struct vnic_rx_ring *ring, struct ib_device *dev,
			      struct skb_frag_struct *skb_frags_rx,
			      u64 wr_id, int length)
{
	struct vnic_frag_info *frag_info;
	struct vnic_frag_data *rx_info = &ring->rx_info[wr_id];

	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < ring->num_frags; nr++) {
		frag_info = &ring->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;

		/* Save page reference in skb */
		skb_frags_rx[nr].page = rx_info->frags[nr].page;
		skb_frags_rx[nr].size = rx_info->frags[nr].size;
		skb_frags_rx[nr].page_offset = rx_info->frags[nr].page_offset;
		dma = rx_info->dma_addr[nr];

		/* Allocate a replacement page */
		if (vnic_alloc_frag(ring, rx_info, nr))
			goto fail;

		/* Unmap buffer */
		ib_dma_unmap_single(dev, dma, skb_frags_rx[nr].size,
				    DMA_FROM_DEVICE);
	}

	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frags_rx[nr - 1].size = length -
			ring->frag_info[nr - 1].frag_prefix_size;
	return nr;

fail:
	/* Drop all accumulated fragments (which have already been replaced in
	 * the descriptor) of this packet; remaining fragments are reused... */
	while (nr > 0) {
		nr--;
		put_page(skb_frags_rx[nr].page.p);
	}

	return 0;
}
Example #21
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct iser_device  *device = ib_conn->device;

	ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!ib_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = ib_conn->rx_descs;

	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	ib_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = ib_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
Example #22
File: iscsi_iser.c  Project: 7799/linux
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc    *tx_desc   = &iser_task->desc;
	struct iser_conn       *ib_conn	  = task->conn->dd_data;
	struct iser_device     *device	  = ib_conn->device;

	ib_dma_unmap_single(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	/* mgmt tasks do not need special cleanup */
	if (!task->sc)
		return;

	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
		iser_task->status = ISER_TASK_STATUS_COMPLETED;
		iser_task_rdma_finalize(iser_task);
	}
}
Example #23
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_single(xprt->sc_cm_id->device,
					    ctxt->sge[i].addr,
					    ctxt->sge[i].length,
					    ctxt->direction);
		}
	}
}
Example #24
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (device->reg_ops->free_reg_res)
		device->reg_ops->free_reg_res(ib_conn);

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	/* make sure we never redo any unmapping */
	iser_conn->rx_descs = NULL;

	iser_free_login_buf(iser_conn);
}
Example #25
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			struct ib_conn *ib_conn)
{
	struct iscsi_task *task;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
					ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
		tx_desc = NULL;
	}

	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
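		/* (libiscsi allocates each command as one contiguous block:
		 * a struct iscsi_task immediately followed by its dd_data,
		 * so stepping back sizeof(struct iscsi_task) from dd_data
		 * recovers the owning task.) */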
		task = (void *) ((long)(void *)tx_desc -
				  sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}
Example #26
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				desc->req_dma))
		goto free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					   ISER_RX_LOGIN_SIZE,
					   DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				desc->rsp_dma))
		goto free_rsp;

	return 0;

free_rsp:
	kfree(desc->rsp);
unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
free_req:
	kfree(desc->req);

	return -ENOMEM;
}
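
Example #26 is the canonical goto-unwind shape used throughout these snippets: each failure label undoes exactly the steps that succeeded before it, in reverse order, so the DMA mapping is only released if it was actually established. Schematically (hypothetical helpers, not from the driver):

	if (alloc_a())			/* nothing to undo yet */
		goto err;
	if (alloc_b())			/* only a to undo */
		goto undo_a;
	return 0;

undo_a:
	free_a();
err:
	return -ENOMEM;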
Example #27
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request.
	 */
	if (unlikely(req->rc)) {
		pr_err("Duplicate reply for request %d", tag);
		goto err_out;
	}

	req->rc = c->rc;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}
Example #28
static void vnic_empty_rx_entry(struct vnic_rx_ring *ring, int i)
{
	int frag_num, buf_size = VNIC_BUF_SIZE(ring->port);
	struct ib_device *ca = ring->port->dev->ca;
	struct sk_buff *skb;
	u64 mapping;

	if (vnic_rx_linear) {
		for (frag_num = 0; frag_num < ring->num_frags; frag_num++) {
			mapping = ring->rx_info[i].dma_addr[0];
			skb = ring->rx_info[i].skb;
			if (mapping)
				ib_dma_unmap_single(ca, mapping, buf_size, DMA_FROM_DEVICE);
			if (skb)
				dev_kfree_skb_any(skb);
		}

		return;
	}

	/* non linear buffers */
	for (frag_num = 0; frag_num < ring->num_frags; frag_num++)
		free_single_frag(ring, i, frag_num);
}