void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); if (ccmd->release) { struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; if (ttinfo->sgl) { struct cxgbit_sock *csk = conn->context; struct cxgbit_device *cdev = csk->com.cdev; struct cxgbi_ppm *ppm = cdev2ppm(cdev); /* Abort the TCP conn if DDP is not complete to * avoid any possibility of DDP after freeing * the cmd. */ if (unlikely(cmd->write_data_done != cmd->se_cmd.data_length)) cxgbit_abort_conn(csk); cxgbi_ppm_ppod_release(ppm, ttinfo->idx); dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, DMA_FROM_DEVICE); } else { put_page(sg_page(&ccmd->sg)); } ccmd->release = false; } }
/*
 * cxgbit_ddp_reserve() - set up direct data placement for a WRITE command.
 * @csk:     connection socket.
 * @ttinfo:  task tag info; ->sgl/->nents describe the command's data buffer.
 *           On success ->idx, ->tag, ->npods, ->nr_pages and ->hdr are filled.
 * @xferlen: expected data transfer length in bytes.
 *
 * Reserves page-pod (ppod) entries, DMA-maps the scatterlist and writes the
 * ppods to the adapter so the tag (used as the ttt in the outgoing r2t pdu)
 * lets the hardware place Data-Out payload directly into the buffer.
 *
 * Returns 0 on success, a negative value on failure (caller falls back to
 * the non-DDP receive path).
 */
static int cxgbit_ddp_reserve(struct cxgbit_sock *csk,
			      struct cxgbi_task_tag_info *ttinfo,
			      unsigned int xferlen)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;	/* saved; restored after mapping */
	int ret;

	/* DDP is not worthwhile for small transfers, and impossible without
	 * a scatterlist.
	 */
	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	/* Reject scatterlists whose page alignment the hardware cannot DDP. */
	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	/* Pages needed to cover the transfer, including the leading offset
	 * within the first page.
	 */
	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the ttt in the outgoing r2t pdu
	 */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	if (ret < 0)
		return ret;
	ttinfo->npods = ret;

	/* Map with a zeroed first-entry offset, then restore it: the ppod
	 * header (below) carries the real offset separately.
	 */
	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (!ret) {
		pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			 __func__, 0, xferlen, sgcnt);
		goto rel_ppods;
	}

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	/* Push the ppods to adapter memory; on failure drop any queued ppod
	 * skbs and unmap before releasing the reservation.
	 */
	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
}
void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); if (ccmd->release) { struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; if (ttinfo->sgl) { struct cxgbit_sock *csk = conn->context; struct cxgbit_device *cdev = csk->com.cdev; struct cxgbi_ppm *ppm = cdev2ppm(cdev); cxgbi_ppm_ppod_release(ppm, ttinfo->idx); dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, DMA_FROM_DEVICE); } else { put_page(sg_page(&ccmd->sg)); } ccmd->release = false; } }