void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	struct ib_device *device = xprt->sc_cm_id->device;
	u32 lkey = xprt->sc_pd->local_dma_lkey;
	unsigned int i, count;

	for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the local_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == lkey) {
			count++;
			ib_dma_unmap_page(device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
	ctxt->mapped_sges = 0;
	atomic_sub(count, &xprt->sc_dma_used);
}
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completion. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once.
	 */
	for (addr = virt; addr < bound; addr += PAGE_SIZE) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		mutex_lock(&umem->odp_data->umem_mutex);
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
#ifdef CONFIG_COMPAT_USE_COMPOUND_TRANS_HEAD
			struct page *head_page = compound_trans_head(page);
#else
			struct page *head_page = compound_head(page);
#endif
			dma_addr_t dma_addr = umem->odp_data->dma_list[idx] &
					      ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (umem->odp_data->dma_list[idx] &
			    ODP_WRITE_ALLOWED_BIT)
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
			atomic_inc(&dev->odp_statistics.num_invalidation_pages);
		}
		mutex_unlock(&umem->odp_data->umem_mutex);
	}
}
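For reference, each dma_list entry above packs a DMA address together with ODP access bits. The sketch below is only an illustration, not kernel code, of how such an entry could be composed and then split again with ODP_DMA_ADDR_MASK, matching the masking done in ib_umem_odp_unmap_dma_pages(); the helper name example_odp_pack_dma is hypothetical.

static u64 example_odp_pack_dma(u64 dma_addr, bool write_allowed)
{
	/* Keep only the address bits, as the unmap path above does with
	 * ODP_DMA_ADDR_MASK, then set the write-allowed flag when the
	 * mapping is writable so the unmap path knows to dirty the page.
	 */
	u64 entry = dma_addr & ODP_DMA_ADDR_MASK;

	if (write_allowed)
		entry |= ODP_WRITE_ALLOWED_BIT;
	return entry;
}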
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	struct ib_device *device = xprt->sc_cm_id->device;
	unsigned int i;

	/* Unmap every SGE that was recorded at DMA-map time. */
	for (i = 0; i < ctxt->mapped_sges; i++)
		ib_dma_unmap_page(device,
				  ctxt->sge[i].addr,
				  ctxt->sge[i].length,
				  ctxt->direction);
	ctxt->mapped_sges = 0;
}
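A minimal sketch of the mapping side that the svc_rdma_unmap_dma() variants above assume: each page mapped with ib_dma_map_page() is recorded in ctxt->sge[] and counted in ctxt->mapped_sges, so the unmap loop can walk exactly that many entries. The helper name example_svc_rdma_dma_map_page is hypothetical and shows only the bookkeeping, not the kernel's actual map path.

static int example_svc_rdma_dma_map_page(struct svcxprt_rdma *xprt,
					 struct svc_rdma_op_ctxt *ctxt,
					 struct page *page,
					 unsigned long offset,
					 unsigned int len)
{
	struct ib_device *device = xprt->sc_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_page(device, page, offset, len,
				   ctxt->direction);
	if (ib_dma_mapping_error(device, dma_addr))
		return -EIO;

	/* Record the mapping so svc_rdma_unmap_dma() can undo it later.
	 * Variants that track xprt->sc_dma_used would also increment it here.
	 */
	ctxt->sge[ctxt->mapped_sges].addr = dma_addr;
	ctxt->sge[ctxt->mapped_sges].length = len;
	ctxt->sge[ctxt->mapped_sges].lkey = xprt->sc_pd->local_dma_lkey;
	ctxt->mapped_sges++;
	return 0;
}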
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}