Example 1
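/*
 * Maps the guest's CQ page directory and first page table via PCI DMA,
 * allocates a PvrdmaRing and maps the first data page as the shared ring
 * state; the remaining nchunks - 1 pages back the CQE ring and are handed to
 * pvrdma_ring_init(). On failure the ring-state mapping and the allocation
 * are rolled back; the directory and table pages are always unmapped.
 */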
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to CQ page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to CQ page table\n");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        pr_dbg("Failed to map to CQ ring state\n");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}
Example 2
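/*
 * Builds an RdmaCmMuxMsg MAD request from a two-entry scatter/gather list:
 * both SGEs are temporarily DMA-mapped, copied into the message's MAD buffer
 * and unmapped again, then the message is forwarded to the rdmacm-mux helper.
 * Requests with other than two SGEs, or whose combined length exceeds the MAD
 * buffer, are rejected.
 */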
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}
Example 3
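/*
 * Looks up the QP by handle, releases it from the resource manager, frees the
 * send ring (index 0) and receive ring (index 1) stored in qp->opaque, unmaps
 * the shared ring-state page and frees the ring pair.
 */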
static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        pr_dbg("Invalid QP handle\n");
        return -EINVAL;
    }

    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    ring = (PvrdmaRing *)qp->opaque;
    pr_dbg("sring=%p\n", &ring[0]);
    pvrdma_ring_free(&ring[0]);
    pr_dbg("rring=%p\n", &ring[1]);
    pvrdma_ring_free(&ring[1]);

    rdma_pci_dma_unmap(PCI_DEVICE(dev), ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);

    return 0;
}
Example 4
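/*
 * Looks up the CQ by handle, frees its completion ring, unmaps the ring-state
 * page (stepping back one slot, since the ring was initialized with slot 1 of
 * the mapped page), frees the ring and releases the CQ from the resource
 * manager.
 */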
static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    pr_dbg("cq_handle=%d\n", cmd->cq_handle);

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        pr_dbg("Invalid CQ handle\n");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(PCI_DEVICE(dev), --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}
Example 5
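/*
 * Initializes a PvrdmaRing descriptor and DMA-maps each guest page listed in
 * tbl[] into ring->pages[], zeroing the mapped pages. NULL table entries are
 * reported and skipped; if a mapping fails, the pages mapped so far are
 * unmapped and the pages array is freed.
 */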
int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     struct pvrdma_ring *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    strncpy(ring->name, name, MAX_RING_NAME_SZ);
    ring->name[MAX_RING_NAME_SZ - 1] = 0;
    pr_dbg("Initializing %s ring\n", ring->name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    pr_dbg("ring->elem_sz=%zu\n", ring->elem_sz);
    pr_dbg("npages=%d\n", npages);
    /* TODO: Give a moment to think if we want to redo driver settings
    atomic_set(&ring->ring_state->prod_tail, 0);
    atomic_set(&ring->ring_state->cons_head, 0);
    */
    ring->npages = npages;
    /* zero-initialize so entries skipped below stay NULL for later cleanup */
    ring->pages = g_new0(void *, npages);

    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            pr_err("npages=%ld but tbl[%d] is NULL\n", (long)npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            pr_dbg("Failed to map to page %d\n", i);
            goto out_free;
        }
        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);

out:
    return rc;
}
Example 6
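/*
 * Consumes one pre-posted receive buffer from recv_mads_list, DMA-maps the
 * guest SGE and, if it is large enough for a GRH plus the incoming MAD,
 * writes the header and payload and posts a successful completion with
 * IBV_WC_GRH; otherwise a completion with VENDOR_ERR_INV_MAD_BUFF is posted.
 * The backend context and its CQE context entry are released either way.
 */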
static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}
Example 7
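/*
 * Unmaps every guest page held by the ring and frees the pages array; safe to
 * call on a NULL ring or one whose pages were already released.
 */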
void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring) {
        return;
    }

    if (!ring->pages) {
        return;
    }

    pr_dbg("ring->npages=%d\n", ring->npages);
    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}
Example 8
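/*
 * Allocates a pair of PvrdmaRing structures (send at index 0, receive at
 * index 1) backed by the guest's QP page directory. The first data page holds
 * both ring states; the send ring uses the next spages pages and the receive
 * ring the rpages pages after those. Each ring's WQE size is the header plus
 * max_sge SGEs, rounded up to a power of two.
 */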
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to CQ page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to CQ page table\n");
        goto out;
    }

    sr = g_malloc(2 * sizeof(*rr));
    rr = &sr[1];
    pr_dbg("sring=%p\n", sr);
    pr_dbg("rring=%p\n", rr);

    *rings = sr;

    pr_dbg("scqe=%d\n", scqe);
    pr_dbg("smax_sge=%d\n", smax_sge);
    pr_dbg("spages=%d\n", spages);
    pr_dbg("rcqe=%d\n", rcqe);
    pr_dbg("rmax_sge=%d\n", rmax_sge);
    pr_dbg("rpages=%d\n", rpages);

    /* Create send ring */
    sr->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        pr_dbg("Failed to map to CQ ring state\n");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    /* Create recv ring */
    rr->ring_state = &sr->ring_state[1];
    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * rmax_sge - 1);
    sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                          rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
    if (rc) {
        goto out_free_sr;
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}
Example 9
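/*
 * Walks a guest page directory/page-table hierarchy and builds one contiguous
 * host-virtual mapping of length bytes: the first chunk is mapped and grown
 * with mremap(MREMAP_MAYMOVE), then every further chunk is remapped into the
 * region at a fixed offset. Returns the contiguous mapping, or NULL on
 * failure.
 */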
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        pr_dbg("nchunks=0\n");
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        error_report("PVRDMA: Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        error_report("PVRDMA: Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        error_report("PVRDMA: Failed to map the first page");
        goto out_unmap_tbl;
    }

    /*
     * mremap() with old_size == 0 creates a second mapping of the same pages,
     * sized to cover the whole region; later chunks are remapped into it at
     * fixed offsets below.
     */
    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        error_report("PVRDMA: Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

    pr_dbg("host_virt=%p\n", host_virt);

    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            pr_dbg("Mapping to table %d\n", dir_idx);
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                error_report("PVRDMA: Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        pr_dbg("guest_dma[%d]=0x%" PRIx64 "\n", addr_idx, tbl[tbl_idx]);

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            error_report("PVRDMA: Failed to map to page %d, dir %d", tbl_idx,
                         dir_idx);
            goto out_unmap_host_virt;
        }

        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
               host_virt + TARGET_PAGE_SIZE * addr_idx);

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;

        tbl_idx++;
    }

    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}