/* Allocate a UAR (User Access Region) index and compute the PFN of the
 * corresponding doorbell page within BAR 2 of the HCA. */
int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar)
{
	uar->index = mthca_alloc(&dev->uar_table.alloc);
	if (uar->index == -1)
		return -ENOMEM;

	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;

	return 0;
}
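/*
 * Usage sketch (illustrative, not part of this file): the driver's init
 * path is expected to allocate its own UAR and then ioremap() the
 * doorbell page it refers to.  The helper name is hypothetical, and the
 * dev->driver_uar / dev->kar fields are assumptions modeled on the
 * mthca device structure; treat this as a sketch of the calling
 * convention, not the canonical caller.
 */
static int example_map_driver_uar(struct mthca_dev *dev)
{
	int err;

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err)
		return err;

	/* uar->pfn points at a page inside BAR 2, so shifting it back
	 * up gives a physical address suitable for ioremap(). */
	dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_uar_free(dev, &dev->driver_uar);
		return -ENOMEM;
	}

	return 0;
}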
/* Allocate a protection domain.  A privileged (kernel) PD also gets a
 * no-translation MR so kernel buffers can be accessed by physical
 * address through pd->ntmr. */
int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd)
{
	int err = 0;

	pd->privileged = privileged;

	atomic_set(&pd->sqp_count, 0);
	pd->pd_num = mthca_alloc(&dev->pd_table.alloc);
	if (pd->pd_num == -1)
		return -ENOMEM;

	if (privileged) {
		err = mthca_mr_alloc_notrans(dev, pd->pd_num,
					     MTHCA_MPT_FLAG_LOCAL_READ |
					     MTHCA_MPT_FLAG_LOCAL_WRITE,
					     &pd->ntmr);
		/* Unwind the PD number allocation if the MR fails. */
		if (err)
			mthca_free(&dev->pd_table.alloc, pd->pd_num);
	}

	return err;
}
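/*
 * The matching teardown, sketched here for symmetry with the allocator
 * above: destroy the no-translation MR first (only privileged PDs have
 * one), then return the PD number to the allocator.  This mirrors the
 * in-tree mthca_pd_free(), assuming mthca_free_mr() is the MR
 * destructor paired with mthca_mr_alloc_notrans().
 */
void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
{
	if (pd->privileged)
		mthca_free_mr(dev, &pd->ntmr);
	mthca_free(&dev->pd_table.alloc, pd->pd_num);
}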
/* Allocate a QP number, set up the common software QP state, and
 * publish the QP in the table used by the event and completion paths. */
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   enum ib_sig_type recv_policy,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, recv_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
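/*
 * Lookup sketch: the async-event path must translate a QPN reported by
 * the hardware back into the struct mthca_qp published above.  The mask
 * with (num_qps - 1) matches the mthca_array_set() call in
 * mthca_alloc_qp(); the helper name and the irqsave locking shown here
 * are assumptions modeled on that code, not a verbatim copy of the
 * driver's event handler.
 */
static struct mthca_qp *example_qpn_to_qp(struct mthca_dev *dev, u32 qpn)
{
	struct mthca_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_table.lock, flags);
	qp = mthca_array_get(&dev->qp_table.qp,
			     qpn & (dev->limits.num_qps - 1));
	spin_unlock_irqrestore(&dev->qp_table.lock, flags);

	return qp;
}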
/* Create an address handle.  The UD address vector (AV) lives either in
 * on-HCA (DDR) memory or, as a fallback, in a host-memory PCI pool. */
int mthca_create_ah(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct ib_ah_attr *ah_attr,
		    struct mthca_ah *ah)
{
	u32 index = -1;
	struct mthca_av *av = NULL;

	ah->on_hca = 0;

	if (!atomic_read(&pd->sqp_count) &&
	    !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		index = mthca_alloc(&dev->av_table.alloc);

		/* fall back to allocating in host memory */
		if (index == -1)
			goto host_alloc;

		/* Build the AV in a kernel bounce buffer; it is copied
		 * into HCA memory with memcpy_toio() below. */
		av = kmalloc(sizeof *av, GFP_KERNEL);
		if (!av) {
			/* Don't leak the on-HCA slot we just took. */
			mthca_free(&dev->av_table.alloc, index);
			goto host_alloc;
		}

		ah->on_hca = 1;
		ah->avdma  = dev->av_table.ddr_av_base +
			index * MTHCA_AV_SIZE;
	}

host_alloc:
	if (!ah->on_hca) {
		ah->av = pci_pool_alloc(dev->av_table.pool,
					SLAB_KERNEL, &ah->avdma);
		if (!ah->av)
			return -ENOMEM;

		av = ah->av;
	}

	ah->key = pd->ntmr.ibmr.lkey;

	memset(av, 0, MTHCA_AV_SIZE);

	av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
	av->g_slid  = ah_attr->src_path_bits;
	av->dlid    = cpu_to_be16(ah_attr->dlid);
	av->msg_sr  = (3 << 4) | /* 2K message */
		ah_attr->static_rate;
	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		av->g_slid |= 0x80;
		av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
			ah_attr->grh.sgid_index;
		av->hop_limit = ah_attr->grh.hop_limit;
		av->sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
	} else {
		/* Arbel workaround -- low byte of GID must be 2 */
		av->dgid[3] = cpu_to_be32(2);
	}

	if (0) {
		int j;

		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
			  av, (unsigned long) ah->avdma);
		for (j = 0; j < 8; ++j)
			printk(KERN_DEBUG "  [%2x] %08x\n",
			       j * 4, be32_to_cpu(((u32 *) av)[j]));
	}

	if (ah->on_hca) {
		memcpy_toio(dev->av_table.av_map +
			    index * MTHCA_AV_SIZE,
			    av, MTHCA_AV_SIZE);
		kfree(av);
	}

	return 0;
}
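/*
 * Teardown sketch for the two allocation paths above: an on-HCA AV is
 * returned to the av_table allocator by recomputing its index from the
 * DMA address, while a host-memory AV goes back to the PCI pool.  This
 * follows the shape of the in-tree mthca_destroy_ah(); treat it as a
 * reconstruction consistent with mthca_create_ah() rather than a
 * verbatim copy.
 */
int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
{
	if (ah->on_hca)
		mthca_free(&dev->av_table.alloc,
			   (ah->avdma - dev->av_table.ddr_av_base) /
			   MTHCA_AV_SIZE);
	else
		pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);

	return 0;
}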