static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
	int status = 0;
	u8 lstate = 0;
	struct ocrdma_dev *dev;

	dev = ib_alloc_device(ocrdma_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
	if (!dev->mbx_cmd)
		goto init_err;

	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
	dev->id = PCI_FUNC(dev->nic_info.pdev->devfn);
	status = ocrdma_init_hw(dev);
	if (status)
		goto init_err;

	status = ocrdma_alloc_resources(dev);
	if (status)
		goto alloc_err;

	ocrdma_init_service_level(dev);
	status = ocrdma_register_device(dev);
	if (status)
		goto alloc_err;

	/* Query Link state and update */
	status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
	if (!status)
		ocrdma_update_link_state(dev, lstate);

	/* Init stats */
	ocrdma_add_port_stats(dev);
	/* Interrupt Moderation */
	INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));

	pr_info("%s %s: %s \"%s\" port %d\n",
		dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
		port_speed_string(dev), dev->model_number,
		dev->hba_port_num);
	pr_info("%s ocrdma%d driver loaded successfully\n",
		dev_name(&dev->nic_info.pdev->dev), dev->id);
	return dev;

alloc_err:
	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);
init_err:
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
	pr_err("%s() leaving. ret=%d\n", __func__, status);
	return NULL;
}
/**
 * rvt_alloc_device - allocate rdi
 * @size: how big of a structure to allocate
 * @nports: number of ports to allocate array slots for
 *
 * Use IB core device alloc to allocate space for the rdi which is assumed to be
 * inside of the ib_device. Any extra space that drivers require should be
 * included in size.
 *
 * We also allocate a port array based on the number of ports.
 *
 * Return: pointer to allocated rdi
 */
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
{
	struct rvt_dev_info *rdi;

	rdi = (struct rvt_dev_info *)ib_alloc_device(size);
	if (!rdi)
		return rdi;

	rdi->ports = kcalloc(nports, sizeof(struct rvt_ibport **), GFP_KERNEL);
	if (!rdi->ports)
		ib_dealloc_device(&rdi->ibdev);

	return rdi;
}
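/*
 * Illustrative caller sketch (not part of the source above): a minimal
 * example of how a driver might use rvt_alloc_device(), assuming a
 * hypothetical driver struct that embeds struct rvt_dev_info as its first
 * member so the returned pointer can be cast to the driver type, as the
 * in-tree rdmavt users do. The names my_driver_dev, my_counter and
 * my_driver_alloc are invented for illustration only.
 */
struct my_driver_dev {
	struct rvt_dev_info rdi;	/* must come first; wraps the ib_device */
	int my_counter;			/* driver-private state follows */
};

static struct my_driver_dev *my_driver_alloc(int nports)
{
	struct my_driver_dev *dd;

	/* extra driver-private space is carried in the size argument */
	dd = (struct my_driver_dev *)rvt_alloc_device(sizeof(*dd), nports);
	if (!dd)
		return NULL;

	return dd;
}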
static int
iwch_activate(struct adapter *sc)
{
	struct iwch_dev *rnicp;
	int rc;

	KASSERT(!isset(&sc->offload_map, MAX_NPORTS),
	    ("%s: iWARP already activated on %s", __func__,
	    device_get_nameunit(sc->dev)));

	rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
	if (rnicp == NULL)
		return (ENOMEM);

	sc->iwarp_softc = rnicp;
	rnicp->rdev.adap = sc;

	cxio_hal_init(sc);
	iwch_cm_init_cpl(sc);

	rc = cxio_rdev_open(&rnicp->rdev);
	if (rc != 0) {
		printf("Unable to open CXIO rdev\n");
		goto err1;
	}

	rnic_init(rnicp);

	rc = iwch_register_device(rnicp);
	if (rc != 0) {
		printf("Unable to register device\n");
		goto err2;
	}

	return (0);

err2:
	rnic_uninit(rnicp);
	cxio_rdev_close(&rnicp->rdev);
err1:
	cxio_hal_uninit(sc);
	iwch_cm_term_cpl(sc);
	sc->iwarp_softc = NULL;

	return (rc);
}
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = ib_alloc_device(rxe_dev, ib_dev);
	if (!rxe)
		return -ENOMEM;

	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu, ibdev_name);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return err;
	}

	return 0;
}
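/*
 * Illustrative sketch (not part of the source above): ocrdma_add() and
 * rxe_net_add() above use the two-argument ib_alloc_device(type, member)
 * form, which allocates the driver's container struct and returns a
 * pointer to it, given the name of the embedded struct ib_device member.
 * A hypothetical driver following the same pattern might look like the
 * code below; my_dev, ib_dev, my_state, my_dev_alloc and my_dev_free are
 * invented names, and the embedded ib_device is placed first, as in the
 * in-tree drivers shown here.
 */
struct my_dev {
	struct ib_device ib_dev;	/* embedded IB core device */
	int my_state;			/* driver-private fields */
};

static struct my_dev *my_dev_alloc(void)
{
	struct my_dev *dev;

	dev = ib_alloc_device(my_dev, ib_dev);	/* returns struct my_dev * */
	if (!dev)
		return NULL;

	return dev;
}

static void my_dev_free(struct my_dev *dev)
{
	/* the matching release always goes through the embedded ib_device */
	ib_dealloc_device(&dev->ib_dev);
}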
static int __devinit c2_probe(struct pci_dev *pcidev,
			      const struct pci_device_id *ent)
{
	int ret = 0, i;
	unsigned long reg0_start, reg0_flags, reg0_len;
	unsigned long reg2_start, reg2_flags, reg2_len;
	unsigned long reg4_start, reg4_flags, reg4_len;
	unsigned kva_map_size;
	struct net_device *netdev = NULL;
	struct c2_dev *c2dev = NULL;
	void __iomem *mmio_regs = NULL;

	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
		DRV_VERSION);

	/* Enable the PCI device */
	ret = pci_enable_device(pcidev);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
			pci_name(pcidev));
		goto bail0;
	}

	/* Collect the BAR0, BAR2 and BAR4 resources */
	reg0_start = pci_resource_start(pcidev, BAR_0);
	reg0_len = pci_resource_len(pcidev, BAR_0);
	reg0_flags = pci_resource_flags(pcidev, BAR_0);

	reg2_start = pci_resource_start(pcidev, BAR_2);
	reg2_len = pci_resource_len(pcidev, BAR_2);
	reg2_flags = pci_resource_flags(pcidev, BAR_2);

	reg4_start = pci_resource_start(pcidev, BAR_4);
	reg4_len = pci_resource_len(pcidev, BAR_4);
	reg4_flags = pci_resource_flags(pcidev, BAR_4);

	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

	/* Make sure the regions are MMIO and large enough */
	if (!(reg0_flags & IORESOURCE_MEM) ||
	    !(reg2_flags & IORESOURCE_MEM) ||
	    !(reg4_flags & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
		ret = -ENODEV;
		goto bail1;
	}

	if ((reg0_len < C2_REG0_SIZE) ||
	    (reg2_len < C2_REG2_SIZE) ||
	    (reg4_len < C2_REG4_SIZE)) {
		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
		ret = -ENODEV;
		goto bail1;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to request regions\n",
			pci_name(pcidev));
		goto bail1;
	}

	/* Set DMA mask: 64 bit if possible, otherwise 32 bit */
	if ((sizeof(dma_addr_t) > 4)) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret < 0) {
			printk(KERN_ERR PFX "64b DMA configuration failed\n");
			goto bail2;
		}
	} else {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret < 0) {
			printk(KERN_ERR PFX "32b DMA configuration failed\n");
			goto bail2;
		}
	}

	/* Enable bus-mastering on the device */
	pci_set_master(pcidev);

	/* Remap the adapter PCI registers in BAR4 */
	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				    sizeof(struct c2_adapter_pci_regs));
	if (!mmio_regs) {
		printk(KERN_ERR PFX
			"Unable to remap adapter PCI registers in BAR4\n");
		ret = -EIO;
		goto bail2;
	}

	/* Validate the boot-loader magic bytes */
	for (i = 0; i < sizeof(c2_magic); i++) {
		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
			       "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
			       "utility to update your boot loader\n",
			       i + 1, sizeof(c2_magic),
			       readb(mmio_regs + C2_REGS_MAGIC + i),
			       c2_magic[i]);
			printk(KERN_ERR PFX "Adapter not claimed\n");
			iounmap(mmio_regs);
			ret = -EIO;
			goto bail2;
		}
	}

	/* Validate the adapter version */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
		printk(KERN_ERR PFX "Version mismatch "
			"[fw=%u, c2=%u], Adapter not claimed\n",
			be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
			C2_VERSION);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Validate the adapter interface version number (IVN) */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
		printk(KERN_ERR PFX "Downlevel Firmware level. You should be using "
		       "the OpenIB device support kit. "
		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
		       C2_IVN);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Allocate the hardware structure */
	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
	if (!c2dev) {
		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
			pci_name(pcidev));
		ret = -ENOMEM;
		iounmap(mmio_regs);
		goto bail2;
	}

	memset(c2dev, 0, sizeof(*c2dev));
	spin_lock_init(&c2dev->lock);
	c2dev->pcidev = pcidev;
	c2dev->cur_tx = 0;

	/* Get the last RX index */
	c2dev->cur_rx =
	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
	     0xffffc000) / sizeof(struct c2_rxp_desc);

	/* Request an interrupt line for the driver */
	ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
	if (ret) {
		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
			pci_name(pcidev), pcidev->irq);
		iounmap(mmio_regs);
		goto bail3;
	}

	/* Set driver specific data */
	pci_set_drvdata(pcidev, c2dev);

	/* Initialize the network device */
	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
		iounmap(mmio_regs);
		goto bail4;
	}

	/* Save off the kernel VA window size before unmapping mmio_regs */
	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

	/* Unmap the adapter PCI registers in BAR4 */
	iounmap(mmio_regs);

	/* Register the network device */
	ret = register_netdev(netdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
			ret);
		goto bail5;
	}

	/* Disable network packets until the interface is brought up */
	netif_stop_queue(netdev);

	/* Remap the adapter HRXDQ PA space to kernel VA space */
	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
					       C2_RXP_HRXDQ_SIZE);
	if (!c2dev->mmio_rxp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
		ret = -EIO;
		goto bail6;
	}

	/* Remap the adapter HTXDQ PA space to kernel VA space */
	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
					       C2_TXP_HTXDQ_SIZE);
	if (!c2dev->mmio_txp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
		ret = -EIO;
		goto bail7;
	}

	/* Save off the current RX index in the last RXP descriptor */
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	/* Remap the adapter registers in BAR0 */
	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
	if (!c2dev->regs) {
		printk(KERN_ERR PFX "Unable to remap BAR0\n");
		ret = -EIO;
		goto bail8;
	}

	/* Remap the adapter kernel VA space in BAR4 */
	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				     kva_map_size);
	if (!c2dev->kva) {
		printk(KERN_ERR PFX "Unable to remap BAR4\n");
		ret = -EIO;
		goto bail9;
	}

	/* Print out the MAC address */
	c2_print_macaddr(netdev);

	ret = c2_rnic_init(c2dev);
	if (ret) {
		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
		goto bail10;
	}

	if (c2_register_device(c2dev))
		goto bail10;

	return 0;

bail10:
	iounmap(c2dev->kva);

bail9:
	iounmap(c2dev->regs);

bail8:
	iounmap(c2dev->mmio_txp_ring);

bail7:
	iounmap(c2dev->mmio_rxp_ring);

bail6:
	unregister_netdev(netdev);

bail5:
	free_netdev(netdev);

bail4:
	free_irq(pcidev->irq, c2dev);

bail3:
	ib_dealloc_device(&c2dev->ibdev);

bail2:
	pci_release_regions(pcidev);

bail1:
	pci_disable_device(pcidev);

bail0:
	return ret;
}
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	int err;
	int i;

	/* don't create IB instance over Eth ports, no RoCE yet! */
	if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		return NULL;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	err = get_port_caps(dev);
	if (err)
		goto err_dealloc;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device = &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	dev->ib_dev.query_device = mlx5_ib_query_device;
	dev->ib_dev.query_port = mlx5_ib_query_port;
	dev->ib_dev.query_gid = mlx5_ib_query_gid;
	dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
	dev->ib_dev.modify_device = mlx5_ib_modify_device;
	dev->ib_dev.modify_port = mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap = mlx5_ib_mmap;
	dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah = mlx5_ib_create_ah;
	dev->ib_dev.query_ah = mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq = mlx5_ib_create_srq;
	dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
	dev->ib_dev.query_srq = mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp = mlx5_ib_create_qp;
	dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
	dev->ib_dev.query_qp = mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
	dev->ib_dev.post_send = mlx5_ib_post_send;
	dev->ib_dev.post_recv = mlx5_ib_post_recv;
	dev->ib_dev.create_cq = mlx5_ib_create_cq;
	dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
	dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad = mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
	dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
	dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable = mlx5_port_immutable;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	err = init_node_data(dev);
	if (err)
		goto err_dealloc;

	mutex_init(&dev->cap_mask_mutex);

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_dealloc;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_odp;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0, i;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "not enough CNQ resources.\n");
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to allocate register device\n");
		goto reg_err;
	}

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
			goto sysfs_err;

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

sysfs_err:
	ib_unregister_device(&dev->ibdev);
reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;

	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	INIT_LIST_HEAD(&ibdev->pgdir_list);
	mutex_init(&ibdev->pgdir_mutex);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner = THIS_MODULE;
	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
	ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
	ibdev->ib_dev.num_comp_vectors = 1;
	ibdev->ib_dev.dma_device = &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;

	if (init_node_data(ibdev))
		goto err_map;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev))
		goto err_map;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	return ibdev;

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
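/*
 * Hypothetical teardown sketch (not taken from the source above): it simply
 * walks the mlx4_ib_add() error-unwind chain from the top for a fully
 * initialized device, i.e. it unregisters, unmaps and frees in the reverse
 * order of setup. The function name mlx4_ib_teardown_sketch is invented, and
 * mlx4_ib_mad_cleanup() is assumed to be the counterpart of
 * mlx4_ib_mad_init(); the driver's real remove callback may differ in
 * details such as port handling.
 */
static void mlx4_ib_teardown_sketch(struct mlx4_dev *dev,
				    struct mlx4_ib_dev *ibdev)
{
	mlx4_ib_mad_cleanup(ibdev);		/* assumed MAD counterpart */
	ib_unregister_device(&ibdev->ib_dev);
	iounmap(ibdev->uar_map);
	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}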