/*
 * Poll completions for the GSI (QP1) CQ. The GSI queues are owned by the
 * driver rather than the firmware, so completions are harvested directly
 * from the software producer/consumer indices of the GSI QP.
 */
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	/* Harvest receive completions first */
	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
			       IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;

		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	/* Then harvest send completions */
	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}
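/*
 * Create the GSI QP (QP1). Traffic for this QP is tunneled through the
 * qed light-L2 (ll2) interface, so the send and receive rings are tracked
 * in software via the rqe_wr_id/wqe_wr_id arrays allocated here.
 */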
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
				 struct ib_qp_init_attr *attrs,
				 struct qedr_qp *qp)
{
	int rc;

	rc = qedr_check_gsi_qp_attrs(dev, attrs);
	if (rc)
		return ERR_PTR(rc);

	rc = qedr_ll2_start(dev, attrs, qp);
	if (rc) {
		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
		return ERR_PTR(rc);
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		goto err;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		goto err;

	qedr_store_gsi_qp_cq(dev, qp, attrs);
	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	/* the GSI CQs are handled by the driver so remove them from the FW */
	qedr_destroy_gsi_cq(dev, attrs);
	dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;

	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);

	return &qp->ibqp;

err:
	kfree(qp->rqe_wr_id);

	rc = qedr_ll2_stop(dev);
	if (rc)
		DP_ERR(dev, "create gsi qp: failed destroy on create\n");

	return ERR_PTR(-ENOMEM);
}
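/*
 * Post a single send WR on the GSI QP. The WR is converted into an ll2
 * packet and transmitted directly; only IB_WR_SEND with at most
 * RDMA_MAX_SGE_PER_SQ_WQE SGEs is supported, and chained WRs are rejected.
 */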
int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr)
{
	struct qed_roce_ll2_packet *pkt = NULL;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	if (qp->state != QED_ROCE_QP_STATE_RTS) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post send: failed to post. state is %d and not QED_ROCE_QP_STATE_RTS\n",
		       qp->state);
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		DP_ERR(dev,
		       "gsi post send: failed due to unsupported opcode %d\n",
		       wr->opcode);
		rc = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		goto err;
	}

	rc = qedr_ll2_post_tx(dev, pkt);
	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qedr_inc_sw_prod(&qp->sq);
		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
			 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
			 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
	} else {
		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next) {
		DP_ERR(dev,
		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	return rc;

err:
	*bad_wr = wr;
	return rc;
}
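/*
 * Probe-time entry point: allocate the ib_device, query the qed core for
 * RDMA capabilities, set up resources, hardware and IRQs, then register
 * with the RDMA core and create the sysfs attribute files.
 */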
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0, i;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "not enough CNQ resources.\n");
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
			goto sysfs_err;

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

sysfs_err:
	ib_unregister_device(&dev->ibdev);
reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	/* log before releasing the ib_device so 'dev' is still valid */
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
	ib_dealloc_device(&dev->ibdev);

	return NULL;
}
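/*
 * Allocate per-device resources: the SGID table and, for each CNQ
 * (completion notification queue), a status block plus a PBL-backed qed
 * chain whose consumer index is tracked through the status block's
 * pi_array.
 */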
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) * QEDR_MAX_SGID,
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	if (IS_IWARP(dev)) {
		spin_lock_init(&dev->idr_lock);
		idr_init(&dev->qpidr);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl, NULL);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;

err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}
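/*
 * Debug helper: print the name, offset and current value of each SOR
 * register of interest via the local DUMP_REG() macro.
 */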
static void dump_sor_reg(struct tegra_dc_sor_data *sor)
{
#define DUMP_REG(a) printk(BIOS_INFO, \
		"%-32s %03x %08x\n", \
		#a, a, tegra_sor_readl(sor, a));

	DUMP_REG(SUPER_STATE0);
	DUMP_REG(SUPER_STATE1);
	DUMP_REG(STATE0);
	DUMP_REG(STATE1);
	DUMP_REG(NV_HEAD_STATE0(0));
	DUMP_REG(NV_HEAD_STATE0(1));
	DUMP_REG(NV_HEAD_STATE1(0));
	DUMP_REG(NV_HEAD_STATE1(1));
	DUMP_REG(NV_HEAD_STATE2(0));
	DUMP_REG(NV_HEAD_STATE2(1));
	DUMP_REG(NV_HEAD_STATE3(0));
	DUMP_REG(NV_HEAD_STATE3(1));
	DUMP_REG(NV_HEAD_STATE4(0));
	DUMP_REG(NV_HEAD_STATE4(1));
	DUMP_REG(NV_HEAD_STATE5(0));
	DUMP_REG(NV_HEAD_STATE5(1));
	DUMP_REG(CRC_CNTRL);
	DUMP_REG(CLK_CNTRL);
	DUMP_REG(CAP);
	DUMP_REG(PWR);
	DUMP_REG(TEST);
	DUMP_REG(PLL0);
	DUMP_REG(PLL1);
	DUMP_REG(PLL2);
	DUMP_REG(PLL3);
	DUMP_REG(CSTM);
	DUMP_REG(LVDS);
	DUMP_REG(CRCA);
	DUMP_REG(CRCB);
	DUMP_REG(SEQ_CTL);
	DUMP_REG(LANE_SEQ_CTL);
	DUMP_REG(SEQ_INST(0));
	DUMP_REG(SEQ_INST(1));
	DUMP_REG(SEQ_INST(2));
	DUMP_REG(SEQ_INST(3));
	DUMP_REG(SEQ_INST(4));
	DUMP_REG(SEQ_INST(5));
	DUMP_REG(SEQ_INST(6));
	DUMP_REG(SEQ_INST(7));
	DUMP_REG(SEQ_INST(8));
	DUMP_REG(PWM_DIV);
	DUMP_REG(PWM_CTL);
	DUMP_REG(MSCHECK);
	DUMP_REG(XBAR_CTRL);
	DUMP_REG(DP_LINKCTL(0));
	DUMP_REG(DP_LINKCTL(1));
	DUMP_REG(DC(0));
	DUMP_REG(DC(1));
	DUMP_REG(LANE_DRIVE_CURRENT(0));
	DUMP_REG(PR(0));
	DUMP_REG(LANE4_PREEMPHASIS(0));
	DUMP_REG(POSTCURSOR(0));
	DUMP_REG(DP_CONFIG(0));
	DUMP_REG(DP_CONFIG(1));
	DUMP_REG(DP_MN(0));
	DUMP_REG(DP_MN(1));
	DUMP_REG(DP_PADCTL(0));
	DUMP_REG(DP_PADCTL(1));
	DUMP_REG(DP_DEBUG(0));
	DUMP_REG(DP_DEBUG(1));
	DUMP_REG(DP_SPARE(0));
	DUMP_REG(DP_SPARE(1));
	DUMP_REG(DP_TPG);
}