int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl)
{
	int err;

	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
				  &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
		goto err_db_free;
	}

	wq->buf = wq_ctrl->buf.direct.buf;
	wq->db  = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
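/*
 * Hedged usage sketch for mlx5_wq_cyc_create() above; illustrative, not
 * taken from the driver. It assumes a caller that builds the "wq" context
 * with the usual MLX5_SET() accessors and has no NUMA preference; the
 * stride/size values are example numbers only.
 */
static int example_create_cyc_wq(struct mlx5_core_dev *mdev,
				 struct mlx5_wq_cyc *wq,
				 struct mlx5_wq_ctrl *wq_ctrl)
{
	u32 wqc[MLX5_ST_SZ_DW(wq)] = {0};
	struct mlx5_wq_param param = {
		.db_numa_node  = NUMA_NO_NODE,
		.buf_numa_node = NUMA_NO_NODE,
	};

	MLX5_SET(wq, wqc, log_wq_stride, 6);	/* 64-byte strides (example) */
	MLX5_SET(wq, wqc, log_wq_sz, 10);	/* 1024 entries (example) */

	return mlx5_wq_cyc_create(mdev, &param, wqc, wq, wq_ctrl);
}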
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	if (!sriov->enabled_vfs)
#ifdef CONFIG_MLX5_CORE_EN
		goto disable_sriov_resources;
#else
		return;
#endif

	for (vf = 0; vf < sriov->num_vfs; vf++) {
		if (!sriov->vfs_ctx[vf].enabled)
			continue;
		err = mlx5_core_disable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 0;
		sriov->enabled_vfs--;
	}

#ifdef CONFIG_MLX5_CORE_EN
disable_sriov_resources:
	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
#endif

	if (mlx5_wait_for_vf_pages(dev))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl)
{
	int err;

	mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
			       err);
		goto err_db_free;
	}

	wq->fbc.frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	if (!sriov->enabled_vfs)
		goto out;

	for (vf = 0; vf < sriov->num_vfs; vf++) {
		if (!sriov->vfs_ctx[vf].enabled)
			continue;
		err = mlx5_core_disable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 0;
		sriov->enabled_vfs--;
	}

out:
	if (MLX5_ESWITCH_MANAGER(dev))
		mlx5_eswitch_disable_sriov(dev->priv.eswitch);

	if (mlx5_wait_for_vf_pages(dev))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_destroy_dct_mbox_out out;
	struct mlx5_destroy_dct_mbox_in in;
	unsigned long flags;
	int err;

	/* Drain the DCT and wait for the DRAINED event before teardown. */
	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
		return err;
	}
	wait_for_completion(&dct->drained);

	mlx5_debug_dct_remove(dev, dct);

	spin_lock_irqsave(&table->lock, flags);
	if (radix_tree_delete(&table->tree, dct->dctn) != dct)
		mlx5_core_warn(dev, "dct delete differs\n");
	spin_unlock_irqrestore(&table->lock, flags);

	/* Drop the creation reference and wait for outstanding users. */
	if (atomic_dec_and_test(&dct->common.refcount))
		complete(&dct->common.free);
	wait_for_completion(&dct->common.free);

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
	in.dctn = cpu_to_be32(dct->dctn);

	return mlx5_cmd_exec_check_status(dev, (void *)&in, sizeof(in),
					  (void *)&out, sizeof(out));
}
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			 struct mlx5_create_dct_mbox_in *in)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_create_dct_mbox_out out;
	struct mlx5_destroy_dct_mbox_in din;
	struct mlx5_destroy_dct_mbox_out dout;
	int err;

	init_completion(&dct->drained);
	memset(&out, 0, sizeof(out));

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d", err);
		return err;
	}

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(dev, &out.hdr);

	dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;
	dct->common.res = MLX5_RES_DCT;

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, dct->dctn, dct);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d", err);
		goto err_cmd;
	}

	err = mlx5_debug_dct_add(dev, dct);
	if (err)
		mlx5_core_dbg(dev, "failed adding DCT 0x%x to debug file system\n",
			      dct->dctn);

	dct->pid = current->pid;
	atomic_set(&dct->common.refcount, 1);
	init_completion(&dct->common.free);

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
	din.dctn = cpu_to_be32(dct->dctn);
	/* Use the destroy mailbox's own output buffer, not "out". */
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

	return err;
}
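/*
 * Hedged lifecycle sketch tying mlx5_core_create_dct() above to
 * mlx5_core_destroy_dct(); illustrative only, not driver code. It assumes
 * the caller has already filled the create mailbox (DC access key, PD,
 * CQN, and friends) and set up any event handling it needs.
 */
static int example_dct_lifecycle(struct mlx5_core_dev *dev,
				 struct mlx5_core_dct *dct,
				 struct mlx5_create_dct_mbox_in *in)
{
	int err;

	err = mlx5_core_create_dct(dev, dct, in);
	if (err)
		return err;

	/* ... use the DCT; dct->dctn holds the 24-bit DCT number ... */

	return mlx5_core_destroy_dct(dev, dct);
}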
static int mlx5i_pkey_open(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;
	int err;

	mutex_lock(&epriv->state_lock);

	set_bit(MLX5E_STATE_OPENED, &epriv->state);

	err = mlx5i_init_underlay_qp(epriv);
	if (err) {
		mlx5_core_warn(mdev, "prepare child underlay qp state failed, %d\n",
			       err);
		goto err_release_lock;
	}

	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
	if (err) {
		mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n",
			       err);
		goto err_uninit_underlay_qp;
	}

	err = mlx5e_create_tis(mdev, 0 /* tc */, ipriv->qp.qpn,
			       &epriv->tisn[0]);
	if (err) {
		mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
		goto err_remove_rx_underlay_qp;
	}

	err = mlx5e_open_channels(epriv, &epriv->channels);
	if (err) {
		mlx5_core_warn(mdev, "opening child channels failed, %d\n",
			       err);
		goto err_destroy_tis;
	}
	mlx5e_refresh_tirs(epriv, false);
	mlx5e_activate_priv_channels(epriv);
	mutex_unlock(&epriv->state_lock);

	return 0;

err_destroy_tis:
	mlx5e_destroy_tis(mdev, epriv->tisn[0]);
err_remove_rx_underlay_qp:
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_uninit_underlay_qp:
	mlx5i_uninit_underlay_qp(epriv);
err_release_lock:
	clear_bit(MLX5E_STATE_OPENED, &epriv->state);
	mutex_unlock(&epriv->state_lock);

	return err;
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tstamp *tstamp = &priv->tstamp;
	u64 ns;
	u64 frac = 0;
	u32 dev_freq;

	mlx5e_timestamp_init_config(tstamp);
	dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
	if (!dev_freq) {
		mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}
	rwlock_init(&tstamp->lock);
	tstamp->cycles.read = mlx5e_read_internal_timer;
	tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
	tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
						   tstamp->cycles.shift);
	tstamp->nominal_c_mult = tstamp->cycles.mult;
	tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
	tstamp->mdev = priv->mdev;

	timecounter_init(&tstamp->clock, &tstamp->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	tstamp->overflow_period = ns;

	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
	if (tstamp->overflow_period)
		schedule_delayed_work(&tstamp->overflow_work, 0);
	else
		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");

	/* Configure the PHC */
	tstamp->ptp_info = mlx5e_ptp_clock_info;
	snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");

	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
					 &priv->mdev->pdev->dev);
	if (IS_ERR(tstamp->ptp)) {
		mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(tstamp->ptp));
		tstamp->ptp = NULL;
	}
}
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen)
{
	struct mlx5_create_qp_mbox_out out;
	struct mlx5_destroy_qp_mbox_in din;
	struct mlx5_destroy_qp_mbox_out dout;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "ret %d\n", err);
		return err;
	}

	if (out.hdr.status) {
		mlx5_core_warn(dev, "current num of QPs 0x%x\n",
			       atomic_read(&dev->num_qps));
		return mlx5_cmd_status_to_err(dev, &out.hdr);
	}

	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
	din.qpn = cpu_to_be32(qp->qpn);
	/* Use the destroy mailbox's own output buffer, not "out". */
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

	return err;
}
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
			       u64 *addr_list, size_t addr_list_len)
{
	void *in, *ctx;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		    MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
	int err;
	size_t i;
	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);

	if ((int)addr_list_len > max_list_sz) {
		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
			       (int)addr_list_len, max_list_sz);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_MC);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);

	/* The allowed-list entries share the current_uc_mac_address layout
	 * regardless of list type; for an MC list they carry MC MACs.
	 */
	for (i = 0; i < addr_list_len; i++) {
		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
						  current_uc_mac_address[i]);
		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
						 mac_addr_47_32);

		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
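/*
 * Hedged usage sketch for mlx5_set_nic_vport_mc_list() above; the helper
 * and addresses are illustrative. The list packs one MAC per u64 slot
 * (the function copies six bytes from the start of each slot), so a
 * caller can stage addresses as below. Vport 0 means the caller's own
 * vport.
 */
static int example_program_mc_list(struct mlx5_core_dev *mdev)
{
	u64 addrs[2] = {0};
	u8 mc1[ETH_ALEN] __aligned(2) = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u8 mc2[ETH_ALEN] __aligned(2) = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	ether_addr_copy((u8 *)&addrs[0], mc1);
	ether_addr_copy((u8 *)&addrs[1], mc2);

	return mlx5_set_nic_vport_mc_list(mdev, 0, addrs, ARRAY_SIZE(addrs));
}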
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct mlx5_hca_vport_context *in;
	int err = 0;

	/* Restore sriov guid and policy settings */
	if (sriov->vfs_ctx[vf].node_guid ||
	    sriov->vfs_ctx[vf].port_guid ||
	    sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
		in = kzalloc(sizeof(*in), GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		in->node_guid = sriov->vfs_ctx[vf].node_guid;
		in->port_guid = sriov->vfs_ctx[vf].port_guid;
		in->policy = sriov->vfs_ctx[vf].policy;
		in->field_select =
			!!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
			!!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
			!!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;

		err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
		if (err)
			mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n",
				       vf);
		kfree(in);
	}

	return err;
}
static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	if (pci_num_vf(pdev)) {
		mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
		return -EBUSY;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err)
		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);

	return err;
}
int mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_dct *dct;
	struct mlx5_core_qp *qp;

	if (!common)
		return -1;

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	case MLX5_RES_DCT:
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		else
			dct->event(dct, event_type);
		break;
	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}

	mlx5_core_put_rsc(common);

	return 0;
}
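/*
 * Hedged sketch of the event callback consumed by mlx5_rsc_event() above;
 * illustrative, not from the driver. The owner of a QP/RQ/SQ assigns
 * qp->event before events can be delivered, e.g. qp->event =
 * example_qp_event; event types come from the firmware event queue.
 */
static void example_qp_event(struct mlx5_core_qp *qp, int event_type)
{
	pr_warn("example: QP 0x%x got async event %d\n", qp->qpn, event_type);
}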
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
				     u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u8 *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);

	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context.permanent_address.mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	/* permanent_address is an 8-byte field; the MAC sits in its last
	 * six bytes, hence the +2 offset.
	 */
	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
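/*
 * Hedged usage sketch for mlx5_modify_nic_vport_mac_address() above;
 * vport number and MAC are illustrative. Vport 0 addresses the caller's
 * own vport; non-zero vports (VFs start at 1) set other_vport, as the
 * function does internally.
 */
static int example_set_vf_mac(struct mlx5_core_dev *mdev)
{
	u8 mac[ETH_ALEN] __aligned(2) = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	return mlx5_modify_nic_vport_mac_address(mdev, 1, mac);
}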
int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
				   enum mlx5_local_lb_selection selection,
				   u8 value)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);

	if (selection == MLX5_LOCAL_MC_LB) {
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.disable_mc_local_lb, value);
	} else {
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.disable_uc_local_lb, value);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in)
{
	int err;
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	if (in->type == IB_SRQT_XRC)
		srq->common.res = MLX5_RES_XSRQ;
	else
		srq->common.res = MLX5_RES_SRQ;

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
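/*
 * Hedged lifecycle sketch pairing mlx5_core_create_srq() above with
 * mlx5_core_destroy_srq(); illustrative only. It assumes the caller has
 * filled the mlx5_srq_attr (type, log_size, wqe_shift, PD, and so on)
 * before creation.
 */
static int example_srq_lifecycle(struct mlx5_core_dev *dev,
				 struct mlx5_core_srq *srq,
				 struct mlx5_srq_attr *attr)
{
	int err;

	err = mlx5_core_create_srq(dev, srq, attr);
	if (err)
		return err;

	/* ... post receive WQEs / attach QPs to srq->srqn here ... */

	return mlx5_core_destroy_srq(dev, srq);
}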
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
{
	struct mlx5_mr_table *table = &dev->priv.mr_table;
	struct mlx5_destroy_mkey_mbox_in in;
	struct mlx5_destroy_mkey_mbox_out out;
	struct mlx5_core_mr *deleted_mr;
	unsigned long flags;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));

	write_lock_irqsave(&table->lock, flags);
	deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
	write_unlock_irqrestore(&table->lock, flags);
	if (!deleted_mr) {
		mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
			       mlx5_base_mkey(mr->key));
		return -ENOENT;
	}

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
	in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	return err;
}
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);

	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

	return err;
}
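/*
 * Hedged usage sketch for mlx5_modify_vport_admin_state() above, bringing
 * the caller's own vNIC vport up. The MLX5_VPORT_STATE_OP_MOD_* and
 * MLX5_VPORT_ADMIN_STATE_* names follow recent mlx5 vport/ifc headers;
 * older trees spell them differently, so treat the constants here as an
 * assumption.
 */
static int example_vport_up(struct mlx5_core_dev *mdev)
{
	return mlx5_modify_vport_admin_state(mdev,
					     MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					     0, MLX5_VPORT_ADMIN_STATE_UP);
}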
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *qpc, struct mlx5_wq_qp *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	u16 sq_strides_offset;
	u32 rq_pg_remainder;
	int err;

	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
		      MLX5_GET(qpc, qpc, log_rq_size),
		      &wq->rq.fbc);

	/* The SQ follows the RQ in the same buffer; express the RQ's last
	 * partial page as an offset in SQ stride (WQEBB) units.
	 */
	rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
	sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;

	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
			     MLX5_GET(qpc, qpc, log_sq_size),
			     sq_strides_offset,
			     &wq->sq.fbc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
			       err);
		goto err_db_free;
	}

	mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);

	wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
	wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, u32 vport,
			       u16 vlans[], int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Size the output buffer for the query response plus the list. */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);

	return err;
}
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		    MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	/* VLAN entries reuse the current_uc_mac_address slots, written
	 * through the vlan_layout view.
	 */
	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					     current_uc_mac_address[i]);

		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);

	return err;
}
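/*
 * Hedged round-trip sketch for mlx5_query_nic_vport_vlans() and
 * mlx5_set_nic_vport_vlan_list() above; vport number, buffer size and
 * VLAN IDs are illustrative. Note the query shrinks *size to what the
 * device actually returned.
 */
static int example_vlan_roundtrip(struct mlx5_core_dev *dev)
{
	u16 vlans[8];
	int size = ARRAY_SIZE(vlans);
	u16 new_vlans[2] = { 100, 200 };
	int err;

	err = mlx5_query_nic_vport_vlans(dev, 1, vlans, &size);
	if (err)
		return err;

	return mlx5_set_nic_vport_vlan_list(dev, 1, new_vlans,
					    ARRAY_SIZE(new_vlans));
}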
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	if (sriov->enabled_vfs) {
		mlx5_core_warn(dev,
			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
			       sriov->enabled_vfs);
		return -EBUSY;
	}

	if (!MLX5_ESWITCH_MANAGER(dev))
		goto enable_vfs_hca;

	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
	if (err) {
		mlx5_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err);
		return err;
	}

enable_vfs_hca:
	for (vf = 0; vf < num_vfs; vf++) {
		err = mlx5_core_enable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 1;
		sriov->enabled_vfs++;
		if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
			err = sriov_restore_guids(dev, vf);
			if (err) {
				mlx5_core_warn(dev,
					       "failed to restore VF %d settings, err %d\n",
					       vf, err);
				continue;
			}
		}
		mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf);
	}

	return 0;
}
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
	struct mlx5_wqe_srq_next_seg *next_seg;
	int err;
	int i;

	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
		      MLX5_GET(wq, wqc, log_wq_sz),
		      fbc);

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
		return err;
	}

	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
				       &wq_ctrl->buf, param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
			       err);
		goto err_db_free;
	}

	wq->fbc.frag_buf = wq_ctrl->buf;
	wq->db = wq_ctrl->db.db;

	/* Chain each WQE to its successor; the last entry's next index is
	 * left for the caller to maintain through wq->tail_next.
	 */
	for (i = 0; i < fbc->sz_m1; i++) {
		next_seg = mlx5_wq_ll_get_wqe(wq, i);
		next_seg->next_wqe_index = cpu_to_be16(i + 1);
	}
	next_seg = mlx5_wq_ll_get_wqe(wq, i);
	wq->tail_next = &next_seg->next_wqe_index;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_rule *flow_rule;
	int err;
	int i;

	err = mlx5e_create_direct_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_direct_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
		goto err_destroy_direct_rqts;
	}

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      priv->direct_tir[0].tirn);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto err_destroy_direct_tirs;
	}
	rep->vport_rx_rule = flow_rule;

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_del_flow_rule;

	return 0;

err_del_flow_rule:
	mlx5_del_flow_rule(rep->vport_rx_rule);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);

	return err;
}
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_wqe_srq_next_seg *next_seg;
	int max_direct = param->linear ? INT_MAX : 0;
	int err;
	int i;

	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;

	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
				  max_direct, &wq_ctrl->buf,
				  param->buf_numa_node);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
		goto err_db_free;
	}

	wq->buf = wq_ctrl->buf.direct.buf;
	wq->db = wq_ctrl->db.db;

	for (i = 0; i < wq->sz_m1; i++) {
		next_seg = mlx5_wq_ll_get_wqe(wq, i);
		next_seg->next_wqe_index = cpu_to_be16(i + 1);
	}
	next_seg = mlx5_wq_ll_get_wqe(wq, i);
	wq->tail_next = &next_seg->next_wqe_index;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}
static int mlx5i_open(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;
	int err;

	mutex_lock(&epriv->state_lock);

	set_bit(MLX5E_STATE_OPENED, &epriv->state);

	err = mlx5i_init_underlay_qp(epriv);
	if (err) {
		mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n",
			       err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
	if (err) {
		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n",
			       err);
		goto err_reset_qp;
	}

	err = mlx5e_open_channels(epriv, &epriv->channels);
	if (err)
		goto err_remove_fs_underlay_qp;

	mlx5e_refresh_tirs(epriv, false);
	mlx5e_activate_priv_channels(epriv);
	mlx5e_timestamp_set(epriv);

	mutex_unlock(&epriv->state_lock);

	return 0;

err_remove_fs_underlay_qp:
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_reset_qp:
	mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &epriv->state);
	mutex_unlock(&epriv->state_lock);

	return err;
}
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  struct mlx5_create_mkey_mbox_in *in, int inlen,
			  mlx5_cmd_cbk_t callback, void *context,
			  struct mlx5_create_mkey_mbox_out *out)
{
	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
	struct mlx5_create_mkey_mbox_out lout;
	int err;
	u8 key;

	memset(&lout, 0, sizeof(lout));

	/* Tag this mkey with a per-device rolling 8-bit variant key. */
	spin_lock_irq(&dev->priv.mkey_lock);
	key = dev->priv.mkey_key++;
	spin_unlock_irq(&dev->priv.mkey_lock);

	in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
	if (callback) {
		err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
				       callback, context);
		return err;
	} else {
		err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
	}

	if (err) {
		mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
		return err;
	}

	if (lout.hdr.status) {
		mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
		return mlx5_cmd_status_to_err(&lout.hdr);
	}

	mkey->iova = be64_to_cpu(in->seg.start_addr);
	mkey->size = be64_to_cpu(in->seg.len);
	mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
	mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;

	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
		      be32_to_cpu(lout.mkey), key, mkey->key);

	/* connect to mkey tree */
	write_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
	write_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
			       mlx5_base_mkey(mkey->key), err);
		mlx5_core_destroy_mkey(dev, mkey);
	}

	return err;
}
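/*
 * Hedged usage sketch for mlx5_core_create_mkey() above; illustrative
 * only. Passing a NULL callback selects the synchronous path, in which
 * case the trailing "out" mailbox pointer is unused and may be NULL.
 * The caller is assumed to have filled the create mailbox (PD, access
 * flags, start address, length) beforehand.
 */
static int example_create_mkey_sync(struct mlx5_core_dev *dev,
				    struct mlx5_core_mkey *mkey,
				    struct mlx5_create_mkey_mbox_in *in,
				    int inlen)
{
	return mlx5_core_create_mkey(dev, mkey, in, inlen, NULL, NULL, NULL);
}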
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	return 0;
}