int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, struct mlx5_create_dct_mbox_in *in) { struct mlx5_qp_table *table = &dev->priv.qp_table; struct mlx5_create_dct_mbox_out out; struct mlx5_destroy_dct_mbox_in din; struct mlx5_destroy_dct_mbox_out dout; int err; init_completion(&dct->drained); memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT); err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "create DCT failed, ret %d", err); return err; } if (out.hdr.status) return mlx5_cmd_status_to_err(dev, &out.hdr); dct->dctn = be32_to_cpu(out.dctn) & 0xffffff; dct->common.res = MLX5_RES_DCT; spin_lock_irq(&table->lock); err = radix_tree_insert(&table->tree, dct->dctn, dct); spin_unlock_irq(&table->lock); if (err) { mlx5_core_warn(dev, "err %d", err); goto err_cmd; } err = mlx5_debug_dct_add(dev, dct); if (err) mlx5_core_dbg(dev, "failed adding DCT 0x%x to debug file system\n", dct->dctn); dct->pid = current->pid; atomic_set(&dct->common.refcount, 1); init_completion(&dct->common.free); return 0; err_cmd: memset(&din, 0, sizeof(din)); memset(&dout, 0, sizeof(dout)); din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT); din.dctn = cpu_to_be32(dct->dctn); mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); return err; }
int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_create_qp_mbox_in *in, int inlen) { struct mlx5_create_qp_mbox_out out; struct mlx5_destroy_qp_mbox_in din; struct mlx5_destroy_qp_mbox_out dout; int err; memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "ret %d\n", err); return err; } if (out.hdr.status) { mlx5_core_warn(dev, "current num of QPs 0x%x\n", atomic_read(&dev->num_qps)); return mlx5_cmd_status_to_err(dev, &out.hdr); } qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); err = create_qprqsq_common(dev, qp, MLX5_RES_QP); if (err) goto err_cmd; err = mlx5_debug_qp_add(dev, qp); if (err) mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", qp->qpn); atomic_inc(&dev->num_qps); return 0; err_cmd: memset(&din, 0, sizeof(din)); memset(&dout, 0, sizeof(dout)); din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); din.qpn = cpu_to_be32(qp->qpn); mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); return err; }
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, const u8 *mac, bool vlan, u16 vlan_id) { #define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v) u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0}; void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address); char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr, source_l3_address); void *addr_mac = MLX5_ADDR_OF(roce_addr_layout, in_addr, source_mac_47_32); int gidsz = MLX5_FLD_SZ_BYTES(roce_addr_layout, source_l3_address); if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return -EINVAL; if (gid) { if (vlan) { MLX5_SET_RA(in_addr, vlan_valid, 1); MLX5_SET_RA(in_addr, vlan_id, vlan_id); } ether_addr_copy(addr_mac, mac); MLX5_SET_RA(in_addr, roce_version, roce_version); MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type); memcpy(addr_l3_addr, gid, gidsz); } MLX5_SET(set_roce_address_in, in, roce_address_index, index); MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
/* mlx5_query_vport_down_stats() - Read the vport-down discard counters.
 *
 * Queries the vNIC environment of @vport (other_vport is set for any
 * non-zero vport) and returns the RX/TX discard-while-down counters via
 * the output pointers. Returns 0 or a negative errno.
 */
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
	int ret;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vnic_env_in, in, other_vport, 1);

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.receive_discard_vport_down);
	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.transmit_discard_vport_down);
	return 0;
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out) { u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; u32 *xrcsrq_out; void *xrc_srqc; int err; xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL); if (!xrcsrq_out) return -ENOMEM; memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out)); if (err) goto out; xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, xrc_srq_context_entry); get_srqc(xrc_srqc, out); if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD) out->flags |= MLX5_SRQ_FLAG_ERR; out: kvfree(xrcsrq_out); return err; }
/* mlx5_core_query_vport_counter() - Run QUERY_VPORT_COUNTER into @out.
 *
 * Querying another vport (@other_vport) requires the vport group manager
 * capability; otherwise -EPERM is returned. The vport number queried for
 * VF @vf is vf + 1. port_num is only meaningful on dual-port devices.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	void *in;
	int err;

	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (!MLX5_CAP_GEN(dev, vport_group_manager)) {
			err = -EPERM;
			goto free;
		}
		MLX5_SET(query_vport_counter_in, in, other_vport, 1);
		MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in) { u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0}; void *create_in; void *srqc; void *pas; int pas_size; int inlen; int err; pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size; create_in = mlx5_vzalloc(inlen); if (!create_in) return -ENOMEM; srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry); pas = MLX5_ADDR_OF(create_srq_in, create_in, pas); set_srqc(srqc, in); memcpy(pas, in->pas, pas_size); MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ); err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out)); kvfree(create_in); if (!err) srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); return err; }
/* mlx5_query_hca_vport_context() - Run QUERY_HCA_VPORT_CONTEXT into @out.
 *
 * Querying another vport (non-zero @vport_num) requires the vport group
 * manager capability; otherwise -EPERM. port_num is only set on
 * dual-port devices.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev, u8 port_num,
				 u8 vport_num, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};

	MLX5_SET(query_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (vport_num) {
		if (!MLX5_CAP_GEN(mdev, vport_group_manager))
			return -EPERM;
		MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
		MLX5_SET(query_hca_vport_context_in, in, vport_number,
			 vport_num);
	}

	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out) { u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0}; u32 *srq_out; void *srqc; int outlen = MLX5_ST_SZ_BYTES(query_srq_out); int err; srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out)); if (!srq_out) return -ENOMEM; MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ); MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn); err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, outlen); if (err) goto out; srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry); get_srqc(srqc, out); if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD) out->flags |= MLX5_SRQ_FLAG_ERR; out: kvfree(srq_out); return err; }
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) { struct mlx5_mr_table *table = &dev->priv.mr_table; struct mlx5_destroy_mkey_mbox_in in; struct mlx5_destroy_mkey_mbox_out out; struct mlx5_core_mr *deleted_mr; unsigned long flags; int err; memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); write_lock_irqsave(&table->lock, flags); deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); write_unlock_irqrestore(&table->lock, flags); if (!deleted_mr) { mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", mlx5_base_mkey(mr->key)); return -ENOENT; } in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); return err; }
/* mlx5_core_modify_rmp() - Stamp the MODIFY_RMP opcode into the
 * caller-built command @in and execute it.
 */
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
	u32 dout[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};

	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
	return mlx5_cmd_exec(dev, in, inlen, dout, sizeof(dout));
}
/* mlx5_core_destroy_psv() - Destroy a PSV (protection signature value)
 * object by number.
 *
 * Returns 0 on success, a negative errno on command failure, or the
 * translated firmware status on a bad status.
 */
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
	struct mlx5_destroy_psv_in in;
	struct mlx5_destroy_psv_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));

	in.psv_number = cpu_to_be32(psv_num);
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err) {
		mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
		return err;
	}

	if (out.hdr.status) {
		mlx5_core_err(dev, "destroy_psv bad status %d\n",
			      out.hdr.status);
		return mlx5_cmd_status_to_err(&out.hdr);
	}

	return 0;
}
/* mlx5_core_create_psv() - Allocate up to MLX5_MAX_PSVS PSV objects on
 * protection domain @pdn.
 *
 * The 24-bit PSV indices are written to @sig_index. Returns 0 on
 * success, -EINVAL for too many PSVs, or a command/status error.
 */
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs,
			 u32 *sig_index)
{
	struct mlx5_allocate_psv_in in;
	struct mlx5_allocate_psv_out out;
	int err;
	int i;

	if (npsvs > MLX5_MAX_PSVS)
		return -EINVAL;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
	/* npsv count lives in the top nibble, the PD in the low 24 bits */
	in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err) {
		mlx5_core_err(dev, "cmd exec failed %d\n", err);
		return err;
	}

	if (out.hdr.status) {
		mlx5_core_err(dev, "create_psv bad status %d\n",
			      out.hdr.status);
		return mlx5_cmd_status_to_err(&out.hdr);
	}

	for (i = 0; i < npsvs; i++)
		sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;

	return err;
}
/* mlx5_core_modify_hca_vport_context() - Push the fields of @req into the
 * HCA vport context of VF @vf on port @port_num.
 *
 * Modifying another vport (@other_vport) requires the vport group manager
 * capability; otherwise -EPERM. port_num is only set on multi-port
 * devices. Returns 0 or a negative errno.
 */
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)] = {0};
	void *ctx;
	void *in;
	int err;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (!MLX5_CAP_GEN(dev, vport_group_manager)) {
			err = -EPERM;
			goto out_free;
		}
		MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
		MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	/* Copy every field of the request into the command context */
	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select,
		 req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply,
		 req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter,
		 req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter,
		 req->pkey_violation_counter);

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
out_free:
	kfree(in);
	return err;
}
/* mlx5_core_set_dc_cnak_trace() - Enable or disable DC CNAK tracing.
 *
 * @enable: non-zero turns tracing on (bit 7 of the enable byte).
 * @addr:   physical address of the trace buffer.
 * Returns 0 or a negative errno / translated firmware status.
 */
int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
				u64 addr)
{
	struct mlx5_cmd_set_dc_cnak_mbox_in *in;
	struct mlx5_cmd_set_dc_cnak_mbox_out out;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_DC_CNAK_TRACE);
	in->enable = !!enable << 7;	/* enable flag lives in bit 7 */
	in->pa = cpu_to_be64(addr);

	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (!err && out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);

	kfree(in);
	return err;
}
/* mlx5_arm_vport_context_events() - Arm the NIC vport context change-event
 * machinery for @vport.
 *
 * @events_mask selects which address-change events (UC_ADDR_CHANGE,
 * MC_ADDR_CHANGE) generate events. Returns 0 or a negative errno.
 *
 * Fix: the command mailboxes were declared as int arrays; use u32 like
 * every other firmware mailbox in this file (same size and layout).
 */
static int mlx5_arm_vport_context_events(struct mlx5_core_dev *dev, int vport,
					 u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int err;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err_v2(out);
}
/* mlx5_query_nic_vport_vlans() - Read the allowed VLAN list of @vport.
 *
 * On entry *@size holds the caller's list capacity (clamped to the
 * device's log_max_vlan_list); on return it holds the number of VLANs
 * actually written to @vlans. Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, u16 vport,
			       u16 vlans[], int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Fix: size the output buffer by the *query* output layout, which is
	 * what the response is parsed with below — the original sized it by
	 * modify_nic_vport_context_in, a different structure.
	 */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* VLAN entries share the current_uc_mac_address slots in the
		 * NIC vport context allowed-list layout.
		 */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
					       current_uc_mac_address[i]);

		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
/* mlx5_cmd_query_adapter() - Run QUERY_ADAPTER into the caller's buffer. */
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
				  int outlen)
{
	u32 query_in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};

	MLX5_SET(query_adapter_in, query_in, opcode,
		 MLX5_CMD_OP_QUERY_ADAPTER);
	return mlx5_cmd_exec(dev, query_in, sizeof(query_in), out, outlen);
}
/* mlx5_cmd_teardown_hca() - Issue TEARDOWN_HCA to the device. */
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 din[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};

	MLX5_SET(teardown_hca_in, din, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	return mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, struct mlx5_create_cq_mbox_in *in, int inlen) { int err; struct mlx5_cq_table *table = &dev->priv.cq_table; struct mlx5_create_cq_mbox_out out; struct mlx5_destroy_cq_mbox_in din; struct mlx5_destroy_cq_mbox_out dout; in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); memset(&out, 0, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) return err; if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; cq->cons_index = 0; cq->arm_sn = 0; atomic_set(&cq->refcount, 1); init_completion(&cq->free); spin_lock_irq(&table->lock); err = radix_tree_insert(&table->tree, cq->cqn, cq); spin_unlock_irq(&table->lock); if (err) goto err_cmd; cq->pid = current->pid; err = mlx5_debug_cq_add(dev, cq); if (err) mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", cq->cqn); return 0; err_cmd: memset(&din, 0, sizeof(din)); memset(&dout, 0, sizeof(dout)); din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); return err; }
/* mlx5_core_destroy_xsrq() - Destroy XRC SRQ @xsrqn in firmware. */
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
{
	u32 din[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

	MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, xsrqn);
	MLX5_SET(destroy_xrc_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
	return mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}
/* mlx5_core_modify_tir() - Stamp the TIR number and MODIFY_TIR opcode into
 * the caller-built command @in and execute it.
 */
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
			 int inlen)
{
	u32 dout[MLX5_ST_SZ_DW(modify_tir_out)] = {0};

	MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
	MLX5_SET(modify_tir_in, in, tirn, tirn);
	return mlx5_cmd_exec(dev, in, inlen, dout, sizeof(dout));
}
/* mlx5_modify_nic_vport_context() - Common tail for NIC vport context
 * updates: stamps the opcode into the caller-built command and runs it.
 */
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 dout[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, dout, sizeof(dout));
}
/* mlx5_core_query_rmp() - Run QUERY_RMP for @rmpn into @out, which must
 * be at least MLX5_ST_SZ_BYTES(query_rmp_out) bytes.
 */
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
{
	int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
	u32 qin[MLX5_ST_SZ_DW(query_rmp_in)] = {0};

	MLX5_SET(query_rmp_in, qin, rmpn, rmpn);
	MLX5_SET(query_rmp_in, qin, opcode, MLX5_CMD_OP_QUERY_RMP);
	return mlx5_cmd_exec(dev, qin, sizeof(qin), out, outlen);
}
/* mlx5_core_destroy_tis() - Destroy TIS @tisn; errors are ignored. */
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
	u32 din[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};

	MLX5_SET(destroy_tis_in, din, tisn, tisn);
	MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}
/* mlx5_core_destroy_rq() - Destroy RQ @rqn; errors are ignored. */
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
	u32 din[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};

	MLX5_SET(destroy_rq_in, din, rqn, rqn);
	MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}
/* mlx5_cmd_destroy_vport_lag() - Issue DESTROY_VPORT_LAG to the device. */
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 din[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};

	MLX5_SET(destroy_vport_lag_in, din, opcode,
		 MLX5_CMD_OP_DESTROY_VPORT_LAG);
	return mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}
/* del_l2table_entry_cmd() - Remove L2 table entry @index in firmware. */
static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
	u32 din[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};

	MLX5_SET(delete_l2_table_entry_in, din, table_index, index);
	MLX5_SET(delete_l2_table_entry_in, din, opcode,
		 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
	return mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}
/* mlx5_cmd_free_uar() - Return UAR @uarn to the device. */
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 din[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};

	MLX5_SET(dealloc_uar_in, din, uar, uarn);
	MLX5_SET(dealloc_uar_in, din, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	return mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}
/* mlx5_core_destroy_rmp() - Destroy RMP @rmpn in firmware. */
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
{
	u32 din[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};

	MLX5_SET(destroy_rmp_in, din, rmpn, rmpn);
	MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
	return mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
}