int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	memset(out, 0, sizeof(out));
	MLX5_SET(modify_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		/* Only the vport group manager may modify another vport. */
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in,
				 other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in,
				 vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select,
		 req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply,
		 req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter,
		 req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter,
		 req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
	kfree(in);
	return err;
}
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context,
		   node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq, wq, wq_signature,
		 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
	/* wq.log_wq_stride is log2 of the stride in bytes, while wqe_shift
	 * is kept in srqc log_rq_stride units, offset by 4 (see the same
	 * +4/-4 conversion in rmpc_srqc_reformat() below). */
	MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq, wq, log_wq_sz, in->log_size);
	MLX5_SET(wq, wq, page_offset, in->page_offset);
	MLX5_SET(wq, wq, lwm, in->lwm);
	MLX5_SET(wq, wq, pd, in->pd);
	MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc, srqc, wq_signature,
		 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
	MLX5_SET(srqc, srqc, page_offset, in->page_offset);
	MLX5_SET(srqc, srqc, lwm, in->lwm);
	MLX5_SET(srqc, srqc, pd, in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
	MLX5_SET(srqc, srqc, xrcd, in->xrcd);
	MLX5_SET(srqc, srqc, cqn, in->cqn);
}
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
	void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	if (srqc_to_rmpc) {
		switch (MLX5_GET(srqc, srqc, state)) {
		case MLX5_SRQC_STATE_GOOD:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
			break;
		case MLX5_SRQC_STATE_ERROR:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
			break;
		default:
			printf("mlx5_core: WARN: %s: %d: Unknown srq state = 0x%x\n",
			       __func__, __LINE__, MLX5_GET(srqc, srqc, state));
		}

		MLX5_SET(wq, wq, wq_signature,
			 MLX5_GET(srqc, srqc, wq_signature));
		MLX5_SET(wq, wq, log_wq_pg_sz,
			 MLX5_GET(srqc, srqc, log_page_size));
		MLX5_SET(wq, wq, log_wq_stride,
			 MLX5_GET(srqc, srqc, log_rq_stride) + 4);
		MLX5_SET(wq, wq, log_wq_sz,
			 MLX5_GET(srqc, srqc, log_srq_size));
		MLX5_SET(wq, wq, page_offset,
			 MLX5_GET(srqc, srqc, page_offset));
		MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
		MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
		MLX5_SET64(wq, wq, dbr_addr,
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_h)) << 32 |
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_l)) << 2);
	} else {
		switch (MLX5_GET(rmpc, rmpc, state)) {
		case MLX5_RMPC_STATE_RDY:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
			break;
		case MLX5_RMPC_STATE_ERR:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
			break;
		default:
			printf("mlx5_core: WARN: %s: %d: Unknown rmp state = 0x%x\n",
			       __func__, __LINE__, MLX5_GET(rmpc, rmpc, state));
		}

		MLX5_SET(srqc, srqc, wq_signature,
			 MLX5_GET(wq, wq, wq_signature));
		MLX5_SET(srqc, srqc, log_page_size,
			 MLX5_GET(wq, wq, log_wq_pg_sz));
		MLX5_SET(srqc, srqc, log_rq_stride,
			 MLX5_GET(wq, wq, log_wq_stride) - 4);
		MLX5_SET(srqc, srqc, log_srq_size,
			 MLX5_GET(wq, wq, log_wq_sz));
		MLX5_SET(srqc, srqc, page_offset,
			 MLX5_GET(wq, wq, page_offset));
		MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
		MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
		MLX5_SET(srqc, srqc, db_record_addr_h,
			 MLX5_GET64(wq, wq, dbr_addr) >> 32);
		MLX5_SET(srqc, srqc, db_record_addr_l,
			 (MLX5_GET64(wq, wq, dbr_addr) >> 2) & 0x3fffffff);
	}
}
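/*
 * Standalone sketch (not part of the driver): illustrates the two doorbell
 * record address encodings converted by rmpc_srqc_reformat() above. The wq
 * context carries the full 64-bit address in dbr_addr, while the srqc
 * context splits it into db_record_addr_h (bits 63:32) and db_record_addr_l
 * (bits 31:2 of the address, i.e. the address shifted right by 2). The round
 * trip below is exact for any 4-byte-aligned address; all variable names are
 * local to this sketch.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dbr_addr = 0x123456789abcULL; /* sample 4-byte-aligned address */

	/* Split, as in the rmpc-to-srqc branch above. */
	uint32_t h = (uint32_t)(dbr_addr >> 32);
	uint32_t l = (uint32_t)((dbr_addr >> 2) & 0x3fffffff);

	/* Recombine, as in the srqc-to-rmpc branch above. */
	uint64_t roundtrip = ((uint64_t)h << 32) | ((uint64_t)l << 2);

	assert(roundtrip == dbr_addr);
	return 0;
}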
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	/* The inbox is the base command layout followed by the PAS array,
	 * one 64-bit physical address per page of the CQ buffer. */
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->wq_ctrl.buf.page_shift - PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, user_index, sq->tc);
	MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz,
		 sq->wq_ctrl.buf.page_shift - PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_pg_sz,
		 rq->wq_ctrl.buf.page_shift - PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 port_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.port_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context,
		   port_guid, port_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}