Code Example #1
File: cq.c Project: amaumene/mlnx-en-dkms
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A);
}
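Two details worth noting. This tree's mlx4_cmd() takes six arguments; SR-IOV-aware kernels add a seventh (MLX4_CMD_WRAPPED/MLX4_CMD_NATIVE), which is why later examples on this page differ. And dev->caps.function is OR'd into the mailbox DMA address: mailboxes are page-aligned, so the low bits are free to carry the issuing function number in this multi-function tree. For context, here is a minimal sketch of the surrounding flow, modeled on mlx4_cq_alloc() in the same file (example_cq_to_hw() is an illustrative name; the context setup is elided):

static int example_cq_to_hw(struct mlx4_dev *dev, int cq_num)
{
	/* Illustrative helper, not part of the tree. */
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	/* Mailboxes are DMA-coherent command buffers, zeroed on alloc. */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	/* ... fill in log size, MTT base address, completion EQ, ... */

	err = mlx4_SW2HW_CQ(dev, mailbox, cq_num);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}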
Code Example #2
File: en_selftest.c Project: Addision/LVS
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
	return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
			MLX4_CMD_TIME_CLASS_A);
}
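HW_HEALTH_CHECK needs no mailbox, so the in_param, in_modifier, and op_modifier slots are all zero. A hedged sketch of how the ethtool self-test path might call this (mlx4_en_test_registers() is static in en_selftest.c; the helper name and result layout here are illustrative):

static void example_run_selftest(struct mlx4_en_priv *priv, u64 *buf)
{
	/* Illustrative: 0 on success, non-zero marks the test failed. */
	buf[0] = mlx4_en_test_registers(priv);
	if (buf[0])
		en_warn(priv, "register self-test failed\n");
}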
Code Example #3
File: srq.c Project: mdamt/linux
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
	return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
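Arming an SRQ programs a limit watermark: when the number of posted receive entries drops below it, the HCA raises an SRQ limit event. No mailbox is needed, so the watermark travels in the in_param slot. The mainline driver exposes this through a one-line wrapper:

int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq,
		 int limit_watermark)
{
	return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
}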
Code Example #4
File: cq.c Project: u9621071/kernel-uek-UEK3
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
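Here opmod selects the sub-operation. In the mainline driver, mlx4_cq_modify() passes opmod 1 to update interrupt moderation, filling only the count/period fields of a fresh context (mlx4_cq_resize() passes opmod 0 with a full context):

int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}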
Code Example #5
File: srq.c Project: xf739645524/kernel-rhel5
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
			MLX4_CMD_TIME_CLASS_A);
}
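The teardown mirror of this command is HW2SW_SRQ, where the mailbox (if present) is an output buffer, so the mainline driver issues it through mlx4_cmd_box() instead. A sketch modeled on mainline srq.c (this RHEL5-era tree predates the MLX4_CMD_WRAPPED argument):

static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	/* With no mailbox, op_modifier=1 asks the FW not to return
	 * the SRQ context. */
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}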
Code Example #6
File: eq.c Project: BarrelfishOS/barrelfish
static int mlx4_SW2HW_EQ(struct mlx4_priv *priv,
		struct mlx4_cmd_mailbox *mailbox, int eq_num) {
	return mlx4_cmd(&priv->dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
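The Barrelfish port threads mlx4_priv rather than mlx4_dev through these helpers, hence the &priv->dev dereference. A compressed sketch of the EQ creation step that leads up to this call, following the Linux mlx4_create_eq() (example_eq_to_hw() is an illustrative name; most context setup is elided):

static int example_eq_to_hw(struct mlx4_priv *priv, int eq_num)
{
	/* Illustrative helper, not part of the tree. */
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *ctx;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(&priv->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ctx = mailbox->buf;
	/* EQs are created armed so they report events immediately. */
	ctx->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | MLX4_EQ_STATE_ARMED);
	/* ... log_eq_size, MTT address, interrupt vector, ... */

	err = mlx4_SW2HW_EQ(priv, mailbox, eq_num);
	mlx4_free_cmd_mailbox(&priv->dev, mailbox);
	return err;
}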
Code Example #7
File: eq.c Project: BarrelfishOS/barrelfish
/* (Omitted: several hundred lines of the Linux driver's master/slave
 * event machinery that the Barrelfish port carries, commented out, at
 * this point in eq.c: the EQE ring accessors get_eqe()/next_eqe_sw(),
 * slave event generation and forwarding, slave port-state tracking via
 * set_and_calc_slave_port_state(), slave FLR handling, the main
 * mlx4_eq_int() dispatch loop, and the INTx/MSI-X interrupt handlers.) */
static int mlx4_MAP_EQ(struct mlx4_priv *priv, u64 event_mask, int unmap,
		int eq_num) {
	return mlx4_cmd(&priv->dev, event_mask, (unmap << 31) | eq_num, 0,
			MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
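MAP_EQ binds a set of event types to an EQ, one bit of event_mask per type; setting bit 31 of the in_modifier (the unmap flag shifted into place) detaches them instead. A hedged usage sketch in the Linux driver's style (MLX4_ASYNC_EVENT_MASK is the Linux eq.c mask of all async event types; the helper name is illustrative):

static int example_map_async_events(struct mlx4_priv *priv, int async_eqn)
{
	/* Illustrative: route every async event to the dedicated EQ;
	 * passing unmap=1 instead would detach them on teardown. */
	return mlx4_MAP_EQ(priv, MLX4_ASYNC_EVENT_MASK, 0, async_eqn);
}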
Code Example #8
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
}
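Bit 63 of the in_param flags a filter flush, the low bits carry the MAC, and op_modifier selects the filter mode. A hedged sketch of the reload sequence used by the mlx4_en rx-mode path (the MLX4_MCAST_DISABLE/CONFIG/ENABLE constants are from the Linux mlx4_en driver; the helper name and single-address flow are illustrative):

static int example_reload_mcast_filter(struct mlx4_en_priv *priv, u64 mac)
{
	/* Illustrative helper, not part of the tree. */
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	/* clear=1 flushes the old filter while programming this entry. */
	err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mac, 1,
				  MLX4_MCAST_CONFIG);
	if (err)
		return err;

	/* Re-enable filtering once the list is programmed. */
	return mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 0,
				   MLX4_MCAST_ENABLE);
}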