static int csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln) { struct csio_lnode *pln; struct csio_mb *mbp; struct fw_fcoe_vnp_cmd *rsp; int ret = 0; int retry = 0; /* Issue VNP cmd to free vport */ /* Allocate Mbox request */ spin_lock_irq(&hw->lock); mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); if (!mbp) { CSIO_INC_STATS(hw, n_err_nomem); ret = -ENOMEM; goto out; } pln = ln->pln; csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, ln->fcf_flowid, ln->vnp_flowid, NULL); for (retry = 0; retry < 3; retry++) { ret = csio_mb_issue(hw, mbp); if (ret != -EBUSY) break; /* Retry if mbox returns busy */ spin_unlock_irq(&hw->lock); msleep(2000); spin_lock_irq(&hw->lock); } if (ret) { csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n"); goto out_free; } /* Process Mbox response of VNP command */ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n", FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16))); ret = -EINVAL; } out_free: mempool_free(mbp, hw->mb_mempool); out: spin_unlock_irq(&hw->lock); return ret; }
/*
 * csio_fcoe_alloc_vnp - Allocate a virtual N_Port (VNP) flow for an lnode.
 * @hw: HW module
 * @ln: child lnode (vport) to allocate a VNP flow for
 *
 * Issues a FW_FCOE_VNP_CMD (alloc) mailbox command in immediate mode with
 * the HW lock held, inheriting the FCF flowid and port id from the parent
 * lnode (ln->pln).  On success the firmware-assigned VNP flowid and the
 * vnport WWNN/WWPN returned by FW are recorded in @ln.
 *
 * Returns 0 on success, -ENOMEM on mailbox allocation failure, -EINVAL if
 * FW rejects the command, or the error from csio_mb_issue().
 *
 * NOTE(review): caller is presumably expected to have set ln->pln before
 * invoking this (it is dereferenced unconditionally) — confirm at call
 * sites.
 */
static int
csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
	struct csio_lnode *pln;
	struct csio_mb *mbp;
	struct fw_fcoe_vnp_cmd *rsp;
	int ret = 0;
	int retry = 0;

	/* Issue VNP cmd to alloc vport */
	/* Allocate Mbox request */
	spin_lock_irq(&hw->lock);
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		ret = -ENOMEM;
		goto out;
	}

	/* Child vport shares the parent's FCF flow and physical port. */
	pln = ln->pln;
	ln->fcf_flowid = pln->fcf_flowid;
	ln->portid = pln->portid;

	csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				    pln->fcf_flowid, pln->vnp_flowid, 0,
				    csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);

	for (retry = 0; retry < 3; retry++) {
		/* FW is expected to complete vnp cmd in immediate mode
		 * without much delay.
		 * Otherwise, there will be increase in IO latency since HW
		 * lock is held till completion of vnp mbox cmd.
		 */
		ret = csio_mb_issue(hw, mbp);
		if (ret != -EBUSY)
			break;

		/* Retry if mbox returns busy; drop the lock so we can
		 * sleep between attempts.
		 */
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	if (ret) {
		csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
		goto out_free;
	}

	/* Process Mbox response of VNP command */
	rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
			    FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
		ret = -EINVAL;
		goto out_free;
	}

	/* Record the FW-assigned VNP flowid and the vnport world-wide
	 * names returned in the response.
	 */
	ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
				ntohl(rsp->gen_wwn_to_vnpi));
	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

	csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
	csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
		    ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
		    ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
		    ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
		    ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
	csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
		    ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
		    ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
		    ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
		    ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);

out_free:
	mempool_free(mbp, hw->mb_mempool);
out:
	spin_unlock_irq(&hw->lock);
	return ret;
}
/**
 *	t4vf_wr_mbox_core - send a command to FW through the mailbox
 *	@adapter: the adapter
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the mailbox and waits for the
 *	FW to execute the command.  If @rpl is not %NULL it is used to store
 *	the FW's reply to the command.  The command and its optional reply
 *	are of the same length.  FW can take up to 500 ms to respond.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	/* Progressive backoff schedule (milliseconds) used when sleeping
	 * is permitted; the last entry is reused once exhausted.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	int i, ms, delay_idx;
	const __be64 *p;
	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV)
		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);         /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */

			/* return value in low-order little-endian word */
			v = t4_read_reg(adapter, mbox_data);
			if (FW_CMD_RETVAL_G(v))
				dump_mbox(adapter, "FW Error", mbox_data);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const u32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				get_mbox_rpl(adapter, rpl, size, mbox_data);
				/* a reply must have the request bit clear */
				WARN_ON((be32_to_cpu(*(u32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER(MBOX_OWNER_NONE));
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/*
	 * We timed out.  Return the error ...
	 */
	dump_mbox(adapter, "FW Timeout", mbox_data);
	return -ETIMEDOUT;
}