Example #1
static int i40e_get_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	int ret_val = 0, len;
	u8 *eeprom_buff;
	u16 i, sectors;
	bool last;
#define I40E_NVM_SECTOR_SIZE  4096
	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_val) {
		dev_info(&pf->pdev->dev,
			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
			 ret_val, hw->aq.asq_last_status);
		goto free_buff;
	}

	/* split the request into I40E_NVM_SECTOR_SIZE chunks, rounding up
	 * so a partial final sector still gets its own read
	 */
	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
	len = I40E_NVM_SECTOR_SIZE;
	last = false;
	for (i = 0; i < sectors; i++) {
		if (i == (sectors - 1)) {
			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
			last = true;
		}
		ret_val = i40e_aq_read_nvm(hw, 0x0,
				eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
				len,
				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
				last, NULL);
		if (ret_val) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed err=%d status=0x%x\n",
				 ret_val, hw->aq.asq_last_status);
			goto release_nvm;
		}
	}

release_nvm:
	i40e_release_nvm(hw);
	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
free_buff:
	kfree(eeprom_buff);
	return ret_val;
}
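
The read loop above splits the requested range into I40E_NVM_SECTOR_SIZE (4 KB) chunks and shortens only the final read to cover the remainder, flagging it as the last transaction. A minimal standalone sketch of that sector arithmetic, using a hypothetical request length:

#include <stdio.h>

#define I40E_NVM_SECTOR_SIZE 4096

int main(void)
{
	unsigned int len = 10000;	/* hypothetical eeprom->len */
	unsigned int sectors, chunk, i;

	/* round up so a partial final sector still gets its own read */
	sectors = len / I40E_NVM_SECTOR_SIZE;
	sectors += (len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;

	for (i = 0; i < sectors; i++) {
		chunk = I40E_NVM_SECTOR_SIZE;
		if (i == sectors - 1)	/* last read covers the remainder */
			chunk = len - I40E_NVM_SECTOR_SIZE * i;
		printf("read %u: offset %u, len %u\n",
		       i, I40E_NVM_SECTOR_SIZE * i, chunk);
	}
	return 0;	/* prints lengths 4096, 4096, 1808 for len = 10000 */
}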
Example #2
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
        struct i40e_arq_event_info *e,
        u16 *pending)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;
    u16 ntc = hw->aq.arq.next_to_clean;
    struct i40e_aq_desc *desc;
    struct i40e_dma_mem *bi;
    u16 desc_idx;
    u16 datalen;
    u16 flags;
    u16 ntu;

    /* take the lock before we start messing with the ring */
    i40e_acquire_spinlock(&hw->aq.arq_spinlock);

    /* set next_to_use to head */
    ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
    if (ntu == ntc) {
        /* nothing to do - shouldn't need to update ring's values */
        i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Queue is empty.\n");
        ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
        goto clean_arq_element_out;
    }

    /* now clean the next descriptor */
    desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
    desc_idx = ntc;

    flags = LE16_TO_CPU(desc->flags);
    if (flags & I40E_AQ_FLAG_ERR) {
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.arq_last_status =
            (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
        i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Event received with error 0x%X.\n",
                   hw->aq.arq_last_status);
    }

    i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                I40E_DMA_TO_NONDMA);
    datalen = LE16_TO_CPU(desc->datalen);
    e->msg_len = min(datalen, e->buf_len);
    if (e->msg_buf != NULL && (e->msg_len != 0))
        i40e_memcpy(e->msg_buf,
                    hw->aq.arq.r.arq_bi[desc_idx].va,
                    e->msg_len, I40E_DMA_TO_NONDMA);

    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                  hw->aq.arq_buf_size);

    /* Restore the original datalen and buffer address in the desc,
     * FW updates datalen to indicate the event message
     * size
     */
    bi = &hw->aq.arq.r.arq_bi[ntc];
    i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

    desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
    if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
    desc->datalen = CPU_TO_LE16((u16)bi->size);
    desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
    desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

    /* set tail = the last cleaned desc index. */
    wr32(hw, hw->aq.arq.tail, ntc);
    /* ntc is updated to tail + 1 */
    ntc++;
    if (ntc == hw->aq.num_arq_entries)
        ntc = 0;
    hw->aq.arq.next_to_clean = ntc;
    hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
    /* Set pending if needed, unlock and return */
    if (pending != NULL)
        *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
    i40e_release_spinlock(&hw->aq.arq_spinlock);

#ifndef VF_DRIVER
    if (i40e_is_nvm_update_op(&e->desc)) {
        hw->aq.nvm_busy = false;
        if (hw->aq.nvm_release_on_done) {
            i40e_release_nvm(hw);
            hw->aq.nvm_release_on_done = false;
        }
    }

#endif
    return ret_code;
}
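
The 'pending' value returned above is the number of descriptors still waiting between next_to_clean and the hardware head, with a correction for ring wrap-around. A small standalone sketch of that arithmetic with hypothetical ring positions:

#include <stdio.h>

/* mirrors the expression used above: entries left between ntc and ntu on a
 * ring of 'count' descriptors (the driver does this in u16 arithmetic)
 */
static int arq_pending(int count, int ntc, int ntu)
{
	return (ntc > ntu ? count : 0) + (ntu - ntc);
}

int main(void)
{
	/* no wrap: head is ahead of next_to_clean */
	printf("%d\n", arq_pending(32, 5, 9));	/* prints 4 */
	/* wrap: head has lapped the end of the ring */
	printf("%d\n", arq_pending(32, 30, 2));	/* 32 + (2 - 30) = 4 */
	return 0;
}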
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = false;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

	return ret_code;
}
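
A caller typically drains the receive queue by invoking i40e_clean_arq_element() repeatedly until it reports no work or 'pending' reaches zero, which is roughly what the driver's admin-queue service task does. A hedged sketch of such a caller, assuming an already initialized admin queue and the i40e driver headers; process_event() is a hypothetical per-event handler:

static void example_drain_arq(struct i40e_hw *hw)
{
	struct i40e_arq_event_info event;
	u16 pending;

	/* buffer sized to hold the largest possible event message */
	event.buf_len = hw->aq.arq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		/* non-zero (e.g. I40E_ERR_ADMIN_QUEUE_NO_WORK) means stop */
		if (i40e_clean_arq_element(hw, &event, &pending))
			break;
		process_event(hw, &event);	/* hypothetical handler */
	} while (pending);

	kfree(event.msg_buf);
}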