Example no. 1
/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
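
The unwind path above frees only the buffers that were successfully allocated: the index is first stepped back past the failed allocation, then the loop walks back to zero. A minimal standalone sketch of the same allocate-or-unwind idiom, with hypothetical names and plain malloc/free standing in for the driver's DMA helpers:

#include <stdlib.h>

/* Hypothetical sketch: allocate n buffers, or undo everything on failure. */
static int alloc_all(void **bufs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i])
			goto unwind;
	}
	return 0;

unwind:
	/* step back past the allocation that failed... */
	i--;
	for (; i >= 0; i--)
		free(bufs[i]);
	return -1;
}
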
Example no. 2
/**
 * i40e_aq_get_dcb_config
 * @hw: pointer to the hw struct
 * @mib_type: mib type for the query
 * @bridgetype: bridge type for the query (remote)
 * @dcbcfg: store for LLDPDU data
 *
 * Query DCB configuration from the Firmware
 **/
i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
				   u8 bridgetype,
				   struct i40e_dcbx_config *dcbcfg)
{
	i40e_status ret = 0;
	struct i40e_virt_mem mem;
	u8 *lldpmib;

	/* Allocate the LLDPDU */
	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
	if (ret)
		return ret;

	lldpmib = (u8 *)mem.va;
	ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
				   (void *)lldpmib, I40E_LLDPDU_SIZE,
				   NULL, NULL, NULL);
	if (ret)
		goto free_mem;

	/* Parse LLDP MIB to get dcb configuration */
	ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);

free_mem:
	i40e_free_virt_mem(hw, &mem);
	return ret;
}
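
A hedged usage sketch: the driver's DCB code passes MIB-type and bridge-type constants from i40e_adminq_cmd.h, such as I40E_AQ_LLDP_MIB_LOCAL and I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE. Treat the exact constants below as an assumption rather than a documented contract; the wrapper name is hypothetical.

/* Hypothetical caller: query the locally administered DCB configuration.
 * Assumes the driver headers (i40e_adminq_cmd.h etc.) are in scope. */
static i40e_status i40e_query_local_dcb(struct i40e_hw *hw,
					struct i40e_dcbx_config *cfg)
{
	return i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL,
				      I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				      cfg);
}
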
Example no. 3
/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
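
Here the error handling is inline: if the second allocation fails, the first is freed before returning. Once more than two resources are involved, the usual kernel idiom is a goto cleanup chain; a hedged sketch of the equivalent structure (hypothetical function name, same driver helpers):

/* Hypothetical restructuring of the function above as a goto chain. */
enum i40e_status_code i40e_alloc_asq_ring_sketch(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details));
	if (ret_code)
		goto err_free_desc;

	return I40E_SUCCESS;

err_free_desc:
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
	return ret_code;
}
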
Example no. 4
/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
                                    struct i40e_hmc_info *hmc_info,
                                    u32 sd_index,
                                    enum i40e_sd_entry_type type,
                                    u64 direct_mode_sz)
{
    enum i40e_memory_type mem_type __attribute__((unused));
    struct i40e_hmc_sd_entry *sd_entry;
    bool dma_mem_alloc_done = false;
    struct i40e_dma_mem mem;
    i40e_status ret_code = 0;
    u64 alloc_len;

    if (NULL == hmc_info->sd_table.sd_entry) {
        ret_code = I40E_ERR_BAD_PTR;
        hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
        goto exit;
    }

    if (sd_index >= hmc_info->sd_table.sd_cnt) {
        ret_code = I40E_ERR_INVALID_SD_INDEX;
        hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
        goto exit;
    }

    sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
    if (!sd_entry->valid) {
        if (I40E_SD_TYPE_PAGED == type) {
            mem_type = i40e_mem_pd;
            alloc_len = I40E_HMC_PAGED_BP_SIZE;
        } else {
            mem_type = i40e_mem_bp_jumbo;
            alloc_len = direct_mode_sz;
        }

        /* allocate a 4K pd page or 2M backing page */
        ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
                                         I40E_HMC_PD_BP_BUF_ALIGNMENT);
        if (ret_code)
            goto exit;
        dma_mem_alloc_done = true;
        if (I40E_SD_TYPE_PAGED == type) {
            ret_code = i40e_allocate_virt_mem(hw,
                                              &sd_entry->u.pd_table.pd_entry_virt_mem,
                                              sizeof(struct i40e_hmc_pd_entry) * 512);
            if (ret_code)
                goto exit;
            sd_entry->u.pd_table.pd_entry =
                (struct i40e_hmc_pd_entry *)
                sd_entry->u.pd_table.pd_entry_virt_mem.va;
            sd_entry->u.pd_table.pd_page_addr = mem;
        } else {
            sd_entry->u.bp.addr = mem;
            sd_entry->u.bp.sd_pd_index = sd_index;
        }
        /* initialize the sd entry */
        hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

        /* increment the ref count */
        I40E_INC_SD_REFCNT(&hmc_info->sd_table);
    }
    /* Increment backing page reference count */
    if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
        I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
    if (ret_code)
        if (dma_mem_alloc_done)
            i40e_free_dma_mem(hw, &mem);

    return ret_code;
}
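
The valid flag makes the allocation idempotent per index: the first caller allocates the backing memory and takes the SD reference, while every later caller on a DIRECT entry only bumps the backing-page refcount. A standalone model of that idiom, with hypothetical names and malloc in place of the DMA allocator:

#include <stdlib.h>

struct slot {
	void *backing;
	unsigned int refcnt;
};

/* First acquire allocates the backing object; later acquires for the
 * same slot only take another reference. */
static int slot_acquire(struct slot *s, size_t size)
{
	if (!s->backing) {
		s->backing = malloc(size);
		if (!s->backing)
			return -1;
	}
	s->refcnt++;
	return 0;
}
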
Example no. 5
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
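
Each pre-posted descriptor carries the buffer's 64-bit DMA address split across two 32-bit fields, converted with the CPU_TO_LE* wrappers to the little-endian byte order the hardware expects regardless of host endianness. Standalone equivalents of the HI/LO dword split (the driver's I40E_HI_DWORD and I40E_LO_DWORD macros behave like this; the names below are hypothetical):

#include <stdint.h>

/* Upper 32 bits of a 64-bit DMA address. */
static inline uint32_t hi_dword(uint64_t pa)
{
	return (uint32_t)(pa >> 32);
}

/* Lower 32 bits of a 64-bit DMA address. */
static inline uint32_t lo_dword(uint64_t pa)
{
	return (uint32_t)(pa & 0xFFFFFFFFu);
}
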
Example no. 6
/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                                        u32 rxq_num, u32 fcoe_cntx_num,
                                        u32 fcoe_filt_num)
{
    struct i40e_hmc_obj_info *obj, *full_obj;
    enum i40e_status_code ret_code = I40E_SUCCESS;
    u64 l2fpm_size;
    u32 size_exp;

    hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
    hw->hmc.hmc_fn_id = hw->pf_id;

    /* allocate memory for hmc_obj */
    ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
                                      sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
    if (ret_code)
        goto init_lan_hmc_out;
    hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
                      hw->hmc.hmc_obj_virt_mem.va;

    /* The full object will be used to create the LAN HMC SD */
    full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
    full_obj->max_cnt = 0;
    full_obj->cnt = 0;
    full_obj->base = 0;
    full_obj->size = 0;

    /* Tx queue context information */
    obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
    obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
    obj->cnt = txq_num;
    obj->base = 0;
    size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (txq_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  txq_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;
    }

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    /* Rx queue context information */
    obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
    obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
    obj->cnt = rxq_num;
    obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
                (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
                 hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
    obj->base = i40e_align_l2obj_base(obj->base);
    size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (rxq_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  rxq_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;
    }

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    /* FCoE context information */
    obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
    obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
    obj->cnt = fcoe_cntx_num;
    obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
                (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
                 hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
    obj->base = i40e_align_l2obj_base(obj->base);
    size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (fcoe_cntx_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  fcoe_cntx_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;
    }

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    /* FCoE filter information */
    obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
    obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
    obj->cnt = fcoe_filt_num;
    obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
                (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
                 hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
    obj->base = i40e_align_l2obj_base(obj->base);
    size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (fcoe_filt_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  fcoe_filt_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;
    }

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    hw->hmc.first_sd_index = 0;
    hw->hmc.sd_table.ref_cnt = 0;
    l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
                                           fcoe_filt_num);
    if (NULL == hw->hmc.sd_table.sd_entry) {
        hw->hmc.sd_table.sd_cnt = (u32)
                                  (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
                                  I40E_HMC_DIRECT_BP_SIZE;

        /* allocate the sd_entry members in the sd_table */
        ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
                                          (sizeof(struct i40e_hmc_sd_entry) *
                                           hw->hmc.sd_table.sd_cnt));
        if (ret_code)
            goto init_lan_hmc_out;
        hw->hmc.sd_table.sd_entry =
            (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
    }
    /* store in the LAN full object for later */
    full_obj->size = l2fpm_size;

init_lan_hmc_out:
    return ret_code;
}