Example #1
File: i40e_adminq.c Project: btw616/dpdk
/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free the posted receive buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
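A note on ordering: arq_bi points into the memory owned by dma_head (it is set from dma_head.va in i40e_alloc_arq_bufs, Example #13), so the per-buffer DMA regions must be freed before the bookkeeping array that describes them. A minimal sketch of that constraint, assuming plain malloc/free in place of the driver's allocators and a hypothetical buf_info type:

#include <stdlib.h>

struct buf_info { void *va; };	/* stand-in for struct i40e_dma_mem */

static void teardown(struct buf_info *table, int n)
{
	int i;

	/* free the buffers first: their info structs live inside table */
	for (i = 0; i < n; i++)
		free(table[i].va);

	/* only now is it safe to free the bookkeeping array itself */
	free(table);
}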
Example #2
File: i40e_adminq.c Project: btw616/dpdk
/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
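The unwind label above is the usual partial-failure idiom: i still indexes the entry whose allocation failed, so it is stepped back once before the freeing loop runs. A self-contained sketch of the same pattern, with malloc/free standing in for the DMA allocators:

#include <stdlib.h>

static int alloc_all(void **slots, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(size);
		if (!slots[i])
			goto unwind;
	}
	return 0;

unwind:
	/* don't try to free the one that failed... */
	for (i--; i >= 0; i--)
		free(slots[i]);
	return -1;
}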
Example #3
File: i40e_adminq.c Project: btw616/dpdk
/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
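Unlike the receive-side free above, this routine checks pa before freeing each entry, so it can safely tear down a send queue whose buffers were only partially allocated. A sketch of the guard with a hypothetical dma_region type (not the driver's):

#include <stdint.h>
#include <stdlib.h>

struct dma_region {
	uint64_t pa;	/* bus address; zero means never populated */
	void *va;
};

static void free_partial(struct dma_region *tab, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!tab[i].pa)		/* skip entries never allocated */
			continue;
		free(tab[i].va);
		tab[i].pa = 0;
		tab[i].va = NULL;
	}
}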
Example #4
File: i40e_hmc.c Project: 03199618/linux
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx, bool is_pf)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);

	/* free memory here */
	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
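The early goto exit after I40E_DEC_BP_REFCNT is the heart of this function: the backing page is invalidated and freed only when the last reference is dropped. A minimal sketch of that rule, with hypothetical types:

#include <stdlib.h>

struct backing_page {
	unsigned int ref_cnt;
	void *mem;
};

/* returns 1 if the page was actually released, 0 if users remain */
static int bp_put(struct backing_page *bp)
{
	if (--bp->ref_cnt)
		return 0;	/* still referenced: keep the page */
	free(bp->mem);		/* last reference: release backing storage */
	bp->mem = NULL;
	return 1;
}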
Example #5
/**
 * i40e_remove_pd_page_new - Removes a PD page from an sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
Example #6
File: i40e_hmc.c Project: 2asoft/freebsd
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = FALSE;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
Example #7
/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}
Example #8
/**
 * i40e_remove_pd_page_new - Removes a PD page from an sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
                                    struct i40e_hmc_info *hmc_info,
                                    u32 idx, bool is_pf)
{
    i40e_status ret_code = 0;
    struct i40e_hmc_sd_entry *sd_entry;

    sd_entry = &hmc_info->sd_table.sd_entry[idx];
    if (is_pf) {
        I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
    } else {
        ret_code = I40E_NOT_SUPPORTED;
        goto exit;
    }
    /* free memory here */
    ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
    if (ret_code)
        goto exit;
exit:
    return ret_code;
}
Example #9
/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
                                  struct i40e_hmc_info *hmc_info,
                                  u32 idx, bool is_pf)
{
    struct i40e_hmc_sd_entry *sd_entry;
    i40e_status ret_code = 0;

    /* get the entry and decrease its ref counter */
    sd_entry = &hmc_info->sd_table.sd_entry[idx];
    if (is_pf) {
        I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
    } else {
        ret_code = I40E_NOT_SUPPORTED;
        goto exit;
    }
    ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
    if (ret_code)
        goto exit;
exit:
    return ret_code;
}
Example #10
File: i40e_adminq.c Project: btw616/dpdk
/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
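The shape here is allocate-then-roll-back: if the second allocation fails, the first is released so the caller never sees a half-built ring. The same pattern in a self-contained sketch (malloc/calloc standing in for the DMA and virtual allocators):

#include <stdlib.h>

static int alloc_ring(void **descs, void **cmds, size_t dlen, size_t clen)
{
	*descs = malloc(dlen);		/* descriptor ring */
	if (!*descs)
		return -1;

	*cmds = calloc(1, clen);	/* per-entry command details */
	if (!*cmds) {
		free(*descs);		/* roll back the first allocation */
		*descs = NULL;
		return -1;
	}
	return 0;
}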
Example #11
/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
                                    struct i40e_hmc_info *hmc_info,
                                    u32 sd_index,
                                    enum i40e_sd_entry_type type,
                                    u64 direct_mode_sz)
{
    enum i40e_memory_type mem_type __attribute__((unused));
    struct i40e_hmc_sd_entry *sd_entry;
    bool dma_mem_alloc_done = false;
    struct i40e_dma_mem mem;
    i40e_status ret_code = 0;
    u64 alloc_len;

    if (NULL == hmc_info->sd_table.sd_entry) {
        ret_code = I40E_ERR_BAD_PTR;
        hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
        goto exit;
    }

    if (sd_index >= hmc_info->sd_table.sd_cnt) {
        ret_code = I40E_ERR_INVALID_SD_INDEX;
        hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
        goto exit;
    }

    sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
    if (!sd_entry->valid) {
        if (I40E_SD_TYPE_PAGED == type) {
            mem_type = i40e_mem_pd;
            alloc_len = I40E_HMC_PAGED_BP_SIZE;
        } else {
            mem_type = i40e_mem_bp_jumbo;
            alloc_len = direct_mode_sz;
        }

        /* allocate a 4K pd page or 2M backing page */
        ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
                                         I40E_HMC_PD_BP_BUF_ALIGNMENT);
        if (ret_code)
            goto exit;
        dma_mem_alloc_done = true;
        if (I40E_SD_TYPE_PAGED == type) {
            ret_code = i40e_allocate_virt_mem(hw,
                                              &sd_entry->u.pd_table.pd_entry_virt_mem,
                                              sizeof(struct i40e_hmc_pd_entry) * 512);
            if (ret_code)
                goto exit;
            sd_entry->u.pd_table.pd_entry =
                (struct i40e_hmc_pd_entry *)
                sd_entry->u.pd_table.pd_entry_virt_mem.va;
            sd_entry->u.pd_table.pd_page_addr = mem;
        } else {
            sd_entry->u.bp.addr = mem;
            sd_entry->u.bp.sd_pd_index = sd_index;
        }
        /* initialize the sd entry */
        hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

        /* increment the ref count */
        I40E_INC_SD_REFCNT(&hmc_info->sd_table);
    }
    /* Increment backing page reference count */
    if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
        I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
    if (ret_code)
        if (dma_mem_alloc_done)
            i40e_free_dma_mem(hw, &mem);

    return ret_code;
}
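Because the PD-table allocation can fail after the DMA page has already been obtained, the function records partial progress in dma_mem_alloc_done and frees exactly that much at the exit label (which is also why ret_code must start at 0: the valid-entry path reaches exit without ever assigning it). A stripped-down sketch of the same bookkeeping, with hypothetical names:

#include <stdbool.h>
#include <stdlib.h>

static int add_entry(void **page_out, void **entries_out)
{
	bool page_alloc_done = false;
	void *page = NULL, *entries = NULL;
	int ret = 0;

	page = malloc(4096);			/* step 1: backing page */
	if (!page) {
		ret = -1;
		goto exit;
	}
	page_alloc_done = true;

	entries = calloc(512, sizeof(void *));	/* step 2: bookkeeping */
	if (!entries) {
		ret = -1;
		goto exit;
	}
	*page_out = page;
	*entries_out = entries;

exit:
	if (ret && page_alloc_done)
		free(page);	/* undo step 1 only on failure */
	return ret;
}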
Example #12
File: i40e_adminq.c Project: btw616/dpdk
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
Example #13
File: i40e_adminq.c Project: btw616/dpdk
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
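Each pre-posted descriptor carries its buffer's 64-bit DMA address as two little-endian 32-bit halves. A sketch of the split performed by I40E_HI_DWORD/I40E_LO_DWORD (helper names here are illustrative, not the driver's):

#include <stdint.h>

static inline uint32_t hi_dword(uint64_t pa)
{
	return (uint32_t)(pa >> 32);		/* upper 32 bits */
}

static inline uint32_t lo_dword(uint64_t pa)
{
	return (uint32_t)(pa & 0xffffffffu);	/* lower 32 bits */
}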
Example #14
File: i40e_adminq.c Project: btw616/dpdk
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}