Example 1
/**
 * pf_cqp_get_hmc_fcn_callback - callback for the Get HMC Function CQP request
 * @dev: iwarp device
 * @callback_param: CQP request parameter; points to the VF device
 * @cqe_info: CQP completion queue entry information
 */
static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
					struct i40iw_ccq_cqe_info *cqe_info)
{
	struct i40iw_vfdev *vf_dev = callback_param;
	struct i40iw_virt_mem vf_dev_mem;

	if (cqe_info->error) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Error on Get HMC Function.  Maj = 0x%04x, Minor = 0x%04x\n",
			    cqe_info->maj_err_code, cqe_info->min_err_code);
		dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
		vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
					 (u16)I40IW_ERR_CQP_COMPL_ERROR);
		vf_dev_mem.va = vf_dev;
		vf_dev_mem.size = sizeof(*vf_dev);
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Operation Return information = 0x%08x\n",
			    cqe_info->op_ret_val);
		vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
		vf_dev->msg_count--;
		vchnl_pf_send_get_hmc_fcn_resp(dev,
					       vf_dev->vf_id,
					       &vf_dev->vf_msg_buffer.vchnl_msg,
					       vf_dev->pmf_index);
	}
}
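
The error path above shows a recurring idiom in this driver: i40iw_free_virt_mem() takes an i40iw_virt_mem descriptor rather than a raw pointer, so a temporary descriptor is rebuilt on the stack around the object being released. Below is a minimal, self-contained sketch of that idiom; mem_desc, free_mem, and drop_vf_obj are hypothetical stand-ins for the driver's types and helpers, not part of the i40iw API.

#include <stdlib.h>

/* hypothetical stand-in for struct i40iw_virt_mem */
struct mem_desc {
	void *va;
	size_t size;
};

/* hypothetical stand-in for i40iw_free_virt_mem */
static void free_mem(struct mem_desc *mem)
{
	free(mem->va);
	mem->va = NULL;
	mem->size = 0;
}

struct vf_obj {
	int id;
};

static void drop_vf_obj(struct vf_obj *obj)
{
	struct mem_desc obj_mem;

	/* rebuild the descriptor the allocator originally filled in,
	 * so the generic free routine can be reused */
	obj_mem.va = obj;
	obj_mem.size = sizeof(*obj);
	free_mem(&obj_mem);
}

int main(void)
{
	struct vf_obj *obj = malloc(sizeof(*obj));

	if (obj)
		drop_vf_obj(obj);
	return 0;
}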
Example 2
/**
 * i40iw_puda_alloc_buf - allocate memory for a PUDA buffer
 * @dev: iwarp device
 * @length: length of the DMA buffer to allocate
 */
static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
						   u32 length)
{
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_virt_mem buf_mem;
	enum i40iw_status_code ret;

	ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
				      sizeof(struct i40iw_puda_buf));
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error mem for buf\n", __func__);
		return NULL;
	}
	buf = (struct i40iw_puda_buf *)buf_mem.va;
	ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error dma mem for buf\n", __func__);
		i40iw_free_virt_mem(dev->hw, &buf_mem);
		return NULL;
	}
	buf->buf_mem.va = buf_mem.va;
	buf->buf_mem.size = buf_mem.size;
	return buf;
}
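
The allocation above is two-staged: first virtual memory for the buffer descriptor, then DMA memory for the payload, with the descriptor unwound if the second stage fails so nothing leaks. Here is a minimal sketch of the same pattern, using malloc/free as stand-ins for the driver's i40iw_allocate_*/i40iw_free_* helpers and a hypothetical struct buf in place of i40iw_puda_buf.

#include <stdlib.h>

struct buf {            /* stands in for struct i40iw_puda_buf */
	void *payload;  /* stands in for the DMA memory in buf->mem */
	size_t len;
};

static struct buf *buf_alloc(size_t len)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;       /* stage 1 failed: nothing to unwind */

	b->payload = malloc(len);
	if (!b->payload) {
		free(b);           /* stage 2 failed: unwind stage 1 */
		return NULL;
	}
	b->len = len;
	return b;
}

int main(void)
{
	struct buf *b = buf_alloc(2048);

	if (b) {
		free(b->payload);
		free(b);
	}
	return 0;
}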
Example 3
/**
 * i40iw_puda_dele_resources - delete all resources during close
 * @vsi: pointer to the vsi structure
 * @type: type of resource to delete
 * @reset: true if the chip is being reset
 */
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
			       enum puda_resource_type type,
			       bool reset)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_puda_rsrc *rsrc;
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_puda_buf *nextbuf = NULL;
	struct i40iw_virt_mem *vmem;

	switch (type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		rsrc = vsi->ilq;
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		rsrc = vsi->ieq;
		vmem = &vsi->ieq_mem;
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
			    __func__, type);
		return;
	}

	switch (rsrc->completion) {
	case PUDA_HASH_CRC_COMPLETE:
		i40iw_free_hash_desc(rsrc->hash_desc);
		/* fall through */
	case PUDA_QP_CREATED:
		if (!reset)
			i40iw_puda_free_qp(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
		/* fall through */
	case PUDA_CQ_CREATED:
		if (!reset)
			i40iw_puda_free_cq(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
		break;
	default:
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
		break;
	}
	/* Free all allocated puda buffers for both tx and rx */
	buf = rsrc->alloclist;
	while (buf) {
		nextbuf = buf->next;
		i40iw_puda_dele_buf(dev, buf);
		buf = nextbuf;
		rsrc->alloc_buf_count--;
	}
	i40iw_free_virt_mem(dev->hw, vmem);
}
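
The second switch above is a teardown ladder: it is keyed on the furthest stage that initialization reached, and the deliberate fall-throughs release resources in reverse order of creation, so a partially constructed resource is still cleaned up correctly. A self-contained sketch of the pattern follows; the stage names and free_* helpers are hypothetical.

/* hypothetical stage markers and release helpers, for illustration only */
enum init_stage {
	STAGE_NONE,
	STAGE_CQ_CREATED,
	STAGE_QP_CREATED,
	STAGE_HASH_READY,
};

static void free_hash(void) { /* release the stage-3 resource */ }
static void free_qp(void)   { /* release the stage-2 resource */ }
static void free_cq(void)   { /* release the stage-1 resource */ }

static void teardown(enum init_stage reached)
{
	switch (reached) {
	case STAGE_HASH_READY:
		free_hash();
		/* fall through */
	case STAGE_QP_CREATED:
		free_qp();
		/* fall through */
	case STAGE_CQ_CREATED:
		free_cq();
		break;
	default:
		break;
	}
}

int main(void)
{
	/* init stopped after the QP stage: only the QP and CQ are freed */
	teardown(STAGE_QP_CREATED);
	return 0;
}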
Example 4
/**
 * i40iw_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache.
 *	3. Decrements the ref count for the pd entry.
 * Assumptions:
 *	1. The caller can deallocate the memory used by the backing storage
 *	   after this function returns.
 */
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
					  struct i40iw_hmc_info *hmc_info,
					  u32 idx,
					  bool is_pf)
{
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct i40iw_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
		return I40IW_ERR_INVALID_SD_TYPE;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40IW_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		return 0;

	pd_entry->valid = false;
	I40IW_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					   hmc_info->hmc_fn_id);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return I40IW_ERR_PARAM;
		i40iw_free_dma_mem(hw, mem);
	}
	if (!pd_table->ref_cnt)
		i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);

	return 0;
}
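
The sd_idx/rel_pd_idx computation above is a plain divide/modulo decomposition of the flat page index into a segment-descriptor slot and an offset within it. A small worked example follows; PD_CNT_IN_SD is a stand-in for I40IW_HMC_PD_CNT_IN_SD, and the value 512 is assumed here purely for illustration.

#include <stdio.h>

#define PD_CNT_IN_SD 512u   /* assumed pages-per-SD value, for illustration */

int main(void)
{
	unsigned int idx = 1030;
	unsigned int sd_idx = idx / PD_CNT_IN_SD;      /* 1030 / 512 = 2 */
	unsigned int rel_pd_idx = idx % PD_CNT_IN_SD;  /* 1030 % 512 = 6 */

	printf("sd_idx=%u rel_pd_idx=%u\n", sd_idx, rel_pd_idx);
	return 0;
}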
Example 5
/**
 * i40iw_puda_dele_buf - free a buffer back to the system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
				struct i40iw_puda_buf *buf)
{
	i40iw_free_dma_mem(dev->hw, &buf->mem);
	i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
}
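
This is the counterpart of i40iw_puda_alloc_buf() from Example 2: it releases the DMA payload (buf->mem) first and only then the virtual memory backing the descriptor itself (buf->buf_mem). The order matters because buf->mem is embedded in the structure being freed; once buf_mem is released, buf may no longer be dereferenced. A hypothetical usage fragment, assuming a valid dev pointer in scope:

/* pair every allocation with a matching delete (hypothetical usage) */
struct i40iw_puda_buf *buf = i40iw_puda_alloc_buf(dev, 2048);

if (buf) {
	/* ... use buf->mem.va as the DMA-visible payload ... */
	i40iw_puda_dele_buf(dev, buf);
}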