Example No. 1
/**
 * i40iw_puda_dele_resources - delete all resources during close
 * @vsi: pointer to vsi structure
 * @type: type of resource to delete
 * @reset: true if reset chip
 */
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
			       enum puda_resource_type type,
			       bool reset)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_puda_rsrc *rsrc;
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_puda_buf *nextbuf = NULL;
	struct i40iw_virt_mem *vmem;

	switch (type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		rsrc = vsi->ilq;
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		rsrc = vsi->ieq;
		vmem = &vsi->ieq_mem;
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
			    __func__, type);
		return;
	}

	switch (rsrc->completion) {
	case PUDA_HASH_CRC_COMPLETE:
		i40iw_free_hash_desc(rsrc->hash_desc);
		/* fall through */
	case PUDA_QP_CREATED:
		if (!reset)
			i40iw_puda_free_qp(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
		/* fall through */
	case PUDA_CQ_CREATED:
		if (!reset)
			i40iw_puda_free_cq(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
		break;
	default:
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
		break;
	}
	/* Free all allocated puda buffers for both tx and rx */
	buf = rsrc->alloclist;
	while (buf) {
		nextbuf = buf->next;
		i40iw_puda_dele_buf(dev, buf);
		buf = nextbuf;
		rsrc->alloc_buf_count--;
	}
	i40iw_free_virt_mem(dev->hw, vmem);
}
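For context, a minimal sketch (assuming the i40iw driver headers) of how a close path might call this function for both PUDA resource types; the wrapper name example_vsi_close is hypothetical.

/* Hypothetical close helper (not part of the driver): tear down the ILQ and
 * IEQ resources for a VSI. Passing reset = true skips the CQP destroy
 * commands and only releases host memory, matching the reset handling above.
 */
static void example_vsi_close(struct i40iw_sc_vsi *vsi, bool reset)
{
	i40iw_puda_dele_resources(vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
	i40iw_puda_dele_resources(vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
}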
Example No. 2
/**
 * i40iw_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: delete object info
 * @reset: true if called before reset
 */
static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
						      struct i40iw_hmc_del_obj_info *info,
						      bool reset)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	enum i40iw_status_code ret_code = 0;
	u32 i, sd_idx;
	struct i40iw_dma_mem *mem;

	if (dev->is_pf && !reset)
		ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);

	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		if (!sd_entry)
			continue;
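		/* a paged SD owns a page of page descriptors; a direct SD
		 * owns the backing page itself
		 */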
		mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			&sd_entry->u.pd_table.pd_page_addr :
			&sd_entry->u.bp.addr;

		if (!mem || !mem->va)
			i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
		else
			i40iw_free_dma_mem(dev->hw, mem);
	}
	return ret_code;
}
Example No. 3
/**
 * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 sd_index,
						enum i40iw_sd_entry_type type,
						u64 direct_mode_sz)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40iw_dma_mem mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == I40IW_SD_TYPE_PAGED)
			alloc_len = I40IW_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
						  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (type == I40IW_SD_TYPE_PAGED) {
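			/* a paged SD covers 512 page descriptors (2M SD / 4K pages) */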
			ret_code = i40iw_allocate_virt_mem(hw,
							   &sd_entry->u.pd_table.pd_entry_virt_mem,
							   sizeof(struct i40iw_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
							 sd_entry->u.pd_table.pd_entry_virt_mem.va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
		I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (ret_code && dma_mem_alloc_done)
		i40iw_free_dma_mem(hw, &mem);

	return ret_code;
}
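As a usage illustration, a minimal sketch (assuming the i40iw HMC headers) of requesting a direct-mode SD entry; the wrapper name example_prepare_direct_sd and its parameters are hypothetical.

/* Hypothetical wrapper (not part of the driver): ensure a direct-mode SD
 * entry exists; direct_mode_sz is used only for I40IW_SD_TYPE_DIRECT, while
 * paged entries always allocate I40IW_HMC_PAGED_BP_SIZE.
 */
static enum i40iw_status_code example_prepare_direct_sd(struct i40iw_hw *hw,
							struct i40iw_hmc_info *hmc_info,
							u32 sd_index,
							u64 size)
{
	return i40iw_add_sd_table_entry(hw, hmc_info, sd_index,
					I40IW_SD_TYPE_DIRECT, size);
}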
Example No. 4
/**
 * i40iw_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) or in sd table
 *	   (for direct address mode) invalid.
 *	2. Write to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrement the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 */
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
					  struct i40iw_hmc_info *hmc_info,
					  u32 idx,
					  bool is_pf)
{
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct i40iw_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
		return I40IW_ERR_INVALID_SD_TYPE;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40IW_DEC_BP_REFCNT(&pd_entry->bp);
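	/* tear the page down only after the last reference is released */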
	if (pd_entry->bp.ref_cnt)
		return 0;

	pd_entry->valid = false;
	I40IW_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					   hmc_info->hmc_fn_id);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return I40IW_ERR_PARAM;
		i40iw_free_dma_mem(hw, mem);
	}
	if (!pd_table->ref_cnt)
		i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);

	return 0;
}
Example No. 5
/**
 * i40iw_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_dev *dev = rsrc->dev;
	struct i40iw_sc_cq *cq = &rsrc->cq;
	enum i40iw_status_code ret = 0;
	u32 tsize, cqsize;
	struct i40iw_dma_mem *mem;
	struct i40iw_cq_init_info info;
	struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;

	cq->vsi = rsrc->vsi;
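	/* one DMA allocation holds the CQE array followed by the shadow area */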
	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
				     I40IW_CQ0_ALIGNMENT_MASK);
	if (ret)
		return ret;

	mem = &rsrc->cqmem;
	memset(&info, 0, sizeof(info));
	info.dev = dev;
	info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
			 I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
	info.shadow_read_threshold = rsrc->cq_size >> 2;
	info.ceq_id_valid = true;
	info.cq_base_pa = mem->pa;
	info.shadow_area_pa = mem->pa + cqsize;
	init_info->cq_base = mem->va;
	init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
	init_info->cq_size = rsrc->cq_size;
	init_info->cq_id = rsrc->cq_id;
	info.ceqe_mask = true;
	ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
	if (ret)
		goto error;
	if (rsrc->ceq_valid)
		ret = i40iw_cqp_cq_create_cmd(dev, cq);
	else
		ret = i40iw_puda_cq_wqe(dev, cq);
error:
	if (ret)
		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
	return ret;
}
Example No. 6
/**
 * i40iw_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	struct i40iw_qp_uk *ukqp = &qp->qp_uk;
	enum i40iw_status_code ret = 0;
	u32 sq_size, rq_size, t_size;
	struct i40iw_dma_mem *mem;

	sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
	rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
	t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
		  I40IW_QP_CTX_SIZE);
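	/* single allocation layout: SQ WQEs, RQ WQEs, shadow area
	 * (I40IW_SHADOW_AREA_SIZE quadwords), then the QP context
	 */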
	/* Get page aligned memory */
	ret = i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
				     I40IW_HW_PAGE_SIZE);
	if (ret) {
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
		return ret;
	}

	mem = &rsrc->qpmem;
	memset(mem->va, 0, t_size);
	qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
	qp->pd = &rsrc->sc_pd;
	qp->qp_type = I40IW_QP_TYPE_UDA;
	qp->dev = rsrc->dev;
	qp->back_qp = (void *)rsrc;
	qp->sq_pa = mem->pa;
	qp->rq_pa = qp->sq_pa + sq_size;
	qp->vsi = rsrc->vsi;
	ukqp->sq_base = mem->va;
	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
	qp->shadow_area_pa = qp->rq_pa + rq_size;
	qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
	qp->hw_host_ctx_pa =
		qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
	ukqp->qp_id = rsrc->qp_id;
	ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
	ukqp->rq_wrid_array = rsrc->rq_wrid_array;

	ukqp->sq_size = rsrc->sq_size;
	ukqp->rq_size = rsrc->rq_size;

	I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);

	if (qp->pd->dev->is_pf)
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						    I40E_PFPE_WQEALLOC);
	else
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						    I40E_VFPE_WQEALLOC1);

	qp->user_pri = 0;
	i40iw_qp_add_qos(qp);
	i40iw_puda_qp_setctx(rsrc);
	if (rsrc->ceq_valid)
		ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
	else
		ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
	if (ret)
		i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
	return ret;
}
Example No. 7
/**
 * i40iw_puda_dele_buf - free buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
				struct i40iw_puda_buf *buf)
{
	i40iw_free_dma_mem(dev->hw, &buf->mem);
	i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
}