Example #1
/**
 * i40iw_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @length: length of buffer
 */
static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
						   u32 length)
{
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_virt_mem buf_mem;
	enum i40iw_status_code ret;

	ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
				      sizeof(struct i40iw_puda_buf));
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error mem for buf\n", __func__);
		return NULL;
	}
	buf = (struct i40iw_puda_buf *)buf_mem.va;
	ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error dma mem for buf\n", __func__);
		i40iw_free_virt_mem(dev->hw, &buf_mem);
		return NULL;
	}
	buf->buf_mem.va = buf_mem.va;
	buf->buf_mem.size = buf_mem.size;
	return buf;
}
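A minimal caller sketch for the allocator above (hypothetical, not from the driver): rsrc->dev and rsrc->buf_size are assumed from the PUDA resource structure shown in Example #4, and the NULL check follows the function's failure contract, under which both allocations have already been rolled back.

	struct i40iw_puda_buf *buf;

	/* allocate one receive buffer of the resource's configured size */
	buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
	if (!buf)
		return I40IW_ERR_NO_MEMORY;	/* nothing to undo; the helper cleans up on failure */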
Example #2
/**
 * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 sd_index,
						enum i40iw_sd_entry_type type,
						u64 direct_mode_sz)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40iw_dma_mem mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == I40IW_SD_TYPE_PAGED)
			alloc_len = I40IW_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
						  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (type == I40IW_SD_TYPE_PAGED) {
			ret_code = i40iw_allocate_virt_mem(hw,
							   &sd_entry->u.pd_table.pd_entry_virt_mem,
							   sizeof(struct i40iw_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
							 sd_entry->u.pd_table.pd_entry_virt_mem.va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
		I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (ret_code)
		if (dma_mem_alloc_done)
			i40iw_free_dma_mem(hw, &mem);

	return ret_code;
}
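A hedged usage sketch for adding a direct-mode segment descriptor. The caller context, the index sd_idx, and the size constant I40IW_HMC_DIRECT_BP_SIZE are assumptions used for illustration; only I40IW_SD_TYPE_DIRECT comes from the function above.

	enum i40iw_status_code ret;

	/* illustrative: install one direct-mode SD entry at sd_idx */
	ret = i40iw_add_sd_table_entry(hw, hmc_info, sd_idx,
				       I40IW_SD_TYPE_DIRECT,
				       I40IW_HMC_DIRECT_BP_SIZE);
	if (ret)
		return ret;	/* the DMA buffer was already freed on the error path */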
Example #3
/**
 * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @msg: Virtual channel message buffer pointer
 * @len: Length of the virtual channel message
 */
enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len)
{
	struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
	struct i40iw_vfdev *vf_dev = NULL;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	u16 iw_vf_idx;
	u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_virtchnl_work_info work_info;
	struct i40iw_dev_pestat *devstat;
	enum i40iw_status_code ret_code;
	unsigned long flags;

	if (!dev || !msg || !len)
		return I40IW_ERR_PARAM;

	if (!dev->vchnl_up)
		return I40IW_ERR_NOT_READY;
	if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
		vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
		return I40IW_SUCCESS;
	}
	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT;
	     iw_vf_idx++) {
		if (!dev->vf_dev[iw_vf_idx]) {
			if (first_avail_iw_vf ==
			    I40IW_MAX_PE_ENABLED_VF_COUNT)
				first_avail_iw_vf = iw_vf_idx;
			continue;
		}
		if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
			vf_dev = dev->vf_dev[iw_vf_idx];
			break;
		}
	}
	if (vf_dev) {
		if (!vf_dev->msg_count) {
			vf_dev->msg_count++;
		} else {
			i40iw_debug(dev, I40IW_DEBUG_VIRT,
				    "VF%u already has a channel message in progress.\n",
				    vf_id);
			return I40IW_SUCCESS;
		}
	}
	switch (vchnl_msg->iw_op_code) {
	case I40IW_VCHNL_OP_GET_HMC_FCN:
		if (!vf_dev &&
		    (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
			ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
							   (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
			if (!ret_code) {
				vf_dev = vf_dev_mem.va;
				vf_dev->stats_initialized = false;
				vf_dev->pf_dev = dev;
				vf_dev->msg_count = 1;
				vf_dev->vf_id = vf_id;
				vf_dev->iw_vf_idx = first_avail_iw_vf;
				vf_dev->pf_hmc_initialized = false;
				vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "vf_dev %p, hmc_info %p, hmc_obj %p\n",
					    vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
				dev->vf_dev[first_avail_iw_vf] = vf_dev;
				iw_vf_idx = first_avail_iw_vf;
			} else {
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u Unable to allocate a VF device structure.\n",
					    vf_id);
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
				return I40IW_SUCCESS;
			}
			memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
			hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
			hmc_fcn_info.vf_id = vf_id;
			hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
			hmc_fcn_info.cqp_callback_param = vf_dev;
			hmc_fcn_info.free_fcn = false;
			ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
			if (ret_code)
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u error CQP HMC Function operation.\n",
					    vf_id);
			ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
			if (ret_code)
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u - i40iw_device_init_pestat failed\n",
					    vf_id);
			vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
							      (u8)vf_dev->pmf_index,
							      dev->hw, false);
			vf_dev->stats_initialized = true;
		} else {
			if (vf_dev) {
				vf_dev->msg_count--;
				vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
			} else {
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
							 (u16)I40IW_ERR_NO_MEMORY);
			}
		}
		break;
	case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_add_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_del_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_GET_STATS:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		devstat = &vf_dev->dev_pestat;
		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
		devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
		vf_dev->msg_count--;
		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, devstat->hw_stats);
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
			    vchnl_msg->iw_op_code);
		vchnl_pf_send_error_resp(dev, vf_id,
					 vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
	}
	return I40IW_SUCCESS;
}
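A hypothetical dispatch sketch for the handler above: the PF side is assumed to funnel incoming VF mailbox data into i40iw_vchnl_recv_pf and map the returned status onto a kernel errno. handle_vf_msg and the errno mapping are illustrative, not part of the driver.

static int handle_vf_msg(struct i40iw_sc_dev *dev, u32 vf_id,
			 u8 *msg, u16 len)
{
	enum i40iw_status_code ret;

	/* i40iw_vchnl_recv_pf validates dev/msg/len and dispatches on the op code */
	ret = i40iw_vchnl_recv_pf(dev, vf_id, msg, len);

	return (ret == I40IW_SUCCESS) ? 0 : -EINVAL;	/* illustrative errno mapping */
}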
Example #4
/**
 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: pointer to vsi structure
 * @info: resource information
 */
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
					      struct i40iw_puda_rsrc_info *info)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_rsrc *rsrc;
	u32 pudasize;
	u32 sqwridsize, rqwridsize;
	struct i40iw_virt_mem *vmem;

	info->count = 1;
	pudasize = sizeof(struct i40iw_puda_rsrc);
	sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
	rqwridsize = info->rq_size * 8;
	switch (info->type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		vmem = &vsi->ieq_mem;
		break;
	default:
		return I40IW_NOT_SUPPORTED;
	}
	ret = i40iw_allocate_virt_mem(dev->hw, vmem,
				      pudasize + sqwridsize + rqwridsize);
	if (ret)
		return ret;
	rsrc = (struct i40iw_puda_rsrc *)vmem->va;
	spin_lock_init(&rsrc->bufpool_lock);
	if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
		vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
		vsi->ilq_count = info->count;
		rsrc->receive = info->receive;
		rsrc->xmit_complete = info->xmit_complete;
	} else {
		vmem = &vsi->ieq_mem;
		vsi->ieq_count = info->count;
		vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
		rsrc->receive = i40iw_ieq_receive;
		rsrc->xmit_complete = i40iw_ieq_tx_compl;
	}

	rsrc->ceq_valid = info->ceq_valid;
	rsrc->type = info->type;
	rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
	/* Initialize all ieq lists */
	INIT_LIST_HEAD(&rsrc->bufpool);
	INIT_LIST_HEAD(&rsrc->txpend);

	rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
	dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
	rsrc->qp_id = info->qp_id;
	rsrc->cq_id = info->cq_id;
	rsrc->sq_size = info->sq_size;
	rsrc->rq_size = info->rq_size;
	rsrc->cq_size = info->rq_size + info->sq_size;
	rsrc->buf_size = info->buf_size;
	rsrc->dev = dev;
	rsrc->vsi = vsi;

	ret = i40iw_puda_cq_create(rsrc);
	if (!ret) {
		rsrc->completion = PUDA_CQ_CREATED;
		ret = i40iw_puda_qp_create(rsrc);
	}
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
		goto error;
	}
	rsrc->completion = PUDA_QP_CREATED;

	ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__);
		goto error;
	}

	rsrc->rxq_invalid_cnt = info->rq_size;
	ret = i40iw_puda_replenish_rq(rsrc, true);
	if (ret)
		goto error;

	if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
		if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
			rsrc->check_crc = true;
			rsrc->completion = PUDA_HASH_CRC_COMPLETE;
			ret = 0;
		}
	}

	dev->ccq_ops->ccq_arm(&rsrc->cq);
	return ret;
 error:
	i40iw_puda_dele_resources(vsi, info->type, false);

	return ret;
}
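A hedged setup sketch for creating an IEQ with the function above. Every literal below is illustrative; the real driver derives queue depths, buffer sizes, and ids from device limits and the resource profile, so treat them as assumptions.

	struct i40iw_puda_rsrc_info info = {};
	enum i40iw_status_code ret;

	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.sq_size = 1024;		/* illustrative queue depths */
	info.rq_size = 1024;
	info.tx_buf_cnt = 1024;
	info.buf_size = 2048;		/* illustrative buffer size */
	info.pd_id = 2;			/* illustrative ids */
	info.qp_id = 1;
	info.cq_id = 2;
	info.ceq_valid = false;

	ret = i40iw_puda_create_rsrc(vsi, &info);
	if (ret)
		return ret;	/* partial state was torn down via the error label */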