Example #1
/**
 * i40iw_puda_free_cq - free cq for resource
 * @rsrc: resource for which cq to free
 */
static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
{
	enum i40iw_status_code ret;
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_sc_dev *dev = rsrc->dev;

	if (rsrc->ceq_valid) {
		i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
		return;
	}
	ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);

	if (ret)
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s error ieq cq destroy\n",
			    __func__);

	if (!ret) {
		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
				I40IW_CQP_OP_DESTROY_CQ,
				&compl_info);
		if (ret)
			i40iw_debug(dev, I40IW_DEBUG_PUDA,
				    "%s error ieq qp destroy done\n",
				    __func__);
	}
}
Example #2
/**
 * i40iw_puda_free_qp - free qp for resource
 * @rsrc: resource for which qp to free
 */
static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
{
	enum i40iw_status_code ret;
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_sc_dev *dev = rsrc->dev;

	if (rsrc->ceq_valid) {
		i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
		return;
	}

	ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
			0, false, true, true);
	if (ret)
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s error puda qp destroy wqe\n",
			    __func__);

	if (!ret) {
		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
				I40IW_CQP_OP_DESTROY_QP,
				&compl_info);
		if (ret)
			i40iw_debug(dev, I40IW_DEBUG_PUDA,
				    "%s error puda qp destroy failed\n",
				    __func__);
	}
}
Example #3
/**
 * i40iw_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @length: length of buffer
 */
static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
						   u32 length)
{
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_virt_mem buf_mem;
	enum i40iw_status_code ret;

	ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
				      sizeof(struct i40iw_puda_buf));
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error mem for buf\n", __func__);
		return NULL;
	}
	buf = (struct i40iw_puda_buf *)buf_mem.va;
	ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error dma mem for buf\n", __func__);
		i40iw_free_virt_mem(dev->hw, &buf_mem);
		return NULL;
	}
	buf->buf_mem.va = buf_mem.va;
	buf->buf_mem.size = buf_mem.size;
	return buf;
}
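The buf_mem copy saved at the end is what makes teardown possible: the tracking struct lives inside the allocation it describes. As a minimal sketch, the matching free path releases the DMA area first and then the struct itself through that saved copy (the driver's own i40iw_puda_dele_buf has this shape; the function below is illustrative, not the driver's code):

static void example_puda_free_buf(struct i40iw_sc_dev *dev,
				  struct i40iw_puda_buf *buf)
{
	/* release the DMA data area the buffer points at */
	i40iw_free_dma_mem(dev->hw, &buf->mem);
	/* then release the tracking struct via the saved buf_mem copy */
	i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
}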
Example #4
File: i40iw_hmc.c Project: 020gzh/linux
/**
 * i40iw_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: delete obj info
 * @reset: true if called before reset
 */
static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
						      struct i40iw_hmc_del_obj_info *info,
						      bool reset)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	enum i40iw_status_code ret_code = 0;
	u32 i, sd_idx;
	struct i40iw_dma_mem *mem;

	if (dev->is_pf && !reset)
		ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);

	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		if (!sd_entry)
			continue;
		mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			&sd_entry->u.pd_table.pd_page_addr :
			&sd_entry->u.bp.addr;

		if (!mem || !mem->va)
			i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
		else
			i40iw_free_dma_mem(dev->hw, mem);
	}
	return ret_code;
}
Example #5
/**
 * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
 * @dev: IWARP device pointer
 * @callback_param: pointer to the VF device structure
 * @cqe_info: CQP completion information for the operation
 */
static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
					struct i40iw_ccq_cqe_info *cqe_info)
{
	struct i40iw_vfdev *vf_dev = callback_param;
	struct i40iw_virt_mem vf_dev_mem;

	if (cqe_info->error) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Error on Get HMC Function.  Maj = 0x%04x, Minor = 0x%04x\n",
			    cqe_info->maj_err_code, cqe_info->min_err_code);
		dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
		vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
					 (u16)I40IW_ERR_CQP_COMPL_ERROR);
		vf_dev_mem.va = vf_dev;
		vf_dev_mem.size = sizeof(*vf_dev);
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Operation Return information = 0x%08x\n",
			    cqe_info->op_ret_val);
		vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
		vf_dev->msg_count--;
		vchnl_pf_send_get_hmc_fcn_resp(dev,
					       vf_dev->vf_id,
					       &vf_dev->vf_msg_buffer.vchnl_msg,
					       vf_dev->pmf_index);
	}
}
Example #6
/**
 * i40iw_puda_dele_resources - delete all resources during close
 * @vsi: pointer to vsi structure
 * @type: type of resource to delete
 * @reset: true if the chip is being reset
 */
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
			       enum puda_resource_type type,
			       bool reset)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_puda_rsrc *rsrc;
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_puda_buf *nextbuf = NULL;
	struct i40iw_virt_mem *vmem;

	switch (type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		rsrc = vsi->ilq;
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		rsrc = vsi->ieq;
		vmem = &vsi->ieq_mem;
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
			    __func__, type);
		return;
	}

	switch (rsrc->completion) {
	case PUDA_HASH_CRC_COMPLETE:
		i40iw_free_hash_desc(rsrc->hash_desc);
		/* fall through */
	case PUDA_QP_CREATED:
		if (!reset)
			i40iw_puda_free_qp(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
		/* fall through */
	case PUDA_CQ_CREATED:
		if (!reset)
			i40iw_puda_free_cq(rsrc);

		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
		break;
	default:
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
		break;
	}
	/* Free all allocated puda buffers for both tx and rx */
	buf = rsrc->alloclist;
	while (buf) {
		nextbuf = buf->next;
		i40iw_puda_dele_buf(dev, buf);
		buf = nextbuf;
		rsrc->alloc_buf_count--;
	}
	i40iw_free_virt_mem(dev->hw, vmem);
}
Example #7
/**
 * i40iw_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_send_info info;
	enum i40iw_status_code ret = 0;
	unsigned long	flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	/* if no wqe available or not from a completion and we have
	 * pending buffers, we must queue new buffer
	 */
	if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
		list_add_tail(&buf->list, &rsrc->txpend);
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		rsrc->stats_sent_pkt_q++;
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to txpend\n", __func__);
		return;
	}
	rsrc->tx_wqe_avail_cnt--;
	/* if we are coming from a completion and have pending buffers
	 * then Get one from pending list
	 */
	if (!buf) {
		buf = i40iw_puda_get_listbuf(&rsrc->txpend);
		if (!buf)
			goto done;
	}

	info.scratch = (void *)buf;
	info.paddr = buf->mem.pa;
	info.len = buf->totallen;
	info.tcplen = buf->tcphlen;
	info.maclen = buf->maclen;
	info.ipv4 = buf->ipv4;
	info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);

	ret = i40iw_puda_send(&rsrc->qp, &info);
	if (ret) {
		rsrc->tx_wqe_avail_cnt++;
		rsrc->stats_sent_pkt_q++;
		list_add(&buf->list, &rsrc->txpend);
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to puda_send\n", __func__);
	} else {
		rsrc->stats_pkt_sent++;
	}
done:
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}
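The buf argument selects between two calling modes; a hedged summary of the convention as exercised elsewhere in this file (see i40iw_puda_poll_completion in Example #25):

/*
 * i40iw_puda_send_buf(rsrc, buf)  - tx path hands in a fresh buffer;
 *                                   it is sent now or parked on txpend
 *                                   when no send WQE is available.
 * i40iw_puda_send_buf(rsrc, NULL) - send-completion path kicks the
 *                                   queue; the next buffer, if any,
 *                                   is pulled from txpend and sent.
 */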
Example #8
/**
 * i40iw_vchnl_vf_del_hmc_obj - del HMC obj
 * @dev: IWARP device pointer
 * @rsrc_type: HMC Resource type
 * @start_index: Starting index of the object to delete
 * @rsrc_count: Number of resources to be deleted
 */
enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
						  enum i40iw_hmc_rsrc_type rsrc_type,
						  u32 start_index,
						  u32 rsrc_count)
{
	struct i40iw_virtchnl_req vchnl_req;
	enum i40iw_status_code ret_code;

	memset(&vchnl_req, 0, sizeof(vchnl_req));
	vchnl_req.dev = dev;
	vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
	ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
						  &vchnl_req,
						  rsrc_type,
						  start_index,
						  rsrc_count);
	if (!ret_code) {
		ret_code = i40iw_vf_wait_vchnl_resp(dev);
		if (!ret_code)
			ret_code = vchnl_req.ret_code;
		else
			dev->vchnl_up = false;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, ret_code);
	}
	return ret_code;
}
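Every VF-side channel request in this driver follows the same send/wait/unpack shape seen above. A minimal sketch of that pattern factored into one helper; example_vchnl_roundtrip is hypothetical, and the only assumptions are that i40iw_vf_wait_vchnl_resp and vchnl_req->ret_code behave as used in this file:

static enum i40iw_status_code example_vchnl_roundtrip(struct i40iw_sc_dev *dev,
						      struct i40iw_virtchnl_req *vchnl_req,
						      enum i40iw_status_code send_rc)
{
	enum i40iw_status_code ret_code;

	if (send_rc)
		return send_rc;		/* send failed; nothing to wait for */
	ret_code = i40iw_vf_wait_vchnl_resp(dev);
	if (ret_code) {
		dev->vchnl_up = false;	/* PF never answered: mark channel down */
		return ret_code;
	}
	return vchnl_req->ret_code;	/* the PF's status for the operation itself */
}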
Example #9
File: i40iw_hmc.c Project: 020gzh/linux
/**
 * i40iw_pf_init_vfhmc - initialize hmc_info for a vf driver instance
 * @dev: pointer to i40iw_dev struct
 * @vf_hmc_fn_id: hmc function id for vf driver
 * @vf_cnt_array: array of cnt values of iwarp hmc objects
 *
 * Called by pf driver to initialize hmc_info for vf driver instance.
 */
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
					   u8 vf_hmc_fn_id,
					   u32 *vf_cnt_array)
{
	struct i40iw_hmc_info *hmc_info;
	enum i40iw_status_code ret_code = 0;
	u32 i;

	if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
	    (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
	     I40IW_MAX_PE_ENABLED_VF_COUNT)) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id  0x%x\n",
			    __func__, vf_hmc_fn_id);
		return I40IW_ERR_INVALID_HMCFN_ID;
	}

	ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
	if (ret_code)
		return ret_code;

	hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);

	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
		if (vf_cnt_array)
			hmc_info->hmc_obj[i].cnt =
			    vf_cnt_array[i - I40IW_HMC_IW_QP];
		else
			hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;

	return 0;
}
Example #10
/**
 * vchnl_vf_send_del_hmc_objs_req - del HMC objects
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 * @rsrc_type: resource type to delete
 * @start_index: starting index for resource
 * @rsrc_count: number of resources to delete
 */
static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
							     struct i40iw_virtchnl_req *vchnl_req,
							     enum i40iw_hmc_rsrc_type rsrc_type,
							     u32 start_index,
							     u32 rsrc_count)
{
	enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
	struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
	struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;

	if (!dev->vchnl_up)
		return ret_code;

	add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
	memset(vchnl_msg, 0, sizeof(*vchnl_msg));
	memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
	vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
	vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
	vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
	add_hmc_obj->obj_type = (u16)rsrc_type;
	add_hmc_obj->start_index = start_index;
	add_hmc_obj->obj_count = rsrc_count;
	ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
	return ret_code;
}
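The length arithmetic above assumes the payload overlays a one-byte placeholder tail in the op buffer, so sizeof(*vchnl_msg) + sizeof(payload) - 1 counts the header once, the payload once, and subtracts the byte they share. A hypothetical mirror of that layout (the real definition lives in the driver's virtchnl header and may differ in field order):

struct example_vchnl_op_buf {
	u16 iw_op_code;
	u16 iw_op_ver;
	u16 iw_chnl_buf_len;
	u16 rsvd;
	u64 iw_chnl_op_ctx;
	u8 iw_chnl_buf[1];	/* payload starts here, overlapping this byte */
};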
Example #11
/**
 * pf_add_hmc_obj_callback - Callback for Add HMC Object
 * @work_vf_dev: pointer to the VF Device
 */
static void pf_add_hmc_obj_callback(void *work_vf_dev)
{
	struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
	struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
	struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
	struct i40iw_hmc_create_obj_info info;
	struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
	enum i40iw_status_code ret_code;

	if (!vf_dev->pf_hmc_initialized) {
		ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
		if (ret_code)
			goto add_out;
		vf_dev->pf_hmc_initialized = true;
	}

	add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.is_pf = false;
	info.rsrc_type = (u32)add_hmc_obj->obj_type;
	info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
	info.start_idx = add_hmc_obj->start_index;
	info.count = add_hmc_obj->obj_count;
	i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
		    "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE.  Add %u type %u objects\n",
		    info.count, info.rsrc_type);
	ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
	if (!ret_code)
		vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;
add_out:
	vf_dev->msg_count--;
	vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}
Example #12
/**
 * pf_del_hmc_obj_callback - Callback for delete HMC Object
 * @work_vf_dev: pointer to the VF Device
 */
static void pf_del_hmc_obj_callback(void *work_vf_dev)
{
	struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
	struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
	struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
	struct i40iw_hmc_del_obj_info info;
	struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
	enum i40iw_status_code ret_code = I40IW_SUCCESS;

	if (!vf_dev->pf_hmc_initialized)
		goto del_out;

	del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.is_pf = false;
	info.rsrc_type = (u32)del_hmc_obj->obj_type;
	info.start_idx = del_hmc_obj->start_index;
	info.count = del_hmc_obj->obj_count;
	i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
		    "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE.  Delete %u type %u objects\n",
		    info.count, info.rsrc_type);
	ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
del_out:
	vf_dev->msg_count--;
	vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}
Example #13
/**
 * i40iw_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
						       struct i40iw_pfpdu *pfpdu,
						       struct i40iw_puda_buf *buf,
						       u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	u8 *crcptr;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	struct list_head pbufl;	/* partial buffer list */
	struct i40iw_puda_buf *txbuf = NULL;
	struct list_head *rxlist = &pfpdu->rxlist;

	INIT_LIST_HEAD(&pbufl);
	list_add(&buf->list, &pbufl);

	status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
	if (status)
		goto error;

	txbuf = i40iw_puda_get_bufpool(ieq);
	if (!txbuf) {
		pfpdu->no_tx_bufs++;
		status = I40IW_ERR_NO_TXBUFS;
		goto error;
	}

	i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
	i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
	crcptr = txbuf->data + fpdu_len - 4;
	mpacrc = *(u32 *)crcptr;
	if (ieq->check_crc) {
		status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
						(fpdu_len - 4), mpacrc);
		if (status) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad crc\n", __func__);
			goto error;
		}
	}

	i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
			txbuf->mem.va, txbuf->totallen);
	i40iw_puda_send_buf(ieq, txbuf);
	pfpdu->rcv_nxt = seqnum + fpdu_len;
	return status;
 error:
	while (!list_empty(&pbufl)) {
		buf = (struct i40iw_puda_buf *)(pbufl.prev);
		list_del(&buf->list);
		list_add(&buf->list, rxlist);
	}
	if (txbuf)
		i40iw_puda_ret_bufpool(ieq, txbuf);
	return status;
}
Example #14
/**
 * i40iw_vchnl_recv_vf - Receive VF virtual channel messages
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @msg: Virtual channel message buffer pointer
 * @len: Length of the virtual channels message
 */
enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len)
{
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
	struct i40iw_virtchnl_req *vchnl_req;

	vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
	vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
	if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
		if (vchnl_req->parm_len && vchnl_req->parm)
			memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: Got response, data size %u\n", __func__,
			    vchnl_req->parm_len);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: error length on response, Got %u, expected %u\n", __func__,
			    len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
	}

	return I40IW_SUCCESS;
}
Example #15
File: i40iw_hmc.c Project: 020gzh/linux
/**
 * i40iw_hmc_sd_grp - setup group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_info *hmc_info,
					       u32 sd_index,
					       u32 sd_cnt,
					       bool setsd)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_update_sds_info sdinfo;
	u64 pa;
	u32 i;
	enum i40iw_status_code ret_code = 0;

	memset(&sdinfo, 0, sizeof(sdinfo));
	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		if (!sd_entry ||
		    (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			    sd_entry->u.pd_table.pd_page_addr.pa :
			    sd_entry->u.bp.addr.pa;
			i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			i40iw_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
		if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC,
					    "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
					    ret_code);
				return ret_code;
			}
			sdinfo.cnt = 0;
		}
	}
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}
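The loop batches SD updates into sdinfo.entry[], flushing through process_cqp_sds whenever I40IW_MAX_SD_ENTRIES accumulate and once more for the remainder. The same accumulate-and-flush shape in isolation (all names below are illustrative, not driver API):

#define EXAMPLE_BATCH_MAX 16

static int example_flush(const int *batch, int cnt)
{
	/* stand-in for process_cqp_sds: push cnt entries to hardware */
	(void)batch;
	(void)cnt;
	return 0;
}

static int example_batched_update(const int *items, int n)
{
	int batch[EXAMPLE_BATCH_MAX];
	int cnt = 0, i, ret;

	for (i = 0; i < n; i++) {
		batch[cnt++] = items[i];
		if (cnt == EXAMPLE_BATCH_MAX) {	/* flush when the batch fills */
			ret = example_flush(batch, cnt);
			if (ret)
				return ret;
			cnt = 0;
		}
	}
	return cnt ? example_flush(batch, cnt) : 0;	/* flush the partial tail */
}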
Example #16
/**
 * vchnl_pf_send_error_resp - Send an error response to VF
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @vchnl_msg: Virtual channel message buffer pointer
 * @op_ret_code: error/status code to send back to the VF
 */
static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
				     struct i40iw_virtchnl_op_buf *vchnl_msg,
				     u16 op_ret_code)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
Example #17
/**
 * i40iw_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
						   struct i40iw_puda_completion_info *info)
{
	u64 qword0, qword2, qword3;
	u64 *cqe;
	u64 comp_ctx;
	bool valid_bit;
	u32 major_err, minor_err;
	bool error;

	cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
	get_64bit_val(cqe, 24, &qword3);
	valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);

	if (valid_bit != cq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
	error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	if (error) {
		i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
		major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
		minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
		info->compl_error = major_err << 16 | minor_err;
		return I40IW_ERR_CQ_COMPL_ERROR;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);
	info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);

	if (info->q_type == I40IW_CQE_QTYPE_RQ) {
		info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
		info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
		info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
		info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
	}

	return 0;
}
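The RS_64/LS_64 accessors used throughout decode and encode bit-fields within 64-bit CQE/WQE words. A hedged sketch of their semantics; the real macros are built in i40iw_d.h from per-field _SHIFT/_MASK constants, and the exact definitions may differ:

/* extract a field: mask it in place, then shift it down to bit 0 */
#define EXAMPLE_RS_64(val, field) \
	((u64)((val) & field ## _MASK) >> field ## _SHIFT)
/* place a field: shift it into position, then mask it to width */
#define EXAMPLE_LS_64(val, field) \
	(((u64)(val) << field ## _SHIFT) & field ## _MASK)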
Example #18
/**
 * vchnl_pf_send_get_ver_resp - Send channel version to VF
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @vchnl_msg: Virtual channel message buffer pointer
 */
static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
				       u32 vf_id,
				       struct i40iw_virtchnl_op_buf *vchnl_msg)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
	*((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
Example #19
/**
 * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 */
static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
							    struct i40iw_virtchnl_req *vchnl_req)
{
	enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
	struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;

	if (!dev->vchnl_up)
		return ret_code;

	memset(vchnl_msg, 0, sizeof(*vchnl_msg));
	vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
	vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
	vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
	ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
	return ret_code;
}
Example #20
/**
 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
				    struct i40iw_puda_buf *buf, bool initial)
{
	u64 *wqe;
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 offset24 = 0;

	qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
		    "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
		    wqe_idx, buf, wqe);
	if (!initial)
		get_64bit_val(wqe, 24, &offset24);

	/* toggle the WQE valid bit to the opposite of its last posted value */
	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);

	set_64bit_val(wqe, 0, buf->mem.pa);
	set_64bit_val(wqe, 8,
		      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
	i40iw_insert_wqe_hdr(wqe, offset24);
}
Example #21
/**
 * i40iw_ieq_process_fpdus - process fpdu buffers on its list
 * @qp: qp for which partial fpdus are pending
 * @ieq: ieq resource
 */
static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
				    struct i40iw_puda_rsrc *ieq)
{
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;
	struct i40iw_puda_buf *buf;
	enum i40iw_status_code status;

	do {
		if (list_empty(rxlist))
			break;
		buf = i40iw_puda_get_listbuf(rxlist);
		if (!buf) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error no buf\n", __func__);
			break;
		}
		if (buf->seqnum != pfpdu->rcv_nxt) {
			/* This could be out of order or missing packet */
			pfpdu->out_of_order++;
			list_add(&buf->list, rxlist);
			break;
		}
		/* keep processing buffers from the head of the list */
		status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
		if (status == I40IW_ERR_MPA_CRC) {
			pfpdu->mpa_crc_err = true;
			while (!list_empty(rxlist)) {
				buf = i40iw_puda_get_listbuf(rxlist);
				i40iw_puda_ret_bufpool(ieq, buf);
				pfpdu->crc_err++;
			}
			/* create CQP for AE */
			i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
		}
	} while (!status);
}
Example #22
/**
 * i40iw_vchnl_vf_get_pe_stats - Get PE stats
 * @dev: IWARP device pointer
 * @hw_stats: HW stats struct
 */
enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
						   struct i40iw_dev_hw_stats *hw_stats)
{
	struct i40iw_virtchnl_req  vchnl_req;
	enum i40iw_status_code ret_code;

	memset(&vchnl_req, 0, sizeof(vchnl_req));
	vchnl_req.dev = dev;
	vchnl_req.parm = hw_stats;
	vchnl_req.parm_len = sizeof(*hw_stats);
	vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
	ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
	if (!ret_code) {
		ret_code = i40iw_vf_wait_vchnl_resp(dev);
		if (!ret_code)
			ret_code = vchnl_req.ret_code;
		else
			dev->vchnl_up = false;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, ret_code);
	}
	return ret_code;
}
Example #23
File: i40iw_hmc.c Project: 020gzh/linux
/**
 * i40iw_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate memory previously allocated for
 * book-keeping information about PDs and backing storage.
 */
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_del_obj_info *info,
					    bool reset)
{
	struct i40iw_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d]  >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] + count %04d  >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->count,
			    info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}
	if (!dev->is_pf) {
		ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
						      info->count);
		if (info->rsrc_type != I40IW_HMC_IW_PBLE)
			return ret_code;
	}

	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    I40IW_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
						      info->is_pf);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
				return ret_code;
			}
		}
	}

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
		return I40IW_ERR_INVALID_SD_INDEX;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40IW_SD_TYPE_DIRECT:
			ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case I40IW_SD_TYPE_PAGED:
			ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		default:
			break;
		}
	}
	return i40iw_finish_del_sd_reg(dev, info, reset);
}
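The PD loop above splits a global PD index into a segment-descriptor index and an offset within that SD. A worked example, assuming I40IW_HMC_PD_CNT_IN_SD is 512 (one 2MB SD backing 512 4KB PDs):

/*
 * j = 1030:
 *   sd_idx     = 1030 / 512 = 2	(third segment descriptor)
 *   rel_pd_idx = 1030 % 512 = 6	(seventh PD within that SD)
 */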
Example #24
File: i40iw_hmc.c Project: 020gzh/linux
/**
 * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_create_obj_info *info)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum i40iw_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
			    __func__, info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}

	if (!dev->is_pf)
		return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count,
				  &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return I40IW_ERR_INVALID_SD_INDEX;
	}
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
						    j,
						    info->entry_type,
						    I40IW_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];

		if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
		    ((dev->hmc_info == info->hmc_info) &&
		     (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
			pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      (j + 1) * I40IW_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				while (i && (i > pd_idx1)) {
					i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
							   info->is_pf);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return i40iw_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40IW_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      (j - 1) * I40IW_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40iw_prep_remove_pd_page(info->hmc_info, i);
			break;
		case I40IW_SD_TYPE_DIRECT:
			i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40IW_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}

	return ret_code;
}
Example #25
/**
 * i40iw_puda_poll_completion - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
						  struct i40iw_sc_cq *cq, u32 *compl_err)
{
	struct i40iw_qp_uk *qp;
	struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
	struct i40iw_puda_completion_info info;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_rsrc *rsrc;
	void *sqwrid;
	u8 cq_type = cq->cq_type;
	unsigned long	flags;

	if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
		return I40IW_ERR_BAD_PTR;
	}
	memset(&info, 0, sizeof(info));
	ret = i40iw_puda_poll_info(cq, &info);
	*compl_err = info.compl_error;
	if (ret == I40IW_ERR_QUEUE_EMPTY)
		return ret;
	if (ret)
		goto done;

	qp = info.qp;
	if (!qp || !rsrc) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (qp->qp_id != rsrc->qp_id) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (info.q_type == I40IW_CQE_QTYPE_RQ) {
		buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
		/* Get all the tcpip information in the buf header */
		ret = i40iw_puda_get_tcpip_info(&info, buf);
		if (ret) {
			rsrc->stats_rcvd_pkt_err++;
			if (cq_type == I40IW_CQ_TYPE_ILQ) {
				i40iw_ilq_putback_rcvbuf(&rsrc->qp,
							 info.wqe_idx);
			} else {
				i40iw_puda_ret_bufpool(rsrc, buf);
				i40iw_puda_replenish_rq(rsrc, false);
			}
			goto done;
		}

		rsrc->stats_pkt_rcvd++;
		rsrc->compl_rxwqe_idx = info.wqe_idx;
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
		rsrc->receive(rsrc->vsi, buf);
		if (cq_type == I40IW_CQ_TYPE_ILQ)
			i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
		else
			i40iw_puda_replenish_rq(rsrc, false);

	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
		sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
		I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
		rsrc->xmit_complete(rsrc->vsi, sqwrid);
		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
		rsrc->tx_wqe_avail_cnt++;
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		if (!list_empty(&rsrc->vsi->ilq->txpend))
			i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
	}

done:
	I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
	if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
		cq_uk->polarity = !cq_uk->polarity;
	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
	set_64bit_val(cq_uk->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
	return 0;
}
Example #26
/**
 * i40iw_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	struct i40iw_qp_uk *ukqp = &qp->qp_uk;
	enum i40iw_status_code ret = 0;
	u32 sq_size, rq_size, t_size;
	struct i40iw_dma_mem *mem;

	sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
	rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
	t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
		  I40IW_QP_CTX_SIZE);
	/* Get page aligned memory */
	ret =
	    i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
				   I40IW_HW_PAGE_SIZE);
	if (ret) {
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
		return ret;
	}

	mem = &rsrc->qpmem;
	memset(mem->va, 0, t_size);
	qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
	qp->pd = &rsrc->sc_pd;
	qp->qp_type = I40IW_QP_TYPE_UDA;
	qp->dev = rsrc->dev;
	qp->back_qp = (void *)rsrc;
	qp->sq_pa = mem->pa;
	qp->rq_pa = qp->sq_pa + sq_size;
	qp->vsi = rsrc->vsi;
	ukqp->sq_base = mem->va;
	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
	qp->shadow_area_pa = qp->rq_pa + rq_size;
	qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
	qp->hw_host_ctx_pa =
		qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
	ukqp->qp_id = rsrc->qp_id;
	ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
	ukqp->rq_wrid_array = rsrc->rq_wrid_array;

	ukqp->sq_size = rsrc->sq_size;
	ukqp->rq_size = rsrc->rq_size;

	I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);

	if (qp->pd->dev->is_pf)
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						    I40E_PFPE_WQEALLOC);
	else
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						    I40E_VFPE_WQEALLOC1);

	qp->user_pri = 0;
	i40iw_qp_add_qos(qp);
	i40iw_puda_qp_setctx(rsrc);
	if (rsrc->ceq_valid)
		ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
	else
		ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
	if (ret)
		i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
	return ret;
}
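All four regions live in one page-aligned DMA allocation, carved up by the pointer arithmetic above (shadow_area and hw_host_ctx are u64 pointers, so the << 3 converts a u64 count to bytes):

/*
 * qpmem, t_size bytes total, in address order:
 *   SQ           sq_size bytes
 *   RQ           rq_size bytes
 *   shadow area  I40IW_SHADOW_AREA_SIZE << 3 bytes
 *   QP context   I40IW_QP_CTX_SIZE bytes
 */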
Example #27
/**
 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: pointer to vsi structure
 * @info: resource information
 */
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
					      struct i40iw_puda_rsrc_info *info)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_rsrc *rsrc;
	u32 pudasize;
	u32 sqwridsize, rqwridsize;
	struct i40iw_virt_mem *vmem;

	info->count = 1;
	pudasize = sizeof(struct i40iw_puda_rsrc);
	sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
	rqwridsize = info->rq_size * 8;
	switch (info->type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		vmem = &vsi->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		vmem = &vsi->ieq_mem;
		break;
	default:
		return I40IW_NOT_SUPPORTED;
	}
	ret =
	    i40iw_allocate_virt_mem(dev->hw, vmem,
				    pudasize + sqwridsize + rqwridsize);
	if (ret)
		return ret;
	rsrc = (struct i40iw_puda_rsrc *)vmem->va;
	spin_lock_init(&rsrc->bufpool_lock);
	if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
		vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
		vsi->ilq_count = info->count;
		rsrc->receive = info->receive;
		rsrc->xmit_complete = info->xmit_complete;
	} else {
		vmem = &vsi->ieq_mem;
		vsi->ieq_count = info->count;
		vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
		rsrc->receive = i40iw_ieq_receive;
		rsrc->xmit_complete = i40iw_ieq_tx_compl;
	}

	rsrc->ceq_valid = info->ceq_valid;
	rsrc->type = info->type;
	rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
	/* Initialize all ieq lists */
	INIT_LIST_HEAD(&rsrc->bufpool);
	INIT_LIST_HEAD(&rsrc->txpend);

	rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
	dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
	rsrc->qp_id = info->qp_id;
	rsrc->cq_id = info->cq_id;
	rsrc->sq_size = info->sq_size;
	rsrc->rq_size = info->rq_size;
	rsrc->cq_size = info->rq_size + info->sq_size;
	rsrc->buf_size = info->buf_size;
	rsrc->dev = dev;
	rsrc->vsi = vsi;

	ret = i40iw_puda_cq_create(rsrc);
	if (!ret) {
		rsrc->completion = PUDA_CQ_CREATED;
		ret = i40iw_puda_qp_create(rsrc);
	}
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
		goto error;
	}
	rsrc->completion = PUDA_QP_CREATED;

	ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__);
		goto error;
	}

	rsrc->rxq_invalid_cnt = info->rq_size;
	ret = i40iw_puda_replenish_rq(rsrc, true);
	if (ret)
		goto error;

	if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
		if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
			rsrc->check_crc = true;
			rsrc->completion = PUDA_HASH_CRC_COMPLETE;
			ret = 0;
		}
	}

	dev->ccq_ops->ccq_arm(&rsrc->cq);
	return ret;
 error:
	i40iw_puda_dele_resources(vsi, info->type, false);

	return ret;
}
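As with the QP memory in Example #26, a single virtual allocation backs three regions here, matching the pudasize + sqwridsize + rqwridsize request:

/*
 * vmem, in address order:
 *   struct i40iw_puda_rsrc   pudasize bytes
 *   sq_wrtrk_array           sqwridsize bytes
 *   rq_wrid_array            rqwridsize bytes
 */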
Example #28
/**
 * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @msg: Virtual channel message buffer pointer
 * @len: Length of the virtual channels message
 */
enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len)
{
	struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
	struct i40iw_vfdev *vf_dev = NULL;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	u16 iw_vf_idx;
	u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_virtchnl_work_info work_info;
	struct i40iw_dev_pestat *devstat;
	enum i40iw_status_code ret_code;
	unsigned long flags;

	if (!dev || !msg || !len)
		return I40IW_ERR_PARAM;

	if (!dev->vchnl_up)
		return I40IW_ERR_NOT_READY;
	if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
		vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
		return I40IW_SUCCESS;
	}
	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT;
	     iw_vf_idx++) {
		if (!dev->vf_dev[iw_vf_idx]) {
			if (first_avail_iw_vf ==
			    I40IW_MAX_PE_ENABLED_VF_COUNT)
				first_avail_iw_vf = iw_vf_idx;
			continue;
		}
		if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
			vf_dev = dev->vf_dev[iw_vf_idx];
			break;
		}
	}
	if (vf_dev) {
		if (!vf_dev->msg_count) {
			vf_dev->msg_count++;
		} else {
			i40iw_debug(dev, I40IW_DEBUG_VIRT,
				    "VF%u already has a channel message in progress.\n",
				    vf_id);
			return I40IW_SUCCESS;
		}
	}
	switch (vchnl_msg->iw_op_code) {
	case I40IW_VCHNL_OP_GET_HMC_FCN:
		if (!vf_dev &&
		    (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
			ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
							   (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
			if (!ret_code) {
				vf_dev = vf_dev_mem.va;
				vf_dev->stats_initialized = false;
				vf_dev->pf_dev = dev;
				vf_dev->msg_count = 1;
				vf_dev->vf_id = vf_id;
				vf_dev->iw_vf_idx = first_avail_iw_vf;
				vf_dev->pf_hmc_initialized = false;
				vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "vf_dev %p, hmc_info %p, hmc_obj %p\n",
					    vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
				dev->vf_dev[first_avail_iw_vf] = vf_dev;
				iw_vf_idx = first_avail_iw_vf;
			} else {
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u Unable to allocate a VF device structure.\n",
					    vf_id);
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
				return I40IW_SUCCESS;
			}
			memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
			hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
			hmc_fcn_info.vf_id = vf_id;
			hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
			hmc_fcn_info.cqp_callback_param = vf_dev;
			hmc_fcn_info.free_fcn = false;
			ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
			if (ret_code)
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u error CQP HMC Function operation.\n",
					    vf_id);
			ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
			if (ret_code)
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u - i40iw_device_init_pestat failed\n",
					    vf_id);
			vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
							      (u8)vf_dev->pmf_index,
							      dev->hw, false);
			vf_dev->stats_initialized = true;
		} else {
			if (vf_dev) {
				vf_dev->msg_count--;
				vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
			} else {
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
							 (u16)I40IW_ERR_NO_MEMORY);
			}
		}
		break;
	case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_add_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_del_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_GET_STATS:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		devstat = &vf_dev->dev_pestat;
		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
		devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
		vf_dev->msg_count--;
		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, devstat->hw_stats);
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
			    vchnl_msg->iw_op_code);
		vchnl_pf_send_error_resp(dev, vf_id,
					 vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
	}
	return I40IW_SUCCESS;
}
Example #29
File: i40iw_hw.c Project: lumag/linux
/**
 * i40iw_process_aeq - handle aeq events
 * @iwdev: iwarp device
 */
void i40iw_process_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct i40iw_aeqe_info aeinfo;
	struct i40iw_aeqe_info *info = &aeinfo;
	int ret;
	struct i40iw_qp *iwqp = NULL;
	struct i40iw_sc_cq *cq = NULL;
	struct i40iw_cq *iwcq = NULL;
	struct i40iw_sc_qp *qp = NULL;
	struct i40iw_qp_host_ctx_info *ctx_info = NULL;
	unsigned long flags;

	u32 aeqcnt = 0;

	if (!sc_aeq->size)
		return;

	do {
		memset(info, 0, sizeof(*info));
		ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		i40iw_debug(dev, I40IW_DEBUG_AEQ,
			    "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
			    __func__, info->ae_id, info->qp, info->qp_cq_id);
		if (info->qp) {
			spin_lock_irqsave(&iwdev->qptable_lock, flags);
			iwqp = iwdev->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
				i40iw_debug(dev, I40IW_DEBUG_AEQ,
					    "%s qp_id %d is already freed\n",
					    __func__, info->qp_cq_id);
				continue;
			}
			i40iw_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
			ctx_info->err_rq_idx_valid = true;
		} else {
			if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
				continue;
		}

		switch (info->ae_id) {
		case I40IW_AE_LLP_FIN_RECEIVED:
			if (qp->term_flags)
				break;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
				if (iwqp->ibqp_state == IB_QPS_RTS) {
					i40iw_next_iw_state(iwqp,
							    I40IW_QP_STATE_CLOSING, 0, 0, 0);
					i40iw_cm_disconn(iwqp);
				}
				iwqp->cm_id->add_ref(iwqp->cm_id);
				i40iw_schedule_cm_timer(iwqp->cm_node,
							(struct i40iw_puda_buf *)iwqp,
							I40IW_TIMER_TYPE_CLOSE, 1, 0);
			}
			break;
		case I40IW_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				i40iw_terminate_done(qp, 0);
			else
				i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_BAD_CLOSE:
			/* fall through */
		case I40IW_AE_RESET_SENT:
			i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
			break;
		case I40IW_AE_TERMINATE_SENT:
			i40iw_terminate_send_fin(qp);
			break;
		case I40IW_AE_LLP_TERMINATE_RECEIVED:
			i40iw_terminate_received(qp, info);
			break;
		case I40IW_AE_CQ_OPERATION_ERROR:
			i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
				     info->ae_id);
			cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
			iwcq = (struct i40iw_cq *)cq->back_cq;

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
			}
			break;
		case I40IW_AE_LLP_DOUBT_REACHABILITY:
			break;
		case I40IW_AE_PRIV_OPERATION_DENIED:
		case I40IW_AE_STAG_ZERO_INVALID:
		case I40IW_AE_IB_RREQ_AND_Q1_FULL:
		case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		case I40IW_AE_DDP_UBE_INVALID_MO:
		case I40IW_AE_DDP_UBE_INVALID_QN:
		case I40IW_AE_DDP_NO_L_BIT:
		case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case I40IW_AE_INVALID_ARP_ENTRY:
		case I40IW_AE_INVALID_TCP_OPTION_RCVD:
		case I40IW_AE_STALE_ARP_ENTRY:
		case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		case I40IW_AE_LLP_SYN_RECEIVED:
		case I40IW_AE_LLP_TOO_MANY_RETRIES:
		case I40IW_AE_LCE_QP_CATASTROPHIC:
		case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
		case I40IW_AE_LCE_CQ_CATASTROPHIC:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
			ctx_info->err_rq_idx_valid = false;
			/* fall through */
		default:
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
		}
		if (info->qp)
			i40iw_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
}
Example #30
File: i40iw_hw.c Project: lumag/linux
/**
 * i40iw_manage_qhash - add or modify qhash
 * @iwdev: iwarp device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 */
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
					  struct i40iw_cm_info *cminfo,
					  enum i40iw_quad_entry_type etype,
					  enum i40iw_quad_hash_manage_type mtype,
					  void *cmnode,
					  bool wait)
{
	struct i40iw_qhash_table_info *info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_vsi *vsi = &iwdev->vsi;
	enum i40iw_status_code status;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));

	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id != 0xFFFF) {
		info->vlan_valid = true;
		info->vlan_id = cpu_to_le16(cminfo->vlan_id);
	} else {
		info->vlan_valid = false;
	}

	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
	info->dest_port = cpu_to_le16(cminfo->loc_port);
	info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
	info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
	info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
	info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
	if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		info->src_port = cpu_to_le16(cminfo->rem_port);
		info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
		info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
		info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
		info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
	}
	if (cmnode) {
		cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
		cqp_request->param = (void *)cmnode;
	}

	if (info->ipv4_valid)
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	else
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
	return status;
}