int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			err = -EINVAL;
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device,
			iser_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		memcpy(iser_conn->login_req_buf, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device,
			iser_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		tx_dsg->addr    = iser_conn->login_req_dma;
		tx_dsg->length  = task->data_count;
		tx_dsg->lkey    = device->pd->local_dma_lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n",conn, err);
	return err;
}
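iser_send_control() above shows the usual way to let the CPU write into a buffer that stays DMA-mapped for the lifetime of the connection: sync it for the CPU, copy the data in, then sync it back for the device before posting the send WR. A minimal sketch of that bracketing follows; the helper name and its arguments are hypothetical placeholders, not part of the iSER driver.

/*
 * Sketch only: bracket a CPU write into a long-lived DMA-mapped TX buffer.
 * fill_mapped_tx_buf() and its arguments are hypothetical placeholders.
 */
static void fill_mapped_tx_buf(struct ib_device *dev, u64 dma_addr,
			       void *buf, const void *src, size_t len)
{
	/* give the buffer to the CPU before touching it */
	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_TO_DEVICE);

	memcpy(buf, src, len);

	/* hand ownership back to the device before posting the send WR */
	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_TO_DEVICE);
}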
Example #2
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	ib_conn->post_recv_buf_count--;
}
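iser_login_rsp() does the same dance in the receive direction: the completion handler syncs the login response buffer for the CPU with DMA_FROM_DEVICE, hands the header and payload up the iSCSI stack, and then syncs the buffer back for the device so it can be reposted. A stripped-down sketch of that receive-side bracketing follows; the helper name is a hypothetical placeholder, not a driver function.

/* Sketch only: consume_rx_buf() is a hypothetical placeholder. */
static void consume_rx_buf(struct ib_device *dev, u64 dma_addr,
			   struct iscsi_hdr *hdr, size_t mapped_len)
{
	/* make the device's DMA writes visible to the CPU */
	ib_dma_sync_single_for_cpu(dev, dma_addr, mapped_len, DMA_FROM_DEVICE);

	pr_debug("rx op 0x%x itt 0x%x\n", hdr->opcode, hdr->itt);

	/* return ownership to the device so the buffer can be reposted */
	ib_dma_sync_single_for_device(dev, dma_addr, mapped_len, DMA_FROM_DEVICE);
}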
/**
 * iser_rcv_completion - recv DTO completion handler
 * @rx_desc:     RX descriptor that completed
 * @rx_xfer_len: number of bytes received (including the iSER headers)
 * @ib_conn:     RDMA connection on which the descriptor was received
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate between login and all other PDUs */
	if ((char *)rx_desc == iser_conn->login_resp_buf) {
		rx_dma = iser_conn->login_resp_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
				   rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
			hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
			rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
				      rx_buflen, DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the    *
	 * task eliminates the need to worry about tasks which are completed in *
	 * parallel to the execution of iser_conn_term. So the code that waits  *
	 * for the posted rx bufs refcount to become zero handles everything    */
	ib_conn->post_recv_buf_count--;

	if (rx_dma == iser_conn->login_resp_dma)
		return;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
Example #4
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length;
	int outstanding, count, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the    *
	 * task eliminates the need to worry about tasks which are completed in *
	 * parallel to the execution of iser_conn_term. So the code that waits  *
	 * for the posted rx bufs refcount to become zero handles everything    */
	ib_conn->post_recv_buf_count--;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
Example #5
File: iser_verbs.c  Project: 710leo/LVS
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, nonzero on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}
Example #6
static inline int isert_pdu_prepare_send(struct isert_connection *isert_conn,
					  struct isert_cmnd *tx_pdu)
{
	struct isert_device *isert_dev = isert_conn->isert_dev;
	struct ib_sge *sge = tx_pdu->wr[0].sge_list;
	size_t to_sync, size;
	int sg_cnt = 0;

	size = ISER_HDRS_SZ + tx_pdu->iscsi.pdu.ahssize +
		tx_pdu->iscsi.pdu.datasize;
	while (size) {
		to_sync = size > PAGE_SIZE ? PAGE_SIZE : size;
		ib_dma_sync_single_for_device(isert_dev->ib_dev, sge->addr,
					      to_sync,
					      DMA_TO_DEVICE);

		sge->length = to_sync;
		size -= to_sync;
		++sge;
		++sg_cnt;
	}

	return sg_cnt;
}
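isert_pdu_prepare_send() syncs the PDU for the device one PAGE_SIZE chunk per SGE and returns the SGE count; a caller would typically plug that count straight into the send work request. The sketch below is hypothetical: the helper name is a placeholder and the QP is passed in explicitly because the sketch makes no assumption about where the SCST isert code keeps it.

/*
 * Sketch only: hypothetical caller showing how the SGE count returned by
 * isert_pdu_prepare_send() feeds the send work request.
 */
static int isert_pdu_post_example(struct isert_connection *isert_conn,
				  struct isert_cmnd *tx_pdu,
				  struct ib_qp *qp)
{
	struct ib_send_wr wr = {}, *bad_wr;

	wr.sg_list    = tx_pdu->wr[0].sge_list;
	wr.num_sge    = isert_pdu_prepare_send(isert_conn, tx_pdu);
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(qp, &wr, &bad_wr);
}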
Example #7
int vnic_rx_skb(struct vnic_login *login, struct vnic_rx_ring *ring,
		struct ib_wc *wc, int ip_summed, char *eth_hdr_va)
{
	u64 wr_id = (unsigned int)wc->wr_id;
	struct sk_buff *skb;
	int used_frags;
	char *va = eth_hdr_va;
	int length = wc->byte_len - VNIC_EOIB_HDR_SIZE - VNIC_VLAN_OFFSET(login);
	int linear_length = (length <= SMALL_PACKET_SIZE) ?
			    length : SMALL_PACKET_SIZE;
	int hdr_len = min(length, HEADER_COPY_SIZE);
	int offset = NET_IP_ALIGN + 16;
	struct ib_device *ib_dev = login->port->dev->ca;

	/* alloc a small linear SKB */
	skb = alloc_skb(linear_length + offset, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_record_rx_queue(skb, ring->index);
	skb_reserve(skb, offset);

	if (vnic_linear_small_pkt && length <= SMALL_PACKET_SIZE) {
		u64 dma;

		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy
		 */
		dma = ring->rx_info[wr_id].dma_addr[0] + VNIC_EOIB_HDR_SIZE +
			VNIC_VLAN_OFFSET(login);
		ib_dma_sync_single_for_cpu(ib_dev, dma, length,
					   DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		ib_dma_sync_single_for_device(ib_dev, dma, length,
					      DMA_FROM_DEVICE);
		skb->tail += length;
	} else {
		/* unmap the needed fragments and reallocate them. Fragments
		 * that were not used will not be reused as is. */
		used_frags = vnic_unmap_and_replace_rx(ring, ib_dev,
						       skb_shinfo(skb)->frags,
						       wr_id, wc->byte_len);
		if (!used_frags)
			goto free_and_repost;

		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, hdr_len);
		skb->tail += hdr_len;
		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset +=
		    (VNIC_EOIB_HDR_SIZE + VNIC_VLAN_OFFSET(login) +
		     hdr_len);

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -=
		    (VNIC_EOIB_HDR_SIZE + VNIC_VLAN_OFFSET(login) +
		     hdr_len);
		skb->data_len = length - hdr_len;
	}

	/* update skb fields */
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);
	skb->ip_summed = ip_summed;
	skb->dev = login->dev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	return vnic_rx(login, skb, wc);

free_and_repost:
	dev_kfree_skb(skb);
	return -ENODEV;

}