/*
 * handle_recv - process a completed RDMA receive work request.
 *
 * Unmaps the receive buffer, parses the 9P header to recover the tag,
 * matches it to the outstanding request and hands the reply to the
 * client layer.  Any failure (bad completion status, malformed header,
 * unknown tag) flushes the RDMA connection and marks the client
 * Disconnected.
 */
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req = NULL;
	int err = 0;
	int16_t tag;

	/* Return buffer ownership to the CPU before touching its contents. */
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (status == IB_WC_SUCCESS) {
		err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
		if (!err) {
			req = p9_tag_lookup(client, tag);
			if (req) {
				/* Deliver the reply buffer to the waiter. */
				req->rc = c->rc;
				p9_client_cb(client, req);
				return;
			}
		}
	}

	/* Completion error, parse failure or unexpected tag: tear down. */
	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		   req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}
static void req_done(struct virtqueue *vq)
{
	struct virtio_chan *chan = vq->vdev->priv;
	struct p9_fcall *rc;
	unsigned int len;
	struct p9_req_t *req;
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, ": request done\n");

	while (1) {
		spin_lock_irqsave(&chan->lock, flags);
		rc = virtqueue_get_buf(chan->vq, &len);
		if (rc == NULL) {
			spin_unlock_irqrestore(&chan->lock, flags);
			break;
		}
		chan->ring_bufs_avail = 1;
		spin_unlock_irqrestore(&chan->lock, flags);
		/* Wakeup if anyone waiting for VirtIO ring space. */
		wake_up(chan->vc_wq);
		p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
		p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
		req = p9_tag_lookup(chan->client, rc->tag);
		req->status = REQ_STATUS_RCVD;
		p9_client_cb(chan->client, req);
	}
}
/* Example #3 */
/*
 * recv_done - completion handler for a posted RDMA receive (ib_cqe based).
 *
 * Unmaps the receive buffer, sizes it from the work completion, parses
 * the 9P header for the tag and hands the reply buffer over to the
 * matching request.  In all cases one receive-queue credit (rq_sem) is
 * released and the per-post context is freed; any error additionally
 * flushes the connection and disconnects the client.
 */
static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	/* Recover our context from the completion's embedded ib_cqe. */
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	/* Return buffer ownership to the CPU before reading the header. */
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (wc->status != IB_WC_SUCCESS)
		goto err_out;

	/* Actual reply length comes from the completion, not msize. */
	c->rc.size = wc->byte_len;
	err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request.
	 */
	if (unlikely(req->rc.sdata)) {
		pr_err("Duplicate reply for request %d", tag);
		goto err_out;
	}

	/* Transfer ownership of the reply data to the request. */
	req->rc.size = c->rc.size;
	req->rc.sdata = c->rc.sdata;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

 out:
	/* Always release one receive credit and the posting context. */
	up(&rdma->rq_sem);
	kfree(c);
	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
			req, err, wc->status);
	/* Any receive failure is fatal for the whole connection. */
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	goto out;
}
/* Example #4 */
static void req_done(struct virtqueue *vq)
{
	struct virtio_chan *chan = vq->vdev->priv;
	struct p9_fcall *rc;
	unsigned int len;
	struct p9_req_t *req;

	P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");

	while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
		req = p9_tag_lookup(chan->client, rc->tag);
		req->status = REQ_STATUS_RCVD;
		p9_client_cb(chan->client, req);
	}
}
/* Example #5 */
/*
 * handle_recv - process a completed RDMA receive work request.
 *
 * Unmaps the receive buffer, parses the 9P header to find the tag,
 * verifies the matching request has not already been answered, and
 * completes it.  Bad completion status, a malformed header, an unknown
 * tag or a duplicate reply all flush the RDMA connection and mark the
 * client Disconnected.
 */
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req = NULL;
	int err = 0;
	int16_t tag;

	/* Return buffer ownership to the CPU before touching its contents. */
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (status == IB_WC_SUCCESS) {
		err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
		if (!err) {
			req = p9_tag_lookup(client, tag);
			if (req) {
				if (unlikely(req->rc)) {
					/* Reply for this tag already seen. */
					pr_err("Duplicate reply for request %d",
					       tag);
				} else {
					/* Deliver the buffer to the waiter. */
					req->rc = c->rc;
					p9_client_cb(client, req,
						     REQ_STATUS_RCVD);
					return;
				}
			}
		}
	}

	/* Any failure above lands here and tears the connection down. */
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}
/* Example #6 */
/*
 * p9_xos_read_work - work-queue handler draining the shared-memory read
 * endpoint.
 *
 * Pops packets off the endpoint's local deque under ep_lock and, per
 * packet, either completes an outstanding client request (response
 * path, odd 9P type) or builds a new server-side request and queues it
 * to the target device (request path, even type).  Finally drops the
 * wake lock if no request still needs the remote side awake.
 *
 * NOTE(review): the packet layout decoded below (4-byte skip, 4-byte
 * channel id, little-endian 32-bit size, type byte, 16-bit tag) is
 * assumed from the pointer arithmetic here — confirm against the
 * producer of these buffers.
 */
static void p9_xos_read_work(struct work_struct *work)
{
	struct p9_xos_driver *drv;
	struct p9_xos_endpoint *ep;
	int n;
	unsigned long flags;

	prolog("w=%p", work);

	drv = container_of(work, struct p9_xos_driver, rwork);
	ep = &drv->ep[RD_EP];

	/* Mark that the wake lock is held for this drain pass. */
	drv->wake_status = 1;

	spin_lock_irqsave(&drv->ep_lock, flags);
	n = p9_xos_deque_pop(ep->lqueue, ep);
	spin_unlock_irqrestore(&drv->ep_lock, flags);
	if (n == deque_null)
		goto done;

	do {
		u16 tag;
		int id;
		unsigned int size;
		struct p9_xos_device *device;
		struct p9_req_t *req;
		u8 *ptr;
		u8 type;

		/* Translate the deque slot to a buffer; skip 4-byte header. */
		ptr = n2a(n, ep) + 4;

		/* Channel/device id precedes the 9P message proper. */
		id = *(int *)ptr;
		ptr += 4;

		/* 9P messages start with a little-endian 32-bit size;
		 * 7 bytes (size + type + tag) is the minimum valid message. */
		size = le32_to_cpu(*(__le32 *) ptr);
		if (size < 7) {
			/* NOTE(review): this break abandons packet "n"
			 * without pushing it back to the free queue —
			 * looks like a buffer leak; confirm. */
			critical("ignoring too short request");
			break;
		}

		type = *(ptr + 4);

		__log_event(drv, id, type, RD_EP);

		device = &drv->device[id];

		if (type & 1) {
			/* Odd type: a response to one of our requests. */
			if (size >= device->client->msize) {
				warning("requested packet size too big: %d\n",
					size);
				goto ignore;
			}
			tag = le16_to_cpu(*(__le16 *) (ptr + 5));
			req = p9_tag_lookup(device->client, tag);

			if (req == NULL) {
				warning("ignoring unexpected response");
				goto ignore;
			}

			BUG_ON(!req->rc);

			if (likely(req->aio_cb != NULL)) {
				/* Async completion: hand the shared buffer
				 * to the aio callback directly (zero copy). */
				req->rc->sdata = ptr;
				req->status = REQ_STATUS_RCVD;
				p9_client_notify_aio(device->client, req);
			} else {
				/* Sync completion: copy out of the shared
				 * buffer before it is recycled below. */
				req->rc->sdata =
				    (char *)req->rc + sizeof(*req->rc);
				memcpy(req->rc->sdata, ptr, size);
				p9_client_cb(device->client, req);
			}
ignore:
			/* Return the packet to the remote free queue. */
			spin_lock_irqsave(&drv->ep_lock, flags);
			p9_xos_deque_push(ep->rqueue, n, ep);
			nb_free_packets++;
			spin_unlock_irqrestore(&drv->ep_lock, flags);
		} else {
			/*
			 *  Dirty hack for pmu_int server
			 *    pmu_int is on channel 1
			 *    pmu_int client has always a request pending
			 *    so does not keep the wake lock if only
			 *    pmu_int request pending
			 */
			if (likely(device != &drv->device[1]))
				drv->wake_count++;

			if (unlikely(!device->open)) {
				/* NOTE(review): this skips the rqueue push,
				 * so the packet is never freed — confirm
				 * whether that leak is intentional. */
				warning("DEVICE %d NOT OPENED, ignoring req",
					device->id);
				goto ignore2;
			}
			/* NOTE(review): neither allocation below is checked
			 * for NULL; GFP_KERNEL may still fail under
			 * pressure — confirm against caller guarantees. */
			req = kmem_cache_alloc(drv->cache, GFP_KERNEL);
			req->tc = kmalloc(sizeof(struct p9_fcall), GFP_KERNEL);
			req->tc->size = size;
			req->tc->sdata = ptr;
			req->aux = device;

			spin_lock(&device->lock);
			list_add_tail(&req->req_list, &device->req_list);
			spin_unlock(&device->lock);

			/* Notify the device's reader, if one is registered. */
			if (device->rd_cb)
				device->rd_cb(device, req);
		}
ignore2:
		spin_lock_irqsave(&drv->ep_lock, flags);
		n = p9_xos_deque_pop(ep->lqueue, ep);
		spin_unlock_irqrestore(&drv->ep_lock, flags);
	} while (n != deque_null);

done:
	/* Drop the wake lock only if no request needs the link awake; the
	 * wmb + re-check closes a race with a concurrent waker setting
	 * wake_status to 2. */
	if ((!drv->wake_count) && (drv->wake_status == 1)) {
		drv->wake_status = 0;
		wake_unlock(&drv->wake_lock);
		wmb();
		if (drv->wake_status == 2)
			wake_lock(&drv->wake_lock);
	}
	epilog();
}