Ejemplo n.º 1
0
/*
 * netfront_rx() - drain the RX ring of a Xen netfront device.
 *
 * First reaps completed TX requests (with local IRQs disabled so the
 * GC does not race the interrupt path), then consumes every pending RX
 * response.  RING_FINAL_CHECK_FOR_RESPONSES() re-arms the response
 * event and reports whether new responses raced in while we drained,
 * in which case we loop again.  Finally the RX ring is replenished.
 */
void netfront_rx(struct netfront_dev *dev)
{
	RING_IDX rp, cons;
	struct netif_rx_response *rsp = &(dev->rsp);
	int more, flags;

	/* Reap TX completions under IRQ protection. */
	local_irq_save(flags);
	netfront_tx_buf_gc(dev);
	local_irq_restore(flags);
#ifdef CONFIG_NETMAP
	/* Netmap mode bypasses the normal response-processing path. */
	if (dev->netmap) {
		netmap_netfront_rx(dev);
		return;
	}
#endif
moretodo:
	rp = dev->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->rx.rsp_cons;

	while (cons != rp) {
		/* Snapshot the response out of the shared ring before use. */
		NETIF_MEMCPY(rsp, RING_GET_RESPONSE(&dev->rx, cons), sizeof(*rsp));
		/* netfront_get_responses() appears to advance
		 * dev->rx.rsp_cons itself -- the loop relies on re-reading
		 * it below to make progress.  TODO confirm in its def. */
		netfront_get_responses(dev, cons);
		cons = dev->rx.rsp_cons;
	}

	dev->rx.rsp_cons = cons; /* redundant here: cons already mirrors rsp_cons */
	RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx, more);
	if(more)
		goto moretodo;

	/* Re-stock the ring with fresh receive buffers. */
	netfront_fillup_rx_buffers(dev);
}
Ejemplo n.º 2
0
/*
 * as_int() - interrupt handler draining the 'as' response ring.
 *
 * Walks the ring from rsp_cons up to the backend-published rsp_prod,
 * logging each response, then uses the standard
 * RING_FINAL_CHECK_FOR_RESPONSES() dance to close the race with the
 * backend queueing more responses, looping until none remain.
 *
 * Fix: added the rmb() after reading sring->rsp_prod.  Without it the
 * CPU may read response payloads before it observes the producer index
 * update, yielding stale/garbage response contents (every other Xen
 * frontend in this family issues this barrier).
 */
static irqreturn_t as_int (int irq, void *dev_id)
{
	struct as_response *ring_resp;
	RING_IDX i, rp;

	printk("\nxen:DomU: as_int called");
again:
	rp = info.ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	printk(KERN_DEBUG "\nxen:DomU:ring pointers %d to %d", info.ring.rsp_cons, rp);
	for (i = info.ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		ring_resp = RING_GET_RESPONSE(&(info.ring), i);
		printk(KERN_DEBUG "\nxen:domU: Recvd in IDX-%d, with id=%d, op=%d, st=%d", i, ring_resp->id, ring_resp->operation, ring_resp->status);
		id = ring_resp->id;
		switch(ring_resp->operation) {
		case 0:
			printk(KERN_DEBUG "\nxen:DomU: operation: 0");
			break;
		default:
			break;
		}
	}

	info.ring.rsp_cons = i;
	if (i != info.ring.req_prod_pvt) {
		/* Requests are still outstanding: check for late responses. */
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info.ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		/* Ring fully drained: re-arm the event for the next response. */
		info.ring.sring->rsp_event = i + 1;
	return IRQ_HANDLED;
}
Ejemplo n.º 3
0
/*
 * ixp_interrupt() - interrupt handler for the ixp frontend ring.
 *
 * Ignores interrupts that arrive before the device is connected, then
 * drains all pending responses: installs each response, completes the
 * matching shadow request, and recycles its request id.  Loops via
 * RING_FINAL_CHECK_FOR_RESPONSES() until no responses remain.
 *
 * Fix: removed the local 'error' variable -- it was computed from
 * bret->status on every iteration but never read (dead code).
 */
static irqreturn_t ixp_interrupt(int irq, void *dev_id)
{
	struct ixp_response *bret;
	RING_IDX i, rp;
	struct ixpfront_info *info = (struct ixpfront_info *)dev_id;

	/* Spurious interrupt before the connection handshake finished. */
	if (unlikely(info->connected != IXP_STATE_CONNECTED)) {
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;

		ixp_install_response(info, bret);
		ixp_completion(&info->shadow[id]);

		/* The request id can be reused now that the shadow is done. */
		add_id_to_freelist(info, id);

		switch (bret->operation) {
		case IXP_OP_3DES_ENCRYPT:
			if (unlikely(bret->status != IXPIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		/* Outstanding requests remain: re-check for late responses. */
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		/* Fully drained: re-arm the response event. */
		info->ring.sring->rsp_event = i + 1;

	return IRQ_HANDLED;
}
Ejemplo n.º 4
0
/*
 * write_block() - synchronously write one block to the backend disk.
 *
 * Queues a single-segment BLKIF_OP_WRITE for 'sector' covering 'amt'
 * bytes from the pre-granted buffer (p->block_grant), notifies the
 * backend if needed, then blocks until the response whose id matches
 * this request comes back.  Returns 1 on success, 0 on backend error.
 *
 * NOTE(review): 'FILE' here is presumably a project typedef carrying
 * the ring and grant state, not stdio's FILE -- confirm.
 * NOTE(review): responses are dereferenced without an rmb() between
 * observing RING_HAS_UNCONSUMED_RESPONSES() and reading the response
 * payload; the sibling frontends issue one.  Presumably safe on this
 * runtime's memory model -- confirm.
 */
static int write_block(FILE *p, blkif_sector_t sector, size_t amt)
{
  static uint64_t next_reqid = 1;
  blkif_response_t *rsp;
  blkif_request_t *req;
  int notify, work_to_do;
  uint64_t reqid;
  RING_IDX i;

  /* wait until we can write something */
  while(RING_FULL(&p->ring)) runtime_block(1);

  /* write out the request */
  i = p->ring.req_prod_pvt++;
  req = RING_GET_REQUEST(&p->ring, i);
  memset(req, 0, sizeof(blkif_request_t));
  req->operation         = BLKIF_OP_WRITE;
  req->nr_segments       = 1;
  req->handle            = p->disk_handle;
  req->id                = reqid = next_reqid++;
  req->sector_number     = sector;
  req->seg[0].gref       = p->block_grant;
  req->seg[0].first_sect = 0;
  /* last_sect is inclusive, in 512-byte units; amt bytes -> sectors. */
  req->seg[0].last_sect  = (amt - 1) / 512;
  wmb();
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&p->ring, notify);
  if(notify) channel_send(p->chan);

  /* wait for it to be satisfied */
  do {
    while(!RING_HAS_UNCONSUMED_RESPONSES(&p->ring))
      runtime_block(1);
    i = p->ring.rsp_cons++;
    rsp = RING_GET_RESPONSE(&p->ring, i);
  } while(rsp->id != reqid);

  /* was it successful? */
  if(rsp->status != BLKIF_RSP_OKAY) {
    printf("PROFILING: Block write failed!\n");
    return 0;
  }

  /* we do writes one at a time, synchronously, so work_to_do should always
     be false */
  RING_FINAL_CHECK_FOR_RESPONSES(&p->ring, work_to_do);
  assert(!work_to_do);

  return 1;
}
Ejemplo n.º 5
0
/*
 * scsifront_ring_drain() - consume every pending response on the ring.
 *
 * Hands each response between rsp_cons and the backend's rsp_prod to
 * scsifront_do_response().  Returns non-zero when the final check
 * reports that further responses arrived while we were draining.
 */
static int scsifront_ring_drain(struct vscsifrnt_info *info)
{
	struct vscsiif_response *rsp;
	RING_IDX idx, prod;
	int pending = 0;

	prod = info->ring.sring->rsp_prod;
	rmb();	/* ordering required respective to dom0 */

	idx = info->ring.rsp_cons;
	while (idx != prod) {
		rsp = RING_GET_RESPONSE(&info->ring, idx);
		scsifront_do_response(info, rsp);
		idx++;
	}

	info->ring.rsp_cons = idx;

	if (idx == info->ring.req_prod_pvt)
		/* Ring fully drained: re-arm the response event. */
		info->ring.sring->rsp_event = idx + 1;
	else
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, pending);

	return pending;
}
Ejemplo n.º 6
0
VOID
ReceiverHandleNotification(
    IN  PRECEIVER Receiver
)
/*++
Routine Description:

    Interrupt handler for receive processing
    Put the received packets into an array and call NdisMIndicateReceivePacket
    If we run low on RFDs, allocate another one

    Packets are indicated to NDIS in batches of XENNET_DEF_RFDS; the
    receiver lock is dropped around each NdisMIndicateReceivePacket call
    and re-acquired afterwards.

Arguments:

    Adapter     Pointer to our adapter

Return Value:

    None
    
--*/
{
    PADAPTER        Adapter = Receiver->Common.Adapter;
    RING_IDX        prod;
    int             more_work;
    PNDIS_PACKET    PacketArray[XENNET_DEF_RFDS];
    NDIS_STATUS     PacketStatus[XENNET_DEF_RFDS];
    UINT            PacketCount;

    /* Nothing queued by the backend: cheap early exit without the lock. */
    if (!RING_HAS_UNCONSUMED_RESPONSES(&Receiver->Common.Ring))
        return;

    NdisDprAcquireSpinLock(&Receiver->Common.Lock);
    if (Receiver->Common.Adapter->media_disconnect) {
        NdisDprReleaseSpinLock(&Receiver->Common.Lock);
        return;
    }

    /* Sanity check: the backend should never publish more responses than
       we have outstanding requests. */
    if (__RING_IDX_DIFFERENCE(Receiver->Common.Ring.req_prod_pvt,
                              Receiver->Common.Ring.sring->rsp_prod) >
        NET_RX_RING_SIZE)
        TraceWarning(("Strange: rsp_prod ahead of req_prod (%d vs %d (s %d))\n",
                      Receiver->Common.Ring.sring->rsp_prod,
                      Receiver->Common.Ring.req_prod_pvt,
                      Receiver->Common.Ring.sring->req_prod));

    PacketCount = 0;

 top:
    prod = Receiver->Common.Ring.sring->rsp_prod;
    XsMemoryBarrier(); /* see responses up to 'prod' before reading them */
    while (!RING_IDXS_EQ(Receiver->Common.Ring.rsp_cons, prod)) {
        PNDIS_PACKET packet;
        ULONG totFrags;
        NDIS_STATUS status;

        status = ReceiverReceivePacket(Receiver, &packet, &totFrags);
        /* NOTE(review): this 'continue' assumes ReceiverReceivePacket
           advances rsp_cons even on failure; if it can fail without
           consuming a response this loop would spin -- confirm. */
        if (status != NDIS_STATUS_SUCCESS)
            continue;

        TraceProfile(("%s(%s, %p)\n", __FUNCTION__, Adapter->XenbusPrefix, packet));

        // See http://msdn.microsoft.com/en-us/library/ms797610.aspx
        if (Receiver->LowResources == 2 ||
            (Receiver->LowResources == 1 && totFrags > 1)) {
            status = NDIS_STATUS_RESOURCES;
            NDIS_SET_PACKET_STATUS(packet, status);
        }

        PacketArray[PacketCount] = packet;
        PacketStatus[PacketCount] = status;
        PacketCount++;

        /* Batch full: hand the packets to NDIS (lock dropped across the
           indication, as NDIS may call back into us). */
        if (PacketCount == XENNET_DEF_RFDS) {
            ULONG Index;

            Receiver->Common.Frames += PacketCount;
            Receiver->nRxInNdis += PacketCount;

            if (Receiver->nRxInNdis >= Receiver->nRxInNdisMax)
                Receiver->nRxInNdisMax = Receiver->nRxInNdis;

            NdisDprReleaseSpinLock(&Receiver->Common.Lock);
            NdisMIndicateReceivePacket(
                Receiver->Common.Adapter->AdapterHandle,
                PacketArray,
                PacketCount);
            NdisDprAcquireSpinLock(&Receiver->Common.Lock);

            /* NDIS_STATUS_RESOURCES packets come back to us immediately;
               release them now. */
            for (Index = 0; Index < PacketCount; Index++) {
                if (PacketStatus[Index] == NDIS_STATUS_RESOURCES) {
                    ReceiverReleasePacket(Receiver, PacketArray[Index]);
                    Receiver->nRxInNdis--;
                } else {
                   XM_ASSERT(PacketStatus[Index] == NDIS_STATUS_SUCCESS);
                }
            }
            PacketCount = 0;

            ReceiverSwizzle(Receiver);
        }
    }
    RING_FINAL_CHECK_FOR_RESPONSES(&Receiver->Common.Ring, more_work);
    if (more_work)
        goto top;

    /* Flush any partially-filled final batch. */
    if (PacketCount != 0) {
        ULONG Index;

        Receiver->Common.Frames += PacketCount;
        Receiver->nRxInNdis += PacketCount;

        if (Receiver->nRxInNdis >= Receiver->nRxInNdisMax)
            Receiver->nRxInNdisMax = Receiver->nRxInNdis;

        NdisDprReleaseSpinLock(&Receiver->Common.Lock);
        NdisMIndicateReceivePacket(
            Receiver->Common.Adapter->AdapterHandle,
            PacketArray,
            PacketCount);
        NdisDprAcquireSpinLock(&Receiver->Common.Lock);

        for (Index = 0; Index < PacketCount; Index++) {
            if (PacketStatus[Index] == NDIS_STATUS_RESOURCES) {
                ReceiverReleasePacket(Receiver, PacketArray[Index]);
                Receiver->nRxInNdis--;
            } else {
                XM_ASSERT(PacketStatus[Index] == NDIS_STATUS_SUCCESS);
            }
        }
        PacketCount = 0;
    }

    // Swizzle unconditionally to make sure we replenish the ring even if
    // nothing was passed to NDIS.
    ReceiverSwizzle(Receiver);

    NdisDprReleaseSpinLock(&Receiver->Common.Lock);
    /* XXX Should maybe adjust size of packet pool from here. */
}
Ejemplo n.º 7
0
/*
 * network_rx() - consume RX responses and refill the ring.
 *
 * Drains the RX ring, delivering each received frame either to the
 * libc select path (copy into dev->data, stop after one frame) or to
 * the dev->netif_rx callback, then re-grants and re-posts one request
 * per consumed response.
 *
 * Fix: the skip paths (NETRXF_extra_info, NETIF_RSP_NULL) used
 * 'continue' inside a while loop whose increments were at the bottom,
 * so neither 'cons' nor 'nr_consumed' advanced and the loop spun
 * forever on the same slot.  The loop is now a for loop with the
 * increments in the header (same structure as the fixed variant of
 * this function elsewhere in this file's lineage), so every path
 * consumes the response and accounts for a refill.
 */
void network_rx(struct netfront_dev *dev)
{
    RING_IDX rp,cons,req_prod;
    struct netif_rx_response *rx;
    int nr_consumed, some, more, i, notify;


moretodo:
    rp = dev->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->rx.rsp_cons;

    for (nr_consumed = 0, some = 0;
         (cons != rp) && !some;
         nr_consumed++, cons++)
    {
        struct net_buffer* buf;
        unsigned char* page;
        int id;

        rx = RING_GET_RESPONSE(&dev->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            continue;
        }


        if (rx->status == NETIF_RSP_NULL) continue;

        id = rx->id;
        BUG_ON(id >= NET_TX_RING_SIZE);

        buf = &dev->rx_buffers[id];
        page = (unsigned char*)buf->page;
        /* Grant is released here and re-established in the refill loop. */
        gnttab_end_access(buf->gref);

        if(rx->status>0)
        {
#ifdef HAVE_LIBC
	    if (dev->netif_rx == NETIF_SELECT_RX) {
		int len = rx->status;
		ASSERT(current == main_thread);
		if (len > dev->len)
		    len = dev->len;
		memcpy(dev->data, page+rx->offset, len);
		dev->rlen = len;
		/* One frame delivered to the select path: stop draining. */
		some = 1;
	    } else
#endif
		dev->netif_rx(page+rx->offset,rx->status);
        }
    }
    dev->rx.rsp_cons=cons;

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
    if(more && !some) goto moretodo;

    req_prod = dev->rx.req_prod_pvt;

    /* Re-post one request per consumed response. */
    for(i=0; i<nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
        struct net_buffer* buf = &dev->rx_buffers[id];
        void* page = buf->page;

        /* We are sure to have free gnttab entries since they got released above */
        buf->gref = req->gref = 
            gnttab_grant_access(dev->dom,virt_to_mfn(page),0);

        req->id = id;
    }

    wmb(); /* Make the requests visible before publishing req_prod. */

    dev->rx.req_prod_pvt = req_prod + i;
    
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);

}
Ejemplo n.º 8
0
/*
 * blkfront_aio_poll() - poll the blkfront ring for completed AIO requests.
 *
 * Consumes every pending response, ends the data-segment grants and
 * invokes the per-request aio_cb callback.  Returns the number of
 * responses consumed in this call.
 *
 * Reentrancy: aio_cb may re-enter this function; rsp_cons is advanced
 * BEFORE the callback runs, and the post-callback comparison detects
 * reentry so we never process the same slot twice.
 */
int blkfront_aio_poll(struct blkfront_dev *dev)
{
    RING_IDX rp, cons;
    struct blkif_response *rsp;
    int more;
    int nr_consumed;

moretodo:
#ifdef HAVE_LIBC
    if (dev->fd != -1) {
        files[dev->fd].read = 0;
        mb(); /* Make sure to let the handler set read to 1 before we start looking at the ring */
    }
#endif

    rp = dev->ring.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->ring.rsp_cons;

    nr_consumed = 0;
    while ((cons != rp))
    {
        struct blkfront_aiocb *aiocbp;
        int status;

	rsp = RING_GET_RESPONSE(&dev->ring, cons);
	nr_consumed++;

        /* The request id carries the aiocb pointer round-trip. */
        aiocbp = (void*) (uintptr_t) rsp->id;
        status = rsp->status;

        switch (rsp->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        {
            int j;

            if (status != BLKIF_RSP_OKAY)
                printk("%s error %d on %s at offset %llu, num bytes %llu\n",
                        rsp->operation == BLKIF_OP_READ?"read":"write",
                        status, aiocbp->aio_dev->nodename,
                        (unsigned long long) aiocbp->aio_offset,
                        (unsigned long long) aiocbp->aio_nbytes);

            /* Release the grants covering the data segments. */
            for (j = 0; j < aiocbp->n; j++)
                gnttab_end_access(aiocbp->gref[j]);

            break;
        }

        case BLKIF_OP_WRITE_BARRIER:
            if (status != BLKIF_RSP_OKAY)
                printk("write barrier error %d\n", status);
            break;
        case BLKIF_OP_FLUSH_DISKCACHE:
            if (status != BLKIF_RSP_OKAY)
                printk("flush error %d\n", status);
            break;

        default:
            printk("unrecognized block operation %d response (status %d)\n", rsp->operation, status);
            break;
        }

        /* Advance rsp_cons before the callback: the callback may
         * re-enter this poll function. */
        dev->ring.rsp_cons = ++cons;
        /* Nota: callback frees aiocbp itself */
        if (aiocbp && aiocbp->aio_cb)
            aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
        if (dev->ring.rsp_cons != cons)
            /* We reentered, we must not continue here */
            break;
    }

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
    if (more) goto moretodo;

    return nr_consumed;
}
Ejemplo n.º 9
0
/*
 * network_rx() - consume RX responses and refill the ring.
 *
 * Drains the RX ring, delivering each frame either to the libc select
 * path (copy into dev->data) or to the dev->netif_rx callback, then
 * re-posts one request per consumed response.  Grants here are
 * persistent: the refill loop reuses buf->gref rather than
 * re-granting.
 */
void network_rx(struct netfront_dev *dev)
{
    RING_IDX rp,cons,req_prod;
    struct netif_rx_response *rx;
    int nr_consumed, some, more, i, notify;

#ifdef CONFIG_NETMAP
    /* Netmap mode bypasses the normal response-processing path. */
    if (dev->netmap) {
        netmap_netfront_rx(dev);
        return;
    }
#endif
moretodo:
    rp = dev->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->rx.rsp_cons;

    /* Increments in the header ensure the 'continue' paths below still
     * consume the response slot. */
    for (nr_consumed = 0, some = 0;
	 (cons != rp);
         nr_consumed++, cons++)
    {
        struct net_buffer* buf;
        unsigned char* page;
        int id;

        rx = RING_GET_RESPONSE(&dev->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            continue;
        }


        if (rx->status == NETIF_RSP_NULL) continue;

        id = rx->id;
        BUG_ON(id >= NET_TX_RING_SIZE);

        buf = &dev->rx_buffers[id];
        page = (unsigned char*)buf->page;

        if(rx->status>0)
        {
#ifdef HAVE_LIBC
	    if (dev->netif_rx == NETIF_SELECT_RX) {
		int len = rx->status;
		ASSERT(current == main_thread);
		if (len > dev->len)
		    len = dev->len;
		memcpy(dev->data, page+rx->offset, len);
		dev->rlen = len;
	    } else
#endif
		dev->netif_rx(page+rx->offset,rx->status, dev->netif_rx_arg);
		some = 1;
		/* NOTE(review): with HAVE_LIBC defined the 'else' above
		 * binds only to the netif_rx() call, so 'some = 1' runs on
		 * BOTH branches despite its indentation.  It also gates the
		 * 'goto moretodo' below (stop after one batch once anything
		 * was delivered) -- confirm this is intended and add braces
		 * if so. */
        }
    }
    dev->rx.rsp_cons=cons;

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
    if(more && !some) goto moretodo;
    req_prod = dev->rx.req_prod_pvt;

    /* Re-post one request per consumed response, reusing the grants. */
    for(i=0; i<nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
        struct net_buffer* buf = &dev->rx_buffers[id];

	req->gref = buf->gref;
        req->id = id;
    }

    wmb(); /* Make the requests visible before publishing req_prod. */

    dev->rx.req_prod_pvt = req_prod + i;
    
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);

}
Ejemplo n.º 10
0
/*
 * XenUsb_HandleEventDpc() - DPC draining the URB and connection rings.
 *
 * Pass 1 (urb_ring, under urb_ring_lock): match each response to its
 * partial_pvurb, accumulate lengths/status/error counts into the parent
 * pvurb, end any data-segment grant accesses, recycle the request id,
 * and collect pvurbs whose refcount dropped to zero onto a local
 * completion list.  The list is completed to WDF after the lock is
 * released.
 *
 * Pass 2 (conn_ring, under conn_ring_lock): translate port connection
 * responses into port_status/port_change bits and immediately re-post a
 * request for each consumed response.  If any port changed, kick the
 * root hub's interrupt endpoint.
 *
 * Fix: removed a stray double semicolon after the error_count
 * accumulation (harmless empty statement, but it trips linters and
 * suggests an editing accident).
 */
static VOID
XenUsb_HandleEventDpc(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2) {
  NTSTATUS status;
  PXENUSB_DEVICE_DATA xudd = context;
  RING_IDX prod, cons;
  usbif_urb_response_t *urb_rsp;
  usbif_conn_response_t *conn_rsp;
  usbif_conn_request_t *conn_req;
  int more_to_do;
  pvurb_t *pvurb, *complete_head = NULL, *complete_tail = NULL;
  partial_pvurb_t *partial_pvurb;
  BOOLEAN port_changed = FALSE;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  FUNCTION_ENTER();

  more_to_do = TRUE;
  KeAcquireSpinLockAtDpcLevel(&xudd->urb_ring_lock);
  while (more_to_do)
  {
    prod = xudd->urb_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* see responses up to 'prod' before reading them */
    for (cons = xudd->urb_ring.rsp_cons; cons != prod; cons++)
    {
      urb_rsp = RING_GET_RESPONSE(&xudd->urb_ring, cons);
//      FUNCTION_MSG("urb_rsp->id = %d\n", urb_rsp->id);
      partial_pvurb = xudd->partial_pvurbs[urb_rsp->id];
      RemoveEntryList(&partial_pvurb->entry);
      partial_pvurb->rsp = *urb_rsp;
//      FUNCTION_MSG("shadow = %p\n", shadow);
//      FUNCTION_MSG("shadow->rsp = %p\n", shadow->rsp);
      if (usbif_pipeunlink(partial_pvurb->req.pipe)) {
        FUNCTION_MSG("is a cancel request for request %p\n", partial_pvurb->pvurb->request);
        FUNCTION_MSG("urb_ring rsp status = %d\n", urb_rsp->status);
        // status should be 115 == EINPROGRESS
      } else {
        /* Accumulate this partial transfer into the parent pvurb;
           the first non-zero status wins. */
        partial_pvurb->pvurb->total_length += urb_rsp->actual_length;
        if (!partial_pvurb->pvurb->rsp.status)
          partial_pvurb->pvurb->rsp.status = urb_rsp->status;
        partial_pvurb->pvurb->rsp.error_count += urb_rsp->error_count;
        if (partial_pvurb->mdl) {
          int i;
          for (i = 0; i < partial_pvurb->req.nr_buffer_segs; i++) {
            XnEndAccess(xudd->handle, partial_pvurb->req.seg[i].gref, FALSE, (ULONG)'XUSB');
          }
        }

        FUNCTION_MSG("urb_ring rsp id = %d\n", partial_pvurb->rsp.id);
        FUNCTION_MSG("urb_ring rsp start_frame = %d\n", partial_pvurb->rsp.start_frame);
        FUNCTION_MSG("urb_ring rsp status = %d\n", partial_pvurb->rsp.status);
        FUNCTION_MSG("urb_ring rsp actual_length = %d\n", partial_pvurb->rsp.actual_length);
        FUNCTION_MSG("urb_ring rsp error_count = %d\n", partial_pvurb->rsp.error_count);
      }
      if (partial_pvurb->other_partial_pvurb) {
        if (!partial_pvurb->other_partial_pvurb->on_ring) {
          /* cancel hasn't been put on the ring yet - remove it */
          RemoveEntryList(&partial_pvurb->other_partial_pvurb->entry);
          ASSERT(usbif_pipeunlink(partial_pvurb->other_partial_pvurb->req.pipe));
          partial_pvurb->pvurb->ref--;
          ExFreePoolWithTag(partial_pvurb->other_partial_pvurb, XENUSB_POOL_TAG);
        }
      }
      partial_pvurb->pvurb->ref--;
      switch (partial_pvurb->rsp.status) {
      case EINPROGRESS: /* unlink request */
      case ECONNRESET:  /* cancelled request */
        ASSERT(partial_pvurb->pvurb->status == STATUS_CANCELLED);
        break;
      default:
        break;
      }
      put_id_on_freelist(xudd->req_id_ss, partial_pvurb->rsp.id);
      partial_pvurb->pvurb->next = NULL;
      /* Last outstanding partial for this pvurb: queue it for
         completion once the ring lock is dropped. */
      if (!partial_pvurb->pvurb->ref) {
        if (complete_tail) {
          complete_tail->next = partial_pvurb->pvurb;
        } else {
          complete_head = partial_pvurb->pvurb;
        }
        complete_tail = partial_pvurb->pvurb;
      }
    }

    xudd->urb_ring.rsp_cons = cons;
    if (cons != xudd->urb_ring.req_prod_pvt) {
      RING_FINAL_CHECK_FOR_RESPONSES(&xudd->urb_ring, more_to_do);
    } else {
      /* Ring drained: re-arm the response event. */
      xudd->urb_ring.sring->rsp_event = cons + 1;
      more_to_do = FALSE;
    }
  }
  PutRequestsOnRing(xudd);
  KeReleaseSpinLockFromDpcLevel(&xudd->urb_ring_lock);

  /* Complete collected pvurbs outside the ring lock. */
  pvurb = complete_head;
  while (pvurb != NULL) {
    complete_head = pvurb->next;
    status = WdfRequestUnmarkCancelable(pvurb->request);
    if (status == STATUS_CANCELLED) {
      FUNCTION_MSG("Cancel was called\n");
    }
    
    WdfRequestCompleteWithInformation(pvurb->request, pvurb->status, pvurb->total_length); /* the WDFREQUEST is always successful here even if the pvurb->rsp has an error */
    pvurb = complete_head;
  }

  more_to_do = TRUE;
  KeAcquireSpinLockAtDpcLevel(&xudd->conn_ring_lock);
  while (more_to_do)
  {
    prod = xudd->conn_ring.sring->rsp_prod;
    KeMemoryBarrier(); /* see responses up to 'prod' before reading them */
    for (cons = xudd->conn_ring.rsp_cons; cons != prod; cons++)
    {
      USHORT old_port_status;
      conn_rsp = RING_GET_RESPONSE(&xudd->conn_ring, cons);
      FUNCTION_MSG("conn_rsp->portnum = %d\n", conn_rsp->portnum);
      FUNCTION_MSG("conn_rsp->speed = %d\n", conn_rsp->speed);
      
      old_port_status = xudd->ports[conn_rsp->portnum - 1].port_status;
      xudd->ports[conn_rsp->portnum - 1].port_type = conn_rsp->speed;
      xudd->ports[conn_rsp->portnum - 1].port_status &= ~((1 << PORT_LOW_SPEED) | (1 << PORT_HIGH_SPEED) | (1 << PORT_CONNECTION));
      switch (conn_rsp->speed)
      {
      case USB_PORT_TYPE_NOT_CONNECTED:
        xudd->ports[conn_rsp->portnum - 1].port_status &= ~(1 << PORT_ENABLE);
        break;
      case USB_PORT_TYPE_LOW_SPEED:
        xudd->ports[conn_rsp->portnum - 1].port_status |= (1 << PORT_LOW_SPEED) | (1 << PORT_CONNECTION);
        break;
      case USB_PORT_TYPE_FULL_SPEED:
        xudd->ports[conn_rsp->portnum - 1].port_status |= (1 << PORT_CONNECTION);
        break;
      case USB_PORT_TYPE_HIGH_SPEED:
        xudd->ports[conn_rsp->portnum - 1].port_status |= (1 << PORT_HIGH_SPEED) | (1 << PORT_CONNECTION);
        break;
      }      
      /* Record which ENABLE/CONNECTION bits actually changed. */
      xudd->ports[conn_rsp->portnum - 1].port_change |= (xudd->ports[conn_rsp->portnum - 1].port_status ^ old_port_status) & ((1 << PORT_ENABLE) | (1 << PORT_CONNECTION));
      if (xudd->ports[conn_rsp->portnum - 1].port_change)
        port_changed = TRUE;
      /* Immediately re-post a request for the consumed response. */
      conn_req = RING_GET_REQUEST(&xudd->conn_ring, xudd->conn_ring.req_prod_pvt);
      conn_req->id = conn_rsp->id;
      xudd->conn_ring.req_prod_pvt++;
    }

    xudd->conn_ring.rsp_cons = cons;
    if (cons != xudd->conn_ring.req_prod_pvt)
    {
      RING_FINAL_CHECK_FOR_RESPONSES(&xudd->conn_ring, more_to_do);
    }
    else
    {
      xudd->conn_ring.sring->rsp_event = cons + 1;
      more_to_do = FALSE;
    }
  }
  KeReleaseSpinLockFromDpcLevel(&xudd->conn_ring_lock);

  if (port_changed) {
    PXENUSB_PDO_DEVICE_DATA xupdd = GetXupdd(xudd->root_hub_device);
    XenUsbHub_ProcessHubInterruptEvent(xupdd->usb_device->configs[0]->interfaces[0]->endpoints[0]);
  }
      
  FUNCTION_EXIT();

  return;
}
Ejemplo n.º 11
0
/*
 * XenScsi_HwScsiInterrupt() - ScsiPort interrupt handler for vscsiif.
 *
 * Acknowledges the event channel (unless in dump mode), then drains
 * the response ring: for each response it fills in the SRB status and
 * sense data, releases the data-segment grants back to the free list,
 * adjusts the transfer length for any residual, recycles the shadow
 * entry, and completes the SRB.  Loops via
 * RING_FINAL_CHECK_FOR_RESPONSES() until no responses remain.
 *
 * Returns TRUE when the acknowledged event was the last pending
 * interrupt (as reported by EvtChn_AckEvent), FALSE otherwise.
 */
static BOOLEAN
XenScsi_HwScsiInterrupt(PVOID DeviceExtension)
{
  PXENSCSI_DEVICE_DATA xsdd = DeviceExtension;
  PSCSI_REQUEST_BLOCK Srb;
  RING_IDX i, rp;
  int j;
  vscsiif_response_t *rep;
  int more_to_do = TRUE;
  vscsiif_shadow_t *shadow;
  BOOLEAN last_interrupt = FALSE;

  XenScsi_CheckNewDevice(DeviceExtension);

  /* Not our interrupt (ack failed): let other handlers run. */
  if (!dump_mode && !xsdd->vectors.EvtChn_AckEvent(xsdd->vectors.context, xsdd->event_channel, &last_interrupt))
  {
    return FALSE;
  }

  //FUNCTION_ENTER();
  
  while (more_to_do)
  {
    rp = xsdd->ring.sring->rsp_prod;
    KeMemoryBarrier(); /* see responses up to 'rp' before reading them */
    for (i = xsdd->ring.rsp_cons; i != rp; i++)
    {
      rep = RING_GET_RESPONSE(&xsdd->ring, i);
      shadow = &xsdd->shadows[rep->rqid];
      Srb = shadow->Srb;
      Srb->ScsiStatus = (UCHAR)rep->rslt;
      /* Copy back any sense data supplied by the backend. */
      memset(Srb->SenseInfoBuffer, 0, Srb->SenseInfoBufferLength);
      if (rep->sense_len > 0 && Srb->SenseInfoBuffer != NULL)
      {
        memcpy(Srb->SenseInfoBuffer, rep->sense_buffer, min(Srb->SenseInfoBufferLength, rep->sense_len));
      }
      switch(rep->rslt)
      {
      case 0:
        //KdPrint((__DRIVER_NAME "     Xen Operation complete - result = 0x%08x, sense_len = %d, residual = %d\n", rep->rslt, rep->sense_len, rep->residual_len));
        Srb->SrbStatus = SRB_STATUS_SUCCESS;
        if (Srb->Cdb[0] == 0x03)
        {
          KdPrint((__DRIVER_NAME "     REQUEST_SENSE DataTransferLength = %d, residual = %d\n", Srb->DataTransferLength, rep->residual_len));
          //for (j = 0; j < Srb->DataTransferLength - rep->residual_len; j++)
          //  KdPrint((__DRIVER_NAME "     sense %02x: %02x\n", j, (ULONG)((PUCHAR)Srb->DataBuffer)[j]));
        }
        break;
      case 0x00010000: /* Device does not exist */
        KdPrint((__DRIVER_NAME "     Xen Operation error - cdb[0] = %02x, result = 0x%08x, sense_len = %d, residual = %d\n", (ULONG)Srb->Cdb[0], rep->rslt, rep->sense_len, rep->residual_len));
        Srb->SrbStatus = SRB_STATUS_NO_DEVICE;
        break;
      default:
        KdPrint((__DRIVER_NAME "     Xen Operation error - cdb[0] = %02x, result = 0x%08x, sense_len = %d, residual = %d\n", (ULONG)Srb->Cdb[0], rep->rslt, rep->sense_len, rep->residual_len));
        Srb->SrbStatus = SRB_STATUS_ERROR;

        //for (j = 0; j < Srb->SenseInfoBufferLength; j++)
        //  KdPrint((__DRIVER_NAME "     sense %02x: %02x\n", j, (ULONG)((PUCHAR)Srb->SenseInfoBuffer)[j]));

        if (rep->sense_len > 0 && !(Srb->SrbFlags & SRB_FLAGS_DISABLE_AUTOSENSE) && Srb->SenseInfoBuffer != NULL)
        {
          KdPrint((__DRIVER_NAME "     Doing autosense\n"));
          Srb->SrbStatus |= SRB_STATUS_AUTOSENSE_VALID;
        }
        else if (Srb->SrbFlags & SRB_FLAGS_DISABLE_AUTOSENSE)
        {
          /* Autosense disabled: stash sense data per logical unit for a
             later REQUEST_SENSE. */
          PXENSCSI_LU_DATA lud = ScsiPortGetLogicalUnit(DeviceExtension, Srb->PathId, Srb->TargetId, Srb->Lun);
          KdPrint((__DRIVER_NAME "     Autosense disabled\n"));
          if (lud != NULL)
          {
            KdPrint((__DRIVER_NAME "     Saving sense data\n"));
            lud->sense_len = rep->sense_len;
            memcpy(lud->sense_buffer, Srb->SenseInfoBuffer, lud->sense_len);
          }
        }
      }

      /* work around a bug in scsiback that gives an incorrect result to REPORT_LUNS - fail it if the output is only 8 bytes */
      if (Srb->Cdb[0] == 0xa0 && Srb->SrbStatus == SRB_STATUS_SUCCESS &&
        Srb->DataTransferLength - rep->residual_len == 8)
      {
        /* SRB_STATUS_ERROR appears to be sufficient here - no need to worry about sense data or anything */
        KdPrint((__DRIVER_NAME "     Worked around bad REPORT_LUNS emulation for %d:%d:%d\n",
          Srb->PathId, Srb->TargetId, Srb->Lun));
        Srb->SrbStatus = SRB_STATUS_ERROR;
      }
      //remaining = Srb->DataTransferLength;
      /* Return the data-segment grants to the free list. */
      for (j = 0; j < shadow->req.nr_segments; j++)
      {
        xsdd->vectors.GntTbl_EndAccess(xsdd->vectors.context, shadow->req.seg[j].gref, TRUE, (ULONG)'SCSI');
        put_grant_on_freelist(xsdd, shadow->req.seg[j].gref);
        shadow->req.seg[j].gref = 0;
      }

      if (Srb->SrbStatus == SRB_STATUS_SUCCESS && rep->residual_len)
      {
//        KdPrint((__DRIVER_NAME "     SRB_STATUS_DATA_OVERRUN DataTransferLength = %d, adjusted = %d\n",
//          Srb->DataTransferLength, Srb->DataTransferLength - rep->residual_len));
        Srb->DataTransferLength -= rep->residual_len;
        Srb->SrbStatus = SRB_STATUS_DATA_OVERRUN;
      }

      put_shadow_on_freelist(xsdd, shadow);
      ScsiPortNotification(RequestComplete, xsdd, Srb);
      if (!xsdd->scsiport_paused)
        ScsiPortNotification(NextRequest, DeviceExtension);
    }

    xsdd->ring.rsp_cons = i;
    if (i != xsdd->ring.req_prod_pvt)
    {
      RING_FINAL_CHECK_FOR_RESPONSES(&xsdd->ring, more_to_do);
    }
    else
    {
      /* Ring drained: re-arm the response event. */
      xsdd->ring.sring->rsp_event = i + 1;
      more_to_do = FALSE;
    }
  }

  //FUNCTION_EXIT();
  
  return last_interrupt;
}
Ejemplo n.º 12
0
/*
 * blkfront_aio_poll() - poll the blkfront ring for completed AIO requests.
 *
 * Consumes every pending response, ends the data-segment grants and
 * invokes the per-request aio_cb callback.  Returns the number of
 * responses consumed in this call.
 *
 * Reentrancy: aio_cb may re-enter this function; rsp_cons is advanced
 * BEFORE the callback runs, and the post-callback comparison detects
 * reentry so we never process the same slot twice.
 */
int blkfront_aio_poll(struct blkfront_dev *dev)
{
    RING_IDX rp, cons;
    struct blkif_response *rsp;
    int more;
    int nr_consumed;

moretodo:

    rp = dev->ring.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->ring.rsp_cons;

    nr_consumed = 0;
    while ((cons != rp))
    {
        struct blkfront_aiocb *aiocbp;
        int status;

	rsp = RING_GET_RESPONSE(&dev->ring, cons);
	nr_consumed++;

        /* The request id carries the aiocb pointer round-trip. */
        aiocbp = (void*) (uintptr_t) rsp->id;
        status = rsp->status;

        if (status != BLKIF_RSP_OKAY)
            minios_printk("block error %d for op %d\n", status, rsp->operation);

        switch (rsp->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        {
            int j;

            /* Release the grants covering the data segments. */
            for (j = 0; j < aiocbp->n; j++)
                gnttab_end_access(aiocbp->gref[j]);

            break;
        }

        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
            break;

        default:
            minios_printk("unrecognized block operation %d response\n", rsp->operation);
        }

        /* Advance rsp_cons before the callback: it may re-enter us. */
        dev->ring.rsp_cons = ++cons;
        /* Nota: callback frees aiocbp itself */
        if (aiocbp && aiocbp->aio_cb)
            aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
        if (dev->ring.rsp_cons != cons)
            /* We reentered, we must not continue here */
            break;
    }

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
    if (more) goto moretodo;

    return nr_consumed;
}
Ejemplo n.º 13
0
Archivo: netfe.c Proyecto: bkearns/ling
/*
 * netfe_int() - event-channel handler for a netfront interface.
 *
 * Reaps TX completions, then drains the RX ring.  Responses flagged
 * NETRXF_more_data are accumulated into a static reassembly buffer and
 * delivered as one frame when the final fragment arrives; unfragmented
 * frames are delivered directly.  Afterwards the RX ring is topped up
 * to EXT_RX_BUFFERS outstanding requests and the backend is kicked if
 * needed.
 *
 * The static reassembly state makes this function non-reentrant --
 * presumably it only ever runs in a single event-dispatch context;
 * TODO confirm.
 *
 * Fix: renamed the inner packet pointer from 'data' to 'pkt' -- it
 * shadowed the 'void *data' callback parameter, which is error-prone
 * and trips -Wshadow.
 */
static void netfe_int(uint32_t port, void *data)
{
	netfe_t *fe = (netfe_t *)data;
	assert(fe != 0);

	netfe_tx_buf_gc(fe);

	// A reponse may have NETRXF_more_data flag set. Such responses are buffered
	// instead of passing it to upper layer immediately.
	//
	static uint8_t chained_data_buffer[CHAINED_DATA_SIZE];
	static int chained_data_offset = 0;		// buffer is empty

	RING_IDX prod, cons;

try_harder:
	prod = fe->rx_ring.sring->rsp_prod;
	rmb();	// see responses up to 'prod' before reading them
	cons = fe->rx_ring.rsp_cons;

	while (cons != prod)
	{
		netif_rx_response_t *rsp = RING_GET_RESPONSE(&fe->rx_ring, cons);
		//assert(rsp->id == (cons & (NR_RX_BUFFERS -1)));
		assert(rsp->status > 0);
		//assert(rsp->offset == 0);
		assert((rsp->flags & NETRXF_extra_info) == 0);

		uint8_t *pkt = fe->rx_buffers[rsp->id];
		int data_len = rsp->status;

		/* Fragmented frame (or continuation): append to the
		 * reassembly buffer. */
		if (chained_data_offset > 0 || (rsp->flags & NETRXF_more_data))
		{
			assert(chained_data_offset +data_len <= CHAINED_DATA_SIZE);
			memcpy(chained_data_buffer +chained_data_offset, pkt, data_len);
			chained_data_offset += data_len;
		}

		/* Final (or only) fragment: deliver upward. */
		if ((rsp->flags & NETRXF_more_data) == 0)
		{
			if (chained_data_offset > 0)
			{
				netfe_incoming(fe, chained_data_buffer, chained_data_offset);
				chained_data_offset = 0;
			}
			else
				netfe_incoming(fe, pkt, data_len);
		}

		cons++;
	}
	fe->rx_ring.rsp_cons = cons;

	int more;
	RING_FINAL_CHECK_FOR_RESPONSES(&fe->rx_ring, more);
	if (more)
		goto try_harder;
	
	/* Top the ring back up to EXT_RX_BUFFERS outstanding requests. */
	int add_reqs = EXT_RX_BUFFERS - (fe->rx_ring.req_prod_pvt -fe->rx_ring.rsp_cons);
	//assert(add_reqs >= 0);

	RING_IDX req_prod = fe->rx_ring.req_prod_pvt;
	for (int i = 0; i < add_reqs; i++)
	{
		netif_rx_request_t *req = RING_GET_REQUEST(&fe->rx_ring, req_prod +i);
		req->id = (req_prod +i) & (NR_RX_BUFFERS -1); 
		req->gref = fe->rx_buf_refs[req->id];
	}

	wmb();	// make requests visible before publishing req_prod
	fe->rx_ring.req_prod_pvt = req_prod +add_reqs;

	int notify;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&fe->rx_ring, notify);
	if (notify)
		event_kick(fe->evtchn);
}