Example #1
void blkfront_sync(struct blkfront_dev *dev)
{
    unsigned long flags;
    DEFINE_WAIT(w);

    if (dev->info.mode == O_RDWR) {
        if (dev->info.barrier == 1)
            blkfront_push_operation(dev, BLKIF_OP_WRITE_BARRIER, 0);

        if (dev->info.flush == 1)
            blkfront_push_operation(dev, BLKIF_OP_FLUSH_DISKCACHE, 0);
    }

    /* Note: This won't finish if another thread enqueues requests.  */
    local_irq_save(flags);
    while (1) {
	blkfront_aio_poll(dev);
	if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
	    break;

	add_waiter(w, blkfront_queue);
	local_irq_restore(flags);
	schedule();
	local_irq_save(flags);
    }
    remove_waiter(w, blkfront_queue);
    local_irq_restore(flags);
}
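
Every Mini-OS example on this page follows the same wait-queue idiom: declare a waiter with DEFINE_WAIT, register it on a wait queue with add_waiter, re-check the wake-up condition, and call schedule() to yield until the queue is woken, then detach with remove_waiter. Stripped of the driver details, the pattern reduces to the sketch below; some_queue and condition() are illustrative placeholders, not names from the original sources.

/* Minimal sketch of the blocking pattern used by the blkfront/netfront
 * examples; some_queue and condition() are placeholders. */
static void wait_for_condition(void)
{
    unsigned long flags;
    DEFINE_WAIT(w);

    local_irq_save(flags);
    while (!condition()) {
        add_waiter(w, some_queue);  /* register before sleeping so a wake-up is not lost */
        local_irq_restore(flags);
        schedule();                 /* yield until somebody wakes some_queue */
        local_irq_save(flags);
    }
    remove_waiter(w, some_queue);
    local_irq_restore(flags);
}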
Example #2
 template< typename T >
 T readFF(GlobalAddress<FullEmpty<T>> fe_addr) {
   if (fe_addr.core() == mycore()) {
     DVLOG(2) << "local";
     return fe_addr.pointer()->readFF();
   }
   
   FullEmpty<T> result;
   auto result_addr = make_global(&result);
   
   send_message(fe_addr.core(), [fe_addr,result_addr]{
     auto& fe = *fe_addr.pointer();
     
     if (fe.full()) {
       // DVLOG(2) << "no need to block";
       fill_remote(result_addr, fe.readFF());
       return;
     }
     
     DVLOG(2) << "setting up to block (" << fe_addr << ")";
     auto* c = SuspendedDelegate::create([&fe,result_addr]{
       VLOG(0) << "suspended_delegate!";
       fill_remote(result_addr, fe.readFF());
     });
     add_waiter(&fe, c);
   });
   
   return result.readFF();
 }
Example #3
bool
EventSync::wait( UInt32 msTimeout ) const  // nofail
{
  EventWaiter waiter;
  add_waiter(&waiter);

  // check the state
  if (!get_state()) {
    pthread_mutex_lock(&waiter.lock);
    struct timespec timeout;
    struct timeval tv;
    gettimeofday(&tv, NULL);
    // convert ms to s and ns
    UInt32 s = msTimeout / 1000;
    msTimeout = msTimeout % 1000;
    UInt32 ns = msTimeout * 1000000;
    // convert timeval to timespec
    timeout.tv_nsec = tv.tv_usec * 1000;
    timeout.tv_sec = tv.tv_sec;

    // add the time
    timeout.tv_nsec += (suseconds_t)ns;
    timeout.tv_sec += (time_t)s;
    // shift the nsec to sec if overflow
    if (timeout.tv_nsec >= 1000000000) {
      timeout.tv_sec ++;
      timeout.tv_nsec -= 1000000000;
    }
    pthread_cond_timedwait(&waiter.condvar, &waiter.lock, &timeout);
    pthread_mutex_unlock(&waiter.lock);
  }
  remove_waiter(&waiter);
  return get_state();
}
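
Example #3's deadline computation is the usual conversion of a relative millisecond timeout into the absolute timespec that pthread_cond_timedwait() expects. Factored out as a helper, assuming standard POSIX gettimeofday()/timespec semantics (the function name below is illustrative), it could look like this:

#include <sys/time.h>
#include <time.h>

/* Convert a timeout of 'ms' milliseconds from now into an absolute
 * timespec suitable for pthread_cond_timedwait(). */
static void deadline_from_now(struct timespec *ts, unsigned long ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);

    ts->tv_sec  = tv.tv_sec + ms / 1000;
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;

    /* carry nanoseconds into seconds on overflow */
    if (ts->tv_nsec >= 1000000000L) {
        ts->tv_sec  += 1;
        ts->tv_nsec -= 1000000000L;
    }
}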
Example #4
File: fbfront.c  Project: farewellkou/xen
static void fbfront_out_event(struct fbfront_dev *dev, union xenfb_out_event *event)
{
    struct xenfb_page *page = dev->page;
    uint32_t prod;
    DEFINE_WAIT(w);

    add_waiter(w, fbfront_queue);
    while (page->out_prod - page->out_cons == XENFB_OUT_RING_LEN)
        schedule();
    remove_waiter(w, fbfront_queue);

    prod = page->out_prod;
    mb(); /* ensure ring space available */
    XENFB_OUT_RING_REF(page, prod) = *event;
    wmb(); /* ensure ring contents visible */
    page->out_prod = prod + 1;
    notify_remote_via_evtchn(dev->evtchn);
}
Example #5
static void blkfront_wait_slot(struct blkfront_dev *dev)
{
    /* Wait for a slot */
    if (RING_FULL(&dev->ring)) {
	unsigned long flags;
	DEFINE_WAIT(w);
	local_irq_save(flags);
	while (1) {
	    blkfront_aio_poll(dev);
	    if (!RING_FULL(&dev->ring))
		break;
	    /* Really no slot, go to sleep. */
	    add_waiter(w, blkfront_queue);
	    local_irq_restore(flags);
	    schedule();
	    local_irq_save(flags);
	}
	remove_waiter(w, blkfront_queue);
	local_irq_restore(flags);
    }
}
Example #6
static void
biothread(void *arg)
{
	DEFINE_WAIT(w);
	int i, flags, did;

	/* for the bio callback */
	rumpuser__hyp.hyp_schedule();
	rumpuser__hyp.hyp_lwproc_newlwp(0);
	rumpuser__hyp.hyp_unschedule();

	for (;;) {
		rumpuser_mutex_enter_nowrap(bio_mtx);
		while (bio_outstanding_total == 0) {
			rumpuser_cv_wait_nowrap(bio_cv, bio_mtx);
		}
		rumpuser_mutex_exit(bio_mtx);

		/*
		 * if we made any progress, recheck.  could be batched,
		 * but since currently locks are free here ... meh
		 */
		local_irq_save(flags);
		for (did = 0;;) {
			for (i = 0; i < NBLKDEV; i++) {
				if (blkdev_outstanding[i])
					did += blkfront_aio_poll(blkdevs[i]);
			}
			if (did)
				break;
			add_waiter(w, blkfront_queue);
			local_irq_restore(flags);
			schedule();
			local_irq_save(flags);
		}
		local_irq_restore(flags);
	}
}
Example #7
/* Send a message to xenbus, in the same fashion as xb_write, and
   block waiting for a reply.  The reply is malloced and should be
   freed by the caller. */
struct xsd_sockmsg *
xenbus_msg_reply(int type,
		 xenbus_transaction_t trans,
		 struct write_req *io,
		 int nr_reqs)
{
    int id;
    DEFINE_WAIT(w);
    struct xsd_sockmsg *rep;

    id = allocate_xenbus_id();
    add_waiter(w, req_info[id].waitq);

    xb_write(type, id, trans, io, nr_reqs);

    schedule();
    remove_waiter(w, req_info[id].waitq);
    wake(current);

    rep = req_info[id].reply;
    BUG_ON(rep->req_id != id);
    release_xenbus_id(id);
    return rep;
}
Example #8
void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
{
    unsigned long flags;
    DEFINE_WAIT(w);

    ASSERT(!aiocbp->aio_cb);
    aiocbp->aio_cb = blkfront_aio_cb;
    blkfront_aio(aiocbp, write);
    aiocbp->data = NULL;

    local_irq_save(flags);
    while (1) {
	blkfront_aio_poll(aiocbp->aio_dev);
	if (aiocbp->data)
	    break;

	add_waiter(w, blkfront_queue);
	local_irq_restore(flags);
	schedule();
	local_irq_save(flags);
    }
    remove_waiter(w, blkfront_queue);
    local_irq_restore(flags);
}
Example #9
File: sys.c  Project: jonludlam/xen-arm
/* Just poll without blocking */
static int select_poll(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds)
{
    int i, n = 0;
#ifdef HAVE_LWIP
    int sock_n = 0, sock_nfds = 0;
    fd_set sock_readfds, sock_writefds, sock_exceptfds;
    struct timeval timeout = { .tv_sec = 0, .tv_usec = 0};
#endif

#ifdef LIBC_VERBOSE
    static int nb;
    static int nbread[NOFILE], nbwrite[NOFILE], nbexcept[NOFILE];
    static s_time_t lastshown;

    nb++;
#endif

#ifdef HAVE_LWIP
    /* first poll network */
    FD_ZERO(&sock_readfds);
    FD_ZERO(&sock_writefds);
    FD_ZERO(&sock_exceptfds);
    for (i = 0; i < nfds; i++) {
	if (files[i].type == FTYPE_SOCKET) {
	    if (FD_ISSET(i, readfds)) {
		FD_SET(files[i].socket.fd, &sock_readfds);
		sock_nfds = i+1;
	    }
	    if (FD_ISSET(i, writefds)) {
		FD_SET(files[i].socket.fd, &sock_writefds);
		sock_nfds = i+1;
	    }
	    if (FD_ISSET(i, exceptfds)) {
		FD_SET(files[i].socket.fd, &sock_exceptfds);
		sock_nfds = i+1;
	    }
	}
    }
    if (sock_nfds > 0) {
        DEBUG("lwip_select(");
        dump_set(nfds, &sock_readfds, &sock_writefds, &sock_exceptfds, &timeout);
        DEBUG("); -> ");
        sock_n = lwip_select(sock_nfds, &sock_readfds, &sock_writefds, &sock_exceptfds, &timeout);
        dump_set(nfds, &sock_readfds, &sock_writefds, &sock_exceptfds, &timeout);
        DEBUG("\n");
    }
#endif

    /* Then see others as well. */
    for (i = 0; i < nfds; i++) {
	switch(files[i].type) {
	default:
	    if (FD_ISSET(i, readfds) || FD_ISSET(i, writefds) || FD_ISSET(i, exceptfds))
		printk("bogus fd %d in select\n", i);
	    /* Fallthrough.  */
	case FTYPE_CONSOLE:
	    if (FD_ISSET(i, readfds)) {
                if (xencons_ring_avail(files[i].cons.dev))
		    n++;
		else
		    FD_CLR(i, readfds);
            }
	    if (FD_ISSET(i, writefds))
                n++;
	    FD_CLR(i, exceptfds);
	    break;
#ifdef CONFIG_XENBUS
	case FTYPE_XENBUS:
	    if (FD_ISSET(i, readfds)) {
                if (files[i].xenbus.events)
		    n++;
		else
		    FD_CLR(i, readfds);
	    }
	    FD_CLR(i, writefds);
	    FD_CLR(i, exceptfds);
	    break;
#endif
	case FTYPE_EVTCHN:
	case FTYPE_TAP:
	case FTYPE_BLK:
	case FTYPE_KBD:
	case FTYPE_FB:
	    if (FD_ISSET(i, readfds)) {
		if (files[i].read)
		    n++;
		else
		    FD_CLR(i, readfds);
	    }
	    FD_CLR(i, writefds);
	    FD_CLR(i, exceptfds);
	    break;
#ifdef HAVE_LWIP
	case FTYPE_SOCKET:
	    if (FD_ISSET(i, readfds)) {
	        /* Optimize no-network-packet case.  */
		if (sock_n && FD_ISSET(files[i].socket.fd, &sock_readfds))
		    n++;
		else
		    FD_CLR(i, readfds);
	    }
            if (FD_ISSET(i, writefds)) {
		if (sock_n && FD_ISSET(files[i].socket.fd, &sock_writefds))
		    n++;
		else
		    FD_CLR(i, writefds);
            }
            if (FD_ISSET(i, exceptfds)) {
		if (sock_n && FD_ISSET(files[i].socket.fd, &sock_exceptfds))
		    n++;
		else
		    FD_CLR(i, exceptfds);
            }
	    break;
#endif
	}
#ifdef LIBC_VERBOSE
	if (FD_ISSET(i, readfds))
	    nbread[i]++;
	if (FD_ISSET(i, writefds))
	    nbwrite[i]++;
	if (FD_ISSET(i, exceptfds))
	    nbexcept[i]++;
#endif
    }
#ifdef LIBC_VERBOSE
    if (NOW() > lastshown + 1000000000ull) {
	lastshown = NOW();
	printk("%lu MB free, ", num_free_pages() / ((1 << 20) / PAGE_SIZE));
	printk("%d(%d): ", nb, sock_n);
	for (i = 0; i < nfds; i++) {
	    if (nbread[i] || nbwrite[i] || nbexcept[i])
		printk(" %d(%c):", i, file_types[files[i].type]);
	    if (nbread[i])
	    	printk(" %dR", nbread[i]);
	    if (nbwrite[i])
		printk(" %dW", nbwrite[i]);
	    if (nbexcept[i])
		printk(" %dE", nbexcept[i]);
	}
	printk("\n");
	memset(nbread, 0, sizeof(nbread));
	memset(nbwrite, 0, sizeof(nbwrite));
	memset(nbexcept, 0, sizeof(nbexcept));
	nb = 0;
    }
#endif
    return n;
}

/* The strategy is to
 * - announce that we may sleep,
 * - poll a bit; if successful, return,
 * - if the timeout has expired, return,
 * - really sleep (unless somebody woke us in the meantime). */
int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
	struct timeval *timeout)
{
    int n, ret;
    fd_set myread, mywrite, myexcept;
    struct thread *thread = get_current();
    s_time_t start = NOW(), stop;
#ifdef CONFIG_NETFRONT
    DEFINE_WAIT(netfront_w);
#endif
    DEFINE_WAIT(event_w);
#ifdef CONFIG_BLKFRONT
    DEFINE_WAIT(blkfront_w);
#endif
#ifdef CONFIG_XENBUS
    DEFINE_WAIT(xenbus_watch_w);
#endif
#ifdef CONFIG_KBDFRONT
    DEFINE_WAIT(kbdfront_w);
#endif
    DEFINE_WAIT(console_w);

    assert(thread == main_thread);

    DEBUG("select(%d, ", nfds);
    dump_set(nfds, readfds, writefds, exceptfds, timeout);
    DEBUG(");\n");

    if (timeout)
	stop = start + SECONDS(timeout->tv_sec) + timeout->tv_usec * 1000;
    else
	/* just make gcc happy */
	stop = start;

    /* Tell people we're going to sleep before looking at what they are
     * saying, hence letting them wake us if events happen between here and
     * schedule() */
#ifdef CONFIG_NETFRONT
    add_waiter(netfront_w, netfront_queue);
#endif
    add_waiter(event_w, event_queue);
#ifdef CONFIG_BLKFRONT
    add_waiter(blkfront_w, blkfront_queue);
#endif
#ifdef CONFIG_XENBUS
    add_waiter(xenbus_watch_w, xenbus_watch_queue);
#endif
#ifdef CONFIG_KBDFRONT
    add_waiter(kbdfront_w, kbdfront_queue);
#endif
    add_waiter(console_w, console_queue);

    if (readfds)
        myread = *readfds;
    else
        FD_ZERO(&myread);
    if (writefds)
        mywrite = *writefds;
    else
        FD_ZERO(&mywrite);
    if (exceptfds)
        myexcept = *exceptfds;
    else
        FD_ZERO(&myexcept);

    DEBUG("polling ");
    dump_set(nfds, &myread, &mywrite, &myexcept, timeout);
    DEBUG("\n");
    n = select_poll(nfds, &myread, &mywrite, &myexcept);

    if (n) {
	dump_set(nfds, readfds, writefds, exceptfds, timeout);
	if (readfds)
	    *readfds = myread;
	if (writefds)
	    *writefds = mywrite;
	if (exceptfds)
	    *exceptfds = myexcept;
	DEBUG(" -> ");
	dump_set(nfds, readfds, writefds, exceptfds, timeout);
	DEBUG("\n");
	wake(thread);
	ret = n;
	goto out;
    }
    if (timeout && NOW() >= stop) {
	if (readfds)
	    FD_ZERO(readfds);
	if (writefds)
	    FD_ZERO(writefds);
	if (exceptfds)
	    FD_ZERO(exceptfds);
	timeout->tv_sec = 0;
	timeout->tv_usec = 0;
	wake(thread);
	ret = 0;
	goto out;
    }

    if (timeout)
	thread->wakeup_time = stop;
    schedule();

    if (readfds)
        myread = *readfds;
    else
        FD_ZERO(&myread);
    if (writefds)
        mywrite = *writefds;
    else
        FD_ZERO(&mywrite);
    if (exceptfds)
        myexcept = *exceptfds;
    else
        FD_ZERO(&myexcept);

    n = select_poll(nfds, &myread, &mywrite, &myexcept);

    if (n) {
	if (readfds)
	    *readfds = myread;
	if (writefds)
	    *writefds = mywrite;
	if (exceptfds)
	    *exceptfds = myexcept;
	ret = n;
	goto out;
    }
    errno = EINTR;
    ret = -1;

out:
#ifdef CONFIG_NETFRONT
    remove_waiter(netfront_w, netfront_queue);
#endif
    remove_waiter(event_w, event_queue);
#ifdef CONFIG_BLKFRONT
    remove_waiter(blkfront_w, blkfront_queue);
#endif
#ifdef CONFIG_XENBUS
    remove_waiter(xenbus_watch_w, xenbus_watch_queue);
#endif
#ifdef CONFIG_KBDFRONT
    remove_waiter(kbdfront_w, kbdfront_queue);
#endif
    remove_waiter(console_w, console_queue);
    return ret;
}
Example #10
File: sys.c  Project: jonludlam/xen-arm
int read(int fd, void *buf, size_t nbytes)
{
    switch (files[fd].type) {
        case FTYPE_SAVEFILE:
	case FTYPE_CONSOLE: {
	    int ret;
            DEFINE_WAIT(w);
            while(1) {
                add_waiter(w, console_queue);
                ret = xencons_ring_recv(files[fd].cons.dev, buf, nbytes);
                if (ret)
                    break;
                schedule();
            }
            remove_waiter(w, console_queue);
            return ret;
        }
#ifdef HAVE_LWIP
	case FTYPE_SOCKET:
	    return lwip_read(files[fd].socket.fd, buf, nbytes);
#endif
#ifdef CONFIG_NETFRONT
	case FTYPE_TAP: {
	    ssize_t ret;
	    ret = netfront_receive(files[fd].tap.dev, buf, nbytes);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret;
	}
#endif
#ifdef CONFIG_KBDFRONT
        case FTYPE_KBD: {
            int ret, n;
            n = nbytes / sizeof(union xenkbd_in_event);
            ret = kbdfront_receive(files[fd].kbd.dev, buf, n);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret * sizeof(union xenkbd_in_event);
        }
#endif
#ifdef CONFIG_FBFRONT
        case FTYPE_FB: {
            int ret, n;
            n = nbytes / sizeof(union xenfb_in_event);
            ret = fbfront_receive(files[fd].fb.dev, buf, n);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret * sizeof(union xenfb_in_event);
        }
#endif
#ifdef CONFIG_BLKFRONT
        case FTYPE_BLK: {
	    return blkfront_posix_read(fd, buf, nbytes);
        }
#endif
#ifdef CONFIG_TPMFRONT
        case FTYPE_TPMFRONT: {
	    return tpmfront_posix_read(fd, buf, nbytes);
        }
#endif
#ifdef CONFIG_TPM_TIS
        case FTYPE_TPM_TIS: {
	    return tpm_tis_posix_read(fd, buf, nbytes);
        }
#endif
	default:
	    break;
    }
    printk("read(%d): Bad descriptor\n", fd);
    errno = EBADF;
    return -1;
}
Example #11
File: netfront.c  Project: cnplab/mini-os
/**
 * Transmit function for pbufs which can handle checksum and segmentation offloading for TCPv4 and TCPv6
 */
err_t netfront_xmit_pbuf(struct netfront_dev *dev, struct pbuf *p, int co_type, int push)
{
	struct netif_tx_request *first_tx;
	struct netif_extra_info *gso;
	int slots;
	int used = 0;
#ifdef CONFIG_NETFRONT_GSO
	int sego;
#endif /* CONFIG_NETFRONT_GSO */
#ifdef CONFIG_NETFRONT_WAITFORTX
	unsigned long flags;
	DEFINE_WAIT(w);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Counts how many slots we require for this buf */
	slots = netfront_count_pbuf_slots(dev, p);
#ifdef CONFIG_NETFRONT_GSO
#if TCP_GSO /* GSO flag is only available if lwIP is built with GSO support */
	sego = (p->flags & PBUF_FLAG_GSO) ? 1 : 0;
#else
	sego = 0;
#endif
	/* GSO requires checksum offloading set */
	BUG_ON(sego && !(co_type & (XEN_NETIF_GSO_TYPE_TCPV4 | XEN_NETIF_GSO_TYPE_TCPV6)));
#endif /* CONFIG_NETFRONT_GSO */

	/* Checks if there are enough requests for this many slots (gso requires one slot more) */
#ifdef CONFIG_NETFRONT_GSO
	BUG_ON(!netfront_tx_possible(dev, slots + sego));
#else
	BUG_ON(!netfront_tx_possible(dev, slots));
#endif /* CONFIG_NETFRONT_GSO */

#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_save(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */
#ifdef CONFIG_NETFRONT_GSO
	if (unlikely(!netfront_tx_available(dev, slots + sego))) {
#else
	if (unlikely(!netfront_tx_available(dev, slots))) {
#endif /* CONFIG_NETFRONT_GSO */
		netfront_xmit_push(dev);
#ifdef CONFIG_NETFRONT_WAITFORTX
 try_again:
#ifdef CONFIG_NETFRONT_GSO
		if (!netfront_tx_available(dev, slots + sego)) {
#else
		if (!netfront_tx_available(dev, slots)) {
#endif /* CONFIG_NETFRONT_GSO */
#ifndef CONFIG_NETFRONT_WAITFORTX_BUSYLOOP
			add_waiter(w, netfront_txqueue); /* release thread until space is free'd */
			local_irq_restore(flags);
			schedule();
			local_irq_save(flags);
#endif /* !CONFIG_NETFRONT_WAITFORTX_BUSYLOOP */
			netfront_tx_buf_gc(dev);
			goto try_again;
		}
		remove_waiter(w, netfront_txqueue); /* done waiting for free space */
#else
		return ERR_MEM;
#endif /* CONFIG_NETFRONT_WAITFORTX */
	}
#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_restore(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Set extras if packet is GSO kind */
	first_tx = netfront_get_page(dev);
	ASSERT(first_tx != NULL);
#if defined CONFIG_NETFRONT_GSO && TCP_GSO
	if (sego) {
		gso = (struct netif_extra_info *) RING_GET_REQUEST(&dev->tx, dev->tx.req_prod_pvt++);

		first_tx->flags |= NETTXF_extra_info;
		gso->u.gso.size = p->gso_size; /* segmentation size */
		gso->u.gso.type = co_type; /* XEN_NETIF_GSO_TYPE_TCPV4, XEN_NETIF_GSO_TYPE_TCPV6 */
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;

		used++;
	}
#endif /* CONFIG_NETFRONT_GSO */

	/* Make TX requests for the pbuf */
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	netfront_make_txreqs_pgnt(dev, first_tx, p, &used);
#else
	netfront_make_txreqs(dev, first_tx, p, &used);
#endif
	ASSERT(slots >= used); /* we should have taken at most the number of slots that we estimated before */
	ASSERT(slots <= XEN_NETIF_NR_SLOTS_MIN); /* we should never take more slots than the backend supports */

	/* partially checksummed (offload enabled), or checksummed */
	first_tx->flags |= co_type ? ((NETTXF_csum_blank) | (NETTXF_data_validated)) : (NETTXF_data_validated);

	push |= (((dev)->tx.req_prod_pvt - (dev)->tx.rsp_cons) <= NET_TX_RING_SIZE / 2);
	if (push)
		netfront_xmit_push(dev);

#ifdef CONFIG_NETFRONT_STATS
	++dev->txpkts;
	dev->txbytes += p->tot_len;
#endif
	dprintk("tx: %c%c%c %u bytes (%u slots)\n", sego ? 'S' : '-', co_type ? 'C' : '-', push ? 'P' : '-', p->tot_len, slots);
	return ERR_OK;
}

void netfront_xmit_push(struct netfront_dev *dev)
{
	int flags;

	netfront_xmit_notify(dev);

	/* Collects any outstanding responses for more requests */
	local_irq_save(flags);
	netfront_tx_buf_gc(dev);
	local_irq_restore(flags);
}

void netfront_set_rx_pbuf_handler(struct netfront_dev *dev,
				  void (*thenetif_rx)(struct pbuf *p, void *arg),
				  void *arg)
{
	if (dev->netif_rx_pbuf && dev->netif_rx_pbuf != netif_rx_pbuf)
		printk("Replacing netif_rx_pbuf handler for dev %s\n", dev->nodename);

	dev->netif_rx = NULL;
	dev->netif_rx_pbuf = thenetif_rx;
	dev->netif_rx_arg = arg;
}
#endif

static void free_netfront(struct netfront_dev *dev)
{
	int i;
	int separate_tx_rx_irq = (dev->tx_evtchn != dev->rx_evtchn);

	free(dev->mac);
	free(dev->backend);

#ifdef CONFIG_NETMAP
	if (dev->netmap)
		return;
#endif

	for(i=0; i<NET_TX_RING_SIZE; i++)
		down(&dev->tx_sem);

	mask_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		mask_evtchn(dev->rx_evtchn);

	gnttab_end_access(dev->rx_ring_ref);
	gnttab_end_access(dev->tx_ring_ref);

	free_page(dev->rx.sring);
	free_page(dev->tx.sring);

	unbind_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		unbind_evtchn(dev->rx_evtchn);

#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	for(i=0; i<NET_RX_RING_SIZE; i++) {
		if (dev->rx_buffers[i].page) {
			gnttab_end_access(dev->rx_buffers[i].gref);
			free_page(dev->rx_buffers[i].page);
		}
	}
#else
	for(i=0; i<NET_RX_BUFFERS; i++) {
		if (dev->rx_buffer_pool[i].page) {
			if (dev->rx_buffer_pool[i].gref != GRANT_INVALID_REF)
				gnttab_end_access(dev->rx_buffer_pool[i].gref);
			free_page(dev->rx_buffer_pool[i].page);
		}
	}
#endif

#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY
	for(i=0; i<NET_TX_RING_SIZE; i++) {
		if (dev->tx_buffers[i].page) {
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
			if (dev->tx_buffers[i].gref != GRANT_INVALID_REF)
#endif
			gnttab_end_access(dev->tx_buffers[i].gref);
			free_page(dev->tx_buffers[i].page);
		}
	}
#endif
}
Example #12
/* The strategy is to
 * - announce that we may sleep,
 * - poll a bit; if successful, return,
 * - if the timeout has expired, return,
 * - really sleep (unless somebody woke us in the meantime). */
int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
	struct timeval *timeout)
{
    int n, ret;
    fd_set myread, mywrite, myexcept;
    struct thread *thread = get_current();
    s_time_t start = NOW(), stop;
    DEFINE_WAIT(w1);
    DEFINE_WAIT(w2);
    DEFINE_WAIT(w3);
    DEFINE_WAIT(w4);
    DEFINE_WAIT(w5);
    DEFINE_WAIT(w6);
    int n_sockets;
    int n_non_sockets;

    assert(thread == main_thread);

    n_sockets = 0;
    n_non_sockets = 0;
    for (n = 0; n <= nfds; n++) {
	if ((readfds && FD_ISSET(n, readfds)) ||
	    (writefds && FD_ISSET(n, writefds)) ||
	    (exceptfds && FD_ISSET(n, exceptfds))) {
	    if (files[n].type == FTYPE_SOCKET)
		n_sockets++;
	    else
		n_non_sockets++;
	}
    }
    if (n_sockets != 0 && n_non_sockets != 0) {
	static int cntr;
	if (cntr < 1000) {
	    printk("WARNING: select combining socket and non-socket FDs (n = %d vs %d); warning %d/1000\n",
		   n_sockets, n_non_sockets, cntr);
	    cntr++;
	}
    }
    if (n_non_sockets == 0)
	return do_lwip_select(nfds, readfds, writefds, exceptfds, timeout);

    if (timeout)
	stop = start + SECONDS(timeout->tv_sec) + timeout->tv_usec * 1000;
    else
	/* just make gcc happy */
	stop = start;

    /* Tell people we're going to sleep before looking at what they are
     * saying, hence letting them wake us if events happen between here and
     * schedule() */
    add_waiter(w1, netfront_queue);
    add_waiter(w2, event_queue);
    add_waiter(w3, blkfront_queue);
    add_waiter(w4, xenbus_watch_queue);
    add_waiter(w5, kbdfront_queue);
    add_waiter(w6, console_queue);

    if (readfds)
        myread = *readfds;
    else
        FD_ZERO(&myread);
    if (writefds)
        mywrite = *writefds;
    else
        FD_ZERO(&mywrite);
    if (exceptfds)
        myexcept = *exceptfds;
    else
        FD_ZERO(&myexcept);

    DEBUG("polling ");
    dump_set(nfds, &myread, &mywrite, &myexcept, timeout);
    DEBUG("\n");
    n = select_poll(nfds, &myread, &mywrite, &myexcept);

    if (n) {
	dump_set(nfds, readfds, writefds, exceptfds, timeout);
	if (readfds)
	    *readfds = myread;
	if (writefds)
	    *writefds = mywrite;
	if (exceptfds)
	    *exceptfds = myexcept;
	DEBUG(" -> ");
	dump_set(nfds, readfds, writefds, exceptfds, timeout);
	DEBUG("\n");
	wake(thread);
	ret = n;
	goto out;
    }
    if (timeout && NOW() >= stop) {
	if (readfds)
	    FD_ZERO(readfds);
	if (writefds)
	    FD_ZERO(writefds);
	if (exceptfds)
	    FD_ZERO(exceptfds);
	timeout->tv_sec = 0;
	timeout->tv_usec = 0;
	wake(thread);
	ret = 0;
	goto out;
    }

    if (timeout)
	thread->wakeup_time = stop;
    schedule();

    if (readfds)
        myread = *readfds;
    else
        FD_ZERO(&myread);
    if (writefds)
        mywrite = *writefds;
    else
        FD_ZERO(&mywrite);
    if (exceptfds)
        myexcept = *exceptfds;
    else
        FD_ZERO(&myexcept);

    n = select_poll(nfds, &myread, &mywrite, &myexcept);

    if (n) {
	if (readfds)
	    *readfds = myread;
	if (writefds)
	    *writefds = mywrite;
	if (exceptfds)
	    *exceptfds = myexcept;
	ret = n;
	goto out;
    }
    errno = EINTR;
    ret = -1;

out:
    remove_waiter(w1);
    remove_waiter(w2);
    remove_waiter(w3);
    remove_waiter(w4);
    remove_waiter(w5);
    remove_waiter(w6);
    return ret;
}
Example #13
int read(int fd, void *buf, size_t nbytes)
{
    if (fd < 0 || fd >= NOFILE) {
	errno = EBADF;
	return -1;
    }
    switch (files[fd].type) {
        case FTYPE_SAVEFILE:
	case FTYPE_CONSOLE: {
	    int ret;
            DEFINE_WAIT(w);
            while(1) {
                add_waiter(w, console_queue);
                ret = xencons_ring_recv(files[fd].cons.dev, buf, nbytes);
                if (ret)
                    break;
                schedule();
            }
            remove_waiter(w);
            return ret;
        }
#ifdef HAVE_LWIP
	case FTYPE_SOCKET:
	    return lwip_read(files[fd].socket.fd, buf, nbytes);
#endif
	case FTYPE_TAP: {
	    ssize_t ret;
	    ret = netfront_receive(files[fd].tap.dev, buf, nbytes);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret;
	}
        case FTYPE_KBD: {
            int ret, n;
            n = nbytes / sizeof(union xenkbd_in_event);
            ret = kbdfront_receive(files[fd].kbd.dev, buf, n);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret * sizeof(union xenkbd_in_event);
        }
        case FTYPE_FB: {
            int ret, n;
            n = nbytes / sizeof(union xenfb_in_event);
            ret = fbfront_receive(files[fd].fb.dev, buf, n);
	    if (ret <= 0) {
		errno = EAGAIN;
		return -1;
	    }
	    return ret * sizeof(union xenfb_in_event);
        }
	case FTYPE_COMPILED_FILE: {
	    int n;
	    if (files[fd].compiled_file.offset >= files[fd].compiled_file.size)
		    n = 0;
	    else
		    n = files[fd].compiled_file.size - files[fd].compiled_file.offset;
	    if (n >= nbytes)
		    n = nbytes;
	    printf("Request %d on %d, get %d\n", nbytes, fd, n);
	    memcpy(buf, files[fd].compiled_file.content + files[fd].compiled_file.offset, n);
	    files[fd].compiled_file.offset += n;
	    return n;
	}
	default:
	    break;
    }
    printk("read(%d): Bad descriptor\n", fd);
    errno = EBADF;
    return -1;
}