Example #1
void xencons_suspend(struct consfront_dev *dev)
{
    if (!dev) {
        mask_evtchn(start_info.console.domU.evtchn);
    } else {
        mask_evtchn(dev->evtchn);
    }
}
Example #2
static int evtchn_release(struct inode *inode, struct file *filp)
{
	int i;
	struct per_user_data *u = filp->private_data;
	struct evtchn_close close;

	spin_lock_irq(&port_user_lock);

	free_page((unsigned long)u->ring);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		int ret;
		if (port_user[i] != u)
			continue;

		port_user[i] = NULL;
		mask_evtchn(i);
		rebind_evtchn_to_cpu(i, 0);

		close.port = i;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		BUG_ON(ret);
	}

	spin_unlock_irq(&port_user_lock);

	kfree(u);

	return 0;
}
Example #3
void evtchn_device_upcall(int port)
{
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	mask_evtchn(port);
	clear_evtchn(port);

	if ((u = port_user[port]) != NULL) {
		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
			wmb(); /* Ensure ring contents visible */
			if (u->ring_cons == u->ring_prod++) {
				wake_up_interruptible(&u->evtchn_wait);
				kill_fasync(&u->evtchn_async_queue,
					    SIGIO, POLL_IN);
			}
		} else {
			u->ring_overflow = 1;
		}
	}

	spin_unlock(&port_user_lock);
}
Example #4
static void free_netfront(struct netfront_dev *dev)
{
    int i;

    free(dev->mac);
    free(dev->backend);
#ifdef CONFIG_NETMAP
    if (dev->netmap)
        return;
#endif
    for(i=0;i<NET_TX_RING_SIZE;i++)
	down(&dev->tx_sem);

    mask_evtchn(dev->evtchn);

    gnttab_end_access(dev->rx_ring_ref);
    gnttab_end_access(dev->tx_ring_ref);

    free_page(dev->rx.sring);
    free_page(dev->tx.sring);

    unbind_evtchn(dev->evtchn);

    for(i=0;i<NET_RX_RING_SIZE;i++) {
	gnttab_end_access(dev->rx_buffers[i].gref);
	free_page(dev->rx_buffers[i].page);
    }

    for(i=0;i<NET_TX_RING_SIZE;i++) {
	if (dev->tx_buffers[i].page) {
	    gnttab_end_access(dev->tx_buffers[i].gref);
	    free_page(dev->tx_buffers[i].page);
	}
    }
}
Example #5
void unbind_all_ports(void)
{
    int i;
    int cpu = 0;
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
    int rc;

    for ( i = 0; i < NR_EVS; i++ )
    {
        if ( i == start_info.console.domU.evtchn ||
             i == start_info.store_evtchn)
            continue;

        if ( test_and_clear_bit(i, bound_ports) )
        {
            struct evtchn_close close;
            printk("port %d still bound!\n", i);
            mask_evtchn(i);
            close.port = i;
            rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
            if ( rc )
                printk("WARN: close_port %s failed rc=%d. ignored\n", i, rc);
            clear_evtchn(i);
        }
    }
    vcpu_info->evtchn_upcall_pending = 0;
    vcpu_info->evtchn_pending_sel = 0;
}
Example #6
static void free_netfront(struct netfront_dev *dev)
{
    int i;

    for(i=0;i<NET_TX_RING_SIZE;i++)
	down(&dev->tx_sem);

    mask_evtchn(dev->evtchn);

    free(dev->mac);
    free(dev->backend);

    gnttab_end_access(dev->rx_ring_ref);
    gnttab_end_access(dev->tx_ring_ref);

    free_page(dev->rx.sring);
    free_page(dev->tx.sring);

    unbind_evtchn(dev->evtchn);

    for(i=0;i<NET_RX_RING_SIZE;i++) {
	gnttab_end_access(dev->rx_buffers[i].gref);
	free_page(dev->rx_buffers[i].page);
    }

    for(i=0;i<NET_TX_RING_SIZE;i++)
	if (dev->tx_buffers[i].page)
	    free_page(dev->tx_buffers[i].page);

    free(dev->nodename);
    free(dev);
}
Example #7
File: evtchn.c Project: zhoupeng/spice4xen
static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}
Example #8
File: events.c Project: HackLinux/xen-4.5
void arch_unbind_ports(void)
{
    if(debug_port != -1)
    {
        mask_evtchn(debug_port);
        unbind_evtchn(debug_port);
    }
}
Example #9
File: evtchn.c Project: zhoupeng/spice4xen
static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
Example #10
void unbind_evtchn(evtchn_port_t port)
{
    struct evtchn_close close;
    int rc;

    mask_evtchn(port);
    clear_evtchn(port);

    close.port = port;
    rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
    if ( rc )
        printk("WARN: close_port %s failed rc=%d. ignored\n", port, rc);
}
Example #11
File: fbfront.c Project: farewellkou/xen
static void free_kbdfront(struct kbdfront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    free_page(dev->page);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
Example #12
/*
 * Initially all events are without a handler and disabled
 */
void init_events(void)
{
    int i;

    /* initialize event handler */
    for ( i = 0; i < NR_EVS; i++ )
    {
        ev_actions[i].handler = default_handler;
        mask_evtchn(i);
    }

    arch_init_events();
}
Example #13
static void free_blkfront(struct blkfront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    gnttab_end_access(dev->ring_ref);
    free_page(dev->ring.sring);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
Example #14
static void free_pcifront(struct pcifront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    gnttab_end_access(dev->info_ref);
    free_page(dev->info);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
Example #15
File: xc_minios.c Project: amodj/Utopia
static void evtchn_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    int xce_handle = (intptr_t) data;
    int i;
    assert(files[xce_handle].type == FTYPE_EVTCHN);
    mask_evtchn(port);
    for (i= 0; i < MAX_EVTCHN_PORTS; i++)
	if (files[xce_handle].evtchn.ports[i].port == port)
	    break;
    if (i == MAX_EVTCHN_PORTS) {
	printk("Unknown port for handle %d\n", xce_handle);
	return;
    }
    files[xce_handle].evtchn.ports[i].pending = 1;
    files[xce_handle].read = 1;
    wake_up(&event_queue);
}
Example #16
File: tpmfront.c Project: dzan/xenOnArm
static int tpmfront_connect(struct tpmfront_dev* dev)
{
   char* err;
   /* Create shared page */
   dev->page = (vtpm_shared_page_t*) alloc_page();
   if(dev->page == NULL) {
      TPMFRONT_ERR("Unable to allocate page for shared memory\n");
      goto error;
   }
   memset(dev->page, 0, PAGE_SIZE);
   dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->page), 0);
   TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref);

   /*Create event channel */
   if(evtchn_alloc_unbound(dev->bedomid, tpmfront_handler, dev, &dev->evtchn)) {
      TPMFRONT_ERR("Unable to allocate event channel\n");
      goto error_postmap;
   }
   unmask_evtchn(dev->evtchn);
   TPMFRONT_DEBUG("event channel is %lu\n", (unsigned long) dev->evtchn);

   /* Write the entries to xenstore */
   if(publish_xenbus(dev)) {
      goto error_postevtchn;
   }

   /* Change state to connected */
   dev->state = XenbusStateConnected;

   /* Tell the backend that we are ready */
   if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", dev->state))) {
      TPMFRONT_ERR("Unable to write to xenstore %s/state, value=%u", dev->nodename, XenbusStateConnected);
      free(err);
      goto error;
   }

   return 0;
error_postevtchn:
      mask_evtchn(dev->evtchn);
      unbind_evtchn(dev->evtchn);
error_postmap:
      gnttab_end_access(dev->ring_ref);
      free_page(dev->page);
error:
   return -1;
}
Example #17
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = IRQ_UNBOUND;
}
Example #18
void unbind_evtchn(evtchn_port_t port) {
	struct evtchn_close close;
	int rc;

	if (ev_actions[port].handler == default_handler)
		printk("WARN: No handler for port %d when unbinding\n", port);
	mask_evtchn(port);
	clear_evtchn(port);

	ev_actions[port].handler = default_handler;
	wmb();
	ev_actions[port].data = NULL;
	clear_bit(port, bound_ports);

	close.port = port;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (rc)
		printk("WARN: close_port %s failed rc=%d. ignored\n", port, rc);

}
Example #19
void 
evtchn_do_upcall(struct trapframe *frame) 
{
	unsigned long  l1, l2;
	unsigned int   l1i, l2i, port;
	int            irq, cpu;
	shared_info_t *s;
	vcpu_info_t   *vcpu_info;
	
	cpu = PCPU_GET(cpuid);
	s = HYPERVISOR_shared_info;
	vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1 << l1i);
		
		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * LONG_BIT) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1) {
				struct intsrc *isrc = intr_lookup_source(irq);
				/* 
				 * ack 
				 */
				mask_evtchn(port);
				clear_evtchn(port); 

				intr_execute_handlers(isrc, frame);
			} else {
				evtchn_device_upcall(port);
			}
		}
	}
}
Example #20
void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_DYNIRQS; i++) {
		irq_bindcount[dynirq_to_irq(i)] = 0;

		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
		irq_desc[dynirq_to_irq(i)].action  = NULL;
		irq_desc[dynirq_to_irq(i)].depth   = 1;
		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
	}

	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
	for (i = 0; i < NR_PIRQS; i++) {
		irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !is_initial_xendomain())
			continue;
#endif

		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
		irq_desc[pirq_to_irq(i)].action  = NULL;
		irq_desc[pirq_to_irq(i)].depth   = 1;
		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
	}
}
Example #21
void free_consfront(struct consfront_dev *dev)
{
    char* err = NULL;
    XenbusState state;

    char path[strlen(dev->backend) + 1 + 5 + 1];
    char nodename[strlen(dev->nodename) + 1 + 5 + 1];

    snprintf(path, sizeof(path), "%s/state", dev->backend);
    snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

    if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing)) != NULL) {
        printk("free_consfront: error changing state to %d: %s\n",
                XenbusStateClosing, err);
        goto close;
    }
    state = xenbus_read_integer(path);
    while (err == NULL && state < XenbusStateClosing)
        err = xenbus_wait_for_state_change(path, &state, &dev->events);
    if (err) free(err);

    if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed)) != NULL) {
        printk("free_consfront: error changing state to %d: %s\n",
                XenbusStateClosed, err);
        goto close;
    }

close:
    if (err) free(err);
    xenbus_unwatch_path_token(XBT_NIL, path, path);

    mask_evtchn(dev->evtchn);
    unbind_evtchn(dev->evtchn);
    free(dev->backend);
    free(dev->nodename);

    gnttab_end_access(dev->ring_ref);

    free_page(dev->ring);
    free(dev);
}
Example #22
void 
evtchn_device_upcall(int port)
{
	mtx_lock(&upcall_lock);

	mask_evtchn(port);
	clear_evtchn(port);

	if ( ring != NULL ) {
		if ( (ring_prod - ring_cons) < EVTCHN_RING_SIZE ) {
			ring[EVTCHN_RING_MASK(ring_prod)] = (uint16_t)port;
			if ( ring_cons == ring_prod++ ) {
				wakeup(evtchn_waddr);
			}
		}
		else {
			ring_overflow = 1;
		}
	}

	mtx_unlock(&upcall_lock);
}
Example #23
static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = bind_virq.port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom  = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = bind_interdomain.local_port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = alloc_unbound.port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct evtchn_close close;
		int ret;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= NR_EVENT_CHANNELS)
			break;

		spin_lock_irq(&port_user_lock);
    
		rc = -ENOTCONN;
		if (port_user[unbind.port] != u) {
			spin_unlock_irq(&port_user_lock);
			break;
		}

		port_user[unbind.port] = NULL;
		mask_evtchn(unbind.port);
		rebind_evtchn_to_cpu(unbind.port, 0);

		spin_unlock_irq(&port_user_lock);

		close.port = unbind.port;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		BUG_ON(ret);

		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		if (notify.port >= NR_EVENT_CHANNELS) {
			rc = -EINVAL;
		} else if (port_user[notify.port] != u) {
			rc = -ENOTCONN;
		} else {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&port_user_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&port_user_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}

	return rc;
}
Example #24
void irq_resume(void)
{
	struct evtchn_bind_virq bind_virq;
	struct evtchn_bind_ipi  bind_ipi;
	int cpu, pirq, virq, ipi, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++)
		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		for (virq = 0; virq < NR_VIRQS; virq++)
			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
		for (ipi = 0; ipi < NR_IPIS; ipi++)
			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
	}

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = 0;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}

	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = 0;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
Example #25
File: tpmfront.c Project: dzan/xenOnArm
void shutdown_tpmfront(struct tpmfront_dev* dev)
{
   char* err;
   char path[512];
   if(dev == NULL) {
      return;
   }
   TPMFRONT_LOG("Shutting down tpmfront\n");
   /* disconnect */
   if(dev->state == XenbusStateConnected) {
      /* Tell backend we are closing */
      dev->state = XenbusStateClosing;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Clean up xenstore entries */
      snprintf(path, 512, "%s/event-channel", dev->nodename);
      if((err = xenbus_rm(XBT_NIL, path))) {
	 free(err);
      }
      snprintf(path, 512, "%s/ring-ref", dev->nodename);
      if((err = xenbus_rm(XBT_NIL, path))) {
	 free(err);
      }

      /* Tell backend we are closed */
      dev->state = XenbusStateClosed;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Wait for the backend to close and unmap shared pages, ignore any errors */
      wait_for_backend_state_changed(dev, XenbusStateClosed);

      /* Prepare for a later reopen (possibly by a kexec'd kernel) */
      dev->state = XenbusStateInitialising;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Close event channel and unmap shared page */
      mask_evtchn(dev->evtchn);
      unbind_evtchn(dev->evtchn);
      gnttab_end_access(dev->ring_ref);

      free_page(dev->page);
   }

   /* Cleanup memory usage */
   if(dev->respbuf) {
      free(dev->respbuf);
   }
   if(dev->bepath) {
      free(dev->bepath);
   }
   if(dev->nodename) {
      free(dev->nodename);
   }
   free(dev);
}
Example #26
File: netfront.c Project: cnplab/mini-os
/**
 * Transmit function for pbufs which can handle checksum and segmentation offloading for TCPv4 and TCPv6
 */
err_t netfront_xmit_pbuf(struct netfront_dev *dev, struct pbuf *p, int co_type, int push)
{
	struct netif_tx_request *first_tx;
	struct netif_extra_info *gso;
	int slots;
	int used = 0;
#ifdef CONFIG_NETFRONT_GSO
	int sego;
#endif /* CONFIG_NETFRONT_GSO */
#ifdef CONFIG_NETFRONT_WAITFORTX
	unsigned long flags;
	DEFINE_WAIT(w);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Counts how many slots we require for this buf */
	slots = netfront_count_pbuf_slots(dev, p);
#ifdef CONFIG_NETFRONT_GSO
#if TCP_GSO /* GSO flag is only available if lwIP is built with GSO support */
	sego = (p->flags & PBUF_FLAG_GSO) ? 1 : 0;
#else
	sego = 0;
#endif
	/* GSO requires checksum offloading set */
	BUG_ON(sego && !(co_type & (XEN_NETIF_GSO_TYPE_TCPV4 | XEN_NETIF_GSO_TYPE_TCPV6)));
#endif /* CONFIG_NETFRONT_GSO */

	/* Checks if there are enough requests for this many slots (gso requires one slot more) */
#ifdef CONFIG_NETFRONT_GSO
	BUG_ON(!netfront_tx_possible(dev, slots + sego));
#else
	BUG_ON(!netfront_tx_possible(dev, slots));
#endif /* CONFIG_NETFRONT_GSO */

#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_save(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */
#ifdef CONFIG_NETFRONT_GSO
	if (unlikely(!netfront_tx_available(dev, slots + sego))) {
#else
	if (unlikely(!netfront_tx_available(dev, slots))) {
#endif /* CONFIG_NETFRONT_GSO */
		netfront_xmit_push(dev);
#ifdef CONFIG_NETFRONT_WAITFORTX
 try_again:
#ifdef CONFIG_NETFRONT_GSO
		if (!netfront_tx_available(dev, slots + sego)) {
#else
		if (!netfront_tx_available(dev, slots)) {
#endif /* CONFIG_NETFRONT_GSO */
#ifndef CONFIG_NETFRONT_WAITFORTX_BUSYLOOP
			add_waiter(w, netfront_txqueue); /* release thread until space is free'd */
			local_irq_restore(flags);
			schedule();
			local_irq_save(flags);
#endif /* !CONFIG_NETFRONT_WAITFORTX_BUSYLOOP */
			netfront_tx_buf_gc(dev);
			goto try_again;
		}
		remove_waiter(w, netfront_txqueue); /* release thread until space is free'd */
#else
		return ERR_MEM;
#endif /* CONFIG_NETFRONT_WAITFORTX */
	}
#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_restore(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Set extras if packet is GSO kind */
	first_tx = netfront_get_page(dev);
	ASSERT(first_tx != NULL);
#if defined CONFIG_NETFRONT_GSO && TCP_GSO
	if (sego) {
		gso = (struct netif_extra_info *) RING_GET_REQUEST(&dev->tx, dev->tx.req_prod_pvt++);

		first_tx->flags |= NETTXF_extra_info;
		gso->u.gso.size = p->gso_size; /* segmentation size */
		gso->u.gso.type = co_type; /* XEN_NETIF_GSO_TYPE_TCPV4, XEN_NETIF_GSO_TYPE_TCPV6 */
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;

		used++;
	}
#endif /* CONFIG_NETFRONT_GSO */

	/* Make TX requests for the pbuf */
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	netfront_make_txreqs_pgnt(dev, first_tx, p, &used);
#else
	netfront_make_txreqs(dev, first_tx, p, &used);
#endif
	ASSERT(slots >= used); /* we should have taken at most the number slots that we estimated before */
	ASSERT(slots <= XEN_NETIF_NR_SLOTS_MIN); /* we should never take more slots than the backend supports */

	/* partially checksummed (offload enabled), or checksummed */
	first_tx->flags |= co_type ? ((NETTXF_csum_blank) | (NETTXF_data_validated)) : (NETTXF_data_validated);

	push |= (((dev)->tx.req_prod_pvt - (dev)->tx.rsp_cons) <= NET_TX_RING_SIZE / 2);
	if (push)
		netfront_xmit_push(dev);

#ifdef CONFIG_NETFRONT_STATS
	++dev->txpkts;
	dev->txbytes += p->tot_len;
#endif
	dprintk("tx: %c%c%c %u bytes (%u slots)\n", sego ? 'S' : '-', co_type ? 'C' : '-', push ? 'P' : '-', p->tot_len, slots);
	return ERR_OK;
}

void netfront_xmit_push(struct netfront_dev *dev)
{
	int flags;

	netfront_xmit_notify(dev);

	/* Collects any outstanding responses for more requests */
	local_irq_save(flags);
	netfront_tx_buf_gc(dev);
	local_irq_restore(flags);
}

void netfront_set_rx_pbuf_handler(struct netfront_dev *dev,
				  void (*thenetif_rx)(struct pbuf *p, void *arg),
				  void *arg)
{
	if (dev->netif_rx_pbuf && dev->netif_rx_pbuf != netif_rx_pbuf)
		printk("Replacing netif_rx_pbuf handler for dev %s\n", dev->nodename);

	dev->netif_rx = NULL;
	dev->netif_rx_pbuf = thenetif_rx;
	dev->netif_rx_arg = arg;
}
#endif

static void free_netfront(struct netfront_dev *dev)
{
	int i;
	int separate_tx_rx_irq = (dev->tx_evtchn != dev->rx_evtchn);

	free(dev->mac);
	free(dev->backend);

#ifdef CONFIG_NETMAP
	if (dev->netmap)
		return;
#endif

	for(i=0; i<NET_TX_RING_SIZE; i++)
		down(&dev->tx_sem);

	mask_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		mask_evtchn(dev->rx_evtchn);

	gnttab_end_access(dev->rx_ring_ref);
	gnttab_end_access(dev->tx_ring_ref);

	free_page(dev->rx.sring);
	free_page(dev->tx.sring);

	unbind_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		unbind_evtchn(dev->rx_evtchn);

#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	for(i=0; i<NET_RX_RING_SIZE; i++) {
		if (dev->rx_buffers[i].page) {
			gnttab_end_access(dev->rx_buffers[i].gref);
			free_page(dev->rx_buffers[i].page);
		}
	}
#else
	for(i=0; i<NET_RX_BUFFERS; i++) {
		if (dev->rx_buffer_pool[i].page) {
			if (dev->rx_buffer_pool[i].gref != GRANT_INVALID_REF)
				gnttab_end_access(dev->rx_buffer_pool[i].gref);
			free_page(dev->rx_buffer_pool[i].page);
		}
	}
#endif

#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY
	for(i=0; i<NET_TX_RING_SIZE; i++) {
		if (dev->tx_buffers[i].page) {
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
			if (dev->tx_buffers[i].gref != GRANT_INVALID_REF)
#endif
			gnttab_end_access(dev->tx_buffers[i].gref);
			free_page(dev->tx_buffers[i].page);
		}
	}
#endif
}
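
Most of the teardown paths above repeat the same sequence: mask the port so no further upcalls arrive, clear any event that is already pending, drop the local handler state, and only then ask the hypervisor to close the channel. The following sketch distills that pattern from examples #10 and #18; it assumes the Mini-OS-style helpers used throughout (mask_evtchn, clear_evtchn, HYPERVISOR_event_channel_op), and the function name itself is illustrative rather than taken from any of the projects above.

static void teardown_port_sketch(evtchn_port_t port)
{
    struct evtchn_close close;
    int rc;

    mask_evtchn(port);      /* stop further event delivery on this port */
    clear_evtchn(port);     /* discard an event that may already be pending */

    /* Only after the port is quiesced is it handed back to Xen. */
    close.port = port;
    rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
    if (rc)
        printk("WARN: close of port %d failed rc=%d. ignored\n", port, rc);
}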