Example #1
void shutdown_fbfront(struct fbfront_dev *dev)
{
    char* err;
    char *nodename = dev->nodename;

    char path[strlen(dev->backend) + 1 + 5 + 1];

    printk("close fb: backend at %s\n",dev->backend);

    snprintf(path, sizeof(path), "%s/state", dev->backend);
    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 5); /* closing */
    xenbus_wait_for_value(path, "5", &dev->events);

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 6);
    xenbus_wait_for_value(path, "6", &dev->events);

    err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 1);
    // does not work yet
    //xenbus_wait_for_value(path, "2", &dev->events);

    xenbus_unwatch_path(XBT_NIL, path);

    snprintf(path, sizeof(path), "%s/page-ref", nodename);
    xenbus_rm(XBT_NIL, path);
    snprintf(path, sizeof(path), "%s/event-channel", nodename);
    xenbus_rm(XBT_NIL, path);
    snprintf(path, sizeof(path), "%s/protocol", nodename);
    xenbus_rm(XBT_NIL, path);
    snprintf(path, sizeof(path), "%s/feature-update", nodename);
    xenbus_rm(XBT_NIL, path);

    unbind_evtchn(dev->evtchn);

    free_fbfront(dev);
}
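Two details in this example are worth flagging. The error string returned by xenbus_printf is owned by the caller; here it is stored in err but never released, unlike Examples 11 and 13. Also, path is sized for the backend's state node but is later reused for frontend entries such as page-ref and feature-update, so those snprintf calls can silently truncate unless the backend path is sufficiently longer than the frontend path. A minimal sketch of the check-and-free pattern for the state write (the message text is illustrative):

err = xenbus_printf(XBT_NIL, nodename, "state", "%u", 5); /* closing */
if (err) {
    printk("shutdown_fbfront: error writing state: %s\n", err);
    free(err);
}
xenbus_wait_for_value(path, "5", &dev->events);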
Example #2
static void free_netfront(struct netfront_dev *dev)
{
    int i;

    free(dev->mac);
    free(dev->backend);
#ifdef CONFIG_NETMAP
    if (dev->netmap)
        return;
#endif
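    /* Drain the TX semaphore: it counts free TX slots, so taking it
     * NET_TX_RING_SIZE times blocks until all outstanding transmits
     * have completed before the rings are torn down. */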
    for(i=0;i<NET_TX_RING_SIZE;i++)
	down(&dev->tx_sem);

    mask_evtchn(dev->evtchn);

    gnttab_end_access(dev->rx_ring_ref);
    gnttab_end_access(dev->tx_ring_ref);

    free_page(dev->rx.sring);
    free_page(dev->tx.sring);

    unbind_evtchn(dev->evtchn);

    for(i=0;i<NET_RX_RING_SIZE;i++) {
	gnttab_end_access(dev->rx_buffers[i].gref);
	free_page(dev->rx_buffers[i].page);
    }

    for(i=0;i<NET_TX_RING_SIZE;i++) {
	if (dev->tx_buffers[i].page) {
	    gnttab_end_access(dev->tx_buffers[i].gref);
	    free_page(dev->tx_buffers[i].page);
	}
    }
}
Example #3
static void free_netfront(struct netfront_dev *dev)
{
    int i;

    for(i=0;i<NET_TX_RING_SIZE;i++)
	down(&dev->tx_sem);

    mask_evtchn(dev->evtchn);

    free(dev->mac);
    free(dev->backend);

    gnttab_end_access(dev->rx_ring_ref);
    gnttab_end_access(dev->tx_ring_ref);

    free_page(dev->rx.sring);
    free_page(dev->tx.sring);

    unbind_evtchn(dev->evtchn);

    for(i=0;i<NET_RX_RING_SIZE;i++) {
	gnttab_end_access(dev->rx_buffers[i].gref);
	free_page(dev->rx_buffers[i].page);
    }

    for(i=0;i<NET_TX_RING_SIZE;i++)
	if (dev->tx_buffers[i].page)
	    free_page(dev->tx_buffers[i].page);

    free(dev->nodename);
    free(dev);
}
Example #4
void arch_unbind_ports(void)
{
    if(debug_port != -1)
    {
        mask_evtchn(debug_port);
        unbind_evtchn(debug_port);
    }
}
Example #5
int xc_evtchn_close(int xce_handle)
{
    int i;
    for (i = 0; i < MAX_EVTCHN_PORTS; i++)
        if (files[xce_handle].evtchn.ports[i].bound)
            unbind_evtchn(files[xce_handle].evtchn.ports[i].port);
    files[xce_handle].type = FTYPE_NONE;
    return 0;
}
Example #6
static void free_kbdfront(struct kbdfront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    free_page(dev->page);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
Example #7
static void free_blkfront(struct blkfront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    gnttab_end_access(dev->ring_ref);
    free_page(dev->ring.sring);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
Example #8
static void free_pcifront(struct pcifront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    gnttab_end_access(dev->info_ref);
    free_page(dev->info);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
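Examples 6, 7, and 8 share the same teardown order: mask the event channel so no further notifications arrive, release the per-device resources (grant reference and shared page, where present), then unbind the channel and free the remaining strings and the device structure. A condensed sketch of that recurring shape, using a hypothetical generic_dev type whose field names are placeholders:

static void free_generic_front(struct generic_dev *dev)
{
    mask_evtchn(dev->evtchn);         /* stop event delivery first */
    free(dev->backend);

    gnttab_end_access(dev->ring_ref); /* revoke the grant ... */
    free_page(dev->ring);             /* ... then release the shared page */

    unbind_evtchn(dev->evtchn);       /* close the event channel */

    free(dev->nodename);
    free(dev);                        /* finally the device structure itself */
}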
Example #9
static int tpmfront_connect(struct tpmfront_dev* dev)
{
   char* err;
   /* Create shared page */
   dev->page = (vtpm_shared_page_t*) alloc_page();
   if(dev->page == NULL) {
      TPMFRONT_ERR("Unable to allocate page for shared memory\n");
      goto error;
   }
   memset(dev->page, 0, PAGE_SIZE);
   dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->page), 0);
   TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref);

   /*Create event channel */
   if(evtchn_alloc_unbound(dev->bedomid, tpmfront_handler, dev, &dev->evtchn)) {
      TPMFRONT_ERR("Unable to allocate event channel\n");
      goto error_postmap;
   }
   unmask_evtchn(dev->evtchn);
   TPMFRONT_DEBUG("event channel is %lu\n", (unsigned long) dev->evtchn);

   /* Write the entries to xenstore */
   if(publish_xenbus(dev)) {
      goto error_postevtchn;
   }

   /* Change state to connected */
   dev->state = XenbusStateConnected;

   /* Tell the backend that we are ready */
   if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", dev->state))) {
      TPMFRONT_ERR("Unable to write to xenstore %s/state, value=%u", dev->nodename, XenbusStateConnected);
      free(err);
      goto error;
   }

   return 0;
error_postevtchn:
      mask_evtchn(dev->evtchn);
      unbind_evtchn(dev->evtchn);
error_postmap:
      gnttab_end_access(dev->ring_ref);
      free_page(dev->page);
error:
   return -1;
}
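The error labels fall through in reverse order of setup: error_postevtchn masks and unbinds the event channel, then error_postmap revokes the grant and frees the shared page. A hypothetical caller could pair this with shutdown_tpmfront from Example 13; the allocation and field setup shown here are illustrative only:

struct tpmfront_dev *dev = malloc(sizeof(*dev));  /* illustrative setup */
memset(dev, 0, sizeof(*dev));
/* ... fill in dev->nodename, dev->bedomid and related fields ... */
if (tpmfront_connect(dev) < 0) {
    shutdown_tpmfront(dev);  /* Example 13: releases remaining buffers and dev */
    /* handle the failure */
}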
Example #10
int xc_evtchn_unbind(int xce_handle, evtchn_port_t port)
{
    int i;
    for (i = 0; i < MAX_EVTCHN_PORTS; i++)
	if (files[xce_handle].evtchn.ports[i].port == port) {
	    files[xce_handle].evtchn.ports[i].port = -1;
	    break;
	}
    if (i == MAX_EVTCHN_PORTS) {
	printf("Warning: couldn't find port %"PRId32" for xc handle %x\n", port, xce_handle);
	errno = EINVAL;
	return -1;
    }
    files[xce_handle].evtchn.ports[i].bound = 0;
    unbind_evtchn(port);
    return 0;
}
Example #11
void free_consfront(struct consfront_dev *dev)
{
    char* err = NULL;
    XenbusState state;

    char path[strlen(dev->backend) + 1 + 5 + 1];
    char nodename[strlen(dev->nodename) + 1 + 5 + 1];

    snprintf(path, sizeof(path), "%s/state", dev->backend);
    snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

    if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing)) != NULL) {
        printk("free_consfront: error changing state to %d: %s\n",
                XenbusStateClosing, err);
        goto close;
    }
    state = xenbus_read_integer(path);
    while (err == NULL && state < XenbusStateClosing)
        err = xenbus_wait_for_state_change(path, &state, &dev->events);
    if (err) free(err);

    if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed)) != NULL) {
        printk("free_consfront: error changing state to %d: %s\n",
                XenbusStateClosed, err);
        goto close;
    }

close:
    if (err) free(err);
    xenbus_unwatch_path_token(XBT_NIL, path, path);

    mask_evtchn(dev->evtchn);
    unbind_evtchn(dev->evtchn);
    free(dev->backend);
    free(dev->nodename);

    gnttab_end_access(dev->ring_ref);

    free_page(dev->ring);
    free(dev);
}
Example #12
/**
 * Transmit function for pbufs which can handle checksum and segmentation offloading for TCPv4 and TCPv6
 */
err_t netfront_xmit_pbuf(struct netfront_dev *dev, struct pbuf *p, int co_type, int push)
{
	struct netif_tx_request *first_tx;
	struct netif_extra_info *gso;
	int slots;
	int used = 0;
#ifdef CONFIG_NETFRONT_GSO
	int sego;
#endif /* CONFIG_NETFRONT_GSO */
#ifdef CONFIG_NETFRONT_WAITFORTX
	unsigned long flags;
	DEFINE_WAIT(w);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Counts how many slots we require for this buf */
	slots = netfront_count_pbuf_slots(dev, p);
#ifdef CONFIG_NETFRONT_GSO
#if TCP_GSO /* GSO flag is only available if lwIP is built with GSO support */
	sego = (p->flags & PBUF_FLAG_GSO) ? 1 : 0;
#else
	sego = 0;
#endif
	/* GSO requires checksum offloading set */
	BUG_ON(sego && !(co_type & (XEN_NETIF_GSO_TYPE_TCPV4 | XEN_NETIF_GSO_TYPE_TCPV6)));
#endif /* CONFIG_NETFRONT_GSO */

	/* Checks if there are enough requests for this many slots (gso requires one slot more) */
#ifdef CONFIG_NETFRONT_GSO
	BUG_ON(!netfront_tx_possible(dev, slots + sego));
#else
	BUG_ON(!netfront_tx_possible(dev, slots));
#endif /* CONFIG_NETFRONT_GSO */

#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_save(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */
#ifdef CONFIG_NETFRONT_GSO
	if (unlikely(!netfront_tx_available(dev, slots + sego))) {
#else
	if (unlikely(!netfront_tx_available(dev, slots))) {
#endif /* CONFIG_NETFRONT_GSO */
		netfront_xmit_push(dev);
#ifdef CONFIG_NETFRONT_WAITFORTX
 try_again:
#ifdef CONFIG_NETFRONT_GSO
		if (!netfront_tx_available(dev, slots + sego)) {
#else
		if (!netfront_tx_available(dev, slots)) {
#endif /* CONFIG_NETFRONT_GSO */
#ifndef CONFIG_NETFRONT_WAITFORTX_BUSYLOOP
			add_waiter(w, netfront_txqueue); /* release thread until space is free'd */
			local_irq_restore(flags);
			schedule();
			local_irq_save(flags);
#endif /* !CONFIG_NETFRONT_WAITFORTX_BUSYLOOP */
			netfront_tx_buf_gc(dev);
			goto try_again;
		}
		remove_waiter(w, netfront_txqueue); /* enough slots are free again; stop waiting */
#else
		return ERR_MEM;
#endif /* CONFIG_NETFRONT_WAITFORTX */
	}
#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_restore(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Set extras if packet is GSO kind */
	first_tx = netfront_get_page(dev);
	ASSERT(first_tx != NULL);
#if defined CONFIG_NETFRONT_GSO && TCP_GSO
	if (sego) {
		gso = (struct netif_extra_info *) RING_GET_REQUEST(&dev->tx, dev->tx.req_prod_pvt++);

		first_tx->flags |= NETTXF_extra_info;
		gso->u.gso.size = p->gso_size; /* segmentation size */
		gso->u.gso.type = co_type; /* XEN_NETIF_GSO_TYPE_TCPV4, XEN_NETIF_GSO_TYPE_TCPV6 */
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;

		used++;
	}
#endif /* CONFIG_NETFRONT_GSO */

	/* Make TX requests for the pbuf */
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	netfront_make_txreqs_pgnt(dev, first_tx, p, &used);
#else
	netfront_make_txreqs(dev, first_tx, p, &used);
#endif
	ASSERT(slots >= used); /* we should have taken at most the number slots that we estimated before */
	ASSERT(slots <= XEN_NETIF_NR_SLOTS_MIN); /* we should never take more slots than the backend supports */

	/* partially checksummed (offload enabled), or checksummed */
	first_tx->flags |= co_type ? ((NETTXF_csum_blank) | (NETTXF_data_validated)) : (NETTXF_data_validated);

	push |= (((dev)->tx.req_prod_pvt - (dev)->tx.rsp_cons) <= NET_TX_RING_SIZE / 2);
	if (push)
		netfront_xmit_push(dev);

#ifdef CONFIG_NETFRONT_STATS
	++dev->txpkts;
	dev->txbytes += p->tot_len;
#endif
	dprintk("tx: %c%c%c %u bytes (%u slots)\n", sego ? 'S' : '-', co_type ? 'C' : '-', push ? 'P' : '-', p->tot_len, slots);
	return ERR_OK;
}

void netfront_xmit_push(struct netfront_dev *dev)
{
	unsigned long flags;

	netfront_xmit_notify(dev);

	/* Collects any outstanding responses for more requests */
	local_irq_save(flags);
	netfront_tx_buf_gc(dev);
	local_irq_restore(flags);
}

void netfront_set_rx_pbuf_handler(struct netfront_dev *dev,
				  void (*thenetif_rx)(struct pbuf *p, void *arg),
				  void *arg)
{
	if (dev->netif_rx_pbuf && dev->netif_rx_pbuf != netif_rx_pbuf)
		printk("Replacing netif_rx_pbuf handler for dev %s\n", dev->nodename);

	dev->netif_rx = NULL;
	dev->netif_rx_pbuf = thenetif_rx;
	dev->netif_rx_arg = arg;
}
#endif

static void free_netfront(struct netfront_dev *dev)
{
	int i;
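	/* TX and RX may use a shared or separate event channels; record whether
	   they differ so each channel is masked and unbound exactly once. */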
	int separate_tx_rx_irq = (dev->tx_evtchn != dev->rx_evtchn);

	free(dev->mac);
	free(dev->backend);

#ifdef CONFIG_NETMAP
	if (dev->netmap)
		return;
#endif

	for(i=0; i<NET_TX_RING_SIZE; i++)
		down(&dev->tx_sem);

	mask_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		mask_evtchn(dev->rx_evtchn);

	gnttab_end_access(dev->rx_ring_ref);
	gnttab_end_access(dev->tx_ring_ref);

	free_page(dev->rx.sring);
	free_page(dev->tx.sring);

	unbind_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		unbind_evtchn(dev->rx_evtchn);

#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	for(i=0; i<NET_RX_RING_SIZE; i++) {
		if (dev->rx_buffers[i].page) {
			gnttab_end_access(dev->rx_buffers[i].gref);
			free_page(dev->rx_buffers[i].page);
		}
	}
#else
	for(i=0; i<NET_RX_BUFFERS; i++) {
		if (dev->rx_buffer_pool[i].page) {
			if (dev->rx_buffer_pool[i].gref != GRANT_INVALID_REF)
				gnttab_end_access(dev->rx_buffer_pool[i].gref);
			free_page(dev->rx_buffer_pool[i].page);
		}
	}
#endif

#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY
	for(i=0; i<NET_TX_RING_SIZE; i++) {
		if (dev->tx_buffers[i].page) {
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
			if (dev->tx_buffers[i].gref != GRANT_INVALID_REF)
#endif
			gnttab_end_access(dev->tx_buffers[i].gref);
			free_page(dev->tx_buffers[i].page);
		}
	}
#endif
}
Example #13
void shutdown_tpmfront(struct tpmfront_dev* dev)
{
   char* err;
   char path[512];
   if(dev == NULL) {
      return;
   }
   TPMFRONT_LOG("Shutting down tpmfront\n");
   /* disconnect */
   if(dev->state == XenbusStateConnected) {
      /* Tell backend we are closing */
      dev->state = XenbusStateClosing;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Clean up xenstore entries */
      snprintf(path, 512, "%s/event-channel", dev->nodename);
      if((err = xenbus_rm(XBT_NIL, path))) {
	 free(err);
      }
      snprintf(path, 512, "%s/ring-ref", dev->nodename);
      if((err = xenbus_rm(XBT_NIL, path))) {
	 free(err);
      }

      /* Tell backend we are closed */
      dev->state = XenbusStateClosed;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Wait for the backend to close and unmap shared pages, ignore any errors */
      wait_for_backend_state_changed(dev, XenbusStateClosed);

      /* Prepare for a later reopen (possibly by a kexec'd kernel) */
      dev->state = XenbusStateInitialising;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Close event channel and unmap shared page */
      mask_evtchn(dev->evtchn);
      unbind_evtchn(dev->evtchn);
      gnttab_end_access(dev->ring_ref);

      free_page(dev->page);
   }

   /* Cleanup memory usage */
   if(dev->respbuf) {
      free(dev->respbuf);
   }
   if(dev->bepath) {
      free(dev->bepath);
   }
   if(dev->nodename) {
      free(dev->nodename);
   }
   free(dev);
}