Example #1
static void netfront_tx_buf_gc(struct netfront_dev *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	struct net_txbuffer *buf;
#endif

	do {
		prod = dev->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = dev->tx.rsp_cons; cons != prod; cons++) {
			struct netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&dev->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			if (txrsp->status == NETIF_RSP_DROPPED)
				printk("netif drop for tx\n");

			if (txrsp->status == NETIF_RSP_ERROR)
				printk("netif error for tx\n");

			id  = txrsp->id;
			BUG_ON(id >= NET_TX_RING_SIZE);

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
			buf = &dev->tx_buffers[id];
			gnttab_end_access(buf->gref);
			buf->gref = GRANT_INVALID_REF;
#ifdef HAVE_LWIP
			if (buf->pbuf) {
				pbuf_free(buf->pbuf);
				buf->pbuf = NULL;
			}
#endif /* HAVE_LWIP */
#endif /* CONFIG_NETFRONT_PERSISTENT_GRANTS */
			add_id_to_freelist(id, dev->tx_freelist);
			up(&dev->tx_sem);
		}

		dev->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few tx_buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		dev->tx.sring->rsp_event =
			prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
		mb();
	} while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
Example #2
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
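
The free-list helpers themselves are not shown in the excerpt above. As a point of reference, here is a minimal, self-contained sketch of the union-threaded free list that these call sites imply: a free slot reuses the sk_buff pointer field to store the index of the next free slot. The type and field names below are assumptions made for illustration, not code from drivers/net/xen-netfront.c.

#include <stdio.h>

/* Sketch only: stand-in for the driver's tx_skbs entry type. */
union skb_entry {
	void *skb;           /* slot in use: the queued packet */
	unsigned long link;  /* slot free: index of the next free slot */
};

static void add_id_to_freelist(unsigned int *head, union skb_entry *list,
			       unsigned short id)
{
	list[id].link = *head;  /* thread this slot onto the free list */
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned int *head,
					   union skb_entry *list)
{
	unsigned short id = (unsigned short)*head;

	*head = (unsigned int)list[id].link;  /* pop the head slot */
	return id;
}

int main(void)
{
	union skb_entry slots[4];
	unsigned int head = 4;  /* one past the last slot: list empty */
	unsigned short i;

	for (i = 0; i < 4; i++)
		add_id_to_freelist(&head, slots, i);
	printf("first free id: %u\n",
	       (unsigned)get_id_from_freelist(&head, slots));
	return 0;
}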
Example #3
static irqreturn_t ixp_interrupt(int irq, void *dev_id)
{
	struct ixp_response *bret;
	RING_IDX i, rp;
	struct ixpfront_info *info = (struct ixpfront_info *)dev_id;
	int error;


	if (unlikely(info->connected != IXP_STATE_CONNECTED)) {
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		
		ixp_install_response(info, bret);
		ixp_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == IXPIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case IXP_OP_3DES_ENCRYPT:
			if (unlikely(bret->status != IXPIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from ixp data "
					"request: %x\n", bret->status);

			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	return IRQ_HANDLED;
}
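
Example #3 shows a third calling convention, add_id_to_freelist(info, id), which in blkfront-derived frontends threads the free list through the shadow request array rather than through a separate list. The sketch below reconstructs that shape; the struct and field names (shadow_free, next_free, in_use) are invented for illustration, since the ixpfront helper itself is not part of the excerpt.

/* Sketch, not the driver's code: names are assumptions. */
struct ixp_shadow_sketch {
	unsigned long next_free;  /* valid only while the slot is free */
	int in_use;
};

struct ixpfront_info_sketch {
	struct ixp_shadow_sketch shadow[32];
	unsigned long shadow_free;  /* head of the free list */
};

static void add_id_to_freelist_sketch(struct ixpfront_info_sketch *info,
				      unsigned long id)
{
	info->shadow[id].next_free = info->shadow_free;
	info->shadow[id].in_use = 0;
	info->shadow_free = id;  /* the freed slot becomes the new head */
}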
Example #4
void network_tx_buf_gc(struct netfront_dev *dev)
{
    RING_IDX cons, prod;
    unsigned short id;

    do {
        prod = dev->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'rp'. */

        for (cons = dev->tx.rsp_cons; cons != prod; cons++) 
        {
            struct netif_tx_response *txrsp;
            struct net_buffer *buf;

            txrsp = RING_GET_RESPONSE(&dev->tx, cons);
            if (txrsp->status == NETIF_RSP_NULL){
                continue;
            }

            if (txrsp->status == NETIF_RSP_ERROR){
                printk("packet error\n");
            }

            id  = txrsp->id;
            BUG_ON(id >= NET_TX_RING_SIZE);
            buf = &dev->tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref = GRANT_INVALID_REF;

            add_id_to_freelist(id, dev->tx_freelist);
            up(&dev->tx_sem);
        }

        dev->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        dev->tx.sring->rsp_event =
            prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
    } while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
Example #5
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); 

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
Example #6
static void allocate_request_array(struct fs_mount *mount)
{
    int i, nr_entries = mount->nr_entries;
    struct fs_request *requests;
    unsigned short *freelist;
    
    requests = malloc(sizeof(struct fs_request) * nr_entries);
    /* One extra slot: freelist[0] holds the head of the free list. */
    freelist = malloc(sizeof(unsigned short) * (nr_entries + 1));
    assert(requests != NULL && freelist != NULL);
    memset(requests, 0, sizeof(struct fs_request) * nr_entries);
    memset(freelist, 0, sizeof(unsigned short) * (nr_entries + 1));
    for (i = 0; i < nr_entries; i++)
    {
        requests[i].active = 0;
        add_id_to_freelist(i, freelist);
    }
    mount->requests = requests;
    mount->freelist = freelist;
}
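
The nr_entries + 1 allocation above matches the Mini-OS encoding used by the netfront examples as well: freelist[0] holds the head index, and the link for slot id lives at freelist[id + 1]. Below is a minimal reconstruction of the matching helper pair, consistent with the call sites in Examples #1, #4, and #6 (a sketch, not verbatim source):

/* Reconstruction of the Mini-OS-style helpers; freelist needs room
 * for nr_entries + 1 shorts because slot 0 stores the list head. */
static inline void add_id_to_freelist(unsigned int id,
				      unsigned short *freelist)
{
	freelist[id + 1] = freelist[0];  /* link the old head behind id */
	freelist[0] = id;                /* id becomes the new head */
}

static inline unsigned short get_id_from_freelist(unsigned short *freelist)
{
	unsigned int id = freelist[0];

	freelist[0] = freelist[id + 1];  /* unlink the head slot */
	return (unsigned short)id;
}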
Example #7
static struct net_rxbuffer *netfront_get_rxbuffer(struct netfront_dev *dev)
{
	struct net_rxbuffer *buf;
	unsigned short id;

	if (unlikely(dev->rx_avail == 0))
		return NULL; /* out of rx buffers */

	id = get_id_from_freelist(dev->rx_freelist);
	buf = &dev->rx_buffer_pool[id];
	buf->id = id;
#ifdef HAVE_LWIP
	if (unlikely(netfront_init_rxpbuf(buf, dev) == NULL)) {
		/* could not allocate custom pbuf */
		add_id_to_freelist(id, dev->rx_freelist);
		return NULL;
	}
#endif /* HAVE_LWIP */
	dev->rx_avail--;
	return buf;
}
Example #8
static void dispatch_response(struct fs_mount *mount, int priv_req_id)
{
    int i;
    struct fs_op *op;
    struct fs_request *req = &mount->requests[priv_req_id];

    for(i=0;;i++)
    {
        op = fsops[i];
        /* We should dispatch a response before reaching the end of the array */
        assert(op != NULL);
        if(op->type == req->req_shadow.type)
        {
            printf("Found op for type=%d\n", op->type);
            /* There needs to be a response handler */
            assert(op->response_handler != NULL);
            op->response_handler(mount, req);
            break;
        }
    }

    req->active = 0;
    add_id_to_freelist(priv_req_id, mount->freelist);
}
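
dispatch_response walks a NULL-terminated fsops table until it finds an entry whose type matches the shadowed request. A compressed sketch of the table shape that loop assumes (names reconstructed for illustration, not verbatim fs-backend source):

struct fs_mount;
struct fs_request;

/* Sketch of the dispatch-table entry the loop above iterates over. */
struct fs_op {
	int type;  /* compared against req->req_shadow.type */
	void (*response_handler)(struct fs_mount *mount,
				 struct fs_request *req);
};

/* NULL-terminated, so the assert(op != NULL) fires if a response
 * arrives with a type that no entry handles. */
extern struct fs_op *fsops[];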
Example #9
struct netfront_dev *init_netfront(char *_nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6], char **ip)
{
    xenbus_transaction_t xbt;
    char* err;
    char* message=NULL;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    int retry=0;
    int i;
    char* msg;
    char nodename[256];
    char path[256];
    struct netfront_dev *dev;
    static int netfrontends = 0;

    if (!_nodename)
        snprintf(nodename, sizeof(nodename), "device/vif/%d", netfrontends);
    else {
        strncpy(nodename, _nodename, sizeof(nodename) - 1);
        nodename[sizeof(nodename) - 1] = '\0';
    }
    netfrontends++;

    if (!thenetif_rx)
	thenetif_rx = netif_rx;

    printk("************************ NETFRONT for %s **********\n\n\n", nodename);

    dev = malloc(sizeof(*dev));
    memset(dev, 0, sizeof(*dev));
    dev->nodename = strdup(nodename);
#ifdef HAVE_LIBC
    dev->fd = -1;
#endif

    printk("net TX ring size %d\n", NET_TX_RING_SIZE);
    printk("net RX ring size %d\n", NET_RX_RING_SIZE);
    init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE);
    for(i=0;i<NET_TX_RING_SIZE;i++)
    {
	add_id_to_freelist(i,dev->tx_freelist);
        dev->tx_buffers[i].page = NULL;
    }

    for(i=0;i<NET_RX_RING_SIZE;i++)
    {
	/* TODO: that's a lot of memory */
        dev->rx_buffers[i].page = (char*)alloc_page();
    }

    snprintf(path, sizeof(path), "%s/backend-id", nodename);
    dev->dom = xenbus_read_integer(path);
#ifdef HAVE_LIBC
    if (thenetif_rx == NETIF_SELECT_RX)
        evtchn_alloc_unbound(dev->dom, netfront_select_handler, dev, &dev->evtchn);
    else
#endif
        evtchn_alloc_unbound(dev->dom, netfront_handler, dev, &dev->evtchn);

    txs = (struct netif_tx_sring *) alloc_page();
    rxs = (struct netif_rx_sring *) alloc_page();
    memset(txs,0,PAGE_SIZE);
    memset(rxs,0,PAGE_SIZE);


    SHARED_RING_INIT(txs);
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE);
    FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE);

    dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0);
    dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0);

    init_rx_buffers(dev);

    dev->netif_rx = thenetif_rx;

    dev->events = NULL;

again:
    err = xenbus_transaction_start(&xbt);
    if (err) {
        printk("starting transaction\n");
    }

    err = xenbus_printf(xbt, nodename, "tx-ring-ref","%u",
                dev->tx_ring_ref);
    if (err) {
        message = "writing tx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename, "rx-ring-ref","%u",
                dev->rx_ring_ref);
    if (err) {
        message = "writing rx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, nodename,
                "event-channel", "%u", dev->evtchn);
    if (err) {
        message = "writing event-channel";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1);

    if (err) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }

    snprintf(path, sizeof(path), "%s/state", nodename);
    err = xenbus_switch_state(xbt, path, XenbusStateConnected);
    if (err) {
        message = "switching state";
        goto abort_transaction;
    }

    err = xenbus_transaction_end(xbt, 0, &retry);
    if (retry) {
        printk("completing transaction\n");
        goto again;
    }

    goto done;

abort_transaction:
    xenbus_transaction_end(xbt, 1, &retry);
    goto error;

done:

    snprintf(path, sizeof(path), "%s/backend", nodename);
    msg = xenbus_read(XBT_NIL, path, &dev->backend);
    snprintf(path, sizeof(path), "%s/mac", nodename);
    msg = xenbus_read(XBT_NIL, path, &dev->mac);

    if ((dev->backend == NULL) || (dev->mac == NULL)) {
        printk("%s: backend/mac failed\n", __func__);
        goto error;
    }

    printk("backend at %s\n",dev->backend);
    printk("mac is %s\n",dev->mac);

    {
        XenbusState state;
        char path[strlen(dev->backend) + strlen("/state") + 1];
        snprintf(path, sizeof(path), "%s/state", dev->backend);

        xenbus_watch_path_token(XBT_NIL, path, path, &dev->events);

        err = NULL;
        state = xenbus_read_integer(path);
        while (err == NULL && state < XenbusStateConnected)
            err = xenbus_wait_for_state_change(path, &state, &dev->events);
        if (state != XenbusStateConnected) {
            printk("backend not avalable, state=%d\n", state);
            xenbus_unwatch_path(XBT_NIL, path);
            goto error;
        }

        if (ip) {
            snprintf(path, sizeof(path), "%s/ip", dev->backend);
            xenbus_read(XBT_NIL, path, ip);
        }
    }

    printk("**************************\n");

    unmask_evtchn(dev->evtchn);

    /* Special conversion specifier 'hh' needed for __ia64__. Without
       this mini-os panics with 'Unaligned reference'. */
    if (rawmac)
	sscanf(dev->mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
            &rawmac[0],
            &rawmac[1],
            &rawmac[2],
            &rawmac[3],
            &rawmac[4],
            &rawmac[5]);

    return dev;
error:
    free_netfront(dev);
    return NULL;
}
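
For context, a minimal caller of init_netfront might look like the following. This is a hypothetical usage sketch assuming a Mini-OS application context; passing NULL for the nodename selects device/vif/0, and passing NULL for the handler falls back to the default netif_rx callback, as the code above shows.

/* Hypothetical caller; start_networking is not part of the source. */
void start_networking(void)
{
    unsigned char mac[6];
    char *ip = NULL;
    struct netfront_dev *dev;

    dev = init_netfront(NULL, NULL, mac, &ip);
    if (!dev) {
        printk("netfront init failed\n");
        return;
    }
    printk("mac %02x:%02x:%02x:%02x:%02x:%02x, ip %s\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
           ip ? ip : "(none)");
}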
Example #10
static void netfront_release_rxbuffer(struct net_rxbuffer *buf, struct netfront_dev *dev)
{
	add_id_to_freelist(buf->id, dev->rx_freelist);
	dev->rx_avail++;
}
Example #11
static struct netfront_dev *_init_netfront(struct netfront_dev *dev,
					   unsigned char rawmac[6],
					   char **ip)
{
	xenbus_transaction_t xbt;
	char* err = NULL;
	const char* message=NULL;
	struct netif_tx_sring *txs;
	struct netif_rx_sring *rxs;
	int feature_split_evtchn;
	int retry=0;
	int i;
	char* msg = NULL;
	char path[256];

	snprintf(path, sizeof(path), "%s/backend-id", dev->nodename);
	dev->dom = xenbus_read_integer(path);

	snprintf(path, sizeof(path), "%s/backend", dev->nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->backend);
	snprintf(path, sizeof(path), "%s/mac", dev->nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->mac);
	if ((dev->backend == NULL) || (dev->mac == NULL)) {
		printk("%s: backend/mac failed\n", __func__);
		goto error;
	}

#ifdef CONFIG_NETMAP
	snprintf(path, sizeof(path), "%s/feature-netmap", dev->backend);
	dev->netmap = xenbus_read_integer(path) > 0 ? 1 : 0;

	if (dev->netmap) {
			dev->na = init_netfront_netmap(dev, dev->netif_rx);
			goto skip;
	}
#endif
	/* Check feature-split-event-channels */
	snprintf(path, sizeof(path), "%s/feature-split-event-channels",
		 dev->backend);
	feature_split_evtchn = xenbus_read_integer(path) > 0 ? 1 : 0;
#ifdef HAVE_LIBC
	/* Force the use of a single event channel */
	if (dev->netif_rx == NETIF_SELECT_RX)
		feature_split_evtchn = 0;
#endif

	printk("************************ NETFRONT for %s **********\n\n\n",
	       dev->nodename);

	init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE);
	for(i=0;i<NET_TX_RING_SIZE;i++)
	{
		add_id_to_freelist(i,dev->tx_freelist);
#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY
		dev->tx_buffers[i].page = (void*)alloc_page();
		BUG_ON(dev->tx_buffers[i].page == NULL);
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
		dev->tx_buffers[i].gref = gnttab_grant_access(dev->dom,
							      virt_to_mfn(dev->tx_buffers[i].page), 0);
		BUG_ON(dev->tx_buffers[i].gref == GRANT_INVALID_REF);
		dprintk("tx[%d]: page = %p, gref=0x%x\n", i, dev->tx_buffers[i].page, dev->tx_buffers[i].gref);
#endif
#endif
	}
#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY
	printk("net TX ring size %d, %lu KB\n", NET_TX_RING_SIZE, (unsigned long)(NET_TX_RING_SIZE * PAGE_SIZE)/1024);
#else
	printk("net TX ring size %d\n", NET_TX_RING_SIZE);
#endif

#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	for(i=0;i<NET_RX_RING_SIZE;i++)
	{
	/* TODO: that's a lot of memory */
		dev->rx_buffers[i].page = (void*)alloc_page();
		BUG_ON(dev->rx_buffers[i].page == NULL);
		dprintk("rx[%d]: page = %p\n", i, dev->rx_buffers[i].page);
	}
	printk("net RX ring size %d, %lu KB\n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_RING_SIZE * PAGE_SIZE)/1024);
#else
	for(i=0;i<NET_RX_RING_SIZE;i++)
		dev->rx_buffers[i] = NULL;
	for(i=0;i<NET_RX_BUFFERS;i++)
	{
		/* allocate rx buffer pool */
		dev->rx_buffer_pool[i].page = (void*)alloc_page();
		BUG_ON(dev->rx_buffer_pool[i].page == NULL);
		dprintk("rx[%d]: page = %p\n", i, dev->rx_buffer_pool[i].page);
		add_id_to_freelist(i,dev->rx_freelist);
	}
	dev->rx_avail = NET_RX_BUFFERS;
	printk("net RX ring size %d, %lu KB buffer space\n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_BUFFERS * PAGE_SIZE)/1024);
#endif

	if (feature_split_evtchn) {
		evtchn_alloc_unbound(dev->dom, netfront_tx_handler, dev,
				     &dev->tx_evtchn);
		evtchn_alloc_unbound(dev->dom, netfront_rx_handler, dev,
				     &dev->rx_evtchn);
		printk("split event channels enabled\n");
	} else {
#ifdef HAVE_LIBC
		if (dev->netif_rx == NETIF_SELECT_RX)
			evtchn_alloc_unbound(dev->dom, netfront_select_handler,
					     dev, &dev->tx_evtchn);
		else
#endif
			evtchn_alloc_unbound(dev->dom, netfront_handler,
					     dev, &dev->tx_evtchn);
		dev->rx_evtchn = dev->tx_evtchn;
	}

#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	printk("persistent grants enabled\n");
#endif

	txs = (struct netif_tx_sring *) alloc_page();
	rxs = (struct netif_rx_sring *) alloc_page();
	memset(txs,0,PAGE_SIZE);
	memset(rxs,0,PAGE_SIZE);


	SHARED_RING_INIT(txs);
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE);
	FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE);

	dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0);
	BUG_ON(dev->tx_ring_ref == GRANT_INVALID_REF);
	dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0);
	BUG_ON(dev->rx_ring_ref == GRANT_INVALID_REF);

	init_rx_buffers(dev);

	dev->events = NULL;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printk("starting transaction\n");
		free(err);
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
				dev->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
				dev->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}

	if (feature_split_evtchn) {
		err = xenbus_printf(xbt, dev->nodename,
					"event-channel-tx", "%u", dev->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto abort_transaction;
		}
		err = xenbus_printf(xbt, dev->nodename,
					"event-channel-rx", "%u", dev->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto abort_transaction;
		}
	} else {
		err = xenbus_printf(xbt, dev->nodename,
					"event-channel", "%u", dev->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto abort_transaction;
		}
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%u", 1);

	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);

	if (err) {
		message = "writing feature-persistent";
		goto abort_transaction;
	}
#endif

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);

	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

#if defined(CONFIG_NETFRONT_GSO) && defined(HAVE_LWIP)
	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%u", 1);

	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%u", 1);

	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6", "%u", 1);

	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}
#endif

	snprintf(path, sizeof(path), "%s/state", dev->nodename);
	err = xenbus_switch_state(xbt, path, XenbusStateConnected);
	if (err) {
		message = "switching state";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0, &retry);
	free(err);
	if (retry) {
		printk("completing transaction\n");
		goto again;
	}

	goto done;

abort_transaction:
	free(err);
	err = xenbus_transaction_end(xbt, 1, &retry);
	printk("Abort transaction %s\n", message);
	goto error;

done:

	snprintf(path, sizeof(path), "%s/mac", dev->nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->mac);

	if (dev->mac == NULL) {
		printk("%s: backend/mac failed\n", __func__);
		goto error;
	}

	printk("backend at %s\n",dev->backend);
	printk("mac is %s\n",dev->mac);

	{
		XenbusState state;
		char path[strlen(dev->backend) + strlen("/state") + 1];
		snprintf(path, sizeof(path), "%s/state", dev->backend);

		xenbus_watch_path_token(XBT_NIL, path, path, &dev->events);

		err = NULL;
		state = xenbus_read_integer(path);
		while (err == NULL && state < XenbusStateConnected)
			err = xenbus_wait_for_state_change(path, &state, &dev->events);
		if (state != XenbusStateConnected) {
			printk("backend not avalable, state=%d\n", state);
			xenbus_unwatch_path_token(XBT_NIL, path, path);
			goto error;
		}

		if (ip) {
			snprintf(path, sizeof(path), "%s/ip", dev->backend);
			xenbus_read(XBT_NIL, path, ip);
		}
	}

	printk("**************************\n");

	unmask_evtchn(dev->tx_evtchn);
	if (feature_split_evtchn)
		unmask_evtchn(dev->rx_evtchn);

#ifdef CONFIG_NETMAP
skip:
	if (dev->netmap)
		connect_netfront(dev);
#endif

	/* Special conversion specifier 'hh' needed for __ia64__. Without
	   this mini-os panics with 'Unaligned reference'. */
	if (rawmac)
		sscanf(dev->mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
				&rawmac[0],
				&rawmac[1],
				&rawmac[2],
				&rawmac[3],
				&rawmac[4],
				&rawmac[5]);

#ifdef CONFIG_SELECT_POLL
	dev->fd = alloc_fd(FTYPE_TAP);
	files[dev->fd].read = 0;
#endif
#ifdef CONFIG_NETFRONT_STATS
	netfront_reset_txcounters(dev);
#endif
	return dev;
error:
	free(msg);
	free(err);
	free_netfront(dev);
	return NULL;
}