Example #1
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
	if (!blkif->blk_ring_area)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}
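Example #1 above, like most of the kernel-side snippets that follow, walks the same backend sequence: allocate a VA range with alloc_vm_area(), grant-map the frontend's ring page into it, attach the backend ring state with BACK_RING_INIT, and bind the frontend's event channel to an interrupt handler. The sketch below condenses that sequence for reference; it is illustrative only: the my_* names are placeholders, and map_frontend_page()/unmap_frontend_page() stand for the driver-specific grant-mapping helpers seen in Example #1 rather than a shared API.

/* Illustrative sketch only: my_backend, my_sring_t, my_backend_irq and the
 * map_frontend_page()/unmap_frontend_page() helpers are hypothetical
 * placeholders for the driver-specific pieces shown in the examples. */
static int my_backend_map(struct my_backend *be, unsigned long ring_ref,
			  unsigned int evtchn)
{
	my_sring_t *sring;
	int err;

	if (be->irq)				/* already connected */
		return 0;

	be->ring_area = alloc_vm_area(PAGE_SIZE);	/* VA range for the grant */
	if (!be->ring_area)
		return -ENOMEM;

	err = map_frontend_page(be, ring_ref);		/* grant-map the frontend's ring page */
	if (err)
		goto free_vm;

	sring = (my_sring_t *)be->ring_area->addr;
	BACK_RING_INIT(&be->ring, sring, PAGE_SIZE);	/* attach backend ring state */

	err = bind_interdomain_evtchn_to_irqhandler(be->domid, evtchn,
						    my_backend_irq, 0,
						    "my-backend", be);
	if (err < 0)
		goto unmap;

	be->irq = err;
	return 0;

unmap:
	unmap_frontend_page(be);
	be->ring.sring = NULL;
free_vm:
	free_vm_area(be->ring_area);
	return err;
}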
Example #2
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1)
	return -1;
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1)
	return -1;

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
					    blkdev->xendev.dom,
					    blkdev->ring_ref,
					    PROT_READ | PROT_WRITE);
    if (!blkdev->sring)
	return -1;
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
	blkif_sring_t *sring_native = blkdev->sring;
	BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
	break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
	blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
	break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
	blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
	break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
		  "remote port %d, local port %d\n",
		  blkdev->xendev.protocol, blkdev->ring_ref,
		  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}
Example #3
static int net_connect(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1) {
        return 1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1) {
        return -1;
    }

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
        rx_copy = 0;
    }
    if (rx_copy == 0) {
        xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                          netdev->xendev.dom,
                                          netdev->tx_ring_ref,
                                          PROT_READ | PROT_WRITE);
    if (!netdev->txs) {
        return -1;
    }
    netdev->rxs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                          netdev->xendev.dom,
                                          netdev->rx_ring_ref,
                                          PROT_READ | PROT_WRITE);
    if (!netdev->rxs) {
        xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->txs, 1);
        netdev->txs = NULL;
        return -1;
    }
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_be_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}
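Examples #2 and #3 show the user-space (QEMU backend) equivalent of that sequence: read ring-ref and event-channel from xenstore, map the grant with xc_gnttab_map_grant_ref(), run BACK_RING_INIT over the mapping, then bind the event channel with xen_be_bind_evtchn(). Below is a condensed sketch under the same assumptions; my_dev, my_sring_t and the ring/ring_ref/sring fields are placeholder names, while the xenstore, gnttab and evtchn helpers are the ones used in the examples above.

/* Condensed from Examples #2/#3; error reporting trimmed for brevity. */
static int my_connect(struct XenDevice *xendev, struct my_dev *dev)
{
    my_sring_t *sring;

    if (xenstore_read_fe_int(xendev, "ring-ref", &dev->ring_ref) == -1 ||
        xenstore_read_fe_int(xendev, "event-channel",
                             &xendev->remote_port) == -1) {
        return -1;
    }

    dev->sring = xc_gnttab_map_grant_ref(xendev->gnttabdev, xendev->dom,
                                         dev->ring_ref,
                                         PROT_READ | PROT_WRITE);
    if (!dev->sring) {
        return -1;
    }

    sring = dev->sring;
    BACK_RING_INIT(&dev->ring, sring, XC_PAGE_SIZE);  /* attach backend ring */

    xen_be_bind_evtchn(xendev);   /* bind remote_port to a local event channel */
    return 0;
}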
Example #4
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
	if (err < 0)
		return err;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}
Example #5
/*
 * Map the frontend ring page, initialize the backend ring
 * and bind the event channel.
 */
static int chrif_map(struct xen_chrif *chrif, unsigned long ring_ref, unsigned int evtchn)
{
	int err;
	chrif_sring_t *sring;

	chrif->comms_area = alloc_vm_area(PAGE_SIZE, NULL);
	if (chrif->comms_area == NULL) {
		printk(KERN_ERR "xen: dom0: could not allocate shared page\n");
		return -ENOMEM;
	}

	err = map_frontend_pages(chrif, ring_ref);
	if (err) {
		free_vm_area(chrif->comms_area);
		printk(KERN_ERR "xen: dom0: mapping the frontend page failed\n");
		return err;
	}

	sring = (chrif_sring_t *)chrif->comms_area->addr;
	BACK_RING_INIT(&chrif->chr_ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irqhandler(chrif->domid, evtchn,
						    chrif_int, 0, "domtest2", chrif);
	if (err < 0) {
		printk(KERN_ERR "xen: dom0: chrif_int failed binding to evtchn\n");
		unmap_frontend_pages(chrif);
		free_vm_area(chrif->comms_area);
		return err;
	}

	chrif->irq = err;
	printk(KERN_DEBUG "xen: dom0: event channel bound, irq = %d\n", chrif->irq);
	printk(KERN_DEBUG "xen: dom0: chrif map finished\n");
	return 0;
}
Example #6
static int ctl_close(struct tdlog_state* s)
{
  while (s->connected) {
    s->connected--;
    tapdisk_server_unregister_event(s->connections[s->connected].id);
    close(s->connections[s->connected].fd);
    s->connections[s->connected].fd = -1;
    s->connections[s->connected].id = 0;
  }

  if (s->ctl.fd >= 0) {
    tapdisk_server_unregister_event(s->ctl.id);
    close(s->ctl.fd);
    s->ctl.fd = -1;
    s->ctl.id = 0;
  }

  if (s->ctlpath) {
    unlink(s->ctlpath);
    free(s->ctlpath);
    s->ctlpath = NULL;
  }

  /* XXX this must be fixed once requests are actually in flight */
  /* could just drain the existing ring here first */
  if (s->sring) {
    SHARED_RING_INIT(s->sring);
    BACK_RING_INIT(&s->bring, s->sring, SRINGSIZE);
  }

  return 0;
}
Example #7
int scsiback_init_sring(struct vscsibk_info *info,
		unsigned long ring_ref, unsigned int evtchn)
{
	struct vscsiif_sring *sring;
	int err;

	if (info->irq) {
		printk(KERN_ERR "scsiback: Already connected through?\n");
		return -1;
	}

	info->ring_area = alloc_vm_area(PAGE_SIZE);
	if (!info->ring_area)
		return -ENOMEM;

	err = map_frontend_page(info, ring_ref);
	if (err)
		goto free_vm;

	sring = (struct vscsiif_sring *) info->ring_area->addr;
	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irqhandler(
			info->domid, evtchn,
			scsiback_intr, 0, "vscsiif-backend", info);

	if (err < 0)
		goto unmap_page;
		
	info->irq = err;

	return 0;

unmap_page:
	unmap_frontend_page(info);
free_vm:
	free_vm_area(info->ring_area);

	return err;
}
Example #8
// map the ring page, init the backend ring and bind the event channel
static int connection_establishment(void)
{
    int err;
    chrif_sring_t *sring;
    struct vm_struct *v_start;

    // map the granted page
    v_start = map_sharedpage(info.ring_gref);
    if (v_start == NULL)
        return -EFAULT;

    // remember what is needed for a later unmap, then init the backend ring
    unmap_ops.host_addr = (unsigned long)(v_start->addr);
    unmap_ops.handle = ops.handle;
    sring = (chrif_sring_t *)v_start->addr;
    BACK_RING_INIT(&info.ring, sring, PAGE_SIZE);

    // bind the event channel
    err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn,
                                                chrif_int, 0, "dom0", &info);
    if (err < 0) {
        printk(KERN_ERR "xen: dom0: failed binding to evtchn\n");
        HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1);
        return err;
    }
    info.irq = err;
    printk(KERN_DEBUG "xen: dom0: irq = %d\n", info.irq);
    return 0;
}
Example #9
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
{
	blkif_sring_t *sring;
	int err;
	struct evtchn_bind_interdomain bind_interdomain;

	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	bind_interdomain.remote_dom  = blkif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	blkif->evtchn = bind_interdomain.local_port;

	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);

	blkif->irq = bind_evtchn_to_irqhandler(
		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);

	blkif->status = CONNECTED;

	return 0;
}
Example #10
static int map_new_dev(struct td_state *s, int minor)
{
	int tap_fd;
	tapdev_info_t *info = s->ring_info;
	char *devname;
	fd_list_entry_t *ptr;
	int page_size;

	if (asprintf(&devname,"%s/%s%d", BLKTAP_DEV_DIR, BLKTAP_DEV_NAME, minor) == -1)
		return -1;
	tap_fd = open(devname, O_RDWR);
	if (tap_fd == -1) 
	{
		DPRINTF("open failed on dev %s!\n",devname);
		goto fail;
	} 
	info->fd = tap_fd;

	/*Map the shared memory*/
	page_size = getpagesize();
	info->mem = mmap(0, page_size * BLKTAP_MMAP_REGION_SIZE, 
			  PROT_READ | PROT_WRITE, MAP_SHARED, info->fd, 0);
	if ((long int)info->mem == -1) 
	{
		DPRINTF("mmap failed on dev %s!\n",devname);
		goto fail;
	}

	/* assign the rings to the mapped memory */ 
	info->sring = (blkif_sring_t *)((unsigned long)info->mem);
	BACK_RING_INIT(&info->fe_ring, info->sring, page_size);
	
	info->vstart = 
	        (unsigned long)info->mem + (BLKTAP_RING_PAGES * page_size);

	ioctl(info->fd, BLKTAP_IOCTL_SENDPID, process );
	ioctl(info->fd, BLKTAP_IOCTL_SETMODE, BLKTAP_MODE_INTERPOSE );
	free(devname);

	/*Update the fd entry*/
	ptr = fd_start;
	while (ptr != NULL) {
		if (s == ptr->s) {
			ptr->tap_fd = tap_fd;

			/* Setup fd_handler for qemu main loop */
			DPRINTF("set tap_fd = %d\n", tap_fd);
			qemu_set_fd_handler2(tap_fd, NULL, &handle_blktap_iomsg, NULL, s);

			break;
		}
		ptr = ptr->next;
	}	


	DPRINTF("map_new_dev = %d\n", minor);
	return minor;

 fail:
	free(devname);
	return -1;
}
Example #11
static int device_probe(struct xenbus_device *dev, const struct xenbus_device_id *id)
{
        struct backendinfo *binfo;
        struct vm_struct *v_start;
        int err;
        as_sring_t *sring;
        char *gref, *port;

        binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
        if (!binfo) {
                xenbus_dev_error(dev, -ENOMEM, "allocating info structure");
                return -ENOMEM;
        }
        binfo->dev = dev;
        printk(KERN_ALERT "Probe fired!\n");

        gref = xenbus_read(XBT_NIL, binfo->dev->otherend, "gref", NULL);
        port = xenbus_read(XBT_NIL, binfo->dev->otherend, "port", NULL);
        info.gref = mycti(gref);
        info.evtchn = mycti(port);
        kfree(gref);
        kfree(port);
        printk(KERN_INFO "Xenstore read of port and gref succeeded: %d, %d\n",
               info.evtchn, info.gref);
        info.remoteDomain = binfo->dev->otherend_id;
        printk(KERN_DEBUG "xen: dom0: gnt_init with gref = %d\n", info.gref);

        v_start = alloc_vm_area(PAGE_SIZE, NULL);
        if (v_start == NULL) {
                printk(KERN_ERR "xen: dom0: could not allocate page\n");
                return -ENOMEM;
        }

        gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map,
                          info.gref, info.remoteDomain);
        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
                printk(KERN_ERR "xen: dom0: HYPERVISOR map grant ref failed\n");
                free_vm_area(v_start);
                return -EFAULT;
        }
        if (ops.status) {
                printk(KERN_ERR "xen: dom0: HYPERVISOR map grant ref failed, status = %d\n",
                       ops.status);
                free_vm_area(v_start);
                return -EFAULT;
        }
        printk(KERN_DEBUG "xen: dom0: shared page = %p, handle = %u, status = %d\n",
               v_start->addr, ops.handle, ops.status);

        /* remember what is needed for a later unmap */
        unmap_ops.host_addr = (unsigned long)(v_start->addr);
        unmap_ops.handle = ops.handle;

        /*
         * Debug helper (disabled): dump the last bytes of the shared page.
         *
         *   char *p = (char *)(v_start->addr) + PAGE_SIZE - 1;
         *   int i;
         *   for (i = 0; i <= 10; i++, p--)
         *           printk(KERN_DEBUG "%c", *p);
         */

        sring = (as_sring_t *)v_start->addr;
        BACK_RING_INIT(&info.ring, sring, PAGE_SIZE);

        err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn,
                                                    as_int, 0, "dom0", &info);
        if (err < 0) {
                printk(KERN_ERR "xen: dom0: gnt_init failed binding to evtchn\n");
                HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1);
                free_vm_area(v_start);
                return err;
        }
        info.irq = err;
        printk(KERN_DEBUG "xen: dom0: end gnt_init: irq = %d\n", info.irq);
        return 0;
}
Example #12
int netif_map(netif_t *netif, unsigned long tx_ring_ref,
	      unsigned long rx_ring_ref, unsigned int evtchn)
{
	int err = -ENOMEM;
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	struct evtchn_bind_interdomain bind_interdomain;

	/* Already connected through? */
	if (netif->irq)
		return 0;

	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->tx_comms_area == NULL)
		return -ENOMEM;
	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->rx_comms_area == NULL)
		goto err_rx;

	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
	if (err)
		goto err_map;

	bind_interdomain.remote_dom = netif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		goto err_hypervisor;

	netif->evtchn = bind_interdomain.local_port;

	netif->irq = bind_evtchn_to_irqhandler(
		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
	disable_irq(netif->irq);

	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);

	rxs = (netif_rx_sring_t *)
		((char *)netif->rx_comms_area->addr);
	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);

	netif->rx_req_cons_peek = 0;

	netif_get(netif);
	wmb(); /* Other CPUs see new state before interface is started. */

	rtnl_lock();
	netif->status = CONNECTED;
	wmb();
	if (netif_running(netif->dev))
		__netif_up(netif);
	rtnl_unlock();

	return 0;
err_hypervisor:
	unmap_frontend_pages(netif);
err_map:
	free_vm_area(netif->rx_comms_area);
err_rx:
	free_vm_area(netif->tx_comms_area);
	return err;
}
Example #13
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    XenDevice *xendev = dataplane->xendev;
    Error *local_err = NULL;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                              dataplane->ring_ref,
                                              dataplane->nr_ring_ref,
                                              PROT_READ | PROT_WRITE,
                                              &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto stop;
    }

    aio_context_acquire(dataplane->ctx);
    blk_set_aio_context(dataplane->blk, dataplane->ctx);
    aio_context_release(dataplane->ctx);
    return;

stop:
    xen_block_dataplane_stop(dataplane);
}