Example #1
/*
 * Map one page for the ring.
 */
static int map_frontend_pages(struct xen_chrif *chrif, grant_ref_t ring_ref)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)chrif->comms_area->addr,
			  GNTMAP_host_map, ring_ref, chrif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) {
		printk(KERN_DEBUG "xen: dom0: HYPERVISOR map grant ref failed\n");
		return -EFAULT;
	}

	if (op.status) {
		struct gnttab_unmap_grant_ref unop;

		gnttab_set_unmap_op(&unop, (unsigned long)chrif->comms_area->addr,
				    GNTMAP_host_map, op.handle);
		HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
		printk(KERN_DEBUG "xen: dom0: map grant ref failed, status = %d\n",
		       op.status);
		return op.status;
	}

	/* Record ring_ref and handle for the later unmap. */
	chrif->shmem_ref = ring_ref;
	chrif->shmem_handle = op.handle;

	printk(KERN_DEBUG "xen: dom0: map page success, page = %p, handle = %u, status = %d\n",
	       chrif->comms_area->addr, op.handle, op.status);
	printk(KERN_DEBUG "xen: dom0: map frontend pages finished\n");
	return 0;
}
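
The snippet above records shmem_ref and shmem_handle but does not show the teardown. A minimal sketch of the matching unmap, assuming the same struct xen_chrif fields as in Example #1, could look like this:

/* Sketch only: undo map_frontend_pages() using the handle recorded above.
 * The struct xen_chrif fields are taken from Example #1 and assumed here. */
static void unmap_frontend_pages(struct xen_chrif *chrif)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)chrif->comms_area->addr,
			    GNTMAP_host_map, chrif->shmem_handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		printk(KERN_DEBUG "xen: dom0: HYPERVISOR unmap grant ref failed\n");
	else if (op.status)
		printk(KERN_DEBUG "xen: dom0: unmap grant ref failed, status = %d\n",
		       op.status);
}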
Example #2
static int map_frontend_page(struct vscsibk_info *info,
			     unsigned long ring_ref)
{
	struct gnttab_map_grant_ref op;
	int err;

	gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr,
			  GNTMAP_host_map, ring_ref,
			  info->domid);

	/* Retry while the hypervisor reports a transient failure. */
	do {
		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
		BUG_ON(err);
		if (op.status == GNTST_eagain)
			msleep(10);
	} while (op.status == GNTST_eagain);

	if (op.status) {
		printk(KERN_ERR "scsiback: Grant table operation failure!\n");
		return op.status;
	}

	info->shmem_ref    = ring_ref;
	info->shmem_handle = op.handle;

	return GNTST_okay;
}
Example #3
static int map_frontend_pages(
	netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
{
	struct gnttab_map_grant_ref op;
	int ret;

	gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
			  GNTMAP_host_map, tx_ring_ref, netif->domid);

	lock_vm_area(netif->tx_comms_area);
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
	unlock_vm_area(netif->tx_comms_area);
	BUG_ON(ret);

	if (op.status) {
		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
		return op.status;
	}

	netif->tx_shmem_ref    = tx_ring_ref;
	netif->tx_shmem_handle = op.handle;

	gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
			  GNTMAP_host_map, rx_ring_ref, netif->domid);

	lock_vm_area(netif->rx_comms_area);
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
	unlock_vm_area(netif->rx_comms_area);
	BUG_ON(ret);

	if (op.status) {
		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
		return op.status;
	}

	netif->rx_shmem_ref    = rx_ring_ref;
	netif->rx_shmem_handle = op.handle;

	return 0;
}
Example #4
File: netback.c Project: 7799/linux
static inline void xenvif_tx_create_map_op(struct xenvif *vif,
					  u16 pending_idx,
					  struct xen_netif_tx_request *txp,
					  struct gnttab_map_grant_ref *mop)
{
	vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, vif->domid);

	memcpy(&vif->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
}
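
xenvif_tx_create_map_op() only fills in one map op of a batch; the batch is submitted elsewhere in netback. A hedged sketch of how such a batch is typically flushed with gnttab_map_refs() follows (the helper name and the nr_mops counter are illustrative, not the actual driver code):

/* Illustrative sketch: flush a batch of map ops built by
 * xenvif_tx_create_map_op().  'nr_mops' and this helper are assumptions,
 * not the real xen-netback logic. */
static void example_flush_tx_map_ops(struct xenvif *vif, unsigned int nr_mops)
{
	int ret;

	if (nr_mops == 0)
		return;

	/* Map all pending grants in one hypercall, backing vif->pages_to_map. */
	ret = gnttab_map_refs(vif->tx_map_ops, NULL,
			      vif->pages_to_map, nr_mops);
	BUG_ON(ret);

	/* Each op's status must still be checked individually;
	 * GNTST_okay (0) means that grant was mapped successfully. */
}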
Example #5
/* Based on Rusty Russell's skeleton driver's map_page */
struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
					 const grant_ref_t refs[],
					 unsigned int nr)
{
	grant_handle_t *handles = kmalloc(nr * sizeof(*handles), GFP_KERNEL);
	struct vm_struct *area;
	unsigned int i;

	if (!handles)
		return ERR_PTR(-ENOMEM);

	area = alloc_vm_area(nr * PAGE_SIZE);
	if (!area) {
		kfree(handles);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr; ++i) {
		struct gnttab_map_grant_ref op;

		gnttab_set_map_op(&op,
				  (unsigned long)area->addr + i * PAGE_SIZE,
				  GNTMAP_host_map, refs[i],
				  dev->otherend_id);

		gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref,
						   &op);

		if (op.status == GNTST_okay) {
			handles[i] = op.handle;
			continue;
		}

		unmap_ring_vfree(dev, area, i, handles);
		xenbus_dev_fatal(dev, op.status,
				 "mapping page %u (ref %#x, dom%d)",
				 i, refs[i], dev->otherend_id);
		BUG_ON(!IS_ERR(ERR_PTR(op.status)));
		return ERR_PTR(-EINVAL);
	}

	/* Stuff the handle array in an unused field. */
	area->phys_addr = (unsigned long)handles;

	return area;
}
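
Since the handle array is stashed in area->phys_addr, the matching teardown can recover it from there. A hedged sketch is below; the helper name is illustrative, and the real driver uses its own unmap_ring_vfree():

/* Sketch only: unmap the 'nr' pages mapped above and free the vm area.
 * Assumes the handle array was stashed in area->phys_addr, as done by
 * xenbus_map_ring_valloc() above. */
static void example_unmap_ring_vfree(struct xenbus_device *dev,
				     struct vm_struct *area, unsigned int nr)
{
	grant_handle_t *handles = (grant_handle_t *)(unsigned long)area->phys_addr;
	unsigned int i;

	for (i = 0; i < nr; ++i) {
		struct gnttab_unmap_grant_ref op;

		gnttab_set_unmap_op(&op,
				    (unsigned long)area->addr + i * PAGE_SIZE,
				    GNTMAP_host_map, handles[i]);

		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
			BUG();
		if (op.status != GNTST_okay)
			xenbus_dev_error(dev, op.status, "unmapping page %u", i);
	}

	free_vm_area(area);
	kfree(handles);
}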
Example #6
static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			  GNTMAP_host_map, shared_page, blkif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		DPRINTK("Grant table operation failure !\n");
		return op.status;
	}

	blkif->shmem_ref = shared_page;
	blkif->shmem_handle = op.handle;

	return 0;
}
Example #7
/* Map the shared page granted by the frontend. */
static struct vm_struct *map_sharedpage(grant_ref_t gref)
{
    struct vm_struct *vm_point;

    vm_point = alloc_vm_area(PAGE_SIZE, NULL);
    if (!vm_point) {
        printk(KERN_DEBUG "xen: dom0: could not allocate shared_page\n");
        return NULL;
    }

    gnttab_set_map_op(&ops, (unsigned long)vm_point->addr, GNTMAP_host_map,
                      gref, info.remoteDomain);
    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
        printk(KERN_DEBUG "xen: dom0: HYPERVISOR map grant ref failed\n");
        free_vm_area(vm_point);
        return NULL;
    }
    if (ops.status) {
        printk(KERN_DEBUG "xen: dom0: HYPERVISOR map grant ref failed, status = %d\n",
               ops.status);
        free_vm_area(vm_point);
        return NULL;
    }
    printk(KERN_DEBUG "xen: dom0: map shared page success, shared_page = %p, handle = %u, status = %d\n",
           vm_point->addr, ops.handle, ops.status);
    return vm_point;
}
Example #8
int init_module(void)
{
	/* v_start points to a struct vm_struct
	 * (fields: addr, size, pages, nr_pages, phys_addr). */
	struct vm_struct *v_start;
	int ret;

	printk("xen: init_module with gref = %d\n", gref);

	/*
	 * alloc_vm_area() reserves a range of kernel address space and
	 * allocates page tables for it, but performs no actual mapping.
	 */
	v_start = alloc_vm_area(PAGE_SIZE, NULL);
	if (!v_start) {
		printk("xen: could not allocate page\n");
		return -ENOMEM;
	}

	/*
	 * static inline void
	 * gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
	 *                   uint32_t flags, grant_ref_t ref, domid_t domid)
	 */
	gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map,
			  gref, remoteDomain);

	/* The hypercall returns 0 on success. */
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1);
	if (ret) {
		printk("xen: HYPERVISOR map grant ref failed.\n");
		free_vm_area(v_start);
		return -EFAULT;
	}
	/* Check the per-op status returned in the out parameter. */
	if (ops.status != 0) {
		printk("xen: HYPERVISOR map grant ref failed with status %d.\n",
		       ops.status);
		free_vm_area(v_start);
		return -EFAULT;
	}
	/* Print the handle to dmesg. */
	printk("xen: map grant success:\n\tshared_page = %lx, handle = %d\n",
	       (unsigned long)v_start->addr, ops.handle);

	/* Once mapped, pre-fill the unmap op with the handle and host_addr. */
	unmap_ops.host_addr = (unsigned long)v_start->addr;
	unmap_ops.handle = ops.handle;

	return 0;
}
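
init_module() pre-fills unmap_ops but the exit path is not shown. A minimal sketch of the matching cleanup, assuming v_start is kept at module scope so the vm area can still be freed:

/* Sketch only: matching module exit.  Assumes 'v_start' was promoted to a
 * module-scope variable; unmap_ops was already filled in by init_module(). */
void cleanup_module(void)
{
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1);
	if (ret || unmap_ops.status)
		printk("xen: unmap grant ref failed: ret = %d, status = %d\n",
		       ret, unmap_ops.status);

	free_vm_area(v_start);
	printk("xen: cleanup_module finished\n");
}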
Example #9
/* Based on xenbus_backend_client.c:xenbus_map_ring() */
static int net_accel_map_grant(struct xenbus_device *dev, int gnt_ref,
                               grant_handle_t *handle, void *vaddr,
                               u64 *dev_bus_addr, unsigned flags)
{
    struct gnttab_map_grant_ref op;

    gnttab_set_map_op(&op, (unsigned long)vaddr, flags,
                      gnt_ref, dev->otherend_id);

    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));

    if (op.status != GNTST_okay) {
        xenbus_dev_error(dev, op.status,
                         "failed mapping in shared page %d from domain %d\n",
                         gnt_ref, dev->otherend_id);
    } else {
    } else {
        *handle = op.handle;
        if (dev_bus_addr)
            *dev_bus_addr = op.dev_bus_addr;
    }

    return op.status;
}
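
For context, a hedged caller sketch follows; the surrounding helper and parameters are assumptions for illustration, not part of the accelerator driver:

/* Illustrative caller (not from the driver): map a frontend ring page
 * read-write into an address range previously reserved with alloc_vm_area(). */
static int example_map_ring_page(struct xenbus_device *dev, int ring_gref,
                                 struct vm_struct *area,
                                 grant_handle_t *handle)
{
    u64 bus_addr;
    int status;

    status = net_accel_map_grant(dev, ring_gref, handle, area->addr,
                                 &bus_addr, GNTMAP_host_map);
    if (status != GNTST_okay)
        return -EINVAL;

    return 0;
}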
Example #10
static int omx_xen_accept_gref_list(omx_xenif_t * omx_xenif,
				    struct omx_xen_user_region_segment *seg,
				    uint32_t gref, void **vaddr, uint8_t part)
{
	int ret = 0;
	struct backend_info *be = omx_xenif->be;
	struct vm_struct *area;
	pte_t *pte;
	struct gnttab_map_grant_ref ops = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		//.flags = GNTMAP_host_map,
		.ref = gref,
		.dom = be->remoteDomain,
	};

	dprintk_in();

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		ret = -ENOMEM;
		goto out;
	}

	seg->vm_gref[part] = area;

	ops.host_addr = arbitrary_virt_to_machine(pte).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
		printk_err("HYPERVISOR map grant ref failed");
		ret = -ENOSYS;
		goto out;
	}
	dprintk_deb("addr=%#lx, mfn=%#lx, kaddr=%#lx\n",
		    (unsigned long)area->addr, ops.dev_bus_addr >> PAGE_SHIFT,
		    ops.host_addr);
	if (ops.status) {
		printk_err("HYPERVISOR map grant ref failed status = %d",
			   ops.status);

		ret = ops.status;
		goto out;
	}

	dprintk_deb("gref_offset = %#x\n", seg->gref_offset);
	*vaddr = (area->addr + seg->gref_offset);

	ret = ops.handle;
#if 0
	for (i = 0; i < (size + 2); i++) {
		dprintk_deb("gref_list[%d] = %u\n", i,
			    *(((uint32_t *) * vaddr) + i));
	}
#endif

	seg->all_handle[part] = ops.handle;
	dprintk_deb("vaddr = %p, area->addr=%p, handle[%d]=%d\n", vaddr,
		    area->addr, part, seg->all_handle[part]);

out:
	dprintk_out();
	return ret;
}

int omx_xen_register_user_segment(omx_xenif_t * omx_xenif,
				  struct omx_ring_msg_register_user_segment *req)
{

	struct backend_info *be = omx_xenif->be;
	void *vaddr = NULL;
	uint32_t **gref_list;
	struct page **page_list;
	struct omxback_dev *omxdev = be->omxdev;
	struct omx_endpoint *endpoint;
	struct omx_xen_user_region *region;
	struct omx_xen_user_region_segment *seg;
	int ret = 0;
	int i = 0, k = 0;
	uint8_t eid, nr_parts;
	uint16_t first_page_offset, gref_offset;
	uint32_t sid, id, nr_grefs, nr_pages, length,
	    gref[OMX_XEN_GRANT_PAGES_MAX];
	uint64_t domU_vaddr;
	int idx = 0, sidx = 0;
	struct gnttab_map_grant_ref *map;
	struct gnttab_unmap_grant_ref *unmap;

	dprintk_in();

	TIMER_START(&t_reg_seg);
	sid = req->sid;
	id = req->rid;
	eid = req->eid;
	domU_vaddr = req->aligned_vaddr;
	nr_grefs = req->nr_grefs;
	nr_pages = req->nr_pages;
	nr_parts = req->nr_parts;
	length = req->length;
	dprintk_deb("nr_parts = %#x\n", nr_parts);
	for (k = 0; k < nr_parts; k++) {
		gref[k] = req->gref[k];
		dprintk_deb("printing gref = %lu\n", gref[k]);
	}
	gref_offset = req->gref_offset;
	first_page_offset = req->first_page_offset;
	endpoint = omxdev->endpoints[eid];

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err(KERN_ERR "Cannot access non-existing region %d\n",
			   id);
		ret = -EINVAL;
		goto out;
	}
	dprintk_deb("Got region @%#lx id=%u\n", (unsigned long)region, id);

	seg = &region->segments[sid];
	if (unlikely(!seg)) {
		printk(KERN_ERR "Cannot access non-existing segment %d\n", sid);
		ret = -EINVAL;
		goto out;
	}
	dprintk_deb("Got segment @%#lx id=%u\n", (unsigned long)seg, sid);

	seg->gref_offset = gref_offset;
	dprintk_deb
	    ("Offset of actual list of grant references (in the frontend) = %#x\n",
	     gref_offset);

	for (k = 0; k < nr_parts; k++) {
		seg->all_gref[k] = gref[k];
		dprintk_deb("grant reference for list of grefs = %#x\n",
			    gref[k]);
	}
	seg->nr_parts = nr_parts;
	dprintk_deb("parts of gref list = %#x\n", nr_parts);

	TIMER_START(&t_alloc_pages);
	gref_list = kzalloc(sizeof(uint32_t *) * nr_parts, GFP_ATOMIC);
	if (!gref_list) {
		ret = -ENOMEM;
		printk_err("gref list is NULL, ENOMEM!!!\n");
		goto out;
	}

	map =
	    kzalloc(sizeof(struct gnttab_map_grant_ref) * nr_pages,
		    GFP_ATOMIC);
	if (!map) {
		ret = -ENOMEM;
		printk_err(" map is NULL, ENOMEM!!!\n");
		goto out;
	}
	unmap =
	    kzalloc(sizeof(struct gnttab_unmap_grant_ref) * nr_pages,
		    GFP_ATOMIC);
	if (!unmap) {
		ret = -ENOMEM;
		printk_err(" unmap is NULL, ENOMEM!!!\n");
		goto out;
	}

#ifdef OMX_XEN_COOKIES
	seg->cookie = omx_xen_page_get_cookie(omx_xenif, nr_pages);
	if (!seg->cookie) {
		printk_err("cannot get cookie\n");
		goto out;
	}
	page_list = seg->cookie->pages;
#else
	page_list = kzalloc(sizeof(struct page *) * nr_pages, GFP_ATOMIC);
	if (!page_list) {
		ret = -ENOMEM;
		printk_err(" page list is NULL, ENOMEM!!!\n");
		goto out;
	}

	ret = alloc_xenballooned_pages(nr_pages, page_list, false /* lowmem */);
	if (ret) {
		printk_err("cannot allocate xenballooned_pages\n");
		goto out;
	}
#endif
	TIMER_STOP(&t_alloc_pages);

	TIMER_START(&t_accept_gref_list);
	for (k = 0; k < nr_parts; k++) {
		ret =
		    omx_xen_accept_gref_list(omx_xenif, seg, gref[k], &vaddr,
					     k);
		if (ret < 0) {
			printk_err("Cannot accept gref list, = %d\n", ret);
			goto out;
		}

		gref_list[k] = (uint32_t *) vaddr;
		if (!gref_list) {
			printk_err("gref_list is NULL!!!, = %p\n", gref_list);
			ret = -ENOSYS;
			goto out;
		}
	}
	TIMER_STOP(&t_accept_gref_list);
	seg->gref_list = gref_list;

	seg->nr_pages = nr_pages;
	seg->first_page_offset = first_page_offset;

	i = 0;
	idx = 0;
	sidx = 0;
	seg->map = map;
	seg->unmap = unmap;
	while (i < nr_pages) {
		void *tmp_vaddr;
		unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page_list[i]));
		if (sidx % 256 == 0)
			dprintk_deb("gref_list[%d][%d] = %#x\n", idx, sidx,
				    gref_list[idx][sidx]);


		gnttab_set_map_op(&map[i], addr, GNTMAP_host_map,
				  gref_list[idx][sidx], be->remoteDomain);
		gnttab_set_unmap_op(&unmap[i], addr, GNTMAP_host_map, -1 /* handle */ );
		i++;
		if ((unlikely(i % nr_grefs == 0))) {
			idx++;
			sidx = 0;
		} else {
			sidx++;
		}
		//printk(KERN_INFO "idx=%d, i=%d, sidx=%d\n", idx, i, sidx);
	}
	TIMER_START(&t_accept_grants);
	ret = gnttab_map_refs(map, NULL, page_list, nr_pages);
	if (ret) {
		printk_err("Error mapping, ret = %d\n", ret);
		goto out;
	}
	TIMER_STOP(&t_accept_grants);

	for (i = 0; i < nr_pages; i++) {
		if (map[i].status) {
			ret = -EINVAL;
			printk_err("idx %d, status = %d\n", i, map[i].status);
			goto out;
		} else {
			//BUG_ON(map->map_ops[i].handle == -1);
			unmap[i].handle = map[i].handle;
			dprintk_deb("map handle = %d\n", map[i].handle);
		}
	}

	seg->pages = page_list;
	seg->nr_pages = nr_pages;
	seg->length = length;
	region->total_length += length;
	dprintk_deb("total_length = %#lx, nrpages=%lu, pages = %#lx\n",
		    region->total_length, seg->nr_pages,
		    (unsigned long)seg->pages);
	goto all_ok;
out:
	printk_err("error registering, try to debug MORE!!!!\n");

all_ok:
	TIMER_STOP(&t_reg_seg);
	dprintk_out();
	return ret;
}

int omx_xen_create_user_region(omx_xenif_t * omx_xenif, uint32_t id,
			       uint64_t vaddr, uint32_t nr_segments,
			       uint32_t nr_pages, uint32_t nr_grefs,
			       uint8_t eid)
{

	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *omxdev = be->omxdev;
	struct omx_endpoint *endpoint = omxdev->endpoints[eid];
	struct omx_xen_user_region *region;
	int ret = 0;

	dprintk_in();
	TIMER_START(&t_create_reg);
	//udelay(1000);
	/* allocate the relevant region */
	region =
	    kzalloc(sizeof(struct omx_xen_user_region) +
		    nr_segments * sizeof(struct omx_xen_user_region_segment),
		    GFP_KERNEL);
	if (!region) {
		printk_err
		    ("No memory to allocate the region/segment buffers\n");
		ret = -ENOMEM;
		goto out;
	}

	/* init stuff needed :S */
	kref_init(&region->refcount);
	region->total_length = 0;
	region->nr_vmalloc_segments = 0;

	region->total_registered_length = 0;

	region->id = id;
	region->nr_segments = nr_segments;
	region->eid = eid;

	region->endpoint = endpoint;
	region->dirty = 0;

	if (unlikely(rcu_access_pointer(endpoint->xen_regions[id]) != NULL)) {
		printk(KERN_ERR "Cannot create busy region %d\n", id);
		ret = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(endpoint->xen_regions[id], region);

out:
	TIMER_STOP(&t_create_reg);
	dprintk_out();
	return ret;
}

/* Various region/segment handler functions */

void
omx_xen_user_region_destroy_segments(struct omx_xen_user_region *region,
				     struct omx_endpoint *endpoint)
{
	int i;

	dprintk_in();
	if (!endpoint) {
		printk_err("endpoint is null!!\n");
		return;
	}
	for (i = 0; i < region->nr_segments; i++)
		omx_xen_deregister_user_segment(endpoint->be->omx_xenif,
						region->id, i,
						endpoint->endpoint_index);

	dprintk_out();
}
Example #11
static int device_probe(struct xenbus_device *dev,
			const struct xenbus_device_id *id)
{
	struct backendinfo *binfo;
	struct vm_struct *v_start;
	as_sring_t *sring;
	char *gref, *port;
	int err;

	binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		xenbus_dev_error(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	binfo->dev = dev;
	printk(KERN_ALERT "Probe fired!\n");

	/* Read the grant reference and event channel published by the frontend. */
	gref = xenbus_read(XBT_NIL, binfo->dev->otherend, "gref", NULL);
	port = xenbus_read(XBT_NIL, binfo->dev->otherend, "port", NULL);
	if (IS_ERR(gref) || IS_ERR(port)) {
		printk(KERN_DEBUG "xen: dom0: xenbus_read of gref/port failed\n");
		return -EFAULT;
	}
	info.gref = mycti(gref);
	info.evtchn = mycti(port);
	kfree(gref);
	kfree(port);
	printk("Xenstore read port and gref success: %d, %d\n",
	       info.evtchn, info.gref);

	info.remoteDomain = binfo->dev->otherend_id;
	printk(KERN_DEBUG "xen: dom0: gnt_init with gref = %d\n", info.gref);

	v_start = alloc_vm_area(PAGE_SIZE, NULL);
	if (!v_start) {
		printk("xen: dom0: could not allocate page\n");
		return -ENOMEM;
	}

	gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map,
			  info.gref, info.remoteDomain);
	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
		printk(KERN_DEBUG "xen: dom0: HYPERVISOR map grant ref failed\n");
		free_vm_area(v_start);
		return -EFAULT;
	}
	if (ops.status) {
		printk(KERN_DEBUG "xen: dom0: HYPERVISOR map grant ref failed, status = %d\n",
		       ops.status);
		free_vm_area(v_start);
		return -EFAULT;
	}
	printk(KERN_DEBUG "xen: dom0: shared_page = %p, handle = %u, status = %d\n",
	       v_start->addr, ops.handle, ops.status);

	/* Pre-fill the unmap op for the teardown path. */
	unmap_ops.host_addr = (unsigned long)v_start->addr;
	unmap_ops.handle = ops.handle;

	/*
	 * Debug: dump the last few bytes of the shared page.
	 * p = (char *)v_start->addr + PAGE_SIZE - 1;
	 * for (i = 0; i <= 10; i++, p--)
	 *	printk(KERN_DEBUG "%c", *p);
	 */

	/* The shared page holds the ring; initialise the backend side of it. */
	sring = (as_sring_t *)v_start->addr;
	BACK_RING_INIT(&info.ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain,
						    info.evtchn, as_int, 0,
						    "dom0", &info);
	if (err < 0) {
		printk(KERN_DEBUG "xen: dom0: gnt_init failed binding to evtchn\n");
		HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1);
		free_vm_area(v_start);
		return -EFAULT;
	}
	info.irq = err;

	printk(KERN_DEBUG "xen: dom0: end gnt_init: irq = %d\n", info.irq);
	return 0;
}