Example #1
/*
 * Map the frontend's shared ring page, initialize the back ring,
 * and bind the interdomain event channel.
 */
static int chrif_map(struct xen_chrif *chrif, unsigned long ring_ref, unsigned int evtchn)
{
	int err;
	chrif_sring_t *sring;

	chrif->comms_area = alloc_vm_area(PAGE_SIZE, NULL);
	if (chrif->comms_area == NULL) {
		printk("\nxen: dom0: could not allocate shared_page");
		return -ENOMEM;
	}
	
	err = map_frontend_pages(chrif, ring_ref);
	if (err) {
		free_vm_area(chrif->comms_area);
		printk("\nxen: dom0: map frontend page failed");
		return err;
	}

	sring = (chrif_sring_t *)chrif->comms_area->addr;
	BACK_RING_INIT(&chrif->chr_ring, sring, PAGE_SIZE);
	
	err = bind_interdomain_evtchn_to_irqhandler(chrif->domid, evtchn,
						    chrif_int, 0, "domtest2", chrif);
	if (err < 0) {
		printk(KERN_DEBUG "\nxen: dom0: chrif_int failed binding to evtchn");
		unmap_frontend_pages(chrif);
		return err;
	}

	chrif->irq = err;
	printk(KERN_DEBUG "\nxen: dom0: bind event channel finished: irq = %d\n", chrif->irq);
	
    printk("\nxen: dom0: chrif map finished, otherend_id");
    return 0;
}
Example #2
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
	if (!blkif->blk_ring_area)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}
Example #3
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
	pgprot_t prot, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset, vaddr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
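
The offset handling above is what lets callers pass a non-page-aligned physical address. A minimal caller sketch, assuming a hypothetical 4-byte device register at a made-up unaligned physical address:

/* ioremap() page-aligns the mapping internally and returns a pointer
 * already adjusted by the in-page offset, so the caller can ignore
 * alignment entirely. 0x10000044 is purely illustrative. */
static u32 read_unaligned_reg(void)
{
	void __iomem *reg = ioremap(0x10000044, 4);
	u32 val;

	if (!reg)
		return 0;
	val = readl(reg);	/* reg already includes the 0x44 offset */
	iounmap(reg);
	return val;
}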
Example #4
static int unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area,
			    unsigned int nr, grant_handle_t handles[])
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < nr; ++i) {
		struct gnttab_unmap_grant_ref op;

		gnttab_set_unmap_op(&op,
				    (unsigned long)area->addr + i * PAGE_SIZE,
				    GNTMAP_host_map, handles[i]);

		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					      &op, 1))
			BUG();
		if (op.status == GNTST_okay)
			continue;

		xenbus_dev_error(dev, op.status,
				 "unmapping page %u (handle %#x)",
				 i, handles[i]);
		err = -EINVAL;
	}

	if (!err) {
		free_vm_area(area);
		kfree(handles);
	}

	return err;
}
Example #5
static void netif_free(netif_t *netif)
{
	atomic_dec(&netif->refcnt);
	wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);

	if (netif->irq)
		unbind_from_irqhandler(netif->irq, netif);
	
	unregister_netdev(netif->dev);

	if (netif->tx.sring) {
		unmap_frontend_pages(netif);
		free_vm_area(netif->tx_comms_area);
		free_vm_area(netif->rx_comms_area);
	}

	free_netdev(netif->dev);
}
Example #6
static void _stp_handle_start(struct _stp_msg_start *st)
{
	int handle_startup;

	mutex_lock(&_stp_transport_mutex);
	handle_startup = (! _stp_start_called && ! _stp_exit_called);
	_stp_start_called = 1;
	mutex_unlock(&_stp_transport_mutex);
	
	if (handle_startup) {
		dbug_trans(1, "stp_handle_start\n");

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) // linux commit #5f4352fb
#if LINUX_VERSION_CODE <  KERNEL_VERSION(2,6,29) // linux commit #9be260a6
#ifdef STAPCONF_VM_AREA
		{ /* PR9740: workaround for kernel valloc bug. */
                  /* PR14611: not required except within above kernel range. */
			void *dummy;
#ifdef STAPCONF_VM_AREA_PTE
			dummy = alloc_vm_area (PAGE_SIZE, NULL);
#else
			dummy = alloc_vm_area (PAGE_SIZE);
#endif
			free_vm_area (dummy);
		}
#endif
#endif
#endif

		_stp_target = st->target;
		st->res = systemtap_module_init();
		if (st->res == 0)
			_stp_probes_started = 1;

                /* Register the module notifier. */
                if (!_stp_module_notifier_active) {
                        int rc = register_module_notifier(& _stp_module_notifier_nb);
                        if (rc == 0)
                                _stp_module_notifier_active = 1;
                        else
                                _stp_warn ("Cannot register module notifier (%d)\n", rc);
                }

		/* Called from the user context in response to a proc
		   file write (in _stp_ctl_write_cmd), so may notify
		   the reader directly. */
		_stp_ctl_send_notify(STP_START, st, sizeof(*st));

		/* Register the panic notifier. */
#if STP_TRANSPORT_VERSION == 2
		atomic_notifier_chain_register(&panic_notifier_list, &_stp_module_panic_notifier_nb);
#endif
	}
}
Example #7
/*
 * Unmap the single ring page and release its virtual area.
 */
static void unmap_frontend_pages(struct xen_chrif *chrif)
{
	struct gnttab_unmap_grant_ref unop;
	
	gnttab_set_unmap_op(&unop, (unsigned long)chrif->comms_area->addr, 
		GNTMAP_host_map, chrif->shmem_handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1))
		printk(KERN_DEBUG "\nxen:dom0: unmap of shared page failed");

	free_vm_area(chrif->comms_area);
	printk(KERN_DEBUG "\nxen:dom0: unmap of frontend pages finished");
}
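
For reference, the matching map side (called from chrif_map in Example #1) would look roughly like this; a sketch assuming the chrif fields used above plus a hypothetical shmem_ref field, not the original implementation:

static int map_frontend_pages(struct xen_chrif *chrif, grant_ref_t ring_ref)
{
	struct gnttab_map_grant_ref op;

	/* map the frontend's granted ring page at comms_area->addr */
	gnttab_set_map_op(&op, (unsigned long)chrif->comms_area->addr,
			  GNTMAP_host_map, ring_ref, chrif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		printk(KERN_DEBUG "\nxen: dom0: grant map failed, status = %d", op.status);
		return op.status;
	}

	/* save the handle so unmap_frontend_pages() can undo the mapping */
	chrif->shmem_handle = op.handle;
	chrif->shmem_ref = ring_ref;	/* hypothetical field, for bookkeeping */
	return 0;
}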
Example #8
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
{
	blkif_sring_t *sring;
	int err;
	struct evtchn_bind_interdomain bind_interdomain;

	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	bind_interdomain.remote_dom  = blkif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	blkif->evtchn = bind_interdomain.local_port;

	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);

	blkif->irq = bind_evtchn_to_irqhandler(
		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);

	blkif->status = CONNECTED;

	return 0;
}
Example #9
static void free_blkif(void *arg)
{
	blkif_t *blkif = (blkif_t *)arg;

	if (blkif->irq)
		unbind_from_irqhandler(blkif->irq, blkif);

	if (blkif->blk_ring.sring) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_ring.sring = NULL;
	}

	kmem_cache_free(blkif_cachep, blkif);
}
Example #10
/* Undo the result of the mapping */
static void net_accel_unmap_grants_vfree(struct xenbus_device *dev,
        unsigned flags, void *priv)
{
    struct net_accel_valloc_grant_mapping *map =
        (struct net_accel_valloc_grant_mapping *)priv;

    void *addr = map->vm->addr;
    int npages = map->pages;
    int i;

    for (i = 0; i < npages; i++) {
        net_accel_unmap_grant(dev, map->grant_handles[i], addr, 0,
                              flags);
        addr = (void*)((unsigned long)addr + PAGE_SIZE);
    }
    free_vm_area(map->vm);
    kfree(map);
}
Example #11
struct vm_struct *alloc_vm_area(unsigned long size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
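
A minimal usage sketch of the alloc_vm_area()/free_vm_area() pair (hypothetical caller, for illustration only):

static int vm_area_demo(void)
{
	struct vm_struct *area;

	/* reserve one page of kernel VA space, with page tables populated */
	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	/* ... map something at area->addr: a grant page, an MMIO range, ... */

	/* undo any mapping first, then release the VA range */
	free_vm_area(area);
	return 0;
}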
Example #12
void scsiback_disconnect(struct vscsibk_info *info)
{
	if (info->kthread) {
		kthread_stop(info->kthread);
		info->kthread = NULL;
	}

	wait_event(info->waiting_to_free, 
		atomic_read(&info->nr_unreplied_reqs) == 0);

	if (info->irq) {
		unbind_from_irqhandler(info->irq, info);
		info->irq = 0;
	}

	if (info->ring.sring) {
		unmap_frontend_page(info);
		free_vm_area(info->ring_area);
		info->ring.sring = NULL;
	}
}
Example #13
//map the shared page
static struct vm_struct *map_sharedpage(grant_ref_t gref)
{
    struct vm_struct *vm_point;

    vm_point = alloc_vm_area(PAGE_SIZE, NULL);
    if (vm_point == NULL) {
        printk("\nxen: dom0: could not allocate shared_page");
        return NULL;
    }
    gnttab_set_map_op(&ops, (unsigned long)vm_point->addr, GNTMAP_host_map, gref, info.remoteDomain);
    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
        printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
        free_vm_area(vm_point);
        return NULL;
    }
    if (ops.status) {
        printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", ops.status);
        free_vm_area(vm_point);
        return NULL;
    }
    printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)vm_point->addr, ops.handle, ops.status);
    return vm_point;
}
Example #14
int scsiback_init_sring(struct vscsibk_info *info,
		unsigned long ring_ref, unsigned int evtchn)
{
	struct vscsiif_sring *sring;
	int err;

	if (info->irq) {
		printk(KERN_ERR "scsiback: Already connected through?\n");
		return -1;
	}

	info->ring_area = alloc_vm_area(PAGE_SIZE);
	if (!info->ring_area)
		return -ENOMEM;

	err = map_frontend_page(info, ring_ref);
	if (err)
		goto free_vm;

	sring = (struct vscsiif_sring *) info->ring_area->addr;
	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irqhandler(
			info->domid, evtchn,
			scsiback_intr, 0, "vscsiif-backend", info);

	if (err < 0)
		goto unmap_page;
		
	info->irq = err;

	return 0;

unmap_page:
	unmap_frontend_page(info);
free_vm:
	free_vm_area(info->ring_area);

	return err;
}
Example #15
int init_module(void)
{
	//a pointer to a vm_struct,
	//with fields .addr, .size, .pages, .nr_pages, .phys_addr
	struct vm_struct * v_start;
	int ret;

	printk("xen:init_module with gref = %d\n",gref);
	//this func reserves a range of kernel address space
	//and allocates pagetables to map that area, but no actual mapping here.
	v_start = alloc_vm_area(PAGE_SIZE , NULL);
	if (v_start == NULL) {
		printk("xen:could not allocate page\n");
		return -1;
	}
	//static inline void
	//gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
	//                  uint32_t flags, grant_ref_t ref, domid_t domid)
	gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map,
				 gref , remoteDomain);
	//the hypercall returns 0 on success
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1);
	if (ret) {
		printk("xen: HYPERVISOR map grant ref failed.\n");
		free_vm_area(v_start);
		return -1;
	}
	//check the status from the out param of the ops
	if (ops.status != 0) {
		printk("xen: HYPERVISOR map grant ref failed with status %d.\n", ops.status);
		free_vm_area(v_start);
		return -1;
	}
	//print the shared page address and grant handle in dmesg
	printk("xen:map grant success:\n\tshared_page=%lx, handle=%d\n", (unsigned long)v_start->addr, ops.handle);
	//if mapped,init the unmap ops with the handle and host_addr
	unmap_ops.host_addr=(unsigned long)v_start->addr;
	unmap_ops.handle=ops.handle;
	
	return 0;	
}
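
A matching cleanup_module sketch (not part of the original), assuming the unmap_ops prepared above and v_start promoted to a module-global so it can be freed here:

void cleanup_module(void)
{
	//undo the mapping using the handle recorded in init_module
	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1))
		printk("xen: HYPERVISOR unmap grant ref failed.\n");
	else if (unmap_ops.status != 0)
		printk("xen: unmap grant ref failed with status %d.\n", unmap_ops.status);

	//v_start is assumed to be a module-global in this sketch
	free_vm_area(v_start);
}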
Example #16
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
	}
}
Example #17
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc)
		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
				  -1, prot);

	/* carveout - explicitly map the pfns into a vmalloc area */

	nvmap_usecount_inc(h);

	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1, so that
	 * the handle will not be freed while the kernel mapping exists.
	 * nvmap_handle_put will be called by unmapping this address */
	return p;
}
Example #18
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err = 0;    /* the read/write file ops below are commented out */
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch(resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            resp.rdwr.len = req.rdwr.len;
            //struct pdma_info pdma_info;
            //memset(op_page->addr, 0, resp.rdwr.len);
            old_fs = get_fs();
            set_fs(get_ds());
            //get read size of block
            //err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
            //read data from device to page
            //err =info.chrif_filp->f_op->read(info.chrif_filp, op_page->addr, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
            if(err < 0)
                printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int i = 0, count, ret;
            struct vm_struct *op_page;
            struct gnttab_map_grant_ref op_page_ops;
            struct gnttab_unmap_grant_ref op_page_unmap_ops;
            resp.rdwr.len = req.rdwr.len;

            count = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: Dom0: write %u bytes %d times", resp.rdwr.len, count);

            block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);
            memset(block_buf, 0, resp.rdwr.len);

            while(i < count) {
                resp.op_gref[i] = req.op_gref[i];
                printk(KERN_DEBUG "\nxen: dom0: req.op_gref[0]: %d", resp.op_gref[i]);

                op_page = alloc_vm_area(PAGE_SIZE, NULL);
                if (op_page == NULL) {
                    printk("\nxen: dom0: could not allocate shared_page");
                    return IRQ_HANDLED;
                }
                /*gnttab_set_map_op(&op_page_ops, (unsigned long)op_page->addr, GNTMAP_host_map, resp.op_gref[i], info.remoteDomain);

                 op_page_unmap_ops.host_addr = (unsigned long)(op_page->addr);
                 unmap_ops.handle = op_page_ops.handle;
                 if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op_page_ops, 1)){
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
                     return -EFAULT;
                 }
                 if (op_page_ops.status) {
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", op_page_ops.status);
                     return -EFAULT;
                 }
                 printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)op_page->addr, op_page_ops.handle, op_page_ops.status);

                 memcpy(block_buf+i*4096, op_page->addr, 4096);
                 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op_page_unmap_ops, 1);
                 if (ret == 0) {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame");
                 } else {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame failed");
                 }
                 free_vm_area(op_page);*/
                i++;
            }

            /*  old_fs = get_fs();
            set_fs(get_ds());
            //write data from page to device
            //err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
              if(err < 0)
            	printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);

              */ //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch(resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //push the response and check whether we need to notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons)
        {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        }
        else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring))
        {
            more_to_do = 1;
        }
        if (notify)
        {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Example #19
int init_module(void)
{
	int i, ret = -ENOMEM;
	struct vm_struct *vm;
	struct page *pages[2] = { NULL, NULL };
	unsigned char *buf;

	pr_info("MyMapTest Begin\n");

	vm = alloc_vm_area(2 * PAGE_SIZE);
	if (!vm) {
		pr_info("Failed to allocate vm area\n");
		goto out;
	}

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1]) {
		pr_info("Page allocation failed\n");
		goto out;
	}

	/* Fill pages with test pattern */
	buf = kmap_atomic(pages[0]);
	for (i = 0; i < PAGE_SIZE; i++)
		buf[i] = 'a';
	kunmap_atomic(buf);

	buf = kmap_atomic(pages[1]);
	for (i = 0; i < PAGE_SIZE; i++)
		buf[i] = 'z';
	kunmap_atomic(buf);

	buf = NULL;

	/*
	 * Now, map both pages *contiguously* using a different method
	 * and verify contents of each page.
	 */
	ret = map_kernel_range_noflush((unsigned long)vm->addr, 2 * PAGE_SIZE,
				PAGE_KERNEL, pages);
	pr_info("map_kernel_range_noflush returned: %d\n", ret);

	buf = vm->addr;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (buf[i] != 'a')
			pr_info("mismatch in page-0 at location %d\n", i);
	}

	for (i = PAGE_SIZE; i < 2 * PAGE_SIZE; i++) {
		if (buf[i] != 'z')
			pr_info("mismatch in page-1 at location %d\n", i);
	}

	unmap_kernel_range_noflush((unsigned long)vm->addr, 2 * PAGE_SIZE);

	__flush_tlb_one((unsigned long)buf);
	__flush_tlb_one((unsigned long)buf + PAGE_SIZE);

	ret = 0;	/* Success */
out:
	if (vm)
		free_vm_area(vm);
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);

	/*
	 * A non 0 return means init_module failed; module can't be loaded. 
	 */
	return ret;
}
Example #20
int netif_map(netif_t *netif, unsigned long tx_ring_ref,
	      unsigned long rx_ring_ref, unsigned int evtchn)
{
	int err = -ENOMEM;
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	struct evtchn_bind_interdomain bind_interdomain;

	/* Already connected through? */
	if (netif->irq)
		return 0;

	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->tx_comms_area == NULL)
		return -ENOMEM;
	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->rx_comms_area == NULL)
		goto err_rx;

	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
	if (err)
		goto err_map;

	bind_interdomain.remote_dom = netif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		goto err_hypervisor;

	netif->evtchn = bind_interdomain.local_port;

	netif->irq = bind_evtchn_to_irqhandler(
		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
	disable_irq(netif->irq);

	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);

	rxs = (netif_rx_sring_t *)netif->rx_comms_area->addr;
	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);

	netif->rx_req_cons_peek = 0;

	netif_get(netif);
	wmb(); /* Other CPUs see new state before interface is started. */

	rtnl_lock();
	netif->status = CONNECTED;
	wmb();
	if (netif_running(netif->dev))
		__netif_up(netif);
	rtnl_unlock();

	return 0;
err_hypervisor:
	unmap_frontend_pages(netif);
err_map:
	free_vm_area(netif->rx_comms_area);
err_rx:
	free_vm_area(netif->tx_comms_area);
	return err;
}
Example #21
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
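
A usage sketch pairing the valloc/vfree helpers in a hypothetical backend connect path (the function and its ring_ref parameter are illustrative):

static int my_backend_connect(struct xenbus_device *dev, int ring_ref)
{
	void *ring_addr;
	int err;

	/* maps the frontend's grant and allocates the VA for us */
	err = xenbus_map_ring_valloc(dev, ring_ref, &ring_addr);
	if (err)
		return err;	/* dev already switched toward Closing */

	/* ... use ring_addr as the shared ring ... */

	/* unmaps the grant and frees the VA area in one call */
	return xenbus_unmap_ring_vfree(dev, ring_addr);
}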


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);


/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
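
A one-line usage sketch, reading the peer's state from its store directory:

	/* typically called with the other end's xenstore path */
	enum xenbus_state state = xenbus_read_driver_state(dev->otherend);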
Example #22
static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}
Example #23
void plat_free_vm_area(struct vm_struct *vmas)
{
	free_vm_area(vmas);
}
Example #24
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at 0x%llx - 0x%llx\n",
					phys_addr, last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
Example #25
static int device_probe(struct xenbus_device* dev, const struct xenbus_device_id* id)
{
        struct backendinfo* binfo;
//      int i;
//      char *p;
        struct vm_struct *v_start;
        int err;
        as_sring_t *sring;
	char *gref,*port;
        binfo = kmalloc(sizeof(*binfo),GFP_KERNEL);
        if (!binfo) {
               xenbus_dev_error(dev, -ENOMEM, "allocating info structure");
               return -ENOMEM;
         }
        memset(binfo,0,sizeof(*binfo));
        binfo->dev = dev;
        printk(KERN_ALERT"\nProbe fired!\n");
        
	gref = xenbus_read(XBT_NIL, binfo->dev->otherend, "gref", NULL);
        port = xenbus_read(XBT_NIL, binfo->dev->otherend, "port", NULL);
	info.gref=mycti(gref);
	info.evtchn=mycti(port);
        printk("Xenstore read port and gref success: %d, %d \n", info.evtchn, info.gref);
        info.remoteDomain = binfo->dev->otherend_id;
        printk(KERN_DEBUG "\nxen: dom0: gnt_init with gref = %d\n", info.gref);
        v_start = alloc_vm_area(PAGE_SIZE,NULL);
        if (v_start == NULL) {
           printk("\nxen: dom0:could not allocate page");
           return -EFAULT;
        }
        gnttab_set_map_op(&ops,(unsigned long)v_start->addr,GNTMAP_host_map,info.gref,info.remoteDomain);
       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
           printk(KERN_DEBUG "\nxen:dom0:HYPERVISOR map grant ref failed");
           free_vm_area(v_start);
           return -EFAULT;
       }
       if (ops.status) {
           printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", ops.status);
           free_vm_area(v_start);
           return -EFAULT;
       }
       printk(KERN_DEBUG "\nxen:dom0:shared_page=%x, handle = %x, status = %x", (unsigned int)v_start->addr, ops.handle, ops.status);

       unmap_ops.host_addr = (unsigned long)(v_start->addr);
       unmap_ops.handle = ops.handle;
       //////////////
       /*
       p = (char *)(v_start->addr) + PAGE_SIZE - 1;
       printk(KERN_DEBUG "\nbytes in page");
       for (i = 0;i <= 10; i++, p--) {
       printk(KERN_DEBUG "%c", *p);
       }
      */
       ////////////////
       sring = (as_sring_t *)v_start->addr;
       BACK_RING_INIT(&info.ring, sring, PAGE_SIZE);
       err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, as_int, 0, "dom0", &info);
       if (err < 0) {
          printk(KERN_DEBUG "\nxen:dom0: gnt_init failed binding to evtchn");
          HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1);
          free_vm_area(v_start);
          return -EFAULT;
       }
       info.irq = err;
       printk(KERN_DEBUG "\nxen:dom0:end gnt_int: int = %d", info.irq);
       return 0;
}
Example #26
/* Map a series of grants into a contiguous virtual area */
static void *net_accel_map_grants_valloc(struct xenbus_device *dev,
        unsigned *grants, int npages,
        unsigned flags, void **priv, int *errno)
{
    struct net_accel_valloc_grant_mapping *map;
    struct vm_struct *vm;
    void *addr;
    int i, j, rc;

    vm  = alloc_vm_area(PAGE_SIZE * npages);
    if (vm == NULL) {
        EPRINTK("No memory from alloc_vm_area.\n");
        return NULL;
    }
    /*
     * Get a structure in which we will record all the info needed
     * to undo the mapping.
     */
    map = kzalloc(sizeof(struct net_accel_valloc_grant_mapping)  +
                  npages * sizeof(grant_handle_t), GFP_KERNEL);
    if (map == NULL) {
        EPRINTK("No memory for net_accel_valloc_grant_mapping\n");
        free_vm_area(vm);
        return NULL;
    }
    map->vm = vm;
    map->pages = npages;

    /* Do the actual mapping */
    addr = vm->addr;
    if(errno != NULL) *errno = 0;
    for (i = 0; i < npages; i++) {
        rc = net_accel_map_grant(dev, grants[i], map->grant_handles + i,
                                 addr, NULL, flags);
        if (rc != 0)
        {
            if(errno != NULL)
                *errno = (rc == GNTST_eagain ? -EAGAIN : -EINVAL);
            goto undo;
        }
        addr = (void*)((unsigned long)addr + PAGE_SIZE);
    }

    if (priv)
        *priv = (void *)map;
    else
        kfree(map);

    return vm->addr;

undo:
    EPRINTK("Aborting contig map due to single map failure %d (%d of %d)\n",
            rc, i+1, npages);
    for (j = 0; j < i; j++) {
        addr = (void*)((unsigned long)vm->addr + (j * PAGE_SIZE));
        net_accel_unmap_grant(dev, map->grant_handles[j], addr, 0,
                              flags);
    }
    free_vm_area(vm);
    kfree(map);
    return NULL;
}
Example #27
int init_module(void)
{
	struct vm_struct *v_start = NULL;
	int ret, err;

	struct xenbus_transaction trans;
        int rc;
	
	log_info("init_module");

	rc = xenbus_transaction_start(&trans);
        check(rc==0, "transaction start failed");

        rc = xenbus_scanf(trans, "yinwuzhe", "gref", "%d",&gref);
	check(rc!=-ERANGE, "xenbus_scanf failed");
	rc = xenbus_scanf(trans, "yinwuzhe", "port", "%d",&port);
	check(rc!=-ERANGE, "xenbus_scanf failed");
        xenbus_transaction_end(trans, 0);

        log_info("read from the xenstore is gref=%d,port=%d",gref, port);
	
	//this func reserves a range of kernel address space
	//and allocates pagetables to map that area, but no actual mapping here.
	v_start = alloc_vm_area(PAGE_SIZE , NULL);
	check(v_start, "xen:could not allocate page\n");
	
	/*
	//gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
	//                  uint32_t flags, grant_ref_t ref, domid_t domid)
	gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map,
				 gref , remoteDomain);
	
	//do the map hypercall
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1);
	check(ret==0, "xen: HYPERVISOR map grant ref failed.\n");
	
	//check the status from the out param of the ops
	check(ops.status==0, "xen: HYPERVISOR map grant ref failed with status %d.\n",ops.status);
	
	//printk handle 
	log_info("xen:map grant success:\n\tshared_page=%x, handle=%d\n",
			(unsigned long)v_start->addr,ops.handle);
	
	//if mapped,init the unmap ops with the handle and host_addr
	unmap_ops.host_addr=(unsigned long)v_start->addr;
	unmap_ops.handle=ops.handle;
	*/

	info.remoteDomain=0;
	info.evtchn =port;
	err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, 
			handle_evt, 0,"get-grant",&info);
	check(err>=0,"bind_interdomain_evtchn_to_irqhandler failed.");
	info.irq=err;
	log_info("evtchn port = %d, the handle irq = %d", info.evtchn, info.irq);
	return 0;	
	
error:
	if (v_start)
		free_vm_area(v_start);
	return -1;
}
Example #28
static void arch_gnttab_vfree(struct gnttab_vm_area *area)
{
	free_vm_area(area->area);
	kfree(area->ptes);
}
Example #29
int omx_xen_deregister_user_segment(omx_xenif_t * omx_xenif, uint32_t id,
				    uint32_t sid, uint8_t eid)
{
	struct gnttab_unmap_grant_ref ops;
	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *dev = be->omxdev;
	struct omx_endpoint *endpoint = dev->endpoints[eid];
	struct omx_xen_user_region *region;
	struct omx_xen_user_region_segment *seg;
	int i, k, ret = 0;
	unsigned int level;

	dprintk_in();

	TIMER_START(&t_dereg_seg);
	if (eid >= 255) {
		printk_err
		    ("Wrong endpoint number (%u) check your frontend/backend communication!\n",
		     eid);
		ret = -EINVAL;
		goto out;
	}

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err(
		       "%s: Cannot access non-existing region %d\n", __func__, id);
		//ret = -EINVAL;
		goto out;
	}
	seg = &region->segments[sid];


	TIMER_START(&t_release_grants);
	if (!seg->unmap) {
		printk_err("seg->unmap is NULL\n");
		ret = -EINVAL;
		goto out;
	}
	gnttab_unmap_refs(seg->unmap, NULL, seg->pages, seg->nr_pages);
	TIMER_STOP(&t_release_grants);

	TIMER_START(&t_release_gref_list);
	for (k = 0; k < seg->nr_parts; k++) {
#ifdef EXTRA_DEBUG_OMX
		if (!seg->vm_gref) {
			printk(KERN_ERR "vm_gref is NULL\n");
			ret = -EFAULT;
			goto out;
		}
		if (!seg->vm_gref[k]) {
			printk(KERN_ERR "vm_gref[%d] is NULL\n", k);
			ret = -EFAULT;
			goto out;
		}
		if (!seg->vm_gref[k]->addr) {
			printk(KERN_ERR "vm_gref[%d]->addr is NULL\n", k);
			ret = -EFAULT;
			goto out;
		}
		if (!seg->all_handle[k]) {
			printk(KERN_ERR "all_handle[%d] is NULL\n", k);
			ret = -EINVAL;
			goto out;
		}
#endif
		gnttab_set_unmap_op(&ops, (unsigned long)seg->vm_gref[k]->addr,
				    GNTMAP_host_map | GNTMAP_contains_pte,
				    seg->all_handle[k]);
		ops.host_addr =
		    arbitrary_virt_to_machine(lookup_address
					      ((unsigned long)(seg->vm_gref[k]->
							       addr),
					       &level)).maddr;

		dprintk_deb("putting vm_area[%d] %#lx, handle = %#x \n", k,
			    (unsigned long)seg->vm_gref[k], seg->all_handle[k]);
		if (HYPERVISOR_grant_table_op
		    (GNTTABOP_unmap_grant_ref, &ops, 1)){
			printk_err
				("HYPERVISOR operation failed\n");
			//BUG();
		}
		if (ops.status) {
			printk_err
				("HYPERVISOR unmap grant ref[%d]=%#lx failed status = %d",
				 k, seg->all_handle[k], ops.status);
			ret = ops.status;
			goto out;
		}
	}
	TIMER_STOP(&t_release_gref_list);

	TIMER_START(&t_free_pages);
	for (k = 0; k < seg->nr_parts; k++)
		if (ops.status == GNTST_okay)
			free_vm_area(seg->vm_gref[k]);

	kfree(seg->map);
	kfree(seg->unmap);
	kfree(seg->gref_list);
#ifdef OMX_XEN_COOKIES
	omx_xen_page_put_cookie(omx_xenif, seg->cookie);
#else
	free_xenballooned_pages(seg->nr_pages, seg->pages);
	kfree(seg->pages);
#endif
	TIMER_STOP(&t_free_pages);

out:
	TIMER_STOP(&t_dereg_seg);
	dprintk_out();
	return ret;

}