Example #1
static void _stp_handle_start(struct _stp_msg_start *st)
{
	int handle_startup;

	mutex_lock(&_stp_transport_mutex);
	handle_startup = (! _stp_start_called && ! _stp_exit_called);
	_stp_start_called = 1;
	mutex_unlock(&_stp_transport_mutex);
	
	if (handle_startup) {
		dbug_trans(1, "stp_handle_start\n");

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) // linux commit #5f4352fb
#if LINUX_VERSION_CODE <  KERNEL_VERSION(2,6,29) // linux commit #9be260a6
#ifdef STAPCONF_VM_AREA
		{ /* PR9740: workaround for kernel valloc bug. */
                  /* PR14611: not required except within above kernel range. */
			void *dummy;
#ifdef STAPCONF_VM_AREA_PTE
			dummy = alloc_vm_area (PAGE_SIZE, NULL);
#else
			dummy = alloc_vm_area (PAGE_SIZE);
#endif
			free_vm_area (dummy);
		}
#endif
#endif
#endif

		_stp_target = st->target;
		st->res = systemtap_module_init();
		if (st->res == 0)
			_stp_probes_started = 1;

                /* Register the module notifier. */
                if (!_stp_module_notifier_active) {
                        int rc = register_module_notifier(& _stp_module_notifier_nb);
                        if (rc == 0)
                                _stp_module_notifier_active = 1;
                        else
                                _stp_warn ("Cannot register module notifier (%d)\n", rc);
                }

		/* Called from the user context in response to a proc
		   file write (in _stp_ctl_write_cmd), so may notify
		   the reader directly. */
		_stp_ctl_send_notify(STP_START, st, sizeof(*st));

		/* Register the panic notifier. */
#if STP_TRANSPORT_VERSION == 2
		atomic_notifier_chain_register(&panic_notifier_list, &_stp_module_panic_notifier_nb);
#endif
	}
}
Example #2
void *arch_gnttab_alloc_shared(unsigned long *frames)
{
	struct vm_struct *area;
	area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
	BUG_ON(area == NULL);
	return area->addr;
}
Example #3
/*
 * map frontend ring page
 * bind event channel
 * init backend ring
 */
static int chrif_map(struct xen_chrif *chrif, unsigned long ring_ref, unsigned int evtchn)
{
	int err;
    chrif_sring_t *sring;

	chrif->comms_area = alloc_vm_area(PAGE_SIZE, NULL);
	if (chrif->comms_area == NULL) {
		printk("\nxen: dom0: could not allocate shared_page");
		return -ENOMEM;
	}
	
	err = map_frontend_pages(chrif, ring_ref);
	if(err){
		free_vm_area(chrif->comms_area);
        printk("\nxen: dom0: map frontend page fail");
		return err;
	}

    sring = (chrif_sring_t *)chrif->comms_area->addr;
    BACK_RING_INIT(&chrif->chr_ring, sring, PAGE_SIZE);
	
    err = bind_interdomain_evtchn_to_irqhandler(chrif->domid, evtchn, chrif_int, 0, "domtest2", chrif);
    if (err < 0) {
        printk(KERN_DEBUG "\nxen: dom0: chrif_int failed binding to evtchn");
		unmap_frontend_pages(chrif); 
		return -EFAULT;
    }

	chrif->irq = err;
    printk(KERN_DEBUG "\nxen: dom0:bind event channel fineshed: irq = %d\n", chrif->irq);
	
    printk("\nxen: dom0: chrif map finished, otherend_id");
    return 0;
}
Example #4
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
	if (!blkif->blk_ring_area)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}
Example #5
static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
{
	area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
	if (area->ptes == NULL)
		return -ENOMEM;

	area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
	if (area->area == NULL) {
		kfree(area->ptes);
		return -ENOMEM;
	}

	return 0;
}
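The allocation above also hands back the per-frame pte pointers through area->ptes, so the matching teardown has to release both the vm area and that array. A minimal teardown sketch under that assumption (the helper name simply mirrors the allocation and is illustrative here):

static void arch_gnttab_vfree(struct gnttab_vm_area *area)
{
	/* release the reserved kernel VA range first... */
	free_vm_area(area->area);
	/* ...then the pte pointer array saved by arch_gnttab_valloc */
	kfree(area->ptes);
}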
Example #6
/* Based on Rusty Russell's skeleton driver's map_page */
struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
					 const grant_ref_t refs[],
					 unsigned int nr)
{
	grant_handle_t *handles = kmalloc(nr * sizeof(*handles), GFP_KERNEL);
	struct vm_struct *area;
	unsigned int i;

	if (!handles)
		return ERR_PTR(-ENOMEM);

	area = alloc_vm_area(nr * PAGE_SIZE);
	if (!area) {
		kfree(handles);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr; ++i) {
		struct gnttab_map_grant_ref op;

		gnttab_set_map_op(&op,
				  (unsigned long)area->addr + i * PAGE_SIZE,
				  GNTMAP_host_map, refs[i],
				  dev->otherend_id);
	
		gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref,
						   &op);

		if (op.status == GNTST_okay) {
			handles[i] = op.handle;
			continue;
		}

		unmap_ring_vfree(dev, area, i, handles);
		xenbus_dev_fatal(dev, op.status,
				 "mapping page %u (ref %#x, dom%d)",
				 i, refs[i], dev->otherend_id);
		BUG_ON(!IS_ERR(ERR_PTR(op.status)));
		return ERR_PTR(-EINVAL);
	}

	/* Stuff the handle array in an unused field. */
	area->phys_addr = (unsigned long)handles;

	return area;
}
Example #7
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

#ifndef __ia64__
	if (shared == NULL) {
		struct vm_struct *area;
		area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
		BUG_ON(area == NULL);
		shared = area->addr;
	}
	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
				 PAGE_SIZE * nr_gframes,
				 map_pte_fn, &frames);
	BUG_ON(rc);
        frames -= nr_gframes; /* adjust after map_pte_fn() */
#else
	shared = __va(frames[0] << PAGE_SHIFT);
#endif

	kfree(frames);

	return 0;
}
Example #8
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   struct grant_entry **__shared)
{
	int rc;
	struct grant_entry *shared = *__shared;

	if (shared == NULL) {
		struct vm_struct *area =
			alloc_vm_area(PAGE_SIZE * max_nr_gframes);
		BUG_ON(area == NULL);
		shared = area->addr;
		*__shared = shared;
	}

	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
				 PAGE_SIZE * nr_gframes,
				 map_pte_fn, &frames);
	return rc;
}
Example #9
//map the shared page
static struct vm_struct*  map_sharedpage(grant_ref_t gref)
{
    struct vm_struct *vm_point;
    vm_point = alloc_vm_area(PAGE_SIZE, NULL);
    if (vm_point == NULL) {
        printk("\nxen: dom0: could not allocate shared_page");
        return NULL;
    }
    gnttab_set_map_op(&ops, (unsigned long)vm_point->addr, GNTMAP_host_map, gref, info.remoteDomain);
    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
        printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
        free_vm_area(vm_point);
        return NULL;
    }
    if (ops.status) {
        printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", ops.status);
        free_vm_area(vm_point);
        return NULL;
    }
    printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)vm_point->addr, ops.handle, ops.status);
    return vm_point;
}
Example #10
int init_module(void)
{
	//a pointer to a vm_struct<>
	//include .addr,.size .page .nr_pages .phys_addr
	struct vm_struct * v_start;
	int ret;

	printk("xen:init_module with gref = %d\n",gref);
	//this func reserves a range of kernel address space
	//and allocates pagetables to map that area, but no actual mapping here.
	v_start = alloc_vm_area(PAGE_SIZE , NULL);
	if (v_start == NULL) {
		printk("xen: could not allocate page\n");
		return -1;
	}
	//static inline void
	//gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
	//                  uint32_t flags, grant_ref_t ref, domid_t domid)
	gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map,
				 gref , remoteDomain);
	//returns 0 if the hypercall succeeded
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1);
	if (ret) {
		printk("xen: HYPERVISOR map grant ref failed.\n");
		free_vm_area(v_start);
		return -1;
	}
	//check the status in the out param of the ops
	if (ops.status != 0) {
		printk("xen: HYPERVISOR map grant ref failed with status %d.\n", ops.status);
		free_vm_area(v_start);
		return -1;
	}
	//print the handle in dmesg
	printk("xen: map grant success:\n\tshared_page=%lx, handle=%d\n", (unsigned long)v_start->addr, ops.handle);
	//if mapped,init the unmap ops with the handle and host_addr
	unmap_ops.host_addr=(unsigned long)v_start->addr;
	unmap_ops.handle=ops.handle;
	
	return 0;	
}
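The init_module above fills in unmap_ops but never issues the unmap, so the grant stays mapped until the module is unloaded. A minimal cleanup sketch, assuming v_start is promoted to a module-level variable so the reserved area is still reachable here:

void cleanup_module(void)
{
	int ret;

	/* undo the grant mapping set up in init_module (handle and host_addr
	   were stored in unmap_ops above) */
	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1);
	if (ret)
		printk("xen: HYPERVISOR unmap grant ref failed.\n");

	/* release the reserved kernel address range */
	free_vm_area(v_start);	/* assumes v_start is a module-level pointer */
}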
Example #11
int scsiback_init_sring(struct vscsibk_info *info,
		unsigned long ring_ref, unsigned int evtchn)
{
	struct vscsiif_sring *sring;
	int err;

	if (info->irq) {
		printk(KERN_ERR "scsiback: Already connected through?\n");
		return -1;
	}

	info->ring_area = alloc_vm_area(PAGE_SIZE);
	if (!info->ring_area)
		return -ENOMEM;

	err = map_frontend_page(info, ring_ref);
	if (err)
		goto free_vm;

	sring = (struct vscsiif_sring *) info->ring_area->addr;
	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irqhandler(
			info->domid, evtchn,
			scsiback_intr, 0, "vscsiif-backend", info);

	if (err < 0)
		goto unmap_page;
		
	info->irq = err;

	return 0;

unmap_page:
	unmap_frontend_page(info);
free_vm:
	free_vm_area(info->ring_area);

	return err;
}
Example #12
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
{
	blkif_sring_t *sring;
	int err;
	struct evtchn_bind_interdomain bind_interdomain;

	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	bind_interdomain.remote_dom  = blkif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	blkif->evtchn = bind_interdomain.local_port;

	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);

	blkif->irq = bind_evtchn_to_irqhandler(
		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);

	blkif->status = CONNECTED;

	return 0;
}
Example #13
static int device_probe(struct xenbus_device* dev, const struct xenbus_device_id* id)
{
        struct backendinfo* binfo;
//      int i;
//      char *p;
        struct vm_struct *v_start;
        int err;
        as_sring_t *sring;
	char *gref,*port;
        binfo = kmalloc(sizeof(*binfo),GFP_KERNEL);
        if (!binfo) {
               xenbus_dev_error(dev, -ENOMEM, "allocating info structure");
               return -ENOMEM;
         }
        memset(binfo,0,sizeof(*binfo));
        binfo->dev = dev;
        printk(KERN_ALERT"\nProbe fired!\n");
        
	gref = xenbus_read(XBT_NIL, binfo->dev->otherend, "gref", NULL);
        port = xenbus_read(XBT_NIL, binfo->dev->otherend, "port", NULL);
	info.gref=mycti(gref);
	info.evtchn=mycti(port);
        printk("Xenstore read port and gref success: %d, %d \n", info.evtchn, info.gref);
        info.remoteDomain = binfo->dev->otherend_id;
        printk(KERN_DEBUG "\nxen: dom0: gnt_init with gref = %d\n", info.gref);
        v_start = alloc_vm_area(PAGE_SIZE,NULL);
        if (v_start == NULL) {
           printk("\nxen: dom0: could not allocate page");
           return -ENOMEM;
        }
        gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map, info.gref, info.remoteDomain);
       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
           printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
           free_vm_area(v_start);
           return -EFAULT;
       }
       if (ops.status) {
           printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", ops.status);
           free_vm_area(v_start);
           return -EFAULT;
       }
       printk(KERN_DEBUG "\nxen:dom0:shared_page=%x, handle = %x, status = %x", (unsigned int)v_start->addr, ops.handle, ops.status);

       unmap_ops.host_addr = (unsigned long)(v_start->addr);
       unmap_ops.handle = ops.handle;
       //////////////
       /*
       p = (char *)(v_start->addr) + PAGE_SIZE - 1;
       printk(KERN_DEBUG "\nbytes in page");
       for (i = 0;i <= 10; i++, p--) {
       printk(KERN_DEBUG "%c", *p);
       }
      */
       ////////////////
       sring = (as_sring_t *)v_start->addr;
       BACK_RING_INIT(&info.ring, sring, PAGE_SIZE);
       err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, as_int, 0, "dom0", &info);
       if (err < 0) {
          printk(KERN_DEBUG "\nxen:dom0: gnt_init failed binding to evtchn");
          HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap_ops, 1);
          free_vm_area(v_start);
          return -EFAULT;
       }
       info.irq = err;
       printk(KERN_DEBUG "\nxen:dom0:end gnt_int: int = %d", info.irq);
       return 0;
}
Example #14
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc)
		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
				  -1, prot);

	/* carveout - explicitly map the pfns into a vmalloc area */

	nvmap_usecount_inc(h);

	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1, so that
	 * the handle will not be freed while the kernel mapping exists.
	 * nvmap_handle_put will be called by unmapping this address */
	return p;
}
Example #15
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err = 0;
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch(resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            resp.rdwr.len = req.rdwr.len;
            //struct pdma_info pdma_info;
            //memset(op_page->addr, 0, resp.rdwr.len);
            old_fs = get_fs();
            set_fs(get_ds());
            //get read size of block
            //err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
            //read data from device to page
            //err =info.chrif_filp->f_op->read(info.chrif_filp, op_page->addr, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
            if(err < 0)
                printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int i = 0, count, ret;
            struct vm_struct *op_page;
            struct gnttab_map_grant_ref op_page_ops;
            struct gnttab_unmap_grant_ref op_page_unmap_ops;
            resp.rdwr.len = req.rdwr.len;

            count = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: Dom0: write %u bytes %d times", resp.rdwr.len, count);

            block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);
            memset(block_buf, 0, resp.rdwr.len);

            while(i < count) {
                resp.op_gref[i] = req.op_gref[i];
                printk(KERN_DEBUG "\nxen: dom0: req.op_gref[0]: %d", resp.op_gref[i]);

                op_page = alloc_vm_area(PAGE_SIZE, NULL);
                if (op_page == NULL) {
                    printk("\nxen: dom0: could not allocate shared_page");
                    kfree(block_buf);
                    return IRQ_HANDLED;
                }
                /*gnttab_set_map_op(&op_page_ops, (unsigned long)op_page->addr, GNTMAP_host_map, resp.op_gref[i], info.remoteDomain);

                 op_page_unmap_ops.host_addr = (unsigned long)(op_page->addr);
                 unmap_ops.handle = op_page_ops.handle;
                 if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op_page_ops, 1)){
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
                     return -EFAULT;
                 }
                 if (op_page_ops.status) {
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", op_page_ops.status);
                     return -EFAULT;
                 }
                 printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)op_page->addr, op_page_ops.handle, op_page_ops.status);

                 memcpy(block_buf+i*4096, op_page->addr, 4096);
                 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op_page_unmap_ops, 1);
                 if (ret == 0) {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame");
                 } else {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame failed");
                 }
                 free_vm_area(op_page);*/
                i++;
            }

            /*  old_fs = get_fs();
            set_fs(get_ds());
            //write data from page to device
            //err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
              if(err < 0)
            	printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);

              */ //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch(resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //put response and check whether or not notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons)
        {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        }
        else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring))
        {
            more_to_do = 1;
        }
        if (notify)
        {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Example #16
int init_module(void)
{
	int i, ret = -ENOMEM;
	struct vm_struct *vm;
	struct page *pages[2] = { NULL, NULL };
	unsigned char *buf;

	pr_info("MyMapTest Begin\n");

	vm = alloc_vm_area(2 * PAGE_SIZE);
	if (!vm) {
		pr_info("Failed to allocate vm area\n");
		goto out;
	}

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1]) {
		pr_info("Page allocation failed\n");
		goto out;
	}

	/* Fill pages with test pattern */
	buf = kmap_atomic(pages[0]);
	for (i = 0; i < PAGE_SIZE; i++)
		buf[i] = 'a';
	kunmap_atomic(buf);

	buf = kmap_atomic(pages[1]);
	for (i = 0; i < PAGE_SIZE; i++)
		buf[i] = 'z';
	kunmap_atomic(buf);

	buf = NULL;

	/*
	 * Now, map both pages *contiguously* using a different method
	 * and verify contents of each page.
	 */
	ret = map_kernel_range_noflush((unsigned long)vm->addr, 2 * PAGE_SIZE,
				PAGE_KERNEL, pages);
	pr_info("map_kernel_range_noflush returned: %d\n", ret);

	buf = vm->addr;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (buf[i] != 'a')
			pr_info("mismatch in page-0 at location %d\n", i);
	}

	for (i = PAGE_SIZE; i < 2 * PAGE_SIZE; i++) {
		if (buf[i] != 'z')
			pr_info("mismatch in page-1 at location %d\n", i);
	}

	unmap_kernel_range_noflush((unsigned long)vm->addr, 2 * PAGE_SIZE);

	__flush_tlb_one((unsigned long)buf);
	__flush_tlb_one((unsigned long)buf + PAGE_SIZE);

	ret = 0;	/* Success */
out:
	if (vm)
		free_vm_area(vm);
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);

	/*
	 * A non 0 return means init_module failed; module can't be loaded. 
	 */
	return ret;
}
Example #17
static int omx_xen_accept_gref_list(omx_xenif_t * omx_xenif,
				    struct omx_xen_user_region_segment *seg,
				    uint32_t gref, void **vaddr, uint8_t part)
{
	int ret = 0;
	struct backend_info *be = omx_xenif->be;
	struct vm_struct *area;
	pte_t *pte;
	struct gnttab_map_grant_ref ops = {
		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
		//.flags = GNTMAP_host_map,
		.ref = gref,
		.dom = be->remoteDomain,
	};

	dprintk_in();

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area) {
		ret = -ENOMEM;
		goto out;
	}

	seg->vm_gref[part] = area;

	ops.host_addr = arbitrary_virt_to_machine(pte).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1)) {
		printk_err("HYPERVISOR map grant ref failed");
		ret = -ENOSYS;
		goto out;
	}
	dprintk_deb("addr=%#lx, mfn=%#lx, kaddr=%#lx\n",
		    (unsigned long)area->addr, ops.dev_bus_addr >> PAGE_SHIFT,
		    ops.host_addr);
	if (ops.status) {
		printk_err("HYPERVISOR map grant ref failed status = %d",
			   ops.status);

		ret = ops.status;
		goto out;
	}

	dprintk_deb("gref_offset = %#x\n", seg->gref_offset);
	*vaddr = (area->addr + seg->gref_offset);

	ret = ops.handle;
#if 0
	for (i = 0; i < (size + 2); i++) {
		dprintk_deb("gref_list[%d] = %u\n", i,
			    *(((uint32_t *) * vaddr) + i));
	}
#endif

	seg->all_handle[part] = ops.handle;
	dprintk_deb("vaddr = %p, area->addr=%p, handle[%d]=%d\n", vaddr,
		    area->addr, part, seg->all_handle[part]);

out:
	dprintk_out();
	return ret;
}

int omx_xen_register_user_segment(omx_xenif_t * omx_xenif,
				  struct omx_ring_msg_register_user_segment *req)
{

	struct backend_info *be = omx_xenif->be;
	void *vaddr = NULL;
	uint32_t **gref_list;
	struct page **page_list;
	struct omxback_dev *omxdev = be->omxdev;
	struct omx_endpoint *endpoint;
	struct omx_xen_user_region *region;
	struct omx_xen_user_region_segment *seg;
	int ret = 0;
	int i = 0, k = 0;
	uint8_t eid, nr_parts;
	uint16_t first_page_offset, gref_offset;
	uint32_t sid, id, nr_grefs, nr_pages, length,
	    gref[OMX_XEN_GRANT_PAGES_MAX];
	uint64_t domU_vaddr;
	int idx = 0, sidx = 0;
	struct gnttab_map_grant_ref *map;
	struct gnttab_unmap_grant_ref *unmap;

	dprintk_in();

	TIMER_START(&t_reg_seg);
	sid = req->sid;
	id = req->rid;
	eid = req->eid;
	domU_vaddr = req->aligned_vaddr;
	nr_grefs = req->nr_grefs;
	nr_pages = req->nr_pages;
	nr_parts = req->nr_parts;
	length = req->length;
	dprintk_deb("nr_parts = %#x\n", nr_parts);
	for (k = 0; k < nr_parts; k++) {
		gref[k] = req->gref[k];
		dprintk_deb("printing gref = %lu\n", gref[k]);
	}
	gref_offset = req->gref_offset;
	first_page_offset = req->first_page_offset;
	endpoint = omxdev->endpoints[eid];

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err(KERN_ERR "Cannot access non-existing region %d\n",
			   id);
		ret = -EINVAL;
		goto out;
	}
	dprintk_deb("Got region @%#lx id=%u\n", (unsigned long)region, id);

	seg = &region->segments[sid];
	if (unlikely(!seg)) {
		printk(KERN_ERR "Cannot access non-existing segment %d\n", sid);
		ret = -EINVAL;
		goto out;
	}
	dprintk_deb("Got segment @%#lx id=%u\n", (unsigned long)seg, sid);

	seg->gref_offset = gref_offset;
	dprintk_deb
	    ("Offset of actual list of grant references (in the frontend) = %#x\n",
	     gref_offset);

	for (k = 0; k < nr_parts; k++) {
		seg->all_gref[k] = gref[k];
		dprintk_deb("grant reference for list of grefs = %#x\n",
			    gref[k]);
	}
	seg->nr_parts = nr_parts;
	dprintk_deb("parts of gref list = %#x\n", nr_parts);

	TIMER_START(&t_alloc_pages);
	gref_list = kzalloc(sizeof(uint32_t *) * nr_parts, GFP_ATOMIC);
	if (!gref_list) {
		ret = -ENOMEM;
		printk_err("gref list is NULL, ENOMEM!!!\n");
		goto out;
	}

	map =
	    kzalloc(sizeof(struct gnttab_map_grant_ref) * nr_pages,
		    GFP_ATOMIC);
	if (!map) {
		ret = -ENOMEM;
		printk_err(" map is NULL, ENOMEM!!!\n");
		goto out;
	}
	unmap =
	    kzalloc(sizeof(struct gnttab_unmap_grant_ref) * nr_pages,
		    GFP_ATOMIC);
	if (!unmap) {
		ret = -ENOMEM;
		printk_err(" unmap is NULL, ENOMEM!!!\n");
		goto out;
	}

#ifdef OMX_XEN_COOKIES
	seg->cookie = omx_xen_page_get_cookie(omx_xenif, nr_pages);
	if (!seg->cookie) {
		printk_err("cannot get cookie\n");
		goto out;
	}
	page_list = seg->cookie->pages;
#else
	page_list = kzalloc(sizeof(struct page *) * nr_pages, GFP_ATOMIC);
	if (!page_list) {
		ret = -ENOMEM;
		printk_err(" page list is NULL, ENOMEM!!!\n");
		goto out;
	}

	ret = alloc_xenballooned_pages(nr_pages, page_list, false /* lowmem */);
	if (ret) {
		printk_err("cannot allocate xenballooned_pages\n");
		goto out;
	}
#endif
	TIMER_STOP(&t_alloc_pages);

	TIMER_START(&t_accept_gref_list);
	for (k = 0; k < nr_parts; k++) {
		ret =
		    omx_xen_accept_gref_list(omx_xenif, seg, gref[k], &vaddr,
					     k);
		if (ret < 0) {
			printk_err("Cannot accept gref list, = %d\n", ret);
			goto out;
		}

		gref_list[k] = (uint32_t *) vaddr;
		if (!gref_list[k]) {
			printk_err("gref_list[%d] is NULL!!!, = %p\n", k, gref_list[k]);
			ret = -ENOSYS;
			goto out;
		}
	}
	TIMER_STOP(&t_accept_gref_list);
	seg->gref_list = gref_list;

	seg->nr_pages = nr_pages;
	seg->first_page_offset = first_page_offset;

	i = 0;
	idx = 0;
	sidx = 0;
	seg->map = map;
	seg->unmap = unmap;
	while (i < nr_pages) {
		void *tmp_vaddr;
		unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page_list[i]));
		if (sidx % 256 == 0)
			dprintk_deb("gref_list[%d][%d] = %#x\n", idx, sidx,
				    gref_list[idx][sidx]);


		gnttab_set_map_op(&map[i], addr, GNTMAP_host_map,
				  gref_list[idx][sidx], be->remoteDomain);
		gnttab_set_unmap_op(&unmap[i], addr, GNTMAP_host_map, -1 /* handle */ );
		i++;
		if ((unlikely(i % nr_grefs == 0))) {
			idx++;
			sidx = 0;
		} else {
			sidx++;
		}
		//printk(KERN_INFO "idx=%d, i=%d, sidx=%d\n", idx, i, sidx);
	}
	TIMER_START(&t_accept_grants);
        ret = gnttab_map_refs(map, NULL, page_list, nr_pages);
        if (ret) {
		printk_err("Error mapping, ret= %d\n", ret);
                goto out;
	}
	TIMER_STOP(&t_accept_grants);

        for (i = 0; i < nr_pages; i++) {
                if (map[i].status) {
                        ret = -EINVAL;
			printk_err("idx %d, status =%d\n", i, map[i].status);
			goto out;
		}
                else {
                        //BUG_ON(map->map_ops[i].handle == -1);
                        unmap[i].handle = map[i].handle;
                        dprintk_deb("map handle=%d\n", map[i].handle);
                }
        }

	seg->pages = page_list;
	seg->nr_pages = nr_pages;
	seg->length = length;
	region->total_length += length;
	dprintk_deb("total_length = %#lx, nrpages=%lu, pages = %#lx\n",
		    region->total_length, seg->nr_pages,
		    (unsigned long)seg->pages);
	goto all_ok;
out:
	printk_err("error registering, try to debug MORE!!!!\n");

all_ok:
	TIMER_STOP(&t_reg_seg);
	dprintk_out();
	return ret;
}

int omx_xen_create_user_region(omx_xenif_t * omx_xenif, uint32_t id,
			       uint64_t vaddr, uint32_t nr_segments,
			       uint32_t nr_pages, uint32_t nr_grefs,
			       uint8_t eid)
{

	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *omxdev = be->omxdev;
	struct omx_endpoint *endpoint = omxdev->endpoints[eid];
	struct omx_xen_user_region *region;
	int ret = 0;

	dprintk_in();
	TIMER_START(&t_create_reg);
	//udelay(1000);
	/* allocate the relevant region */
	region =
	    kzalloc(sizeof(struct omx_xen_user_region) +
		    nr_segments * sizeof(struct omx_xen_user_region_segment),
		    GFP_KERNEL);
	if (!region) {
		printk_err
		    ("No memory to allocate the region/segment buffers\n");
		ret = -ENOMEM;
		goto out;
	}

	/* init stuff needed :S */
	kref_init(&region->refcount);
	region->total_length = 0;
	region->nr_vmalloc_segments = 0;

	region->total_registered_length = 0;

	region->id = id;
	region->nr_segments = nr_segments;
	region->eid = eid;

	region->endpoint = endpoint;
	region->dirty = 0;

	if (unlikely(rcu_access_pointer(endpoint->xen_regions[id]) != NULL)) {
		printk(KERN_ERR "Cannot create busy region %d\n", id);
		ret = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(endpoint->xen_regions[id], region);

out:
	TIMER_STOP(&t_create_reg);
	dprintk_out();
	return ret;
}

/* Various region/segment handler functions */

void
omx_xen_user_region_destroy_segments(struct omx_xen_user_region *region,
				     struct omx_endpoint *endpoint)
{
	int i;

	dprintk_in();
	if (!endpoint) {
		printk_err("endpoint is null!!\n");
		return;
	}
	for (i = 0; i < region->nr_segments; i++)
		omx_xen_deregister_user_segment(endpoint->be->omx_xenif,
						region->id, i,
						endpoint->endpoint_index);

	dprintk_out();
}
Example #18
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);


/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
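For reference, a hedged sketch of how a backend connect/disconnect path might drive xenbus_map_ring_valloc and xenbus_unmap_ring_vfree from this example; the my_backend_info structure and helper names are illustrative only, not part of the xenbus API shown above:

struct my_backend_info {
	void *ring_addr;	/* filled in by xenbus_map_ring_valloc */
};

static int my_backend_connect_ring(struct xenbus_device *dev,
				   struct my_backend_info *be, int gnt_ref)
{
	int err;

	/* reserves the VA range, maps the granted page, sets be->ring_addr */
	err = xenbus_map_ring_valloc(dev, gnt_ref, &be->ring_addr);
	if (err)
		return err;	/* nothing was mapped; see the error handling described above */

	/* BACK_RING_INIT and event-channel setup would follow here */
	return 0;
}

static void my_backend_disconnect_ring(struct xenbus_device *dev,
				       struct my_backend_info *be)
{
	/* unmaps the grant and frees the VA range from xenbus_map_ring_valloc */
	xenbus_unmap_ring_vfree(dev, be->ring_addr);
	be->ring_addr = NULL;
}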
Example #19
int netif_map(netif_t *netif, unsigned long tx_ring_ref,
	      unsigned long rx_ring_ref, unsigned int evtchn)
{
	int err = -ENOMEM;
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	struct evtchn_bind_interdomain bind_interdomain;

	/* Already connected through? */
	if (netif->irq)
		return 0;

	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->tx_comms_area == NULL)
		return -ENOMEM;
	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
	if (netif->rx_comms_area == NULL)
		goto err_rx;

	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
	if (err)
		goto err_map;

	bind_interdomain.remote_dom = netif->domid;
	bind_interdomain.remote_port = evtchn;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		goto err_hypervisor;

	netif->evtchn = bind_interdomain.local_port;

	netif->irq = bind_evtchn_to_irqhandler(
		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
	disable_irq(netif->irq);

	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);

	rxs = (netif_rx_sring_t *)
		((char *)netif->rx_comms_area->addr);
	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);

	netif->rx_req_cons_peek = 0;

	netif_get(netif);
	wmb(); /* Other CPUs see new state before interface is started. */

	rtnl_lock();
	netif->status = CONNECTED;
	wmb();
	if (netif_running(netif->dev))
		__netif_up(netif);
	rtnl_unlock();

	return 0;
err_hypervisor:
	unmap_frontend_pages(netif);
err_map:
	free_vm_area(netif->rx_comms_area);
err_rx:
	free_vm_area(netif->tx_comms_area);
	return err;
}
Example #20
int init_module(void)
{
	struct vm_struct * v_start = NULL;
	int ret, err;

	struct xenbus_transaction trans;
        int rc;
	
	log_info("init_module");

	rc = xenbus_transaction_start(&trans);
        check(rc==0, "transaction start failed");

        rc = xenbus_scanf(trans, "yinwuzhe", "gref", "%d",&gref);
	check(rc!=-ERANGE, "xenbus_scanf failed");
	rc = xenbus_scanf(trans, "yinwuzhe", "port", "%d",&port);
	check(rc!=-ERANGE, "xenbus_scanf failed");
        xenbus_transaction_end(trans, 0);

        log_info("read from the xenstore is gref=%d,port=%d",gref, port);
	
	//this func reserves a range of kernel address space
	//and allocates pagetables to map that area, but no actual mapping here.
	v_start = alloc_vm_area(PAGE_SIZE , NULL);
	check(v_start, "xen:could not allocate page\n");
	
	/*
	//gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
	//                  uint32_t flags, grant_ref_t ref, domid_t domid)
	gnttab_set_map_op(&ops, (unsigned long)v_start->addr, GNTMAP_host_map,
				 gref , remoteDomain);
	
	//do the map hypercall
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &ops, 1);
	check(ret==0, "xen: HYPERSIOR map grant ref failed.\n");
	
	//check the staus from the out param of the ops
	check(ops.status==0, "xen: HYPERSIOR map grant ref failed with status %d.\n",ops.status);
	
	//printk handle 
	log_info("xen:map grant success:\n\tshared_page=%x, handle=%d\n",
			(unsigned long)v_start->addr,ops.handle);
	
	//if mapped,init the unmap ops with the handle and host_addr
	unmap_ops.host_addr=(unsigned long)v_start->addr;
	unmap_ops.handle=ops.handle;
	*/

	info.remoteDomain=0;
	info.evtchn =port;
	err = bind_interdomain_evtchn_to_irqhandler(info.remoteDomain, info.evtchn, 
			handle_evt, 0,"get-grant",&info);
	check(err>=0,"bind_interdomain_evtchn_to_irqhandler failed.");
	info.irq=err;
	log_info("evtchn port = %d, the handle irq = %d", info.evtchn, info.irq);
	return 0;	
	
error:
	if (v_start)
		free_vm_area(v_start);
	return -1;
}
Example #21
/* Map a series of grants into a contiguous virtual area */
static void *net_accel_map_grants_valloc(struct xenbus_device *dev,
        unsigned *grants, int npages,
        unsigned flags, void **priv, int *errno)
{
    struct net_accel_valloc_grant_mapping *map;
    struct vm_struct *vm;
    void *addr;
    int i, j, rc;

    vm  = alloc_vm_area(PAGE_SIZE * npages);
    if (vm == NULL) {
        EPRINTK("No memory from alloc_vm_area.\n");
        return NULL;
    }
    /*
     * Get a structure in which we will record all the info needed
     * to undo the mapping.
     */
    map = kzalloc(sizeof(struct net_accel_valloc_grant_mapping)  +
                  npages * sizeof(grant_handle_t), GFP_KERNEL);
    if (map == NULL) {
        EPRINTK("No memory for net_accel_valloc_grant_mapping\n");
        free_vm_area(vm);
        return NULL;
    }
    map->vm = vm;
    map->pages = npages;

    /* Do the actual mapping */
    addr = vm->addr;
    if(errno != NULL) *errno = 0;
    for (i = 0; i < npages; i++) {
        rc = net_accel_map_grant(dev, grants[i], map->grant_handles + i,
                                 addr, NULL, flags);
        if (rc != 0)
        {
            if(errno != NULL)
                *errno = (rc == GNTST_eagain ? -EAGAIN : -EINVAL);
            goto undo;
        }
        addr = (void*)((unsigned long)addr + PAGE_SIZE);
    }

    if (priv)
        *priv = (void *)map;
    else
        kfree(map);

    return vm->addr;

undo:
    EPRINTK("Aborting contig map due to single map failure %d (%d of %d)\n",
            rc, i+1, npages);
    for (j = 0; j < i; j++) {
        addr = (void*)((unsigned long)vm->addr + (j * PAGE_SIZE));
        net_accel_unmap_grant(dev, map->grant_handles[j], addr, 0,
                              flags);
    }
    free_vm_area(vm);
    kfree(map);
    return NULL;
}