Example #1
/**
 * Reserves a vm area big enough to hold "pages" pages and
 * returns the vm_struct describing it.
 */
struct vm_struct *plat_get_vm_area(int pages)
{
	struct vm_struct *vmas;

	vmas = __get_vm_area(PAGE_SIZE * pages, VM_IOREMAP,
			VMALLOC_START, VMALLOC_END);

	return vmas;
}
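
Note that plat_get_vm_area() only reserves kernel virtual address space; nothing is mapped into it yet. A minimal sketch of a hypothetical caller (plat_map_device() and its parameters are illustrative, not part of the original) that backs the reserved range with device memory via ioremap_page_range() and gives it back on failure:

/*
 * Hypothetical caller sketch: map a physical range into the reserved
 * area and hand back an __iomem pointer.  On failure, undo any partial
 * mapping and release the reserved address range again.
 */
static void __iomem *plat_map_device(phys_addr_t phys, int pages)
{
	struct vm_struct *vmas = plat_get_vm_area(pages);
	unsigned long vaddr;

	if (!vmas)
		return NULL;

	vaddr = (unsigned long)vmas->addr;
	if (ioremap_page_range(vaddr, vaddr + PAGE_SIZE * pages, phys,
			       pgprot_noncached(PAGE_KERNEL))) {
		unmap_kernel_range(vaddr, PAGE_SIZE * pages);
		free_vm_area(vmas);
		return NULL;
	}

	return (void __iomem *)vaddr;
}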
Example #2
void *module_alloc_exec(unsigned long size)
{
	struct vm_struct *area;

	if (size == 0)
		return NULL;

	area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR,
			     (unsigned long)&MODULES_EXEC_END);
	return area ? area->addr : NULL;
}
Example #3
static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}
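
The reserved area itself stays unmapped; GHES later maps individual physical pages into it on demand from atomic context and unmaps them again afterwards. Roughly, that pattern looks like the sketch below (the helper names and the use of the area's first page are illustrative, not taken from the original source):

/* Illustrative sketch: map one PFN into the first page of the reserved
 * area, return it to the caller, and unmap it again when done.  The
 * caller is responsible for flushing the TLB for the unmapped range. */
static void __iomem *ghes_map_pfn(u64 pfn)
{
	unsigned long vaddr = (unsigned long)ghes_ioremap_area->addr;

	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);
	return (void __iomem *)vaddr;
}

static void ghes_unmap_pfn(void __iomem *vaddr_iomem)
{
	unmap_kernel_range_noflush((unsigned long)vaddr_iomem, PAGE_SIZE);
}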
Example #4
void *module_alloc(unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size)
		return NULL;

	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
	if (!area)
		return NULL;

	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
}
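
Since the result is a vmalloc-style mapping, the matching free path is simply vfree() on the returned address. A sketch of the counterpart, following the historical module_free() signature (shown for illustration, not copied from the original source):

/* Sketch of the matching free path: vfree() unmaps the vm area and
 * releases the pages that __vmalloc_area() allocated. */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}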
Example #5
static void *__module_alloc(unsigned long size, pgprot_t prot)
{
	struct vm_struct *area;

	if (!size)
		return NULL;
	size = PAGE_ALIGN(size);
	if (size > MODULES_LEN)
		return NULL;

	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
	if (!area)
		return NULL;

	return __vmalloc_area(area, GFP_KERNEL | __GFP_ZERO, prot);
}
Example #6
/**
 * Allocate executable kernel memory in the module range.
 *
 * @returns Pointer to an allocation header on success.  NULL on failure.
 *
 * @param   cb          The size the user requested.
 */
static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
{
    size_t const        cbAlloc = RT_ALIGN_Z(sizeof(RTMEMLNXHDREX) + cb, PAGE_SIZE);
    size_t const        cPages  = cbAlloc >> PAGE_SHIFT;
    struct page       **papPages;
    struct vm_struct   *pVmArea;
    size_t              iPage;

    pVmArea = __get_vm_area(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END);
    if (!pVmArea)
        return NULL;
    pVmArea->nr_pages = 0;    /* paranoia? */
    pVmArea->pages    = NULL; /* paranoia? */

    papPages = (struct page **)kmalloc(cPages * sizeof(papPages[0]), GFP_KERNEL | __GFP_NOWARN);
    if (!papPages)
    {
        vunmap(pVmArea->addr);
        return NULL;
    }

    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN);
        if (!papPages[iPage])
            break;
    }
    if (iPage == cPages)
    {
        /*
         * Map the pages.
         *
         * Not entirely sure we really need to set nr_pages and pages here, but
         * they provide a very convenient place for storing something we need
         * in the free function, if nothing else...
         */
# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
        struct page **papPagesIterator = papPages;
# endif
        pVmArea->nr_pages = cPages;
        pVmArea->pages    = papPages;
        if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC,
# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
                         &papPagesIterator
# else
                         papPages
# endif
                         ))
        {
            PRTMEMLNXHDREX pHdrEx = (PRTMEMLNXHDREX)pVmArea->addr;
            pHdrEx->pVmArea     = pVmArea;
            pHdrEx->pvDummy     = NULL;
            return &pHdrEx->Hdr;
        }
        /* bail out */
# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
        pVmArea->nr_pages = papPagesIterator - papPages;
# endif
    }

    vunmap(pVmArea->addr);

    while (iPage-- > 0)
        __free_page(papPages[iPage]);
    kfree(papPages);

    return NULL;
}
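
The comment above hints at why nr_pages and pages are stashed in the vm_struct: the free path needs them. A hypothetical counterpart (rtR0MemFreeExecVmArea() is illustrative, not taken from the original source) could look like this:

/*
 * Hypothetical free counterpart: read the page array back before
 * calling vunmap(), since vunmap() removes the mapping and frees the
 * vm_struct itself (but not the pages, which were allocated by us).
 */
static void rtR0MemFreeExecVmArea(struct vm_struct *pVmArea)
{
    size_t        cPages   = pVmArea->nr_pages;
    struct page **papPages = pVmArea->pages;

    vunmap(pVmArea->addr);
    while (cPages-- > 0)
        __free_page(papPages[cPages]);
    kfree(papPages);
}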
Example #7
static int __devinit electra_cf_probe(struct of_device *ofdev,
				      const struct of_device_id *match)
{
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct electra_cf_socket   *cf;
	struct resource mem, io;
	int status;
	const unsigned int *prop;
	int err;
	struct vm_struct *area;

	err = of_address_to_resource(np, 0, &mem);
	if (err)
		return -EINVAL;

	err = of_address_to_resource(np, 1, &io);
	if (err)
		return -EINVAL;

	cf = kzalloc(sizeof *cf, GFP_KERNEL);
	if (!cf)
		return -ENOMEM;

	setup_timer(&cf->timer, electra_cf_timer, (unsigned long)cf);
	cf->irq = NO_IRQ;

	cf->ofdev = ofdev;
	cf->mem_phys = mem.start;
	cf->mem_size = PAGE_ALIGN(mem.end - mem.start);
	cf->mem_base = ioremap(cf->mem_phys, cf->mem_size);
	cf->io_size = PAGE_ALIGN(io.end - io.start);

	area = __get_vm_area(cf->io_size, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;

	cf->io_virt = (void __iomem *)(area->addr);

	cf->gpio_base = ioremap(0xfc103000, 0x1000);
	dev_set_drvdata(device, cf);

	if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
	    (__ioremap_at(io.start, cf->io_virt, cf->io_size,
		_PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)) {
		dev_err(device, "can't ioremap ranges\n");
		status = -ENOMEM;
		goto fail1;
	}


	cf->io_base = (unsigned long)cf->io_virt - VMALLOC_END;

	cf->iomem.start = (unsigned long)cf->mem_base;
	cf->iomem.end = (unsigned long)cf->mem_base + (mem.end - mem.start);
	cf->iomem.flags = IORESOURCE_MEM;

	cf->irq = irq_of_parse_and_map(np, 0);

	status = request_irq(cf->irq, electra_cf_irq, IRQF_SHARED,
			     driver_name, cf);
	if (status < 0) {
		dev_err(device, "request_irq failed\n");
		goto fail1;
	}

	cf->socket.pci_irq = cf->irq;

	prop = of_get_property(np, "card-detect-gpio", NULL);
	if (!prop)
		goto fail1;
	cf->gpio_detect = *prop;

	prop = of_get_property(np, "card-vsense-gpio", NULL);
	if (!prop)
		goto fail1;
	cf->gpio_vsense = *prop;

	prop = of_get_property(np, "card-3v-gpio", NULL);
	if (!prop)
		goto fail1;
	cf->gpio_3v = *prop;

	prop = of_get_property(np, "card-5v-gpio", NULL);
	if (!prop)
		goto fail1;
	cf->gpio_5v = *prop;

	cf->socket.io_offset = cf->io_base;

	/* reserve chip-select regions */
	if (!request_mem_region(cf->mem_phys, cf->mem_size, driver_name)) {
		status = -ENXIO;
		dev_err(device, "Can't claim memory region\n");
		goto fail1;
	}

	if (!request_region(cf->io_base, cf->io_size, driver_name)) {
		status = -ENXIO;
		dev_err(device, "Can't claim I/O region\n");
		goto fail2;
	}

	cf->socket.owner = THIS_MODULE;
	cf->socket.dev.parent = &ofdev->dev;
	cf->socket.ops = &electra_cf_ops;
	cf->socket.resource_ops = &pccard_static_ops;
	cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP |
				SS_CAP_MEM_ALIGN;
	cf->socket.map_size = 0x800;

	status = pcmcia_register_socket(&cf->socket);
	if (status < 0) {
		dev_err(device, "pcmcia_register_socket failed\n");
		goto fail3;
	}

	dev_info(device, "at mem 0x%lx io 0x%llx irq %d\n",
		 cf->mem_phys, io.start, cf->irq);

	cf->active = 1;
	electra_cf_timer((unsigned long)cf);
	return 0;

fail3:
	release_region(cf->io_base, cf->io_size);
fail2:
	release_mem_region(cf->mem_phys, cf->mem_size);
fail1:
	if (cf->irq != NO_IRQ)
		free_irq(cf->irq, cf);

	if (cf->io_virt)
		__iounmap_at(cf->io_virt, cf->io_size);
	if (cf->mem_base)
		iounmap(cf->mem_base);
	if (cf->gpio_base)
		iounmap(cf->gpio_base);
	device_init_wakeup(&ofdev->dev, 0);
	kfree(cf);
	return status;

}
Example #8
/*H:010
 * We need to set up the Switcher at a high virtual address.  Remember the
 * Switcher is a few hundred bytes of assembler code which actually changes the
 * CPU to run the Guest, and then changes back to the Host when a trap or
 * interrupt happens.
 *
 * The Switcher code must be at the same virtual address in the Guest as the
 * Host since it will be running as the switchover occurs.
 *
 * Trying to map memory at a particular address is an unusual thing to do, so
 * it's not a simple one-liner.
 */
static __init int map_switcher(void)
{
	int i, err;
	struct page **pagep;

	/*
	 * Map the Switcher in to high memory.
	 *
	 * It turns out that if we choose the address 0xFFC00000 (4MB under the
	 * top virtual address), it makes setting up the page tables really
	 * easy.
	 */

	/*
	 * We allocate an array of struct page pointers.  map_vm_area() wants
	 * this, rather than just an array of pages.
	 */
	switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
				GFP_KERNEL);
	if (!switcher_page) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Now we actually allocate the pages.  The Guest will see these pages,
	 * so we make sure they're zeroed.
	 */
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!switcher_page[i]) {
			err = -ENOMEM;
			goto free_some_pages;
		}
	}

	/*
	 * First we check that the Switcher won't overlap the fixmap area at
	 * the top of memory.  It's currently nowhere near, but it could have
	 * very strange effects if it ever happened.
	 */
	if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){
		err = -ENOMEM;
		printk("lguest: mapping switcher would thwack fixmap\n");
		goto free_pages;
	}

	/*
	 * Now we reserve the "virtual memory area" we want: 0xFFC00000
	 * (SWITCHER_ADDR).  We might not get it in theory, but in practice
	 * it's worked so far.  The end address needs +1 because __get_vm_area
	 * allocates an extra guard page, so we need space for that.
	 */
	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
				     VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
				     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
	if (!switcher_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_pages;
	}

	/*
	 * This code actually sets up the pages we've allocated to appear at
	 * SWITCHER_ADDR.  map_vm_area() takes the vma we allocated above, the
	 * kind of pages we're mapping (kernel pages), and a pointer to our
	 * array of struct pages.  It increments that pointer, but we don't
	 * care.
	 */
	pagep = switcher_page;
	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
	if (err) {
		printk("lguest: map_vm_area failed: %i\n", err);
		goto free_vma;
	}

	/*
	 * Now the Switcher is mapped at the right address, we can't fail!
	 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
	 */
	memcpy(switcher_vma->addr, start_switcher_text,
	       end_switcher_text - start_switcher_text);

	printk(KERN_INFO "lguest: mapped switcher at %p\n",
	       switcher_vma->addr);
	/* And we succeeded... */
	return 0;

free_vma:
	vunmap(switcher_vma->addr);
free_pages:
	i = TOTAL_SWITCHER_PAGES;
free_some_pages:
	for (--i; i >= 0; i--)
		__free_pages(switcher_page[i], 0);
	kfree(switcher_page);
out:
	return err;
}
Example #10
File: core.c  Project: AshishNamdev/linux
/*H:010
 * We need to set up the Switcher at a high virtual address.  Remember the
 * Switcher is a few hundred bytes of assembler code which actually changes the
 * CPU to run the Guest, and then changes back to the Host when a trap or
 * interrupt happens.
 *
 * The Switcher code must be at the same virtual address in the Guest as the
 * Host since it will be running as the switchover occurs.
 *
 * Trying to map memory at a particular address is an unusual thing to do, so
 * it's not a simple one-liner.
 */
static __init int map_switcher(void)
{
	int i, err;

	/*
	 * Map the Switcher in to high memory.
	 *
	 * It turns out that if we choose the address 0xFFC00000 (4MB under the
	 * top virtual address), it makes setting up the page tables really
	 * easy.
	 */

	/* We assume Switcher text fits into a single page. */
	if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
		printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
		       end_switcher_text - start_switcher_text);
		return -EINVAL;
	}

	/*
	 * We allocate an array of struct page pointers.  map_vm_area() wants
	 * this, rather than just an array of pages.
	 */
	lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
				    * TOTAL_SWITCHER_PAGES,
				    GFP_KERNEL);
	if (!lg_switcher_pages) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Now we actually allocate the pages.  The Guest will see these pages,
	 * so we make sure they're zeroed.
	 */
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!lg_switcher_pages[i]) {
			err = -ENOMEM;
			goto free_some_pages;
		}
	}

	/*
	 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
	 * It goes in the first page, which we map in momentarily.
	 */
	memcpy(kmap(lg_switcher_pages[0]), start_switcher_text,
	       end_switcher_text - start_switcher_text);
	kunmap(lg_switcher_pages[0]);

	/*
	 * We place the Switcher underneath the fixmap area, which is the
	 * highest virtual address we can get.  This is important, since we
	 * tell the Guest it can't access this memory, so we want its ceiling
	 * as high as possible.
	 */
	switcher_addr = FIXADDR_START - TOTAL_SWITCHER_PAGES*PAGE_SIZE;

	/*
	 * Now we reserve the "virtual memory area"s we want.  We might
	 * not get them in theory, but in practice it's worked so far.
	 *
	 * We want the switcher text to be read-only and executable, and
	 * the stacks to be read-write and non-executable.
	 */
	switcher_text_vma = __get_vm_area(PAGE_SIZE, VM_ALLOC|VM_NO_GUARD,
					  switcher_addr,
					  switcher_addr + PAGE_SIZE);

	if (!switcher_text_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_pages;
	}

	switcher_stacks_vma = __get_vm_area(SWITCHER_STACK_PAGES * PAGE_SIZE,
					    VM_ALLOC|VM_NO_GUARD,
					    switcher_addr + PAGE_SIZE,
					    switcher_addr + TOTAL_SWITCHER_PAGES * PAGE_SIZE);
	if (!switcher_stacks_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_text_vma;
	}

	/*
	 * This code actually sets up the pages we've allocated to appear at
	 * switcher_addr.  map_vm_area() takes the vma we allocated above, the
	 * kind of pages we're mapping (kernel text pages and kernel writable
	 * pages respectively), and a pointer to our array of struct pages.
	 */
	err = map_vm_area(switcher_text_vma, PAGE_KERNEL_RX, lg_switcher_pages);
	if (err) {
		printk("lguest: text map_vm_area failed: %i\n", err);
		goto free_vmas;
	}

	err = map_vm_area(switcher_stacks_vma, PAGE_KERNEL,
			  lg_switcher_pages + SWITCHER_TEXT_PAGES);
	if (err) {
		printk("lguest: stacks map_vm_area failed: %i\n", err);
		goto free_vmas;
	}

	/*
	 * Now the Switcher is mapped at the right address, we can't fail!
	 */
	printk(KERN_INFO "lguest: mapped switcher at %p\n",
	       switcher_text_vma->addr);
	/* And we succeeded... */
	return 0;

free_vmas:
	/* Undoes map_vm_area and __get_vm_area */
	vunmap(switcher_stacks_vma->addr);
free_text_vma:
	vunmap(switcher_text_vma->addr);
free_pages:
	i = TOTAL_SWITCHER_PAGES;
free_some_pages:
	for (--i; i >= 0; i--)
		__free_pages(lg_switcher_pages[i], 0);
	kfree(lg_switcher_pages);
out:
	return err;
}
Example #11
/**
 * Allocate executable kernel memory in the module range.
 *
 * @returns Pointer to an allocation header on success.  NULL on failure.
 *
 * @param   cb          The size the user requested.
 */
static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
{
    size_t const        cbAlloc = RT_ALIGN_Z(sizeof(RTMEMLNXHDREX) + cb, PAGE_SIZE);
    size_t const        cPages  = cbAlloc >> PAGE_SHIFT;
    struct page       **papPages;
    struct vm_struct   *pVmArea;
    size_t              iPage;

    pVmArea = __get_vm_area(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END);
    if (!pVmArea)
        return NULL;
    pVmArea->nr_pages = 0;    /* paranoia? */
    pVmArea->pages    = NULL; /* paranoia? */

    papPages = (struct page **)kmalloc(cPages * sizeof(papPages[0]), GFP_KERNEL);
    if (!papPages)
    {
        vunmap(pVmArea->addr);
        return NULL;
    }

    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN);
        if (!papPages[iPage])
            break;
    }
    if (iPage == cPages)
    {
        /*
         * Map the pages.  The API requires an iterator argument, which can be
         * used, in case of failure, to figure out how much was actually
         * mapped.  Not sure how useful this really is, but whatever.
         *
         * Not entirely sure we really need to set nr_pages and pages here, but
         * they provide a very convenient place for storing something we need
         * in the free function, if nothing else...
         */
        struct page **papPagesIterator = papPages;
        pVmArea->nr_pages = cPages;
        pVmArea->pages    = papPages;
        if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC, &papPagesIterator))
        {
            PRTMEMLNXHDREX pHdrEx = (PRTMEMLNXHDREX)pVmArea->addr;
            pHdrEx->pVmArea     = pVmArea;
            pHdrEx->pvDummy     = NULL;
            return &pHdrEx->Hdr;
        }

        /* bail out */
        pVmArea->nr_pages = papPagesIterator - papPages;
    }

    vunmap(pVmArea->addr);

    while (iPage-- > 0)
        __free_page(papPages[iPage]);
    kfree(papPages);

    return NULL;
}
Example #12
File: vmalloc.c  Project: RKX1209/Abyon
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
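
get_vm_area() here is just the conventional wrapper that pins the search to the default VMALLOC_START..VMALLOC_END window. A small illustrative caller, assuming the usual free_vm_area() counterpart is available:

/* Illustrative use of the wrapper: reserve one page of address space in
 * the vmalloc range, use area->addr for a mapping, then release it. */
static int reserve_scratch_area(void)
{
	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_MAP);

	if (!area)
		return -ENOMEM;

	/* ... map pages or I/O memory at area->addr here ... */

	free_vm_area(area);
	return 0;
}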
Example #13
/*H:010
 * We need to set up the Switcher at a high virtual address.  Remember the
 * Switcher is a few hundred bytes of assembler code which actually changes the
 * CPU to run the Guest, and then changes back to the Host when a trap or
 * interrupt happens.
 *
 * The Switcher code must be at the same virtual address in the Guest as the
 * Host since it will be running as the switchover occurs.
 *
 * Trying to map memory at a particular address is an unusual thing to do, so
 * it's not a simple one-liner.
 */
static __init int map_switcher(void)
{
	int i, err;
	struct page **pagep;

	/*
	 * Map the Switcher in to high memory.
	 *
	 * It turns out that if we choose the address 0xFFC00000 (4MB under the
	 * top virtual address), it makes setting up the page tables really
	 * easy.
	 */

	/* We assume Switcher text fits into a single page. */
	if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
		printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
		       end_switcher_text - start_switcher_text);
		return -EINVAL;
	}

	/*
	 * We allocate an array of struct page pointers.  map_vm_area() wants
	 * this, rather than just an array of pages.
	 */
	lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
				    * TOTAL_SWITCHER_PAGES,
				    GFP_KERNEL);
	if (!lg_switcher_pages) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Now we actually allocate the pages.  The Guest will see these pages,
	 * so we make sure they're zeroed.
	 */
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!lg_switcher_pages[i]) {
			err = -ENOMEM;
			goto free_some_pages;
		}
	}

	/*
	 * We place the Switcher underneath the fixmap area, which is the
	 * highest virtual address we can get.  This is important, since we
	 * tell the Guest it can't access this memory, so we want its ceiling
	 * as high as possible.
	 */
	switcher_addr = FIXADDR_START - (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE;

	/*
	 * Now we reserve the "virtual memory area" we want.  We might
	 * not get it in theory, but in practice it's worked so far.
	 * The end address needs +1 because __get_vm_area allocates an
	 * extra guard page, so we need space for that.
	 */
	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
				     VM_ALLOC, switcher_addr, switcher_addr
				     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
	if (!switcher_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_pages;
	}

	/*
	 * This code actually sets up the pages we've allocated to appear at
	 * switcher_addr.  map_vm_area() takes the vma we allocated above, the
	 * kind of pages we're mapping (kernel pages), and a pointer to our
	 * array of struct pages.  It increments that pointer, but we don't
	 * care.
	 */
	pagep = lg_switcher_pages;
	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
	if (err) {
		printk("lguest: map_vm_area failed: %i\n", err);
		goto free_vma;
	}

	/*
	 * Now the Switcher is mapped at the right address, we can't fail!
	 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
	 */
	memcpy(switcher_vma->addr, start_switcher_text,
	       end_switcher_text - start_switcher_text);

	printk(KERN_INFO "lguest: mapped switcher at %p\n",
	       switcher_vma->addr);
	/* And we succeeded... */
	return 0;

free_vma:
	vunmap(switcher_vma->addr);
free_pages:
	i = TOTAL_SWITCHER_PAGES;
free_some_pages:
	for (--i; i >= 0; i--)
		__free_pages(lg_switcher_pages[i], 0);
	kfree(lg_switcher_pages);
out:
	return err;
}