Example #1
File: pgd.c  Project: 03199618/linux
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
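
The pgd_alloc() variants in this listing all share the same top-down walk: take the PGD slot for an address, then allocate each missing intermediate table with pud_alloc()/pmd_alloc() before touching a PTE. For reference, here is a minimal sketch of that walk on its own, assuming a pre-p4d kernel and the four-argument pte_alloc_map() form most of these examples use (exact signatures vary across the kernel versions shown); walk_alloc_pte() is a hypothetical helper name, not a kernel function.

#include <linux/mm.h>
#include <asm/pgalloc.h>

/*
 * Hypothetical helper: walk the page tables of `mm` for `addr`, allocating
 * any missing pud/pmd/pte tables along the way, and return the mapped PTE
 * slot (NULL on allocation failure).  The caller must pte_unmap() the
 * result, just as the examples in this listing do.
 */
static pte_t *walk_alloc_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* PGD slot always exists */
	pud_t *pud = pud_alloc(mm, pgd, addr);	/* allocate PUD table if missing */
	pmd_t *pmd;

	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);		/* allocate PMD table if missing */
	if (!pmd)
		return NULL;

	return pte_alloc_map(mm, NULL, pmd, addr);	/* allocate and map the PTE table */
}
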
Example #2
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc)
		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
				  -1, prot);

	/* carveout - explicitly map the pfns into a vmalloc area */

	nvmap_usecount_inc(h);

	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size, NULL);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1, so that
	 * the handle will not be freed while the kernel mapping exists.
	 * nvmap_handle_put will be called by unmapping this address */
	return p;
}
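
The loop in nvmap_mmap() above is the usual recipe for mapping physical frames into kernel virtual addresses: walk init_mm from pgd_offset_k(), allocate missing levels with pud_alloc()/pmd_alloc()/pte_alloc_kernel(), then install the PTE and flush. Example #4's __ioremap() below does a similar walk on m68k. Pulled out as a single step, a hedged sketch might look like the following, assuming the pre-p4d layout used here; map_kernel_pfn() is a hypothetical name, and flush_tlb_kernel_page() is the ARM-specific flush that nvmap_mmap() itself uses.

#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical helper: map one physical frame `pfn` at kernel virtual
 * address `addr` in init_mm with protection `prot`, allocating any missing
 * intermediate tables.  Returns 0 on success or -ENOMEM if a table could
 * not be allocated -- the same failure handling as the loop above.
 */
static int map_kernel_pfn(unsigned long addr, unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd = pgd_offset_k(addr);		/* kernel (init_mm) PGD slot */
	pud_t *pud = pud_alloc(&init_mm, pgd, addr);
	pmd_t *pmd;
	pte_t *pte;

	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));	/* install the mapping */
	flush_tlb_kernel_page(addr);				/* ARM: drop any stale TLB entry */
	return 0;
}
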
Example #3
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 *
 * 08Jan98 Merged into one routine from several inline routines to reduce
 *         variable count and make things faster. -jj
 *
 * dst->page_table_lock is held on entry and exit,
 * but may be dropped within pmd_alloc() and pte_alloc().
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pgd_t * src_pgd, * dst_pgd;
	unsigned long address = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;

	src_pgd = pgd_offset(src, address)-1;
	dst_pgd = pgd_offset(dst, address)-1;

	for (;;) {
		pmd_t * src_pmd, * dst_pmd;

		src_pgd++; dst_pgd++;
		
		/* copy_pmd_range */
		
		if (pgd_none(*src_pgd))
			goto skip_copy_pmd_range;
		if (pgd_bad(*src_pgd)) {
			pgd_ERROR(*src_pgd);
			pgd_clear(src_pgd);
skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
			if (!address || (address >= end))
				goto out;
			continue;
		}

		src_pmd = pmd_offset(src_pgd, address);
		dst_pmd = pmd_alloc(dst, dst_pgd, address);
		if (!dst_pmd)
			goto nomem;

		do {
			pte_t * src_pte, * dst_pte;
		
			/* copy_pte_range */
		
			if (pmd_none(*src_pmd))
				goto skip_copy_pte_range;
			if (pmd_bad(*src_pmd)) {
				pmd_ERROR(*src_pmd);
				pmd_clear(src_pmd);
skip_copy_pte_range:		address = (address + PMD_SIZE) & PMD_MASK;
				if (address >= end)
					goto out;
				goto cont_copy_pmd_range;
			}

			src_pte = pte_offset(src_pmd, address);
			dst_pte = pte_alloc(dst, dst_pmd, address);
			if (!dst_pte)
				goto nomem;

			spin_lock(&src->page_table_lock);			
			do {
				pte_t pte = *src_pte;
				struct page *ptepage;
				
				/* copy_one_pte */

				if (pte_none(pte))
					goto cont_copy_pte_range_noset;
				if (!pte_present(pte)) {
					swap_duplicate(pte_to_swp_entry(pte));
					goto cont_copy_pte_range;
				}
				ptepage = pte_page(pte);
				if ((!VALID_PAGE(ptepage)) || 
				    PageReserved(ptepage))
					goto cont_copy_pte_range;

				/* If it's a COW mapping, write protect it both in the parent and the child */
				if (cow) {
					ptep_set_wrprotect(src_pte);
					pte = *src_pte;
				}

				/* If it's a shared mapping, mark it clean in the child */
				if (vma->vm_flags & VM_SHARED)
					pte = pte_mkclean(pte);
				pte = pte_mkold(pte);
				get_page(ptepage);
				dst->rss++;

cont_copy_pte_range:		set_pte(dst_pte, pte);
cont_copy_pte_range_noset:	address += PAGE_SIZE;
				if (address >= end)
					goto out_unlock;
				src_pte++;
				dst_pte++;
			} while ((unsigned long)src_pte & PTE_TABLE_MASK);
			spin_unlock(&src->page_table_lock);
		
cont_copy_pmd_range:	src_pmd++;
			dst_pmd++;
		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);
	}
out_unlock:
	spin_unlock(&src->page_table_lock);
out:
	return 0;
nomem:
	return -ENOMEM;
}
Example #4
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
Example #5
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);

	if (vectors_base() == 0) {
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset(init_pmd, 0);

		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		set_pte(new_pte, *init_pte);

		spin_unlock(&mm->page_table_lock);
	}

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/*
	 * FIXME: this should not be necessary
	 */
	clean_cache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}
Example #6
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
Example #7
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain.  This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
Example #8
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

#if defined(CONFIG_SYNO_ARMADA_ARCH)
	new_pgd = __pgd_alloc();
#elif defined(CONFIG_SYNO_COMCERTO)
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, get_order(16384));
#else
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
#endif
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#if defined(CONFIG_SYNO_ARMADA_ARCH) && defined(CONFIG_ARM_LPAE)
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.  On CONFIG_SYNO_ARMADA_ARCH
		 * builds, the vectors are always high with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
	__pgd_free(new_pgd);
#elif defined(CONFIG_SYNO_COMCERTO)
	free_pages((unsigned long)new_pgd, get_order(16384));
#else
	free_pages((unsigned long)new_pgd, 2);
#endif
no_pgd:
	return NULL;
}
Example #9
pgd_t *get_percpu_init_pgd(struct mm_struct *mm, unsigned int cpu)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = per_cpu(percpu_init_pgd, cpu);
	if (new_pgd == NULL)
		new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);

	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	per_cpu(percpu_init_pgd, cpu) = new_pgd;
	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	free_pages((unsigned long)new_pgd, 2);
	new_pgd = NULL;
no_pgd:
	per_cpu(percpu_init_pgd, cpu) = new_pgd;
	return NULL;
}
Example #10
/*
 * This isn't really reliable by any means..
 */
int mem_mmap(struct inode * inode, struct file * file,
	     struct vm_area_struct * vma)
{
	struct task_struct *tsk;
	pgd_t *src_dir, *dest_dir;
	pmd_t *src_middle, *dest_middle;
	pte_t *src_table, *dest_table;
	unsigned long stmp, dtmp, mapnr;
	struct vm_area_struct *src_vma = NULL;

	/* Get the source's task information */

	tsk = get_task(inode->i_ino >> 16);

	if (!tsk)
		return -ESRCH;

	/* Ensure that we have a valid source area.  (Has to be mmap'ed and
	 have valid page information.)  We can't map shared memory at the
	 moment because working out the vm_area_struct & nattach stuff isn't
	 worth it. */

	src_vma = tsk->mm->mmap;
	stmp = vma->vm_offset;
	while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
		while (src_vma && stmp > src_vma->vm_end)
			src_vma = src_vma->vm_next;
		if (!src_vma || (src_vma->vm_flags & VM_SHM))
			return -EINVAL;

		src_dir = pgd_offset(tsk->mm, stmp);
		if (pgd_none(*src_dir))
			return -EINVAL;
		if (pgd_bad(*src_dir)) {
			printk("Bad source page dir entry %08lx\n", pgd_val(*src_dir));
			return -EINVAL;
		}
		src_middle = pmd_offset(src_dir, stmp);
		if (pmd_none(*src_middle))
			return -EINVAL;
		if (pmd_bad(*src_middle)) {
			printk("Bad source page middle entry %08lx\n", pmd_val(*src_middle));
			return -EINVAL;
		}
		src_table = pte_offset(src_middle, stmp);
		if (pte_none(*src_table))
			return -EINVAL;

		if (stmp < src_vma->vm_start) {
			if (!(src_vma->vm_flags & VM_GROWSDOWN))
				return -EINVAL;
			if (src_vma->vm_end - stmp > current->rlim[RLIMIT_STACK].rlim_cur)
				return -EINVAL;
		}
		stmp += PAGE_SIZE;
	}

	src_vma = tsk->mm->mmap;
	stmp    = vma->vm_offset;
	dtmp    = vma->vm_start;

	flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end);
	flush_cache_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
	while (dtmp < vma->vm_end) {
		while (src_vma && stmp > src_vma->vm_end)
			src_vma = src_vma->vm_next;

		src_dir = pgd_offset(tsk->mm, stmp);
		src_middle = pmd_offset(src_dir, stmp);
		src_table = pte_offset(src_middle, stmp);

		dest_dir = pgd_offset(current->mm, dtmp);
		dest_middle = pmd_alloc(dest_dir, dtmp);
		if (!dest_middle)
			return -ENOMEM;
		dest_table = pte_alloc(dest_middle, dtmp);
		if (!dest_table)
			return -ENOMEM;

		if (!pte_present(*src_table))
			do_no_page(tsk, src_vma, stmp, 1);

		if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
			do_wp_page(tsk, src_vma, stmp, 1);

		set_pte(src_table, pte_mkdirty(*src_table));
		set_pte(dest_table, *src_table);
		mapnr = MAP_NR(pte_page(*src_table));
		if (mapnr < MAP_NR(high_memory))
			mem_map[mapnr].count++;

		stmp += PAGE_SIZE;
		dtmp += PAGE_SIZE;
	}

	flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);
	flush_tlb_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
	return 0;
}
Example #11
/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * page_table;
	pte_t entry;
	unsigned long page;

	pgd = pgd_offset(tsk->mm, address);
	pmd = pmd_alloc(pgd, address);
	if (!pmd)
		goto no_memory;
	page_table = pte_alloc(pmd, address);
	if (!page_table)
		goto no_memory;
	entry = *page_table;
	if (pte_present(entry))
		goto is_present;
	if (!pte_none(entry))
		goto swap_page;
	address &= PAGE_MASK;
	if (!vma->vm_ops || !vma->vm_ops->nopage)
		goto anonymous_page;
	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection 
	 */
	page = vma->vm_ops->nopage(vma, address, 
		(vma->vm_flags & VM_SHARED)?0:write_access);
	if (!page)
		goto sigbus;
	++tsk->maj_flt;
	++vma->vm_mm->rss;
	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(page);
	entry = mk_pte(page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (mem_map[MAP_NR(page)].count > 1 && !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	put_page(page_table, entry);
	/* no need to invalidate: a not-present page shouldn't be cached */
	return;

anonymous_page:
	entry = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
	if (write_access) {
		unsigned long page = __get_free_page(GFP_KERNEL);
		if (!page)
			goto sigbus;
		memset((void *) page, 0, PAGE_SIZE);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		vma->vm_mm->rss++;
		tsk->min_flt++;
		flush_page_to_ram(page);
	}
	put_page(page_table, entry);
	return;

sigbus:
	force_sig(SIGBUS, current);
	put_page(page_table, BAD_PAGE);
	/* no need to invalidate, wasn't present */
	return;

swap_page:
	do_swap_page(tsk, vma, address, page_table, entry, write_access);
	return;

no_memory:
	oom(tsk);
is_present:
	return;
}
Example #12
static int fault_in_page(int taskid,
			 struct vm_area_struct *vma,
			 unsigned long address, int write)
{
	static unsigned last_address;
	static int last_task, loop_counter;
	struct task_struct *tsk = task[taskid];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!tsk || !tsk->mm)
		return 1;

	if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
		goto bad_area;
	if (vma->vm_start > address)
		goto bad_area;

	if (address == last_address && taskid == last_task) {
		loop_counter++;
	} else {
		loop_counter = 0;
		last_address = address; 
		last_task = taskid;
	}

	if (loop_counter == WRITE_LIMIT && !write) {
		printk("MSC bug? setting write request\n");
		stats.errors++;
		write = 1;
	}

	if (loop_counter == LOOP_LIMIT) {
		printk("MSC bug? failing request\n");
		stats.errors++;
		return 1;
	}

	pgd = pgd_offset(vma->vm_mm, address);
	pmd = pmd_alloc(pgd,address);
	if(!pmd)
		goto no_memory;
	pte = pte_alloc(pmd, address);
	if(!pte)
		goto no_memory;
	if(!pte_present(*pte)) {
		handle_mm_fault(tsk, vma, address, write);
		goto finish_up;
	}
	set_pte(pte, pte_mkyoung(*pte));
	flush_tlb_page(vma, address);
	if(!write)
		goto finish_up;
	if(pte_write(*pte)) {
		set_pte(pte, pte_mkdirty(*pte));
		flush_tlb_page(vma, address);
		goto finish_up;
	}
	handle_mm_fault(tsk, vma, address, write);

	/* Fall through for do_wp_page */
finish_up:
	stats.success++;
	return 0;

no_memory:
	stats.failure++;
	oom(tsk);
	return 1;
	
bad_area:	  
	stats.failure++;
	tsk->tss.sig_address = address;
	tsk->tss.sig_desc = SUBSIG_NOMAPPING;
	send_sig(SIGSEGV, tsk, 1);
	return 1;
}
Example #13
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;
	unsigned long flags;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	pgd_list_lock(flags);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	pgd_list_add(new_pgd);
	pgd_list_unlock(flags);

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
#ifdef CONFIG_ARM_FCSE
		/* FCSE does not work without high vectors. */
		BUG();
#endif /* CONFIG_ARM_FCSE */

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}