Example #1
/* mm->page_table_lock is held. mmap_sem is not held */
static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
{
	pte_t * pte;
	unsigned long pmd_end;

	DEFINE_LOCK_COUNT();

	if (pmd_none(*dir))
		return count;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return count;
	}
	
	pte = pte_offset(dir, address);
	
	pmd_end = (address + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end)
		end = pmd_end;

	do {
		if (pte_present(*pte)) {
			struct page *page = pte_page(*pte);

			if (VALID_PAGE(page) && !PageReserved(page)) {
				count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
				if (!count) {
					address += PAGE_SIZE;
					break;
				}
				/* we reach this with a lock depth of 1 or 2 */
#if 0
				if (TEST_LOCK_COUNT(4)) {
					if (conditional_schedule_needed())
						return count;
					RESET_LOCK_COUNT();
				}
#endif
			}
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	mm->swap_address = address;
	return count;
}
Example #2
/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it.  -M.U-
 */
static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
	unsigned long data)
{
	pgd_t *pgdir;
	pmd_t *pgmiddle;
	pte_t *pgtable;
	unsigned long page;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (!pgd_present(*pgdir)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return;
	}
	pgmiddle = pmd_offset(pgdir, addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	page = pte_page(*pgtable);
	if (!pte_write(*pgtable)) {
		do_wp_page(tsk, vma, addr, 1);
		goto repeat;
	}
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page < high_memory)
		*(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
	set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	flush_tlb();
}
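For context, a hedged sketch of the tracer side: in kernels of this vintage, a PTRACE_POKEDATA/PTRACE_POKETEXT request is what eventually reaches put_long() after the word has been checked for alignment and range. The helper name and the tracee address below are hypothetical; only the ptrace(2) call itself is standard.

/* Tracer-side sketch (user space): poke one word into a stopped,
 * already-attached tracee; the kernel side ends up in put_long(). */
#include <sys/types.h>
#include <sys/ptrace.h>

static int poke_word(pid_t pid, void *addr, long data)	/* hypothetical helper */
{
	if (ptrace(PTRACE_POKEDATA, pid, addr, (void *)data) == -1)
		return -1;	/* errno holds the reason */
	return 0;
}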
Example #3
/**
 * flush_icache_page_range - Invalidate the icache for part of a single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Invalidate the icache for part of a single page, as determined by the
 * virtual addresses given.  The page must be in the paged area.  The dcache is
 * not flushed as the cache must be in write-through mode to get here.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* invalidate the icache coverage on that region */
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
}
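Since flush_icache_page_range() handles at most one page, a caller has to split larger ranges itself. A minimal sketch of such a splitting loop, assuming it lives in the same file; this is not the actual mn10300 caller, just the pattern:

/* Hedged sketch: walk an arbitrary range one page at a time so that
 * each call to flush_icache_page_range() stays within a single page. */
static void flush_icache_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;	/* next page boundary */
		if (next > end)
			next = end;
		flush_icache_page_range(addr, next);
	}
}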
Example #4
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		/* The hugepage case is simplified on sparc64 because
		 * we encode the sub-page pfn offsets into the
		 * hugepage PTEs.  We could optimize this in the future to
		 * use page_cache_add_speculative() for the hugepage case.
		 */
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
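For orientation, a hedged sketch of the level above this leaf: in the fast-GUP pattern, a gup_pmd_range() walks the PMD entries and hands each present entry to gup_pte_range(). The sparc64 original also has a huge-PMD branch, which is omitted from this sketch:

/* Sketch of the caller one level up in the fast-GUP walk; the
 * huge-page branch of the real function is omitted. */
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}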
Example #5
static inline void forget_pte(pte_t page)
{
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		struct page *ptpage = pte_page(page);
		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
			return;
		/* 
		 * free_page() used to be able to clear swap cache
		 * entries.  We may now have to do it manually.  
		 */
		free_page_and_swap_cache(ptpage);
		return;
	}
	swap_free(pte_to_swp_entry(page));
}
Example #6
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
Example #7
static inline void forget_pte(pte_t page)
{
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		unsigned long addr = pte_page(page);
		if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
			return;
		/* 
		 * free_page() used to be able to clear swap cache
		 * entries.  We may now have to do it manually.  
		 */
		free_page_and_swap_cache(addr);
		return;
	}
	swap_free(pte_val(page));
}
Example #8
/*
 * Return indicates whether a page was freed so caller can adjust rss
 */
static inline int free_pte(pte_t pte)
{
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);
		if ((!VALID_PAGE(page)) || PageReserved(page))
			return 0;
		/* 
		 * free_page() used to be able to clear swap cache
		 * entries.  We may now have to do it manually.  
		 */
		if (pte_dirty(pte) && page->mapping)
			set_page_dirty(page);
		free_page_and_swap_cache(page);
		return 1;
	}
	swap_free(pte_to_swp_entry(pte));
	return 0;
}
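A hedged caller fragment, modeled loosely on 2.4's zap_pte_range(): the return value drives the RSS accounting the comment above mentions. ptep and freed are assumed to exist in the surrounding loop.

/* Hypothetical fragment: clear one PTE, free what it referenced,
 * and count freed pages so the caller can do mm->rss -= freed. */
pte_t pte = ptep_get_and_clear(ptep);

if (!pte_none(pte))
	freed += free_pte(pte);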
Example #9
File: gup.c Project: 1800alex/linux
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	u64 mask, result;
	pte_t *ptep;

#ifdef CONFIG_X2TLB
	result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
	if (write)
		result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
	result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
	if (write)
		result |= _PAGE_WRITE;
#else
	result = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		result |= _PAGE_RW;
#endif

	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		__flush_anon_page(page, addr);
		flush_dcache_page(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}
Example #10
struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long kaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	struct page *page = NULL;

	/* address is user VA; convert to kernel VA of desired page */
	kaddr = (address - vma->vm_start) + offset;
	kaddr = VMALLOC_VMADDR(kaddr);

	spin_lock(&init_mm.page_table_lock);

	/* Lookup page structure for kernel VA */
	pgd = pgd_offset(&init_mm, kaddr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		goto out;
	pmd = pmd_offset(pgd, kaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		goto out;
	ptep = pte_offset(pmd, kaddr);
	if (!ptep)
		goto out;
	pte = *ptep;
	if (!pte_present(pte))
		goto out;
	if (write && !pte_write(pte))
		goto out;
	page = pte_page(pte);
	if (!VALID_PAGE(page)) {
		page = NULL;
		goto out;
	}

	/* Increment reference count on page */
	get_page(page);

out:
	spin_unlock(&init_mm.page_table_lock);

	return page;
}
Example #11
/**
 * get_struct_page - Gets a struct page for a particular address
 * @addr: the address of the page we need
 *
 * Two versions of this function have to be provided to work with both
 * the 2.4 and 2.5 kernels. Rather than littering the function with
 * #defines, there are just two separate copies. Look at the one that is
 * relevant to the kernel you're using.
 */
struct page *get_struct_page(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;
	struct page *page=NULL;

	mm = current->mm;
	/* Is this possible? */
	if (!mm) return NULL;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud) && !pud_bad(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
				/*
				 * Disable preemption because of the potential
				 * kmap().  page_table_lock should already have
				 * disabled preemption, but be paranoid.
				 */
				preempt_disable();
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				pte_unmap(ptep);
				preempt_enable();
				if (pte_present(pte)) {
					pfn = pte_pfn(pte);
					if (pfn_valid(pfn))
						page = pte_page(pte);
				}
			}
		}
	}

	spin_unlock(&mm->page_table_lock);
	return page;
}
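On kernels with four- or five-level page tables, the same walk gains a p4d step between pgd and pud. A minimal sketch under that assumption (locking elided; the real thing needs mm->page_table_lock or equivalent, as above):

/* Hedged sketch: the same lookup on a kernel that has p4d_offset().
 * Locking is elided here; callers must hold appropriate protection. */
static struct page *addr_to_page(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	struct page *page = NULL;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	ptep = pte_offset_map(pmd, addr);
	if (!ptep)
		return NULL;
	pte = *ptep;
	pte_unmap(ptep);
	if (pte_present(pte) && pfn_valid(pte_pfn(pte)))
		page = pte_page(pte);
	return page;
}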
Example #12
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}
Example #13
void update_mmu_cache(struct vm_area_struct * vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned long pteval;
	unsigned long vpn;

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

#if defined(CONFIG_SH7705_CACHE_32KB)
	{
		struct page *page = pte_page(pte);
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;

			__flush_wback_region((void *)P1SEGADDR(phys),
					     PAGE_SIZE);
			__set_bit(PG_mapped, &page->flags);
		}
	}
#endif

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	ctrl_outl(vpn, MMU_PTEH);

	pteval = pte_val(pte);

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	ctrl_outl(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}
Example #14
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		if (head != page)
			get_huge_page_tail(page);

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
Example #15
/* Ensure all existing pages follow the policy. */
static int
verify_pages(struct mm_struct *mm,
	     unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pud_t *pud;
		pgd_t *pgd;
		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd)) {
			unsigned long next = (addr + PGDIR_SIZE) & PGDIR_MASK;
			if (next <= addr)	/* wrapped past the end of the address space */
				break;
			addr = next;
			continue;
		}
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = (addr + PUD_SIZE) & PUD_MASK;
			continue;
		}
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
Example #16
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 */
static unsigned long get_long(struct task_struct * tsk, 
	struct vm_area_struct * vma, unsigned long addr)
{
	pgd_t * pgdir;
	pmd_t * pgmiddle;
	pte_t * pgtable;
	unsigned long page;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (pgd_none(*pgdir)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return 0;
	}
	pgmiddle = pmd_offset(pgdir,addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page directory %08lx\n",
		       pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return 0;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	page = pte_page(*pgtable);
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page >= high_memory)
		return 0;
	page += addr & ~PAGE_MASK;
	return *(unsigned long *) page;
}
Example #17
void __update_cache(struct vm_area_struct *vma, unsigned long address,
        pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!cpu_has_dc_aliases)
		return;

	page = pte_page(pte);
	if (VALID_PAGE(page) && page->mapping &&
	    (page->flags & (1UL << PG_dcache_dirty))) {
		if (pages_do_alias((unsigned long)page_address(page), address & PAGE_MASK)) {
			addr = (unsigned long) page_address(page);
			flush_data_cache_page(addr);
		}

		ClearPageDcacheDirty(page);
	}
}
Example #18
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
  unsigned long ret = 0UL;
  pmd_t *pmd;
  pte_t *ptep, pte;
  
  if (!pgd_none(*pgd)) {
    pmd = pmd_offset(pgd, adr);
    if (!pmd_none(*pmd)) {
      ptep = pte_offset_kernel(pmd, adr);
      pte = *ptep;
      if(pte_present(pte)) {
	ret = (unsigned long) page_address(pte_page(pte));
	ret |= (adr & (PAGE_SIZE - 1));
      }
    }
  }
  //  printk(KERN_INFO "uv2kva(%lx-->%lx) \n", adr, ret);
  return ret;
}
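A possible call site, given only as an assumption about how a helper like this gets used: fetch the pgd for the current process first, then let uvirt_to_kva() do the rest. The wrapper name is hypothetical, and the page must be resident (e.g. pinned beforehand) for the result to stay valid.

/* Hypothetical caller: resolve a resident user address to a kernel VA. */
static unsigned long user_to_kva(unsigned long uaddr)
{
	pgd_t *pgd = pgd_offset(current->mm, uaddr);

	return uvirt_to_kva(pgd, uaddr);	/* 0 if not mapped */
}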
Example #19
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		__cpuc_flush_dcache_page((void *)vaddr);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
Example #20
static int show_vma_pages(pmd_t *dir, unsigned long start, unsigned long end, unsigned long *total_pages, unsigned long *total_fpages, struct task_struct *ptask)
{
	pte_t *pte;
	struct page *page;
	unsigned long address = start;

	////printk(KERN_NOTICE "SHOW_VMA_PAGES(*PMD(0x%x))\n", pmd_val(*dir));////
	if (pmd_none(*dir))
		return 0;

	while (address < end)
	{
		pte = pte_offset(dir,address);
		////printk(KERN_NOTICE "PTE(0x%x)\n",pte_val(*pte)); ////
		if (pte_present(*pte))
		{
			*total_pages = (*total_pages) + 1;
			page = pte_page(*pte);
			////printk(KERN_NOTICE "PAGE(0x%x)\n", page);////
			if (page->mapping)
			{
				struct inode *inode;
				struct dentry *dentry;
				struct list_head *head;
				struct list_head *next;

				*total_fpages = (*total_fpages) + 1;
				inode = page->mapping->host;
				head = &(inode->i_dentry);
				next = head->next;
				while (next != head)
				{
					dentry = list_entry(next, struct dentry, d_alias);
					next = next->next;
					printk(KERN_NOTICE "(%s):PAGE(0x%x) of FILE(%s)\n", ptask->comm, (unsigned int)page, dentry->d_name.name);
				}
			}
			else
				printk(KERN_NOTICE "(%s):PAGE(0x%x)\n", ptask->comm, (unsigned int)page);
		}
		address += PAGE_SIZE;
	}

	return 0;
}
Example #21
/* Converts a kernel virtual address into a kernel virtual
   address that is part of the direct mapping between
   virtual and physical addresses. If you e.g. allocated
   memory with vmalloc(), you get virtual addresses that are
   part of a separate area. By converting such an address,
   you receive a kernel virtual address that you can
   e.g. feed into virt_to_phys() or MAP_NR().
   Note: the function below works for one page. If you
   have a set of pages in a vmalloc-allocated area,
   each page may have a different virtual address in
   the direct mapping.
   Returns 0 if no mapping is found.
*/
volatile void *virt_to_kseg(volatile void *address)
{
        pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte;
        unsigned long ret=0UL;
        
        /* if we are below the max direct mappings, we use the
           direct conversion function
        */ 
        if (MAP_NR(address) < max_mapnr)
                return(address);

        /* else we really have to parse the page table to get the map nr */

        /* get the page global directory out of the kernel memory map. */
        pgd = pgd_offset_k((unsigned long)address);

        /* check whether we found an entry */
        if (!pgd_none(*pgd))
        {
               /* get the page middle directory */
               pmd = pmd_offset(pgd, (unsigned long)address);
               /* check for a valid entry */
               if (!pmd_none(*pmd))
               {
                    /* get a pointer to the page table entry */
                    ptep = pte_offset(pmd, (unsigned long)address);
                    /* get the page table entry itself */
                    pte = *ptep;
                    /* check for a valid page */
                    if (pte_present(pte))
                    {
                      /* get the kseg address of the page */
                      ret = (unsigned long)pte_page(pte);
                      /* add the offset within the page to the page address */
                      ret |= ((unsigned long)address & (PAGE_SIZE - 1));
                    }
               }
        }
        return((volatile void *)ret);
}
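A hedged usage fragment: translate one page of a vmalloc() allocation so it can be fed into virt_to_phys(), as the comment above suggests. Purely illustrative:

/* Hypothetical usage: find the direct-mapped alias of one
 * vmalloc()ed page, then its physical address. */
void *vbuf = vmalloc(PAGE_SIZE);
volatile void *kseg;

if (vbuf) {
	kseg = virt_to_kseg(vbuf);
	if (kseg)
		printk("phys = %08lx\n", virt_to_phys((void *)kseg));
	vfree(vbuf);
}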
Example #22
/*
 * This is a lazy way to flush the icache, provided the CPU has the NX feature enabled.
 * This is called from set_pte.
 */
void mic_flush_icache_nx(pte_t *ptep, pte_t pte)
{
	/*
	 * Do not continue if icache snooping is enabled
	 * or if the NX feature doesn't exist.
	 */
	if(icache_snoop || !is_nx_support)
		return;

	/*
	 * Similar to the ia64 set_pte code
	 * We only flush and set PG_arch_1 bit if the page is
	 * present && page is user page && has backing page struct
	 * && page is executable &&
	 * (page swapin or new page or page migration ||
	 * copy_on_write with page copying)
	 */
	if (pte_present(pte) && pte_user(pte) && pfn_valid(pte_pfn(pte)) &&
		!pte_no_exec(pte) && (!pte_present(*ptep) ||
		pte_pfn(*ptep) != pte_pfn(pte)))
		mic_flush_icache_lazy(pte_page(pte));
}
Example #23
/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_kernel are freed.
 */
static void killseg (int id)
{
    struct shmid_kernel *shp;
    int i, numpages;

    shp = shm_segs[id];
    if (shp == IPC_NOID || shp == IPC_UNUSED) {
        printk ("shm nono: killseg called on unused seg id=%d\n", id);
        return;
    }
    shp->u.shm_perm.seq++;     /* for shmat */
    shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI); /* increment, but avoid overflow */
    shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
    used_segs--;
    if (id == max_shmid)
        while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
    if (!shp->shm_pages) {
        printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
        return;
    }
    numpages = shp->shm_npages;
    for (i = 0; i < numpages ; i++) {
        pte_t pte;
        pte = __pte(shp->shm_pages[i]);
        if (pte_none(pte))
            continue;
        if (pte_present(pte)) {
            free_page (pte_page(pte));
            shm_rss--;
        } else {
            swap_free(pte_val(pte));
            shm_swp--;
        }
    }
    vfree(shp->shm_pages);
    shm_tot -= numpages;
    kfree(shp);
    return;
}
Example #24
/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it.  -M.U-
 */
static void put_long(struct vm_area_struct * vma, unsigned long addr,
	unsigned long data)
{
	pgd_t *pgdir;
	pte_t *pgtable;
	unsigned long page;

repeat:
	pgdir = PAGE_DIR_OFFSET(vma->vm_task, addr);
	if (!pgd_present(*pgdir)) {
		do_no_page(vma, addr, 1);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return;
	}
	pgtable = (pte_t *) (PAGE_PTR(addr) + pgd_page(*pgdir));
	if (!pte_present(*pgtable)) {
		do_no_page(vma, addr, 1);
		goto repeat;
	}
	page = pte_page(*pgtable);
	if (!pte_write(*pgtable)) {
		do_wp_page(vma, addr, 1);
		goto repeat;
	}
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page < high_memory) {
		page += addr & ~PAGE_MASK;
		*(unsigned long *) page = data;
	}
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
	*pgtable = pte_mkdirty(mk_pte(page, vma->vm_page_prot));
	invalidate();
}
Example #25
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		pte_t pte = READ_ONCE(*ptep);
		struct page *page;
		/*
		 * Similar to the PMD case, NUMA hinting must take slow path
		 */
		if (pte_numa(pte))
			return 0;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
Example #26
File: dev_dma.c Project: 119/ipnc
unsigned long DMA_getPhysAddr(unsigned long virtp)
{
  pgd_t *pgd;
  pmd_t *pmd;
  pte_t *pte;
  struct mm_struct *mm = current->mm;

  pgd = pgd_offset(mm, virtp);
  if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
      pmd = pmd_offset(pgd, virtp);

      if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
          pte = pte_offset_kernel(pmd, virtp);

          if (pte_present(*pte)) {
              return __pa(page_address(pte_page(*pte)) +
                          (virtp & ~PAGE_MASK));
          }
      }
  }

  return 0;
}
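A hedged caller sketch: resolve a user buffer to a physical address before programming a DMA engine, treating 0 as "not resident". The function and the dma_engine_program() call are hypothetical; real code would also need to pin the pages first.

/* Hypothetical caller: the buffer must stay resident while the
 * hardware uses it, so it should be pinned before this point. */
static int dma_submit_user_buf(void *user_buf, size_t len)
{
	unsigned long phys = DMA_getPhysAddr((unsigned long)user_buf);

	if (!phys)
		return -EFAULT;	/* not mapped; fall back to a bounce buffer */
	dma_engine_program(phys, len);	/* hypothetical hardware call */
	return 0;
}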
Example #27
/* mm->page_table_lock is held. mmap_sem is not held */
static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
{
	pte_t * pte;
	unsigned long pmd_end;

	if (pmd_none(*dir))
		return count;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return count;
	}
	
	pte = pte_offset(dir, address);
	
	pmd_end = (address + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end)
		end = pmd_end;

	do {
		if (pte_present(*pte)) {
			struct page *page = pte_page(*pte);

			if (VALID_PAGE(page) && !PageReserved(page)) {
				count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
				if (!count) {
					address += PAGE_SIZE;
					break;
				}
			}
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	mm->swap_address = address;
	return count;
}
Example #28
static void flush_all_zero_pkmaps(void)
{
	int i;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		if (pte_none(pkmap_page_table[i]))
			BUG();

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&pkmap_page_table[i]);

		set_page_address(page, NULL);
	}
	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
Example #29
/* 
 * This is the core of the direct rendering engine.
 */
struct page *
sgi_graphics_nopage (struct vm_area_struct *vma, unsigned long address, int
		     no_share)
{
	pgd_t *pgd; pmd_t *pmd; pte_t *pte; 
	int board = GRAPHICS_CARD (vma->vm_dentry->d_inode->i_rdev);

	unsigned long virt_add, phys_add;

#ifdef DEBUG
	printk ("Got a page fault for board %d address=%lx guser=%lx\n", board,
		address, (unsigned long) cards[board].g_user);
#endif
	
	/* Figure out if another process has this mapped, and revoke the mapping
	 * in that case. */
	if (cards[board].g_user && cards[board].g_user != current) {
		/* FIXME: save graphics context here, dump it to rendering
		 * node? */

		remove_mapping(cards[board].g_user, vma->vm_start, vma->vm_end);
	}

	cards [board].g_user = current;

	/* Map the physical address of the newport registers into the address
	 * space of this process */

	virt_add = address & PAGE_MASK;
	phys_add = cards[board].g_regs + virt_add - vma->vm_start;
	remap_page_range(virt_add, phys_add, PAGE_SIZE, vma->vm_page_prot);

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	pte = pte_offset(pmd, address);
	return pte_page(*pte);
}
Example #30
static long flush_l1_cache_range(unsigned long addr, int size)
{
	struct page *page;
	pte_t *pte_ptr;
	unsigned long end;

	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_ptr = kgsl_get_pte_from_vaddr(end);
		if (!pte_ptr)
			return -EINVAL;

		page = pte_page(pte_val(*pte_ptr));
		if (!page) {
			KGSL_DRV_ERR("could not find page for pte\n");
			pte_unmap(pte_ptr);
			return -EINVAL;
		}

		pte_unmap(pte_ptr);
		flush_dcache_page(page);
	}

	return 0;
}