Code example #1
File: ioremap.c Project: GodFox/magx_kernel_xpixl
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory)) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
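
For orientation, here is a minimal, hypothetical sketch of how a driver might call ioremap_nocache() on a kernel of this era: map an MMIO region uncached, read a register, and unmap. The BAR index and the 0x10 register offset are illustrative assumptions, not taken from the example above.

#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical caller: map BAR 0 uncached, read one register, unmap.
 * The BAR index and the 0x10 offset are placeholders for illustration. */
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap_nocache(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x10);	/* placeholder register offset */
	pr_info("example device status: %#x\n", status);

	iounmap(regs);
	return 0;
}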
Code example #2
File: hijacks.c Project: Safe3/tpe-lkm
static inline void set_addr_ro(unsigned long addr, bool flag) {

#if (defined(CONFIG_XEN) || defined(CONFIG_X86_PAE)) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	struct page *pg;

	pgprot_t prot;
	pg = virt_to_page(addr);
	prot.pgprot = VM_READ;
	change_page_attr(pg, 1, prot);
#else
	unsigned int level;
	pte_t *pte;

	// only set back to readonly if it was readonly before
	if (flag) {
		pte = tpe_lookup_address(addr, &level);

#if !defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
		pte_val(*pte) = pte_val(*pte) &~_PAGE_RW;
#else
		pte->pte = pte->pte &~_PAGE_RW;
#endif
	}
#endif

}
Code example #3
File: hijacks.c Project: Safe3/tpe-lkm
static inline void set_addr_rw(unsigned long addr, bool *flag) {

#if (defined(CONFIG_XEN) || defined(CONFIG_X86_PAE)) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	struct page *pg;

	pgprot_t prot;
	pg = virt_to_page(addr);
	prot.pgprot = VM_READ | VM_WRITE;
	change_page_attr(pg, 1, prot);
#else
	unsigned int level;
	pte_t *pte;

	*flag = true;

	pte = tpe_lookup_address(addr, &level);

#if !defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
	if (pte_val(*pte) & _PAGE_RW) *flag = false;
	else pte_val(*pte) |= _PAGE_RW;
#else
	if (pte->pte & _PAGE_RW) *flag = false;
	else pte->pte |= _PAGE_RW;
#endif
#endif

}
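
Taken together, set_addr_rw() and set_addr_ro() form a save/restore pair: the bool records whether the PTE actually had to be made writable, so the read-only bit is only restored when it was set to begin with. A hedged sketch of the calling pattern follows; the table and handler names are placeholders, not code from the tpe-lkm project.

/* Hypothetical caller: lift write protection around a pointer-sized patch,
 * then restore it only if the page was read-only before we touched it. */
static void patch_entry(unsigned long *table, int idx, unsigned long new_fn)
{
	bool was_ro;	/* filled in by set_addr_rw() */

	set_addr_rw((unsigned long)&table[idx], &was_ro);
	table[idx] = new_fn;
	set_addr_ro((unsigned long)&table[idx], was_ro);
}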
Code example #4
File: hijacks.c Project: b3h3moth/tpe-lkm
void set_addr_ro(unsigned long addr, bool flag) {

    struct page *pg;

    pgprot_t prot;
    pg = virt_to_page(addr);
    prot.pgprot = VM_READ;
    change_page_attr(pg, 1, prot);

}
Code example #5
File: pageattr.c Project: 1x23/unifi-gpl
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	/* the return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_all();
}
Code example #6
File: init.c Project: Broadcom/stblinux-2.6.18
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %uk\n",
			(__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
Code example #7
int set_memory_rw(unsigned long addr, int numpages)
{
	change_page_attr(addr, numpages, pte_mkwrite);
	return 0;
}
Code example #8
int set_memory_ro(unsigned long addr, int numpages)
{
	change_page_attr(addr, numpages, pte_wrprotect);
	return 0;
}
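
The last two examples use a change_page_attr() variant whose third argument is a pte helper (pte_mkwrite / pte_wrprotect) rather than a pgprot, unlike the x86 examples above. Below is a minimal, hypothetical sketch of a caller of this set_memory_ro()/set_memory_rw() pair; the buffer name and the single-page size are assumptions for illustration.

#include <asm/cacheflush.h>	/* declares set_memory_ro()/set_memory_rw() on kernels of this era */

/* Hypothetical: write-protect one page-aligned kernel page after it has been
 * initialised, and make it writable again before it needs to change. */
static void seal_config_page(void *page_aligned_buf)
{
	set_memory_ro((unsigned long)page_aligned_buf, 1);	/* 1 page */
}

static void unseal_config_page(void *page_aligned_buf)
{
	set_memory_rw((unsigned long)page_aligned_buf, 1);
}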