/*
 * Allocate a physically contiguous vram area and switch its linear
 * kernel mapping to uncached.
 *
 * @va:        area descriptor to fill in (logical, phys, size, order).
 * @max_order: largest page order to try first.
 * @min_order: smallest acceptable page order.
 *
 * Tries __get_free_pages() starting at @max_order and backs off one
 * order at a time until an allocation succeeds or the order drops to
 * @min_order.  Returns 0 on success, -ENOMEM if no order in the range
 * could be satisfied.
 */
static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
				 unsigned min_order)
{
	gfp_t flags;
	unsigned long i;
	pgprot_t wc_pageprot;

	/* NOTE(review): despite the "wc_" name this is the uncached
	 * protection, not write-combining — confirm intent. */
	wc_pageprot = PAGE_KERNEL_NOCACHE;
	/* Pre-increment so the --max_order inside the loop starts at the
	 * caller's max_order on the first iteration. */
	max_order++;

	do {
		/*
		 * Really try hard to get the needed memory.
		 * We need memory below the first 32MB, so we
		 * add the __GFP_DMA flag that guarantees that we are
		 * below the first 16MB.
		 */
		flags = __GFP_DMA | __GFP_HIGH;
		va->logical = __get_free_pages(flags, --max_order);
	} while (va->logical == 0 && max_order > min_order);

	if (!va->logical)
		return -ENOMEM;

	va->phys = virt_to_phys((void *)va->logical);
	va->size = PAGE_SIZE << max_order;
	va->order = max_order;

	/*
	 * It seems like __get_free_pages only ups the usage count
	 * of the first page. This doesn't work with nopage mapping, so
	 * up the usage count once more.
	 */
	memset((void *)va->logical, 0x00, va->size);
	for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
		get_page(virt_to_page(i));
	}

	/*
	 * Change caching policy of the linear kernel map to avoid
	 * mapping type conflicts with user-space mappings.
	 * The first global_flush_tlb() is really only there to do a global
	 * wbinvd().
	 */
	global_flush_tlb();
	change_page_attr(virt_to_page(va->logical), va->size >> PAGE_SHIFT,
			 wc_pageprot);
	global_flush_tlb();

	printk(KERN_DEBUG MODULE_NAME
	       ": Allocated %ld bytes vram area at 0x%08lx\n",
	       va->size, va->phys);

	return 0;
}
/*
 * Write-protect the kernel's read-only data (and, configuration
 * permitting, the text) in the linear kernel mapping.
 */
void mark_rodata_ro(void)
{
	unsigned long start = (unsigned long)_stext, end;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = (unsigned long)_etext;
#endif
#ifdef CONFIG_KPROBES
	/* NOTE(review): with kprobes enabled only .rodata is protected,
	 * overriding the choices above — presumably the text must remain
	 * writable for probe insertion; confirm. */
	start = (unsigned long)__start_rodata;
#endif
	end = (unsigned long)__end_rodata;

	/* Shrink the range inward to whole-page boundaries. */
	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
	end &= PAGE_MASK;
	if (end <= start)
		return;

	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
/*
 * ioremap_nocache - map bus memory into CPU space, uncached.
 *
 * @phys_addr: physical/bus address to map.
 * @size:      length of the mapping in bytes.
 *
 * Performs an uncached __ioremap() and, when the target range also lies
 * in lowmem (and therefore has a cached alias in the linear kernel map),
 * switches that alias to uncached as well so the two mappings do not
 * disagree on memory type.  Returns NULL on failure.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);

	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory)) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right. */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		/* On failure undo the ioremap; the flush is needed either
		 * way since some pages may already have been changed. */
		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
/*
 * Free a vram area previously set up by vmlfb_alloc_vram_area():
 * restore the default cached attribute on the linear kernel map, drop
 * the per-page references taken at allocation time, and release the
 * pages.  No-op when va->logical is 0 (nothing allocated).
 */
static void vmlfb_free_vram_area(struct vram_area *va)
{
	unsigned long j;

	if (va->logical) {

		/*
		 * Reset the linear kernel map caching policy.
		 */
		change_page_attr(virt_to_page(va->logical),
				 va->size >> PAGE_SHIFT, PAGE_KERNEL);
		global_flush_tlb();

		/*
		 * Decrease the usage count on the pages we've used
		 * to compensate for upping when allocating.
		 */
		for (j = va->logical; j < va->logical + va->size;
		     j += PAGE_SIZE) {
			(void)put_page_testzero(virt_to_page(j));
		}

		printk(KERN_DEBUG MODULE_NAME
		       ": Freeing %ld bytes vram area at 0x%08lx\n",
		       va->size, va->phys);
		free_pages(va->logical, va->order);

		va->logical = 0;
	}
/*
 * Release a page handed out by i460_alloc_page().  When the GART page
 * size exceeds the kernel page size nothing was really allocated, so
 * there is nothing to free.
 */
static void i460_destroy_page (void *page, int flags)
{
	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
		return;

	agp_generic_destroy_page(page, flags);
	global_flush_tlb();
}
/*
 * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
 * allocate memory until we know where it is to be bound in the aperture (a
 * multi-kernel-page alloc might fit inside of an already allocated GART page).
 *
 * Let's just hope nobody counts on the allocated AGP memory being there before bind time
 * (I don't think current drivers do)...
 */
static void *i460_alloc_page (struct agp_bridge_data *bridge)
{
	void *page;

	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
		/* Use the bridge we were handed instead of the global
		 * agp_bridge, so the allocation is attributed to the
		 * correct bridge. */
		page = agp_generic_alloc_page(bridge);
		global_flush_tlb();
	} else
		/* Returning NULL would cause problems */
		/* AK: really dubious code. */
		page = (void *)~0UL;
	return page;
}
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 *
 * @phys_addr: start of the physical range being ioremapped.
 * @size:      length of the range in bytes.
 * @flags:     extra pte flags to apply on top of __PAGE_KERNEL.
 *
 * Only ranges that fall inside the already direct-mapped portion of
 * memory (below end_pfn_map) need their attributes changed.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page, because the
		 * phys addr can be in a hole between nodes and not have a
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr,npages,__pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
/*
 * iounmap - tear down a mapping created by ioremap().
 *
 * @addr: the cookie previously returned by ioremap().
 *
 * Removes the vmalloc-area mapping and, when the mapping covered real
 * RAM, restores the default cached attribute on the linear kernel map
 * that was changed at ioremap time.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	/* Addresses at or below high_memory were never ioremapped. */
	if ((void __force *) addr <= high_memory)
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p) {
		printk("__iounmap: bad address %p\n", addr);
		return;
	}

	if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
/*
 * Write-protect the kernel's read-only data section in the linear
 * kernel mapping, one page at a time.
 */
void mark_rodata_ro(void)
{
	unsigned long page_addr = (unsigned long)__start_rodata;
	unsigned long rodata_end = (unsigned long)__end_rodata;

	while (page_addr < rodata_end) {
		change_page_attr(virt_to_page(page_addr), 1, PAGE_KERNEL_RO);
		page_addr += PAGE_SIZE;
	}

	printk("Write protecting the kernel read-only data: %uk\n",
	       (__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
/*
 * Return the pages in [begin, end) to the page allocator.
 *
 * @what:  human-readable label for the printk.
 * @begin: first virtual address to free (expected page-aligned).
 * @end:   one past the last address to free.
 *
 * For each page: clear PG_reserved, reset the refcount, poison the
 * contents to catch later use of freed init memory, and free it.
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
		       POISON_FREE_INITMEM, PAGE_SIZE);
		/* Pages in the kernel text mapping were set up with
		 * non-default attributes; reset them before freeing. */
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		free_page(addr);
		totalram_pages++;
	}
	/* change_page_attr_addr() requires a flush afterwards; only
	 * needed if the loop touched the kernel text mapping. */
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}
// Reset all referenced bits to 0. static void clear_ref_bits (void) { void *cur_addr; pte_t *pte; int i; for (i = 0; i < cr_num_drivers; i++) { for (cur_addr = cr_base_address[i]; cur_addr < cr_base_address[i] + cr_module_size[i]; cur_addr += PAGE_SIZE) { pte = virt_to_pte (cur_addr); if (pte != NULL) { *pte = pte_mkold(*pte); // kunmap_atomic (page, KM_IRQ1); pte_unmap(pte); } } } global_flush_tlb(); }