void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
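/*
 * For context, a minimal sketch of the mapping-side counterpart to
 * __kunmap_atomic() above, built from the same ARM helpers it undoes
 * (kmap_atomic_idx_push/pop, TOP_PTE, set_pte_ext,
 * local_flush_tlb_kernel_page). This is an illustrative sketch of how the
 * per-cpu fixmap slot gets populated, not the verbatim arch/arm source;
 * the real version also tries kmap_high_get() first.
 */
void *__kmap_atomic_sketch(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	int type;

	pagefault_disable();	/* paired with pagefault_enable() above */
	if (!PageHighMem(page))
		return page_address(page);	/* lowmem is always mapped */

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* install the page into this cpu's fixmap slot and flush the TLB */
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}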
struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
void __init mem_init(void)
{
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	free_all_bootmem();

#ifndef CONFIG_64BIT
	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();
#endif

	mem_init_print_info(NULL);

	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	last = high_memory;
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last-1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START) {
		if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
			return pte_page(pkmap_page_table[PKMAP_NR(vaddr)]);
		else
			return virt_to_page(ptr);
	}

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}
struct page *kmap_to_page(void *ptr)
{
	struct page *page;

	if ((unsigned long)ptr < PKMAP_ADDR(0))
		return virt_to_page(ptr);

	page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]);
	return page;
}
void kunmap_virt(void *ptr)
{
	struct page *page;

	if ((unsigned long)ptr < PKMAP_ADDR(0))
		return;

	page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]);
	kunmap(page);
}
static void flush_all_zero_pkmaps(void)
{
	int i;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		if (pte_none(pkmap_page_table[i]))
			BUG();

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&pkmap_page_table[i]);

		set_page_address(page, NULL);
	}
	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
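/*
 * The pkmap_count transitions above (>1 in use, 1 unused-but-still-mapped,
 * 0 free) are driven by kunmap_high(). A simplified sketch of it, assuming
 * the same global kmap_lock and pkmap_map_wait used elsewhere in this file;
 * the real mm/highmem.c version also handles irq-safe locking variants.
 */
static void kunmap_high_sketch(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	int need_wakeup = 0;

	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);			/* must currently be kmapped */
	nr = PKMAP_NR(vaddr);

	/*
	 * Drop the reference; a count of 1 leaves the entry mapped but
	 * reclaimable by flush_all_zero_pkmaps() above.
	 */
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();			/* unmapped more often than mapped */
	case 1:
		/* slot became reusable: wake sleepers in map_new_virtual() */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	spin_unlock(&kmap_lock);

	/* do the wakeup outside the lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}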
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			spin_unlock(&kmap_lock);
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			spin_lock(&kmap_lock);

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}
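/*
 * How map_new_virtual() is reached: kmap_high() takes kmap_lock, reuses an
 * existing mapping when the page already has one, and otherwise allocates a
 * pkmap slot. A simplified sketch of that caller, following the mm/highmem.c
 * structure; the real version wraps the locking in lock_kmap()/unlock_kmap().
 */
void *kmap_high_sketch(struct page *page)
{
	unsigned long vaddr;

	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);	/* may sleep, see above */
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);	/* 1 = mapped, +1 per ref */
	spin_unlock(&kmap_lock);
	return (void *)vaddr;
}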
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	totalram_pages += free_all_bootmem();

#ifndef CONFIG_64BIT
	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();
#endif

	codesize =  (unsigned long)&_etext - (unsigned long)&_text;
	datasize =  (unsigned long)&_end - (unsigned long)&_sdata;
	initsize =  (unsigned long)&_einittext - (unsigned long)&_sinittext;
	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;

	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
#ifdef CONFIG_HUGEVMAP
	printk(KERN_DEBUG "  HUGEMAP %#lx - %#lx\n",
	       HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
#endif
	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	last = high_memory;
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last-1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}