void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type;

        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
                return;
        }

        type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
        {
                unsigned int idx;

                idx = type + KM_TYPE_NR * smp_processor_id();
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

                /*
                 * Force other mappings to Oops if they try to access
                 * this pte without first remapping it.
                 */
                pte_clear(&init_mm, vaddr, kmap_pte - idx);
                local_flush_tlb_page(NULL, vaddr);
        }
#endif

        kmap_atomic_idx_pop();
        pagefault_enable();
}
void iounmap_atomic(void __iomem *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

        if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;

                type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
                WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
                /*
                 * Force other mappings to Oops if they try to access this
                 * pte without first remapping it. Keeping stale mappings
                 * around is also a bad idea, in case the page changes
                 * cacheability attributes or becomes a protected page in a
                 * hypervisor.
                 */
                kpte_clear_flush(kmap_pte - idx, vaddr);
                kmap_atomic_idx_pop();
        }

        pagefault_enable();
        preempt_enable();
}
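iounmap_atomic() above is the teardown half of the atomic I/O mapping API; callers normally reach it through io_mapping_unmap_atomic(). A minimal usage sketch follows (not from the original source; the helper name, mapping object, and offset are hypothetical):

/*
 * Minimal usage sketch: map one page of a previously created io_mapping,
 * write a register, then unmap. Assumes an io_mapping created elsewhere
 * with io_mapping_create_wc().
 */
static void poke_device_reg(struct io_mapping *mapping,
                            unsigned long offset, u32 val)
{
        void __iomem *p = io_mapping_map_atomic_wc(mapping, offset);

        writel(val, p);
        io_mapping_unmap_atomic(p);     /* ends up in iounmap_atomic() */
}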
void homecache_finv_map_page(struct page *page, int home)
{
        unsigned long flags;
        unsigned long va;
        pte_t *ptep;
        pte_t pte;

        if (home == PAGE_HOME_UNCACHED)
                return;
        local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
        va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
                           (KM_TYPE_NR * smp_processor_id()));
#else
        va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
        ptep = virt_to_kpte(va);
        pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
        __set_pte(ptep, pte_set_home(pte, home));
        homecache_finv_page_va((void *)va, home);
        __pte_clear(ptep);
        hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
        kmap_atomic_idx_pop();
#endif
        local_irq_restore(flags);
}
void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

        if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;

                type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
                WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
                kpte_clear_flush(kmap_pte - idx, vaddr);
                kmap_atomic_idx_pop();
                arch_flush_lazy_mmu_mode();
        }
#ifdef CONFIG_DEBUG_HIGHMEM
        else {
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
        }
#endif

        pagefault_enable();
}
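For context, a minimal caller-side sketch of how kmap_atomic() pairs with the kunmap_atomic() wrapper that expands to __kunmap_atomic() above (the helper name is hypothetical, not from the original source):

/*
 * Minimal usage sketch: temporarily map a (possibly highmem) page and
 * copy data out of it. The helper name is hypothetical; the
 * kmap_atomic()/kunmap_atomic() pairing is the point.
 */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
        void *vaddr = kmap_atomic(page);

        memcpy(dst, vaddr, len);
        kunmap_atomic(vaddr);   /* expands to __kunmap_atomic() above */
}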
int __init vsyscall_init(void)
{
        printk("VSYSCALL: consistency checks...");
        if ((unsigned long) &vgettimeofday !=
            VSYSCALL_ADDR(__NR_vgettimeofday)) {
                printk("vgettimeofday link addr broken\n");
                printk("VSYSCALL: vsyscall_init failed!\n");
                return -EFAULT;
        }
        if ((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime)) {
                printk("vtime link addr broken\n");
                printk("VSYSCALL: vsyscall_init failed!\n");
                return -EFAULT;
        }
        if (VSYSCALL_ADDR(0) != __fix_to_virt(FIX_VSYSCALL_GTOD_FIRST_PAGE)) {
                printk("fixmap first vsyscall 0x%lx should be 0x%lx\n",
                       __fix_to_virt(FIX_VSYSCALL_GTOD_FIRST_PAGE),
                       VSYSCALL_ADDR(0));
                printk("VSYSCALL: vsyscall_init failed!\n");
                return -EFAULT;
        }
        printk("passed...mapping...");
        map_vsyscall();
        printk("done.\n");
        vsyscall_mapped = 1;
        printk("VSYSCALL: fixmap virt addr: 0x%lx\n",
               __fix_to_virt(FIX_VSYSCALL_GTOD_FIRST_PAGE));

        return 0;
}
static int __init vsyscall_init(void)
{
        if ((unsigned long) &vgettimeofday !=
            VSYSCALL_ADDR(__NR_vgettimeofday))
                panic("vgettimeofday link addr broken");
        if ((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime))
                panic("vtime link addr broken");
        if (VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE))
                panic("fixmap first vsyscall %lx should be %lx",
                      __fix_to_virt(VSYSCALL_FIRST_PAGE),
                      VSYSCALL_ADDR(0));
        map_vsyscall();

        return 0;
}
void *kmap_atomic(struct page *page, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic(type);

        kmap = kmap_high_get(page);
        if (kmap)
                return kmap;

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
         * in place, so this TLB flush ensures the TLB is updated with the
         * new mapping.
         */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        BUG_ON(pte_val(*ptep) != pte_val(pte));
        BUG_ON(*depth <= 0);

        raw_local_irq_save(flags);
        (*depth)--;
        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
                set_pte_ext(ptep, saved_pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        if (!in_interrupt())
                preempt_enable();
}
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        if (!in_interrupt())
                preempt_disable();

        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
                *saved_pte = pte;
        } else {
                *saved_pte = *ptep;
                set_pte_ext(ptep, pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        return (void *)vaddr;
}
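These two helpers are meant to bracket cache maintenance on aliasing VIPT caches: the map call saves whatever pte occupied the per-CPU fixmap slot, and the unmap call restores it. A minimal sketch of the pairing follows (an assumption, not from the original source; the flush routine shown is hypothetical):

/*
 * Minimal pairing sketch for kmap_high_l1_vipt()/kunmap_high_l1_vipt().
 * The surrounding flush routine is hypothetical.
 */
static void flush_one_highmem_page(struct page *page)
{
        pte_t saved_pte;
        void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

        /* operate on the page through the temporary L1-coherent alias */
        __cpuc_flush_dcache_area(vaddr, PAGE_SIZE);

        kunmap_high_l1_vipt(page, saved_pte);
}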
void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int idx, type;

        if (kvaddr >= (void *)FIXADDR_START) {
                type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();

                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
                kmap_atomic_idx_pop();
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
static void kmap_remove_unused_cpu(int cpu)
{
        int start_idx, idx, type;
        int need_flush = 0;

        pagefault_disable();
        type = kmap_atomic_idx();
        start_idx = type + 1 + KM_TYPE_NR * cpu;

        for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) {
                unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
                pte_t ptep;

                ptep = get_top_pte(vaddr);
                if (ptep) {
                        set_top_pte(vaddr, __pte(0));
                        need_flush = 1;
                }
        }
        pagefault_enable();

        /* flush the caches and tlb if required */
        if (need_flush) {
                local_flush_tlb_all();
                flush_cache_all();
        }
}
void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        long idx, type;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
        __flush_cache_one(vaddr);
#else
        flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));

/* XXX Fix - Anton */
#if 0
        __flush_tlb_one(vaddr);
#else
        flush_tlb_all();
#endif

        return (void *) vaddr;
}
/*
 * Set up a temporary kernel mapping. The type and the CPU together
 * determine which fixed-mapping linear address is used to map the
 * requested page.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        inc_preempt_count();

        /*
         * If the page being mapped is not in high memory, no mapping is
         * needed; simply return its linear address.
         */
        if (!PageHighMem(page))
                return page_address(page);

        /* Determine the linear address from the type and the CPU. */
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        if (!pte_none(*(kmap_pte-idx)))
                BUG();
#endif
        /* Wire the linear address to the page via the page table entry. */
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
        /* Finally, flush the TLB; only then return the linear address. */
        __flush_tlb_one(vaddr);

        return (void *) vaddr;
}
int __init sysenter_setup(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        const void *vsyscall;
        size_t vsyscall_len;

        syscall_pages[0] = virt_to_page(syscall_page);

        gate_vma_init();

        printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                vsyscall = &vsyscall_int80_start;
                vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
        } else {
                vsyscall = &vsyscall_sysenter_start;
                vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
        }

        memcpy(syscall_page, vsyscall, vsyscall_len);
        relocate_vdso(syscall_page);

        return 0;
}
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }

        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

        /*
         * Protect against multiple unmaps.
         * Can't cache-flush an unmapped page.
         */
        if (kmap_atomic_maps[smp_processor_id()].map[type].vaddr) {
                kmap_atomic_maps[smp_processor_id()].map[type].page = (struct page *)0;
                kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *)0;
                flush_data_cache_page((unsigned long)vaddr);
        }

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * Force other mappings to Oops if they try to access
         * this pte without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
        local_flush_tlb_one(vaddr);
#endif

        pagefault_enable();
}
static void ce4100_serial_fixup(int port, struct uart_port *up,
                                unsigned short *capabilites)
{
#ifdef CONFIG_EARLY_PRINTK
        /*
         * Override the legacy port configuration that comes from
         * asm/serial.h. Using the ioport driver and then switching to the
         * PCI memory-mapped driver hangs the IOAPIC.
         */
        if (up->iotype != UPIO_MEM32) {
                up->uartclk = 14745600;
                up->mapbase = 0xdffe0200;
                set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
                                   up->mapbase & PAGE_MASK);
                up->membase =
                        (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
                up->membase += up->mapbase & ~PAGE_MASK;
                up->mapbase += port * 0x100;
                up->membase += port * 0x100;
                up->iotype = UPIO_MEM32;
                up->regshift = 2;
                up->irq = 4;
        }
#endif
        up->iobase = 0;
        up->serial_in = ce4100_mem_serial_in;
        up->serial_out = ce4100_mem_serial_out;
        *capabilites |= (1 << 12);
}
void mrfld_early_console_init(void)
{
        u32 ctrlr0 = 0;

        set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRFLD_REGBASE_SSP5);
        pssp = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
                        (MRFLD_REGBASE_SSP5 & (PAGE_SIZE - 1)));

        if (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_NONE)
                ssp_timing_wr = 1;

        /* mask interrupts, clear enable and set DSS config */
        /* SSPSCLK on active transfers only */
        if (intel_mid_identify_sim() != INTEL_MID_CPU_SIMULATION_SLE) {
                if (ssp_timing_wr) {
                        dw_writel(pssp, ctrl0, 0xc12c0f);
                        dw_writel(pssp, ctrl1, 0x0);
                } else {
                        dw_writel(pssp, ctrl0, 0xc0000f);
                        dw_writel(pssp, ctrl1, 0x10000000);
                }
        }

        dw_readl(pssp, sr);

        /* enable port */
        if (intel_mid_identify_sim() != INTEL_MID_CPU_SIMULATION_SLE) {
                ctrlr0 = dw_readl(pssp, ctrl0);
                ctrlr0 |= 0x80;
                dw_writel(pssp, ctrl0, ctrlr0);
        }
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < FIXADDR_START) { // FIXME
                dec_preempt_count();
                preempt_check_resched();
                return;
        }

        if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
                BUG();

        /*
         * Force other mappings to Oops if they try to access
         * this pte without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
        __flush_tlb_one(vaddr);
#endif

        dec_preempt_count();
        preempt_check_resched();
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }

        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
        __flush_cache_one(vaddr);
#else
        flush_cache_all();
#endif

        /*
         * Force other mappings to Oops if they try to access
         * this pte without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);

/* XXX Fix - Anton */
#if 0
        __flush_tlb_one(vaddr);
#else
        flush_tlb_all();
#endif
#endif

        pagefault_enable();
}
void *kmap_atomic(struct page *page, enum km_type type)
{
        unsigned long idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic(type);
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
        __flush_cache_one(vaddr);
#else
        flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));

/* XXX Fix - Anton */
#if 0
        __flush_tlb_one(vaddr);
#else
        flush_tlb_all();
#endif

        return (void *) vaddr;
}
void __init init_apic_mappings(void)
{
        unsigned long apic_phys;

        /*
         * If no local APIC can be found then set up a fake all
         * zeroes page to simulate the local APIC and another
         * one for the IO-APIC.
         */
        if (!smp_found_config && detect_init_APIC()) {
                apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
                apic_phys = __pa(apic_phys);
        } else
                apic_phys = mp_lapic_addr;

        set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
        Dprintk("mapped APIC to %08lx (%08lx)\n", APIC_BASE, apic_phys);

        /*
         * Fetch the APIC ID of the BSP in case we have a
         * default configuration (or the MP table is broken).
         */
        if (boot_cpu_id == -1U)
                boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));

#ifdef CONFIG_X86_IO_APIC
        {
                unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
                int i;

                for (i = 0; i < nr_ioapics; i++) {
                        if (smp_found_config) {
                                ioapic_phys = mp_ioapics[i].mpc_apicaddr;
                                if (!ioapic_phys) {
                                        printk(KERN_ERR
                                               "WARNING: bogus zero IO-APIC address found in MPTABLE, disabling IO-APIC support\n");
                                        smp_found_config = 0;
                                        skip_ioapic_setup = 1;
                                        goto fake_ioapic_page;
                                }
                        } else {
fake_ioapic_page:
                                ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
                                ioapic_phys = __pa(ioapic_phys);
                        }
                        set_fixmap_nocache(idx, ioapic_phys);
                        Dprintk("mapped IOAPIC to %08lx (%08lx)\n",
                                __fix_to_virt(idx), ioapic_phys);
                        idx++;
                }
        }
#endif
}
void __init map_vsyscall(void)
{
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
        unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

        __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
                     vsyscall_mode == NATIVE
                     ? PAGE_KERNEL_VSYSCALL
                     : PAGE_KERNEL_VVAR);
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
                     (unsigned long)VSYSCALL_START);

        __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
                     (unsigned long)VVAR_ADDRESS);
}
void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
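kmap_get_fixmap_pte() is a per-architecture helper; a typical definition looks like the sketch below (an assumption, not taken from the snippets here). Because fixmap slots grow downward in the virtual address space, the pte for slot FIX_KMAP_BEGIN + idx is then reachable as kmap_pte - idx, which is the indexing used throughout the kmap_atomic() variants above.

/*
 * Sketch of a typical kmap_get_fixmap_pte() (assumed; the real helper is
 * per-architecture): walk the kernel page tables down to the pte that
 * maps the given fixmap virtual address.
 */
static pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                                                       vaddr), vaddr), vaddr);
}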
static inline void set_fixmap_pte(int idx, pte_t pte)
{
        unsigned long vaddr = __fix_to_virt(idx);
        pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);

        set_pte_ext(ptep, pte, 0);
        local_flush_tlb_kernel_page(vaddr);
}
void __init init_apic_mappings(void)
{
        unsigned long apic_phys;

        /*
         * If no local APIC can be found then set up a fake all
         * zeroes page to simulate the local APIC and another
         * one for the IO-APIC.
         */
        if (!smp_found_config && detect_init_APIC()) {
                apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
                apic_phys = __pa(apic_phys);
        } else
                apic_phys = mp_lapic_addr;

        set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
        apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
                    APIC_BASE, apic_phys);

        /* Put local APIC into the resource map. */
        lapic_resource.start = apic_phys;
        lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
        insert_resource(&iomem_resource, &lapic_resource);

        /*
         * Fetch the APIC ID of the BSP in case we have a
         * default configuration (or the MP table is broken).
         */
        boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));

        {
                unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
                int i;
                struct resource *ioapic_res;

                ioapic_res = ioapic_setup_resources();
                for (i = 0; i < nr_ioapics; i++) {
                        if (smp_found_config) {
                                ioapic_phys = mp_ioapics[i].mpc_apicaddr;
                        } else {
                                ioapic_phys = (unsigned long)
                                        alloc_bootmem_pages(PAGE_SIZE);
                                ioapic_phys = __pa(ioapic_phys);
                        }
                        set_fixmap_nocache(idx, ioapic_phys);
                        apic_printk(APIC_VERBOSE,
                                    "mapped IOAPIC to %016lx (%016lx)\n",
                                    __fix_to_virt(idx), ioapic_phys);
                        idx++;

                        if (ioapic_res != NULL) {
                                ioapic_res->start = ioapic_phys;
                                ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
                                ioapic_res++;
                        }
                }
        }
}
static void __init highmem_init(void)
{
        pr_debug("%x\n", (u32)PKMAP_BASE);
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
}
static int __init vsyscall_init(void)
{
        BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

        on_each_cpu(cpu_vsyscall_init, NULL, 1);
        /* notifier priority > KVM */
        hotcpu_notifier(cpu_vsyscall_notifier, 30);

        return 0;
}
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
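The set_fixmap_nocache() calls used in several snippets above are thin wrappers around __set_fixmap(); a sketch of the usual definitions follows (assumed from the common x86 pattern, not taken from the original source):

/*
 * Sketch of the usual wrappers around __set_fixmap() (assumed): map a
 * fixmap slot with normal or uncached kernel protections.
 */
#define set_fixmap(idx, phys) \
        __set_fixmap(idx, phys, PAGE_KERNEL)
#define set_fixmap_nocache(idx, phys) \
        __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)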
static int __init parse_options(struct early_serial8250_device *device,
                                char *options)
{
        struct uart_port *port = &device->port;
        int mmio, length;

        if (!options)
                return -ENODEV;

        port->uartclk = BASE_BAUD * 16;
        if (!strncmp(options, "mmio,", 5)) {
                port->iotype = UPIO_MEM;
                port->mapbase = simple_strtoul(options + 5, &options, 0);
#ifdef CONFIG_FIX_EARLYCON_MEM
                set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
                                   port->mapbase & PAGE_MASK);
                port->membase =
                        (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
                port->membase += port->mapbase & ~PAGE_MASK;
#else
                port->membase = ioremap_nocache(port->mapbase, 64);
                if (!port->membase) {
                        printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
                               __func__, (unsigned long long)port->mapbase);
                        return -ENOMEM;
                }
#endif
                mmio = 1;
        } else if (!strncmp(options, "io,", 3)) {
                port->iotype = UPIO_PORT;
                port->iobase = simple_strtoul(options + 3, &options, 0);
                mmio = 0;
        } else
                return -EINVAL;

        options = strchr(options, ',');
        if (options) {
                options++;
                device->baud = simple_strtoul(options, NULL, 0);
                /* leave room for the terminating NUL */
                length = min(strcspn(options, " "),
                             sizeof(device->options) - 1);
                strncpy(device->options, options, length);
                device->options[length] = '\0';
        } else {
                device->baud = probe_baud(port);
                snprintf(device->options, sizeof(device->options), "%u",
                         device->baud);
        }

        printk(KERN_INFO "Early serial console at %s 0x%llx (options '%s')\n",
               mmio ? "MMIO" : "I/O port",
               mmio ? (unsigned long long) port->mapbase
                    : (unsigned long long) port->iobase,
               device->options);

        return 0;
}
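For reference, this parser consumes the option string of an early 8250 console given on the kernel command line; a typical invocation looks like the following (the address and baud rate are illustrative values):

        console=uart8250,mmio,0xff5e0000,115200n8

The "mmio,<addr>" part selects the fixmap/ioremap path above, and everything after the next comma is parsed as the baud/options string.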
void __init pagetable_init(void)
{
        unsigned long vaddr;
        pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
#endif

        /* Initialize the entire pgd. */
        pgd_init((unsigned long)swapper_pg_dir);
        pgd_init((unsigned long)swapper_pg_dir +
                 sizeof(pgd_t) * USER_PTRS_PER_PGD);

        pgd_base = swapper_pg_dir;

        /*
         * Fixed mappings:
         */
#ifdef CONFIG_BCM53000_HIGHMEM
        vaddr = __fix_to_virt(VALIAS_IDX(__end_of_fixed_addresses - 1)) & PMD_MASK;
#else
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
#endif
        fixrange_init(vaddr, 0, pgd_base);

#ifdef CONFIG_HIGHMEM
        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + __pgd_offset(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
#endif
}
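fixrange_init() is the helper that makes the fixmap and pkmap ptes exist before anything touches them. A sketch follows, modeled on the classic i386/MIPS implementation (an assumption, not taken from the snippet above); note that an end of 0 means "walk until the address wraps at the top of the address space", which matches the fixrange_init(vaddr, 0, pgd_base) call above.

/*
 * Sketch of fixrange_init() (assumed): pre-allocate page tables covering
 * [start, end) so that the fixmap and pkmap ptes exist before first use.
 */
static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j;
        unsigned long vaddr = start;

        i = __pgd_offset(vaddr);
        j = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                for ( ; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                set_pmd(pmd, __pmd((unsigned long)pte));
                                BUG_ON(pte != pte_offset_kernel(pmd, 0));
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}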