static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
    pgd_t *pgdp;
    pmd_t *pmdp;
    pte_t *ptep;

    unsigned long flags = 1; /* 1 = CB0-1 device */

    DEBUG_IOREMAP(("shmedia_mapiopage pa %08x va %08x\n", pa, va));

    pgdp = pgd_offset_k(va);
    if (pgd_none(*pgdp)) {
        pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
        if (pmdp == NULL)
            panic("No memory for pmd\n");
        /* clear exactly what was allocated */
        memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));
        set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
    }

    pmdp = pmd_offset(pgdp, va);
    if (pmd_none(*pmdp)) {
        ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
        if (ptep == NULL)
            panic("No memory for pte\n");
        clear_page((void *)ptep);
        set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
    }

    ptep = pte_offset(pmdp, va);
    set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT | _PAGE_READ |
                                           _PAGE_WRITE | _PAGE_DIRTY |
                                           _PAGE_ACCESSED | _PAGE_SHARED |
                                           flags)));
}
void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
    unsigned long i, bytes;

    bytes = nslabs << IO_TLB_SHIFT;

    io_tlb_nslabs = nslabs;
    io_tlb_start = tlb;
    io_tlb_end = io_tlb_start + bytes;

    /*
     * Allocate and initialize the free list array. This array is used
     * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
     * between io_tlb_start and io_tlb_end.
     */
    io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
    for (i = 0; i < io_tlb_nslabs; i++)
        io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
    io_tlb_index = 0;

    io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs *
                                                      sizeof(phys_addr_t)));

    /*
     * Get the overflow emergency buffer
     */
    io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
    if (!io_tlb_overflow_buffer)
        panic("Cannot allocate SWIOTLB overflow buffer!\n");
    if (verbose)
        swiotlb_print_info();
}
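The free-list initialization above is easier to see with concrete numbers. The following standalone sketch (not kernel code) reproduces the per-slot values, assuming IO_TLB_SEGSIZE is 128 and OFFSET(val, align) expands to ((val) & ((align) - 1)) as in lib/swiotlb.c; slot i starts out holding the count of free slots from i to the end of its IO_TLB_SEGSIZE-aligned segment, which is what the allocator consults when searching for a run of contiguous slots.

/* Standalone sketch of the swiotlb free-list values; assumptions noted above. */
#include <stdio.h>

#define IO_TLB_SEGSIZE      128
#define OFFSET(val, align)  ((unsigned long)((val) & ((align) - 1)))

int main(void)
{
    unsigned long idx[] = { 0, 1, 2, 126, 127, 128, 129 };
    unsigned long n;

    /* Prints 128, 127, 126, 2, 1, 128, 127: the count resets at every
     * 128-slot segment boundary, exactly like the loop in the function above. */
    for (n = 0; n < sizeof(idx) / sizeof(idx[0]); n++)
        printf("io_tlb_list[%3lu] = %lu\n", idx[n],
               IO_TLB_SEGSIZE - OFFSET(idx[n], IO_TLB_SEGSIZE));
    return 0;
}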
void __init page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd_base)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    int pgd_idx;
    unsigned long vaddr;

    vaddr = start & PMD_MASK;
    end = (end + PMD_SIZE - 1) & PMD_MASK;
    pgd_idx = pgd_index(vaddr);
    pgd = pgd_base + pgd_idx;

    for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, 0);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, 0);

        if (!pmd_present(*pmd)) {
            pte_t *pte_table;

            pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
            memset(pte_table, 0, PAGE_SIZE);
            pmd_populate_kernel(&init_mm, pmd, pte_table);
        }

        vaddr += PMD_SIZE;
    }
}
int __init init_maps(unsigned long physmem, unsigned long iomem,
                     unsigned long highmem)
{
    struct page *p, *map;
    unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
    unsigned long iomem_len, iomem_pages, total_len, total_pages;
    int i;

    phys_pages = physmem >> PAGE_SHIFT;
    phys_len = phys_pages * sizeof(struct page);

    iomem_pages = iomem >> PAGE_SHIFT;
    iomem_len = iomem_pages * sizeof(struct page);

    highmem_pages = highmem >> PAGE_SHIFT;
    highmem_len = highmem_pages * sizeof(struct page);

    total_pages = phys_pages + iomem_pages + highmem_pages;
    total_len = phys_len + iomem_len + highmem_len;

    map = alloc_bootmem_low_pages(total_len);
    if (map == NULL)
        return -ENOMEM;

    for (i = 0; i < total_pages; i++) {
        p = &map[i];
        memset(p, 0, sizeof(struct page));
        SetPageReserved(p);
        INIT_LIST_HEAD(&p->lru);
    }

    max_mapnr = total_pages;
    return 0;
}
/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
    unsigned long v, p, e;
    pgprot_t prot;
    pgd_t *pge;
    pud_t *pue;
    pmd_t *pme;
    pte_t *pte;
    /* These mark extents of read-only kernel pages...
     * ...from vmlinux.lds.S */
    struct memblock_region *region;

    v = PAGE_OFFSET;

    for_each_memblock(memory, region) {
        p = (u32) region->base & PAGE_MASK;
        e = p + (u32) region->size;

        v = (u32) __va(p);
        pge = pgd_offset_k(v);

        while (p < e) {
            int j;

            pue = pud_offset(pge, v);
            pme = pmd_offset(pue, v);

            if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
                panic("%s: OR1K kernel hardcoded for "
                      "two-level page tables", __func__);
            }

            /* Alloc one page for holding PTE's... */
            pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
            set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

            /* Fill the newly allocated page with PTE'S */
            for (j = 0; p < e && j < PTRS_PER_PGD;
                 v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
                if (v >= (u32) _e_kernel_ro ||
                    v < (u32) _s_kernel_ro)
                    prot = PAGE_KERNEL;
                else
                    prot = PAGE_KERNEL_RO;

                set_pte(pte, mk_pte_phys(p, prot));
            }

            pge++;
        }

        printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
               region->base, region->base + region->size);
    }
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
    if (pmd_none(*pmd)) {
        pte_t *page_table =
            (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

        set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
        if (page_table != pte_offset_kernel(pmd, 0))
            BUG();

        return page_table;
    }

    return pte_offset_kernel(pmd, 0);
}
static pte_t * __init kernel_page_table(void)
{
    pte_t *ptablep;

    ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

    clear_page(ptablep);
    __flush_page_to_ram(ptablep);
    flush_tlb_kernel_page(ptablep);
    nocache_page(ptablep);

    return ptablep;
}
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
    pud_t *pud;
    pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
    if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
        pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

        paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        pud = pud_offset(pgd, 0);
        BUG_ON(pmd_table != pmd_offset(pud, 0));
    }
/*
 * This contains the code to setup the memory map on an ARM2/ARM250/ARM3
 * machine. This is both processor & architecture specific, and requires
 * some more work to get it to fit into our separate processor and
 * architecture structure.
 */
void __init memtable_init(struct meminfo *mi)
{
    pte_t *pte;
    int i;

    page_nr = max_low_pfn;

    pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
    pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY);
    pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

    for (i = 1; i < PTRS_PER_PGD; i++)
        pgd_val(swapper_pg_dir[i]) = 0;
}
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
    trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
    /*
     * Has to be in very low memory so we can execute
     * real-mode AP code.
     */
    if (__pa(trampoline_base) >= 0x9F000)
        BUG();
    /*
     * Make the SMP trampoline executable:
     */
    trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
    pgd_t *pgd_k;
    pud_t *pud_k;
    pmd_t *pmd_k;
    pte_t *pte_k;

    pgd_k = pgd_offset_k(kvaddr);
    pud_k = pud_offset(pgd_k, kvaddr);
    pmd_k = pmd_offset(pud_k, kvaddr);

    pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
    pmd_populate_kernel(&init_mm, pmd_k, pte_k);

    return pte_k;
}
void __init atari_stram_reserve_pages(void *start_mem)
{
    /* Reserve the first page of ST-RAM if the kernel does not
     * already reside in ST-RAM. */
    if (!kernel_in_stram)
        reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);

    stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size);
    stram_pool.end = stram_pool.start + pool_size - 1;
    request_resource(&iomem_resource, &stram_pool);

    pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n",
             pool_size, &stram_pool);
}
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
    pud_t *pud;
    pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
    pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
    set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
    pud = pud_offset(pgd, 0);
    if (pmd_table != pmd_offset(pud, 0))
        BUG();
#else
    pud = pud_offset(pgd, 0);
    pmd_table = pmd_offset(pud, 0);
#endif

    return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
    if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
        pte_t *page_table = NULL;

        if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
            page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
            if (!page_table)
                page_table =
                    (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
        } else
            page_table = (pte_t *)alloc_low_page();

        paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
        set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
        BUG_ON(page_table != pte_offset_kernel(pmd, 0));
    }
/*
 * Called when 'bigphysarea=' is given on the command line.
 *
 * Strangely, bootmem is still active during this call, but it no longer
 * is by the time the initcalls run, so the needed memory is allocated
 * here instead of in bigphysarea_init().
 */
static int __init bigphysarea_setup(char *str)
{
    int par;

    if (get_option(&str, &par)) {
        bigphysarea_pages = par;
        /* Alloc the memory */
        bigphysarea = alloc_bootmem_low_pages(bigphysarea_pages << PAGE_SHIFT);
        if (!bigphysarea) {
            printk(KERN_CRIT
                   "bigphysarea: not enough memory for %d pages\n",
                   bigphysarea_pages);
            return -ENOMEM;
        }
        /* register the resource for it */
        mem_resource.start = bigphysarea;
        mem_resource.end = mem_resource.start +
                           (bigphysarea_pages << PAGE_SHIFT);
        request_resource(&iomem_resource, &mem_resource);
    }
    return 1;
}
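For context, an early-parameter handler like the one above is normally wired to the kernel command line with __setup(). The snippet does not show how the bigphysarea patch registers its handler, so the line below is an assumption for illustration only, not taken from that patch.

/* Assumed registration of the handler above; the actual patch may differ. */
__setup("bigphysarea=", bigphysarea_setup);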
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT. Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
    pmd_t *pmdp;
    pte_t *ptep;

    pmdp = pmd_offset(pgd_offset_k(virt), virt);

    if (pmd_none(*pmdp)) {
        pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
                                              sizeof(pte_t));

        ptep += PTRS_PER_PTE;

        set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
    }
    ptep = pte_offset(pmdp, virt);

    set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
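To make the double-sized allocation above concrete, here is a standalone sketch (not kernel code) of the layout it implies, assuming the classic ARM values of PTRS_PER_PTE == 256 and a 4-byte pte_t: the Linux pte table occupies the first half of the allocation, and `ptep += PTRS_PER_PTE` moves the pointer to the second, hardware half, which is what the pmd entry ends up referencing.

/* Standalone sketch of the two-table layout; sizes are assumptions noted above. */
#include <stdio.h>

#define PTRS_PER_PTE    256     /* assumed classic ARM value */

int main(void)
{
    unsigned int pte_size = 4;  /* assumed sizeof(pte_t) */
    unsigned int bytes = 2 * PTRS_PER_PTE * pte_size;

    printf("total allocation       : %u bytes\n", bytes);               /* 2048 */
    printf("Linux pte table offset : 0\n");
    printf("hardware table offset  : %u\n", PTRS_PER_PTE * pte_size);   /* 1024 */
    return 0;
}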
/*
 * called by console_init() in drivers/char/tty_io.c at boot-time.
 */
static int __init sclp_console_init(void)
{
    void *page;
    int i;
    int rc;

    if (!CONSOLE_IS_SCLP)
        return 0;
    rc = sclp_rw_init();
    if (rc)
        return rc;
    /* Allocate pages for output buffering */
    INIT_LIST_HEAD(&sclp_con_pages);
    for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
        page = alloc_bootmem_low_pages(PAGE_SIZE);
        list_add_tail((struct list_head *) page, &sclp_con_pages);
    }
    INIT_LIST_HEAD(&sclp_con_outqueue);
    spin_lock_init(&sclp_con_lock);
    sclp_con_buffer_count = 0;
    sclp_conbuf = NULL;
    init_timer(&sclp_con_timer);

    /* Set output format */
    if (MACHINE_IS_VM)
        /*
         * save 4 characters for the CPU number
         * written at start of each line by VM/CP
         */
        sclp_con_columns = 76;
    else
        sclp_con_columns = 80;
    sclp_con_width_htab = 8;

    /* enable printk-access to this driver */
    atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
    register_reboot_notifier(&on_reboot_nb);
    register_console(&sclp_console);
    return 0;
}
static pmd_t * __init kernel_ptr_table(void)
{
    if (!last_pgtable) {
        unsigned long pmd, last;
        int i;

        /* Find the last ptr table that was used in head.S and
         * reuse the remaining space in that page for further
         * ptr tables.
         */
        last = (unsigned long)kernel_pg_dir;
        for (i = 0; i < PTRS_PER_PGD; i++) {
            if (!pgd_present(kernel_pg_dir[i]))
                continue;
            pmd = __pgd_page(kernel_pg_dir[i]);
            if (pmd > last)
                last = pmd;
        }

        last_pgtable = (pmd_t *)last;
#ifdef DEBUG
        printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
    }

    last_pgtable += PTRS_PER_PMD;
    if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
        last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

        clear_page(last_pgtable);
        __flush_page_to_ram(last_pgtable);
        flush_tlb_kernel_page(last_pgtable);
        nocache_page(last_pgtable);
    }

    return last_pgtable;
}
void __init swiotlb_init_with_default_size(size_t default_size, int verbose)
{
    unsigned long bytes;

    if (!io_tlb_nslabs) {
        io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
    }

    bytes = io_tlb_nslabs << IO_TLB_SHIFT;

    /*
     * Get IO TLB memory from the low pages
     */
    io_tlb_start = alloc_bootmem_low_pages(bytes);
    if (!io_tlb_start)
        panic("Cannot allocate SWIOTLB buffer");

    swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
}

void __init swiotlb_init(int verbose)
{
    swiotlb_init_with_default_size(64 * (1 << 20), verbose);   /* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 */
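swiotlb_init_with_default_size() above turns a byte count into a slab count before asking bootmem for low pages. The standalone sketch below (not kernel code) walks through that arithmetic for the 64 MB default, assuming IO_TLB_SHIFT == 11 (2 KiB slabs) and IO_TLB_SEGSIZE == 128 as defined in lib/swiotlb.c.

/* Standalone sketch of the default-size slab arithmetic; assumptions noted above. */
#include <stdio.h>

#define IO_TLB_SHIFT    11
#define IO_TLB_SEGSIZE  128
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
    unsigned long default_size = 64 * (1 << 20);            /* 64 MiB */
    unsigned long nslabs = default_size >> IO_TLB_SHIFT;    /* 32768 slabs */

    /* Round the slab count up to a whole number of segments; 32768 is
     * already a multiple of 128, so the final request stays at 64 MiB. */
    nslabs = ALIGN(nslabs, IO_TLB_SEGSIZE);
    printf("nslabs = %lu, bytes = %lu\n", nslabs, nslabs << IO_TLB_SHIFT);
    return 0;
}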
    swiotlb_print_info();
}

void __init swiotlb_init_with_default_size(size_t default_size, int verbose)
{
    unsigned long bytes;

    if (!io_tlb_nslabs) {
        io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
    }

    bytes = io_tlb_nslabs << IO_TLB_SHIFT;

    io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
    if (!io_tlb_start)
        panic("Cannot allocate SWIOTLB buffer");

    swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
}

void __init swiotlb_init(int verbose)
{
    swiotlb_init_with_default_size(64 * (1 << 20), verbose);
}

int swiotlb_late_init_with_default_size(size_t default_size)
{
void __init setup_arch(char **cmdline_p)
{
    unsigned long kernel_end;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    struct e820entry *machine_e820;
    struct xen_memory_map memmap;
#endif

#ifdef CONFIG_XEN
    /* Register a call for panic conditions. */
    atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);

    ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
    kernel_end = 0;     /* dummy */
    screen_info = SCREEN_INFO;

    if (xen_start_info->flags & SIF_INITDOMAIN) {
        /* This is drawn from a dump from vgacon:startup in
         * standard Linux. */
        screen_info.orig_video_mode = 3;
        screen_info.orig_video_isVGA = 1;
        screen_info.orig_video_lines = 25;
        screen_info.orig_video_cols = 80;
        screen_info.orig_video_ega_bx = 3;
        screen_info.orig_video_points = 16;
    } else
        screen_info.orig_video_isVGA = 0;

    edid_info = EDID_INFO;
    saved_video_mode = SAVED_VIDEO_MODE;
    bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
    rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
    rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

    setup_xen_features();

    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

    ARCH_SETUP
#else
    ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
    screen_info = SCREEN_INFO;
    edid_info = EDID_INFO;
    saved_video_mode = SAVED_VIDEO_MODE;
    bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
    rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
    rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#endif  /* !CONFIG_XEN */

    setup_memory_region();
    copy_edd();

    if (!MOUNT_ROOT_RDONLY)
        root_mountflags &= ~MS_RDONLY;
    init_mm.start_code = (unsigned long) &_text;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) &_end;

#ifndef CONFIG_XEN
    code_resource.start = virt_to_phys(&_text);
    code_resource.end = virt_to_phys(&_etext) - 1;
    data_resource.start = virt_to_phys(&_etext);
    data_resource.end = virt_to_phys(&_edata) - 1;
#endif

    parse_cmdline_early(cmdline_p);

    early_identify_cpu(&boot_cpu_data);

    /*
     * partially used pages are not usable - thus
     * we are rounding upwards:
     */
    end_pfn = e820_end_of_ram();
    num_physpages = end_pfn;        /* for pfn_valid */

    check_efer();

#ifndef CONFIG_XEN
    discover_ebda();
#endif

    init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
    /*
     * Parse SRAT to discover nodes.
     */
    acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
    numa_initmem_init(0, end_pfn);
#else
    contig_initmem_init(0, end_pfn);
#endif

    /* Reserve direct mapping */
    reserve_bootmem_generic(table_start << PAGE_SHIFT,
                            (table_end - table_start) << PAGE_SHIFT);

    /* reserve kernel */
    kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
    reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

#ifdef CONFIG_XEN
    /* reserve physmap, start info and initial page tables */
    reserve_bootmem(kernel_end, (table_start << PAGE_SHIFT) - kernel_end);
#else
    /*
     * reserve physical page 0 - it's a special BIOS page on many boxes,
     * enabling clean reboots, SMP operation, laptop functions.
     */
    reserve_bootmem_generic(0, PAGE_SIZE);

    /* reserve ebda region */
    if (ebda_addr)
        reserve_bootmem_generic(ebda_addr, ebda_size);
#endif

#ifdef CONFIG_SMP
    /*
     * But first pinch a few for the stack/trampoline stuff
     * FIXME: Don't need the extra page at 4K, but need to fix
     * trampoline before removing it.
     * (see the GDT stuff)
     */
    reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

    /* Reserve SMP trampoline */
    reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
    /*
     * Reserve low memory region for sleep support.
     */
    acpi_reserve_bootmem();
#endif

#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
    if (xen_start_info->mod_start) {
        if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
            /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
            initrd_start = INITRD_START + PAGE_OFFSET;
            initrd_end = initrd_start + INITRD_SIZE;
            initrd_below_start_ok = 1;
        } else {
            printk(KERN_ERR "initrd extends beyond end of memory "
                   "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                   (unsigned long)(INITRD_START + INITRD_SIZE),
                   (unsigned long)(end_pfn << PAGE_SHIFT));
            initrd_start = 0;
        }
    }
#endif
#else   /* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
    if (LOADER_TYPE && INITRD_START) {
        if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
            reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
            initrd_start =
                INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
            initrd_end = initrd_start + INITRD_SIZE;
        } else {
            printk(KERN_ERR "initrd extends beyond end of memory "
                   "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                   (unsigned long)(INITRD_START + INITRD_SIZE),
                   (unsigned long)(end_pfn << PAGE_SHIFT));
            initrd_start = 0;
        }
    }
#endif
#endif  /* !CONFIG_XEN */

#ifdef CONFIG_KEXEC
    if (crashk_res.start != crashk_res.end) {
        reserve_bootmem(crashk_res.start,
                        crashk_res.end - crashk_res.start + 1);
    }
#endif

    paging_init();

#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * Find and reserve possible boot-time SMP configuration:
     */
    find_smp_config();
#endif

#ifdef CONFIG_XEN
    {
        int i, j, k, fpp;
        unsigned long va;

        /* 'Initial mapping' of initrd must be destroyed. */
        for (va = xen_start_info->mod_start;
             va < (xen_start_info->mod_start + xen_start_info->mod_len);
             va += PAGE_SIZE) {
            HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
        }

        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
            /* Make sure we have a large enough P->M table. */
            phys_to_machine_mapping = alloc_bootmem(
                end_pfn * sizeof(unsigned long));
            memset(phys_to_machine_mapping, ~0,
                   end_pfn * sizeof(unsigned long));
            memcpy(phys_to_machine_mapping,
                   (unsigned long *)xen_start_info->mfn_list,
                   xen_start_info->nr_pages * sizeof(unsigned long));
            free_bootmem(
                __pa(xen_start_info->mfn_list),
                PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
                                sizeof(unsigned long))));

            /* Destroyed 'initial mapping' of old p2m table. */
            for (va = xen_start_info->mfn_list;
                 va < (xen_start_info->mfn_list +
                       (xen_start_info->nr_pages * sizeof(unsigned long)));
                 va += PAGE_SIZE) {
                HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
            }

            /*
             * Initialise the list of the frames that specify the
             * list of frames that make up the p2m table. Used by
             * save/restore.
             */
            pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
            HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(pfn_to_mfn_frame_list_list);

            fpp = PAGE_SIZE / sizeof(unsigned long);
            for (i = 0, j = 0, k = -1; i < end_pfn; i += fpp, j++) {
                if ((j % fpp) == 0) {
                    k++;
                    BUG_ON(k >= fpp);
                    pfn_to_mfn_frame_list[k] = alloc_bootmem(PAGE_SIZE);
                    pfn_to_mfn_frame_list_list[k] =
                        virt_to_mfn(pfn_to_mfn_frame_list[k]);
                    j = 0;
                }
                pfn_to_mfn_frame_list[k][j] =
                    virt_to_mfn(&phys_to_machine_mapping[i]);
            }
            HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
        }
    }

    if (xen_start_info->flags & SIF_INITDOMAIN)
        dmi_scan_machine();

    if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
        acpi_disabled = 1;
#ifdef CONFIG_ACPI
        acpi_ht = 0;
#endif
    }
#endif

#ifndef CONFIG_XEN
    check_ioapic();
#endif

    zap_low_mappings(0);

    /*
     * set this early, so we dont allocate cpu0
     * if MADT list doesnt list BSP first
     * mpparse.c/MP_processor_info() allocates logical cpu numbers.
     */
    cpu_set(0, cpu_present_map);

#ifdef CONFIG_ACPI
    /*
     * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
     * Call this early for SRAT node setup.
     */
    acpi_boot_table_init();

    /*
     * Read APIC and some other early information from ACPI tables.
     */
    acpi_boot_init();
#endif

    init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * get boot-time SMP configuration:
     */
    if (smp_found_config)
        get_smp_config();
#ifndef CONFIG_XEN
    init_apic_mappings();
#endif
#endif

#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
    prefill_possible_map();
#endif

    /*
     * Request address space for all standard RAM and ROM resources
     * and also for regions reported as reserved by the e820.
     */
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    probe_roms();
    if (xen_start_info->flags & SIF_INITDOMAIN) {
        machine_e820 = alloc_bootmem_low_pages(PAGE_SIZE);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, machine_e820);

        BUG_ON(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap));

        e820_reserve_resources(machine_e820, memmap.nr_entries);
    }
#elif !defined(CONFIG_XEN)
    probe_roms();
    e820_reserve_resources(e820.map, e820.nr_map);
#endif

    request_resource(&iomem_resource, &video_ram_resource);

    {
        unsigned i;
        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < STANDARD_IO_RESOURCES; i++)
            request_resource(&ioport_resource, &standard_io_resources[i]);
    }

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    if (xen_start_info->flags & SIF_INITDOMAIN) {
        e820_setup_gap(machine_e820, memmap.nr_entries);
        free_bootmem(__pa(machine_e820), PAGE_SIZE);
    }
#elif !defined(CONFIG_XEN)
    e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_GART_IOMMU
    iommu_hole_init();
#endif

#ifdef CONFIG_XEN
    {
        struct physdev_set_iopl set_iopl;

        set_iopl.iopl = 1;
        HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);

        if (xen_start_info->flags & SIF_INITDOMAIN) {
            if (!(xen_start_info->flags & SIF_PRIVILEGED))
                panic("Xen granted us console access "
                      "but not privileged status");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
            conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
            conswitchp = &dummy_con;
#endif
#endif
        } else {
            extern int console_use_vt;
            console_use_vt = 0;
        }
    }
#else   /* CONFIG_XEN */
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
    conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
#endif
#endif
#endif  /* !CONFIG_XEN */
}
void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
    return alloc_bootmem_low_pages(size);
}
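swiotlb_alloc_boot() above is only a default: the __weak attribute lets an architecture provide its own definition, which the linker then prefers. The variant below is a hypothetical sketch of such an override (the 1 GiB limit and the panic message are made up for illustration); it is not taken from any real port.

/* Hypothetical architecture override of the __weak default above. */
void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
    void *tlb = alloc_bootmem_low_pages(size);

    /* Made-up platform rule: the bounce buffer must sit below 1 GiB so
     * that every DMA engine on this imaginary bus can reach it. */
    if (tlb && __pa(tlb) + size > 0x40000000UL)
        panic("swiotlb: bounce buffer not DMA-addressable");

    return tlb;
}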
/* MM here stands for multi-media */
void bcm21553_mm_mem_init(void)
{
    int ret, size;
    uint32_t v3d_mem_phys_base = CONFIG_MM_MEMPOOL_BASE_ADDR;

#if (CONFIG_MM_MEMPOOL_BASE_ADDR <= 0)
#if defined (CONFIG_BMEM)
    bmem_phys_base = get_mmpool_base(BMEM_SIZE);
#else
#ifdef CONFIG_GE_WRAP
    ge_mem_phys_base = get_mmpool_base(gememalloc_SIZE);
#endif
#endif

#ifdef CONFIG_BRCM_V3D
    size = v3d_mempool_size;
#if defined (CONFIG_BMEM)
    size += BMEM_SIZE;
#else
#ifdef CONFIG_GE_WRAP
    size += gememalloc_SIZE;
#endif
#endif
    v3d_mem_phys_base = get_mmpool_base(size);
#endif

#else
#if defined(CONFIG_BRCM_V3D)
#if defined (CONFIG_BMEM)
    bmem_phys_base += v3d_mempool_size;
#else
#if defined(CONFIG_GE_WRAP)
    ge_mem_phys_base += v3d_mempool_size;
#endif
#endif
#endif
#endif

#ifdef CONFIG_BRCM_V3D
    if (v3d_mempool_size) {
        ret = reserve_bootmem(v3d_mem_phys_base, v3d_mempool_size,
                              BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
            printk(KERN_ERR "Failed to allocate memory for v3d\n");
            return;
        }
        v3d_mempool_base = phys_to_virt(v3d_mem_phys_base);
        pr_info("v3d phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
                v3d_mem_phys_base, (uint32_t)v3d_mempool_base,
                (int)v3d_mempool_size);
    } else {
        v3d_mempool_base = NULL;
        v3d_mem_phys_base = 0;
    }
#endif

#if defined (CONFIG_BMEM)
    ret = reserve_bootmem(bmem_phys_base, BMEM_SIZE, BOOTMEM_EXCLUSIVE);
    if (ret < 0) {
        printk(KERN_ERR "Failed to allocate memory for ge\n");
        return;
    }
    bmem_mempool_base = phys_to_virt(bmem_phys_base);
    pr_info("bmem phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
            bmem_phys_base, (uint32_t)bmem_mempool_base, BMEM_SIZE);
#else
#ifdef CONFIG_GE_WRAP
    ret = reserve_bootmem(ge_mem_phys_base, gememalloc_SIZE,
                          BOOTMEM_EXCLUSIVE);
    if (ret < 0) {
        printk(KERN_ERR "Failed to allocate memory for ge\n");
        return;
    }
    ge_mempool_base = phys_to_virt(ge_mem_phys_base);
    pr_info("ge phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
            ge_mem_phys_base, (uint32_t)ge_mempool_base, gememalloc_SIZE);
#endif
#endif

#if defined (CONFIG_BMEM)
#ifdef CONFIG_HANTRO_WRAP
    memalloc_mempool_base = alloc_bootmem_low_pages(2 * PAGE_SIZE);
    pr_info("memalloc(hantro) phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
            (uint32_t)virt_to_phys(memalloc_mempool_base),
            (uint32_t)memalloc_mempool_base, (uint32_t)(2 * PAGE_SIZE));
#endif
    cam_mempool_base = alloc_bootmem_low_pages(2 * PAGE_SIZE);
    pr_info("pmem(camera) phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
            (uint32_t)virt_to_phys(cam_mempool_base),
            (uint32_t)cam_mempool_base, (uint32_t)(2 * PAGE_SIZE));
#else
#ifdef CONFIG_HANTRO_WRAP
    memalloc_mempool_base = alloc_bootmem_low_pages(MEMALLOC_SIZE + SZ_2M);
#endif
    cam_mempool_base = alloc_bootmem_low_pages(1024 * 1024 * 8);
#endif
}
 * structures for the software IO TLB used to implement the DMA API.
 */
void swiotlb_init_with_default_size(size_t default_size)
{
    unsigned long i;

    if (!io_tlb_nslabs) {
        io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
    }

    /*
     * Get IO TLB memory from the low pages
     */
    io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
    if (!io_tlb_start)
        panic("Cannot allocate SWIOTLB buffer");
    io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);

    /*
     * Allocate and initialize the free list array. This array is used
     * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
     * between io_tlb_start and io_tlb_end.
     */
    io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
    for (i = 0; i < io_tlb_nslabs; i++)
        io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
    io_tlb_index = 0;

    io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));