/*
 * 3215 console initialization code called from console_init().
 * NOTE: This is called before kmalloc is available, so all memory
 * comes from the boot memory allocator (alloc_bootmem_low).
 *
 * Returns 0 on success, -ENODEV if the 3215 is not the console or
 * no console device could be brought up.
 */
static int __init con3215_init(void)
{
	struct ccw_device *cdev;
	struct raw3215_info *raw;
	struct raw3215_req *req;
	int i;

	/* Check if 3215 is to be the console */
	if (!CONSOLE_IS_3215)
		return -ENODEV;

	/* Set the console mode for VM */
	if (MACHINE_IS_VM) {
		cpcmd("TERM CONMODE 3215", NULL, 0);
		cpcmd("TERM AUTOCR OFF", NULL, 0);
	}

	/*
	 * allocate 3215 request structures and chain them onto the
	 * free list; raw3215_freelist serves as a simple LIFO pool.
	 */
	raw3215_freelist = NULL;
	spin_lock_init(&raw3215_freelist_lock);
	for (i = 0; i < NR_3215_REQ; i++) {
		req = (struct raw3215_req *)
			alloc_bootmem_low(sizeof(struct raw3215_req));
		req->next = raw3215_freelist;
		raw3215_freelist = req;
	}

	cdev = ccw_device_probe_console();
	if (!cdev)
		return -ENODEV;

	raw3215[0] = raw = (struct raw3215_info *)
		alloc_bootmem_low(sizeof(struct raw3215_info));
	memset(raw, 0, sizeof(struct raw3215_info));
	raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
	raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
	/* wire the device and its interrupt handler up before startup */
	raw->cdev = cdev;
	raw->lock = get_ccwdev_lock(cdev);
	cdev->dev.driver_data = raw;
	cdev->handler = raw3215_irq;

	raw->flags |= RAW3215_FIXED;
	tasklet_init(&raw->tasklet,
		     (void (*)(unsigned long)) raw3215_tasklet,
		     (unsigned long) raw);
	init_waitqueue_head(&raw->empty_wait);

	/* Request the console irq */
	if (raw3215_startup(raw) != 0) {
		/* startup failed: return the boot memory and forget the device */
		free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
		free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
		free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
		raw3215[0] = NULL;
		printk("Couldn't find a 3215 console device\n");
		return -ENODEV;
	}
	register_console(&con3215);
	return 0;
}
/*
 * 3215 console initialization code called from console_init().
 * NOTE: This is called before kmalloc is available, so all memory
 * comes from the boot memory allocator (alloc_bootmem_low).
 */
void __init con3215_init(void)
{
	raw3215_info *raw;
	raw3215_req *req;
	int irq;
	int i;

	/* Check if 3215 is to be the console */
	if (!CONSOLE_IS_3215)
		return;
	/*
	 * NOTE(review): the result of this first lookup is never used; it
	 * is presumably called for its probing side effects — confirm
	 * against raw3215_find_dev() before removing.
	 */
	irq = raw3215_find_dev(0);

	/* Set the console mode for VM */
	if (MACHINE_IS_VM) {
		cpcmd("TERM CONMODE 3215", NULL, 0);
		cpcmd("TERM AUTOCR OFF", NULL, 0);
	}

	/* allocate 3215 request structures and chain them onto the free list */
	raw3215_freelist = NULL;
	spin_lock_init(&raw3215_freelist_lock);
	for (i = 0; i < NR_3215_REQ; i++) {
		req = (raw3215_req *) alloc_bootmem_low(sizeof(raw3215_req));
		req->next = raw3215_freelist;
		raw3215_freelist = req;
	}
	ctrlchar_init();
#ifdef CONFIG_TN3215_CONSOLE
	raw3215[0] = raw = (raw3215_info *)
		alloc_bootmem_low(sizeof(raw3215_info));
	memset(raw, 0, sizeof(raw3215_info));
	raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
	raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
	/* Find the first console */
	raw->irq = raw3215_find_dev(0);
	raw->flags |= RAW3215_FIXED;
	raw->tqueue.routine = raw3215_softint;
	raw->tqueue.data = raw;
	init_waitqueue_head(&raw->empty_wait);
	/* Request the console irq; mark the device dead on failure */
	if ( raw3215_startup(raw) != 0 )
		raw->irq = -1;
	if (raw->irq != -1) {
		register_console(&con3215);
	} else {
		/* startup failed: return the boot memory and forget the device */
		free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
		free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
		free_bootmem((unsigned long) raw, sizeof(raw3215_info));
		raw3215[0] = NULL;
		printk("Couldn't find a 3215 console device\n");
	}
#endif
}
/*
 * Register the kernel's code/data/bss resources and every usable
 * memory chunk with the iomem resource tree, splitting any standard
 * resource that straddles a chunk boundary.
 */
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	int i, j;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;
	bss_resource.start = (unsigned long) &__bss_start;
	bss_resource.end = (unsigned long) &__bss_stop - 1;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		/* OLDMEM/CRASHK chunks are not represented in /proc/iomem */
		if (memory_chunk[i].type == CHUNK_OLDMEM ||
		    memory_chunk[i].type == CHUNK_CRASHK)
			continue;
		res = alloc_bootmem_low(sizeof(*res));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		/*
		 * Note: the unreachable "case CHUNK_CRASHK:" label has been
		 * removed — that chunk type is already skipped above.
		 */
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = res->start + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				/*
				 * The standard resource spills past this
				 * chunk: register the overlapping part here
				 * and leave the rest for the next chunk.
				 */
				sub_res = alloc_bootmem_low(sizeof(*sub_res));
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
}
/* * This is main public interface: somehow allocate a ST-RAM block * * - If we're before mem_init(), we have to make a static allocation. The * region is taken in the kernel data area (if the kernel is in ST-RAM) or * from the start of ST-RAM (if the kernel is in TT-RAM) and added to the * rsvd_stram_* region. The ST-RAM is somewhere in the middle of kernel * address space in the latter case. * * - If mem_init() already has been called, try with __get_dma_pages(). * This has the disadvantage that it's very hard to get more than 1 page, * and it is likely to fail :-( * */ void *atari_stram_alloc(long size, const char *owner) { void *addr = NULL; BLOCK *block; int flags; DPRINTK("atari_stram_alloc(size=%08lx,owner=%s)\n", size, owner); if (!mem_init_done) return alloc_bootmem_low(size); else { /* After mem_init(): can only resort to __get_dma_pages() */ addr = (void *)__get_dma_pages(GFP_KERNEL, get_order(size)); flags = BLOCK_GFP; DPRINTK( "atari_stram_alloc: after mem_init, " "get_pages=%p\n", addr ); } if (addr) { if (!(block = add_region( addr, size ))) { /* out of memory for BLOCK structure :-( */ DPRINTK( "atari_stram_alloc: out of mem for BLOCK -- " "freeing again\n" ); free_pages((unsigned long)addr, get_order(size)); return( NULL ); } block->owner = owner; block->flags |= flags; } return( addr ); }
/**
 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
 *
 * We allocate two pages in low memory for the wakeup
 * routine for when we come back from a sleep state. The
 * runtime allocator allows specification of <16M pages, but not
 * <1M pages.
 */
void __init acpi_reserve_bootmem(void)
{
	/*
	 * Verify the wakeup code fits *before* reserving low memory for
	 * it.  The old code allocated unconditionally and only warned,
	 * leaving a wakeup address installed that was guaranteed to
	 * crash the machine on the first suspend attempt.
	 */
	if ((&wakeup_end - &wakeup_start) > (PAGE_SIZE*2)) {
		printk(KERN_CRIT
		       "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
		return;
	}

	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2);
}
/*
 * swiotlb_init_with_tbl() - initialize the software IO TLB state with a
 * caller-supplied bounce buffer.
 * @tlb:     pre-allocated buffer backing the IO TLB slabs
 * @nslabs:  number of IO_TLB_SHIFT-sized slabs contained in @tlb
 * @verbose: non-zero to print the swiotlb placement via swiotlb_print_info()
 */
void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = tlb;
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array. This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 *
	 * NOTE(review): io_tlb_list and io_tlb_orig_addr are not
	 * NULL-checked, unlike the overflow buffer below — presumably
	 * alloc_bootmem() panics on failure; confirm.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	/* one original address per slab, for unmap/sync lookups */
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");
	if (verbose)
		swiotlb_print_info();
}
/*
 * Register a "System RAM" (or "reserved", for nomap) resource for each
 * memblock region and nest the kernel code/data resources inside the
 * region that contains them.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(__init_begin - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		if (memblock_is_nomap(region)) {
			/* nomap regions are present but must not be touched */
			res->name = "reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		/* nest code/data under the region that fully contains them */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
	/* NOTE(review): the function's closing brace appears truncated in this chunk. */
/**
 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
 *
 * We allocate a page in low memory for the wakeup
 * routine for when we come back from a sleep state. The
 * runtime allocator allows specification of <16M pages, but not
 * <1M pages.  Skipped entirely when sleeping is handled by the
 * paravirt host (CONFIG_ACPI_PV_SLEEP).
 */
void __init acpi_reserve_bootmem(void)
{
#ifndef CONFIG_ACPI_PV_SLEEP
	void *wakeup_page = alloc_bootmem_low(PAGE_SIZE);

	acpi_wakeup_address = (unsigned long)wakeup_page;
	/* warn if the real-mode wakeup stub does not fit in one page */
	if (PAGE_SIZE < (&wakeup_end - &wakeup_start))
		printk(KERN_CRIT
		       "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
#endif
}
/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int cpu;

	/*
	 * alloc_bootmem_low() hands back zeroed memory, so every mask
	 * starts out empty and only needs its own CPU bit set.
	 */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		cpu_set(cpu, cpumask_of_cpu_map[cpu]);
}
/**
 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
 *
 * We allocate a page from the first 1MB of memory for the wakeup
 * routine for when we come back from a sleep state. The
 * runtime allocator allows specification of <16MB pages, but not
 * <1MB pages.
 */
void __init acpi_reserve_bootmem(void)
{
	/* refuse to enable S3 if the wakeup stub will not fit in one page */
	if (PAGE_SIZE < (&wakeup_end - &wakeup_start)) {
		printk(KERN_ERR "ACPI: Wakeup code way too big, S3 disabled.\n");
		return;
	}

	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
	if (acpi_wakeup_address == 0)
		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
}
/*
 * Register each memory bank as a "System RAM" resource and nest the
 * kernel code/data resources inside the bank that contains them.
 */
static void __init request_standard_resources(struct meminfo *mi)
{
	struct resource *res;
	int i;

	kernel_code.start = init_mm.start_code;
	kernel_code.end = init_mm.end_code - 1;
#ifdef CONFIG_XIP_KERNEL
	/* XIP: code runs from ROM, so data begins at the linker's data start */
	kernel_data.start = init_mm.start_data;
#else
	kernel_data.start = init_mm.end_code;
#endif
	kernel_data.end = init_mm.brk - 1;

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = mi->bank[i].start;
		virt_end = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = virt_start;
		res->end = virt_end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest code/data under the bank that fully contains them */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	/* FIXME - needed?
	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}*/

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp1 or lp2
	 */
	if (0) /* FIXME - need to do this for A5k at least */
		request_resource(&ioport_resource, &lp0);
}
/*
 * Register each memory bank as a "System RAM" resource, nest the
 * kernel code/data resources inside the bank containing them, and
 * claim the machine-specific video RAM / parallel port resources.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int bank;

	kernel_code.start = __virt_to_phys(init_mm.start_code);
	kernel_code.end = __virt_to_phys(init_mm.end_code - 1);
	kernel_data.start = __virt_to_phys(init_mm.end_code);
	kernel_data.end = __virt_to_phys(init_mm.brk - 1);

	for (bank = 0; bank < mi->nr_banks; bank++) {
		unsigned long start, end;

		if (!mi->bank[bank].size)
			continue;

		start = __phys_to_virt(mi->bank[bank].start);
		end = start + mi->bank[bank].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __virt_to_phys(start);
		res->end = __virt_to_phys(end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest code/data under the bank that fully contains them */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
/*
 * Register every memblock memory region as "System RAM" and nest the
 * kernel's standard resources (code/data/bss) inside them, splitting
 * a standard resource when it straddles a region boundary.
 */
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	struct memblock_region *reg;
	int j;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;
	bss_resource.start = (unsigned long) &__bss_start;
	bss_resource.end = (unsigned long) &__bss_stop - 1;

	for_each_memblock(memory, reg) {
		res = alloc_bootmem_low(sizeof(*res));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		res->name = "System RAM";
		res->start = reg->base;
		res->end = reg->base + reg->size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			/* skip resources that do not start inside this region */
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				/*
				 * The standard resource spills past this
				 * region: register the overlapping part here
				 * and leave the remainder for the next one.
				 */
				sub_res = alloc_bootmem_low(sizeof(*sub_res));
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
	/* NOTE(review): the function's closing brace appears truncated in this chunk. */
/*
 * Reserve boot memory for every entry in reservemem_devs[] that asks
 * for a non-zero region, recording the physical address of the
 * reservation in dev->paddr.
 */
void imap_mem_reserve(void)
{
	struct imap_reservemem_device *dev;
	int i;

	/* use the kernel ARRAY_SIZE macro instead of a hand-rolled
	 * sizeof/sizeof expression */
	for (i = 0; i < ARRAY_SIZE(reservemem_devs); i++) {
		dev = &reservemem_devs[i];
		if (dev->size <= 0)
			continue;
		/* alloc_bootmem_low() returns a virtual address;
		 * consumers of dev->paddr want the physical one */
		dev->paddr = virt_to_phys(alloc_bootmem_low(dev->size));
		printk(KERN_INFO \
		       "iMAPx200: %lu bytes SDRAM reserved "
		       "for %s at 0x%08x\n",
		       (unsigned long) dev->size, dev->name, dev->paddr);
	}
}
/**
 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
 *
 * We allocate a page from the first 1MB of memory for the wakeup
 * routine for when we come back from a sleep state. The
 * runtime allocator allows specification of <16MB pages, but not
 * <1MB pages.
 */
void __init acpi_reserve_bootmem(void)
{
	unsigned long mem;

	/* refuse to enable S3 if the real-mode stub will not fit */
	if (WAKEUP_SIZE < (&wakeup_code_end - &wakeup_code_start)) {
		printk(KERN_ERR "ACPI: Wakeup code way too big, S3 disabled.\n");
		return;
	}

	mem = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
	if (!mem) {
		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
		return;
	}

	acpi_realmode = mem;
	acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
}
/*
 * Reserve boot memory for every media device in s3c_mdevs[] that asks
 * for a non-zero region, recording the physical address of the
 * reservation in mdev->paddr.
 */
void s3c64xx_reserve_bootmem(void)
{
	struct s3c_media_device *mdev;
	unsigned int idx;

	for (idx = 0; idx < sizeof(s3c_mdevs) / sizeof(s3c_mdevs[0]); idx++) {
		mdev = &s3c_mdevs[idx];
		if (mdev->memsize <= 0)
			continue;
		/* alloc_bootmem_low() returns a virtual address;
		 * consumers of mdev->paddr want the physical one */
		mdev->paddr = virt_to_phys(alloc_bootmem_low(mdev->memsize));
		printk(KERN_INFO \
		       "s3c64xx: %lu bytes SDRAM reserved "
		       "for %s at 0x%08x\n",
		       (unsigned long) mdev->memsize, \
		       mdev->name, mdev->paddr);
	}
}
/*
 * Allocate one resource per memblock region in a static array
 * (standard_resources), register each as "System RAM"/"reserved",
 * and nest the kernel code/data (and crash kernel) resources inside
 * the regions that contain them.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;

	kernel_code.start = __pa_symbol(_text);
	kernel_code.end = __pa_symbol(__init_begin - 1);
	kernel_data.start = __pa_symbol(_sdata);
	kernel_data.end = __pa_symbol(_end - 1);

	/* one resource slot per memblock region, allocated up front */
	num_standard_resources = memblock.memory.cnt;
	standard_resources = alloc_bootmem_low(num_standard_resources *
					       sizeof(*standard_resources));

	for_each_memblock(memory, region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			/* nomap regions are present but must not be touched */
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		/* nest code/data under the region that fully contains them */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end &&
		    crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
	/* NOTE(review): the function's closing brace appears truncated in this chunk. */
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, highest = 0;
	cpumask_t *map;

	/* derive nr_node_ids from the highest possible node, if unset */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			highest = node;
		nr_node_ids = highest + 1;
	}

	/* one cpumask per node id */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* publish: node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
/*
 * Register every usable memory chunk with the iomem resource tree and
 * nest the kernel code/data resources inside them, splitting either
 * resource when it straddles a chunk boundary.
 */
static void __init setup_resources(void)
{
	struct resource *res, *sub_res;
	int i;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);

		/*
		 * code resource straddles the end of this chunk: register
		 * the overlapping part here, leave the rest for the next
		 * chunk by advancing code_resource.start.
		 */
		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &code_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			code_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}
		/* code resource fits entirely inside this chunk */
		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end <= res->end)
			request_resource(res, &code_resource);
		/* same split logic for the data resource */
		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &data_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			data_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}
		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end <= res->end)
			request_resource(res, &data_resource);
	}
}
/**
 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
 *
 * We allocate a page in low memory for the wakeup
 * routine for when we come back from a sleep state. The
 * runtime allocator allows specification of <16M pages, but not
 * <1M pages.
 */
void __init acpi_reserve_bootmem(void)
{
	void *wakeup_mem = alloc_bootmem_low(PAGE_SIZE);

	acpi_wakeup_address = (unsigned long)wakeup_mem;
	if (!acpi_wakeup_address)
		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
}
/*
 * start_kernel - architecture-independent kernel entry point.
 *
 * Runs on the boot CPU with interrupts disabled; brings the core
 * subsystems up in strict dependency order and never returns (ends in
 * rest_init()).  This variant carries an RTAI patch (CONFIG_RTAI_RTSPMM)
 * that reserves one large contiguous boot-memory block for the rt_spmm
 * partition allocator before the boot allocator is retired by mem_init().
 */
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];
#ifdef CONFIG_RTAI_RTSPMM
	unsigned int indice_part;
	/* Size of the needed memory block by the configuration */
	unsigned long rt_mem_block_size = 0;
#endif
	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them
	 */
	lock_kernel();
	page_address_init();
	printk(linux_banner);
	setup_arch(&command_line);
	setup_per_cpu_areas();

	/*
	 * Mark the boot cpu "online" so that it can call console drivers in
	 * printk() and can access its per-cpu storage.
	 */
	smp_prepare_boot_cpu();

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	build_all_zonelists();
	page_alloc_init();
	early_init_hardirqs();
	printk("Kernel command line: %s\n", saved_command_line);
	parse_early_param();
	parse_args("Booting kernel", command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	sort_main_extable();
	trap_init();
	rcu_init();
	init_IRQ();
	pidhash_init();
	init_timers();
	softirq_init();
	time_init();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);
#ifdef CONFIG_RTAI_RTSPMM
	/* Allocate a big and continuous memory block for the module
	   SPMM included in the RTAI functionalities */
	printk("--- Memory Allocation for the module rt_spmm ---\n");
	/* WARNING We need to add some space for the structures vrtxptext and vrtxpt
	   and the partitions bitmap that the module rt_spmm uses to handle the
	   blocks in each partition */
	/* for each defined partitions */
	for(indice_part = 0; indice_part < RT_MAX_PART_NUM; indice_part ++) {
		if ((rt_partitions_table[indice_part].block_size != 0) &&
		    (rt_partitions_table[indice_part].num_of_blocks != 0)) {
			/*
			 * NOTE(review): the stray "+ +" below is a unary
			 * plus, so it compiles but adds nothing — verify
			 * the intended size formula.
			 */
			rt_partitions_table[indice_part].part_size =
				(rt_partitions_table[indice_part].block_size + XN_NBBY)
				*rt_partitions_table[indice_part].num_of_blocks +
				+ sizeof(vrtxptext_t)+sizeof(vrtxpt_t);
			rt_mem_block_size +=
				rt_partitions_table[indice_part].part_size;
		}
	}
#ifdef CONFIG_RTAI_PART_DMA
	printk("Allocate memory in the low part of memory\n");
	rt_mem_block_ptr=(void*)alloc_bootmem_low(rt_mem_block_size + PAGE_SIZE-1);
#else
	printk("Allocate memory in the standard part of memory\n");
	rt_mem_block_ptr=(void*)alloc_bootmem(rt_mem_block_size + PAGE_SIZE-1);
#endif /* CONFIG_PART_DMA */
	printk("Needed Memory Size : %lu\n", rt_mem_block_size);
	printk("Allocated Memory Size : %lu\n", rt_mem_block_size + PAGE_SIZE-1);
	printk("Memory block address : 0x%x\n", (unsigned int)rt_mem_block_ptr);
	printk("-----------------------------------------------\n");
#endif /* CONFIG_RTAI_RTSPMM */
	profile_init();
	local_irq_enable();
#ifdef CONFIG_BLK_DEV_INITRD
	/* the initrd must live above the boot allocator's low watermark */
	if (initrd_start && !initrd_below_start_ok &&
	    initrd_start < min_low_pfn << PAGE_SHIFT) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		       "disabling it.\n",initrd_start,min_low_pfn << PAGE_SHIFT);
		initrd_start = 0;
	}
#endif
	vfs_caches_init_early();
	/* mem_init() retires the boot allocator; kmalloc works after this */
	mem_init();
	kmem_cache_init();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	calibrate_delay();
	pidmap_init();
	pgtable_cache_init();
	prio_tree_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	fork_init(num_physpages);
	proc_caches_init();
	buffer_init();
	unnamed_dev_init();
	security_init();
	vfs_caches_init(num_physpages);
#ifdef CONFIG_MOT_FEAT_DEVICE_TREE
	mothwcfg_init();
#endif /* CONFIG_MOT_FEAT_DEVICE_TREE */
	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	check_bugs();
	acpi_early_init(); /* before LAPIC and SMP init */

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}