/*
 * Copy the boot parameter block and the kernel command line out of the
 * real-mode data area into kernel variables.
 *
 * Always copies 2048 bytes of boot parameters into x86_boot_params.
 * Unless built for Simics, it then locates the command line: first via
 * the new-style pointer at NEW_CL_POINTER, falling back to the old
 * OLD_CL_MAGIC/OLD_CL_OFFSET convention (e.g. loadlin) when the new
 * pointer is zero.
 */
static void copy_bootdata(char *real_mode_data)
{
#ifndef CONFIG_SIMICS
	int new_data;
	char * command_line;
#endif /* #ifndef CONFIG_SIMICS */

	memcpy(x86_boot_params, real_mode_data, 2048);

#ifndef CONFIG_SIMICS
	/*
	 * If running on Simics we boot using load_kernel after starting
	 * Simics. Control is passed in 16 bit real mode directly to the loaded
	 * kernel. After going to 32 bit protected and setting up a stack
	 * control is passed to the common setup code which detects memory sets up the VGA
	 * and establish 64 bit mode: there is no command line nor any of this.
	 */
	new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
	if (!new_data) {
		/* No new-style pointer: check for the old bootloader magic. */
		if (OLD_CL_MAGIC != * (uval16 *) OLD_CL_MAGIC_ADDR) {
			early_printk("so old bootloader that it does not support commandline?!\n");
			return;
		}
		new_data = OLD_CL_BASE_ADDR + * (uval16 *) OLD_CL_OFFSET;
		early_printk("old bootloader convention, maybe loadlin?\n");
	}
	/* NOTE(review): a fixed 2048 bytes is copied -- assumes the command
	 * line area is at least that large; confirm against boot protocol. */
	command_line = (char *) ((uval64)(new_data));
	memcpy(saved_command_line, command_line, 2048);
	early_printk("Bootdata ok (command line is %s)\n", saved_command_line);
#endif /* #ifndef CONFIG_SIMICS */
}
/*
 * Locate the UART selected by the "dtuart" console option and hand it
 * over to the serial driver.  opt_dtuart has the form
 * "<alias>[,<options>]"; the options substring (possibly empty) is
 * passed through to device_init().
 */
void __init dt_uart_init(void)
{
    struct dt_device_node *node;
    const char *alias = opt_dtuart;
    char *opts;
    int rc;

    if ( !console_has("dtuart") || !strcmp(opt_dtuart, "") )
    {
        early_printk("No console\n");
        return;
    }

    /* Split "alias,options" in place: NUL out the comma if present. */
    opts = strchr(opt_dtuart, ',');
    if ( opts == NULL )
        opts = "";
    else
        *(opts++) = '\0';

    early_printk("Looking for UART console %s\n", alias);

    node = dt_find_node_by_alias(alias);
    if ( node == NULL )
    {
        early_printk("Unable to find device \"%s\"\n", alias);
        return;
    }

    rc = device_init(node, DEVICE_SERIAL, opts);
    if ( rc != 0 )
        early_printk("Unable to initialize serial: %d\n", rc);
}
/*
 * Early machine setup (microblaze-style): clear the bss sections, save
 * the bootloader command line, bring up the early device tree and early
 * console, optionally relocate an attached ROMFS past the kernel image,
 * copy the interrupt vector table to address 0, and seed per-CPU state.
 *
 * @cmdline: bootloader-provided command line (may be NULL/empty)
 * @ram:     ramdisk/ROMFS physical address as passed by the bootloader
 * @fdt:     flattened device tree address (informational here)
 */
void __init machine_early_init(const char *cmdline, unsigned int ram,
		unsigned int fdt)
{
	/* dst starts at physical 0: the IVT copy target. */
	unsigned long *src, *dst = (unsigned long *)0x0;

	/* clearing bss section */
	memset(__bss_start, 0, __bss_stop-__bss_start);
	memset(_ssbss, 0, _esbss-_ssbss);

	/*
	 * Copy command line passed from bootloader, or use default
	 * if none provided, or forced
	 */
#ifndef CONFIG_CMDLINE_BOOL
	if (cmdline && cmdline[0] != '\0')
		strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
#endif

	/* initialize device tree for usage in early_printk */
	early_init_devtree((void *)_fdt_start);

#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk(NULL);
#endif

	early_printk("Ramdisk addr 0x%08x, FDT 0x%08x\n", ram, fdt);
	printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);

#ifdef CONFIG_MTD_UCLINUX
	{
		int size;
		unsigned int romfs_base;
		/* ROMFS lives where the bootloader said, else right after
		 * the kernel image. */
		romfs_base = (ram ? ram : (unsigned int)&__init_end);
		/* if CONFIG_MTD_UCLINUX_EBSS is defined, assume ROMFS is at the
		 * end of kernel, which is ROMFS_LOCATION defined above. */
		size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
		early_printk("Found romfs @ 0x%08x (0x%08x)\n",
				romfs_base, size);
		early_printk("#### klimit %p ####\n", klimit);
		BUG_ON(size < 0); /* What else can we do? */

		/* Use memmove to handle likely case of memory overlap */
		early_printk("Moving 0x%08x bytes from 0x%08x to 0x%08x\n",
				size, romfs_base, (unsigned)&_ebss);
		memmove(&_ebss, (int *)romfs_base, size);

		/* update klimit: the kernel's end-of-used-memory marker */
		klimit += PAGE_ALIGN(size);
		early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
	}
#endif

	/* Copy the interrupt vector table to physical address 0. */
	for (src = __ivt_start; src < __ivt_end; src++, dst++)
		*dst = *src;

	/* Initialize global data */
	per_cpu(KM, 0) = 0x1;	/* We start in kernel mode */
	per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
}
/* TODO: Parse UART config from the command line */
/*
 * Device-tree init hook for the Exynos 4210 UART: fills in the static
 * exynos4210_com descriptor (fixed 8N1 line settings, auto baud), maps
 * the MMIO region, fetches the IRQ, wires up the vuart emulation fields
 * and registers with the generic serial layer.  Returns 0 on success or
 * a negative error code.
 */
static int __init exynos4210_uart_init(struct dt_device_node *dev,
                                       const void *data)
{
    const char *config = data;
    struct exynos4210_uart *uart = &exynos4210_com;
    u64 addr, size;
    int rc;

    /* Any non-empty config string is unsupported for now. */
    if ( strcmp(config, "") )
        early_printk("WARNING: UART configuration is not supported\n");

    /* uart->clock_hz = 0x16e3600; */
    uart->baud = BAUD_AUTO;
    uart->data_bits = 8;
    uart->parity = PARITY_NONE;
    uart->stop_bits = 1;

    rc = dt_device_get_address(dev, 0, &addr, &size);
    if ( rc )
    {
        early_printk("exynos4210: Unable to retrieve the base"
                     " address of the UART\n");
        return rc;
    }

    uart->regs = ioremap_nocache(addr, size);
    if ( uart->regs == NULL )
    {
        early_printk("exynos4210: Unable to map the UART memory\n");
        return -ENOMEM;
    }

    rc = dt_device_get_irq(dev, 0, &uart->irq);
    if ( rc )
    {
        early_printk("exynos4210: Unable to retrieve the IRQ\n");
        return rc;
    }

    /* Expose the UART registers to guests via the vuart layer. */
    uart->vuart.base_addr = addr;
    uart->vuart.size = size;
    uart->vuart.data_off = UTXH;
    uart->vuart.status_off = UTRSTAT;
    uart->vuart.status = UTRSTAT_TXE | UTRSTAT_TXFE;

    /* Register with generic serial driver. */
    serial_register_uart(SERHND_DTUART, &exynos4210_uart_driver, uart);

    dt_device_set_used_by(dev, DOMID_XEN);

    return 0;
}
/*
 * Print a formatted panic message on the early console and halt.
 *
 * Interrupts are disabled first so the message cannot be interleaved
 * with other output.  Dumps a stack trace and ends in hv_halt();
 * never returns.
 *
 * @fmt: printf-style format string for the panic reason.
 */
void early_panic(const char *fmt, ...)
{
	va_list ap;

	arch_local_irq_disable_all();

	va_start(ap, fmt);
	early_printk("Kernel panic - not syncing: ");
	early_vprintk(fmt, ap);
	early_printk("\n");
	va_end(ap);

	dump_stack();
	hv_halt();
}
/*
 * Zero the kernel .bss section (from __bss_start up to __bss_end).
 * Calls early_clear() first; with DEBUG_BOOT it also reports progress
 * on the early console.
 */
static void clear_bss(void)
{
	extern char __bss_start[];
	extern char __bss_end[];
	unsigned long len =
		(unsigned long) __bss_end - (unsigned long) __bss_start;

	early_clear();
#ifdef DEBUG_BOOT
	early_printk("Clearing %ld bss bytes...\n", len);
#endif /* #ifdef DEBUG_BOOT */
	memset(__bss_start, 0, len);
#ifdef DEBUG_BOOT
	early_printk("ok\n");
#endif /* #ifdef DEBUG_BOOT */
}
/*
 * Early 64-bit C entry point: clear .bss, drop the identity mapping,
 * install the early IDT, point every per-CPU PDA slot at the static
 * boot array, copy the boot data, then enter the generic start_kernel().
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/* clear bss before set_intr_gate with early_idt_handler */
	clear_bss();

	/* Make NULL pointers segfault */
	zap_identity_mappings();

	/* Route every vector to the shared early fault handler. */
	for (i = 0; i < IDT_ENTRIES; i++)
		set_intr_gate(i, early_idt_handler);
	load_idt((const struct desc_ptr *)&idt_descr);

	early_printk("Kernel alive\n");

	for (i = 0; i < NR_CPUS; i++)
		cpu_pda(i) = &boot_cpu_pda[i];

	pda_init(0);
	copy_bootdata(__va(real_mode_data));
#ifdef CONFIG_SMP
	cpu_set(0, cpu_online_map);
#endif
	start_kernel();
}
/*
 * Set up the Allwinner (sun7i) timer0 as the system clock event device:
 * map the timer controller, program timer0 for continuous mode with a
 * /16 prescaler off the 24MHz oscillator, hook its interrupt, and
 * register the clockevent with the framework.
 *
 * Fix: the setup_irq() failure message previously hard-coded "36"
 * instead of reporting the IRQ that was actually requested
 * (AW_IRQ_TIMER0), which would mislead debugging if the number ever
 * differs.
 */
void __init aw_clkevt_init(void)
{
	int ret;
	u32 val = 0;

	timer_cpu_base = ioremap_nocache(SW_PA_TIMERC_IO_BASE, 0x1000);
	pr_info("%s: timer base 0x%08x\n", __func__, (int)timer_cpu_base);

	/* disable & clear all timers */
	writel(0x0, timer_cpu_base + TMR_IRQ_EN_REG_OFF);
	writel(0x1ff, timer_cpu_base + TMR_IRQ_STA_REG_OFF); /* diff from 33 */

	/* init timer0 */
	writel(TIMER0_VALUE, timer_cpu_base + TMR0_INTV_VALUE_REG_OFF);
	val = 0 << 7;       /* continuous mode */
	val |= 0b100 << 4;  /* pre-scale: 16 */
	val |= 0b01 << 2;   /* src: osc24M */
	val |= 1 << 1;      /* reload interval value */
	writel(val, timer_cpu_base + TMR0_CTRL_REG_OFF);

	/* register timer0 interrupt */
	ret = setup_irq(AW_IRQ_TIMER0, &sun7i_timer_irq);
	if (ret)
		/* Report the IRQ we actually asked for, not a literal. */
		early_printk("failed to setup irq %d\n", AW_IRQ_TIMER0);

	/* enable timer0 */
	writel(0x1, timer_cpu_base + TMR_IRQ_EN_REG_OFF);

	/* register clock event */
	sun7i_timer0_clockevent.mult = div_sc(AW_CLOCK_SRC/AW_CLOCK_DIV,
					      NSEC_PER_SEC,
					      sun7i_timer0_clockevent.shift);
	sun7i_timer0_clockevent.max_delta_ns =
		clockevent_delta2ns(0xff, &sun7i_timer0_clockevent);
	sun7i_timer0_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &sun7i_timer0_clockevent); /* liugang */
	sun7i_timer0_clockevent.cpumask = cpu_all_mask;
	sun7i_timer0_clockevent.irq = sun7i_timer_irq.irq;
	/* NOTE(review): casting cpumask (a pointer) to int for %08x is
	 * lossy on 64-bit; acceptable for a debug trace only. */
	early_printk("%s: sun7i_timer0_clockevent mult %d, max_delta_ns %d, min_delta_ns %d, cpumask 0x%08x, irq %d\n",
		     __func__, (int)sun7i_timer0_clockevent.mult,
		     (int)sun7i_timer0_clockevent.max_delta_ns,
		     (int)sun7i_timer0_clockevent.min_delta_ns,
		     (int)sun7i_timer0_clockevent.cpumask,
		     (int)sun7i_timer0_clockevent.irq);
	clockevents_register_device(&sun7i_timer0_clockevent);

#ifdef CONFIG_AW_TIME_DELAY
	use_time_delay();
#endif
}
/*
 * Early 64-bit entry point: performs compile-time layout checks, clears
 * .bss, removes the identity mapping and the over-mapped high alias,
 * installs the early IDT, sets up the per-CPU data area, and hands off
 * to x86_64_start_reservations().
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	/* clear bss before set_intr_gate with early_idt_handler */
	clear_bss();

	/* Make NULL pointers segfault */
	zap_identity_mappings();

	/* Cleanup the over mapped high alias */
	cleanup_highmap();

	/* Install a handler for each early exception vector. */
	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
#ifdef CONFIG_EARLY_PRINTK
		/* Per-vector stubs give better diagnostics with earlyprintk. */
		set_intr_gate(i, &early_idt_handlers[i]);
#else
		set_intr_gate(i, early_idt_handler);
#endif
	}
	load_idt((const struct desc_ptr *)&idt_descr);

	early_printk("Kernel alive\n");

	x86_64_init_pda();

	early_printk("Kernel really alive\n");

	x86_64_start_reservations(real_mode_data);
}
/**
 * get_xen_paddr - get physical address to relocate Xen to
 *
 * Xen is relocated to as near to the top of RAM as possible and
 * aligned to a XEN_PADDR_ALIGN boundary.
 *
 * Scans the (contiguous prefix of) memory banks for the highest
 * placement that fits Xen's footprint and avoids boot modules; panics
 * if none exists.  Also records the chosen region in the MOD_XEN
 * module slot.
 */
static paddr_t __init get_xen_paddr(void)
{
    struct dt_mem_info *mi = &early_info.mem;
    paddr_t min_size;
    paddr_t paddr = 0, last_end;
    int i;

    /* Xen's own footprint, rounded up to the relocation alignment. */
    min_size = (_end - _start + (XEN_PADDR_ALIGN-1)) & ~(XEN_PADDR_ALIGN-1);

    last_end = mi->bank[0].start;

    /* Find the highest bank with enough space. */
    for ( i = 0; i < mi->nr_banks; i++ )
    {
        const struct membank *bank = &mi->bank[i];
        paddr_t s, e;

        /* We can only deal with contiguous memory at the moment */
        if ( last_end != bank->start )
            break;
        last_end = bank->start + bank->size;

        if ( bank->size >= min_size )
        {
            /* Highest module-free end address within this bank. */
            e = consider_modules(bank->start, bank->start + bank->size,
                                 min_size, XEN_PADDR_ALIGN, 1);
            if ( !e )
                continue;

#ifdef CONFIG_ARM_32
            /* Xen must be under 4GB */
            if ( e > 0x100000000ULL )
                e = 0x100000000ULL;
            if ( e < bank->start )
                continue;
#endif

            s = e - min_size;

            /* Keep the highest candidate placement found so far. */
            if ( s > paddr )
                paddr = s;
        }
    }

    if ( !paddr )
        early_panic("Not enough memory to relocate Xen");

    early_printk("Placing Xen at 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
                 paddr, paddr + min_size);

    early_info.modules.module[MOD_XEN].start = paddr;
    early_info.modules.module[MOD_XEN].size = min_size;

    return paddr;
}
/*
 * K42 amd64 boot entry: clear .bss, announce on the early console, copy
 * the boot data, set up the boot CPU and its PDA, then transfer control
 * to kinit() (which never returns here).
 */
void x86_64_start_kernel(char * real_mode_data)
{
	extern void kinit();

	/* must be the first thing in C and must not depend on .bss to be zero */
	clear_bss();

	early_printk("booting amd64 k42...\n");
	copy_bootdata(real_mode_data);
	setup_boot_cpu_data();
	pda_init(0);
	kinit();
}
/*
 * Configure the GPK0/GPK1 pins for the Exynos4 MSHC interface.
 *
 * Sets up clock/command pins (GPK0[0..1]), the card-detect pin unless a
 * permanently-attached eMMC is used, then the data lines according to
 * the requested bus @width (1, 4 or 8 bits), including drive strength.
 */
void exynos4_setup_mshci_cfg_gpio(struct platform_device *dev, int width)
{
	unsigned int gpio;
	struct s3c_mshci_platdata *pdata = dev->dev.platform_data;

#ifndef CONFIG_KERNEL_PANIC_DUMP	//ly 20120412
	early_printk("exynos4_setup_mshci_cfg_gpio\n");
#endif

	/* Set all the necessary GPG0/GPG1 pins to special-function 2 */
	for (gpio = EXYNOS4_GPK0(0); gpio < EXYNOS4_GPK0(2); gpio++) {
		s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
		/* Clock pin GPK0(0) floats; others get a pull-up. */
		if ( gpio == EXYNOS4_GPK0(0) )
			s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
		else
			s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
	}

	/* if CDn pin is used as eMMC_EN pin, it might make a problem
	   So, a built-in type eMMC is embedded, it dose not set CDn pin */
	if ( pdata->cd_type != S3C_MSHCI_CD_PERMANENT ) {
		s3c_gpio_cfgpin(EXYNOS4_GPK0(2), S3C_GPIO_SFN(3));
		s3c_gpio_setpull(EXYNOS4_GPK0(2), S3C_GPIO_PULL_UP);
	}

	switch (width) {
	case 8:
		/* Upper four data lines live on GPK1 (SFN 4). */
		for (gpio = EXYNOS4_GPK1(3); gpio <= EXYNOS4_GPK1(6); gpio++) {
			s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
			s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
		}
		__raw_writel(0x2AAA, GPK1DRV);
		/* fallthrough: 8-bit mode also needs the 4-bit lines below */
	case 4:
		/* GPK[3:6] special-funtion 2 */
		for (gpio = EXYNOS4_GPK0(3); gpio <= EXYNOS4_GPK0(6); gpio++) {
			s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
			s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
		}
		__raw_writel(0x2AAA, GPK0DRV);
		break;
	case 1:
		/* GPK[3] special-funtion 2 */
		for (gpio = EXYNOS4_GPK0(3); gpio < EXYNOS4_GPK0(4); gpio++) {
			s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
			s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
		}
		__raw_writel(0xAA, GPK0DRV);
		/* NOTE(review): falls through to the (empty) default --
		 * harmless today, but a break was probably intended. */
	default:
		break;
	}
}
/*
 * Enable the uartlite early console if one can be found.
 * Returns 0 when the early console was just enabled, 1 otherwise
 * (already initialized, or no uartlite present).
 */
int __init setup_early_printk(char *opt)
{
	if (early_console_initialized)
		return 1;

	base_addr = early_uartlite_console();
	if (!base_addr)
		return 1;

	early_console_initialized = 1;
	early_printk("early_printk_console is enabled at 0x%08x\n",
		     base_addr);

	/* register_console(early_console); */
	return 0;
}
/*
 * First C code run on the boot CPU in 64-bit mode.  Verifies the memory
 * layout at build time, tears down the identity-map trampoline, clears
 * .bss, installs the early IDT, copies boot data, loads early microcode
 * on the BSP, rebuilds the kernel-high top-level mapping, and finally
 * jumps to x86_64_start_reservations().
 */
asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	/* clear bss before set_intr_gate with early_idt_handler */
	clear_bss();

	/* One stub per early exception vector. */
	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
	load_idt((const struct desc_ptr *)&idt_descr);

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	if (console_loglevel == 10)
		early_printk("Kernel alive\n");

	clear_page(init_level4_pgt);
	/* set init_level4_pgt kernel high mapping*/
	init_level4_pgt[511] = early_level4_pgt[511];

	x86_64_start_reservations(real_mode_data);
}
/*
 * Early 64-bit entry point: build-time layout checks, clear .bss, drop
 * the identity mapping, install the early IDT, then hand off to
 * x86_64_start_reservations().  (Original annotations were in Korean;
 * translated to English below.)
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	/* Any violation makes the BUILD_BUG_ON macro fail the compile. */
	/* Fails if the kernel image is large enough to overlap the module
	 * address range. */
	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	/* clear bss before set_intr_gate with early_idt_handler */
	/* Clear .bss (__bss_start through __bss_stop). */
	clear_bss();

	/* Make NULL pointers segfault */
	zap_identity_mappings();

	/* Maximum page frame number mapped so far (512M / 4K). */
	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;

	/* Set up the exception-handling interrupt gates. */
	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
#ifdef CONFIG_EARLY_PRINTK
		/* Use the per-vector early IDT handler routines. */
		set_intr_gate(i, &early_idt_handlers[i]);
#else
		set_intr_gate(i, early_idt_handler);
#endif
	}
	/* Load the interrupt descriptor table (lidt). */
	load_idt((const struct desc_ptr *)&idt_descr);

	if (console_loglevel == 10)
		early_printk("Kernel alive\n");

	x86_64_start_reservations(real_mode_data);
}
/*
 * Enable the uartlite early console if one can be found.  With an MMU,
 * a TLB entry is reserved for the console registers first.
 * Returns 0 when the early console was just enabled, 1 otherwise
 * (already initialized, or no uartlite present).
 */
int __init setup_early_printk(char *opt)
{
	if (early_console_initialized)
		return 1;

	base_addr = early_uartlite_console();
	if (!base_addr)
		return 1;

	early_console_initialized = 1;
#ifdef CONFIG_MMU
	early_console_reg_tlb_alloc(base_addr);
#endif
	early_printk("early_printk_console is enabled at 0x%08x\n",
		     base_addr);

	/* register_console(early_console); */
	return 0;
}
/*
 * Early boot entry: clears .bss, initializes the boot-CPU PDA, copies
 * boot data, then scans the saved command line for options needed
 * before the normal parser runs ("earlyprintk=", "numa=") and enters
 * start_kernel().
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
	char *s;

	clear_bss(); /* must be the first thing in C and must not depend on .bss to be zero */
	pda_init(0);
	copy_bootdata(real_mode_data);

	/* "earlyprintk=" is 12 characters; pass what follows it. */
	s = strstr(saved_command_line, "earlyprintk=");
	if (s != NULL)
		setup_early_printk(s+12);
#ifdef CONFIG_DISCONTIGMEM
	extern int numa_setup(char *);
	/* "numa=" is 5 characters. */
	s = strstr(saved_command_line, "numa=");
	if (s != NULL)
		numa_setup(s+5);
#endif
	early_printk("booting x86_64 kernel... ");
	setup_boot_cpu_data();
	start_kernel();
}
/*
 * Reserve, out of the e820 map, enough physically-contiguous space for
 * the kernel direct-mapping page tables covering [0, end).  On success
 * sets table_start/table_end (in page-frame units) for the mapping code
 * to allocate from; panics if no suitable area exists.
 */
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	/* Worst-case number of PUD and PMD pages needed to map 'end' bytes. */
	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}
/*
 * Early 64-bit entry point: after build-time layout checks it clears
 * .bss, drops the identity mapping and the over-mapped high alias,
 * installs the early IDT, sets up the per-CPU PDAs, copies boot data,
 * and early-reserves everything still needed (kernel image, initrd,
 * EBDA, setup data) before calling start_kernel().
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK)));

	/* clear bss before set_intr_gate with early_idt_handler */
	clear_bss();

	/* Make NULL pointers segfault */
	zap_identity_mappings();

	/* Cleanup the over mapped high alias */
	cleanup_highmap();

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
#ifdef CONFIG_EARLY_PRINTK
		set_intr_gate(i, &early_idt_handlers[i]);
#else
		set_intr_gate(i, early_idt_handler);
#endif
	}
	load_idt((const struct desc_ptr *)&idt_descr);

	early_printk("Kernel alive\n");

	for (i = 0; i < NR_CPUS; i++)
		cpu_pda(i) = &boot_cpu_pda[i];

	pda_init(0);
	copy_bootdata(__va(real_mode_data));

	/* Keep the kernel image itself out of the bootmem allocator. */
	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
	}
#endif

	reserve_ebda_region();
	reserve_setup_data();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

	start_kernel();
}
/* this is called from assembler and passed control in long mode 64 bit
 * interrupts disabled.
 * At this stage the first 32MB have been mapped with 2MB pages.
 *
 * Sets up the primitive memory manager over physical memory, trims the
 * early 2MB mappings down to what the allocator needs, brings up the
 * IDT/PIC/console, removes the V=R bootstrap mapping of the first 2MB,
 * and finally enters KernelInit() (never returns).
 */
extern "C" void kinit()
{
    extern char __bss_end[];
    struct KernelInitArgs kernelInitArgs;
    MemoryMgrPrimitiveKern *memory = &kernelInitArgs.memory;
    uval vp = 0;		/* master processor */
    uval vaddr;

    /* on this machine like all x86 machines nowaydays the boot
     * image is loaded at 1MB. This is hard coded here. */
    extern code start_real;
    codeAddress kernPhysStartAddress = &start_real;
    extern code kernVirtStart;

    early_printk("kernPhysStartAddress 0x%lx \n",
		 (unsigned long)kernPhysStartAddress);

    /* We ignore memory below the 1meg boundary. PhysSize is the
     * size of memory above the boundary. */
    uval physSize = BOOT_MINIMUM_REAL_MEMORY -0x100000;
    uval physStart = 0x0;
    uval physEnd = physStart + 0x100000 + physSize;

    early_printk("BOOT_MINIMUM_REAL_MEMORY 0x%lx, physStart 0x%lx,"
		 " physEnd 0x%lx, physSize 0x%lx \n",
		 BOOT_MINIMUM_REAL_MEMORY, physStart, physEnd, physSize);

    /*
     * We want to map all of physical memory into a V->R region. We choose a
     * base for the V->R region (virtBase) that makes the kernel land correctly
     * at its link origin, &kernVirtStart. This link origin must wind up
     * mapped to the physical location at which the kernel was loaded
     * (kernPhysStartAddress).
     */
    uval virtBase = (uval) (&kernVirtStart - kernPhysStartAddress);

    /* NOTE(review): %lx below receives unsigned long long / pointer
     * arguments; fine on LP64 but worth confirming. */
    early_printk("&kernVirtStart 0x%lx virtBase 0x%lx \n",
		 (unsigned long long)&kernVirtStart,
		 (unsigned long long)virtBase);

    /*
     * Memory from __end_bss
     * to the end of physical memory is available for allocation.
     * Correct first for the 2MB page mapping the kernel.
     */
    early_printk("__bss_end is 0x%lx physEnd is 0x%lx \n", __bss_end , physEnd);

    uval allocStart = ALIGN_UP(__bss_end, SEGMENT_SIZE);
    uval allocEnd = virtBase + physEnd;

    early_printk("allocStart is 0x%lx allocEnd is 0x%lx \n",
		 allocStart, allocEnd);

    memory->init(physStart, physEnd, virtBase, allocStart, allocEnd);

    /*
     * Remove mappings between allocStart and
     * BOOT_MINIMUM_REAL_MEMORY to allow 4KB page mapping for
     * that range. No need to tlb invalidate, unless they are
     * touched (debugging). Actually we need to keep the first
     * 2MB mapping above allocStart so that we can initialize the
     * first 2 (or 3 if we need a PDP page as well) 4KB pages
     * which are PDE and PTE pages for the V->R mapping before
     * they are themselves mapped as 4KB pages.
     */
    early_printk("top page real address is 0x%lx \n", (uval)&level4_pgt);
    uval level1_pgt_virt = memory->virtFromPhys((uval)&level4_pgt);
    early_printk("top page real address is 0x%lx \n", (uval)level4_pgt & ~0xfff);
    early_printk("top page virtual address is 0x%lx \n", (uval )level1_pgt_virt);

    /* Clear every 2MB (SEGMENT_SIZE) mapping above the first segment of
     * the allocator range. */
    for (vaddr = allocStart + SEGMENT_SIZE; vaddr < allocEnd;
	 vaddr += SEGMENT_SIZE) {
#ifndef NDEBUG
	// early_printk("removing pde, pml4 at virtual address 0x%lx \n",
	//              EARLY_VADDR_TO_L1_PTE_P(level1_pgt_virt, vaddr, memory));
	TOUCH(EARLY_VADDR_TO_L1_PTE_P(level1_pgt_virt, vaddr, memory));
	// early_printk("removing pde, pdp at virtual address 0x%lx \n",
	//              EARLY_VADDR_TO_L2_PTE_P(level1_pgt_virt, vaddr, memory));
	TOUCH(EARLY_VADDR_TO_L2_PTE_P(level1_pgt_virt, vaddr, memory));
	// early_printk("removing pde at virtual address 0x%lx \n",
	//              EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory));
	TOUCH(EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory));
#endif /* #ifndef NDEBUG */
	EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->P = 0;
	EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->PS = 0;
	EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->G = 0;
	EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->Frame = 0;
	__flush_tlb_one(vaddr);
    }

    /*
     * Because of the 2MB page mapping for the kernel no
     * unused space can be recuperated at a 4KB page granularity.
     * We may want to map the fringe bss with 4KB page(s)
     * or alternatively make free for (pinned only) 4KB allocation
     * the unused 4KB pages unused in the 2MB pages at this point. XXX dangerous
     */

    early_printk("Calling InitKernelMappings\n");
    InitKernelMappings(0, memory);

    // kernelInitArgs.onSim = onSim; not there anymore but where is it set XXX
    kernelInitArgs.vp = 0;
    kernelInitArgs.barrierP = 0;

#define LOOP_NUMBER 0x000fffff	// iteration counter for delay
    init_PIC(LOOP_NUMBER);

    early_printk("Calling InitIdt\n");
    InitIdt();			// initialize int handlers

    early_printk("Calling enableHardwareInterrupts\n");
    enableHardwareInterrupts();

    early_printk("Calling thinwireInit\n");
    thinwireInit(memory);

    /* no thinwire console XXX taken from mips64 but check */
    early_printk("Calling LocalConsole and switching to tty \n");
    LocalConsole::Init(vp, memory, CONSOLE_CHANNEL, 1, 0 );

    err_printf("Calling KernelInit.C\n");

    /* Remove the V=R initial mapping only used for jumping to
     * the final mapping, i.e the first 2MB. XXX todo should not
     * do it until VGABASE has been relocated currently mapped
     * V==R XXX cannot use early_printk() from now on.
     */
    L3_PTE *p;
    p = EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt,(uval)0x100000,memory);
    p->P = 0;
    p->PS = 0;
    p->G = 0;
    p->Frame = 0;
    __flush_tlb_one(0x100000);

    KernelInit(kernelInitArgs);
    /* NOTREACHED */
}
    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
#else /* CONFIG_ARM_64 */
/*
 * ARM64 variant of setup_mm: on arm64 all RAM is part of the xenheap,
 * so each usable bank is mapped into the xenheap directly and its
 * module-free ranges handed to the boot allocator.  Also copies the
 * DTB into allocated pages and sets up the frametable.
 */
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start = ~0;
    paddr_t ram_end = 0;
    paddr_t ram_size = 0;
    int bank;
    unsigned long dtb_pages;
    void *fdt;

    total_pages = 0;
    for ( bank = 0 ; bank < early_info.mem.nr_banks; bank++ )
    {
        paddr_t bank_start = early_info.mem.bank[bank].start;
        paddr_t bank_size = early_info.mem.bank[bank].size;
        paddr_t bank_end = bank_start + bank_size;
        paddr_t s, e;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * We allow non-contigious regions so long as at least half of
         * the total RAM region actually contains RAM. We actually
         * fudge this slightly and require that adding the current
         * bank does not cause us to violate this restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank > 0 && 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_start = new_ram_start;
        ram_end = new_ram_end;
        ram_size = new_ram_size;

        setup_xenheap_mappings(bank_start>>PAGE_SHIFT,
                               bank_size>>PAGE_SHIFT);

        /* Feed the module-free sub-ranges of this bank to the boot
         * allocator. */
        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = bank_end;
            }

            if ( e > bank_end )
                e = bank_end;

            xenheap_mfn_end = e;

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    if ( bank != early_info.mem.nr_banks )
    {
        early_printk("WARNING: only using %d out of %d memory banks\n",
                     bank, early_info.mem.nr_banks);
        early_info.mem.nr_banks = bank;
    }

    total_pages += ram_size >> PAGE_SHIFT;

    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = ram_start >> PAGE_SHIFT;
    xenheap_mfn_end = ram_end >> PAGE_SHIFT;
    xenheap_max_mfn(xenheap_mfn_end);

    /*
     * Need enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size, BUFFERABLE);
    device_tree_flattened = fdt;

    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    end_boot_allocator();
}
/* C entry point for boot CPU */
/*
 * Top-level boot sequence for the Xen/ARM boot CPU: early per-CPU and
 * device-tree setup, page tables and memory management, console and
 * interrupt bring-up, secondary CPU startup, then creation of dom0 and
 * the final switch onto the idle vcpu's stack (never returns).
 *
 * @boot_phys_offset: physical-to-virtual offset established by head.S
 * @fdt_paddr:        physical address of the flattened device tree
 * @cpuid:            boot CPU identifier (unused here)
 */
void __init start_xen(unsigned long boot_phys_offset,
                      unsigned long fdt_paddr,
                      unsigned long cpuid)
{
    size_t fdt_size;
    int cpus, i;
    const char *cmdline;

    setup_cache();
    percpu_init_areas();
    set_processor_id(0); /* needed early, for smp_processor_id() */
    smp_clear_cpu_maps();

    /* This is mapped by head.S */
    device_tree_flattened = (void *)BOOT_FDT_VIRT_START
        + (fdt_paddr & ((1 << SECOND_SHIFT) - 1));
    fdt_size = device_tree_early_init(device_tree_flattened, fdt_paddr);

    cmdline = device_tree_bootargs(device_tree_flattened);
    early_printk("Command line: %s\n", cmdline);
    cmdline_parse(cmdline);

    /* Memory management: relocate Xen, then build the heaps. */
    setup_pagetables(boot_phys_offset, get_xen_paddr());
    setup_mm(fdt_paddr, fdt_size);

    vm_init();
    dt_unflatten_host_device_tree();
    dt_irq_xlate = gic_irq_xlate;

    dt_uart_init();
    console_init_preirq();

    system_state = SYS_STATE_boot;

    processor_id();

    platform_init();

    smp_init_cpus();
    cpus = smp_get_max_cpus();

    init_xen_time();

    gic_init();

    set_current((struct vcpu *)0xfffff000); /* debug sanity */
    idle_vcpu[0] = current;

    init_traps();

    setup_virt_paging();

    p2m_vmid_allocator_init();

    softirq_init();

    tasklet_subsys_init();

    init_IRQ();

    gic_route_ppis();
    gic_route_spis();

    init_maintenance_interrupt();
    init_timer_interrupt();

    timer_init();

    init_idle_domain();

    rcu_init();

    arch_init_memory();

    local_irq_enable();
    local_abort_enable();

    smp_prepare_cpus(cpus);

    initialize_keytable();

    console_init_postirq();

    do_presmp_initcalls();

    /* Bring up the secondary CPUs. */
    for_each_present_cpu ( i )
    {
        if ( (num_online_cpus() < cpus) && !cpu_online(i) )
        {
            int ret = cpu_up(i);
            if ( ret != 0 )
                printk("Failed to bring up CPU %u (error %d)\n", i, ret);
        }
    }

    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    /* TODO: smp_cpus_done(); */

    do_initcalls();

    /* Create initial domain 0. */
    dom0 = domain_create(0, 0, 0);
    if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
        panic("Error creating domain 0");

    dom0->is_privileged = 1;
    dom0->target = NULL;

    if ( construct_dom0(dom0) != 0)
        panic("Could not set up DOM0 guest OS");

    /* Scrub RAM that is still free and so may go to an unprivileged
       domain. */
    scrub_heap_pages();

    init_constructors();

    console_endboot();

    /* Hide UART from DOM0 if we're using it */
    serial_endboot();

    system_state = SYS_STATE_active;

    domain_unpause_by_systemcontroller(dom0);

    /* Switch on to the dynamically allocated stack for the idle vcpu
     * since the static one we're running on is about to be freed. */
    memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(),
           sizeof(struct cpu_info));
    switch_stack_and_jump(idle_vcpu[0]->arch.cpu_info, init_done);
}
/*
 * ARM32 variant of setup_mm: compute the bounds of RAM, carve a xenheap
 * out of the initial contiguous region, copy the DTB into boot pages,
 * hand all non-xenheap, module-free RAM to the boot allocator, and set
 * up the frametable.
 *
 * Fix: the out-of-space panic message read "Not not enough space for
 * xenheap" (duplicated word); corrected to "Not enough space for
 * xenheap".
 *
 * @dtb_paddr: physical address of the flattened device tree
 * @dtb_size:  size of the DTB in bytes
 */
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t contig_start, contig_end;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !early_info.mem.nr_banks )
        early_panic("No memory bank");

    /*
     * We are going to accumulate two regions here.
     *
     * The first is the bounds of the initial memory region which is
     * contiguous with the first bank. For simplicity the xenheap is
     * always allocated from this region.
     *
     * The second is the complete bounds of the regions containing RAM
     * (ie. from the lowest RAM address to the highest), which
     * includes any holes.
     *
     * We also track the number of actual RAM pages (i.e. not counting
     * the holes).
     */
    ram_size = early_info.mem.bank[0].size;
    contig_start = ram_start = early_info.mem.bank[0].start;
    contig_end = ram_end = ram_start + ram_size;

    for ( i = 1; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_size = early_info.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * If the new bank is contiguous with the initial contiguous
         * region then incorporate it into the contiguous region.
         *
         * Otherwise we allow non-contigious regions so long as at
         * least half of the total RAM region actually contains
         * RAM. We actually fudge this slightly and require that
         * adding the current bank does not cause us to violate this
         * restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank_start == contig_end )
            contig_end = bank_end;
        else if ( bank_end == contig_start )
            contig_start = bank_start;
        else if ( 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_size = new_ram_size;
        ram_start = new_ram_start;
        ram_end = new_ram_end;
    }

    if ( i != early_info.mem.nr_banks )
    {
        early_printk("WARNING: only using %d out of %d memory banks\n",
                     i, early_info.mem.nr_banks);
        early_info.mem.nr_banks = i;
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * Locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1/8 the total RAM in the system
     *  - must be at least 128M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL;
    xenheap_pages = max(xenheap_pages, 128UL<<(20-PAGE_SHIFT));

    do
    {
        /* xenheap is always in the initial contiguous region */
        e = consider_modules(contig_start, contig_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        /* Halve the request and retry until the 128M floor. */
        xenheap_pages >>= 1;
    } while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );

    if ( ! e )
        early_panic("Not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    early_printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
                 e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages);
    early_printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages,
                           xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start),
                    pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size, BUFFERABLE);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_end = bank_start + early_info.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /* Clamp to this bank: the module found may belong to a
             * different RAM bank. */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
/*
 * Probe and initialise the ARM HDLCD display controller and hook the
 * framebuffer it drives up as a console output.
 *
 * All configuration comes from the device tree:
 *  - controller MMIO base: "reg" of the "arm,hdlcd" node;
 *  - framebuffer address/size: "framebuffer" property;
 *  - optional "mode" property, e.g. "1280x1024-32@60" (bpp optional);
 *    when absent, 1280x1024@60 at 32 bpp is assumed.
 *
 * On any error a message is logged and the function returns, leaving the
 * driver disabled; no error is propagated to the caller.
 */
void __init video_init(void)
{
    struct lfb_prop lfbp;
    unsigned char *lfb;
    paddr_t hdlcd_start, hdlcd_size;
    paddr_t framebuffer_start, framebuffer_size;
    const char *mode_string;
    char _mode_string[16];
    int bytes_per_pixel = 4;
    struct color_masks *c = NULL;
    struct modeline *videomode = NULL;
    int i;
    const struct dt_device_node *dev;
    const __be32 *cells;
    u32 lenp;
    int res;

    dev = dt_find_compatible_node(NULL, NULL, "arm,hdlcd");
    if ( !dev )
    {
        /* Message must quote the compatible string actually searched for. */
        early_printk("HDLCD: Cannot find node compatible with \"arm,hdlcd\"\n");
        return;
    }

    /* dt_device_get_address() returns 0 on success, non-zero on failure. */
    res = dt_device_get_address(dev, 0, &hdlcd_start, &hdlcd_size);
    if ( res )
    {
        early_printk("HDLCD: Unable to retrieve MMIO base address\n");
        return;
    }

    cells = dt_get_property(dev, "framebuffer", &lenp);
    if ( !cells )
    {
        early_printk("HDLCD: Unable to retrieve framebuffer property\n");
        return;
    }

    framebuffer_start = dt_next_cell(dt_n_addr_cells(dev), &cells);
    framebuffer_size = dt_next_cell(dt_n_size_cells(dev), &cells);

    if ( !hdlcd_start )
    {
        early_printk(KERN_ERR "HDLCD: address missing from device tree, disabling driver\n");
        return;
    }

    if ( !framebuffer_start )
    {
        early_printk(KERN_ERR "HDLCD: framebuffer address missing from device tree, disabling driver\n");
        return;
    }

    /*
     * Parse the optional "mode" property.  _mode_string ends up holding the
     * mode in "WxH@R" form (bpp component stripped), which is the key used
     * by the videomodes[] table below.
     */
    res = dt_property_read_string(dev, "mode", &mode_string);
    if ( res )
    {
        /* No mode given: default to 1280x1024@60 at 32 bpp. */
        get_color_masks("32", &c);
        memcpy(_mode_string, "1280x1024@60", strlen("1280x1024@60") + 1);
        bytes_per_pixel = 4;
    }
    else if ( strlen(mode_string) < strlen("800x600@60") ||
              strlen(mode_string) > sizeof(_mode_string) - 1 )
    {
        early_printk(KERN_ERR "HDLCD: invalid modeline=%s\n", mode_string);
        return;
    }
    else
    {
        /* A '-' separates resolution from bpp, e.g. "1280x1024-32@60". */
        char *s = strchr(mode_string, '-');

        if ( !s )
        {
            early_printk(KERN_INFO "HDLCD: bpp not found in modeline %s, assume 32 bpp\n",
                         mode_string);
            get_color_masks("32", &c);
            memcpy(_mode_string, mode_string, strlen(mode_string) + 1);
            bytes_per_pixel = 4;
        }
        else
        {
            if ( strlen(s) < 6 )
            {
                early_printk(KERN_ERR "HDLCD: invalid mode %s\n", mode_string);
                return;
            }
            s++;
            if ( get_color_masks(s, &c) < 0 )
            {
                early_printk(KERN_WARNING "HDLCD: unsupported bpp %s\n", s);
                return;
            }
            bytes_per_pixel = simple_strtoll(s, NULL, 10) / 8;

            /*
             * Strip the "-NN" bpp component (assumes a two-digit bpp):
             * copy the resolution prefix, then the "@RR" suffix including
             * its NUL terminator.  This must only run when a '-' was
             * found: previously it also ran with s == NULL, making
             * "s - mode_string" undefined pointer arithmetic and
             * clobbering the mode string copied just above.
             */
            i = s - mode_string - 1;
            memcpy(_mode_string, mode_string, i);
            memcpy(_mode_string + i, mode_string + i + 3, 4);
        }
    }

    /* Look the parsed mode up in the table of supported modes. */
    for ( i = 0; i < ARRAY_SIZE(videomodes); i++ )
    {
        if ( !strcmp(_mode_string, videomodes[i].mode) )
        {
            videomode = &videomodes[i];
            break;
        }
    }
    if ( !videomode )
    {
        early_printk(KERN_WARNING "HDLCD: unsupported videomode %s\n",
                     _mode_string);
        return;
    }

    if ( framebuffer_size < bytes_per_pixel * videomode->xres * videomode->yres )
    {
        early_printk(KERN_ERR "HDLCD: the framebuffer is too small, disabling the HDLCD driver\n");
        return;
    }

    early_printk(KERN_INFO "Initializing HDLCD driver\n");

    lfb = ioremap_wc(framebuffer_start, framebuffer_size);
    if ( !lfb )
    {
        early_printk(KERN_ERR "Couldn't map the framebuffer\n");
        return;
    }
    /* Blank the visible area before switching the display on. */
    memset(lfb, 0x00, bytes_per_pixel * videomode->xres * videomode->yres);

    /* uses FIXMAP_MISC */
    set_pixclock(videomode->pixclock);

    /* Temporarily map the HDLCD registers and program the mode. */
    set_fixmap(FIXMAP_MISC, hdlcd_start >> PAGE_SHIFT, DEV_SHARED);
    /* COMMAND = 0/1 presumably gates the controller off/on around
     * reprogramming -- confirm against the ARM HDLCD TRM. */
    HDLCD[HDLCD_COMMAND] = 0;

    HDLCD[HDLCD_LINELENGTH] = videomode->xres * bytes_per_pixel;
    HDLCD[HDLCD_LINECOUNT] = videomode->yres - 1;
    HDLCD[HDLCD_LINEPITCH] = videomode->xres * bytes_per_pixel;
    HDLCD[HDLCD_PF] = ((bytes_per_pixel - 1) << 3);
    HDLCD[HDLCD_INTMASK] = 0;
    HDLCD[HDLCD_FBBASE] = framebuffer_start;
    HDLCD[HDLCD_BUS] = 0xf00 | (1 << 4);
    HDLCD[HDLCD_VBACK] = videomode->vback - 1;
    HDLCD[HDLCD_VSYNC] = videomode->vsync - 1;
    HDLCD[HDLCD_VDATA] = videomode->yres - 1;
    HDLCD[HDLCD_VFRONT] = videomode->vfront - 1;
    HDLCD[HDLCD_HBACK] = videomode->hback - 1;
    HDLCD[HDLCD_HSYNC] = videomode->hsync - 1;
    HDLCD[HDLCD_HDATA] = videomode->xres - 1;
    HDLCD[HDLCD_HFRONT] = videomode->hfront - 1;
    HDLCD[HDLCD_POLARITIES] = (1 << 2) | (1 << 3);
    HDLCD[HDLCD_RED] = (c->red_size << 8) | c->red_shift;
    HDLCD[HDLCD_GREEN] = (c->green_size << 8) | c->green_shift;
    HDLCD[HDLCD_BLUE] = (c->blue_size << 8) | c->blue_shift;
    HDLCD[HDLCD_COMMAND] = 1;
    clear_fixmap(FIXMAP_MISC);

    /* Hand the framebuffer over to the generic lfb console code. */
    lfbp.pixel_on = (((1 << c->red_size) - 1) << c->red_shift) |
                    (((1 << c->green_size) - 1) << c->green_shift) |
                    (((1 << c->blue_size) - 1) << c->blue_shift);
    lfbp.lfb = lfb;
    lfbp.font = &font_vga_8x16;
    lfbp.bits_per_pixel = bytes_per_pixel * 8;
    lfbp.bytes_per_line = bytes_per_pixel * videomode->xres;
    lfbp.width = videomode->xres;
    lfbp.height = videomode->yres;
    lfbp.flush = hdlcd_flush;
    lfbp.text_columns = videomode->xres / 8;
    lfbp.text_rows = videomode->yres / 16;
    if ( lfb_init(&lfbp) < 0 )
        return;
    video_puts = lfb_scroll_puts;
}