/*
 * init_arch - early architecture initialization, run once at boot.
 * @bp_start: bootloader-supplied boot-parameter tag list (may be NULL).
 *
 * Collects memory-bank and command-line information from the boot tags
 * and (optionally) the device tree, falls back to platform defaults,
 * gives the platform an early hook, and finally brings up the MMU.
 * Runs before the memory allocators exist, so it only fills in static
 * state (sysmem, command_line).
 */
void __init init_arch(bp_tag_t *bp_start)
{
	/* No memory banks known yet; the parsers below populate sysmem. */
	sysmem.nr_banks = 0;

	/* Parse boot parameters */
	if (bp_start)
		parse_bootparam(bp_start);

#ifdef CONFIG_OF
	/* Let the flattened device tree contribute memory/chosen nodes. */
	early_init_devtree(dtb_start);
#endif

	/*
	 * Neither the boot tags nor the DT described any memory:
	 * fall back to a single bank at the platform default location.
	 */
	if (sysmem.nr_banks == 0) {
		sysmem.nr_banks = 1;
		sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
		sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START +
			PLATFORM_DEFAULT_MEM_SIZE;
	}

#ifdef CONFIG_CMDLINE_BOOL
	/* Built-in command line is only a fallback: use it solely when
	 * the boot tags/DT supplied nothing. */
	if (!command_line[0])
		strlcpy(command_line, default_command_line,
			COMMAND_LINE_SIZE);
#endif

	/* Early hook for platforms */
	platform_init(bp_start);

	/* Initialize MMU. */
	init_mmu();
}
/*
 * ::ptable dcmd - dump one page table.
 *
 * The address argument is taken as a pfn, or as an mfn when -m is
 * given (in which case it is translated first).  An optional -l
 * selects the table level; the default sentinel (uint64_t)-1 means
 * "unspecified" and is passed through to do_ptable_dcmd().
 */
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint_t want_mfn = 0;
	uint64_t table_level = (uint64_t)-1;
	pfn_t target;

	init_mmu();
	if (mmu.num_level == 0)
		return (DCMD_ERR);

	/* An explicit address is mandatory for this dcmd. */
	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &want_mfn,
	    'l', MDB_OPT_UINT64, &table_level,
	    NULL) != argc)
		return (DCMD_USAGE);

	/* Only validate the level when the user actually supplied one. */
	if (table_level != (uint64_t)-1 && table_level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", table_level);
		return (DCMD_ERR);
	}

	target = want_mfn ? mdb_mfn_to_pfn((pfn_t)addr) : (pfn_t)addr;
	return (do_ptable_dcmd(target, table_level));
}
/*
 * ::pte dcmd - decode a page table entry.
 *
 * The address argument is the raw PTE value; -l selects the page
 * table level it belongs to (default 0).  A zero PTE decodes to
 * nothing and succeeds silently.
 */
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint64_t level = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	/*
	 * FIX: mdb_getopts() is variadic and requires a NULL sentinel
	 * after the last option triple (as the sibling dcmds do); it
	 * was missing here, causing the option parser to read past the
	 * end of the argument list (undefined behavior).
	 */
	if (mdb_getopts(argc, argv,
	    'l', MDB_OPT_UINT64, &level,
	    NULL) != argc)
		return (DCMD_USAGE);

	if (level > mmu.max_level) {
		mdb_warn("invalid level %lu\n", level);
		return (DCMD_ERR);
	}

	/* A zero PTE carries no mapping; nothing to decode. */
	if (addr == 0)
		return (DCMD_OK);

	return (do_pte_dcmd((int)level, addr));
}
/*
 * cpu_init - per-CPU initialization, run on each CPU during bringup.
 *
 * Guards against double-initialization, attaches the CPU to the init
 * mm, resets FPU state, sets up the MMU (when configured) and programs
 * the interrupt controller mask.
 */
void __init cpu_init (void)
{
	int cpu_id = smp_processor_id();

	/*
	 * A second call for the same CPU indicates a bringup bug:
	 * warn and park the CPU forever with interrupts enabled.
	 */
	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for ( ; ; )
			local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	/* Set up and load the per-CPU TSS and LDT */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	/* The boot idle thread must not own a user mm at this point. */
	if (current->mm)
		BUG();

	/* Force FPU initialization */
	current_thread_info()->status = 0;
	clear_used_math();

#ifdef CONFIG_MMU
	/* Set up MMU */
	init_mmu();
#endif

	/* Set up ICUIMASK */
	outl(0x00070000, M32R_ICU_IMASK_PORTL);	/* imask=111 */
}
/*
 * init_arch - early architecture initialization, run once at boot.
 * @bp_start: bootloader-supplied boot-parameter tag list (may be NULL).
 *
 * Records the linked-in initrd location, parses boot tags for memory
 * banks and command line, applies defaults where nothing was supplied,
 * gives the platform an early hook, and brings up the MMU.
 */
void __init init_arch(bp_tag_t *bp_start)
{
#ifdef CONFIG_BLK_DEV_INITRD
	/* The initrd image is linked into the kernel between these
	 * linker-provided symbols. */
	initrd_start = &__initrd_start;
	initrd_end = &__initrd_end;
#endif

	/* No memory banks known yet; parse_bootparam() may add some. */
	sysmem.nr_banks = 0;

#ifdef CONFIG_CMDLINE_BOOL
	/* Seed with the build-time default; boot tags may override it
	 * below.  NOTE(review): plain strcpy — assumes
	 * default_command_line fits in command_line (both are
	 * kernel-provided fixed buffers); confirm sizes. */
	strcpy(command_line, default_command_line);
#endif

	/* Parse boot parameters */
	if (bp_start)
		parse_bootparam(bp_start);

	/* Fall back to the platform's default memory layout if the
	 * boot tags described none. */
	if (sysmem.nr_banks == 0) {
		sysmem.nr_banks = 1;
		sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
		sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START +
			PLATFORM_DEFAULT_MEM_SIZE;
	}

	/* Early hook for platforms */
	platform_init(bp_start);

	/* Initialize MMU. */
	init_mmu();
}
/*
 * Translate a pseudo-physical frame number to a machine frame number
 * via the in-memory p2m table.  Returns -1 (all bits set) when the
 * table is unavailable or the pfn is out of range.
 */
mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	init_mmu();

	/* No p2m table loaded: translation impossible. */
	if (mfn_list == NULL)
		return (-(mfn_t)1);

	/* Reject pfns beyond the end of the table. */
	if (pfn >= mfn_count)
		return (-(mfn_t)1);

	return (mfn_list[pfn]);
}
/*
 * ::va2pfn dcmd - translate a virtual address to its pfn (and, under
 * the hypervisor, its mfn).
 *
 * -a <as> selects the address space to translate in (default: kernel,
 * i.e. address space 0).  In a pipeline (DCMD_PIPE_OUT) only the bare
 * pfn is emitted so it can feed the next dcmd.
 */
int
va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t addrspace;
	char *addrspace_str = NULL;
	int piped = flags & DCMD_PIPE_OUT;
	pfn_t pfn;
	pfn_t mfn;
	int rc;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	/*
	 * FIX: mdb_getopts() is variadic and requires a NULL sentinel
	 * after the last option triple (as the sibling dcmds do); it
	 * was missing here, causing the option parser to read past the
	 * end of the argument list (undefined behavior).
	 */
	if (mdb_getopts(argc, argv,
	    'a', MDB_OPT_STR, &addrspace_str,
	    NULL) != argc)
		return (DCMD_USAGE);

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	/*
	 * parse the address space
	 */
	if (addrspace_str != NULL)
		addrspace = mdb_strtoull(addrspace_str);
	else
		addrspace = 0;

	rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);
	if (rc != DCMD_OK)
		return (rc);

	if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
		mdb_warn("Invalid mfn %lr\n", mfn);
		return (DCMD_ERR);
	}

	/* Pipeline output: just the pfn, nothing decorative. */
	if (piped) {
		mdb_printf("0x%lr\n", pfn);
		return (DCMD_OK);
	}

	mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr, pfn);

	if (is_xpv)
		mdb_printf(" (mfn 0x%lr)", mfn);

	mdb_printf("\n");

	return (DCMD_OK);
}
/*
 * platform_vtop - virtual-to-physical translation entry point used by
 * the debugger framework.  Requires an explicit address space; the
 * result is stored through pap by do_va2pa().
 */
int
platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
{
	/* An address space is mandatory here. */
	if (asp == NULL)
		return (DCMD_ERR);

	/* Load MMU geometry before attempting any table walk. */
	init_mmu();

	return (mmu.num_level == 0 ?
	    DCMD_ERR : do_va2pa(addr, asp, 0, pap, NULL));
}
/*
 * secondary_start_kernel - C entry point for a secondary CPU.
 *
 * Runs on the freshly started CPU: sets up its MMU and trap vectors,
 * attaches it to the shared init mm, calibrates delay loops, brings up
 * local interrupts/timers, marks the CPU online and hands off to the
 * idle loop.  The ordering of these steps matters; do not reorder.
 */
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	/* Debug knob: when secondary bringup is disabled, park this CPU
	 * in a low-power wait loop instead of booting it. */
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */
	secondary_trap_init();

	/* All kernel threads share the same mm context. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	/* Notify CPU-hotplug/notifier chains before enabling anything. */
	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	/* Tell the boot CPU we are up, then enter the idle loop. */
	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/*
 * Translate a pseudo-physical frame number to a machine frame number
 * by reading the target's p2m list out of the debuggee's memory.
 * Returns -1 (all bits set) when the list is unavailable, the pfn is
 * out of range, or the read fails.
 */
mfn_t
mdb_pfn_to_mfn(pfn_t pfn)
{
	mfn_t result;

	init_mmu();

	/* No p2m list located in the target: cannot translate. */
	if (mfn_list_addr == NULL)
		return (-(mfn_t)1);

	/* Out-of-range pfn. */
	if (pfn >= mfn_count)
		return (-(mfn_t)1);

	/* Fetch the single entry for this pfn from target memory. */
	if (mdb_vread(&result, sizeof (result),
	    (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
		return (-(mfn_t)1);

	return (result);
}
/*
 * ::htables dcmd - dump the htables of the hat whose address is given
 * as the dcmd address argument.
 */
/*ARGSUSED*/
int
htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	/* The hat address must be supplied explicitly. */
	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	return (do_htables_dcmd((hat_t *)addr));
}
/*
 * initialize_session_ - allocate and initialize the kernel variables
 * for a new emulation session at slot @index.
 *
 * Returns 0 on success or a negative -EMOL* error code.  On the first
 * session it also runs perform_actions(); on failure of the first
 * session the matching cleanup_actions() is invoked.
 *
 * NOTE(review): assumes the caller holds whatever lock protects
 * g_sesstab/g_num_sessions — confirm at the call site.
 */
static int
initialize_session_( uint index )
{
	kernel_vars_t *kv;
	ulong kv_phys;

	/* magic == 1 marks the session table as locked down for
	 * security; refuse to create new sessions. */
	if( g_sesstab->magic == 1 )
		return -EMOLSECURITY;

	/* printk("initialize_session\n" ); */

	/* Slot already occupied by a live session. */
	if( g_sesstab->kvars[index] )
		return -EMOLINUSE;

	/* First session: run the global one-time setup actions. */
	if( !g_num_sessions && perform_actions() )
		return -EMOLGENERAL;

	if( !(kv=alloc_kvar_pages()) )
		goto error;

	memset( kv, 0, NUM_KVARS_PAGES * 0x1000 );

	kv->session_index = index;
	kv->kvars_virt = kv;
	kv_phys = tophys_mol(kv);
	/* Cache the virtual->physical delta for fast translation. */
	kv->kvars_tophys_offs = kv_phys - (ulong)kv;

	if( init_mmu(kv) )
		goto error;

	init_host_irqs(kv);
	initialize_spr_table( kv );
	msr_altered( kv );

	/* Publish the session only after it is fully initialized. */
	g_num_sessions++;
	g_sesstab->kvars_ph[index] = kv_phys;
	g_sesstab->kvars[index] = kv;

	return 0;

error:
	/* Undo the one-time setup if this would have been the first
	 * session; release the kvar pages if they were allocated. */
	if( !g_num_sessions )
		cleanup_actions();
	if( kv )
		free_kvar_pages( kv );
	return -EMOLGENERAL;
}
/*
 * Bare-metal entry point: bring up the optional MMU and the optional
 * IRQ test peripherals, then idle forever.
 */
int
main(void)
{
#ifdef ENABLE_MMU
	init_mmu();
#endif

	//marquee();

#ifdef IRQ_TEST
	/* Button + LED + interrupt controller for the IRQ demo. */
	init_button();
	led_init();
	init_irq();
#endif

	/* Nothing else to do from the main thread; spin forever. */
	for (;;)
		;

	return 0;	/* not reached */
}
/*
 * Translate a machine frame number back to a pseudo-physical frame
 * number.  The p2m table is indexed by pfn, so the inverse mapping is
 * found by linear search.  Returns -1 (all bits set) when the table is
 * unavailable or the mfn does not appear in it.
 */
pfn_t
mdb_mfn_to_pfn(mfn_t mfn)
{
	pfn_t i;

	init_mmu();

	if (mfn_list == NULL)
		return (-(pfn_t)1);

	/* Scan the pfn-indexed list for a matching mfn entry. */
	for (i = 0; i < mfn_count; i++) {
		if (mfn_list[i] == mfn)
			return (i);
	}

	return (-(pfn_t)1);
}
/*
 * ::pte dcmd - decode a page table entry.
 *
 * The PTE may be supplied either as the dcmd address argument or via
 * -p <pte>; -l <level> selects the page table level (default 0).
 * A zero PTE decodes to nothing and succeeds silently.
 */
/*ARGSUSED*/
int
pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	int level = 0;
	uint64_t pte = 0;
	char *level_str = NULL;
	char *pte_str = NULL;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	/*
	 * FIX: mdb_getopts() is variadic and requires a NULL sentinel
	 * after the last option triple (as the sibling dcmds do); it
	 * was missing here, causing the option parser to read past the
	 * end of the argument list (undefined behavior).
	 */
	if (mdb_getopts(argc, argv,
	    'p', MDB_OPT_STR, &pte_str,
	    'l', MDB_OPT_STR, &level_str,
	    NULL) != argc)
		return (DCMD_USAGE);

	/*
	 * parse the PTE to decode, if it's 0, we don't do anything
	 */
	if (pte_str != NULL) {
		pte = mdb_strtoull(pte_str);
	} else {
		if ((flags & DCMD_ADDRSPEC) == 0)
			return (DCMD_USAGE);
		pte = addr;
	}
	if (pte == 0)
		return (DCMD_OK);

	/*
	 * parse the level if supplied
	 */
	if (level_str != NULL) {
		level = mdb_strtoull(level_str);
		if (level < 0 || level > mmu.max_level)
			return (DCMD_ERR);
	}

	return (do_pte_dcmd(level, pte));
}
/*
 * init_arch - early architecture initialization, run once at boot.
 * @bp_start: bootloader-supplied boot-parameter tag list (may be NULL).
 *
 * Parses boot tags and the device tree, applies the built-in command
 * line as a fallback, gives the platform an early hook, and brings up
 * the MMU.
 */
void __init init_arch(bp_tag_t *bp_start)
{
	/* Parse boot parameters */
	if (bp_start)
		parse_bootparam(bp_start);

#ifdef CONFIG_OF
	/* Let the flattened device tree contribute memory/chosen nodes. */
	early_init_devtree(dtb_start);
#endif

#ifdef CONFIG_CMDLINE_BOOL
	/* Use the build-time command line only if the boot tags/DT
	 * supplied none. */
	if (!command_line[0])
		strlcpy(command_line, default_command_line,
			COMMAND_LINE_SIZE);
#endif

	/* Early hook for platforms */
	platform_init(bp_start);

	/* Initialize MMU. */
	init_mmu();
}
/*
 * ::ptable dcmd - dump one page table.
 *
 * The address argument is taken as a pfn, or as an mfn when -m is
 * given (in which case it is translated to a pfn first).
 */
/*ARGSUSED*/
int
ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uint_t want_mfn = 0;

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	/* An explicit address is mandatory for this dcmd. */
	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'm', MDB_OPT_SETBITS, TRUE, &want_mfn,
	    NULL) != argc)
		return (DCMD_USAGE);

	return (do_ptable_dcmd(want_mfn ?
	    mdb_mfn_to_pfn((pfn_t)addr) : (pfn_t)addr));
}
/*
 * ::ptmap dcmd - walk an entire page table tree (rooted at the page
 * table whose physical address is given) and report the contiguous
 * mapped VA ranges, coalescing runs with identical user/writable
 * attributes.  -w restricts the report (wflag is passed through to
 * ptmap_report()).
 *
 * The walk is iterative: paddrs[] holds the table physical address and
 * entry[] the current index at each level, and the loop descends into
 * lower-level tables and ascends when a table's entries are exhausted.
 */
int
ptmap_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	physaddr_t paddrs[MAX_NUM_LEVEL] = { 0, };
	size_t entry[MAX_NUM_LEVEL] = { 0, };
	/* start == (uintptr_t)-1 means "no open run of mapped pages". */
	uintptr_t start = (uintptr_t)-1;
	boolean_t writable = B_FALSE;
	boolean_t user = B_FALSE;
	boolean_t wflag = B_FALSE;
	level_t curlevel;

	if ((flags & DCMD_ADDRSPEC) == 0)
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'w', MDB_OPT_SETBITS, TRUE, &wflag, NULL) != argc)
		return (DCMD_USAGE);

	init_mmu();

	if (mmu.num_level == 0)
		return (DCMD_ERR);

	/* Begin at the top-level table; its address is the dcmd arg. */
	curlevel = mmu.max_level;

	paddrs[curlevel] = addr & MMU_PAGEMASK;

	for (;;) {
		physaddr_t pte_addr;
		x86pte_t pte;

		/* Physical address of the current PTE within its table. */
		pte_addr = paddrs[curlevel] +
		    (entry[curlevel] << mmu.pte_size_shift);

		if (mdb_pread(&pte, sizeof (pte), pte_addr) != sizeof (pte)) {
			mdb_warn("couldn't read pte at %p", pte_addr);
			return (DCMD_ERR);
		}

		if (PTE_GET(pte, PT_VALID) == 0) {
			/* Hole in the address space: close any open run. */
			if (start != (uintptr_t)-1) {
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = (uintptr_t)-1;
			}
		} else if (curlevel == 0 || PTE_GET(pte, PT_PAGESIZE)) {
			/* Leaf mapping (bottom level or large page). */
			if (start == (uintptr_t)-1) {
				/* Open a new run at this VA. */
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			} else if (user != PTE_GET(pte, PT_USER) ||
			    writable != PTE_GET(pte, PT_WRITABLE)) {
				/* Attributes changed: report the old run
				 * and start a fresh one here. */
				ptmap_report(entry, start,
				    user, writable, wflag);
				start = entry2va(entry);
				user = PTE_GET(pte, PT_USER);
				writable = PTE_GET(pte, PT_WRITABLE);
			}
		} else {
			/* Descend a level. */
			physaddr_t pa = mmu_ptob(pte2mfn(pte, curlevel));
			paddrs[--curlevel] = pa;
			entry[curlevel] = 0;
			continue;
		}

		/* Advance; when a table is exhausted, pop up a level
		 * (possibly several) and continue there. */
		while (++entry[curlevel] == mmu.ptes_per_table) {
			/* Ascend back up. */
			entry[curlevel] = 0;
			if (curlevel == mmu.max_level) {
				/* Whole tree walked; flush the last run. */
				if (start != (uintptr_t)-1) {
					ptmap_report(entry, start,
					    user, writable, wflag);
				}
				goto out;
			}
			curlevel++;
		}
	}
out:
	return (DCMD_OK);
}
/*
 * main - kernel entry point: configure the CPU clock, bring up the
 * hardware (interrupts, timers, ethernet, uart, MMU, caches), start
 * the idle and init tasks, then run the scheduler loop until no
 * non-daemon tasks remain, and finally tear the hardware back down.
 */
int
main(void)
{
	struct task *next;

	/* Set the CPU speed */
	/* The SoC SKU ID encodes the rated CPU speed; program the MPU
	 * PLL multiplier accordingly. */
	uint32_t skuid = read32(DEVICEID_BASE + DEVICEID_SKUID_OFFSET);
	uint32_t cpuspeed_id = skuid & DEVICEID_SKUID_CPUSPEED_MASK;
	/* Base clock-select value; bits [18:8] get the MHz multiplier. */
	uint32_t clksel_val = (1<<19) | 12;
	if(cpuspeed_id == DEVICEID_SKUID_CPUSPEED_720)
		clksel_val |= (720 << 8);
	else if(cpuspeed_id == DEVICEID_SKUID_CPUSPEED_600)
		clksel_val |= (600 << 8);
	else
		panic("Unsupported CPU!");
	write32(CM_MPU_BASE + PRM_CLKSEL1_PLL_MPU_OFFSET, clksel_val);

	/* Basic hardware initialization */
	init_cpumodes();	// set up CPU modes for interrupt handling
	intc_init();		// initialize interrupt controller
	gpio_init();		// initialize gpio interrupt system

	/* Start up hardware */
	timers_init();		// must come first, since it initializes the watchdog
	eth_init();
	uart_init();

	/* For some reason, turning on the caches causes the kernel to hang after
	   finishing the third invocation. Maybe we have to clear the caches here,
	   or enable the MMU. */
	printk("mmu init\n");
	prep_pagetable();
	init_mmu();
	printk("cache init\n");
	init_cache();

	/* Initialize other interrupts */
	init_interrupts();

	/* Initialize task queues */
	init_tasks();

	/* Initialize idle task */
	/* Daemon task at lowest priority (7): runs only when nothing
	 * else is ready. */
	syscall_spawn(NULL, 7, idle_task, NULL, 0, SPAWN_DAEMON);

	pmu_enable();
	trace_init();

	printk("userspace init\n");
	/* Initialize first user program */
	syscall_spawn(NULL, 6, init_task, NULL, 0, 0);

	/* Scheduler loop: run until only daemon tasks are left. */
	while (nondaemon_count > 0) {
		next = schedule();
		task_activate(next);
		check_stack(next);
	}

	/* Orderly shutdown of everything brought up above. */
	pmu_disable();
	intc_reset();
	eth_deinit();
	deinit_mmu();
	return 0;
}