/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	/* Identify this core and install its trap handlers before any
	 * firmware callbacks run. */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */
#ifndef CONFIG_CPU_CAVIUM_OCTEON
	/* There is no reason to waste time doing this on Octeon. All the
	   cores are on the same chip and are the same speed by definition */
	calibrate_delay();
#endif
	preempt_disable();
	cpu = smp_processor_id();
	/* Publish this CPU's delay-loop constant (on Octeon this keeps the
	 * value inherited from the boot CPU — same-speed cores). */
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	/* Let the firmware finish its part, then signal the boot CPU that
	 * we have called in.  Never returns: we become the idle task. */
	prom_smp_finish();
	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}
/* Secondary-CPU C entry point: adopt init_mm, install traps, calibrate,
 * then hand off to the CPU-hotplug idle entry.  Never returns. */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	/* Take both a kernel reference and a user count on init_mm before
	 * making it our active mm. */
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/* Publish online state last, then enter the idle loop via the
	 * hotplug state machine. */
	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/* Secondary-CPU C entry point: borrow init_mm, init traps, calibrate,
 * mark ourselves online and idle.  Never returns. */
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	/* Take a reference and a user count on init_mm; as a kernel thread
	 * we must have no ->mm of our own. */
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();

	preempt_disable();

	local_irq_enable();

	calibrate_delay();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);

	/* Going online unblocks the boot CPU waiting on us. */
	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}
/* Early per-CPU bring-up on sparc32: register the clockevent, calibrate
 * the delay loop, flush cache/TLB, then run the platform-specific
 * pre-online handshake for the detected machine type. */
void __cpuinit arch_cpu_pre_online(void *arg)
{
	unsigned int cpuid = hard_smp_processor_id();

	register_percpu_ce(cpuid);
	calibrate_delay();
	smp_store_cpu_info(cpuid);

	/* Start from a clean cache/TLB state before going online. */
	local_ops->cache_all();
	local_ops->tlb_all();

	switch (sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_online(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_online(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_online(arg);
		break;
	default:
		/* Unknown machine type: cannot bring this CPU up. */
		BUG();
	}
}
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	/* Identify this core and install its trap handlers before any
	 * firmware callbacks run. */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */
	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	/* Publish this CPU's delay-loop constant. */
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	/* Let the firmware finish its part, then signal the boot CPU that
	 * we have called in.  Never returns: we become the idle task. */
	prom_smp_finish();
	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}
/**
 * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
 * @mpurate_ck_name: clk name of the clock to change rate
 *
 * Change the ARM MPU clock rate to the rate specified on the command
 * line, if one was specified.  @mpurate_ck_name should be
 * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
 * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
 * handled by the virt_prcm_set clock, but this should be handled by
 * the OPP layer.  XXX This is intended to be handled by the OPP layer
 * code in the near future and should be removed from the clock code.
 * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
 * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
 * cannot be found, or 0 upon success.
 */
int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
{
	struct clk *mpurate_ck;
	int r;

	if (!mpurate)
		return -EINVAL;

	mpurate_ck = clk_get(NULL, mpurate_ck_name);
	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
		return -ENOENT;

	r = clk_set_rate(mpurate_ck, mpurate);
	/*
	 * clk_set_rate() returns 0 or a negative errno; test it directly
	 * rather than via IS_ERR_VALUE(), which is meant for pointer-sized
	 * error encodings.
	 */
	if (r < 0) {
		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
		     mpurate_ck->name, mpurate, r);
		/* Drop the reference taken by clk_get() — previously leaked
		 * on this error path. */
		clk_put(mpurate_ck);
		return -EINVAL;
	}

	/* The MPU rate changed, so loops_per_jiffy must be re-derived. */
	calibrate_delay();
	recalculate_root_clocks();

	clk_put(mpurate_ck);

	return 0;
}
/* Secondary-CPU C entry point: adopt init_mm, init traps and timers,
 * calibrate, go online and idle.  Never returns. */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	/* Take a reference and a user count on init_mm before adopting it. */
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_ONLINE);
}
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */
	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	/* Publish this CPU's delay-loop constant. */
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	/* Let the firmware finish its part, then signal the boot CPU that
	 * we have called in.  Never returns: we become the idle task. */
	prom_smp_finish();
	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}
/* Blackfin CoreB bring-up: mirror CoreA's interrupt routing, calibrate,
 * record per-CPU data and hand control back to the boot CPU. */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	local_irq_disable();

	/* Clone setup for peripheral interrupt sources from CoreA. */
	bfin_write_SICB_IMASK0(bfin_read_SICA_IMASK0());
	bfin_write_SICB_IMASK1(bfin_read_SICA_IMASK1());
	SSYNC();

	/* Clone setup for IARs from CoreA. */
	bfin_write_SICB_IAR0(bfin_read_SICA_IAR0());
	bfin_write_SICB_IAR1(bfin_read_SICA_IAR1());
	bfin_write_SICB_IAR2(bfin_read_SICA_IAR2());
	bfin_write_SICB_IAR3(bfin_read_SICA_IAR3());
	bfin_write_SICB_IAR4(bfin_read_SICA_IAR4());
	bfin_write_SICB_IAR5(bfin_read_SICA_IAR5());
	bfin_write_SICB_IAR6(bfin_read_SICA_IAR6());
	bfin_write_SICB_IAR7(bfin_read_SICA_IAR7());
	SSYNC();

	local_irq_enable();

	/* Calibrate loops per jiffy value. */
	calibrate_delay();

	/* Store CPU-private information to the cpu_data array. */
	bfin_setup_cpudata(cpu);

	/* We are done with local CPU inits, unblock the boot CPU. */
	cpu_set(cpu, cpu_callin_map);
	/* Empty lock/unlock pair: blocks here until the boot CPU releases
	 * boot_lock, serializing bring-up. */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
/*
 * Deliberate no-op: delay calibration and per-CPU info storage are
 * compiled out (#if 0) — presumably handled elsewhere on this platform;
 * TODO confirm before re-enabling (note `cpuid` is not defined here).
 */
void __init smp_callin(void)
{
#if 0
	calibrate_delay();
	smp_store_cpu_info(cpuid);
#endif
}
/* Make sure ACPU clock is not PLL3, so PLL3 can be re-programmed. */
static void __init move_off_scpll(void)
{
	struct clkctl_acpu_speed *tgt_s = &acpu_freq_tbl[PLL3_CALIBRATION_IDX];

	/* The calibration target entry itself must not be sourced from
	 * PLL3, or we could not move off it. */
	BUG_ON(tgt_s->pll == ACPU_PLL_3);
	select_clk_source(tgt_s);
	select_core_source(tgt_s->core_src_sel);
	drv_state.current_speed = tgt_s;
	/* Re-derive loops_per_jiffy for the new clock rate. */
	calibrate_delay();
}
/* * Start the real-time clock. */ void clockattach(device_t parent, device_t self, void *aux) { const char *clockchip; unsigned short interval; int chipfreq; #ifdef DRACO u_char dracorev; #endif if (eclockfreq == 0) eclockfreq = 715909; /* guess NTSC */ chipfreq = eclockfreq; #ifdef DRACO dracorev = is_draco(); if (dracorev >= 4) { chipfreq = eclockfreq / 7; clockchip = "QuickLogic"; } else if (dracorev) { clockcia = (struct CIA *)CIAAbase; clockchip = "CIA A"; } else #endif { clockcia = (struct CIA *)CIABbase; clockchip = "CIA B"; } amiga_clk_interval = chipfreq / hz; if (self != NULL) { /* real autoconfig? */ printf(": %s system hz %d hardware hz %d\n", clockchip, hz, chipfreq); clk_timecounter.tc_name = clockchip; clk_timecounter.tc_frequency = chipfreq; tc_init(&clk_timecounter); } #ifdef DRACO if (dracorev >= 4) { /* * can't preload anything beforehand, timer is free_running; * but need this for delay calibration. */ draco_ioct->io_timerlo = amiga_clk_interval & 0xff; draco_ioct->io_timerhi = amiga_clk_interval >> 8; calibrate_delay(self); return; }
/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
	int cpuid, phys_id;

	/*
	 * If waken up by an INIT in an 82489DX configuration
	 * cpu_callout_mask guarantees we don't get here before
	 * an INIT_deassert IPI reaches our local APIC, so it is
	 * now safe to touch our local APIC.
	 */
	cpuid = smp_processor_id();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	/* Make the sibling-map writes visible before callin is published. */
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
/* ia64 AP call-in: claim our online bit, sync the ITC with the boot
 * processor, init the interval timer, calibrate, then signal the BP. */
static void __init smp_callin (void)
{
	int cpuid, phys_id;
	extern void ia64_init_itm(void);

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();

	/* test_and_set also publishes us in cpu_online_map; a set bit here
	 * means a duplicate wakeup. */
	if (test_and_set_bit(cpuid, &cpu_online_map)) {
		printk("huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	smp_setup_percpu_timer();

	/*
	 * Synchronize the ITC with the BP
	 */
	Dprintk("Going to syncup ITC with BP.\n");
	ia64_sync_itc(0);

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Set I/O port base per CPU
	 */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

#ifdef CONFIG_IA64_MCA
	ia64_mca_cmc_vector_setup();	/* Setup vector on AP & enable */
	ia64_mca_check_errors();	/* For post-failure MCA error logging */
#endif

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();
	calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

	/*
	 * Allow the master to continue.
	 */
	set_bit(cpuid, &cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
__initfunc(int wanpipe_init(void)) #endif { printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE, copyright); exec_idle = calibrate_delay(EXEC_DELAY); #ifdef WANDEBUG printk(KERN_DEBUG "%s: exec_idle = %d\n", modname, exec_idle); #endif return 0; }
/* Final per-CPU online step: enable IRQs, calibrate, record CPU data
 * and publish this CPU in the online map. */
static void __init smp_online(void)
{
	int cpu_id = smp_processor_id();

	local_irq_enable();

	/* Get our bogomips. */
	calibrate_delay();

	/* Save our processor parameters */
	smp_store_cpu_info(cpu_id);

	cpu_set(cpu_id, cpu_online_map);
}
/* xtensa secondary-CPU C entry: init MMU and traps, adopt init_mm,
 * calibrate, set up IRQs/timers, go online, and enter idle. */
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	/* Debug knob: a zero boot_secondary_processors parks this CPU in a
	 * low-power wait loop instead of booting it. */
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */
	secondary_trap_init();

	/* All kernel threads share the same mm context. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	/* Unblock the boot CPU waiting in __cpu_up(), then idle forever. */
	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/* Final per-CPU online step: notify, enable IRQs, calibrate, record
 * CPU data and publish this CPU as online. */
static void __init smp_online(void)
{
	int cpu_id = smp_processor_id();

	notify_cpu_starting(cpu_id);

	local_irq_enable();

	/* Get our bogomips. */
	calibrate_delay();

	/* Save our processor parameters */
	smp_store_cpu_info(cpu_id);

	set_cpu_online(cpu_id, true);
}
/*
 * This is the kernel main routine. When the boot process is completed, this
 * function is called.
 */
void start_kernel(void)
{
	int pid, i;
	char *names[N_CONSOLES] = { "PRG1", "PRG2", "PRG3", "PRG4", "PRG5", "PRG6" };

	// init traps
	init_traps();
	// initialize IRQs
	init_irq();
	// initialize the tty driver. The tty is composed by a keyboard driver
	// that read input keys a print the relative ASCII code on video.
	tty_init();
	// initialize the timer.
	time_init();
	// initialize the scheduler
	sched_init();

	// NOTE(review): "0x%x0" prints the value followed by a literal '0';
	// presumably "0x%x" was intended — confirm before changing.
	printk("Kernel info: %u bytes, start at 0x%x0 end at 0x%x0.\n",
		&__KERNEL_END__-K_START, K_START, &__KERNEL_END__);

	// enable interrupts so the timer can drive delay calibration
	sti();

	// calibrate delay
	calibrate_delay();

	// floppy driver initialization
	floppy_init();

	// switch in user mode
	move_to_user_mode();

	// for a process for each console and execute the program PRGi
	for (i=0; i<N_CONSOLES; ++i) {
		// spawn process i and run PRG1
		pid = fork();
		if (pid == 0) {
			exec(names[i]);
		} else if (pid < 0) {
			print("idle: cannot duplicate myself.\n");
		}
	}

	print("Idle: ok!\n");

	// idle loop: this thread becomes the idle task and never returns
	while(1);
}
/* Force ACPU core and L2 cache clocks to their AFAB sources. */
static void __init force_all_to_afab(void)
{
	int cpu;

	/* Point every possible CPU (and its L2 vote) at the AFAB table
	 * entry, then switch the shared L2 source itself. */
	for_each_possible_cpu(cpu) {
		select_clk_source_div(cpu, &acpu_freq_tbl[AFAB_IDX]);
		select_core_source(cpu, 0);
		drv_state.current_speed[cpu] = &acpu_freq_tbl[AFAB_IDX];
		l2_vote[cpu] = &acpu_freq_tbl[AFAB_IDX];
	}
	select_core_source(L2, 0);
	drv_state.current_l2_speed = &acpu_freq_tbl[AFAB_IDX];

	/* Both cores are assumed to have the same lpj values when on AFAB. */
	calibrate_delay();
}
/* ELKS kernel entry after boot: bring up core subsystems in order,
 * spawn init (task #1), then become the idle task. */
void start_kernel(void)
{
	seg_t base, end;

	/* We set the idle task as #0, and init_task() will be task #1 */
	sched_init();

	/* This block of functions don't need console */
	setup_arch(&base, &end);
	mm_init(base, end);
	buffer_init();
	inode_init();
	init_IRQ();
	tty_init();
	init_console();

#if (CONFIG_BOGOMIPS == 0)
	/* Only calibrate when no fixed BogoMIPS value was configured. */
	calibrate_delay();
#endif
	device_setup();

#ifdef CONFIG_SOCKET
	sock_init();
#endif

	fs_init();

	mm_stat(base, end);

	printk("ELKS version %s\n", system_utsname.release);

	kfork_proc(init_task);
	wake_up_process(&task[1]);

	/*
	 * We are now the idle task. We won't run unless no other process can run.
	 */
	while (1) {
		schedule();
#ifdef CONFIG_IDLE_HALT
		idle_halt ();
#endif
	}
}
/* ELKS (older) kernel entry: bring up subsystems, mark task #0's stack
 * canary and run queue, fork init as task #1, then idle forever. */
void start_kernel(void)
{
	seg_t base, end;

	/* We set the scheduler up as task #0, and this as task #1 */
	setup_arch(&base, &end);
	mm_init(base, end);
	init_IRQ();
	init_console();

#if 0
	/* Delay calibration disabled on this tree. */
	calibrate_delay();
#endif

	setup_mm(&base, &end);	/* Architecture specifics */
	tty_init();
	buffer_init();

#ifdef CONFIG_SOCKET
	sock_init();
#endif

	device_setup();
	inode_init();
	fs_init();
	sched_init();

	printk("ELKS version %s\n", system_utsname.release);

	/* Stack-overflow sentinel for task #0, and a self-linked run queue. */
	task[0].t_kstackm = KSTACK_MAGIC;
	task[0].next_run = task[0].prev_run = &task[0];
	kfork_proc(&task[1], init_task);

	/*
	 * We are now the idle task. We won't run unless no other process can run.
	 */
	while (1){
		schedule();
	}
}
/*
 * Architecture-independent kernel entry point.  Runs on the boot CPU with
 * interrupts disabled; brings every core subsystem up in strict dependency
 * order and finishes by handing off to rest_init() (never returns).
 */
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	debug_objects_early_init();

	/*
	 * Set up the the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them
	 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE "%s", linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists(NULL);
	page_alloc_init();

	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	//[email protected] 2011.11.14 begin
	//support lcd compatible
	//reviewed by [email protected]
	// NOTE(review): vendor hack — declaration after statements relies on
	// gcc's gnu89 mode, and p[4] assumes "lcd=" is followed by a letter.
#if defined(CONFIG_LCD_DRV_ALL)
	char *p = strstr(boot_command_line, "lcd=");
	if (p) {
		lcd_drv_index = p[4] - 'A';
		printk("lcd index = %d", lcd_drv_index);
	}
#endif
	//[email protected] 2011.11.14 end
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();
	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	rcu_init();
	radix_tree_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	prio_tree_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/* Interrupts are enabled now so all GFP allocations are safe. */
	gfp_allowed_mask = __GFP_BITS_MASK;

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	enable_debug_pagealloc();
	kmemtrace_init();
	kmemleak_init();
	debug_objects_mem_init();
	idr_init_cache();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(totalram_pages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init(totalram_pages);
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */
	sfi_init_late();

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If waken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 *
	 * Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI.
	 */
	cpuid = smp_processor_id();
	if (apic->wait_for_init_deassert && cpuid != 0)
		apic->wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/* A set callin bit means this CPU was already brought up. */
	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished it's STARTUP sequence?
		 */
		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	pr_debug("CALLIN, before setup_local_APIC()\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Need to setup vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	/* Make the sibling-map writes visible before callin is published. */
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
/* ia64 AP call-in: publish online state under the IPI/vector locks,
 * sync the ITC with the time keeper, init the interval timer, calibrate
 * (skipped for identical sibling CPUs), then signal the master. */
static void __cpuinit smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	/* An already-online cpuid means a duplicate wakeup. */
	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	lock_ipi_calllock();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	cpu_set(cpuid, cpu_online_map);
	unlock_ipi_calllock();
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP. Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(), which
		 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
		 * local_bh_enable(), which bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
	int cpuid, phys_id;

	/*
	 * If waken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 *
	 * Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI.
	 */
	cpuid = smp_processor_id();
	if (apic->wait_for_init_deassert && cpuid)
		while (!atomic_read(&init_deasserted))
			cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	pr_debug("CALLIN, before setup_local_APIC()\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Need to setup vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	/* Make the sibling-map writes visible before callin is published. */
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
/*
 * Linux 2.4-era architecture-independent kernel entry point.  Runs on the
 * boot CPU with interrupts disabled; initializes every subsystem in
 * dependency order and hands off to rest_init() (never returns).
 */
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern char saved_command_line[];

	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them
	 */
	lock_kernel();
	printk(linux_banner);
	setup_arch(&command_line);
	printk("Kernel command line: %s\n", saved_command_line);
	parse_options(command_line);
	trap_init();
	init_IRQ();
	sched_init();
	softirq_init();
	time_init();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
#ifdef CONFIG_MODULES
	init_modules();
#endif
	/* Kernel profiling: one bucket per (1 << prof_shift) text bytes. */
	if (prof_shift) {
		unsigned int size;
		/* only text is profiled */
		prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
		prof_len >>= prof_shift;
		size = prof_len * sizeof(unsigned int) + PAGE_SIZE-1;
		prof_buffer = (unsigned int *) alloc_bootmem(size);
	}

	kmem_cache_init();
	sti();
	calibrate_delay();
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
			initrd_start < min_low_pfn << PAGE_SHIFT) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",initrd_start,min_low_pfn << PAGE_SHIFT);
		initrd_start = 0;
	}
#endif
	mem_init();
	kmem_cache_sizes_init();
	pgtable_cache_init();

	/*
	 * For architectures that have highmem, num_mappedpages represents
	 * the amount of memory the kernel can use. For other architectures
	 * it's the same as the total pages. We need both numbers because
	 * some subsystems need to initialize based on how much memory the
	 * kernel can use.
	 */
	if (num_mappedpages == 0)
		num_mappedpages = num_physpages;

	fork_init(num_mappedpages);
	proc_caches_init();
	vfs_caches_init(num_physpages);
	buffer_init(num_physpages);
	page_cache_init(num_physpages);
#if defined(CONFIG_ARCH_S390)
	ccwcache_init();
#endif
	signals_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	check_bugs();
	printk("POSIX conformance testing by UNIFIX\n");

	/*
	 * We count on the initial thread going ok
	 * Like idlers init is an unlocked kernel thread, which will
	 * make syscalls (and thus be locked).
	 */
	smp_init();
#if defined(CONFIG_SYSVIPC)
	ipc_init();
#endif
	rest_init();
}
/* ia64 AP call-in: go online under the IPI lock, sync the ITC with the
 * BP, init the interval timer, calibrate, then signal the master. */
static void __devinit smp_callin (void)
{
	int cpuid, phys_id;
	extern void ia64_init_itm(void);

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();

	/* An already-online cpuid means a duplicate wakeup. */
	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	lock_ipi_calllock();
	cpu_set(cpuid, cpu_online_map);
	unlock_ipi_calllock();
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP. Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(), which
		 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
		 * local_bh_enable(), which bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with BP.\n");
		ia64_sync_itc(0);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();
	calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
/*
 * Run a JAM (STAPL) file against the JTAG hardware: read the whole file
 * into memory, verify its CRC, execute the named @action via
 * jam_execute(), and map the player's exit code onto this driver's
 * status codes.  Returns DRIVER_ERROR / INVALID_FILE / MEMORY_ERROR on
 * setup failure, otherwise the mapped execution result.
 *
 * NOTE(review): several error paths below leak resources — `fp` is not
 * fclose()d on the MEMORY_ERROR/short-fread returns, and `file_buffer`
 * is never freed in this function (presumably a global owned elsewhere;
 * confirm).  `exit_status` is never modified, so the `exit_status == 0`
 * test is always true.
 */
int jam(char* filename, char* action)
{
	BOOL error = FALSE;
	long offset = 0L;
	long error_line = 0L;
	JAM_RETURN_TYPE crc_result = JAMC_SUCCESS;
	JAM_RETURN_TYPE exec_result = JAMC_SUCCESS;
	unsigned short expected_crc = 0;
	unsigned short actual_crc = 0;
	char key[33] = {0};
	char value[257] = {0};
	int exit_status = 0;
	int arg = 0;
	int exit_code = 0;
	time_t start_time = 0;
	time_t end_time = 0;
	int time_delta = 0;
	char *workspace = NULL;
	char *init_list[10];
	int init_count = 0;
	FILE *fp = NULL;
	struct stat sbuf;
	long workspace_size = 0;
	char *exit_string = NULL;

	verbose = FALSE;

	init_list[0] = NULL;

	/* Lazily bring up the JTAG hardware on first use. */
	if (!jtag_hardware_initialized) {
		if (!initialize_jtag_hardware())
			return DRIVER_ERROR;
	}

	if (_access(filename, 0) != 0) {
		//fprintf(stderr, "Error: can't access file \"%s\"\n", filename);
		return INVALID_FILE;
	} else {
		//get length of file
		if (stat(filename, &sbuf) == 0)
			file_length = sbuf.st_size;

		fopen_s(&fp, filename, "rb");
		if (fp == NULL) {
			//fprintf(stderr, "Error: can't open file \"%s\"\n", filename);
			return INVALID_FILE;
		} else {
			//Read entire file into a buffer
			file_buffer = (char *) malloc((size_t) file_length);

			if (file_buffer == NULL) {
				//fprintf(stderr, "Error: can't allocate memory (%d Kbytes)\n",
				//(int) (file_length / 1024L));
				return MEMORY_ERROR;
			} else {
				if (fread(file_buffer, 1, (size_t) file_length, fp) !=
					(size_t) file_length) {
					//fprintf(stderr, "Error reading file \"%s\"\n", filename);
					return INVALID_FILE;
				}
			}

			fclose(fp);
		}

		if (exit_status == 0) {
			/* Windows NT detection selects the port-access method. */
			windows_nt = !(GetVersion() & 0x80000000);

			calibrate_delay();

			crc_result = jam_check_crc(file_buffer, file_length,
				&expected_crc, &actual_crc);

			switch (crc_result) {
			case JAMC_SUCCESS:
				//printf("CRC matched: CRC value = %04X\n", actual_crc);
				break;
			case JAMC_CRC_ERROR:
				//printf("CRC mismatch: expected %04X, actual %04X\n",
				//expected_crc, actual_crc);
				break;
			case JAMC_UNEXPECTED_END:
				//printf("Expected CRC not found, actual CRC value = %04X\n",
				//actual_crc);
				break;
			default:
				//printf("CRC function returned error code %d\n", crc_result);
				break;
			}

			exec_result = jam_execute(file_buffer, file_length, workspace,
				workspace_size, action, init_list, &error_line, &exit_code,
				NULL);

			if (exec_result == JAMC_SUCCESS) {
				/* Map STAPL player exit codes onto driver status codes;
				 * most non-fatal codes collapse to SUCCESS. */
				switch (exit_code) {
				case 0: exit_code = SUCCESS; break; //exit_string = "Success"; break;
				case 1: exit_code = SUCCESS; break; //exit_string = "Illegal initialization values"; break;
				case 2: exit_code = UNRECOGNIZED_DEVICE; break; //exit_string = "Unrecognized device"; break;
				case 3: exit_code = SUCCESS; break; //exit_string = "Device revision is not supported"; break;
				case 4: exit_code = PROGRAM_FAILURE; break; //exit_string = "Device programming failure"; break;
				case 5: exit_code = SUCCESS; break; //exit_string = "Device is not blank"; break;
				case 6: exit_code = PROGRAM_NOT_VERIFIED; break; //exit_string = "Device verify failure"; break;
				case 7: exit_code = SUCCESS; break; //exit_string = "SRAM configuration failure"; break;
				default: exit_code = SUCCESS; break; //exit_string = "Unknown exit code"; break;
				}
			} else if (exec_result < MAX_ERROR_CODE) {
				/* Encode the failing script line into the exit code. */
				exit_code = (error_line += 100);
			} else
				exit_code = UNKNOWN_ERROR;
		}
	}

	if (jtag_hardware_initialized)
		close_jtag_hardware();

	return (exit_code);
}
/*
 * Linux ~2.6.18-era architecture-independent kernel entry point.  Runs on
 * the boot CPU with interrupts disabled; initializes every subsystem in
 * dependency order and hands off to rest_init() (never returns).
 */
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	unwind_init();
	lockdep_init();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them
	 */
	lock_kernel();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE);
	printk(linux_banner);
	setup_arch(&command_line);
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	build_all_zonelists();
	page_alloc_init();
	printk(KERN_NOTICE "Kernel command line: %s\n", saved_command_line);
	parse_early_param();
	parse_args("Booting kernel", command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	sort_main_extable();
	trap_init();
	rcu_init();
	init_IRQ();
	pidhash_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	if (!irqs_disabled())
		printk("start_kernel(): bug: interrupts were enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
			initrd_start < min_low_pfn << PAGE_SHIFT) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",initrd_start,min_low_pfn << PAGE_SHIFT);
		initrd_start = 0;
	}
#endif
	vfs_caches_init_early();
	cpuset_init_early();
	mem_init();
	kmem_cache_init();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	calibrate_delay();
	pidmap_init();
	pgtable_cache_init();
	prio_tree_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	fork_init(num_physpages);
	proc_caches_init();
	buffer_init();
	unnamed_dev_init();
	key_init();
	security_init();
	vfs_caches_init(num_physpages);
	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}