/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
    unsigned int cpu = smp_processor_id();
    struct device_node *l2_cache;
    int i, base;

    atomic_inc(&init_mm.mm_count);
    current->active_mm = &init_mm;

    smp_store_cpu_info(cpu);
    set_dec(tb_ticks_per_jiffy);
    preempt_disable();
    cpu_callin_map[cpu] = 1;

    smp_ops->setup_cpu(cpu);
    if (smp_ops->take_timebase)
        smp_ops->take_timebase();

    if (system_state > SYSTEM_BOOTING)
        snapshot_timebase();

    secondary_cpu_time_init();

    ipi_call_lock();
    notify_cpu_starting(cpu);
    cpu_set(cpu, cpu_online_map);
    /* Update sibling maps */
    base = cpu_first_thread_in_core(cpu);
    for (i = 0; i < threads_per_core; i++) {
        if (cpu_is_offline(base + i))
            continue;
        cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
        cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));

        /* cpu_core_map should be a superset of
         * cpu_sibling_map even if we don't have cache
         * information, so update the former here, too.
         */
        cpu_set(cpu, per_cpu(cpu_core_map, base + i));
        cpu_set(base + i, per_cpu(cpu_core_map, cpu));
    }
    l2_cache = cpu_to_l2cache(cpu);
    for_each_online_cpu(i) {
        struct device_node *np = cpu_to_l2cache(i);
        if (!np)
            continue;
        if (np == l2_cache) {
            cpu_set(cpu, per_cpu(cpu_core_map, i));
            cpu_set(i, per_cpu(cpu_core_map, cpu));
        }
        of_node_put(np);
    }
    of_node_put(l2_cache);
    ipi_call_unlock();

    local_irq_enable();

    cpu_idle();
    return 0;
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
    time_t sec, old_sec;
    unsigned old_stamp, stamp, elapsed;

    if (ppc_md.time_init != NULL)
        timezone_offset = ppc_md.time_init();

    if (__USE_RTC()) {
        /* 601 processor: dec counts down by 128 every 128ns */
        tb_ticks_per_jiffy = DECREMENTER_COUNT_601;
        /* mulhwu_scale_factor(1000000000, 1000000) is 0x418937 */
        tb_to_us = 0x418937;
    } else {
        ppc_md.calibrate_decr();
        tb_to_ns_scale = mulhwu(tb_to_us, 1000 << 10);
    }

    /* Now that the decrementer is calibrated, it can be used in case the
     * clock is stuck, but the fact that we have to handle the 601
     * makes things more complex. Repeatedly read the RTC until the
     * next second boundary to try to achieve some precision. If there
     * is no RTC, we still need to set tb_last_stamp and
     * last_jiffy_stamp(cpu 0) to the current stamp.
     */
    stamp = get_native_tbl();
    if (ppc_md.get_rtc_time) {
        sec = ppc_md.get_rtc_time();
        elapsed = 0;
        do {
            old_stamp = stamp;
            old_sec = sec;
            stamp = get_native_tbl();
            if (__USE_RTC() && stamp < old_stamp)
                old_stamp -= 1000000000;
            elapsed += stamp - old_stamp;
            sec = ppc_md.get_rtc_time();
        } while (sec == old_sec && elapsed < 2 * HZ * tb_ticks_per_jiffy);
        if (sec == old_sec)
            printk("Warning: real time clock seems stuck!\n");
        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        /* No update now, we just read the time from the RTC! */
        last_rtc_update = xtime.tv_sec;
    }
    last_jiffy_stamp(0) = tb_last_stamp = stamp;

    /* Not exact, but the timer interrupt takes care of this */
    set_dec(tb_ticks_per_jiffy);

    /* If platform provided a timezone (pmac), we correct the time */
    if (timezone_offset) {
        sys_tz.tz_minuteswest = -timezone_offset / 60;
        sys_tz.tz_dsttime = 0;
        xtime.tv_sec -= timezone_offset;
    }

    set_normalized_timespec(&wall_to_monotonic,
                            -xtime.tv_sec, -xtime.tv_nsec);
}
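The 0x418937 constant above is easy to sanity-check: mulhwu returns the high 32 bits of an unsigned 32x32-bit multiply, so a ns-to-us scale factor is (1000000 << 32) / 1000000000 = 4294967 = 0x418937. A small userspace sketch (the *_model helpers are illustrative stand-ins for the kernel's assembly routines, not the real API):

#include <stdint.h>
#include <stdio.h>

/* High word of a 32x32 -> 64-bit unsigned multiply (what mulhwu computes). */
static uint32_t mulhwu_model(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a * b) >> 32);
}

/* Factor f such that mulhwu_model(f, x) ~= x * outscale / inscale. */
static uint32_t scale_factor_model(uint32_t inscale, uint32_t outscale)
{
    return (uint32_t)(((uint64_t)outscale << 32) / inscale);
}

int main(void)
{
    /* ns -> us: (1000000 << 32) / 1000000000 = 4294967 = 0x418937 */
    uint32_t f = scale_factor_model(1000000000, 1000000);

    printf("factor = %#x\n", f);
    /* 128000 ns is ~128 us; prints 127 because the factor is truncated. */
    printf("128000 ns = %u us\n", mulhwu_model(f, 128000));
    return 0;
}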
void wakeup_decrementer(void)
{
    set_dec(tb_ticks_per_jiffy);
    /* No currently-supported powerbook has a 601,
     * so use get_tbl, not native
     */
    last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
}
static inline void rthal_setup_periodic_dec(void)
{
#ifdef CONFIG_40x
    mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);  /* Auto-reload on. */
    mtspr(SPRN_PIT, tb_ticks_per_jiffy);
#else  /* !CONFIG_40x */
    set_dec(tb_ticks_per_jiffy);
#endif /* CONFIG_40x */
}
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
int timer_interrupt(struct pt_regs *regs)
{
    int next_dec;
    unsigned long cur_tb;
    struct paca_struct *lpaca = get_paca();
    unsigned long cpu = lpaca->xPacaIndex;

    irq_enter();

#ifndef CONFIG_PPC_ISERIES
    ppc64_do_profile(regs);
#endif

    lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;

    while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
#ifdef CONFIG_SMP
        smp_local_timer_interrupt(regs);
#endif
        if (cpu == boot_cpuid) {
            write_seqlock(&xtime_lock);
            tb_last_stamp = lpaca->next_jiffy_update_tb;
            do_timer(regs);
            timer_sync_xtime(cur_tb);
            timer_check_rtc();
            write_sequnlock(&xtime_lock);
            if (adjusting_time && (time_adjust == 0))
                ppc_adjtimex();
        }
        lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;
    }

    next_dec = lpaca->next_jiffy_update_tb - cur_tb;
    if (next_dec > lpaca->default_decr)
        next_dec = lpaca->default_decr;
    set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
    {
        struct ItLpQueue *lpq = lpaca->lpQueuePtr;
        if (lpq && ItLpQueue_isLpIntPending(lpq))
            lpEvent_count += ItLpQueue_process(lpq, regs);
    }
#endif

    irq_exit();

    return 1;
}
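Stripped of the paca and iSeries details, the reload logic above reduces to a catch-up loop plus a clamp: advance past every missed jiffy boundary, then program the decrementer for the remaining distance, never more than one default period. A minimal standalone model (types and names invented for illustration):

#include <stdint.h>

/* Hypothetical per-CPU state mirroring next_jiffy_update_tb / default_decr. */
struct dec_state {
    uint64_t next_jiffy_tb;    /* timebase value of the next jiffy boundary */
    uint64_t ticks_per_jiffy;  /* timebase ticks in 1/HZ seconds */
    uint32_t default_decr;     /* upper bound for one decrementer period */
};

/*
 * Model of the reload logic: advance past every missed jiffy boundary,
 * count the elapsed jiffies, and return the (clamped) value to program
 * into the decrementer.  cur_tb is the current timebase read.
 */
static uint32_t dec_reload(struct dec_state *s, uint64_t cur_tb,
                           unsigned *jiffies_elapsed)
{
    uint64_t next_dec;

    *jiffies_elapsed = 0;
    while (s->next_jiffy_tb <= cur_tb) {   /* one iteration per missed tick */
        s->next_jiffy_tb += s->ticks_per_jiffy;
        (*jiffies_elapsed)++;
    }
    next_dec = s->next_jiffy_tb - cur_tb;
    if (next_dec > s->default_decr)        /* never over-sleep the decrementer */
        next_dec = s->default_decr;
    return (uint32_t)next_dec;
}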
int interrupt_init(void)
{
    int ret;

    /* call cpu specific function from $(CPU)/interrupts.c */
    ret = interrupt_init_cpu(&decrementer_count);
    if (ret)
        return ret;

    set_dec(decrementer_count);

    set_msr(get_msr() | MSR_EE);

    return 0;
}
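interrupt_init_cpu() supplies decrementer_count for the board's CPU; on many PowerPC targets the value is simply the timebase frequency divided by the tick rate. A hedged sketch of such a CPU-specific implementation (get_tbclk() and CFG_HZ do exist in U-Boot of this vintage, but the body here is illustrative, not any particular board's code):

/* Illustrative only: a common shape for a CPU-specific interrupt_init_cpu().
 * get_tbclk() returns the timebase frequency in Hz; CFG_HZ is the board's
 * tick rate, so the quotient is one tick period in decrementer counts.
 */
int interrupt_init_cpu(unsigned *decrementer_count)
{
    *decrementer_count = get_tbclk() / CFG_HZ;
    return 0;
}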
void __init smp_callin(void)
{
    int cpu = current->processor;

    smp_store_cpu_info(cpu);
    set_dec(paca[cpu].default_decr);
    cpu_callin_map[cpu] = 1;

    ppc_md.smp_setup_cpu(cpu);

    init_idle();

    set_bit(smp_processor_id(), &cpu_online_map);

    while (!smp_commenced)
        barrier();

    __sti();
}
void __init smp_callin(void)
{
    int cpu = current->processor;

    smp_store_cpu_info(cpu);
    smp_ops->setup_cpu(cpu);
    set_dec(tb_ticks_per_jiffy);
    cpu_online_map |= 1UL << cpu;
    mb();
    cpu_callin_map[cpu] = 1;

    while (!smp_commenced)
        barrier();

    /* see smp_commence for more info */
    if (!smp_tb_synchronized && smp_num_cpus == 2)
        smp_software_tb_sync(cpu);

    __sti();
}
/* Processor coming up starts here */
int __devinit start_secondary(void *unused)
{
    int cpu;

    atomic_inc(&init_mm.mm_count);
    current->active_mm = &init_mm;

    cpu = smp_processor_id();
    smp_store_cpu_info(cpu);
    set_dec(tb_ticks_per_jiffy);
    cpu_callin_map[cpu] = 1;

    printk("CPU %i done callin...\n", cpu);
    smp_ops->setup_cpu(cpu);
    printk("CPU %i done setup...\n", cpu);
    local_irq_enable();
    smp_ops->take_timebase();
    printk("CPU %i done timebase take...\n", cpu);

    return cpu_idle(NULL);
}
void __init time_init(void)
{
    /* This function is only called on the boot processor */
    unsigned long flags;
    struct rtc_time tm;

    ppc_md.calibrate_decr();

#ifdef CONFIG_PPC_ISERIES
    if (!piranha_simulator)
#endif
        ppc_md.get_boot_time(&tm);

    write_seqlock_irqsave(&xtime_lock, flags);
    xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                          tm.tm_hour, tm.tm_min, tm.tm_sec);
    tb_last_stamp = get_tb();
    do_gtod.tb_orig_stamp = tb_last_stamp;
    do_gtod.varp = &do_gtod.vars[0];
    do_gtod.var_idx = 0;
    do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
    do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
    do_gtod.varp->tb_to_xs = tb_to_xs;
    do_gtod.tb_to_us = tb_to_us;
    xtime_sync_interval = tb_ticks_per_sec - (tb_ticks_per_sec / 8);
    next_xtime_sync_tb = tb_last_stamp + xtime_sync_interval;

    time_freq = 0;

    xtime.tv_nsec = 0;
    last_rtc_update = xtime.tv_sec;
    set_normalized_timespec(&wall_to_monotonic,
                            -xtime.tv_sec, -xtime.tv_nsec);
    write_sequnlock_irqrestore(&xtime_lock, flags);

    /* Not exact, but the timer interrupt takes care of this */
    set_dec(tb_ticks_per_jiffy);
}
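The do_gtod fields above feed the lock-free gettimeofday path, which works in "xsec" fixed-point units. Assuming XSEC_PER_SEC is 2^20 and tb_to_xs is a 0.64 fixed-point ratio (both assumptions of this sketch, which also relies on GCC/Clang's unsigned __int128), the conversion can be modeled as:

#include <stdint.h>

#define XSEC_PER_SEC_MODEL (1024 * 1024)  /* assumed value of XSEC_PER_SEC */

/* High 64 bits of a 64x64 -> 128-bit unsigned multiply (mulhdu's result). */
static uint64_t mulhdu_model(uint64_t a, uint64_t b)
{
    return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

/*
 * Model of the do_gtod math: tb_to_xs is chosen so that
 * mulhdu(tb_delta, tb_to_xs) yields elapsed time in xsec, i.e.
 * tb_to_xs ~= (XSEC_PER_SEC << 64) / tb_ticks_per_sec.
 */
static uint64_t tb_delta_to_xsec(uint64_t tb_delta, uint64_t tb_ticks_per_sec)
{
    uint64_t tb_to_xs =
        (uint64_t)((((unsigned __int128)XSEC_PER_SEC_MODEL) << 64)
                   / tb_ticks_per_sec);

    return mulhdu_model(tb_delta, tb_to_xs);
}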
void timer_interrupt(struct pt_regs *regs)
{
    /* call cpu specific function from $(CPU)/interrupts.c */
    timer_interrupt_cpu(regs);

    /* Restore Decrementer Count */
    set_dec(decrementer_count);

    timestamp++;

#if defined(CONFIG_WATCHDOG) || defined(CONFIG_HW_WATCHDOG)
    if ((timestamp % (CFG_WATCHDOG_FREQ)) == 0)
        WATCHDOG_RESET();
#endif /* CONFIG_WATCHDOG || CONFIG_HW_WATCHDOG */

#ifdef CONFIG_STATUS_LED
    status_led_tick(timestamp);
#endif /* CONFIG_STATUS_LED */

#ifdef CONFIG_SHOW_ACTIVITY
    board_show_activity(timestamp);
#endif /* CONFIG_SHOW_ACTIVITY */
}
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
    unsigned int cpu = smp_processor_id();
    struct device_node *l2_cache;
    int i, base;

    atomic_inc(&init_mm.mm_count);
    current->active_mm = &init_mm;

    smp_store_cpu_info(cpu);

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
    /* Clear any pending timer interrupts */
    mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

    /* Enable decrementer interrupt */
    mtspr(SPRN_TCR, TCR_DIE);
#endif
    set_dec(tb_ticks_per_jiffy);
    preempt_disable();
    cpu_callin_map[cpu] = 1;

    if (smp_ops->setup_cpu)
        smp_ops->setup_cpu(cpu);
    if (smp_ops->take_timebase)
        smp_ops->take_timebase();

    if (system_state > SYSTEM_BOOTING)
        snapshot_timebase();

    secondary_cpu_time_init();

    ipi_call_lock();
    notify_cpu_starting(cpu);
    set_cpu_online(cpu, true);
    /* Update sibling maps */
    base = cpu_first_thread_in_core(cpu);
    for (i = 0; i < threads_per_core; i++) {
        if (cpu_is_offline(base + i))
            continue;
        cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
        cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

        /* cpu_core_map should be a superset of
         * cpu_sibling_map even if we don't have cache
         * information, so update the former here, too.
         */
        cpumask_set_cpu(cpu, cpu_core_mask(base + i));
        cpumask_set_cpu(base + i, cpu_core_mask(cpu));
    }
    l2_cache = cpu_to_l2cache(cpu);
    for_each_online_cpu(i) {
        struct device_node *np = cpu_to_l2cache(i);
        if (!np)
            continue;
        if (np == l2_cache) {
            cpumask_set_cpu(cpu, cpu_core_mask(i));
            cpumask_set_cpu(i, cpu_core_mask(cpu));
        }
        of_node_put(np);
    }
    of_node_put(l2_cache);
    ipi_call_unlock();

    local_irq_enable();

    cpu_idle();
    return 0;
}
void __init smp_software_tb_sync(int cpu)
{
#define PASSES 4    /* 4 passes.. */
    int pass;
    int i, j;

    /* stop - start will be the number of timebase ticks it takes for cpu0
     * to send a message to all others and the first response to show up.
     *
     * ASSUMPTION: this time is similar for all cpus
     * ASSUMPTION: the time to send a one-way message is ping/2
     */
    register unsigned long start = 0;
    register unsigned long stop = 0;
    register unsigned long temp = 0;

    set_tb(0, 0);

    /* multiple passes to get in l1 cache.. */
    for (pass = 2; pass < 2 + PASSES; pass++) {
        if (cpu == 0) {
            mb();
            for (i = j = 1; i < smp_num_cpus; i++, j++) {
                /* skip stuck cpus */
                while (!cpu_callin_map[j])
                    ++j;
                while (cpu_callin_map[j] != pass)
                    barrier();
            }
            mb();
            tb_sync_flag = pass;
            start = get_tbl();    /* start timing */
            while (tb_sync_flag)
                mb();
            stop = get_tbl();     /* end timing */
            /* theoretically, the divisor should be 2, but
             * I get better results on my dual mtx. someone
             * please report results on other smp machines..
             */
            tb_offset = (stop - start) / 4;
            mb();
            tb_sync_flag = pass;
            udelay(10);
            mb();
            tb_sync_flag = 0;
            mb();
            set_tb(0, 0);
            mb();
        } else {
            cpu_callin_map[cpu] = pass;
            mb();
            while (!tb_sync_flag)
                mb();            /* wait for cpu0 */
            mb();
            tb_sync_flag = 0;    /* send response for timing */
            mb();
            while (!tb_sync_flag)
                mb();
            temp = tb_offset;    /* make sure offset is loaded */
            while (tb_sync_flag)
                mb();
            set_tb(0, temp);     /* now, set the timebase */
            mb();
        }
    }

    if (cpu == 0) {
        smp_tb_synchronized = 1;
        printk("smp_software_tb_sync: %d passes, final offset: %ld\n",
               PASSES, tb_offset);
    }
    /* so time.c doesn't get confused */
    set_dec(tb_ticks_per_jiffy);
    last_jiffy_stamp(cpu) = 0;
}
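The divisor comment above is the interesting part: cpu0 measures a round trip and seeds the secondary's timebase with a fraction of it, on the assumption that the one-way latency is half the ping. A standalone model of that estimate (the function name is invented for illustration):

/*
 * Model of the offset arithmetic in smp_software_tb_sync(): cpu0 reads
 * the timebase before raising tb_sync_flag (start) and after seeing the
 * secondary clear it (stop), then converts the round trip into a one-way
 * latency estimate.  The theoretical divisor is 2 (one-way = ping/2);
 * the code above uses 4 because that measured better on the author's
 * hardware.
 */
static inline unsigned long tb_sync_offset(unsigned long start,
                                           unsigned long stop,
                                           unsigned long divisor)
{
    return (stop - start) / divisor;
}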
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 * We set it up to overflow again in 1/HZ seconds.
 */
void timer_interrupt(struct pt_regs *regs)
{
    int next_dec;
    unsigned long cpu = smp_processor_id();
    unsigned jiffy_stamp = last_jiffy_stamp(cpu);
    extern void do_IRQ(struct pt_regs *);

    if (atomic_read(&ppc_n_lost_interrupts) != 0)
        do_IRQ(regs);

    MARK(kernel_trap_entry, "%d struct pt_regs %p", regs->trap, regs);

    irq_enter();

    while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
        jiffy_stamp += tb_ticks_per_jiffy;

        profile_tick(CPU_PROFILING, regs);
        update_process_times(user_mode(regs));

        if (smp_processor_id())
            continue;

        /* We are in an interrupt, no need to save/restore flags */
        write_seqlock(&xtime_lock);
        tb_last_stamp = jiffy_stamp;
#ifdef CONFIG_LTT
        ltt_reset_timestamp();
#endif /* CONFIG_LTT */
        do_timer(regs);

        /*
         * update the rtc when needed, this should be performed on the
         * right fraction of a second. Half or full second ?
         * Full second works on mk48t59 clocks, others need testing.
         * Note that this update is basically only used through
         * the adjtimex system calls. Setting the HW clock in
         * any other way is a /dev/rtc and userland business.
         * This is still wrong by -0.5/+1.5 jiffies because of the
         * timer interrupt resolution and possible delay, but here we
         * hit a quantization limit which can only be solved by higher
         * resolution timers and decoupling time management from timer
         * interrupts. This is also wrong on the clocks
         * which require being written at the half second boundary.
         * We should have an rtc call that only sets the minutes and
         * seconds like on Intel to avoid problems with non UTC clocks.
         */
        if (ppc_md.set_rtc_time && ntp_synced() &&
            xtime.tv_sec - last_rtc_update >= 659 &&
            abs((xtime.tv_nsec / 1000) - (1000000 - 1000000 / HZ)) < 500000 / HZ &&
            jiffies - wall_jiffies == 1) {
            if (ppc_md.set_rtc_time(xtime.tv_sec + 1 + timezone_offset) == 0)
                last_rtc_update = xtime.tv_sec + 1;
            else
                /* Try again one minute later */
                last_rtc_update += 60;
        }
        write_sequnlock(&xtime_lock);
    }
    if (!disarm_decr[smp_processor_id()])
        set_dec(next_dec);
    last_jiffy_stamp(cpu) = jiffy_stamp;

    if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
        ppc_md.heartbeat();

    irq_exit();

    trace_kernel_trap_exit();
    MARK(kernel_trap_exit, MARK_NOARGS);
}
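The RTC write-back condition above packs several checks into one if. Pulled apart as a standalone predicate (a sketch with plain C types that mirrors the logic rather than reusing kernel state; the jiffies/wall_jiffies check is omitted):

#include <stdbool.h>
#include <stdlib.h>    /* labs() */

#define HZ 100    /* assumed tick rate for this example */

/*
 * Standalone model of the RTC write-back gate: write at most every
 * 659s (~11 min), only when NTP reports the clock synced, and only in
 * the tick that lands just before a second boundary, so that writing
 * sec + 1 is accurate by the time the write takes effect.
 */
static bool should_update_rtc(bool ntp_is_synced, long tv_sec, long tv_nsec,
                              long last_rtc_update)
{
    long usec = tv_nsec / 1000;

    return ntp_is_synced &&
           tv_sec - last_rtc_update >= 659 &&
           labs(usec - (1000000 - 1000000 / HZ)) < 500000 / HZ;
}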
/* Activate a secondary processor. */
__cpuinit void start_secondary(void *unused)
{
    unsigned int cpu = smp_processor_id();
    struct device_node *l2_cache;
    int i, base;

    atomic_inc(&init_mm.mm_count);
    current->active_mm = &init_mm;

    smp_store_cpu_info(cpu);
    set_dec(tb_ticks_per_jiffy);
    preempt_disable();
    cpu_callin_map[cpu] = 1;

    if (smp_ops->setup_cpu)
        smp_ops->setup_cpu(cpu);
    if (smp_ops->take_timebase)
        smp_ops->take_timebase();

    secondary_cpu_time_init();

#ifdef CONFIG_PPC64
    if (system_state == SYSTEM_RUNNING)
        vdso_data->processorCount++;

    vdso_getcpu_init();
#endif
    notify_cpu_starting(cpu);
    set_cpu_online(cpu, true);
    /* Update sibling maps */
    base = cpu_first_thread_sibling(cpu);
    for (i = 0; i < threads_per_core; i++) {
        if (cpu_is_offline(base + i))
            continue;
        cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
        cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

        /* cpu_core_map should be a superset of
         * cpu_sibling_map even if we don't have cache
         * information, so update the former here, too.
         */
        cpumask_set_cpu(cpu, cpu_core_mask(base + i));
        cpumask_set_cpu(base + i, cpu_core_mask(cpu));
    }
    l2_cache = cpu_to_l2cache(cpu);
    for_each_online_cpu(i) {
        struct device_node *np = cpu_to_l2cache(i);
        if (!np)
            continue;
        if (np == l2_cache) {
            cpumask_set_cpu(cpu, cpu_core_mask(i));
            cpumask_set_cpu(i, cpu_core_mask(cpu));
        }
        of_node_put(np);
    }
    of_node_put(l2_cache);

    local_irq_enable();

    cpu_startup_entry(CPUHP_ONLINE);

    BUG();
}
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 * We set it up to overflow again in 1/HZ seconds.
 */
int timer_interrupt(struct pt_regs *regs)
{
    int next_dec;
    unsigned long cpu = smp_processor_id();
    unsigned jiffy_stamp = last_jiffy_stamp(cpu);
    extern void do_IRQ(struct pt_regs *);

    if (atomic_read(&ppc_n_lost_interrupts) != 0)
        do_IRQ(regs);

    hardirq_enter(cpu);

    while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) < 0) {
        jiffy_stamp += tb_ticks_per_jiffy;
        if (!user_mode(regs))
            ppc_do_profile(instruction_pointer(regs));

        if (unlikely(!heartbeat_count(cpu)--) && heartbeat_reset(cpu)) {
            ppc_md.heartbeat();
            heartbeat_count(cpu) = heartbeat_reset(cpu);
        }

        if (cpu)
            continue;

        /* We are in an interrupt, no need to save/restore flags */
        write_lock(&xtime_lock);
        tb_last_stamp = jiffy_stamp;
        do_timer(regs);

        /*
         * update the rtc when needed, this should be performed on the
         * right fraction of a second. Half or full second ?
         * Full second works on mk48t59 clocks, others need testing.
         * Note that this update is basically only used through
         * the adjtimex system calls. Setting the HW clock in
         * any other way is a /dev/rtc and userland business.
         * This is still wrong by -0.5/+1.5 jiffies because of the
         * timer interrupt resolution and possible delay, but here we
         * hit a quantization limit which can only be solved by higher
         * resolution timers and decoupling time management from timer
         * interrupts. This is also wrong on the clocks
         * which require being written at the half second boundary.
         * We should have an rtc call that only sets the minutes and
         * seconds like on Intel to avoid problems with non UTC clocks.
         */
        if (ppc_md.set_rtc_time && (time_status & STA_UNSYNC) == 0 &&
            xtime.tv_sec - last_rtc_update >= 659 &&
            abs(xtime.tv_usec - (1000000 - 1000000 / HZ)) < 500000 / HZ &&
            jiffies - wall_jiffies == 1) {
            if (ppc_md.set_rtc_time(xtime.tv_sec + 1 + time_offset) == 0)
                last_rtc_update = xtime.tv_sec + 1;
            else
                /* Try again one minute later */
                last_rtc_update += 60;
        }
        write_unlock(&xtime_lock);
    }
    if (!disarm_decr[cpu])
        set_dec(next_dec);
    last_jiffy_stamp(cpu) = jiffy_stamp;

#ifdef CONFIG_SMP
    smp_local_timer_interrupt(regs);
#endif /* CONFIG_SMP */

    hardirq_exit(cpu);

    if (softirq_pending(cpu))
        do_softirq();

    return 1;    /* lets ret_from_int know we can do checks */
}