char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) { int n = 0; struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; struct bcr_identity *core = &cpu->core; const struct cpuinfo_data *tbl; int be = 0; #ifdef CONFIG_CPU_BIG_ENDIAN be = 1; #endif FIX_PTR(cpu); n += scnprintf(buf + n, len - n, "\nARC IDENTITY\t: Family [%#02x]" " Cpu-id [%#02x] Chip-id [%#4x]\n", core->family, core->cpu_id, core->chip_id); for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { if ((core->family >= tbl->info.id) && (core->family <= tbl->up_range)) { n += scnprintf(buf + n, len - n, "processor\t: %s %s\n", tbl->info.str, be ? "[Big Endian]" : ""); break; } } if (tbl->info.id == 0) n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n"); n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n", (unsigned int)(arc_get_core_freq() / 1000000), (unsigned int)(arc_get_core_freq() / 10000) % 100); n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n", (cpu->timers & 0x200) ? "TIMER1" : "", (cpu->timers & 0x100) ? "TIMER0" : ""); n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n", cpu->vec_base); n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n", cpu->uncached_base); return buf; }
/*
 * Derive the base clock for the early serial console from the core clock.
 * The Abilis TB10x feeds its UART with core-clock / 3; everyone else
 * uses the core clock directly.
 */
static void __init arc_set_early_base_baud(unsigned long dt_root)
{
	unsigned int freq = arc_get_core_freq();
	int is_tb10x = of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x");

	arc_base_baud = is_tb10x ? freq / 3 : freq;
}
/*
 * clockevents callback: program TIMER0 for periodic (HZ) ticks.
 *
 * At X Hz, 1 sec = 1000ms -> X cycles, so one tick period is X / HZ
 * cycles (e.g. 10ms -> X / 100 cycles).
 */
static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
	unsigned int cycles_per_tick = arc_get_core_freq() / HZ;

	arc_timer_event_setup(cycles_per_tick);

	return 0;
}
/* * Setup the local event timer for @cpu */ void arc_local_timer_setup() { struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); int cpu = smp_processor_id(); evt->cpumask = cpumask_of(cpu); clockevents_config_and_register(evt, arc_get_core_freq(), 0, ARC_TIMER_MAX); /* setup the per-cpu timer IRQ handler - for all cpus */ arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler, "Timer0 (per-cpu-tick)", evt); }
/*
 * clockevents mode-switch callback.
 *
 * Only PERIODIC needs the timer programmed here (one event every
 * core-freq / HZ cycles); ONESHOT is armed per-event elsewhere, and
 * all other modes are ignored.
 */
static void arc_clkevent_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *dev)
{
	if (mode == CLOCK_EVT_MODE_PERIODIC)
		arc_timer_event_setup(arc_get_core_freq() / HZ);
}
/* * Setup the local event timer for @cpu */ void arc_local_timer_setup(unsigned int cpu) { struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); clk->cpumask = cpumask_of(cpu); clockevents_config_and_register(clk, arc_get_core_freq(), 0, ARC_TIMER_MAX); /* * setup the per-cpu timer IRQ handler - for all cpus * For non boot CPU explicitly unmask at intc * setup_irq() -> .. -> irq_startup() already does this on boot-cpu */ if (!cpu) setup_irq(TIMER0_IRQ, &arc_timer_irq); else arch_unmask_irq(TIMER0_IRQ); }
/*
 * Called from start_kernel() - boot CPU only
 *
 * - Sets up the h/w timers applicable on the boot cpu.
 * - Sets up global timer state:
 *   - "counting" timer: registered as a clocksource usable across CPUs
 *     (provided the underlying counter h/w is synchronized across cores)
 *   - "event" timer: TIMER0 IRQ setup (platform agnostic)
 */
void __init time_init(void)
{
	/*
	 * arc_counter_setup() starts the free-flowing counter and reports
	 * whether it is usable as a clocksource; only register it if so.
	 *
	 * 32 bits hold values up to 4,294,967,295, so any clock up to
	 * 4.29 GHz is safely representable here.
	 */
	if (arc_counter_setup())
		clocksource_register_hz(&arc_counter, arc_get_core_freq());

	/* periodic event timer for this (boot) cpu */
	arc_local_timer_setup(smp_processor_id());

	if (machine_desc->init_time)
		machine_desc->init_time();
}
/*
 * Setup the local event timer for @cpu.
 * N.B. weak so that some exotic ARC SoCs can completely override it
 */
void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
{
	struct clock_event_device *evt = &per_cpu(arc_clockevent_device, cpu);

	evt->cpumask = cpumask_of(cpu);

	/* derive mult/shift from the core clock, then the max deadline */
	clockevents_calc_mult_shift(evt, arc_get_core_freq(), 5);
	evt->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, evt);

	clockevents_register_device(evt);

	/*
	 * TIMER0 IRQ handler is registered once, on the boot cpu, where
	 * setup_irq() -> .. -> irq_startup() also unmasks it at the intc.
	 * Secondary cpus only need to unmask it explicitly.
	 */
	if (cpu)
		arch_unmask_irq(TIMER0_IRQ);
	else
		setup_irq(TIMER0_IRQ, &arc_timer_irq);
}
/*
 * Render a /proc/cpuinfo-style description of CPU @cpu_id into @buf
 * (at most @len bytes): identity, processor/ISA name, clock speed,
 * timers, ISA extensions, multiplier options and branch predictor.
 * Returns @buf for caller convenience.
 */
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
	struct bcr_identity *core = &cpu->core;
	const struct cpuinfo_data *tbl;
	char *isa_nm;
	int i, be, atomic;
	int n = 0;

	FIX_PTR(cpu);

	/* Endianness / atomic-op capability come from different places per ISA */
	if (is_isa_arcompact()) {
		isa_nm = "ARCompact";
		be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);

		atomic = cpu->isa.atomic1;
		if (!cpu->isa.ver)	/* ISA BCR absent, use Kconfig info */
			atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
	} else {
		isa_nm = "ARCv2";
		be = cpu->isa.be;
		atomic = cpu->isa.atomic;
	}

	n += scnprintf(buf + n, len - n,
		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
		       core->family, core->cpu_id, core->chip_id);

	/* Map the family id to a printable name via the known-CPU table */
	for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
		if ((core->family >= tbl->info.id) &&
		    (core->family <= tbl->up_range)) {
			n += scnprintf(buf + n, len - n,
				       "processor [%d]\t: %s (%s ISA) %s\n",
				       cpu_id, tbl->info.str, isa_nm,
				       IS_AVAIL1(be, "[Big-Endian]"));
			break;
		}
	}

	if (tbl->info.id == 0)	/* fell off the table without a match */
		n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");

	n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
		       (unsigned int)(arc_get_core_freq() / 1000000),
		       (unsigned int)(arc_get_core_freq() / 10000) % 100);

	/*
	 * NOTE(review): the IS_AVAIL2() uses below appear to expand to TWO
	 * printf arguments each (availability string + "(not used)" marker),
	 * which is why the format strings carry more %s conversions than
	 * visible argument expressions — confirm against the macro
	 * definitions before touching these calls.
	 */
	n += scnprintf(buf + n, len - n,
		       "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
		       IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
				 CONFIG_ARC_HAS_RTC));

	/* i records whether this line printed anything, to decide on a wrap */
	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
			   IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
			   IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
			   IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));

	if (i)
		n += scnprintf(buf + n, len - n, "\n\t\t: ");

	/* Multiplier block: plain "mpy" on ARCompact, option number on ARCv2 */
	if (cpu->extn_mpy.ver) {
		if (cpu->extn_mpy.ver <= 0x2) {	/* ARCompact */
			n += scnprintf(buf + n, len - n, "mpy ");
		} else {
			int opt = 2;	/* stock MPY/MPYH */

			if (cpu->extn_mpy.dsp)	/* OPT 7-9 */
				opt = cpu->extn_mpy.dsp + 6;

			n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
		}
	}

	n += scnprintf(buf + n, len - n,
		       "%s%s%s%s%s%s%s%s\n",
		       IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
		       IS_AVAIL1(cpu->extn.norm, "norm "),
		       IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
		       IS_AVAIL1(cpu->extn.swap, "swap "),
		       IS_AVAIL1(cpu->extn.minmax, "minmax "),
		       IS_AVAIL1(cpu->extn.crc, "crc "),
		       IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));

	/* Branch predictor details, only if the BPU block exists */
	if (cpu->bpu.ver)
		n += scnprintf(buf + n, len - n,
			       "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
			       IS_AVAIL1(cpu->bpu.full, "full"),
			       IS_AVAIL1(!cpu->bpu.full, "partial"),
			       cpu->bpu.num_cache, cpu->bpu.num_pred);

	return buf;
}