Example #1
static int __init godnet_clocksource_init(void)
{
	unsigned long rate = timer1_clk_khz * 1000;

#ifdef CONFIG_LOCAL_TIMERS
	twd_base = (void *)IO_ADDRESS(REG_BASE_A9_PERI) + REG_SC_LOCAL_TIMER;
	edb_trace();
	edb_putstr("twd_base :\n");
	edb_puthex((int)twd_base);
	edb_putstr("\n");
#endif
	writel(0, CFG_TIMER_VABASE + REG_TIMER1_CONTROL);
	writel(0xffffffff, CFG_TIMER_VABASE + REG_TIMER1_RELOAD);
	writel(0xffffffff, CFG_TIMER_VABASE + REG_TIMER1_VALUE);
	writel(CFG_TIMER_CONTROL, CFG_TIMER_VABASE + REG_TIMER1_CONTROL);

	/* calculate mult/shift from the clock rate for better accuracy */
	if (clocksource_register_hz(&godnet_clocksource, rate))
		panic("register clocksource %s error\n",
				godnet_clocksource.name);

	/* sched_clock reuses the mult/shift computed for the clocksource */
	init_fixed_sched_clock(&cd, godnet_update_sched_clock, 32, rate,
			godnet_clocksource.mult, godnet_clocksource.shift);

	return 0;
}
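clocksource_register_hz() derives mult and shift so that nanoseconds can be computed from raw timer cycles without a division on the hot path. A minimal restatement of that conversion, mirroring the kernel's clocksource_cyc2ns() (kernel context assumed, types from <linux/types.h>):

/*
 * Restatement of the kernel's clocksource_cyc2ns(): mult/shift are
 * chosen so that mult / 2^shift ~= NSEC_PER_SEC / rate.  Intended for
 * small cycle deltas; the intermediate product can overflow otherwise.
 */
static inline s64 timer_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
	return (s64)((cycles * mult) >> shift);
}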
Example #2
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned long addr;
	int i;

	edb_trace(1);
	edb_putstr("smp_prepare_cpus\n");

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
			"hisik3: strange CM count of 0? Default to 1\n");

		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "hisik3: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores) {
		WARN(1, "hisik3: max_cpus must not exceed the number of present cores\n");
		max_cpus = ncores;
	}

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	addr = (unsigned long) IO_ADDRESS(MEMORY_AXI_SECOND_CPU_BOOT_ADDR);

	printk(KERN_INFO "poking secondary entry 0x%lx into boot register at 0x%lx\n",
	       (unsigned long) virt_to_phys(k3v2_secondary_startup), addr);

	/*
	 * Write the address of secondary startup into the system-wide flags
	 * register. The BootMonitor waits for this register to become
	 * non-zero.
	 */
	writel(BSYM(virt_to_phys(k3v2_secondary_startup)), addr);

	wmb();
	flush_cache_all();

	edb_putstr("smp_prepare_cpus out\n");
}
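BSYM() exists because a Thumb-2 kernel's entry point must have bit 0 of its address set before being used as a bx-style branch target; the boot monitor branches to the value written above. A restatement of the macro, mirroring arch/arm/include/asm/unified.h:

/* Restatement of the kernel's BSYM(): set bit 0 of a code address in
 * Thumb-2 builds so the branch switches to Thumb state. */
#ifdef CONFIG_THUMB2_KERNEL
#define BSYM(sym)	((sym) + 1)
#else
#define BSYM(sym)	(sym)
#endif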
Example #3
/* enable timer clk in sctl reg */
static void enable_timer_clk(void)
{
	struct clk *clk;
	int retval;

	clk = clk_get(NULL, "clk_timer0");
	if (IS_ERR(clk)) {
		edb_putstr("clk timer0 get failed\n");
		return;
	}

	retval = clk_enable(clk);
	if (retval) {
		edb_putstr("clk_enable timer0 failed\n");
		clk_put(clk);	/* drop the reference taken by clk_get() */
		return;
	}
}
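Since enable_timer_clk() keeps its clk reference for as long as the timer runs, a matching disable path would balance both the enable and that reference. A sketch under the same clk API; disable_timer_clk() is a hypothetical helper, not part of the source:

/* disable timer clk in sctl reg -- hypothetical counterpart to
 * enable_timer_clk(), reusing the same "clk_timer0" lookup name.
 * Note the reference held since enable_timer_clk() would still need
 * its own clk_put(). */
static void disable_timer_clk(void)
{
	struct clk *clk;

	clk = clk_get(NULL, "clk_timer0");
	if (IS_ERR(clk)) {
		edb_putstr("clk timer0 get failed\n");
		return;
	}

	clk_disable(clk);	/* balances the earlier clk_enable() */
	clk_put(clk);		/* drop this lookup's reference */
}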
Example #4
void __init godarm_map_io(void)
{
	int i;

	iotable_init(godarm_io_desc, ARRAY_SIZE(godarm_io_desc));

	for (i = 0; i < ARRAY_SIZE(godarm_io_desc); i++) {
		edb_putstr(" V: ");     edb_puthex(godarm_io_desc[i].virtual);
		edb_putstr(" P: ");     edb_puthex(godarm_io_desc[i].pfn);
		edb_putstr(" S: ");     edb_puthex(godarm_io_desc[i].length);
		edb_putstr(" T: ");     edb_putul(godarm_io_desc[i].type);
		edb_putstr("\n");
	}

	early_init();

	edb_trace();
}
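iotable_init() consumes an array of struct map_desc entries, each establishing a fixed boot-time virtual mapping of one physical region. The real godarm_io_desc table is not shown here; an illustrative entry (REG_BASE_TIMER and its IO_ADDRESS() value are assumptions) would look like:

/* Illustrative map_desc entry; REG_BASE_TIMER is an assumed name. */
static struct map_desc godarm_io_desc_example[] __initdata = {
	{
		.virtual = (unsigned long) IO_ADDRESS(REG_BASE_TIMER),
		.pfn     = __phys_to_pfn(REG_BASE_TIMER), /* page frame, not a raw address */
		.length  = SZ_4K,
		.type    = MT_DEVICE, /* device memory, uncached */
	},
};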
Example #5
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	edb_putstr("boot_secondary\n");

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	pen_release = cpu;
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	wmb();

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), (GIC_SECURE_INT_FLAG | 1));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
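This handshake only completes because the secondary spins in a holding pen until it sees its own CPU number in pen_release. The real pen is assembly (entered via k3v2_secondary_startup); a C-level sketch of the idea:

/* Sketch only: the actual holding pen is written in assembly and
 * re-reads pen_release from memory, since caches are not yet coherent
 * (hence the cache clean in boot_secondary()). */
static void holding_pen(unsigned int cpu)
{
	while (pen_release != cpu)
		; /* spin until boot_secondary() writes our CPU number */

	/* proceed into the kernel's secondary startup path, which ends
	 * in platform_secondary_init() clearing pen_release */
}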
Example #6
static inline void platform_do_lowpower(unsigned int cpu)
{
	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
#ifdef CONFIG_PM
	hilpm_cpu_godpsleep();
#endif
	if (pen_release != cpu) {
		for (;;) {
			/*
			 * here's the WFI
			 */
			asm("wfi\n"
				:
				:
				: "memory", "cc");

			if (pen_release == cpu) {
				/*
				 * OK, proper wakeup, we're done
				 */
				break;
			}

			/*
			 * Getting here means we came out of WFI without
			 * having been woken up - this shouldn't happen.
			 *
			 * The trouble is that letting people know about
			 * this is not really possible, since we are
			 * currently running incoherently, and therefore
			 * cannot safely call printk() or anything else.
			 */
			edb_putstr("[PM]WARNING: Spurious Wakeup!\r\n");
		}
	}
}
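platform_do_lowpower() is the body of the CPU-hotplug "die" path. The usual caller on ARM platforms of this generation is a thin wrapper; a sketch (the platform's actual hotplug.c may add bookkeeping):

/* Sketch of the conventional caller from the CPU-hotplug code. */
void platform_cpu_die(unsigned int cpu)
{
	/* the CPU is offline: sleep/WFI until pen_release names us again */
	platform_do_lowpower(cpu);
}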
Example #7
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	edb_putstr("smp_init_cpus\n");

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
			"hisik3: no. of cores (%d) greater than configured "
			"maximum of %d - clipping\n",
			ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
Example #8
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	edb_putstr("platform_secondary_init\n");

	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
	 * for us: do so
	 */
	gic_secondary_init(0);

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	pen_release = -1;
	smp_wmb();

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
Example #9
static int hisik3_pm_enter(suspend_state_t state)
{
	unsigned long flags = 0;

	switch (state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
		printk(KERN_INFO "hisik3_pm_enter: wake lock held, abort suspend\n");
		return -EAGAIN;
	}

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	local_irq_save(flags);

	hisik3_pm_save_gic();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_CACHE_L2X0
	hisik3_pm_disable_l2x0();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/* set PMU to low power */
	pmulowpower(1);

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*
	 * Workaround: delay 40ms to make sure LDO0 is powered off
	 * completely.
	 */
	mdelay(40);

	/* protect timer0_0 timer0_1 and disable timer0 clk */
	protect_timer0_register();

#ifdef CONFIG_LOWPM_DEBUG
	/* set IO to low-power mode */
	ioshowstatus(1);
	setiolowpower();
	ioshowstatus(1);

	/* set PMU to low-power mode */
	pmulowpower_show(1);
	pmulowpowerall(1);
	pmulowpower_show(1);
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	edb_putstr("[PM]Enter hilpm_cpu_godpsleep...\r\n");

#ifdef CONFIG_LOWPM_DEBUG
	/* enable timer0 */
	timer0_0_enable();

	/* enable RTC */
	rtc_enable();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	hilpm_cpu_godpsleep();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*
	 * The state was changed by fastboot and now differs from the
	 * kernel's view, so reinitialise the affected controllers.
	 */
	pmctrl_reinit();
	pctrl_reinit();
	sysctrl_reinit();

	/* UART re-init */
	edb_reinit();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_LOWPM_DEBUG
	/* restore debug uart0 */
	debuguart_reinit();

	/* disable timer0 */
	timer0_0_disable();

	/* restore PMU config */
	pmulowpowerall(0);
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/* restore PMU regs */
	pmulowpower(0);

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/* restore timer0_0 timer0_1 and enable timer0 clk */
	restore_timer0_register();

	flush_cache_all();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_CACHE_L2X0
	hisik3_pm_enable_l2x0();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	hisik3_pm_retore_gic();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	local_irq_restore(flags);

	pr_notice("[PM]Restore OK.\r\n");

	return 0;
}
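hisik3_pm_enter() is wired into the suspend core through platform_suspend_ops. A minimal registration sketch; hisik3_pm_ops and hisik3_pm_init are assumed names, and since the function above also accepts PM_SUSPEND_STANDBY, the real .valid callback may differ from suspend_valid_only_mem:

/* Minimal registration sketch; names are assumptions, not from the
 * source.  .valid gates which suspend states reach .enter. */
static const struct platform_suspend_ops hisik3_pm_ops = {
	.enter = hisik3_pm_enter,
	.valid = suspend_valid_only_mem,
};

static int __init hisik3_pm_init(void)
{
	suspend_set_ops(&hisik3_pm_ops);
	return 0;
}
late_initcall(hisik3_pm_init);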