Example #1
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
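
All of these examples revolve around cpu_idle_poll_ctrl(). For context, the helper simply maintains a nesting counter that forces the idle loop to poll instead of entering a low-power idle state. Below is a simplified sketch of what the mainline implementation in kernel/sched/idle.c looks like; the exact code may differ between kernel versions:

/* Simplified sketch of kernel/sched/idle.c (exact code may differ by version) */
static int cpu_idle_force_poll;		/* >0 forces the idle loop to poll */

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

Because this is a nest count, every cpu_idle_poll_ctrl(true) that is meant to be temporary must be balanced by a matching cpu_idle_poll_ctrl(false), as the PM and clockevent examples below do; board-level workarounds that never want "Wait for Interrupt" simply enable polling once at init and never release it.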
Example #2
static void __init orion5x_dt_init(void)
{
	char *dev_name;
	u32 dev, rev;

	orion5x_id(&dev, &rev, &dev_name);
	printk(KERN_INFO "Orion ID: %s. TCLK=%d.\n", dev_name, orion5x_tclk);

	BUG_ON(mvebu_mbus_dt_init(false));

	/*
	 * Setup Orion address map
	 */
	orion5x_setup_wins();

	/*
	 * Don't issue "Wait for Interrupt" instruction if we are
	 * running on D0 5281 silicon.
	 */
	if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
		printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
		cpu_idle_poll_ctrl(true);
	}

	if (of_machine_is_compatible("maxtor,shared-storage-2"))
		mss2_init();

	of_platform_populate(NULL, of_default_bus_match_table,
			     orion5x_auxdata_lookup, NULL);
}
Example #3
void __init gemini_init_irq(void)
{
	unsigned int i, mode = 0, level = 0;

	/*
	 * Disable the idle handler by default since it is buggy
	 * For more info see arch/arm/mach-gemini/idle.c
	 */
	cpu_idle_poll_ctrl(true);

	request_resource(&iomem_resource, &irq_resource);

	for (i = 0; i < NR_IRQS; i++) {
		irq_set_chip(i, &gemini_irq_chip);
		if ((i >= IRQ_TIMER1 && i <= IRQ_TIMER3) ||
		    (i >= IRQ_SERIRQ0 && i <= IRQ_SERIRQ1)) {
			irq_set_handler(i, handle_edge_irq);
			mode |= 1 << i;
			level |= 1 << i;
		} else {
			irq_set_handler(i, handle_level_irq);
		}
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* Disable all interrupts */
	__raw_writel(0, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
	__raw_writel(0, FIQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));

	/* Set interrupt mode */
	__raw_writel(mode, IRQ_TMODE(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
	__raw_writel(level, IRQ_TLEVEL(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
}
Example #4
void __init ixp4xx_init_irq(void)
{
	int i = 0;

	/*
	 * ixp4xx does not implement the XScale PWRMODE register
	 * so it must not call cpu_do_idle().
	 */
	cpu_idle_poll_ctrl(true);

	/* Route all sources to IRQ instead of FIQ */
	*IXP4XX_ICLR = 0x0;

	/* Disable all interrupts */
	*IXP4XX_ICMR = 0x0;

	if (cpu_is_ixp46x() || cpu_is_ixp43x()) {
		/* Route upper 32 sources to IRQ instead of FIQ */
		*IXP4XX_ICLR2 = 0x00;

		/* Disable upper 32 interrupts */
		*IXP4XX_ICMR2 = 0x00;
	}

	/* Default to all level triggered */
	for (i = 0; i < NR_IRQS; i++) {
		irq_set_chip_and_handler(i, &ixp4xx_irq_chip,
					 handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
}
Example #5
static void am33xx_pm_end(void)
{
	am33xx_m3_state_machine_reset();

	cpu_idle_poll_ctrl(false);
}
Example #6
void __init nuc900_board_init(struct platform_device **device, int size)
{
	cpu_idle_poll_ctrl(true);
	platform_add_devices(device, size);
	platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev));
	spi_register_board_info(nuc900_spi_board_info,
					ARRAY_SIZE(nuc900_spi_board_info));
}
Example #7
static int am33xx_pm_begin(suspend_state_t state)
{
	int i;

	unsigned long param4;
	int pos;

	cpu_idle_poll_ctrl(true);

	param4 = DS_IPC_DEFAULT;

	wkup_m3_reset_data_pos();
	if (am33xx_i2c_sleep_sequence) {
		pos = wkup_m3_copy_data(am33xx_i2c_sleep_sequence,
						i2c_sleep_sequence_sz);
		/* Lower 16 bits store the offset of the sleep sequence */
		param4 &= ~0xffff;
		param4 |= pos;
	}

	if (am33xx_i2c_wake_sequence) {
		pos = wkup_m3_copy_data(am33xx_i2c_wake_sequence,
						i2c_wake_sequence_sz);
		/* Upper 16 bits store the offset of the wake sequence */
		param4 &= ~0xffff0000;
		param4 |= pos << 16;
	}

	switch (state) {
	case PM_SUSPEND_MEM:
		am33xx_pm->ipc.reg1	= IPC_CMD_DS0;
		break;
	case PM_SUSPEND_STANDBY:
		am33xx_pm->ipc.reg1	= IPC_CMD_STANDBY;
		break;
	}

	am33xx_pm->ipc.reg2		= DS_IPC_DEFAULT;
	am33xx_pm->ipc.reg3		= DS_IPC_DEFAULT;
	am33xx_pm->ipc.reg5		= param4;
	wkup_m3_pm_set_cmd(&am33xx_pm->ipc);

	am33xx_pm->state = M3_STATE_MSG_FOR_LP;

	if (!wkup_m3_ping()) {
		i = wait_for_completion_timeout(&am33xx_pm_sync,
					msecs_to_jiffies(500));
		if (!i) {
			WARN(1, "PM: MPU<->CM3 sync failure\n");
			return -1;
		}
	} else {
		pr_warn("PM: Unable to ping CM3\n");
		return -1;
	}

	return 0;
}
Example #8
static int __init parisc_idle_init(void)
{
	const char *marker;

	/* check QEMU/SeaBIOS marker in PAGE0 */
	marker = (char *) &PAGE0->pad0;
	running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);

	if (!running_on_qemu)
		cpu_idle_poll_ctrl(1);

	return 0;
}
Example #9
File: time.c Project: 44670/linux
static int comparator_set_oneshot(struct clock_event_device *evdev)
{
	pr_debug("%s: %s\n", __func__, evdev->name);

	disable_cpu_idle_poll = true;
	/*
	 * If we're using the COUNT and COMPARE registers we
	 * need to force idle poll.
	 */
	cpu_idle_poll_ctrl(true);

	return 0;
}
Example #10
static int am33xx_pm_begin(suspend_state_t state)
{
	int ret = -EINVAL;

	cpu_idle_poll_ctrl(true);

	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		ret = wkup_m3_prepare_low_power(state);
		break;
	}

	return ret;
}
Example #11
File: time.c Project: 44670/linux
static int comparator_shutdown(struct clock_event_device *evdev)
{
	pr_debug("%s: %s\n", __func__, evdev->name);
	sysreg_write(COMPARE, 0);

	if (disable_cpu_idle_poll) {
		disable_cpu_idle_poll = false;
		/*
		 * Only disable idle poll if we have forced that
		 * in a previous call.
		 */
		cpu_idle_poll_ctrl(false);
	}
	return 0;
}
Example #12
static void am33xx_pm_end(void)
{
	wkup_m3_finish_low_power();

	if (rtc_only_idle) {
		if (retrigger_irq)
			/*
			 * Each 32-bit Interrupt Set-Pending register covers
			 * 32 interrupts. Compute the bit offset of the
			 * interrupt and set that particular bit.
			 * Compute the register offset by dividing the
			 * interrupt number by 32 and multiplying by 4.
			 */
			writel_relaxed(1 << (retrigger_irq & 31),
				       gic_dist_base + GIC_INT_SET_PENDING_BASE
				       + retrigger_irq / 32 * 4);
		rtc_write_scratch(omap_rtc, RTC_SCRATCH_MAGIC_REG, 0);
	}

	rtc_only_idle = 0;
	cpu_idle_poll_ctrl(false);
}
Example #13
void __init orion5x_init(void)
{
	char *dev_name;
	u32 dev, rev;

	orion5x_id(&dev, &rev, &dev_name);
	printk(KERN_INFO "Orion ID: %s. TCLK=%d.\n", dev_name, orion5x_tclk);

	/*
	 * Setup Orion address map
	 */
	orion5x_setup_wins();

	/* Setup root of clk tree */
	clk_init();

	/*
	 * Don't issue "Wait for Interrupt" instruction if we are
	 * running on D0 5281 silicon.
	 */
	if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
		printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
		cpu_idle_poll_ctrl(true);
	}

	/*
	 * The 5082/5181l/5182/6082/6082l/6183 have crypto
	 * while 5180n/5181/5281 don't have crypto.
	 */
	if ((dev == MV88F5181_DEV_ID && rev >= MV88F5181L_REV_A0) ||
	    dev == MV88F5182_DEV_ID || dev == MV88F6183_DEV_ID)
		orion5x_crypto_init();

	/*
	 * Register watchdog driver
	 */
	orion5x_wdt_init();
}
Example #14
int __init ft010_of_init_irq(struct device_node *node,
			      struct device_node *parent)
{
	struct ft010_irq_data *f = &firq;

	/*
	 * Disable the idle handler by default since it is buggy
	 * For more info see arch/arm/mach-gemini/idle.c
	 */
	cpu_idle_poll_ctrl(true);

	f->base = of_iomap(node, 0);
	WARN(!f->base, "unable to map gemini irq registers\n");

	/* Disable all interrupts */
	writel(0, FT010_IRQ_MASK(f->base));
	writel(0, FT010_FIQ_MASK(f->base));

	f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0,
					  &ft010_irqdomain_ops, f);
	set_handle_irq(ft010_irqchip_handle_irq);

	return 0;
}
Example #15
static void shark_init_early(void)
{
	cpu_idle_poll_ctrl(true);
}
Example #16
static int exynos_enter_idle_state(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	int (*func)(struct cpuidle_device *, struct cpuidle_driver *, int);

	switch (index) {
	case IDLE_C1:
		func = exynos_enter_idle;
		break;
	case IDLE_C2:
		func = exynos_enter_c2;
		break;
	case IDLE_LPM:
		/*
		 * On Exynos, the system can enter LPM only when the boot core
		 * alone is running. In other words, all non-boot cores must be
		 * shut down before entering LPM.
		 */
#if defined(CONFIG_EXYNOS_MARCH_DYNAMIC_CPU_HOTPLUG)
		if (nonboot_cpus_working() || lcd_is_on == true) {
#else
		if (nonboot_cpus_working()) {
#endif
			index = find_available_low_state(dev, drv, index);
			return exynos_enter_idle_state(dev, drv, index);
		} else {
			func = exynos_enter_lpm;
		}
		break;
	default:
		pr_err("%s : Invalid index: %d\n", __func__, index);
		return -EINVAL;
	}

	return (*func)(dev, drv, index);
}

/***************************************************************************
 *                            Define notifier call                         *
 ***************************************************************************/
static int exynos_cpuidle_notifier_event(struct notifier_block *this,
					  unsigned long event,
					  void *ptr)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		cpu_idle_poll_ctrl(true);
		return NOTIFY_OK;
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		cpu_idle_poll_ctrl(false);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block exynos_cpuidle_notifier = {
	.notifier_call = exynos_cpuidle_notifier_event,
};

static int exynos_cpuidle_reboot_notifier(struct notifier_block *this,
				unsigned long event, void *_cmd)
{
	switch (event) {
	case SYSTEM_POWER_OFF:
	case SYS_RESTART:
		cpu_idle_poll_ctrl(true);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block exynos_cpuidle_reboot_nb = {
	.notifier_call = exynos_cpuidle_reboot_notifier,
};
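
The two notifier blocks above only take effect once they are registered; the snippet does not show that step, but a driver would typically hook them up in its init path roughly as follows (register_pm_notifier() and register_reboot_notifier() are the standard kernel APIs; the init function name here is only illustrative):

/* Illustrative registration, not part of the original file */
static int __init exynos_cpuidle_notifier_init(void)
{
	/* Force idle polling around suspend/resume transitions... */
	register_pm_notifier(&exynos_cpuidle_notifier);
	/* ...and while the system is shutting down or rebooting. */
	register_reboot_notifier(&exynos_cpuidle_reboot_nb);
	return 0;
}

With that in place, PM_SUSPEND_PREPARE enables forced polling and PM_POST_SUSPEND/PM_POST_RESTORE release it again, mirroring the balanced enable/disable pattern seen in the am33xx examples.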

/***************************************************************************
 *                     Association with PSCI and DT                        *
 ***************************************************************************/
typedef int (*suspend_init_fn)(struct cpuidle_driver *,
				struct device_node *[]);

struct cpu_suspend_ops {
	const char *id;
	suspend_init_fn init_fn;
};

static const struct cpu_suspend_ops suspend_operations[] __initconst = {
	{"arm,psci", psci_dt_register_idle_states},
	{}
};

static __init const struct cpu_suspend_ops *get_suspend_ops(const char *str)
{
	int i;

	if (!str)
		return NULL;

	for (i = 0; suspend_operations[i].id; i++)
		if (!strcmp(suspend_operations[i].id, str))
			return &suspend_operations[i];

	return NULL;
}
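
The suspend_operations[] table above maps a device-tree enable-method string to the hook that registers the matching idle states (psci_dt_register_idle_states() for "arm,psci"). A hedged sketch of how such a lookup is typically consumed at probe time follows; the caller name, the cpu_node variable, and the use of of_property_read_string() are assumptions for illustration, not taken from the original file:

/* Hypothetical caller, only to illustrate how the table is consumed */
static int __init exynos_cpuidle_setup_suspend(struct cpuidle_driver *drv,
					       struct device_node *state_nodes[],
					       struct device_node *cpu_node)
{
	const struct cpu_suspend_ops *ops;
	const char *method;

	if (of_property_read_string(cpu_node, "enable-method", &method))
		return -ENODEV;

	ops = get_suspend_ops(method);
	if (!ops)
		return -EOPNOTSUPP;

	/* e.g. psci_dt_register_idle_states() when the DT says "arm,psci" */
	return ops->init_fn(drv, state_nodes);
}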