Example #1
File: traps.c Project: 8l/glendix
static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}
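This helper is the disable half of a pair used around x86 trap handlers. Its counterpart raises the preempt count first and then conditionally re-enables interrupts; here is a sketch of that enable side, reconstructed from the same era of traps.c rather than quoted from it:

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	/* mirror image of the helper above: bump the preempt count,
	 * then restore IRQs only if the interrupted context had them on */
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}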
Example #2
static int i2c_pxa_probe(struct platform_device *dev)
{
	struct pxa_i2c *i2c = &i2c_pxa;
	struct resource *res;
#ifdef CONFIG_I2C_PXA_SLAVE
	struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
#endif
	int ret;
	int irq;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(dev, 0);
	if (res == NULL || irq < 0)
		return -ENODEV;

	if (!request_mem_region(res->start, res_len(res), res->name))
		return -ENOMEM;

	i2c = kmalloc(sizeof(struct pxa_i2c), GFP_KERNEL);
	if (!i2c) {
		ret = -ENOMEM;
		goto emalloc;
	}

	memcpy(i2c, &i2c_pxa, sizeof(struct pxa_i2c));
	init_waitqueue_head(&i2c->wait);
	i2c->adap.name[strlen(i2c->adap.name) - 1] = '0' + dev->id % 10;

	i2c->reg_base = ioremap(res->start, res_len(res));
	if (!i2c->reg_base) {
		ret = -EIO;
		goto eremap;
	}

	i2c->iobase = res->start;
	i2c->iosize = res_len(res);

	i2c->irq = irq;

	i2c->slave_addr = I2C_PXA_SLAVE_ADDR;

#ifdef CONFIG_I2C_PXA_SLAVE
	if (plat) {
		i2c->slave_addr = plat->slave_addr;
		i2c->slave = plat->slave;
	}
#endif

	switch (dev->id) {
	case 0:
#ifdef CONFIG_PXA27x
		pxa_gpio_mode(GPIO117_I2CSCL_MD);
		pxa_gpio_mode(GPIO118_I2CSDA_MD);
#endif
		pxa_set_cken(CKEN14_I2C, 1);
		break;
#ifdef CONFIG_PXA27x
	case 1:
		local_irq_disable();
		PCFR |= PCFR_PI2CEN;
		local_irq_enable();
		pxa_set_cken(CKEN15_PWRI2C, 1);
		break;
#endif
	}

	ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
			  i2c->adap.name, i2c);
	if (ret)
		goto ereqirq;

	i2c_pxa_reset(i2c);

	i2c->adap.algo_data = i2c;
	i2c->adap.dev.parent = &dev->dev;

	ret = i2c_add_adapter(&i2c->adap);
	if (ret < 0) {
		printk(KERN_INFO "I2C: Failed to add bus\n");
		goto eadapt;
	}

	platform_set_drvdata(dev, i2c);

#ifdef CONFIG_I2C_PXA_SLAVE
	printk(KERN_INFO "I2C: %s: PXA I2C adapter, slave address %d\n",
	       i2c->adap.dev.bus_id, i2c->slave_addr);
#else
	printk(KERN_INFO "I2C: %s: PXA I2C adapter\n",
	       i2c->adap.dev.bus_id);
#endif
	return 0;

eadapt:
	free_irq(irq, i2c);
ereqirq:
	switch (dev->id) {
	case 0:
		pxa_set_cken(CKEN14_I2C, 0);
		break;
#ifdef CONFIG_PXA27x
	case 1:
		pxa_set_cken(CKEN15_PWRI2C, 0);
		local_irq_disable();
		PCFR &= ~PCFR_PI2CEN;
		local_irq_enable();
#endif
	}
eremap:
	kfree(i2c);
emalloc:
	release_mem_region(res->start, res_len(res));
	return ret;
}
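The probe above unwinds errors with the usual goto ladder, so the matching remove path has to undo the same steps in reverse order. A hypothetical mirror built only from the fields the probe fills in (an illustrative sketch, not the driver's actual remove()):

static int i2c_pxa_remove(struct platform_device *dev)
{
	struct pxa_i2c *i2c = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	i2c_del_adapter(&i2c->adap);		/* undo i2c_add_adapter() */
	free_irq(i2c->irq, i2c);		/* undo request_irq() */
	iounmap(i2c->reg_base);			/* undo ioremap() */
	release_mem_region(i2c->iobase, i2c->iosize);
	kfree(i2c);

	return 0;
}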
Example #3
/* default implementation */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}
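The weak attribute makes this a link-time default: an architecture that provides a strong definition of arch_suspend_disable_irqs() replaces it. For context, the suspend core invokes the hook and then asserts its effect, roughly like this (paraphrased from kernel/power/suspend.c of the same era):

	/* caller side: whatever the hook does, IRQs must end up hard-off */
	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());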
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Update device last_residency*/
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
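Note the copy into idle_time before the division: do_div() divides its first argument in place and evaluates to the remainder, which is why the nanosecond value is kept separately for sched_clock_idle_wakeup_event(). A minimal illustration with arbitrary numbers:

	u64 ns = 12345678;
	u32 rem = do_div(ns, NSEC_PER_USEC);	/* now ns == 12345, rem == 678 */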
/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}
Example #6
/**
 * hibernation_platform_enter - Power off the system using the platform driver.
 */
int hibernation_platform_enter(void)
{
    int error;

    if (!hibernation_ops)
        return -ENOSYS;

    /*
     * We have cancelled the power transition by running
     * hibernation_ops->finish() before saving the image, so we should let
     * the firmware know that we're going to enter the sleep state after all
     */
    error = hibernation_ops->begin();
    if (error)
        goto Close;

    entering_platform_hibernation = true;
    suspend_console();
    error = dpm_suspend_start(PMSG_HIBERNATE);
    if (error) {
        if (hibernation_ops->recover)
            hibernation_ops->recover();
        goto Resume_devices;
    }

    error = dpm_suspend_noirq(PMSG_HIBERNATE);
    if (error)
        goto Resume_devices;

    error = hibernation_ops->prepare();
    if (error)
        goto Platform_finish;

    error = disable_nonboot_cpus();
    if (error)
        goto Platform_finish;

    local_irq_disable();
    syscore_suspend();
    if (pm_wakeup_pending()) {
        error = -EAGAIN;
        goto Power_up;
    }

    hibernation_ops->enter();
    /* We should never get here */
    while (1);

Power_up:
    syscore_resume();
    local_irq_enable();
    enable_nonboot_cpus();

Platform_finish:
    hibernation_ops->finish();

    dpm_resume_noirq(PMSG_RESTORE);

Resume_devices:
    entering_platform_hibernation = false;
    dpm_resume_end(PMSG_RESTORE);
    resume_console();

Close:
    hibernation_ops->end();

    return error;
}
Example #7
static void ibm44x_power_off(void)
{
	local_irq_disable();
	for(;;);
}
Example #8
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id) 
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;
				
			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */
			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
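The handler drains a per-CPU pending_ipi bitmask under ipi_lock, so the sending side must be its mirror image: set a bit under the same lock, then kick the target CPU. A sketch along the lines of parisc's sender, with the gsc_writel() kick written from memory rather than quoted:

static void ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;			/* the bit ipi_interrupt() drains */
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);	/* raise the IPI */
	spin_unlock_irqrestore(lock, flags);
}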
Example #9
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	
	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	debug_objects_early_init();
	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE);
	printk(linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	setup_command_line(command_line);
	setup_per_cpu_areas();
	setup_nr_cpu_ids();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	build_all_zonelists();
	page_alloc_init();
	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	sort_main_extable();
	trap_init();
	rcu_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	pidhash_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	sched_clock_init();
	profile_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	vmalloc_init();
	vfs_caches_init_early();
	cpuset_init_early();
	page_cgroup_init();
	mem_init();
	enable_debug_pagealloc();
	cpu_hotplug_init();
	kmem_cache_init();
	debug_objects_mem_init();
	idr_init_cache();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	calibrate_delay();
	pidmap_init();
	pgtable_cache_init();
	prio_tree_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(num_physpages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	vfs_caches_init(num_physpages);
	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */

	ftrace_init();

#ifdef CONFIG_KERNEL_DEBUG_SEC
	kernel_sec_init();
#endif
	
	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
Example #10
static void vr41xx_restart(char *command)
{
	local_irq_disable();
	software_reset();
	while (1) ;
}
Example #11
static void vr41xx_halt(void)
{
	local_irq_disable();
	printk(KERN_NOTICE "\nYou can turn off the power supply\n");
	__asm__("hibernate;\n");
}
Example #12
static int __init calibrate_APIC_clock(void)
{
    struct clock_event_device *levt = &__get_cpu_var(lapic_events);
    void (*real_handler)(struct clock_event_device *dev);
    unsigned long deltaj;
    long delta, deltatsc;
    int pm_referenced = 0;

    local_irq_disable();

    /* Temporarily replace the global clock event handler */
    real_handler = global_clock_event->event_handler;
    global_clock_event->event_handler = lapic_cal_handler;

    /* Set up the APIC timer with its maximum count */
    __setup_APIC_LVTT(0xffffffff, 0, 0);

    /* Let interrupts run while the calibration handler collects samples */
    local_irq_enable();

    while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
        cpu_relax();

    local_irq_disable();

    /* Restore the real event handler */
    global_clock_event->event_handler = real_handler;

    /* The APIC timer counts down, so the delta is t1 - t2 */
    delta = lapic_cal_t1 - lapic_cal_t2;
    apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);

    deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);

    /* Prefer the PM-timer based calibration when it is available */
    pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
                                          &delta, &deltatsc);

    /* Calculate the scaled math multiplication factor */
    lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
                                   lapic_clockevent.shift);
    lapic_clockevent.max_delta_ns =
        clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
    lapic_clockevent.min_delta_ns =
        clockevent_delta2ns(0xF, &lapic_clockevent);

    calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;

    apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
    apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
    apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
                calibration_result);

    if (cpu_has_tsc) {
        apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
                    "%ld.%04ld MHz.\n",
                    (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
                    (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
    }

    apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
                "%u.%04u MHz.\n",
                calibration_result / (1000000 / HZ),
                calibration_result % (1000000 / HZ));

    /* Reject a result that is obviously too slow to be real */
    if (calibration_result < (1000000 / HZ)) {
        local_irq_enable();
        pr_warning("APIC frequency too slow, disabling apic timer\n");
        return -1;
    }

    levt->features &= ~CLOCK_EVT_FEAT_DUMMY;

    /* Without a PM-timer reference, cross-check against jiffies */
    if (!pm_referenced) {
        apic_printk(APIC_VERBOSE, "... verify APIC timer\n");

        /* Run the calibration handler from the lapic timer itself */
        levt->event_handler = lapic_cal_handler;
        lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
        lapic_cal_loops = -1;

        local_irq_enable();

        while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
            cpu_relax();

        lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);

        /* The lapic timer should have advanced about LAPIC_CAL_LOOPS jiffies */
        deltaj = lapic_cal_j2 - lapic_cal_j1;
        apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);

        if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
            apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
        else
            levt->features |= CLOCK_EVT_FEAT_DUMMY;
    } else
        local_irq_enable();

    if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
        pr_warning("APIC timer disabled due to verification failure\n");
        return -1;
    }

    return 0;
}
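The mult/shift pair computed above feeds the standard clockevents fixed-point conversion, device ticks = (ns * mult) >> shift, and div_sc() simply solves that relation for mult. A re-derivation consistent with its use here (an illustrative sketch, not a quote of the kernel's helper):

static inline unsigned long example_div_sc(unsigned long ticks,
					   unsigned long nsec, int shift)
{
	/* mult = (ticks << shift) / nsec, computed in 64 bits */
	u64 tmp = (u64)ticks << shift;

	do_div(tmp, nsec);
	return (unsigned long)tmp;
}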
Example #13
/*
 * ------------------------------------------------------------
 * mcfrs_open() and friends
 * ------------------------------------------------------------
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mcf_serial *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int	retval;
	int	do_clocal = 0;

	/*
	 * If the device is in the middle of being closed, then block
	 * until it's done, and then try again.
	 */
	if (info->flags & ASYNC_CLOSING) {
		interruptible_sleep_on(&info->close_wait);
#ifdef SERIAL_DO_RESTART
		if (info->flags & ASYNC_HUP_NOTIFY)
			return -EAGAIN;
		else
			return -ERESTARTSYS;
#else
		return -EAGAIN;
#endif
	}
	
	/*
	 * If non-blocking mode is set, or the port is not enabled,
	 * then make the check up front and then exit.
	 */
	if ((filp->f_flags & O_NONBLOCK) ||
	    (tty->flags & (1 << TTY_IO_ERROR))) {
		info->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = 1;

	/*
	 * Block waiting for the carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, info->count is dropped by one, so that
	 * mcfrs_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;
	add_wait_queue(&info->open_wait, &wait);
#ifdef SERIAL_DEBUG_OPEN
	printk("block_til_ready before block: ttyS%d, count = %d\n",
	       info->line, info->count);
#endif
	info->count--;
	info->blocked_open++;
	while (1) {
		local_irq_disable();
		mcfrs_setsignals(info, 1, 1);
		local_irq_enable();
		current->state = TASK_INTERRUPTIBLE;
		if (tty_hung_up_p(filp) ||
		    !(info->flags & ASYNC_INITIALIZED)) {
#ifdef SERIAL_DO_RESTART
			if (info->flags & ASYNC_HUP_NOTIFY)
				retval = -EAGAIN;
			else
				retval = -ERESTARTSYS;	
#else
			retval = -EAGAIN;
#endif
			break;
		}
		if (!(info->flags & ASYNC_CLOSING) &&
		    (do_clocal || (mcfrs_getsignals(info) & TIOCM_CD)))
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
#ifdef SERIAL_DEBUG_OPEN
		printk("block_til_ready blocking: ttyS%d, count = %d\n",
		       info->line, info->count);
#endif
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&info->open_wait, &wait);
	if (!tty_hung_up_p(filp))
		info->count++;
	info->blocked_open--;
#ifdef SERIAL_DEBUG_OPEN
	printk("block_til_ready after blocking: ttyS%d, count = %d\n",
	       info->line, info->count);
#endif
	if (retval)
		return retval;
	info->flags |= ASYNC_NORMAL_ACTIVE;
	return 0;
}	
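The bare local_irq_disable()/local_irq_enable() bracket around mcfrs_setsignals() is an old uniprocessor idiom; a later driver would express the same critical section with an IRQ-safe lock. A sketch of the equivalent, where the info->lock member is hypothetical (mcf_serial has no such field here):

	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);	/* IRQs off and SMP-safe */
	mcfrs_setsignals(info, 1, 1);
	spin_unlock_irqrestore(&info->lock, flags);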
Example #14
/*
 * do_IRQ handles all normal device IRQ's
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
    struct irqdesc * desc;
    struct irqaction * action;
    int cpu;
#ifdef CONFIG_ILATENCY
    {
        extern void interrupt_overhead_start(void);

        interrupt_overhead_start();
    }
#endif /* CONFIG_ILATENCY */

    irq = fixup_irq(irq);

    /*
     * Some hardware gives randomly wrong interrupts.  Rather
     * than crashing, do something sensible.
     */
    if (irq >= NR_IRQS)
        goto bad_irq;

    /* this is called recursively in some cases, so... */
    if (!in_irq())
        preempt_lock_start(-99);

    desc = irq_desc + irq;

    TRACE_IRQ_ENTRY(irq, !(user_mode(regs)));

    spin_lock(&irq_controller_lock);
    desc->mask_ack(irq);
    spin_unlock(&irq_controller_lock);

    cpu = smp_processor_id();
    irq_enter(cpu, irq);
    kstat.irqs[cpu][irq]++;
    desc->triggered = 1;

    /* Return with this interrupt masked if no action */
    action = desc->action;

    if (action) {
        int status = 0;

        if (desc->nomask) {
            spin_lock(&irq_controller_lock);
            desc->unmask(irq);
            spin_unlock(&irq_controller_lock);
        }

        if (!(action->flags & SA_INTERRUPT))
            local_irq_enable();

#ifdef CONFIG_ILATENCY
        {
            extern void interrupt_overhead_stop(void);

            interrupt_overhead_stop();
        }
#endif /* CONFIG_ILATENCY */
        do {
            status |= action->flags;
            action->handler(irq, action->dev_id, regs);
            action = action->next;
        } while (action);

        if (status & SA_SAMPLE_RANDOM)
            add_interrupt_randomness(irq);
        local_irq_disable();

        if (!desc->nomask && desc->enabled) {
            spin_lock(&irq_controller_lock);
            desc->unmask(irq);
            spin_unlock(&irq_controller_lock);
        }
    }

    /*
     * Debug measure - hopefully we can continue if an
     * IRQ lockup problem occurs...
     */
    check_irq_lock(desc, irq, regs);

    irq_exit(cpu, irq);
    TRACE_IRQ_EXIT();

    if (!in_irq())
        preempt_lock_stop();

    if (softirq_pending(cpu))
        do_softirq();
#ifdef CONFIG_ILATENCY
    /*
     * until entry.S gets this call do it here.
     */
    intr_ret_from_exception();
#endif /* CONFIG_ILATENCY */
    return;

bad_irq:
    irq_err_count += 1;
    printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
    return;
}
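Whether do_IRQ() re-enables interrupts around the handler chain is decided by the SA_INTERRUPT flag tested above. A sketch of how a driver of this era would keep IRQs off for a fast handler; MY_IRQ and both function names are made-up examples:

static void my_fast_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* SA_INTERRUPT was set, so this runs with IRQs still disabled */
}

static int __init my_driver_init(void)
{
	return request_irq(MY_IRQ, my_fast_handler, SA_INTERRUPT,
			   "myfast", NULL);
}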
Example #15
/**
 * create_image - Create a hibernation image.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image
 * and execute the drivers' .thaw_noirq() callbacks.
 *
 * Control reappears in this routine after the subsequent restore.
 */
static int create_image(int platform_mode)
{
    int error;

    error = dpm_suspend_noirq(PMSG_FREEZE);
    if (error) {
        printk(KERN_ERR "PM: Some devices failed to power down, "
               "aborting hibernation\n");
        return error;
    }

    error = platform_pre_snapshot(platform_mode);
    if (error || hibernation_test(TEST_PLATFORM))
        goto Platform_finish;

    error = disable_nonboot_cpus();
    if (error || hibernation_test(TEST_CPUS)
            || hibernation_testmode(HIBERNATION_TEST))
        goto Enable_cpus;

    local_irq_disable();

    error = syscore_suspend();
    if (error) {
        printk(KERN_ERR "PM: Some system devices failed to power down, "
               "aborting hibernation\n");
        goto Enable_irqs;
    }

    if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
        goto Power_up;

    in_suspend = 1;
    save_processor_state();
    error = swsusp_arch_suspend();
    if (error)
        printk(KERN_ERR "PM: Error %d creating hibernation image\n",
               error);
    /* Restore control flow magically appears here */
    restore_processor_state();
    if (!in_suspend) {
        events_check_enabled = false;
        platform_leave(platform_mode);
    }

Power_up:
    syscore_resume();

Enable_irqs:
    local_irq_enable();

Enable_cpus:
    enable_nonboot_cpus();

Platform_finish:
    platform_finish(platform_mode);

    dpm_resume_noirq(in_suspend ?
                     (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);

    return error;
}
Example #16
/*
 * notification of exit
 * - the exit_status is as sys_wait() would return
 * - notification includes fatal signals
 */
static void thread_exit(struct ethread *thread, int exit_status)
{
	struct eprocess	*process = thread->threads_process;
	BOOLEAN last;

	/* if Terminated, do nothing */
	ktrace("thread %p, exit_status %ld\n", thread, thread->exit_status);

	thread->terminated = 1;

	/* Can't terminate a thread if it attached another process */
	if (thread->tcb.apc_state_index) {
		return;
	}

	/* TODO: Lower to Passive Level */
	
	/* Lock the Process before we modify its thread entries */
	lock_process(process);

	list_del(&thread->thread_list_entry);

	/* TODO: close port */
	
	/* TODO: Rundown Win32 Structures */

	/* Set the last Thread Exit Status */
	process->last_thread_exit_status = thread->exit_status;
	
	/* The last Thread shuts down the Process */
	if ((last = list_empty(&process->thread_list_head))) {
		/* Save the Exit Time if not already done by NtTerminateProcess. This 
		   happens when the last thread just terminates without explicitly 
		   terminating the process. TODO */
#if 0
		process->exit_time = thread->exit_time;
#endif
		__exit_process(process);
	}

#if 0
	if (thread->tcb.win32thread) {
		kfree(thread->tcb.win32thread);
		thread->tcb.win32thread = NULL;
	}
#endif

	/* Free the TEB, if last thread, teb is freed by exit() */
	if (thread->tcb.teb && !last) {
		delete_teb(thread->tcb.teb);
		thread->tcb.teb = NULL;
	}

	list_del(&thread->tcb.thread_list_entry);

	/* Unlock the Process */		
	unlock_process(process);

	/* Rundown Mutexes */
	rundown_thread();

	/* Satisfy waits */
	local_irq_disable();
	thread->tcb.header.signal_state = true;
	if (!list_empty(&thread->tcb.header.wait_list_head))
		wait_test((struct dispatcher_header *)&thread->tcb, IO_NO_INCREMENT);
	local_irq_enable();
} /* end thread_exit() */
Example #17
/**
 * resume_target_kernel - Restore system state from a hibernation image.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Execute device drivers' .freeze_noirq() callbacks, restore the contents of
 * highmem that have not been restored yet from the image and run the low-level
 * code that will restore the remaining contents of memory and switch to the
 * just restored target kernel.
 */
static int resume_target_kernel(bool platform_mode)
{
    int error;

    error = dpm_suspend_noirq(PMSG_QUIESCE);
    if (error) {
        printk(KERN_ERR "PM: Some devices failed to power down, "
               "aborting resume\n");
        return error;
    }

    error = platform_pre_restore(platform_mode);
    if (error)
        goto Cleanup;

    error = disable_nonboot_cpus();
    if (error)
        goto Enable_cpus;

    local_irq_disable();

    error = syscore_suspend();
    if (error)
        goto Enable_irqs;

    save_processor_state();
    error = restore_highmem();
    if (!error) {
        error = swsusp_arch_resume();
        /*
         * The code below is only ever reached in case of a failure.
         * Otherwise, execution continues at the place where
         * swsusp_arch_suspend() was called.
         */
        BUG_ON(!error);
        /*
         * This call to restore_highmem() reverts the changes made by
         * the previous one.
         */
        restore_highmem();
    }
    /*
     * The only reason why swsusp_arch_resume() can fail is memory being
     * very tight, so we have to free it as soon as we can to avoid
     * subsequent failures.
     */
    swsusp_free();
    restore_processor_state();
    touch_softlockup_watchdog();

    syscore_resume();

Enable_irqs:
    local_irq_enable();

Enable_cpus:
    enable_nonboot_cpus();

Cleanup:
    platform_restore_cleanup(platform_mode);

    dpm_resume_noirq(PMSG_RECOVER);

    return error;
}
Example #18
void __init setup_secondary_APIC_clock(void)
{
	local_irq_disable(); /* FIXME: Do we need this? --RR */
	setup_APIC_timer(calibration_result);
	local_irq_enable();
}
Example #19
static void ibm44x_restart(char *cmd)
{
	local_irq_disable();
	abort();
}
Example #20
void __init sun4m_init_IRQ(void)
{
	int ie_node,i;
	struct linux_prom_registers int_regs[PROMREG_MAX];
	int num_regs;
	struct resource r;
	int mid;
    
	local_irq_disable();
	if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
	   (ie_node = prom_getchild (ie_node)) == 0 ||
	   (ie_node = prom_searchsiblings (ie_node, "interrupt")) == 0) {
		prom_printf("Cannot find /obio/interrupt node\n");
		prom_halt();
	}
	num_regs = prom_getproperty(ie_node, "reg", (char *) int_regs,
				    sizeof(int_regs));
	num_regs = (num_regs/sizeof(struct linux_prom_registers));
    
	/* Apply the obio ranges to these registers. */
	prom_apply_obio_ranges(int_regs, num_regs);
    
	int_regs[4].phys_addr = int_regs[num_regs-1].phys_addr;
	int_regs[4].reg_size = int_regs[num_regs-1].reg_size;
	int_regs[4].which_io = int_regs[num_regs-1].which_io;
	for(ie_node = 1; ie_node < 4; ie_node++) {
		int_regs[ie_node].phys_addr = int_regs[ie_node-1].phys_addr + PAGE_SIZE;
		int_regs[ie_node].reg_size = int_regs[ie_node-1].reg_size;
		int_regs[ie_node].which_io = int_regs[ie_node-1].which_io;
	}

	memset((char *)&r, 0, sizeof(struct resource));
	/* Map the interrupt registers for all possible cpus. */
	r.flags = int_regs[0].which_io;
	r.start = int_regs[0].phys_addr;
	sun4m_interrupts = (struct sun4m_intregs *) sbus_ioremap(&r, 0,
	    PAGE_SIZE*SUN4M_NCPUS, "interrupts_percpu");

	/* Map the system interrupt control registers. */
	r.flags = int_regs[4].which_io;
	r.start = int_regs[4].phys_addr;
	sbus_ioremap(&r, 0, int_regs[4].reg_size, "interrupts_system");

	sun4m_interrupts->set = ~SUN4M_INT_MASKALL;
	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
		sun4m_interrupts->cpu_intregs[mid].clear = ~0x17fff;

	if (!cpu_find_by_instance(1, NULL, NULL)) {
		/* System-wide interrupts go to CPU 0.  This should always be
		 * safe, because CPU 0 is guaranteed to be fitted, or OBP
		 * would not come up at all.
		 *
		 * Not sure, but writing here on SLAVIO systems may puke,
		 * so only do it when there is more than one CPU.
		 */
		irq_rcvreg = (unsigned long *)
				&sun4m_interrupts->undirected_target;
		sun4m_interrupts->undirected_target = 0;
	}
	BTFIXUPSET_CALL(sbint_to_irq, sun4m_sbint_to_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, sun4m_clear_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4m_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM);
#endif
	/* Cannot enable interrupts until OBP ticker is disabled. */
}
Example #21
static void ibm44x_halt(void)
{
	local_irq_disable();
	for(;;);
}
Example #22
File: kgdb.c Project: 08opt/linux
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
	local_irq_disable();
}
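kgdb_roundup_cpus() briefly enables interrupts so the cross-call IPI can be delivered, then masks them again. The hook it sends most likely just parks each CPU in the debugger, along these lines (reconstructed, not quoted from the hexagon tree):

static void hexagon_kgdb_nmi_hook(void *ignored)
{
	/* report in to kgdb and wait until the master CPU releases us */
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}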
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (dev->safe_state_index >= 0) {
			return dev->states[dev->safe_state_index].enter(dev,
						dev->safe_state_index);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return -EINVAL;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Update device last_residency*/
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
Example #24
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now, delta;

	if (!ts->tick_stopped)
		return;

	/* Update jiffies first */
	now = ktime_get();

	local_irq_disable();
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);

	/* Account the idle time */
	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	/*
	 * We stopped the tick in idle. Update process times would miss the
	 * time we slept as update_process_times does only a 1 tick
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}
Example #25
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle).  This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
			/*
			 * We need to test and propagate the TIF_NEED_RESCHED
			 * bit here because we might not have sent the
			 * reschedule IPI to idle tasks.
			 */
			if (tif_need_resched())
				set_preempt_need_resched();
		}
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set.  That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}
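cpu_idle_poll(), chosen above when polling is forced or a broadcast timer has fired, is essentially a relaxed spin with interrupts on. A minimal sketch consistent with this loop; the RCU and tracing bookkeeping of the real helper is omitted:

static inline int cpu_idle_poll(void)
{
	local_irq_enable();		/* poll with IRQs on */
	while (!tif_need_resched())
		cpu_relax();
	return 1;
}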
Example #26
void factory_cpu0_idle_test(void)
{
    int cpu = 0;
#ifdef CONFIG_SMP
    int i = 0;
    int ret = 0;
    int cpu_pwrdn_flag[nr_cpu_ids];
#endif

    spin_lock(&factory_lock);
    cpu = smp_processor_id();
    spin_unlock(&factory_lock);

    printk("[%s]it's cpu%d\n", __func__, cpu);

#ifdef CONFIG_SMP
    mutex_lock(&ftm_cpu_prepare);
    disable_hotplug_policy(true, nr_cpu_ids);
    memset(cpu_pwrdn_flag, 0, nr_cpu_ids * sizeof(int));
    for (i = 1; i < nr_cpu_ids; i++) {
        if (cpu_online(i)) {
            cpu_pwrdn_flag[i] = 1;
            ret = cpu_down(i);
            dcm_info("[%s]cpu_down(cpu%d) return %d, cpu1_killed=%u\n", __func__, i, ret, cpu1_killed);
        } else {
            dcm_info("[%s]no need to power down cpu%d\n", __func__, i);
        }
    }
    mutex_unlock(&ftm_cpu_prepare);
#endif

#ifdef CONFIG_LOCAL_WDT
    mpcore_wk_wdt_stop();
#endif
    mtk_wdt_disable(); // disable the watchdog

    // these should be set according to the platform's low-power requirements
#ifdef IDLE_LOW_POWER_TEST
    enable_low_power_settings();
#endif
    local_irq_disable();
    go_to_idle();
    local_irq_enable();
#ifdef IDLE_LOW_POWER_TEST
    disable_low_power_settings();
#endif

#ifdef CONFIG_SMP
    mutex_lock(&ftm_cpu_prepare);
    for (i = 1; i < nr_cpu_ids; i++) {
        if (cpu_pwrdn_flag[i] == 1) {
            ret = cpu_up(i);
            dcm_info("[%s]cpu_up(cpu%d) return %d, cpu1_killed=%u\n", __func__, i, ret, cpu1_killed);
        } else {
            dcm_info("[%s]no need to power up cpu%d\n", __func__, i);
        }
    }
    disable_hotplug_policy(false, nr_cpu_ids);
    mutex_unlock(&ftm_cpu_prepare);
#endif
}
Example #27
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
Example #28
static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = processor_powers[smp_processor_id()];
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    uint64_t t1, t2 = 0;
    u32 exp = 0, pred = 0;
    u32 irq_traced[4] = { 0 };

    if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
         (next_state = cpuidle_current_governor->select(power)) > 0 )
    {
        cx = &power->states[next_state];
        if ( cx->type == ACPI_STATE_C3 && power->flags.bm_check &&
             acpi_idle_bm_check() )
            cx = power->safe_state;
        if ( cx->idx > max_cstate )
            cx = &power->states[max_cstate];
        menu_get_trace_data(&exp, &pred);
    }
    if ( !cx )
    {
        if ( pm_idle_save )
            pm_idle_save();
        else
            safe_halt();
        return;
    }

    cpufreq_dbs_timer_suspend();

    sched_tick_suspend();
    /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    if ( !cpu_is_haltable(smp_processor_id()) )
    {
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
        cx = power->safe_state;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch ( cx->type )
    {
    case ACPI_STATE_C1:
    case ACPI_STATE_C2:
        if ( cx->type == ACPI_STATE_C1 || local_apic_timer_c2_ok )
        {
            /* Get start time (ticks) */
            t1 = cpuidle_get_tick();
            /* Trace cpu idle entry */
            TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);

            update_last_cx_stat(power, cx, t1);

            /* Invoke C2 */
            acpi_idle_do_entry(cx);
            /* Get end time (ticks) */
            t2 = cpuidle_get_tick();
            trace_exit_reason(irq_traced);
            /* Trace cpu idle exit */
            TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                     irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
            /* Update statistics */
            update_idle_stats(power, cx, t1, t2);
            /* Re-enable interrupts */
            local_irq_enable();
            break;
        }

    case ACPI_STATE_C3:
        /*
         * Before invoking C3, be aware that TSC/APIC timer may be 
         * stopped by H/W. Without carefully handling of TSC/APIC stop issues,
         * deep C state can't work correctly.
         */
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = cpuidle_get_tick();
        /* Trace cpu idle entry */
        TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);

        update_last_cx_stat(power, cx, t1);

        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( cx->type != ACPI_STATE_C3 )
            /* nothing to be done here */;
        else if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache  */
            ACPI_FLUSH_CPU_CACHE();
        }

        /* Invoke C3 */
        acpi_idle_do_entry(cx);

        if ( (cx->type == ACPI_STATE_C3) &&
             power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            if ( c3_cpu_status.count-- == num_online_cpus() )
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Get end time (ticks) */
        t2 = cpuidle_get_tick();

        /* recovering TSC */
        cstate_restore_tsc();
        trace_exit_reason(irq_traced);
        /* Trace cpu idle exit */
        TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                 irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);

        /* Update statistics */
        update_idle_stats(power, cx, t1, t2);
        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();

        break;

    default:
        /* Now in C0 */
        power->last_state = &power->states[0];
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    /* Now in C0 */
    power->last_state = &power->states[0];

    sched_tick_resume();
    cpufreq_dbs_timer_resume();

    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}
Example #29
/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}
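This is the same default as in Example #3, only spelled with the __weak shorthand, which the kernel defines as:

/* include/linux/compiler-gcc.h */
#define __weak	__attribute__((weak))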
Example #30
int hibernation_platform_enter(void)
{
    int error;
    gfp_t saved_mask;

    if (!hibernation_ops)
        return -ENOSYS;

    /*
     * We have cancelled the power transition by running
     * hibernation_ops->finish() before saving the image, so we should let
     * the firmware know that we're going to enter the sleep state after all
     */
    error = hibernation_ops->begin();
    if (error)
        goto Close;

    entering_platform_hibernation = true;
    suspend_console();
    saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
    error = dpm_suspend_start(PMSG_HIBERNATE);
    if (error) {
        if (hibernation_ops->recover)
            hibernation_ops->recover();
        goto Resume_devices;
    }

    error = dpm_suspend_noirq(PMSG_HIBERNATE);
    if (error)
        goto Resume_devices;

    error = hibernation_ops->prepare();
    if (error)
        goto Platform_finish;

    error = disable_nonboot_cpus();
    if (error)
        goto Platform_finish;

    local_irq_disable();
    sysdev_suspend(PMSG_HIBERNATE);
    hibernation_ops->enter();
    /* We should never get here */
    while (1);

    /*
     * We don't need to reenable the nonboot CPUs or resume consoles, since
     * the system is going to be halted anyway.
     */
Platform_finish:
    hibernation_ops->finish();

    dpm_resume_noirq(PMSG_RESTORE);

Resume_devices:
    entering_platform_hibernation = false;
    dpm_resume_end(PMSG_RESTORE);
    set_gfp_allowed_mask(saved_mask);
    resume_console();

Close:
    hibernation_ops->end();

    return error;
}