/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Set to %true if a wakeup event is pending, meaning the sleep
 *          state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

#ifdef CONFIG_SEC_GPIO_DVS
	/************************ Caution !!! ****************************/
	/* This function must be called at the appropriate point in the
	 * sleep sequence, in accordance with each BB vendor's specification.
	 */
	/************************ Caution !!! ****************************/
	gpio_dvs_check_sleepgpio();
#ifdef SECGPIO_SLEEP_DEBUGGING
	/************************ Caution !!! ****************************/
	/* This function must be called at the appropriate point for GPIO
	 * sleep debugging, in accordance with each BB vendor's specification,
	 * and only after gpio_dvs_check_sleepgpio() has been called.
	 */
	/************************ Caution !!! ****************************/
	gpio_dvs_set_sleepgpio();
#endif
#endif

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		freeze_enter();
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		}
		syscore_resume();
	}
#if defined(CONFIG_SEC_GPIO_DVS) && defined(SECGPIO_SLEEP_DEBUGGING)
	/************************ Caution !!! ****************************/
	/* This function must be located in an appropriate position
	 * to undo gpio SLEEP debugging setting when DUT wakes up.
	 * It should be implemented in accordance with the specification of each BB vendor.
	 */
	/************************ Caution !!! ****************************/
	gpio_dvs_undo_sleepgpio();
#endif

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
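For context on @wakeup: in mainline kernels of this vintage, suspend_devices_and_enter() drives suspend_enter() in a retry loop, re-entering the sleep state until an error occurs or a wakeup event is reported. A minimal sketch of that caller, assuming the mainline kernel/power/suspend.c structure (suspend_again is an optional platform callback):

	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && need_suspend_ops(state)
		&& suspend_ops->suspend_again && suspend_ops->suspend_again());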
Example #2
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Set to %true if a wakeup event is pending, meaning the sleep
 *          state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
	int error, last_dev;

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
		last_dev %= REC_FAILED_NUM;
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		log_suspend_abort_reason("%s device failed to power down",
			suspend_stats.failed_devs[last_dev]);
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		freeze_enter();
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS)) {
		log_suspend_abort_reason("Disabling non-boot cpus failed");
		goto Enable_cpus;
	}

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		} else if (*wakeup) {
			pm_get_active_wakeup_sources(suspend_abort,
				MAX_SUSPEND_ABORT_LEN);
			log_suspend_abort_reason(suspend_abort);
			error = -EBUSY;
		}

		start_logging_wakeup_reasons();
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
Example #3
File: idle.c Project: Abhi1919/ath
/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, is called with polling
 * set, and it returns with polling set.  If it ever stops polling, it
 * must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	unsigned int broadcast;
	bool reflect;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the disabled irqs
	 * critical sections latencies
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev))
		goto use_default;

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any).  In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available.  Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze()) {
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0) {
			local_irq_enable();
			goto exit_idle;
		}

		reflect = false;
		next_state = cpuidle_find_deepest_state(drv, dev);
	} else {
		reflect = true;
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
	}
	/* Fall back to the default arch idle method on errors. */
	if (next_state < 0)
		goto use_default;

	/*
	 * The idle task must be scheduled, it is pointless to
	 * go to idle, just update no idle residency and get
	 * out of this function
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}

	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;

	/*
	 * Tell the time framework to switch to a broadcast timer
	 * because our local timer will be shutdown. If a local timer
	 * is used from another cpu as a broadcast timer, this call may
	 * fail if it is not available
	 */
	if (broadcast && tick_broadcast_enter())
		goto use_default;

	/* Take note of the planned idle state. */
	idle_set_state(this_rq(), &drv->states[next_state]);

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	/* The cpu is no longer idle or about to enter idle. */
	idle_set_state(this_rq(), NULL);

	if (broadcast)
		tick_broadcast_exit();

	/*
	 * Give the governor an opportunity to reflect on the outcome
	 */
	if (reflect)
		cpuidle_reflect(dev, entered_state);

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
	return;

use_default:
	/*
	 * We can't use the cpuidle framework, let's use the default
	 * idle routine.
	 */
	if (current_clr_polling_and_test())
		local_irq_enable();
	else
		arch_cpu_idle();

	goto exit_idle;
}
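For reference, cpuidle_idle_call() is driven from the generic idle loop in kernel/sched/idle.c. A simplified sketch of that caller, with tick and RCU bookkeeping omitted (an approximation of the same-era cpu_idle_loop(), not a verbatim copy):

	while (!need_resched()) {
		rmb();
		if (cpu_idle_force_poll || tick_check_broadcast_expired())
			cpu_idle_poll();	/* bypass cpuidle, poll for resched */
		else
			cpuidle_idle_call();	/* enters with polling set, per the
						 * contract in the comment above */
	}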
Example #4
static int sprd_pm_deepsleep(suspend_state_t state)
{
    int ret_val = 0;
    unsigned long flags;
    u32 battery_time, cur_time;
    unsigned int cpu = smp_processor_id();

    battery_time = cur_time = get_sys_cnt();

    /* for debugging & statistics */
    clr_sleep_mode();
    time_statisic_begin();

#if defined(CONFIG_NKERNEL) && !defined(CONFIG_NKERNEL_PM_MASTER)
    hw_local_irq_disable();
    os_ctx->suspend_to_memory(os_ctx);
    hw_local_irq_enable();
#else
    /*
     * When we get here, only the boot CPU is still alive.
     */
    if (smp_processor_id()) {
        __WARN();
        goto enter_exit;
    }

    while(1) {
        hw_local_irq_disable();
        local_fiq_disable();
        local_irq_save(flags);

        if (arch_local_irq_pending()) {
            /* for debugging & statistics */
            irq_wakeup_set();

            local_irq_restore(flags);
            hw_local_irq_enable();
            local_fiq_enable();
            break;
        } else {
            local_irq_restore(flags);
            WARN_ONCE(!irqs_disabled(), "#####: Interrupts enabled in pm_enter()!\n");
#if defined(CONFIG_NKERNEL)
            /*
             * A return value of 0 means all other guest OSes are idle.
             */
            ret_val = os_ctx->idle(os_ctx);
            if (0 == ret_val) {
#ifdef CONFIG_NKERNEL_PM_MASTER
                os_ctx->smp_cpu_stop(0);
#endif
                sprd_cpu_deep_sleep(cpu);
#ifdef CONFIG_NKERNEL_PM_MASTER
                os_ctx->smp_cpu_start(0, 0);/* the 2nd parameter is meaningless*/
#endif
            } else {
                printk("******** os_ctx->idle return %d ********\n", ret_val);
            }
#else
            sprd_cpu_deep_sleep(cpu);
#endif
            hw_local_irq_enable();
            local_fiq_enable();
        }

        battery_sleep();
        cur_time = get_sys_cnt();
        if ((cur_time -  battery_time) > BATTERY_CHECK_INTERVAL) {
            battery_time = cur_time;
            if (sprd_check_battery()) {
                printk("###: battery low!\n");
                break;
            }
        }
    }/*end while*/

    time_statisic_end();
#endif

enter_exit:
    return ret_val;
}
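The while loop above illustrates a race-free deep-sleep pattern: interrupts are disabled first, arch_local_irq_pending() is checked, and the CPU enters deep sleep only if nothing is already pending. A stripped-down sketch of just that pattern, reusing names from the snippet (the platform hooks are assumptions):

    /* Sketch: never enter deep sleep with a wakeup already pending. */
    hw_local_irq_disable();
    if (arch_local_irq_pending()) {
        hw_local_irq_enable();      /* abort and service the pending IRQ */
    } else {
        sprd_cpu_deep_sleep(cpu);   /* a real wakeup IRQ resumes us here */
        hw_local_irq_enable();
    }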
Example #5
void ide_unregister(unsigned int index)
{
	ide_drive_t *drive;
	ide_hwif_t *hwif, *g;
	static ide_hwif_t tmp_hwif; /* protected by ide_cfg_sem */
	ide_hwgroup_t *hwgroup;
	int irq_count = 0, unit;

	BUG_ON(index >= MAX_HWIFS);

	BUG_ON(in_interrupt());
	BUG_ON(irqs_disabled());
	down(&ide_cfg_sem);
	spin_lock_irq(&ide_lock);
	hwif = &ide_hwifs[index];
	if (!hwif->present)
		goto abort;
	for (unit = 0; unit < MAX_DRIVES; ++unit) {
		drive = &hwif->drives[unit];
		if (!drive->present)
			continue;
		spin_unlock_irq(&ide_lock);
		device_unregister(&drive->gendev);
		wait_for_completion(&drive->gendev_rel_comp);
		spin_lock_irq(&ide_lock);
	}
	hwif->present = 0;

	spin_unlock_irq(&ide_lock);

	ide_proc_unregister_port(hwif);

	hwgroup = hwif->hwgroup;
	/*
	 * free the irq if we were the only hwif using it
	 */
	g = hwgroup->hwif;
	do {
		if (g->irq == hwif->irq)
			++irq_count;
		g = g->next;
	} while (g != hwgroup->hwif);
	if (irq_count == 1)
		free_irq(hwif->irq, hwgroup);

	spin_lock_irq(&ide_lock);
	/*
	 * Note that we only release the standard ports,
	 * and do not even try to handle any extra ports
	 * allocated for weird IDE interface chipsets.
	 */
	ide_hwif_release_regions(hwif);

	/*
	 * Remove us from the hwgroup, and free
	 * the hwgroup if we were the only member
	 */
	if (hwif->next == hwif) {
		BUG_ON(hwgroup->hwif != hwif);
		kfree(hwgroup);
	} else {
		/* There is another interface in hwgroup.
		 * Unlink us, and set hwgroup->drive and ->hwif to
		 * something sane.
		 */
		g = hwgroup->hwif;
		while (g->next != hwif)
			g = g->next;
		g->next = hwif->next;
		if (hwgroup->hwif == hwif) {
			/* Choose a random hwif for hwgroup->hwif.
			 * It's guaranteed that there are no drives
			 * left in the hwgroup.
			 */
			BUG_ON(hwgroup->drive != NULL);
			hwgroup->hwif = g;
		}
		BUG_ON(hwgroup->hwif == hwif);
	}

	/* More messed up locking ... */
	spin_unlock_irq(&ide_lock);
	device_unregister(&hwif->gendev);
	wait_for_completion(&hwif->gendev_rel_comp);

	/*
	 * Remove us from the kernel's knowledge
	 */
	blk_unregister_region(MKDEV(hwif->major, 0), MAX_DRIVES<<PARTN_BITS);
	kfree(hwif->sg_table);
	unregister_blkdev(hwif->major, hwif->name);
	spin_lock_irq(&ide_lock);

	if (hwif->dma_base) {
		(void) ide_release_dma(hwif);

		hwif->dma_base = 0;
		hwif->dma_master = 0;
		hwif->dma_command = 0;
		hwif->dma_vendor1 = 0;
		hwif->dma_status = 0;
		hwif->dma_vendor3 = 0;
		hwif->dma_prdtable = 0;

		hwif->extra_base  = 0;
		hwif->extra_ports = 0;
	}

	/* copy original settings */
	tmp_hwif = *hwif;

	/* restore hwif data to pristine status */
	init_hwif_data(hwif, index);
	init_hwif_default(hwif, index);

	ide_hwif_restore(hwif, &tmp_hwif);

abort:
	spin_unlock_irq(&ide_lock);
	up(&ide_cfg_sem);
}
Example #6
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;
#ifdef CONFIG_SAMSUNG_KERNEL_DEBUG
	int cpu = smp_processor_id();
#endif
	unsigned int flags = 0, irq = desc->irq_data.irq;

	do {
		irqreturn_t res;

		trace_irq_handler_entry(irq, action);
#ifdef CONFIG_SAMSUNG_KERNEL_DEBUG
#ifdef CONFIG_SAMSUNG_LOG_BUF
		if (a_log_irq) {
			log_idx++;
			if ((unsigned int)log_idx >= IRQ_LOG_MAX)
				log_idx = 0;

			a_log_irq[log_idx].time = cpu_clock(cpu);
			a_log_irq[log_idx].cpu = cpu;
			a_log_irq[log_idx].id = irq;
			a_log_irq[log_idx].ret = -1;
		} else if (log_buf_irq) {
			a_log_irq = (irq_log_t*)log_buf_irq;
		}
#endif
#endif
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);
#ifdef CONFIG_SAMSUNG_KERNEL_DEBUG
#ifdef CONFIG_SAMSUNG_LOG_BUF
		if (a_log_irq) {
			log_idx++;
			if ((unsigned int)log_idx >= IRQ_LOG_MAX)
				log_idx = 0;
			
			a_log_irq[log_idx].time = cpu_clock(cpu);
			a_log_irq[log_idx].cpu = cpu;
			a_log_irq[log_idx].id = irq;
			a_log_irq[log_idx].ret = res;
		}
#endif          
#endif
		if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			flags |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
		action = action->next;
	} while (action);

	add_interrupt_randomness(irq, flags);

	if (!noirqdebug)
		note_interrupt(irq, desc, retval);
	return retval;
}
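The IRQ_WAKE_THREAD handling above pairs with request_threaded_irq(): the hard handler defers work to a thread function, and warn_no_thread() fires if that thread function was never supplied. A hedged sketch of a driver using the API (my_quick_check, my_slow_work and "my_dev" are hypothetical names):

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	/* hard IRQ context, interrupts disabled: defer the heavy work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	/* runs in the IRQ thread that request_threaded_irq() created */
	return IRQ_HANDLED;
}

/* at probe time:
 *	err = request_threaded_irq(irq, my_quick_check, my_slow_work,
 *				   0, "my_dev", dev);
 */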
Example #7
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Set to %true if a wakeup event is pending, meaning the sleep
 *          state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		freeze_enter();
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
Example #8
File: main.c Project: janfj/dd-wrt
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	unwind_init();
	lockdep_init();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE);
	printk(linux_banner);
	setup_arch(&command_line);
	setup_command_line(command_line);
	unwind_setup();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
	wbd222_wdt_setup();
	wbd222_wdt_touch();

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	build_all_zonelists();
	page_alloc_init();
	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	sort_main_extable();
	trap_init();
	rcu_init();
	init_IRQ();
	pidhash_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	if (!irqs_disabled())
		printk("start_kernel(): bug: interrupts were enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
			initrd_start < min_low_pfn << PAGE_SHIFT) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",initrd_start,min_low_pfn << PAGE_SHIFT);
		initrd_start = 0;
	}
#endif
	vfs_caches_init_early();
	cpuset_init_early();
	mem_init();
	kmem_cache_init();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	calibrate_delay();
	pidmap_init();
	pgtable_cache_init();
	prio_tree_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	fork_init(num_physpages);
	proc_caches_init();
	buffer_init();
	unnamed_dev_init();
	key_init();
	security_init();
	vfs_caches_init(num_physpages);
	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */

	/* Do the rest non-__init'ed, we're now alive */
	wbd222_wdt_touch();
	rest_init();
}
Example #9
/**
 *	suspend_enter - enter the desired system sleep state.
 *	@state:		state to enter
 *
 *	This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state)
{
	int error;

	if (suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			return error;
	}

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Power_up_devices;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = sysdev_suspend(PMSG_SUSPEND);
	if (!error) {
		if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		}
		/* Workaround for possible L2 cache coherency issue
		   where preempt_count remains non-zero */
		preempt_count() = 0;
		sysdev_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (suspend_ops->wake)
		suspend_ops->wake();

 Power_up_devices:
	dpm_resume_noirq(PMSG_RESUME);

 Platform_finish:
	if (suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
Example #10
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return in_softirq() || irqs_disabled();
}
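A typical use of rcu_read_lock_bh_held() is as a debug assertion in a function that must run inside an RCU-bh read-side critical section; a minimal sketch (the function name is hypothetical):

static void assumes_rcu_bh_reader(void)
{
	/* complain once if the caller forgot rcu_read_lock_bh() */
	WARN_ON_ONCE(!rcu_read_lock_bh_held());
	/* ... dereference RCU-bh-protected data here ... */
}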
Example #11
File: tlb.c Project: xros/linux
/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here.  They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date.  This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    processing.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes.  The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
	 *    processed on this CPU in reverse order, we'll see
	 *     local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_single() and set local_tlb_gen to
	 *    3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date.  By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_single(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	} else {
Example #12
File: tlb.c Project: xros/linux
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road.  It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);

	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * We don't currently support having a real mm loaded without
		 * our cpu set in mm_cpumask().  We have all the bookkeeping
		 * in place to figure out whether we would need to flush
		 * if our cpu were cleared in mm_cpumask(), but we don't
		 * currently use it.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		return;
	} else {
		u16 new_asid;
		bool need_flush;

		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			sync_current_stack_to_mm(next);
		}

		/* Stop remote flushes for the previous mm */
		VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
				real_prev != &init_mm);
		cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		if (need_flush) {
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
			load_new_mm_cr3(next->pgd, new_asid, true);

			/*
			 * NB: This gets called via leave_mm() in the idle path
			 * where RCU functions differently.  Tracing normally
			 * uses RCU, so we need to use the _rcuidle variant.
			 *
			 * (There is no good reason for this.  The idle code should
			 *  be rearranged to call this before rcu_idle_enter().)
			 */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
		} else {
			/* The new ASID is already up to date. */
			load_new_mm_cr3(next->pgd, new_asid, false);

			/* See above wrt _rcuidle. */
			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
		}

		this_cpu_write(cpu_tlbstate.loaded_mm, next);
		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
	}

	load_mm_cr4(next);
	switch_ldt(real_prev, next);
}
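As the NB above notes, leave_mm() enters this function with prev == NULL and tsk == NULL. A hedged sketch of that lazy-TLB exit path, modeled on the switch_mm_irqs_off(NULL, &init_mm, NULL) call seen in flush_tlb_func_common() above (not the verbatim leave_mm()):

	/* Sketch: dropping a lazy mm reverts this CPU to init_mm. */
	if (this_cpu_read(cpu_tlbstate.loaded_mm) != &init_mm)
		switch_mm_irqs_off(NULL, &init_mm, NULL);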
Example #13
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			exynos_ss_softirq(ESS_FLAG_SOFTIRQ, h->action, irqs_disabled(), ESS_FLAG_IN);
			h->action(h);
			exynos_ss_softirq(ESS_FLAG_SOFTIRQ, h->action, irqs_disabled(), ESS_FLAG_OUT);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
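The h->action(h) dispatch above runs handlers registered with open_softirq() and marked pending with raise_softirq(). A hedged sketch of that registration side (MY_SOFTIRQ is hypothetical; softirq vectors are a fixed kernel-internal set, not extensible by modules):

static void my_softirq_action(struct softirq_action *h)
{
	/* softirq context: interrupts enabled, sleeping not allowed */
}

/* at boot:	open_softirq(MY_SOFTIRQ, my_softirq_action);
 * later:	raise_softirq(MY_SOFTIRQ);  sets the pending bit consumed above
 */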
Example #14
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int flags = 0, irq = desc->irq_data.irq;
#ifdef CONFIG_MSM_SM_EVENT
	sm_msm_irq_data_t sm_irq;
#endif

	if (irq == 32) {	/* the SMD device irq is 32 */
		smd_irq_stamp[smd_irq_stamp_index++] = jiffies;
		smd_irq_stamp_index &= MAX_SMD_IRQ_STAMP_NUM - 1;
	}
	last_irq_stamp = jiffies;
	do {
		irqreturn_t res;

		trace_irq_handler_entry(irq, action);
#ifdef CONFIG_MSM_SM_EVENT
		sm_irq.func_addr = (unsigned int)action->handler;
		sm_irq.irq_num = irq;
		sm_add_event(SM_IRQ_EVENT | IRQ_EVENT_ENTER,
			SM_EVENT_START, 0, &sm_irq, sizeof(sm_msm_irq_data_t));
#endif
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);

/*
#ifdef CONFIG_MSM_SM_EVENT
		sm_add_event(SM_IRQ_EVENT | IRQ_EVENT_LEAVE,
			SM_EVENT_END, 0, &sm_irq, sizeof(sm_msm_irq_data_t));
#endif
*/
		if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			flags |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
		action = action->next;
	} while (action);

	add_interrupt_randomness(irq, flags);

	if (!noirqdebug)
		note_interrupt(irq, desc, retval);
	return retval;
}
Example #15
static void scst_no_dlm_pr_write_unlock(struct scst_device *dev,
					struct scst_lksb *pr_lksb)
{
	scst_pr_write_unlock(dev);
}

static bool scst_no_dlm_reserved(struct scst_device *dev)
{
	return dev->reserved_by;
}

static void scst_no_dlm_res_lock(struct scst_device *dev,
				 struct scst_lksb *pr_lksb)
	__acquires(&dev->dev_lock)
{
	EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
	spin_lock_bh(&dev->dev_lock);
}

static void scst_no_dlm_res_unlock(struct scst_device *dev,
				   struct scst_lksb *pr_lksb)
	__releases(&dev->dev_lock)
{
	spin_unlock_bh(&dev->dev_lock);
}

static bool scst_no_dlm_is_rsv_holder(struct scst_device *dev,
				      struct scst_session *sess)
{
	EXTRACHECKS_BUG_ON(sess == NULL);
	return dev->reserved_by == sess;
}
Example #16
int zd_usb_enable_int(struct zd_usb *usb)
{
	int r;
	struct usb_device *udev = zd_usb_to_usbdev(usb);
	struct zd_usb_interrupt *intr = &usb->intr;
	struct urb *urb;

	dev_dbg_f(zd_usb_dev(usb), "\n");

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		r = -ENOMEM;
		goto out;
	}

	ZD_ASSERT(!irqs_disabled());
	spin_lock_irq(&intr->lock);
	if (intr->urb) {
		spin_unlock_irq(&intr->lock);
		r = 0;
		goto error_free_urb;
	}
	intr->urb = urb;
	spin_unlock_irq(&intr->lock);

	r = -ENOMEM;
	intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
					  GFP_KERNEL, &intr->buffer_dma);
	if (!intr->buffer) {
		dev_dbg_f(zd_usb_dev(usb),
			"couldn't allocate transfer_buffer\n");
		goto error_set_urb_null;
	}

	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
			 intr->buffer, USB_MAX_EP_INT_BUFFER,
			 int_urb_complete, usb,
			 intr->interval);
	urb->transfer_dma = intr->buffer_dma;
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
	r = usb_submit_urb(urb, GFP_KERNEL);
	if (r) {
		dev_dbg_f(zd_usb_dev(usb),
			 "Couldn't submit urb. Error number %d\n", r);
		goto error;
	}

	return 0;
error:
	usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
			  intr->buffer, intr->buffer_dma);
error_set_urb_null:
	spin_lock_irq(&intr->lock);
	intr->urb = NULL;
	spin_unlock_irq(&intr->lock);
error_free_urb:
	usb_free_urb(urb);
out:
	return r;
}
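The error unwinding above mirrors the eventual teardown. A hedged sketch of the disable path (an assumption modeled on the driver's zd_usb_disable_int(), not copied from it): take the URB back under the lock, kill it outside the lock, then release the coherent buffer.

static void sketch_disable_int(struct zd_usb *usb)
{
	struct zd_usb_interrupt *intr = &usb->intr;
	struct usb_device *udev = zd_usb_to_usbdev(usb);
	struct urb *urb;

	spin_lock_irq(&intr->lock);
	urb = intr->urb;
	intr->urb = NULL;
	spin_unlock_irq(&intr->lock);
	if (!urb)
		return;

	usb_kill_urb(urb);	/* must not hold intr->lock here */
	usb_free_urb(urb);
	usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
			  intr->buffer, intr->buffer_dma);
}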
Example #17
/**
 *	suspend_enter - enter the desired system sleep state.
 *	@state:		state to enter
 *
 *	This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state)
{
	int error;
/*
#ifdef CONFIG_CPU_FREQ
	dvfs_set_max_freq_lock();
#endif */ /* CONFIG_CPU_FREQ */
	if (suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			return error;
	}

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Power_up_devices;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = sysdev_suspend(PMSG_SUSPEND);
	if (!error) {
		if (!suspend_test(TEST_CORE))
			error = suspend_ops->enter(state);
		sysdev_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());
/*#ifdef CONFIG_CPU_FREQ
	dvfs_set_max_freq_unlock();       
#endif*/ /* CONFIG_CPU_FREQ */

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (suspend_ops->wake)
		suspend_ops->wake();

 Power_up_devices:
	dpm_resume_noirq(PMSG_RESUME);

 Platform_finish:
	if (suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
Example #18
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	debug_objects_early_init();

	/*
	 * Set up the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE "%s", linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	setup_command_line(command_line);
	setup_nr_cpu_ids();

	/* Generate the per-CPU copies of each per-CPU variable and
	 * initialize the dynamic per-CPU allocation mechanism. */
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists();
	page_alloc_init();

	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();
	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	rcu_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	prio_tree_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early\n");
	early_boot_irqs_on();
	local_irq_enable();

	/* Interrupts are enabled now so all GFP allocations are safe. */
	set_gfp_allowed_mask(__GFP_BITS_MASK);

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	enable_debug_pagealloc();
	kmemtrace_init();
	kmemleak_init();
	debug_objects_mem_init();
	idr_init_cache();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
#ifdef CONFIG_X86_ESPFIX64
	/* Should be run before the first non-init thread is created */
	init_espfix_bsp();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(totalram_pages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	vfs_caches_init(totalram_pages);
	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */
	sfi_init_late();

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
Example #19
int atari_tt_hwclk( int op, struct rtc_time *t )
{
    int sec=0, min=0, hour=0, day=0, mon=0, year=0, wday=0;
    unsigned long	flags;
    unsigned char	ctrl;
    int pm = 0;

    ctrl = RTC_READ(RTC_CONTROL); /* control registers are
                                   * independent from the UIP */

    if (op) {
        /* write: prepare values */

        sec  = t->tm_sec;
        min  = t->tm_min;
        hour = t->tm_hour;
        day  = t->tm_mday;
        mon  = t->tm_mon + 1;
        year = t->tm_year - atari_rtc_year_offset;
        wday = t->tm_wday + (t->tm_wday >= 0);

        if (!(ctrl & RTC_24H)) {
	    if (hour > 11) {
		pm = 0x80;
		if (hour != 12)
		    hour -= 12;
	    }
	    else if (hour == 0)
		hour = 12;
        }

        if (!(ctrl & RTC_DM_BINARY)) {
	    sec = bin2bcd(sec);
	    min = bin2bcd(min);
	    hour = bin2bcd(hour);
	    day = bin2bcd(day);
	    mon = bin2bcd(mon);
	    year = bin2bcd(year);
	    if (wday >= 0)
		wday = bin2bcd(wday);
        }
    }

    /* Reading/writing the clock registers is a bit critical due to
     * the regular update cycle of the RTC. While an update is in
     * progress, registers 0..9 shouldn't be touched.
     * The problem is solved like that: If an update is currently in
     * progress (the UIP bit is set), the process sleeps for a while
     * (50ms). This really should be enough, since the update cycle
     * normally needs 2 ms.
     * If the UIP bit reads as 0, we have at least 244 usecs until the
     * update starts. This should be enough... But to be sure,
     * additionally the RTC_SET bit is set to prevent an update cycle.
     */

    while( RTC_READ(RTC_FREQ_SELECT) & RTC_UIP ) {
	if (in_atomic() || irqs_disabled())
	    mdelay(1);
	else
	    schedule_timeout_interruptible(HWCLK_POLL_INTERVAL);
    }

    local_irq_save(flags);
    RTC_WRITE( RTC_CONTROL, ctrl | RTC_SET );
    if (!op) {
        sec  = RTC_READ( RTC_SECONDS );
        min  = RTC_READ( RTC_MINUTES );
        hour = RTC_READ( RTC_HOURS );
        day  = RTC_READ( RTC_DAY_OF_MONTH );
        mon  = RTC_READ( RTC_MONTH );
        year = RTC_READ( RTC_YEAR );
        wday = RTC_READ( RTC_DAY_OF_WEEK );
    }
    else {
        RTC_WRITE( RTC_SECONDS, sec );
        RTC_WRITE( RTC_MINUTES, min );
        RTC_WRITE( RTC_HOURS, hour + pm);
        RTC_WRITE( RTC_DAY_OF_MONTH, day );
        RTC_WRITE( RTC_MONTH, mon );
        RTC_WRITE( RTC_YEAR, year );
        if (wday >= 0) RTC_WRITE( RTC_DAY_OF_WEEK, wday );
    }
    RTC_WRITE( RTC_CONTROL, ctrl & ~RTC_SET );
    local_irq_restore(flags);

    if (!op) {
        /* read: adjust values */

        if (hour & 0x80) {
	    hour &= ~0x80;
	    pm = 1;
	}

	if (!(ctrl & RTC_DM_BINARY)) {
	    sec = bcd2bin(sec);
	    min = bcd2bin(min);
	    hour = bcd2bin(hour);
	    day = bcd2bin(day);
	    mon = bcd2bin(mon);
	    year = bcd2bin(year);
	    wday = bcd2bin(wday);
        }

        if (!(ctrl & RTC_24H)) {
	    if (!pm && hour == 12)
		hour = 0;
	    else if (pm && hour != 12)
		hour += 12;
        }

        t->tm_sec  = sec;
        t->tm_min  = min;
        t->tm_hour = hour;
        t->tm_mday = day;
        t->tm_mon  = mon - 1;
        t->tm_year = year + atari_rtc_year_offset;
        t->tm_wday = wday - 1;
    }

    return( 0 );
}
Example #20
static int suspend_enter(suspend_state_t state)
{
	int error;

	if (suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (use_dvfs)
	  wmt_suspend_target(0, CPUFREQ_RELATION_L, 1);

	if (suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_noirq(PMSG_RESUME);

 Platform_finish:
	if (suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
Example #21
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
Example #22
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int random = 0, irq = desc->irq_data.irq;

#ifdef CONFIG_MTPROF_IRQ_DURATION
	unsigned long long t1, t2, dur;
#ifdef CONFIG_ISR_MONITOR
	char aee_str[40];
#endif
#endif
	do {
		irqreturn_t res;

		trace_irq_handler_entry(irq, action);
#ifdef CONFIG_MTPROF_IRQ_DURATION
		t1 = sched_clock();
		res = action->handler(irq, action->dev_id);
		t2 = sched_clock();
		dur = t2 - t1;
		action->duration += dur;
		action->count++;
		action->dur_max = max(dur,action->dur_max);
		action->dur_min = min(dur,action->dur_min);
#ifdef CONFIG_MTPROF_CPUTIME
		if(mtsched_enabled == 1)
		{
			int isr_find = 0;
			struct mtk_isr_info *mtk_isr_point = current->se.mtk_isr;
			struct mtk_isr_info *mtk_isr_current = mtk_isr_point;
			char *isr_name = NULL;
			
			current->se.mtk_isr_time += dur;
			while(mtk_isr_point != NULL)
			{
				if(mtk_isr_point->isr_num == irq)
				{
					mtk_isr_point->isr_time += dur;
					mtk_isr_point->isr_count++;
					isr_find = 1;
					break;
				}
				mtk_isr_current = mtk_isr_point;
				mtk_isr_point = mtk_isr_point -> next;
			}

			if(isr_find == 0)
			{
				mtk_isr_point =  kmalloc(sizeof(struct mtk_isr_info), GFP_ATOMIC);
				if(mtk_isr_point == NULL)
				{
					printk(KERN_ERR "can't alloc mtk_isr_info mem!\n");
				}
				else
				{
					mtk_isr_point->isr_num = irq;
					mtk_isr_point->isr_time = dur;
					mtk_isr_point->isr_count = 1;
					mtk_isr_point->next = NULL;
					if(mtk_isr_current == NULL)
					{
						current->se.mtk_isr = mtk_isr_point;
					}
					else
					{
						mtk_isr_current->next  = mtk_isr_point;
					}

					isr_name = kmalloc(strlen(action->name) + 1, GFP_ATOMIC);
					if(isr_name != NULL)
					{
						strcpy(isr_name, action->name);
						mtk_isr_point->isr_name = isr_name;
					}
					else
					{
						printk(KERN_ERR "can't alloc isr_name mem!\n");
					}
					current->se.mtk_isr_count++;
				}	
			}
		}
#endif		
#ifdef CONFIG_ISR_MONITOR 
		if(unlikely(dur>TIME_3MS)){
		    if(in_white_list(irq)){
			printk("[ISR Monitor] Warning! ISR%d:%s too long, %llu ns > 3 ms, t1:%llu, t2:%llu\n", irq, action->name, dur, t1, t2);
		    }else if(dur>TIME_6MS){
			sprintf( aee_str, "ISR#%d:%s too long>6ms\n", irq, action->name);
			aee_kernel_exception( aee_str,"isr_monitor\n");
			printk("[ISR Monitor] Warning! ISR%d:%s too long, %llu ns > 6 ms, t1:%llu, t2:%llu\n", irq, action->name, dur, t1, t2);
		    }else{
			sprintf( aee_str, "ISR#%d:%s too long>3ms\n", irq, action->name);
			aee_kernel_warning( aee_str,"isr_monitor\n");
			printk("[ISR Monitor] Warning! ISR%d:%s too long, %llu ns > 3 ms, t1:%llu, t2:%llu\n", irq, action->name, dur, t1, t2);
		    }
		}
#endif

#else
		res = action->handler(irq, action->dev_id);
#endif
		trace_irq_handler_exit(irq, action, res);

		if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			random |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
		action = action->next;
	} while (action);

	if (random & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	if (!noirqdebug)
		note_interrupt(irq, desc, retval);
	return retval;
}
Example #23
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Set to %true if a wakeup event is pending, meaning the sleep
 *          state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
	int error, last_dev;

	if (suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
		last_dev %= REC_FAILED_NUM;
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		log_suspend_abort_reason("%s device failed to power down",
			suspend_stats.failed_devs[last_dev]);
		goto Platform_finish;
	}

	if (suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS)) {
		log_suspend_abort_reason("Disabling non-boot cpus failed");
		goto Enable_cpus;
	}

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		} else {
			pm_get_active_wakeup_sources(suspend_abort,
				MAX_SUSPEND_ABORT_LEN);
			log_suspend_abort_reason(suspend_abort);
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
Example #24
/*
 * First write to nvram, if fatal error, that is the only
 * place we log the info.  The error will be picked up
 * on the next reboot by rtasd.  If not fatal, run the
 * method for the type of error.  Currently, only RTAS
 * errors have methods implemented, but in the future
 * there might be a need to store data in nvram before a
 * call to panic().
 *
 * XXX We write to nvram periodically, to indicate error has
 * been written and sync'd, but there is a possibility
 * that if we don't shutdown correctly, a duplicate error
 * record will be created on next reboot.
 */
void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
{
    unsigned long offset;
    unsigned long s;
    int len = 0;

    pr_debug("rtasd: logging event\n");
    if (buf == NULL)
        return;

    spin_lock_irqsave(&rtasd_log_lock, s);

    /* get length and increase count */
    switch (err_type & ERR_TYPE_MASK) {
    case ERR_TYPE_RTAS_LOG:
        len = log_rtas_len(buf);
        if (!(err_type & ERR_FLAG_BOOT))
            error_log_cnt++;
        break;
    case ERR_TYPE_KERNEL_PANIC:
    default:
        WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
        spin_unlock_irqrestore(&rtasd_log_lock, s);
        return;
    }

#ifdef CONFIG_PPC64
    /* Write error to NVRAM */
    if (logging_enabled && !(err_type & ERR_FLAG_BOOT))
        nvram_write_error_log(buf, len, err_type, error_log_cnt);
#endif /* CONFIG_PPC64 */

    /*
     * rtas errors can occur during boot, and we do want to capture
     * those somewhere, even if nvram isn't ready (why not?), and even
     * if rtasd isn't ready. Put them into the boot log, at least.
     */
    if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG)
        printk_log_rtas(buf, len);

    /* Check to see if we need to or have stopped logging */
    if (fatal || !logging_enabled) {
        logging_enabled = 0;
        WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
        spin_unlock_irqrestore(&rtasd_log_lock, s);
        return;
    }

    /* call type specific method for error */
    switch (err_type & ERR_TYPE_MASK) {
    case ERR_TYPE_RTAS_LOG:
        offset = rtas_error_log_buffer_max *
                 ((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK);

        /* First copy over sequence number */
        memcpy(&rtas_log_buf[offset], (void *) &error_log_cnt, sizeof(int));

        /* Second copy over error log data */
        offset += sizeof(int);
        memcpy(&rtas_log_buf[offset], buf, len);

        if (rtas_log_size < LOG_NUMBER)
            rtas_log_size += 1;
        else
            rtas_log_start += 1;

        WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
        spin_unlock_irqrestore(&rtasd_log_lock, s);
        wake_up_interruptible(&rtas_log_wait);
        break;
    case ERR_TYPE_KERNEL_PANIC:
    default:
        WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
        spin_unlock_irqrestore(&rtasd_log_lock, s);
        return;
    }

}
Example #25
static u32 handle_rpc(struct smc_param *param)
{
	switch (TEESMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case TEESMC_RPC_FUNC_ALLOC_ARG:
		param->a1 = tee_shm_pool_alloc(DEV, TZop.Allocator,
					param->a1, 4);
		break;
	case TEESMC_RPC_FUNC_ALLOC_PAYLOAD:
		/* Can't support payload shared memory with this interface */
		param->a2 = 0;
		break;
	case TEESMC_RPC_FUNC_FREE_ARG:
		tee_shm_pool_free(DEV, TZop.Allocator, param->a1, 0);
		break;
	case TEESMC_RPC_FUNC_FREE_PAYLOAD:
		/* Can't support payload shared memory with this interface */
		break;
	case TEESMC_ST_RPC_FUNC_ALLOC_PAYLOAD:
	{
		struct tee_shm *shm;

		shm = tee_shm_allocate(&TZop, 0, param->a1, 0);
		if (!shm) {
			param->a1 = 0;
			break;
		}
		param->a1 = shm->paddr;
		param->a2 = (uint32_t)shm;
		break;
	}
	case TEESMC_ST_RPC_FUNC_FREE_PAYLOAD:
		if (param->a1)
			tee_shm_unallocate((struct tee_shm *)param->a1);
		break;
	case TEESMC_RPC_FUNC_IRQ:
		break;
	case TEESMC_RPC_FUNC_CMD:
	{
		struct teesmc32_arg *arg32;
		struct teesmc32_param *params;
		struct tee_rpc_invoke inv;
		size_t n;
		uint32_t ret;

		arg32 = tee_shm_pool_p2v(DEV, TZop.Allocator, param->a1);

		if (arg32->num_params > TEE_RPC_BUFFER_NUMBER) {
			arg32->ret = TEEC_ERROR_GENERIC;
			goto out;
		}

		params = TEESMC32_GET_PARAMS(arg32);

		memset(&inv, 0, sizeof(inv));
		inv.cmd = arg32->cmd;
		/*
		 * Set a suitable error code in case tee-supplicant
		 * ignores the request.
		 */
		inv.res = TEEC_ERROR_NOT_IMPLEMENTED;
		inv.nbr_bf = arg32->num_params;
		for (n = 0; n < arg32->num_params; n++) {
			inv.cmds[n].buffer = (void *)params[n].u.memref.buf_ptr;
			inv.cmds[n].size = params[n].u.memref.size;
			switch (params[n].attr & TEESMC_ATTR_TYPE_MASK) {
			case TEESMC_ATTR_TYPE_VALUE_INPUT:
			case TEESMC_ATTR_TYPE_VALUE_OUTPUT:
			case TEESMC_ATTR_TYPE_VALUE_INOUT:
				inv.cmds[n].type = TEE_RPC_VALUE;
				break;
			case TEESMC_ATTR_TYPE_MEMREF_INPUT:
			case TEESMC_ATTR_TYPE_MEMREF_OUTPUT:
			case TEESMC_ATTR_TYPE_MEMREF_INOUT:
				inv.cmds[n].type = TEE_RPC_BUFFER;
				break;
			default:
				arg32->ret = TEEC_ERROR_GENERIC;
				goto out;
			}
		}

		ret = tee_supp_cmd(&TZop, TEE_RPC_ICMD_INVOKE,
					  &inv, sizeof(inv));
		if (ret == TEEC_RPC_OK)
			arg32->ret = inv.res;

		for (n = 0; n < arg32->num_params; n++) {
			switch (params[n].attr & TEESMC_ATTR_TYPE_MASK) {
			case TEESMC_ATTR_TYPE_VALUE_INPUT:
			case TEESMC_ATTR_TYPE_VALUE_OUTPUT:
			case TEESMC_ATTR_TYPE_VALUE_INOUT:
			case TEESMC_ATTR_TYPE_MEMREF_OUTPUT:
			case TEESMC_ATTR_TYPE_MEMREF_INOUT:
				/*
				 * Allow supplicant to assign a new pointer
				 * to an out-buffer. Needed when the
				 * supplicant allocates a new buffer, for
				 * instance when loading a TA.
				 */
				params[n].u.memref.buf_ptr =
						(uint32_t)inv.cmds[n].buffer;
				params[n].u.memref.size = inv.cmds[n].size;
				break;
			default:
				break;
			}
		}

		break;
	}
	default:
		dev_warn(DEV, "Unknown RPC func 0x%x\n",
			 TEESMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

out:
	if (irqs_disabled())
		return TEESMC32_FASTCALL_RETURN_FROM_RPC;
	else
		return TEESMC32_CALL_RETURN_FROM_RPC;
}