Example #1
static const struct cpumask *summit_target_cpus(void)
{
	/* Always route to CPU 0; IRQ balancing will spread the load. */
	return cpumask_of(0);
}
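All of the examples in this listing use cpumask_of(), which wraps a single CPU number in a constant, single-bit struct cpumask so it can be handed to any interface that expects a mask. A minimal illustrative sketch (the helper name below is hypothetical, not part of the example above):

#include <linux/cpumask.h>

/* Hypothetical helper: cpumask_of() yields a mask with exactly one bit set. */
static bool mask_covers_only(unsigned int cpu)
{
	const struct cpumask *mask = cpumask_of(cpu);

	return cpumask_weight(mask) == 1 && cpumask_test_cpu(cpu, mask);
}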
Example #2
void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
Example #3
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, err, 0);
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example #4
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START)
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	irqmask = interrupt_mask_save_mask();
	interrupt_mask_set_mask(-1ULL);
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}
Example #5
static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
Example #6
/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
Example #7
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		int i = 0;

		/*
		 * I got SIGKILL, but wait for 10 more seconds for completion
		 * unless chosen by the OOM killer. This delay is there as a
		 * workaround for boot failure caused by SIGKILL upon device
		 * driver initialization timeout.
		 */
		while (i++ < 10 && !test_tsk_thread_flag(current, TIF_MEMDIE))
			if (wait_for_completion_timeout(&done, HZ))
				goto ready;
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-ENOMEM);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
ready:
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
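A hedged usage sketch of the API documented above: create the thread with kthread_create_on_node(), pin it with kthread_bind() while it is still stopped, then wake it. The worker function and names are hypothetical and kept minimal.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/topology.h>

static int my_worker_fn(void *data)
{
	/* Run until someone calls kthread_stop() on this task. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *start_worker_on(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(my_worker_fn, NULL, cpu_to_node(cpu),
				     "my_worker/%u", cpu);
	if (IS_ERR(tsk))
		return tsk;

	kthread_bind(tsk, cpu);	/* @cpu must be possible, may still be offline */
	wake_up_process(tsk);	/* the new thread is created stopped */
	return tsk;
}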
Example #8
/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message.  We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;
	for (cpu = 0; cpu < NR_CPUS; ++cpu)	{
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb();  /* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}
Example #9
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example #10
File: manage.c Project: mdamt/linux
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

	err = freeze_processes();
	if (err) {
		pr_err("%s: freeze processes failed %d\n", __func__, err);
		goto out;
	}

	err = freeze_kernel_threads();
	if (err) {
		pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
		goto out_thaw;
	}

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		pr_err("dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	xen_arch_suspend();

	si.cancelled = 1;

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	/* Resume console as early as possible. */
	if (!si.cancelled)
		xen_console_resume();

	raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		pr_err("failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

	xen_arch_resume();

out_resume:
	if (!si.cancelled)
		xs_resume();
	else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

out_thaw:
	thaw_processes();
out:
	shutting_down = SHUTDOWN_INVALID;
}
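A hedged sketch of the pattern the suspend path above relies on: stop_machine() runs a callback while every other online CPU spins with interrupts disabled, and passing cpumask_of(0) forces the callback itself onto CPU 0. The callback and wrapper names are hypothetical.

#include <linux/cpumask.h>
#include <linux/stop_machine.h>

/* Runs on CPU 0 while all other online CPUs are held by stop_machine(). */
static int critical_transition(void *data)
{
	/* e.g. hand control to the hypervisor, as xen_suspend() does above */
	return 0;
}

static int run_transition_on_cpu0(void *data)
{
	return stop_machine(critical_transition, data, cpumask_of(0));
}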
Example #11
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;

    /*
     * Set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    /*
     * This is really belt and braces; we hold unintended secondary
     * CPUs in the holding pen until we're ready for them.  However,
     * since we haven't sent them a soft interrupt, they shouldn't
     * be there.
     */
    //writew((BSYM(virt_to_phys(vexpress_secondary_startup))>>16), SECOND_START_ADDR_HI);
    //writew(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR_LO);
    writel(0xbabe, SECOND_MAGIC_NUMBER_ADRESS);
    writel(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR);
    __cpuc_flush_kern_all();
    pen_release = cpu;

#if defined(CONFIG_MSTAR_STR_DBGMSG)
    {
        int *ptr = &pen_release;
        printk("pen_release = 0x%08x, addr = %p, *ptr = 0x%08x\n",
               pen_release, &pen_release, *ptr);
    }
#endif

    __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
    outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

    /*
     * Send the secondary CPU a soft interrupt, thereby causing
     * the boot monitor to read the system wide flags register,
     * and branch to the address found there.
     */
    smp_cross_call(cpumask_of(cpu));

    timeout = jiffies + (1 * HZ);
    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }
#if defined(CONFIG_MSTAR_STR_DBGMSG)
    {
        int *ptr = &pen_release;
        printk("pen_release = 0x%08x, addr = %p, *ptr = 0x%08x\n",
               pen_release, &pen_release, *ptr);
    }
#endif
    /*
     * now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);
    return pen_release != -1 ? -ENOSYS : 0;
}
Example #12
File: smp.c Project: B-Rich/L4Reap
void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}
Example #13
File: apic.c Project: robhoes/xen
void smp_send_state_dump(unsigned int cpu)
{
    /* We overload the spurious interrupt handler to handle the dump. */
    per_cpu(state_dump_pending, cpu) = 1;
    send_IPI_mask(cpumask_of(cpu), SPURIOUS_APIC_VECTOR);
}
Example #14
int pv_init(struct pv_init_t *init, struct pv_config_t **config)
{
	struct pv_dev *dev;
	int ret;

	if (!init || (init->id >= MAX_PV_INST) || !init->irq
#ifdef PV_HAS_CLK
		|| !init->apb_clk_name || !init->pix_clk_name
#endif
		|| !init->base_addr || !init->err_cb || !init->eof_cb) {
		pv_err("Invalid input parameters\n");
		ret = -EINVAL;
		goto done;
	}
	if (g_pv_init[init->id]) {
		pv_err("All instances of PV have already been initialised\n");
		ret = -EPERM;
		goto done;
	}

	dev = kzalloc(sizeof(struct pv_dev), GFP_KERNEL);
	if (!dev) {
		pv_err("couldn't allocate memory for pv_data\n");
		ret = -ENOMEM;
		goto done;
	}
	ret = request_irq(init->irq, pv_isr, IRQF_TRIGGER_HIGH, "PV", dev);
	if (ret < 0) {
		pv_err("failed to get irq\n");
		goto fail_mem;	/* IRQ was not requested; skip free_irq() */
	}
	irq_set_affinity(init->irq, cpumask_of(1));
#ifdef PV_HAS_CLK
	dev->apb_clk = clk_get(NULL, init->apb_clk_name);
	if (IS_ERR(dev->apb_clk)) {
		pv_err("failed to get %s\n", init->apb_clk_name);
		ret = PTR_ERR(dev->apb_clk);
		goto fail;
	}

	dev->pix_clk = clk_get(NULL, init->pix_clk_name);
	if (IS_ERR(dev->pix_clk)) {
		pv_err("failed to get %s\n", init->pix_clk_name);
		ret = PTR_ERR(dev->pix_clk);
		goto fail;
	}
#endif
	dev->id = init->id;
	dev->irq = init->irq;
	dev->base_addr = init->base_addr;
	dev->err_cb = init->err_cb;
	dev->eof_cb = init->eof_cb;
	INIT_WORK(&dev->err_work, err_cb);
	INIT_WORK(&dev->eof_work, eof_cb);
	dev->state = PV_INIT_DONE;
	if (!g_display_enabled) {
		printk("%s:%d\n", __func__, __LINE__);
		dev->state = PV_INIT_DONE;
	} else {
		pv_clk_enable(dev);
		printk("enabling pv isr in init\n");
#ifdef INT_4_LONG_PKT
		writel(VFP_START, dev->base_addr + REG_PV_INTEN); //workaround1
#endif
		dev->state = PV_ENABLED;
	}
	g_pv_init[init->id] = true;
	*config = &dev->vid_config;
	ret = 0;
	goto done;

fail:
	free_irq(init->irq, dev);	/* must match the dev_id passed to request_irq() */
fail_mem:
	kfree(dev);
done:
	return ret;
}
Example #15
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap4_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		__raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 *	4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 * the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 * its wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi
		 */
		clkdm_wakeup(cpu1_clkdm);
		omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example #16
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = data->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
					data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

out:
	return result;
}
Example #17
static void __init ixp4xx_clockevent_init(void)
{
	clockevent_ixp4xx.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_ixp4xx, IXP4XX_TIMER_FREQ,
					0xf, 0xfffffffe);
}
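A hedged sketch of the registration pattern shown above: the clock_event_device must have its cpumask filled in (here CPU 0 only, since the timer interrupts a single CPU) before clockevents_config_and_register() is called. The device below is illustrative and omits the programming callbacks a real driver must provide.

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>

static struct clock_event_device my_clkevt = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	/* .set_next_event and state callbacks omitted in this sketch */
};

static void __init my_clockevent_init(u32 timer_freq)
{
	my_clkevt.cpumask = cpumask_of(0);	/* deliver all events to CPU 0 */
	clockevents_config_and_register(&my_clkevt, timer_freq, 0xf, 0xfffffffe);
}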
Example #18
/*==========================================================================*
 * Name:         do_boot_cpu
 *
 * Description:  This routine boot up one AP.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    phys_id - Target CPU physical ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
static void __init do_boot_cpu(int phys_id)
{
	struct task_struct *idle;
	unsigned long send_status, boot_status;
	int timeout, cpu_id;

	cpu_id = ++cpucount;

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("failed fork for CPU#%d.", cpu_id);

	idle->thread.lr = (unsigned long)start_secondary;

	map_cpu_to_physid(cpu_id, phys_id);

	/* So we see what's up   */
	printk("Booting processor %d/%d\n", phys_id, cpu_id);
	stack_start.spi = (void *)idle->thread.sp;
	task_thread_info(idle)->cpu = cpu_id;

	/*
	 * Send Startup IPI
	 *   1.IPI received by CPU#(phys_id).
	 *   2.CPU#(phys_id) enter startup_AP (arch/m32r/kernel/head.S)
	 *   3.CPU#(phys_id) enter start_secondary()
	 */
	send_status = 0;
	boot_status = 0;

	cpumask_set_cpu(phys_id, &cpu_bootout_map);

	/* Send Startup IPI */
	send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;

	/* Wait 100[ms] */
	do {
		Dprintk("+");
		udelay(1000);
		send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
	} while (send_status && (timeout++ < 100));

	Dprintk("After Startup.\n");

	if (!send_status) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu_id);
		cpumask_set_cpu(cpu_id, &cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu_id);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 5000; timeout++) {
			if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
				break;	/* It has booted */
			udelay(1000);
		}

		if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
		} else {
			boot_status = 1;
			printk("Not responding.\n");
		}
	} else
		printk("IPI never delivered???\n");

	if (send_status || boot_status) {
		unmap_cpu_to_physid(cpu_id, phys_id);
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
	}
}
Example #19
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;

    /*
     * set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    /*
     * The secondary processor is waiting to be released from
     * the holding pen - release it, then wait for it to flag
     * that it has been released by resetting pen_release.
     *
     * Note that "pen_release" is the hardware CPU ID, whereas
     * "cpu" is Linux's internal ID.
     */
    pen_release = cpu;
    smp_wmb();

    clean_dcache_area((void *)  &pen_release, sizeof(pen_release));
    outer_clean_range(__pa(&pen_release), __pa(&pen_release +
                      sizeof(pen_release)));

    dsb_sev();

    /*
     * Timeout set on purpose in jiffies so that on slow processors
     * that must also have low HZ it will wait longer.
     */
    timeout = jiffies + 128;

    udelay(100);

    /*
     * If the secondary CPU was waiting on WFE, it should
     * be already watching <pen_release>, or it could be
     * waiting in WFI, send it an IPI to be sure it wakes.
     */

    if (pen_release != -1)
        smp_cross_call(cpumask_of(cpu));

    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }

    if (arch_is_coherent()) {
        outer_cache.inv_range = NULL;
        outer_cache.clean_range = NULL;
        outer_cache.flush_range = NULL;
        outer_cache.sync = NULL;
    }

    /*
     * now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);

    return pen_release != -1 ? -ENOSYS : 0;
}
Example #20
const cpumask_t *vector_allocation_cpumask_phys(int cpu)
{
	return cpumask_of(cpu);
}
Example #21
void tegra_cputimer_reset_irq_affinity(int cpu)
{
	/* Reassign the affinity of the wake IRQ to CPU0 */
	(void)irq_set_affinity(tegra_cputimer_irq[cpu].irq,
			       cpumask_of(0));
}
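Examples 14 and 21 show the same affinity idiom: irq_set_affinity() takes an IRQ number and a cpumask, so cpumask_of() is the usual way to pin an interrupt to one CPU. A hedged sketch with a hypothetical helper:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

/* Hypothetical helper: steer a device interrupt to a single online CPU. */
static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	if (!cpu_online(cpu))
		return -EINVAL;

	return irq_set_affinity(irq, cpumask_of(cpu));
}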
Example #22
static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int i;
    unsigned int valid_states = 0;
    unsigned int cpu = policy->cpu;
    struct acpi_cpufreq_data *data;
    unsigned int result = 0;
    struct processor_performance *perf;
    u32 max_hw_pstate;
    uint64_t msr_content;
    struct cpuinfo_x86 *c = &cpu_data[policy->cpu];

    data = xzalloc(struct acpi_cpufreq_data);
    if (!data)
        return -ENOMEM;

    cpufreq_drv_data[cpu] = data;

    data->acpi_data = &processor_pminfo[cpu]->perf;

    perf = data->acpi_data;
    policy->shared_type = perf->shared_type;

    if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
        policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
        cpumask_set_cpu(cpu, policy->cpus);
        if (cpumask_weight(policy->cpus) != 1) {
            printk(XENLOG_WARNING "Unsupported sharing type %d (%u CPUs)\n",
                   policy->shared_type, cpumask_weight(policy->cpus));
            result = -ENODEV;
            goto err_unreg;
        }
    } else {
        cpumask_copy(policy->cpus, cpumask_of(cpu));
    }

    /* capability check */
    if (perf->state_count <= 1) {
        printk("No P-States\n");
        result = -ENODEV;
        goto err_unreg;
    }
    rdmsrl(MSR_PSTATE_CUR_LIMIT, msr_content);
    max_hw_pstate = (msr_content & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;

    if (perf->control_register.space_id != perf->status_register.space_id) {
        result = -ENODEV;
        goto err_unreg;
    }

    data->freq_table = xmalloc_array(struct cpufreq_frequency_table, 
                                    (perf->state_count+1));
    if (!data->freq_table) {
        result = -ENOMEM;
        goto err_unreg;
    }

    /* detect transition latency */
    policy->cpuinfo.transition_latency = 0;
    for (i=0; i<perf->state_count; i++) {
        if ((perf->states[i].transition_latency * 1000) >
            policy->cpuinfo.transition_latency)
            policy->cpuinfo.transition_latency =
                perf->states[i].transition_latency * 1000;
    }

    policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR;

    /* table init */
    for (i = 0; i < perf->state_count && i <= max_hw_pstate; i++) {
        if (i > 0 && perf->states[i].core_frequency >=
            data->freq_table[valid_states-1].frequency / 1000)
            continue;

        data->freq_table[valid_states].index = perf->states[i].control & HW_PSTATE_MASK;
        data->freq_table[valid_states].frequency =
            perf->states[i].core_frequency * 1000;
        valid_states++;
    }
    data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
    perf->state = 0;

    result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
    if (result)
        goto err_freqfree;

    if (c->cpuid_level >= 6)
        on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
      
    /*
     * the first call to ->target() should result in us actually
     * writing something to the appropriate registers.
     */
    data->arch_cpu_flags |= ARCH_CPU_FLAG_RESUME;

    policy->cur = data->freq_table[i].frequency;
    return result;

err_freqfree:
    xfree(data->freq_table);
err_unreg:
    xfree(data);
    cpufreq_drv_data[cpu] = NULL;

    return result;
}
Example #23
static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("get_cur_val = %u\n", val);

	return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}

unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
				      unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}

static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
Example #24
static int __devinit s5p_ehci_probe(struct platform_device *pdev)
{
	struct s5p_ehci_platdata *pdata;
	struct s5p_ehci_hcd *s5p_ehci;
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct resource *res;
	int irq;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data defined\n");
		return -EINVAL;
	}

	s5p_ehci = kzalloc(sizeof(struct s5p_ehci_hcd), GFP_KERNEL);
	if (!s5p_ehci)
		return -ENOMEM;
	s5p_ehci->dev = &pdev->dev;

	hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev,
					dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		err = -ENOMEM;
		goto fail_hcd;
	}

	s5p_ehci->hcd = hcd;
	s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");

	if (IS_ERR(s5p_ehci->clk)) {
		dev_err(&pdev->dev, "Failed to get usbhost clock\n");
		err = PTR_ERR(s5p_ehci->clk);
		goto fail_clk;
	}

	err = clk_enable(s5p_ehci->clk);
	if (err)
		goto fail_clken;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get I/O memory\n");
		err = -ENXIO;
		goto fail_io;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(res->start, resource_size(res));
	if (!hcd->regs) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto fail_io;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get IRQ\n");
		err = irq;
		goto fail;
	}

	platform_set_drvdata(pdev, s5p_ehci);

	s5p_ehci_phy_init(pdev);

	ehci = hcd_to_ehci(hcd);
	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = readl(&ehci->caps->hcs_params);

	err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
	if (err) {
		dev_err(&pdev->dev, "Failed to add USB HCD\n");
		goto fail;
	}

	create_ehci_sys_file(ehci);
	s5p_ehci->power_on = 1;

#ifdef CONFIG_USB_SUSPEND
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
#endif
#ifdef CONFIG_MDM_HSIC_PM
	/* halt controller before driving suspend on ths bus */
	ehci->susp_sof_bug = 1;

	set_host_stat(hsic_pm_dev, POWER_ON);
	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&hcd->self.root_hub->dev, 0);

	pm_runtime_forbid(&pdev->dev);
	enable_periodic(ehci);
#endif

#ifdef CONFIG_EHCI_IRQ_DISTRIBUTION
	if (num_possible_cpus() > 1) {
		s5p_ehci_irq_no = irq;
		s5p_ehci_irq_cpu = s5p_ehci_cpus[num_possible_cpus() - 1];
		irq_set_affinity(s5p_ehci_irq_no, cpumask_of(s5p_ehci_irq_cpu));
		register_cpu_notifier(&s5p_ehci_cpu_notifier);
	}
#endif

#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB)
	/* for cp enumeration */
	pm_runtime_forbid(&pdev->dev);
	/*HSIC IPC control the ACTIVE_STATE*/
	if (pdata && pdata->noti_host_states)
		pdata->noti_host_states(pdev, S5P_HOST_ON);
#endif

	return 0;

fail:
	iounmap(hcd->regs);
fail_io:
	clk_disable(s5p_ehci->clk);
fail_clken:
	clk_put(s5p_ehci->clk);
fail_clk:
	usb_put_hcd(hcd);
fail_hcd:
	kfree(s5p_ehci);
	return err;
}
Example #25
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

	err = freeze_processes();
	if (err) {
		pr_err("%s: freeze failed %d\n", __func__, err);
		goto out;
	}

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		pr_err("dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	si.cancelled = 1;

	if (xen_hvm_domain()) {
		si.arg = 0UL;
		si.pre = NULL;
		si.post = &xen_hvm_post_suspend;
	} else {
		si.arg = virt_to_mfn(xen_start_info);
		si.pre = &xen_pre_suspend;
		si.post = &xen_post_suspend;
	}

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		pr_err("failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

out_thaw:
	thaw_processes();
out:
	shutting_down = SHUTDOWN_INVALID;
}
Example #26
static int __init tegra_init_timer(struct device_node *np, bool tegra20)
{
	struct timer_of *to;
	int cpu, ret;

	to = this_cpu_ptr(&tegra_to);
	ret = timer_of_init(np, to);
	if (ret)
		goto out;

	timer_reg_base = timer_of_base(to);

	/*
	 * Configure microsecond timers to have 1MHz clock
	 * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
	 * Uses n+1 scheme
	 */
	switch (timer_of_rate(to)) {
	case 12000000:
		usec_config = 0x000b; /* (11+1)/(0+1) */
		break;
	case 12800000:
		usec_config = 0x043f; /* (63+1)/(4+1) */
		break;
	case 13000000:
		usec_config = 0x000c; /* (12+1)/(0+1) */
		break;
	case 16800000:
		usec_config = 0x0453; /* (83+1)/(4+1) */
		break;
	case 19200000:
		usec_config = 0x045f; /* (95+1)/(4+1) */
		break;
	case 26000000:
		usec_config = 0x0019; /* (25+1)/(0+1) */
		break;
	case 38400000:
		usec_config = 0x04bf; /* (191+1)/(4+1) */
		break;
	case 48000000:
		usec_config = 0x002f; /* (47+1)/(0+1) */
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);

	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
		unsigned int base = tegra_base_for_cpu(cpu, tegra20);
		unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);

		/*
		 * TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
		 * parent clock.
		 */
		if (tegra20)
			cpu_to->of_clk.rate = 1000000;

		cpu_to->of_base.base = timer_reg_base + base;
		cpu_to->clkevt.cpumask = cpumask_of(cpu);
		cpu_to->clkevt.irq = irq_of_parse_and_map(np, idx);
		if (!cpu_to->clkevt.irq) {
			pr_err("failed to map irq for cpu%d\n", cpu);
			ret = -EINVAL;
			goto out_irq;
		}

		irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
		ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
				  IRQF_TIMER | IRQF_NOBALANCING,
				  cpu_to->clkevt.name, &cpu_to->clkevt);
		if (ret) {
			pr_err("failed to set up irq for cpu%d: %d\n",
			       cpu, ret);
			irq_dispose_mapping(cpu_to->clkevt.irq);
			cpu_to->clkevt.irq = 0;
			goto out_irq;
		}
	}

	sched_clock_register(tegra_read_sched_clock, 32, 1000000);

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
				    "timer_us", 1000000,
				    300, 32, clocksource_mmio_readl_up);
	if (ret)
		pr_err("failed to register clocksource: %d\n", ret);

#ifdef CONFIG_ARM
	register_current_timer_delay(&tegra_delay_timer);
#endif

	ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
				"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
				tegra_timer_stop);
	if (ret)
		pr_err("failed to set up cpu hp state: %d\n", ret);

	return ret;

out_irq:
	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to;

		cpu_to = per_cpu_ptr(&tegra_to, cpu);
		if (cpu_to->clkevt.irq) {
			free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
			irq_dispose_mapping(cpu_to->clkevt.irq);
		}
	}
out:
	timer_of_cleanup(to);

	return ret;
}
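A hedged sketch of the hotplug hook used at the end of the example above: per-CPU tick devices are started and stopped through callbacks registered with cpuhp_setup_state(). The dynamic state and callback names below are illustrative, not the Tegra driver's own.

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int my_timer_starting_cpu(unsigned int cpu)
{
	/* Enable and register this CPU's clock event device. */
	return 0;
}

static int my_timer_dying_cpu(unsigned int cpu)
{
	/* Quiesce this CPU's clock event device before it goes offline. */
	return 0;
}

static int __init my_timer_hotplug_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"clockevents/my-timer:starting",
				my_timer_starting_cpu, my_timer_dying_cpu);
	return ret < 0 ? ret : 0;
}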
Example #27
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
Example #28
static int __cpuinit highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	gic_raise_softirq(cpumask_of(cpu), 0);
	return 0;
}
Example #29
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

#ifdef CONFIG_PREEMPT
	/* If the kernel is preemptible, we need to freeze all the processes
	   to prevent them from being in the middle of a pagetable update
	   during suspend. */
	err = freeze_processes();
	if (err) {
		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
		goto out;
	}
#endif

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_noirq(PMSG_FREEZE);
	if (err) {
		printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
		goto out_resume;
	}

	si.cancelled = 1;

	if (xen_hvm_domain()) {
		si.arg = 0UL;
		si.pre = NULL;
		si.post = &xen_hvm_post_suspend;
	} else {
		si.arg = virt_to_mfn(xen_start_info);
		si.pre = &xen_pre_suspend;
		si.post = &xen_post_suspend;
	}

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	/* Make sure timer events get retriggered on all CPUs */
	clock_was_set();

out_thaw:
#ifdef CONFIG_PREEMPT
	thaw_processes();
out:
#endif
	shutting_down = SHUTDOWN_INVALID;
}
Example #30
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu);

	if (!(__raw_readl(S5P_ARM_CORE1_STATUS) & S5P_CORE_LOCAL_PWR_EN)) {
		__raw_writel(S5P_CORE_LOCAL_PWR_EN,
			     S5P_ARM_CORE1_CONFIGURATION);

		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while ((__raw_readl(S5P_ARM_CORE1_STATUS)
			& S5P_CORE_LOCAL_PWR_EN) != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}
	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();

		__raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
			CPU1_BOOT_REG);
		gic_raise_softirq(cpumask_of(cpu), 1);

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}