Example #1
/*
 * TSC-warp measurement loop running on both CPUs.  This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
	 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
			now-start, end-start);
	return cur_max_warp;
}
Example #2
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			      state->priority);

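		/*
		 * Deliver with the ICS lock dropped: icp_deliver_irq()
		 * takes ICS locks itself, so holding ours across the
		 * call would deadlock.
		 */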
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
Example #3
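/*
 * Update a source's routing and priority under the ICS lock.  Returns
 * true when a masked-pending or resend-flagged interrupt must now be
 * delivered by the caller.
 */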
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}
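Note the shape this example shares with most of the snippets on this page: arch_spin_lock() is the raw architecture lock and does not disable interrupts or preemption by itself, so callers bracket it with local_irq_save()/local_irq_restore(). A minimal sketch of that pattern (demo_lock, demo_count and demo_increment are hypothetical names, not taken from any example here):

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

static void demo_increment(void)
{
	unsigned long flags;

	/* arch_spin_lock() leaves the IRQ state alone; mask IRQs ourselves. */
	local_irq_save(flags);
	arch_spin_lock(&demo_lock);
	demo_count++;			/* critical section */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}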
Example #4
/*
 * PMON is a fragile beast.  It'll blow up once the mappings it's littering
 * right into the middle of KSEG3 are blown away so we have to grab the slave
 * core early and keep it in a waiting loop.
 */
void __init prom_grab_secondary(void)
{
	arch_spin_lock(&launch_lock);

	pmon_cpustart(1, &prom_smp_bootstrap,
	              launchstack + LAUNCHSTACK_SIZE, 0);
}
Example #5
/*
 * Allocate memory from the mempool; the allocated memory is not freed
 * until ktap exits.
 * TODO: lock-free allocation
 */
void *kp_mempool_alloc(ktap_state_t *ks, int size)
{
	ktap_global_state_t *g = G(ks);
	void *mempool = g->mempool;
	void *freepos = g->mp_freepos;
	void *addr;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&g->mp_lock);

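	/* Refuse allocations that would run past the end of the pool. */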
	if (unlikely((unsigned long)((char *)freepos + size) >
		     (unsigned long)((char *)mempool + g->mp_size))) {
		addr = NULL;
		goto out;
	}

	addr = freepos;
	g->mp_freepos = (char *)freepos + size;
 out:
	arch_spin_unlock(&g->mp_lock);
	local_irq_restore(flags);
	return addr;
}
Example #6
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
Example #7
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}
Example #8
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
Example #9
u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
Example #10
static int sunxi_cpu_power_down_c2state(struct cpuidle_device *dev,
                                        struct cpuidle_driver *drv,
                                        int index)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_pm_enter();
	//cpu_cluster_pm_enter();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	smp_wmb();

	cpu_suspend(CPUIDLE_FLAG_C2_STATE, sunxi_powerdown_c2_finisher);

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]++;
	sun8i_cluster_use_count[cluster]++;
	arch_spin_unlock(&sun8i_mcpm_lock);

	local_irq_enable();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	//cpu_cluster_pm_exit();
	cpu_pm_exit();

	return index;
}
Example #11
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful when using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is),
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Example #12
int mcpm_cpu_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_was_down, first_man;
	unsigned long flags;

	if (!platform_ops)
		return -EUNATCH;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	local_irq_save(flags);
	arch_spin_lock(&mcpm_lock);

	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
	first_man = mcpm_cluster_unused(cluster);

	if (first_man && platform_ops->cluster_is_up)
		platform_ops->cluster_is_up(cluster);
	if (cpu_was_down)
		mcpm_cpu_use_count[cluster][cpu] = 1;
	if (platform_ops->cpu_is_up)
		platform_ops->cpu_is_up(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_restore(flags);

	return 0;
}
Example #13
unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	/* notify the hw-branch tracer so it may disable tracing and
	   add the last trace to the trace buffer -
	   the earlier this happens, the more useful the trace. */
	trace_hw_branch_oops();

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
Example #14
static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
					  long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	int rc = 0;

	pr_devel("%s irq=0x%lx", __func__, irq);

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	rc = -EINVAL;

	arch_spin_lock(&sb->lock);

	if (state->valid) {
		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		rc = 0;
	}

	arch_spin_unlock(&sb->lock);
	return rc;
}
Example #15
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_bug(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}
Example #16
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int cpu;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;
	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

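		/*
		 * Serialize output so backtraces from different CPUs
		 * don't interleave.
		 */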
		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NOTIFY_STOP;
	}

	return NOTIFY_DONE;
}
Example #17
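/*
 * Take only the per-CPU lock belonging to @cpu; the caller pairs this
 * with lg_local_unlock_cpu().
 */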
void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_lock(lock);
}
Example #18
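/*
 * Secondary-CPU side of the timebase hand-off: spin until the boot CPU
 * publishes the timebase (see rtas_give_timebase() in Example #28), load
 * it, then clear the shared variable to signal completion.
 */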
void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	arch_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	arch_spin_unlock(&timebase_lock);
}
Example #19
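/*
 * Thin wrapper: the cast assumes struct ke_spinlock is layout-compatible
 * with arch_spinlock_t.
 */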
DLLEXPORT void ke_spin_lock(struct ke_spinlock * lock)
{
//	char str[32];
	//str[h2c(str, __builtin_return_address(0))] = 0;
//	serial_puts("lock:", 1000);
//	serial_puts(str, 1000);
//	serial_puts("\n", 1000);
	arch_spin_lock((arch_spinlock_t*)lock);
}
Example #20
/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace sunxi_powerdown_c2_finisher(unsigned long flg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	bool last_man = false;
	struct sunxi_enter_idle_para sunxi_idle_para;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]--;
	/* check whether this CPU is the last man, and set the flag */
	sun8i_cluster_use_count[cluster]--;
	if (sun8i_cluster_use_count[cluster] == 0) {
		writel(1, CLUSTER_CPUX_FLG(cluster, cpu));
		last_man = true;
	}
	arch_spin_unlock(&sun8i_mcpm_lock);

	/* call cpus to power off */
	sunxi_idle_para.flags = (unsigned long)mpidr | flg;
	sunxi_idle_para.resume_addr = (void *)(virt_to_phys(mcpm_entry_point));
	arisc_enter_cpuidle(NULL, NULL, &sunxi_idle_para);

	if (last_man) {
		int t = 0;

		/*
		 * Wait for the other CPUs to receive this message and
		 * respond, to reconfirm whether this CPU really is the
		 * last man, then clear the flag.
		 */
		while (1) {
			udelay(2);
			if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 2) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				break; /* last_man is true */
			} else if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 3) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				goto out; /* last_man is false */
			}
			if (++t > 5000) {
				printk(KERN_WARNING "cpu%d idle time out!\n",
				       cluster * 4 + cpu);
				t = 0;
			}
		}
		sunxi_idle_cluster_die(cluster);
	}
out:
	sunxi_idle_cpu_die();

	/* return value != 0 means failure */
	return 1;
}
Example #21
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
#if 0 /* Temporarily comment out for testing hrtimer spinlock issue */
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
#else
	arch_spin_lock(&lock->raw_lock);
#endif
	debug_spin_lock_after(lock);
}
Example #22
unsigned long __ipipe_spin_lock_irqsave(ipipe_spinlock_t *lock)
{
	unsigned long flags;
	int s;

	flags = hard_local_irq_save();
	arch_spin_lock(&lock->arch_lock);
	s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);

	return arch_mangle_irq_bits(s, flags);
}
Example #23
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
Example #24
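/*
 * Acquire every CPU's per-CPU lock.  Taking them in a fixed
 * (CPU-numbering) order keeps concurrent lg_global_lock() callers from
 * deadlocking against each other.
 */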
void lg_global_lock(struct lglock *lg)
{
	int i;

	preempt_disable();
	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_lock(lock);
	}
}
Example #25
static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}
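For completeness, the matching unlock helper is the mirror image; a minimal sketch, assuming the same arch_spinlock_t cast that the assertions above validate:

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}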
Example #26
static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned int i;

	pr_devel("%s\n", __func__);

	mutex_lock(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int prio;

		if (!xc)
			continue;

		kvmppc_xive_disable_vcpu_interrupts(vcpu);

		for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {

			/* Single escalation, no queue 7 */
			if (prio == 7 && xive->single_escalation)
				break;

			if (xc->esc_virq[prio]) {
				free_irq(xc->esc_virq[prio], vcpu);
				irq_dispose_mapping(xc->esc_virq[prio]);
				kfree(xc->esc_virq_names[prio]);
				xc->esc_virq[prio] = 0;
			}

			kvmppc_xive_native_cleanup_queue(vcpu, prio);
		}
	}

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_reset_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	mutex_unlock(&kvm->lock);

	return 0;
}
Example #27
static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct kvm_device *dev = vma->vm_file->private_data;
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	u64 page;
	unsigned long irq;
	u64 page_offset;

	/*
	 * Linux/KVM uses a two-page ESB setting: one page for trigger
	 * and one for EOI.
	 */
	page_offset = vmf->pgoff - vma->vm_pgoff;
	irq = page_offset / 2;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel("%s: source %lx not found !\n", __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	arch_spin_lock(&sb->lock);

	/*
	 * first/even page is for trigger
	 * second/odd page is for EOI and management.
	 */
	page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
	arch_spin_unlock(&sb->lock);

	if (WARN_ON(!page)) {
		pr_err("%s: accessing invalid ESB page for source %lx !\n",
		       __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}
Example #28
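/*
 * Boot-CPU side of the timebase hand-off: freeze the timebase, publish
 * its value, and wait for the secondary (rtas_take_timebase() in
 * Example #18) to consume it before thawing.
 */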
void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}
Example #29
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}
Example #30
static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
					struct kvmppc_xive_src_block *sb,
					struct kvmppc_xive_irq_state *state,
					u32 server, u8 priority, bool masked,
					u32 eisn)
{
	struct kvm *kvm = xive->kvm;
	u32 hw_num;
	int rc = 0;

	arch_spin_lock(&sb->lock);

	if (state->act_server == server && state->act_priority == priority &&
	    state->eisn == eisn)
		goto unlock;

	pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
		 priority, server, masked, state->act_server,
		 state->act_priority);

	kvmppc_xive_select_irq(state, &hw_num, NULL);

	if (priority != MASKED && !masked) {
		rc = kvmppc_xive_select_target(kvm, &server, priority);
		if (rc)
			goto unlock;

		state->act_priority = priority;
		state->act_server = server;
		state->eisn = eisn;

		rc = xive_native_configure_irq(hw_num,
					       kvmppc_xive_vp(xive, server),
					       priority, eisn);
	} else {
		state->act_priority = MASKED;
		state->act_server = 0;
		state->eisn = 0;

		rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
	}

unlock:
	arch_spin_unlock(&sb->lock);
	return rc;
}