Example #1
/*
 * TSC-warp measurement loop running on both CPUs.  This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
			now-start, end-start);
	return cur_max_warp;
}
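The warp check above can be mimicked outside the kernel. The following is a minimal user-space sketch (an illustration, not kernel code), assuming x86 and the compiler's __rdtsc intrinsic: two threads ping-pong a shared timestamp under a mutex and record any case where the previously published value is newer than the freshly read one, i.e. the same prev > now condition tested above.

/* Minimal user-space analogue of the warp check above (illustration only). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>		/* __rdtsc() */

static pthread_mutex_t sync_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_tsc;
static uint64_t max_warp;

static void *warp_check(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 10 * 1000 * 1000; i++) {
		uint64_t prev, now;

		/* Same critical-section shape as above: read previous, publish new. */
		pthread_mutex_lock(&sync_mutex);
		prev = last_tsc;
		now = __rdtsc();
		last_tsc = now;
		pthread_mutex_unlock(&sync_mutex);

		/* Outside the critical section, check for a backwards step. */
		if (prev > now) {
			pthread_mutex_lock(&sync_mutex);
			if (prev - now > max_warp)
				max_warp = prev - now;
			pthread_mutex_unlock(&sync_mutex);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, warp_check, NULL);
	pthread_create(&b, NULL, warp_check, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("max observed warp: %llu cycles\n",
	       (unsigned long long)max_warp);
	return 0;
}

For a meaningful measurement the two threads would also need to be pinned to different CPUs (for example with pthread_setaffinity_np()), which is omitted here for brevity.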
Example #2
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			      state->priority);

		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
Example #3
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}
Example #4
static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
					  long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	int rc = 0;

	pr_devel("%s irq=0x%lx", __func__, irq);

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	rc = -EINVAL;

	arch_spin_lock(&sb->lock);

	if (state->valid) {
		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		rc = 0;
	}

	arch_spin_unlock(&sb->lock);
	return rc;
}
Example #5
/*
 * Allocate memory from the mempool. The allocated memory is not freed
 * until ktap exits.
 * TODO: lock-free allocation
 */
void *kp_mempool_alloc(ktap_state_t *ks, int size)
{
	ktap_global_state_t *g = G(ks);
	void *mempool = g->mempool;
	void *freepos = g->mp_freepos;
	void *addr;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&g->mp_lock);

	if (unlikely((unsigned long)((char *)freepos + size) >
		     (unsigned long)((char *)mempool + g->mp_size))) {
		addr = NULL;
		goto out;
	}

	addr = freepos;
	g->mp_freepos = (char *)freepos + size;
 out:

	arch_spin_unlock(&g->mp_lock);
	local_irq_restore(flags);
	return addr;
}
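The "TODO: lock-free allocation" note above suggests replacing the spinlock with an atomic bump pointer. A minimal sketch of that idea, using C11 atomics and hypothetical names (not ktap's actual types):

/* Hypothetical lock-free bump allocator, sketching the TODO above. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct mempool {
	char *base;			/* start of the backing buffer */
	size_t size;			/* total buffer size in bytes */
	_Atomic uintptr_t freepos;	/* current allocation offset */
};

static void *mempool_alloc_lockfree(struct mempool *mp, size_t size)
{
	uintptr_t old, next;

	old = atomic_load_explicit(&mp->freepos, memory_order_relaxed);
	do {
		if (old + size > mp->size)
			return NULL;	/* out of space */
		next = old + size;
		/* Retry if another CPU bumped freepos concurrently. */
	} while (!atomic_compare_exchange_weak_explicit(&mp->freepos, &old, next,
							memory_order_relaxed,
							memory_order_relaxed));

	return mp->base + old;
}

Relaxed ordering is enough to hand out disjoint regions; publishing the contents of an allocation to other CPUs still needs its own synchronization, exactly as in the locked version above.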
Example #6
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
Example #7
u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
Example #8
int mcpm_cpu_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_was_down, first_man;
	unsigned long flags;

	if (!platform_ops)
		return -EUNATCH;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	local_irq_save(flags);
	arch_spin_lock(&mcpm_lock);

	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
	first_man = mcpm_cluster_unused(cluster);

	if (first_man && platform_ops->cluster_is_up)
		platform_ops->cluster_is_up(cluster);
	if (cpu_was_down)
		mcpm_cpu_use_count[cluster][cpu] = 1;
	if (platform_ops->cpu_is_up)
		platform_ops->cpu_is_up(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_restore(flags);

	return 0;
}
Example #9
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int cpu;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;
	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NOTIFY_STOP;
	}

	return NOTIFY_DONE;
}
Example #10
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}
Example #11
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	gr_handle_kernel_exploit();

	do_group_exit(signr);
}
Example #12
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
    if (regs && kexec_should_crash(current))
        crash_kexec(regs);

    bust_spinlocks(0);
    die_owner = -1;
    add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
    die_nest_count--;
    if (!die_nest_count)
        /* Nest count reaches zero, release the lock. */
        arch_spin_unlock(&die_lock);
    raw_local_irq_restore(flags);
    oops_exit();

    if (!signr)
        return;
    if (in_interrupt())
        panic("Fatal exception in interrupt");
    if (panic_on_oops)
        panic("Fatal exception");

    /*
     * We're not going to return, but we might be on an IST stack or
     * have very little stack space left.  Rewind the stack and kill
     * the task.
     */
    rewind_stack_do_exit(signr);
}
Example #13
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
Example #14
static int sunxi_cpu_power_down_c2state(struct cpuidle_device *dev,
                                               struct cpuidle_driver *drv,
                                               int index)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_pm_enter();
	//cpu_cluster_pm_enter();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	smp_wmb();

	cpu_suspend(CPUIDLE_FLAG_C2_STATE, sunxi_powerdown_c2_finisher);

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]++;
	sun8i_cluster_use_count[cluster]++;
	arch_spin_unlock(&sun8i_mcpm_lock);

	local_irq_enable();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	//cpu_cluster_pm_exit();
	cpu_pm_exit();

	return index;
}
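The comment above (repeated in Example #30) notes that no arch_spin_lock_irq() variant exists, so callers open-code IRQ disabling around the raw lock. Where that pairing recurs it can be folded into small helpers; a minimal sketch in the save/restore flavor, using hypothetical names:

/* Hypothetical helpers wrapping the open-coded pattern used above. */
#include <linux/irqflags.h>
#include <linux/spinlock.h>

static inline unsigned long my_arch_spin_lock_irqsave(arch_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);		/* IRQs must be off before taking the raw lock */
	arch_spin_lock(lock);
	return flags;
}

static inline void my_arch_spin_unlock_irqrestore(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_unlock(lock);
	local_irq_restore(flags);	/* re-enable IRQs only after dropping the lock */
}

With these, the locked region above would reduce to flags = my_arch_spin_lock_irqsave(&sun8i_mcpm_lock); ... my_arch_spin_unlock_irqrestore(&sun8i_mcpm_lock, flags);.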
Example #15
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is),
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Example #16
void __ipipe_spin_unlock_irqrestore(ipipe_spinlock_t *lock,
				    unsigned long x)
{
	arch_spin_unlock(&lock->arch_lock);
	if (!arch_demangle_irq_bits(&x))
		__clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
	hard_local_irq_restore(x);
}
Example #17
void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_unlock(lock);
	preempt_enable();
}
Example #18
void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	arch_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	arch_spin_unlock(&timebase_lock);
}
Example #19
/*
 * Firmware CPU startup hook
 * Complicated by PMON's weird interface, which tries to mimic the UNIX fork.
 * It launches the next available CPU and copies some information onto the
 * stack, so the first thing we do is throw away that stuff and load useful
 * values into the registers ...
 */
static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long gp = (unsigned long) task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);

	secondary_sp = sp;
	secondary_gp = gp;

	arch_spin_unlock(&launch_lock);
}
Example #20
/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace sunxi_powerdown_c2_finisher(unsigned long flg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	bool last_man = false;
	struct sunxi_enter_idle_para sunxi_idle_para;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);

	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]--;
	/* check whether this CPU is the last man, and set the flag */
	sun8i_cluster_use_count[cluster]--;
	if (sun8i_cluster_use_count[cluster] == 0) {
		writel(1, CLUSTER_CPUX_FLG(cluster, cpu));
		last_man = true;
	}
	arch_spin_unlock(&sun8i_mcpm_lock);

	/* call cpus to power off */
	sunxi_idle_para.flags = (unsigned long)mpidr | flg;
	sunxi_idle_para.resume_addr = (void *)(virt_to_phys(mcpm_entry_point));
	arisc_enter_cpuidle(NULL, NULL, &sunxi_idle_para);

	if (last_man) {
		int t = 0;

		/* wait for the other CPUs to receive this message and respond,
		 * to reconfirm whether this CPU really is the last man, then
		 * clear the flag
		 */
		while (1) {
			udelay(2);
			if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 2) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				break; /* last_man is true */
			} else if (readl(CLUSTER_CPUS_FLG(cluster, cpu)) == 3) {
				writel(0, CLUSTER_CPUX_FLG(cluster, cpu));
				goto out; /* last_man is false */
			}
			if (++t > 5000) {
				printk(KERN_WARNING "cpu%d idle timed out!\n",
				       cluster * 4 + cpu);
				t = 0;
			}
		}
		sunxi_idle_cluster_die(cluster);
	}
out:
	sunxi_idle_cpu_die();

	/* return value != 0 means failure */
	return 1;
}
Example #21
void lg_global_unlock(struct lglock *lg)
{
	int i;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_unlock(lock);
	}
	preempt_enable();
}
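For symmetry with lg_global_unlock() above, the acquisition side of the (since-removed) lglock API takes every per-CPU lock in the same CPU order, which is what makes the bulk unlock safe. Roughly, as a sketch rather than the exact historical source:

/* Sketch of the matching acquisition path for lg_global_unlock() above. */
void lg_global_lock(struct lglock *lg)
{
	int i;

	preempt_disable();
	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;

		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_lock(lock);	/* always in CPU order: no ABBA deadlock */
	}
}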
Example #22
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
Example #23
static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned int i;

	pr_devel("%s\n", __func__);

	mutex_lock(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int prio;

		if (!xc)
			continue;

		kvmppc_xive_disable_vcpu_interrupts(vcpu);

		for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {

			/* Single escalation, no queue 7 */
			if (prio == 7 && xive->single_escalation)
				break;

			if (xc->esc_virq[prio]) {
				free_irq(xc->esc_virq[prio], vcpu);
				irq_dispose_mapping(xc->esc_virq[prio]);
				kfree(xc->esc_virq_names[prio]);
				xc->esc_virq[prio] = 0;
			}

			kvmppc_xive_native_cleanup_queue(vcpu, prio);
		}
	}

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_reset_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	mutex_unlock(&kvm->lock);

	return 0;
}
Example #24
static int driver_open( struct inode *device, struct file *instance )
{
    printk(KERN_INFO "Open file called\n");
    while (!arch_spin_trylock( &lock ))
    {
        printk(KERN_INFO "sleeping...\n");
        msleep( 200 );
    }

    printk(KERN_INFO "got it...\n");
    msleep( 3000 );
    arch_spin_unlock( &lock );

    return 0;
}
Example #25
static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct kvm_device *dev = vma->vm_file->private_data;
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	u64 page;
	unsigned long irq;
	u64 page_offset;

	/*
	 * Linux/KVM uses a two-page ESB setting, one page for trigger and
	 * one for EOI.
	 */
	page_offset = vmf->pgoff - vma->vm_pgoff;
	irq = page_offset / 2;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel("%s: source %lx not found !\n", __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	arch_spin_lock(&sb->lock);

	/*
	 * first/even page is for trigger
	 * second/odd page is for EOI and management.
	 */
	page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
	arch_spin_unlock(&sb->lock);

	if (WARN_ON(!page)) {
		pr_err("%s: accessing invalid ESB page for source %lx !\n",
		       __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}
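The handler above decodes the faulting page offset into a source number plus a page role: each source owns two consecutive ESB pages, the even one for trigger and the odd one for EOI/management. A tiny hypothetical helper spells out that mapping:

/* Hypothetical decode of the two-pages-per-source ESB layout used above. */
#include <linux/types.h>	/* bool */

struct esb_slot {
	unsigned long irq;	/* source index within the mapping */
	bool eoi;		/* false: trigger page, true: EOI/management page */
};

static struct esb_slot esb_decode(unsigned long page_offset)
{
	struct esb_slot slot = {
		.irq = page_offset / 2,		/* two pages per interrupt source */
		.eoi = page_offset % 2,		/* even = trigger, odd = EOI */
	};

	return slot;
}

/* page_offset 0 -> irq 0 trigger, 1 -> irq 0 EOI, 2 -> irq 1 trigger, ... */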
Example #26
void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}
Example #27
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}
Example #28
static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
					struct kvmppc_xive_src_block *sb,
					struct kvmppc_xive_irq_state *state,
					u32 server, u8 priority, bool masked,
					u32 eisn)
{
	struct kvm *kvm = xive->kvm;
	u32 hw_num;
	int rc = 0;

	arch_spin_lock(&sb->lock);

	if (state->act_server == server && state->act_priority == priority &&
	    state->eisn == eisn)
		goto unlock;

	pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
		 priority, server, masked, state->act_server,
		 state->act_priority);

	kvmppc_xive_select_irq(state, &hw_num, NULL);

	if (priority != MASKED && !masked) {
		rc = kvmppc_xive_select_target(kvm, &server, priority);
		if (rc)
			goto unlock;

		state->act_priority = priority;
		state->act_server = server;
		state->eisn = eisn;

		rc = xive_native_configure_irq(hw_num,
					       kvmppc_xive_vp(xive, server),
					       priority, eisn);
	} else {
		state->act_priority = MASKED;
		state->act_server = 0;
		state->eisn = 0;

		rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
	}

unlock:
	arch_spin_unlock(&sb->lock);
	return rc;
}
Example #29
static void mmp_pm_powered_up(void)
{
    int mpidr, cpu, cluster;
    unsigned long flags;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);

    cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);
#ifdef CONFIG_VOLDC_STAT
    vol_dcstat_event(VLSTAT_LPM_EXIT, 0, 0);
    vol_ledstatus_event(MAX_LPM_INDEX);
#endif
    trace_pxa_cpu_idle(LPM_EXIT(0), cpu, cluster);

    local_irq_save(flags);
    arch_spin_lock(&mmp_lpm_lock);

    if (cluster_is_idle(cluster)) {
        if (mmp_wake_saved && mmp_idle->ops->restore_wakeup) {
            mmp_wake_saved = 0;
            mmp_idle->ops->restore_wakeup();
        }
        /* If hardware really shutdown MP subsystem */
        if (!(readl_relaxed(regs_addr_get_va(REGS_ADDR_GIC_DIST) +
                            GIC_DIST_CTRL) & 0x1)) {
            pr_debug("%s: cpu%u: cluster%u is up!\n", __func__, cpu, cluster);
            cpu_cluster_pm_exit();
        }
    }

    if (!mmp_pm_use_count[cluster][cpu])
        mmp_pm_use_count[cluster][cpu] = 1;

    mmp_enter_lpm[cluster][cpu] = 0;

    if (mmp_idle->ops->clr_pmu)
        mmp_idle->ops->clr_pmu(cpu);

    arch_spin_unlock(&mmp_lpm_lock);
    local_irq_restore(flags);
}
Example #30
static int mmp_pm_power_up(unsigned int cpu, unsigned int cluster)
{
    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    if (cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER)
        return -EINVAL;

    if (up_mode)
        return -EINVAL;

    cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);

    /*
     * Since this is called with IRQs enabled, and no arch_spin_lock_irq
     * variant exists, we need to disable IRQs manually here.
     */
    local_irq_disable();
    arch_spin_lock(&mmp_lpm_lock);
    /*
     * TODO: Check if we need to do cluster related ops here?
     * (Seems no need since this function should be called by
     * other core, which should not enter lpm at this point).
     */
    mmp_pm_use_count[cluster][cpu]++;

    if (mmp_pm_use_count[cluster][cpu] == 1) {
        mmp_cpu_power_up(cpu, cluster);
    } else if (mmp_pm_use_count[cluster][cpu] != 2) {
        /*
         * The only possible values are:
         * 0 = CPU down
         * 1 = CPU (still) up
         * 2 = CPU requested to be up before it had a chance
         *     to actually make itself down.
         * Any other value is a bug.
         */
        BUG();
    }

    arch_spin_unlock(&mmp_lpm_lock);
    local_irq_enable();

    return 0;
}