Example #1
static int twd_rate_change(struct notifier_block *nb,
	unsigned long flags, void *data)
{
	struct clk_notifier_data *cnd = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 */
	if (flags == POST_RATE_CHANGE)
		smp_call_function(twd_update_frequency,
				  (void *)&cnd->new_rate, 1);

	return NOTIFY_OK;
}
Example #2
static inline void do_cpuid(int cpu, u32 reg, u32 *data)
{
  struct cpuid_command cmd;
  
  preempt_disable();
  if ( cpu == smp_processor_id() ) {
    cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
  } else {
    cmd.cpu  = cpu;
    cmd.reg  = reg;
    cmd.data = data;
    
    smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
  }
  preempt_enable();
}
Example #3
/*
 * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a 
 * clean IRQ state.
 */
void smp_send_stop(void)
{
    int timeout = 10;

    smp_call_function(stop_this_cpu, NULL, 0);

    /* Wait 10ms for all other CPUs to go offline. */
    while ( (num_online_cpus() > 1) && (timeout-- > 0) )
        mdelay(1);

    local_irq_disable();
    __stop_this_cpu();
    disable_IO_APIC();
    hpet_disable();
    local_irq_enable();
}
Example #4
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
Example #5
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < smp_num_cpus; i++)
			if (smp_processor_id() != i)
				CPU_CONTEXT(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
}
Example #6
static int read_pat_on_cpus(void)
{
  int fail[2] = {0, 0};

  int rc = read_pat(&compat_pat_wc.original_pat);
  if( rc != 0 )
    return rc;

  smp_call_function((void(*)(void*))read_pat_on_cpu, &fail, 1, 1);

  if( fail[0] )
    return -EIO;
  if( fail[1] )
    return -EFAULT;
  return 0;
}
Example #7
int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			endflag = 1;
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			nmi_active = 0;
			lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
			nmi_perfctr_msr = 0;
			kfree(counts);
			return -1;
		}
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}
Example #8
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
    unsigned int vfpsid;
    unsigned int cpu_arch = cpu_architecture();

    if (cpu_arch >= CPU_ARCH_ARMv6)
        vfp_enable(NULL);

    /*
     * First check that there is a VFP that we can use.
     * The handler is already setup to just log calls, so
     * we just need to read the VFPSID register.
     */
    vfp_vector = vfp_testing_entry;
    barrier();
    vfpsid = fmrx(FPSID);
    barrier();
    vfp_vector = vfp_null_entry;

    printk(KERN_INFO "VFP support v0.3: ");
    if (VFP_arch)
        printk("not present\n");
    else if (vfpsid & FPSID_NODOUBLE) {
        printk("no double precision support\n");
    } else {
        smp_call_function(vfp_enable, NULL, 1, 1);

        VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
        printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
            (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
            (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
            (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
            (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
            (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

        vfp_vector = vfp_support_entry;

        thread_register_notifier(&vfp_notifier_block);

        /*
         * We detected VFP, and the support code is
         * in place; report VFP support to userspace.
         */
        elf_hwcap |= HWCAP_VFP;
    }
    return 0;
}
Example #9
static void kexec_prepare_cpus(void)
{
	int my_cpu, i, notified=-1;

	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	my_cpu = get_cpu();

	/* check the others cpus are now down (via paca hw cpu id == -1) */
	for (i=0; i < NR_CPUS; i++) {
		if (i == my_cpu)
			continue;

		while (paca[i].hw_cpu_id != -1) {
			barrier();
			if (!cpu_possible(i)) {
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" possible, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (!cpu_online(i)) {
				/* Fixme: this can be spinning in
				 * pSeries_secondary_wait with a paca
				 * waiting for it to go online.
				 */
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" online, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (i != notified) {
				printk( "kexec: waiting for cpu %d (physical"
						" %d) to go down\n",
						i, paca[i].hw_cpu_id);
				notified = i;
			}
		}
	}

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	put_cpu();

	local_irq_disable();
}
Example #10
int pause_other_cpus(pause_fn_t fn, void *arg)
{
	int ret = 0;
	unsigned long flags;
	int online_cpus_cnt;
	unsigned long count;

	preempt_disable();
	BLOCKER_DEBUG("Running pause on cpu %d\n", smp_processor_id());

	atomic_set(&blocker_count, 0);
	online_cpus_cnt = num_online_cpus();
	if (online_cpus_cnt > 1) {
		count = jiffies + HZ * MAX_BLOCKER_WAIT_MSEC / 1000 + 1;
		smp_call_function(blocker, NULL, false);
		while (time_before(jiffies, count)) {
			if (atomic_read(&blocker_count) + 1 == online_cpus_cnt)
				break;
		}

		if (!time_before(jiffies, count)) {
			pr_err("BLOCKER: Failed %s, online:%d, count:%d\n",
				__func__, online_cpus_cnt,
				(int)atomic_read(&blocker_count));
			atomic_set(&blocker_count, BLOCKER_INVALID);
			ret = -1;
			goto error;
		}
	}
	local_irq_save(flags);
	atomic_inc(&blocker_count);
	BLOCKER_DEBUG("In critical section on cpu %d\n", smp_processor_id());
	if (fn && atomic_read(&blocker_count) == online_cpus_cnt)
		ret = fn(arg);
	else
		pr_debug("Skip calling fn in blocker! fn: 0x%08X, rsp: %d\n",
		       (unsigned int)fn, atomic_read(&blocker_count));

	/* Release other CPUs */
	atomic_set(&blocker_count, BLOCKER_INVALID);
	local_irq_restore(flags);
error:
	BLOCKER_DEBUG("Finishing pause on cpu %d\n", smp_processor_id());
	preempt_enable();
	return ret;
}
Example #11
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTMPARGS Args;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = idCpu;
    Args.cHits = 0;

    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;

# ifdef preempt_disable
    preempt_disable();
# endif
    if (idCpu != RTMpCpuId())
    {
        if (RTMpIsCpuOnline(idCpu))
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#else /* older kernels */
            rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
            Assert(rc == 0);
            rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
    {
        rtmpLinuxWrapper(&Args);
        rc = VINF_SUCCESS;
    }
# ifdef preempt_enable
    preempt_enable();
# endif

    NOREF(rc);
    return rc;
}
Example #12
void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.mm = mm;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < smp_num_cpus; i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(mm, start, end);
}
Example #13
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}
Example #14
int __init check_nmi_watchdog (void)
{
    static unsigned int __initdata prev_nmi_count[NR_CPUS];
    int cpu;
    bool_t ok = 1;

    if ( !nmi_watchdog )
        return 0;

    printk("Testing NMI watchdog on all CPUs:");

    for_each_online_cpu ( cpu )
        prev_nmi_count[cpu] = nmi_count(cpu);

    /* Wait for 10 ticks.  Busy-wait on all CPUs: the LAPIC counter that
     * the NMI watchdog uses only runs while the core's not halted */
    if ( nmi_watchdog == NMI_LOCAL_APIC )
        smp_call_function(wait_for_nmis, NULL, 0);
    wait_for_nmis(NULL);

    for_each_online_cpu ( cpu )
    {
        if ( nmi_count(cpu) - prev_nmi_count[cpu] <= 5 )
        {
            printk(" %d", cpu);
            ok = 0;
        }
    }

    printk(" %s\n", ok ? "ok" : "stuck");

    /*
     * Now that we know it works we can reduce NMI frequency to
     * something more reasonable; makes a difference in some configs.
     * There's a limit to how slow we can go because writing the perfctr
     * MSRs only sets the low 32 bits, with the top 8 bits sign-extended
     * from those, so it's not possible to set up a delay larger than
     * 2^31 cycles and smaller than (2^40 - 2^31) cycles. 
     * (Intel SDM, section 18.22.2)
     */
    if ( nmi_watchdog == NMI_LOCAL_APIC )
        nmi_hz = max(1ul, cpu_khz >> 20);

    return 0;
}
Example #15
static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
{
	u64 gr28, gr29, gr30, gr31;
	struct ia64_pal_retval result = {0, 0, 0, 0};
	struct cache_flush_args args = {0, 0, 0, 0};
	long psr;

	gr28 = gr29 = gr30 = gr31 = 0;
	kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);

	if (gr31 != 0)
		printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);

	/* Always call Host Pal in int=1 */
	gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
	args.cache_type = gr29;
	args.operation = gr30;
	smp_call_function(remote_pal_cache_flush,
				(void *)&args, 1);
	if (args.status != 0)
		printk(KERN_ERR"pal_cache_flush error!,"
				"status:0x%lx\n", args.status);
	/*
	 * Call Host PAL cache flush
	 * Clear psr.ic when call PAL_CACHE_FLUSH
	 */
	local_irq_save(psr);
	result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
						&result.v0);
	local_irq_restore(psr);
	if (result.status != 0)
		printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
				"in1:%lx,in2:%lx\n",
				vcpu, result.status, gr29, gr30);

#if 0
	if (gr29 == PAL_CACHE_TYPE_COHERENT) {
		cpus_setall(vcpu->arch.cache_coherent_map);
		cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
		cpus_setall(cpu_cache_coherent_map);
		cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
	}
#endif
	return result;
}
Example #16
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = idCpu;
    Args.cHits = 0;

    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;

    RTThreadPreemptDisable(&PreemptState);
    if (idCpu != RTMpCpuId())
    {
        if (RTMpIsCpuOnline(idCpu))
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#else /* older kernels */
            rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
            Assert(rc == 0);
            rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
    {
        rtmpLinuxWrapper(&Args);
        rc = VINF_SUCCESS;
    }
    RTThreadPreemptRestore(&PreemptState);

    NOREF(rc);
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
Example #17
asmlinkage int
sys_perfmonctl (int cmd, void *data) 
{ 
	struct perfmon_struct *pdata;
	int err;

	printk("sys_perfmonctl: cmd = 0x%x\n", cmd); 
	pdata = kmalloc(sizeof(struct perfmon_struct), GFP_USER);
	err = __copy_from_user(pdata, data, sizeof(struct perfmon_struct));
	switch(cmd) {
	case PMC_CMD_BUFFER:
		perfmon_buffer_ctl(data); 
		break;
	case PMC_CMD_DUMP:
		perfmon_dump_ctl(data); 
		break;
	case PMC_CMD_DECR_PROFILE: /* NIA time sampling */
		decr_profile(data); 
		break;
	case PMC_CMD_PROFILE:
		perfmon_profile_ctl(pdata); 
		break;
	case PMC_CMD_TRACE:
		perfmon_trace_ctl(pdata); 
		break;
	case PMC_CMD_TIMESLICE:
		perfmon_timeslice_ctl(pdata); 
		break;
#if 0
	case PMC_OP_TIMESLICE:
		pmc_enable_timeslice(pdata); 
		break;
	case PMC_OP_DUMP_TIMESLICE:
		pmc_dump_timeslice(pdata); 
		smp_call_function(pmc_dump_timeslice, (void *)pdata, 0, 1);
		break;
#endif
	default:
		printk("Perfmon: Unknown command\n");
		break;
	}

	kfree(pdata); 
	return 0;
}
Example #18
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
{
	int ret;

	if (!handler || !handler->fiq) {
		ret = -EINVAL;
		goto err_bad_arg;
	}

	mutex_lock(&fiq_glue_lock);

	if (!trusty_dev) {
		ret = -ENODEV;
		goto err_no_trusty;
	}

	handler->next = fiq_handlers;
	/*
	 * Write barrier paired with smp_read_barrier_depends in
	 * trusty_fiq_handler. Make sure next pointer is updated before
	 * fiq_handlers so trusty_fiq_handler does not see an uninitialized
	 * value and terminate early or crash.
	 */
	smp_wmb();
	fiq_handlers = handler;

	smp_call_function(smp_nop_call, NULL, true);

	if (!handler->next) {
		ret = fiq_glue_set_handler();
		if (ret)
			goto err_set_fiq_handler;
	}

	mutex_unlock(&fiq_glue_lock);
	return 0;

err_set_fiq_handler:
	fiq_handlers = handler->next;
err_no_trusty:
	mutex_unlock(&fiq_glue_lock);
err_bad_arg:
	pr_err("%s: failed, %d\n", __func__, ret);
	return ret;
}
Example #19
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
Example #20
void __init setup_APIC_clocks (void)
{
	printk("Using local APIC timer interrupts.\n");
	using_apic_timer = 1;

	__cli();

	calibration_result = calibrate_APIC_clock();
	/*
	 * Now set up the timer for real.
	 */
	setup_APIC_timer((void *)calibration_result);

	__sti();

	/* and update all other cpus */
	smp_call_function(setup_APIC_timer, (void *)calibration_result, 1, 1);
}
Example #21
/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* A couple of sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * Temporarily this turns IRQs off, so that lockups are detected by the
	 * NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
	smp_call_function(smp_flush_tlb, NULL, 1);

	while (!cpumask_empty(&flush_cpumask))
		/* Lockup detection does not belong here */
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #22
void machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);

	local_irq_enable();
	smp_call_function(machine_crash_nonpanic_core, NULL, false);
	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
	local_irq_disable();

	crash_save_cpu(regs, smp_processor_id());

	printk(KERN_INFO "Loading crashdump kernel...\n");
}
Example #23
static int __init mips_smp_init(void)
{
    cpumask_t forbidden;
    unsigned int cpu;

    cpus_clear(forbidden);
#ifdef CONFIG_SMP
    smp_call_function(mips_setup_cpu_mask, &forbidden, 1);
#endif
    mips_setup_cpu_mask(&forbidden);
    if (cpus_empty(forbidden))
        return 0;
    perfctr_cpus_forbidden_mask = forbidden;
    for (cpu = 0; cpu < NR_CPUS; ++cpu)
        if (cpu_isset(cpu, forbidden))
            printk(" %u", cpu);
    printk("\n");
    return 0;
}
Example #24
void flush_all_cpu_caches(void)
{
        unsigned int cpu, cluster, target_cpu;

	preempt_disable();
	cpu = smp_processor_id();
	cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);

	if (!cluster)
		target_cpu = first_cpu(hmp_slow_cpu_mask);
	else
		target_cpu = first_cpu(hmp_fast_cpu_mask);

	smp_call_function(flush_all_cpu_cache, NULL, 1);
	smp_call_function_single(target_cpu, flush_all_cluster_cache, NULL, 1);
	flush_cache_all();

	preempt_enable();
}
Example #25
/**
 * Start using a coprocessor.
 * @acop: mask of coprocessor to be used.
 * @mm: The mm the coprocessor to associate with. Most likely current mm.
 *
 * Return a positive PID if successful. Negative errno otherwise.
 * The returned PID will be fed to the coprocessor to determine if an
 * icswx transaction is authenticated.
 */
int use_cop(unsigned long acop, struct mm_struct *mm)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_ICSWX))
		return -ENODEV;

	if (!mm || !acop)
		return -EINVAL;

	/* We need to make sure mm_users doesn't change */
	down_read(&mm->mmap_sem);
	spin_lock(mm->context.cop_lockp);

	if (mm->context.cop_pid == COP_PID_NONE) {
		ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
				  &mmu_context_acop_lock);
		if (ret < 0)
			goto out;

		mm->context.cop_pid = ret;
	}
	mm->context.acop |= acop;

	sync_cop(mm);

	/*
	 * If this is a threaded process then there might be other threads
	 * running. We need to send an IPI to force them to pick up any
	 * change in PID and ACOP.
	 */
	if (atomic_read(&mm->mm_users) > 1)
		smp_call_function(sync_cop, mm, 1);

	ret = mm->context.cop_pid;

out:
	spin_unlock(mm->context.cop_lockp);
	up_read(&mm->mmap_sem);

	return ret;
}
Example #26
static void cfe_linux_exit(void)
{
#ifdef CONFIG_SMP
    if (smp_processor_id()) {
        if (reboot_smp) {
            /* Don't repeat the process from another CPU */
            for (;;);
        } else {
            /* Get CPU 0 to do the cfe_exit */
            reboot_smp = 1;
            smp_call_function((void *)_machine_restart, NULL, 1, 0);
            for (;;);
        }
    }
#endif
    printk("passing control back to CFE\n");
    cfe_exit(1, 0);
    printk("cfe_exit returned??\n");
    while(1);
}
Example #27
static void cpuidle_profile_main_start(void)
{
	if (profile_ongoing) {
		pr_err("cpuidle profile is ongoing\n");
		return;
	}

	clear_time();
	profile_start_time = ktime_get();

	profile_ongoing = 1;

	/* Wakeup all cpus and clear own profile data to start profile */
	preempt_disable();
	clear_profile_info(&per_cpu(profile_info, smp_processor_id()), state_count);
	smp_call_function(call_cpu_start_profile, NULL, 1);
	preempt_enable();

	pr_info("cpuidle profile start\n");
}
Example #28
static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
{
	struct msr_command cmd;
	int ret;

	preempt_disable();
	if (cpu == smp_processor_id()) {
		ret = wrmsr_eio(reg, eax, edx);
	} else {
		cmd.cpu = cpu;
		cmd.reg = reg;
		cmd.data[0] = eax;
		cmd.data[1] = edx;

		smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
		ret = cmd.err;
	}
	preempt_enable();
	return ret;
}
Example #29
static void ATTRIB_NORET cfe_linux_exit(void *arg)
{
	int warm = *(int *)arg;

	if (smp_processor_id()) {
		static int reboot_smp;

		/* Don't repeat the process from another CPU */
		if (!reboot_smp) {
			/* Get CPU 0 to do the cfe_exit */
			reboot_smp = 1;
			smp_call_function(cfe_linux_exit, arg, 1, 0);
		}
	} else {
		printk("Passing control back to CFE...\n");
		cfe_exit(warm, 0);
		printk("cfe_exit returned??\n");
	}
	while (1);
}
Example #30
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DECLARE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	lock_cpu_hotplug();
	down(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	up(&flow_flush_sem);
	unlock_cpu_hotplug();
}
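
The examples above mix two historical signatures of smp_call_function(): kernels older than 2.6.27 took an extra retry argument, smp_call_function(func, info, retry, wait), while later kernels take only smp_call_function(func, info, wait). In both forms the callback runs on every online CPU except the calling one. Below is a minimal sketch of the common call pattern, modelled on Example #13; the callback and wrapper names are hypothetical, and a kernel >= 2.6.27 is assumed.

#include <linux/preempt.h>
#include <linux/smp.h>

/* Hypothetical callback: runs in IPI context on each remote CPU,
 * so it must not sleep or take sleeping locks. */
static void my_remote_work(void *info)
{
	/* per-CPU work goes here */
}

static void run_on_all_cpus(void *info)
{
	preempt_disable();
	/* Ask every other online CPU to run the callback; wait=1 blocks
	 * until they have all finished. */
	smp_call_function(my_remote_work, info, 1);
	/* smp_call_function() skips the calling CPU, so do the local
	 * work explicitly, as Example #13 does. */
	my_remote_work(info);
	preempt_enable();
}

In-tree code often uses on_each_cpu(func, info, wait) instead, which wraps essentially this disable/IPI/local-call/enable sequence.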