Code example #1
static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			"events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	on_each_cpu(xen_percpu_init, NULL, 0);

	return 0;
}
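
The examples on this page all follow the same pattern: a void (*)(void *) callback is handed to on_each_cpu(), which runs it once on every online CPU in a context where it must not sleep, and the last argument selects whether the caller blocks until every CPU has finished (a few of the older trees quoted below still use the earlier four-argument form). The following minimal module-style sketch is not taken from any of the projects listed here; demo_counter, demo_touch_cpu and the other demo_* names are hypothetical and used purely for illustration.

#include <linux/module.h>
#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_counter);	/* hypothetical per-CPU state */

/* Runs once on each online CPU, with interrupts disabled; must not sleep. */
static void demo_touch_cpu(void *info)
{
	this_cpu_inc(demo_counter);
}

static int __init demo_init(void)
{
	/* wait == 1: return only after every CPU has run the callback */
	on_each_cpu(demo_touch_cpu, NULL, 1);
	pr_info("demo: callback ran on every online CPU\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");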
Code example #2
File: proc-pmon.c Project: lishaman/minispeaker
static int pmon_read_proc(char *page, char **start, off_t off,
			  int count, int *eof, void *data)
{
	int cpu, len = 0;

	on_each_cpu(on_each_cpu_pmon_read, NULL, 1);

#define PRINT(ARGS...) len += sprintf(page + len, ##ARGS)

	for_each_online_cpu(cpu)
		PRINT("CPU%d:\n%x %x %x %x\n", cpu,
		      per_cpu(csr, cpu),
		      per_cpu(high, cpu),
		      per_cpu(lc, cpu),
		      per_cpu(rc, cpu));

	return len;
}
Code example #3
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
{
	int ret;
	int cpu;

	if (!handler || !handler->fiq)
		return -EINVAL;

	mutex_lock(&fiq_glue_lock);
	if (fiq_stack) {
		ret = -EBUSY;
		goto err_busy;
	}

	for_each_possible_cpu(cpu) {
		void *stack;
		stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (WARN_ON(!stack)) {
			ret = -ENOMEM;
			goto err_alloc_fiq_stack;
		}
		per_cpu(fiq_stack, cpu) = stack;
	}

	ret = claim_fiq(&fiq_debbuger_fiq_handler);
	if (WARN_ON(ret))
		goto err_claim_fiq;

	current_handler = handler;
	on_each_cpu(fiq_glue_setup_helper, handler, true);
	set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);

	mutex_unlock(&fiq_glue_lock);
	return 0;

err_claim_fiq:
err_alloc_fiq_stack:
	for_each_possible_cpu(cpu) {
		__free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
		per_cpu(fiq_stack, cpu) = NULL;
	}
err_busy:
	mutex_unlock(&fiq_glue_lock);
	return ret;
}
Code example #4
File: non-fatal.c Project: amodj/Utopia
static void mce_work_fn(void *data)
{ 
	on_each_cpu(mce_checkregs, NULL, 1, 1);

	if (variable_period) {
		if (adjust)
			period /= (adjust + 1);
		else
			period *= 2;
		if (period > MCE_PERIOD_MAX)
			period = MCE_PERIOD_MAX;
		if (period < MCE_PERIOD_MIN)
			period = MCE_PERIOD_MIN;
	}

	set_timer(&mce_timer, NOW() + period);
	adjust = 0;
}
Code example #5
File: smp_tlb.c Project: iamroot9C-arm/linux
/** 20131026
 * With CONFIG_SMP enabled,
 * perform a TLB flush for the kernel address space between start and end.
 **/
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/** 20131026
	 * Case where the TLB operation has to be broadcast
	 * (the operation would otherwise only take effect on local structures).
	 **/
	if (tlb_ops_need_broadcast()) {
		/** 20131026
		 * Fill in the tlb_args structure.
		 * Since this is a kernel range, vm_area_struct is not set.
		 **/
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}
Code example #6
File: perf_event_cpu.c Project: 1youhun1/linux
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}

	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}
Code example #7
File: nmi_int.c Project: garyvan/openwrt-1.6
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();
	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
Code example #8
static int twd_cpufreq_transition(struct notifier_block *nb,
    unsigned long state, void *data)
{
    struct cpufreq_freqs *freqs = data;

    /*
     * The twd clock events must be reprogrammed to account for the new
     * frequency.  The timer is local to a cpu, so cross-call to the
     * changing cpu.
     */
    if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
    {
        on_each_cpu(twd_update_frequency, NULL, 1);
        printk("new rate %u\n", twd_timer_rate);
    }
    printk("%s %lu %u\n", __func__, state, twd_timer_rate);
    return NOTIFY_OK;
}
Code example #9
static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
Code example #10
File: pte.c Project: Ksys-labs/l4linux
static void unmap_log_add(struct mm_struct *mm,
                          unsigned long uaddr, unsigned long rights)
{
	int i, do_flush = 0;
	struct unmap_log_t *log = &__get_cpu_var(unmap_log);
	unsigned long flags;

	BUG_ON(!mm);

	local_irq_save(flags);

	BUG_ON(log->cnt >= L4X_MM_CONTEXT_UNMAP_LOG_COUNT);

	i = log->cnt++;
	log->log[i].addr   = uaddr & PAGE_MASK;
	log->log[i].mm     = mm;
	log->log[i].rights = rights;
	log->log[i].size   = PAGE_SHIFT;

	/* _simple_ merge with previous entries */
	while (i) {
		struct unmap_log_entry_t *prev = &log->log[i - 1];
		struct unmap_log_entry_t *cur  = &log->log[i];

		if (   prev->addr + (1 << prev->size) == cur->addr
		    && prev->size == cur->size
		    && (prev->addr & ((1 << (prev->size + 1)) - 1)) == 0
		    && prev->mm == cur->mm
		    && prev->rights == cur->rights) {
			prev->size += 1;
			log->cnt--;
			i--;
		} else
			break;
	}

	do_flush = log->cnt == L4X_MM_CONTEXT_UNMAP_LOG_COUNT;
	local_irq_restore(flags);

	if (do_flush) {
		on_each_cpu(empty_func, NULL, 1);
		l4x_unmap_log_flush();
	}
}
Code example #11
/* Initialize the CPU-measurement counter facility */
int __kernel_cpumcf_begin(void)
{
	int flags = PMC_INIT;
	int err = 0;

	spin_lock(&cpumcf_owner_lock);
	if (cpumcf_owner)
		err = -EBUSY;
	else
		cpumcf_owner = __builtin_return_address(0);
	spin_unlock(&cpumcf_owner_lock);
	if (err)
		return err;

	on_each_cpu(cpum_cf_setup_cpu, &flags, 1);
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}
Code example #12
File: nmi_int.c Project: 325116067/semc-qsd8x50
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		err = -ENOMEM;
	else if (!nmi_setup_mux())
		err = -ENOMEM;
	else
		err = register_die_notifier(&profile_exceptions_nb);

	if (err) {
		free_msrs();
		nmi_shutdown_mux();
		return err;
	}

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}
Code example #13
static void trace_bts_prepare(struct trace_iterator *iter)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
	/*
	 * We need to collect the trace on the respective cpu since ftrace
	 * implicitly adds the record for the current cpu.
	 * Once that is more flexible, we could collect the data from any cpu.
	 */
	on_each_cpu(trace_bts_cpu, iter->tr, 1);

	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_resume_bts(per_cpu(hwb_tracer, cpu));
	put_online_cpus();
}
Code example #14
File: nmi_int.c Project: 0day-ci/xen
int nmi_reserve_counters(void)
{
	if (!allocate_msrs())
		return -ENOMEM;

	/* We walk a thin line between law and rape here.
	 * We need to be careful to install our NMI handler
	 * without actually triggering any NMIs as this will
	 * break the core code horrifically.
	 */
	if (reserve_lapic_nmi() < 0) {
		free_msrs();
		return -EBUSY;
	}
	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */
	on_each_cpu(nmi_save_registers, NULL, 1);
	return 0;
}
Code example #15
File: sysenter.c Project: 12019/hg556a_source
static int __init sysenter_setup(void)
{
	unsigned long page = get_zeroed_page(GFP_ATOMIC);

	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);

	if (!boot_cpu_has(X86_FEATURE_SEP)) {
		memcpy((void *) page,
		       &vsyscall_int80_start,
		       &vsyscall_int80_end - &vsyscall_int80_start);
		return 0;
	}

	memcpy((void *) page,
	       &vsyscall_sysenter_start,
	       &vsyscall_sysenter_end - &vsyscall_sysenter_start);

	on_each_cpu(enable_sep_cpu, NULL, 1, 1);
	return 0;
}
Code example #16
File: kvm.c Project: 0-T-0/ps4-linux
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
Code example #17
File: hugetlbpage.c Project: 08opt/linux
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}
Code example #18
/*
 * proc handler for /proc/sys/tile/userspace_perf_counters
 */
int userspace_perf_counters_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret, changed;
	unsigned int old_userspace_perf_counters;

	/* Read /proc/sys/tile/userspace_perf_counters */
	if (!write) {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		return ret;
	}

	/* Write /proc/sys/tile/userspace_perf_counters */
	old_userspace_perf_counters = userspace_perf_counters;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	changed = userspace_perf_counters != old_userspace_perf_counters;

	/*
	 * Do something only if the value of userspace_perf_counters
	 * is changed.
	 */
	if  (ret == 0 && changed) {

		if (userspace_perf_counters == 1) {
			if (reserve_pmc_hardware(userspace_perf_handler)) {
				pr_warning("PMC hardware busy (reserved "
					"by perf_event or oprofile)\n");
				userspace_perf_counters =
					old_userspace_perf_counters;
				return -EBUSY;
			}
		} else
			release_pmc_hardware();

		/* Set MPL_PERF_COUNT_SET_X on each tile. */
		on_each_cpu(set_perf_count_sprs, NULL, 1);
	}

	return ret;
}
Code example #19
/**************************************************************
* mt hotplug mechanism control interface for procfs test0
***************************************************************/
static int mt_hotplug_mechanism_read_test0(char *buf, char **start, off_t off, int count, int *eof, void *data)
{
    char *p = buf;
    
    p += sprintf(p, "%d\n", g_test0);
    *eof = 1;
    
    HOTPLUG_INFO("mt_hotplug_mechanism_read_test0, hotplug_cpu_count: %d\n", atomic_read(&hotplug_cpu_count));
    on_each_cpu((smp_call_func_t)dump_stack, NULL, 1);
    
    mt65xx_reg_sync_writel(8, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 0, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
    mt65xx_reg_sync_writel(9, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 1, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
    mt65xx_reg_sync_writel(10, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 2, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
    mt65xx_reg_sync_writel(11, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 3, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
    
    return p - buf;
}
Code example #20
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		char *end = addr + length;
		mb();
		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
			clflush(addr);
		clflush(end - 1);
		mb();
		return;
	}

	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
Code example #21
int start_tbs(bool is_start_paused)
{
	int ret;

	ret = register_hotcpu_notifier(&cpu_notifier_for_timer);
	if (ret != 0)
		return ret;

	if (!is_start_paused)
	{
		tbs_running = true;
	}
	else
	{
		tbs_running = false;
	}

	on_each_cpu(__start_tbs, NULL, 1);

	return 0;
}
Code example #22
File: perf_event_cpu.c Project: BlueLover-zm/prd
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, cpu_pmu);
		}
	}
}
Code example #23
File: mpc85xx_edac.c Project: E-LLP/n900
static int __init mpc85xx_mc_init(void)
{
	int res = 0;

	printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
	       "(C) 2006 Montavista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	res = of_register_platform_driver(&mpc85xx_mc_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");

	res = of_register_platform_driver(&mpc85xx_l2_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");

#ifdef CONFIG_PCI
	res = of_register_platform_driver(&mpc85xx_pci_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
#endif

	/*
	 * need to clear HID1[RFXE] to disable machine check int
	 * so we can catch it
	 */
	if (edac_op_state == EDAC_OPSTATE_INT)
		on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);

	return 0;
}
Code example #24
static int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu, err;

	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
	if (!prev_nmi_count) {
		err = -ENOMEM;
		goto error;
	}

	printk(KERN_INFO "Testing NMI watchdog ... ");

	smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
			report_broken_nmi(cpu, prev_nmi_count);
	}
	endflag = 1;
	if (!nmi_usable) {
		kfree(prev_nmi_count);
		err = -ENODEV;
		goto error;
	}
	printk("OK.\n");

	nmi_hz = 1;

	kfree(prev_nmi_count);
	return 0;
error:
	on_each_cpu(stop_watchdog, NULL, 1);
	return err;
}
Code example #25
File: rcupdate.c Project: kizukukoto/WDN900_GPL
/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(enum rcu_barrier type)
{
	BUG_ON(in_interrupt());
	/* Take cpucontrol mutex to protect against CPU hotplug */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	/*
	 * Initialize rcu_barrier_cpu_count to 1, then invoke
	 * rcu_barrier_func() on each CPU, so that each CPU also has
	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
	 * might complete its grace period before all of the other CPUs
	 * did their increment, causing this function to return too
	 * early.
	 */
	atomic_set(&rcu_barrier_cpu_count, 1);
	on_each_cpu(rcu_barrier_func, (void *)type, 1);
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}
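
The comment inside _rcu_barrier() above explains why rcu_barrier_cpu_count starts at 1: the caller holds an extra reference so completion cannot fire until every CPU has both taken and released its own reference. The same counting trick can be shown in ordinary userspace C. The sketch below is not kernel code; barrier_count, put_ref, worker and the rest are made-up names, and it builds with something like cc -pthread sketch.c.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int barrier_count = 1;	/* the coordinator's initial reference */
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static int done;

/* Drop one reference; the last one to drop signals completion. */
static void put_ref(void)
{
	if (atomic_fetch_sub(&barrier_count, 1) == 1) {
		pthread_mutex_lock(&done_lock);
		done = 1;
		pthread_cond_signal(&done_cond);
		pthread_mutex_unlock(&done_lock);
	}
}

/* Plays the role of the deferred per-CPU callback: completes some time later. */
static void *worker(void *arg)
{
	put_ref();
	return NULL;
}

int main(void)
{
	pthread_t t[NWORKERS];
	int i;

	for (i = 0; i < NWORKERS; i++) {
		/* Reference taken before the worker can possibly complete,
		 * just as the quoted comment says each CPU increments the
		 * count from within the synchronous on_each_cpu() call. */
		atomic_fetch_add(&barrier_count, 1);
		pthread_create(&t[i], NULL, worker, NULL);
	}

	/* Only now drop the initial reference; without it, a fast worker could
	 * drive the count to zero before the remaining references are taken. */
	put_ref();

	pthread_mutex_lock(&done_lock);
	while (!done)
		pthread_cond_wait(&done_cond, &done_lock);
	pthread_mutex_unlock(&done_lock);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(t[i], NULL);
	puts("all references released");
	return 0;
}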
Code example #26
File: octeon-tra.c Project: AlickHill/Lantern
/**
 * This function is called when the trace buffer hits a trigger
 * or fills. We don't enable the fill interrupt, so it should
 * only be on triggers.
 *
 * @param cpl    Interrupt number
 * @param dev_id unused
 *
 * @return IRQ status, should always be IRQ_HANDLED
 */
static irqreturn_t octeon_tra_interrupt(int cpl, void *dev_id)
{
	/* Stop the trace buffer in case it is still running. A trigger
	   should have already stopped it */
	cvmx_tra_enable(0);
	/* Clear the trace buffer interrupt status */
	cvmx_write_csr(CVMX_TRA_INT_STATUS, cvmx_read_csr(CVMX_TRA_INT_STATUS));

	/* We can optionally stop the other cores */
	if (OCTEON_TRA_DUMP_CORES_ON_INTERRUPT) {
		pr_info("Octeon Trace Buffer Dumping Core state\n");
		on_each_cpu(octeon_tra_dump_regs, NULL, 1);
	}

	pr_info("Octeon Trace Buffer Start\n");
	cvmx_tra_display();
	pr_info("Octeon Trace Buffer End\n");

	/* Restart the trace buffer */
	cvmx_tra_enable(1);
	return IRQ_HANDLED;
}
Code example #27
File: perf_event_cpu.c Project: davyb1964/rxe-dev
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
    int err;
    int cpu;
    struct pmu_hw_events __percpu *cpu_hw_events;

    cpu_hw_events = alloc_percpu(struct pmu_hw_events);
    if (!cpu_hw_events)
        return -ENOMEM;

    cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
    err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
    if (err)
        goto out_hw_events;

    for_each_possible_cpu(cpu) {
        struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
        raw_spin_lock_init(&events->pmu_lock);
        events->percpu_pmu = cpu_pmu;
    }

    cpu_pmu->hw_events	= cpu_hw_events;
    cpu_pmu->request_irq	= cpu_pmu_request_irq;
    cpu_pmu->free_irq	= cpu_pmu_free_irq;

    /* Ensure the PMU has sane values out of reset. */
    if (cpu_pmu->reset)
        on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

    /* If no interrupts available, set the corresponding capability flag */
    if (!platform_get_irq(cpu_pmu->plat_device, 0))
        cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

    return 0;

out_hw_events:
    free_percpu(cpu_hw_events);
    return err;
}
Code example #28
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		struct sg_page_iter sg_iter;

		mb();
		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb();

		return;
	}

	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
Code example #29
File: profile.c Project: gnensis/linux-2.6.15
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	down(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	up(&profile_flip_mutex);
}
Code example #30
File: perf_event_cpu.c Project: BlueLover-zm/prd
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		per_cpu(percpu_pmu, cpu) = cpu_pmu;
	}

	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}