Example #1
/*
 * Try to run func exclusively on the current CPU while every other online
 * CPU spins in a wait loop until func finishes. If the attempt fails, the
 * caller may simply retry.
 * NOTE: must be called from non-atomic context, since
 * cpu_maps_update_begin() takes a mutex.
 */
int try_exclu_cpu_exe(exl_call_func_t func, void *p_arg)
{
	unsigned int cpu;
	unsigned long irq_flags;
	unsigned long jiffy_timeout;
	unsigned long count = 0;
	int ret;

	/* Prevent CPU hotplug while the other CPUs are being stalled */
	cpu_maps_update_begin();

	timeout_flag = 0;	/* clear the timeout flag */

	/* Bail out if a previous exclusive call has not completed yet */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
		if (per_cpu(in_wait, cpu)) {
			printk(KERN_ERR "The previous exclusive call has not completed yet!\n");
			ret = -EBUSY;
			goto finish2;
		}
	}

	/* Ask every other online CPU to enter smp_wait(); don't wait for them here */
	smp_call_function(smp_wait, NULL, 0);

	irq_flags = arch_local_irq_save();

	jiffy_timeout = jiffies + HZ / 2;	/* 0.5 s timeout */

	/* All other CPUs are in the wait loop once count + 1 == num_online_cpus() */
	while (count + 1 != num_online_cpus()) {
		if (time_after(jiffies, jiffy_timeout)) {
			printk(KERN_ERR "Cannot stall the other CPUs. Timeout!\n");

			timeout_flag = 1;

			ret = -ETIMEDOUT;
			goto finish1;
		}

		for (cpu = 0, count = 0; cpu < CONFIG_NR_CPUS; cpu++)
			if (per_cpu(in_wait, cpu) == SMP_FLAG_GETED)
				count++;
	}

	/* Every other CPU is now stalled: run func exclusively */
	ret = func(p_arg);

finish1:
	/* Release the stalled CPUs */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		per_cpu(in_wait, cpu) = SMP_FLAG_IDLE;

	arch_local_irq_restore(irq_flags);

finish2:
	cpu_maps_update_done();
	return ret;
}
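
For context, a minimal sketch of a caller, assuming exl_call_func_t is int (*)(void *) (consistent with the ret = func(p_arg) call above); flush_all_caches and the retry bound are hypothetical stand-ins:

/* Hypothetical callback: runs while every other CPU is spinning */
static int flush_all_caches(void *arg)
{
	/* work that must not race with the other CPUs */
	return 0;
}

static int do_exclusive_flush(void)
{
	int retries = 3;
	int ret;

	/* The header comment says a failed attempt can simply be retried */
	do {
		ret = try_exclu_cpu_exe(flush_all_caches, NULL);
	} while (ret < 0 && --retries);

	return ret;
}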
Example #2
static cycle_t arc_counter_read(struct clocksource *cs)
{
	unsigned long flags;
	/* Assemble the 64-bit value from two 32-bit halves, endian-safely */
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 high, low; };
#else
		struct { u32 low, high; };
#endif
		cycle_t  full;
	} stamp;

	flags = arch_local_irq_save();

	/* Read the low word of the timestamp counter; force the high word to 0 */
	__asm__ __volatile__(
	"	.extCoreRegister tsch, 58,  r, cannot_shortcut	\n"
	"	rtsc %0, 0	\n"
	"	mov  %1, 0	\n"
	: "=r" (stamp.low), "=r" (stamp.high));

	arch_local_irq_restore(flags);

	return stamp.full;
}
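
A read callback like this is normally wired into a struct clocksource and registered with clocksource_register_hz(). A minimal sketch follows; the name, rating, and core_freq_hz value are illustrative placeholders, not taken from the code above:

static struct clocksource arc_counter = {
	.name	= "ARC RTSC",		/* illustrative name */
	.rating	= 300,			/* illustrative rating */
	.read	= arc_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),	/* only the low word is populated */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_clocksource_init(void)
{
	/* core_freq_hz is a placeholder for the real core clock rate */
	unsigned long core_freq_hz = 80000000;	/* assumed 80 MHz */

	return clocksource_register_hz(&arc_counter, core_freq_hz);
}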