static int multi_get(void *data, u64 *val)
{
	ktime_t finish;
	struct spin_multi_state ms;
	struct spin_multi_per_thread t1, t2;

	ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
	ms.loops = 1000000;

	atomic_set(&ms.start_wait, 2);
	atomic_set(&ms.enter_wait, 2);
	atomic_set(&ms.exit_wait, 2);
	t1.state = &ms;
	t2.state = &ms;

	kthread_run(multi_other, &t2, "multi_get");

	multi_other(&t1);

	finish = ktime_get();

	*val = ktime_us_delta(finish, t1.start);

	return 0;
}
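Note that this example assigns __RAW_SPIN_LOCK_UNLOCKED at run time, because the benchmark state lives on the stack; the remaining examples use the macro as a static initializer. The helper multi_other() is not part of this excerpt. Purely as an illustration of how the fields above fit together, a hypothetical shape for it might look like the sketch below (the enter_wait/exit_wait rendezvous points are omitted for brevity):

/* Hypothetical sketch only -- not the actual kernel helper. */
static int multi_other(void *data)
{
	struct spin_multi_per_thread *pt = data;
	struct spin_multi_state *ms = pt->state;
	unsigned int loops = ms->loops;

	/* Rendezvous: spin until both threads have arrived. */
	atomic_dec(&ms->start_wait);
	while (atomic_read(&ms->start_wait))
		cpu_relax();

	pt->start = ktime_get();	/* read by the caller afterwards */

	/* Hammer the contended raw spinlock for the configured count. */
	while (loops--) {
		raw_spin_lock(&ms->lock);
		raw_spin_unlock(&ms->lock);
	}
	return 0;
}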
Example #2
#include <asm/uaccess.h>

#include <trace/events/timer.h>

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
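The excerpt is cut off above. The comment at the top names hrtimer_clockid_to_base(); in kernels of this generation it is a simple table lookup. A minimal sketch, assuming the standard enum names shown above (the full table also covers further bases such as CLOCK_BOOTTIME and varies by kernel version):

/* Sketch: map a userspace clockid to the index of its hrtimer base. */
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	/* Index directly by clockid; unlisted ids map to base 0 here. */
	return hrtimer_clock_to_base_table[clock_id];
}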
Example #3
	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

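/*
 * With CONFIG_SPARSE_IRQ disabled, all descriptors live in one static
 * array. The GCC range designator [0 ... NR_IRQS-1] applies the same
 * initializer to every slot, so each per-descriptor lock is usable
 * without any runtime setup.
 */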
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);
Example #4
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <asm/irq_handler.h>
#include <asm/trace.h>
#include <asm/pda.h>

static atomic_t irq_err_count;
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}

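/*
 * Catch-all descriptor for interrupts without a valid mapping. Its
 * lock is initialized at build time, so it is safe to use from the
 * very first spurious interrupt onward.
 */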
static struct irq_desc bad_irq_desc = {
	.handle_irq = handle_bad_irq,
	.lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};

#ifdef CONFIG_CPUMASK_OFFSTACK
/* We are not allocating a variable-sized bad_irq_desc.affinity */
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif

#ifdef CONFIG_PROC_FS
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
Example #5
#define L2X0_EVENT_CNT_CFG_INTR_MASK		0x3
#define L2X0_EVENT_CNT_CFG_INTR_DISABLED	0x0
#define L2X0_EVENT_CNT_CFG_INTR_INCREMENT	0x1
#define L2X0_EVENT_CNT_CFG_INTR_OVERFLOW	0x2

#define L2X0_NUM_COUNTERS			2
static struct arm_pmu l2x0_pmu;

static u32 l2x0pmu_max_event_id = 0xf;

static struct perf_event *events[2];
static unsigned long used_mask[BITS_TO_LONGS(2)];
static struct pmu_hw_events l2x0pmu_hw_events = {
	.events = events,
	.used_mask = used_mask,
	.pmu_lock = __RAW_SPIN_LOCK_UNLOCKED(l2x0pmu_hw_events.pmu_lock),
};

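/*
 * The per-counter registers sit at descending addresses, so counter
 * idx's register is 4*idx bytes below the counter-0 register.
 */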
#define COUNTER_CFG_ADDR(idx)	(l2x0_base + L2X0_EVENT_CNT0_CFG - 4*idx)

#define COUNTER_CTRL_ADDR	(l2x0_base + L2X0_EVENT_CNT_CTRL)

#define COUNTER_ADDR(idx)	(l2x0_base + L2X0_EVENT_CNT0_VAL - 4*idx)

static void l2x0_clear_interrupts(u32 flags)
{
	writel_relaxed(flags, l2x0_base + L2X0_INTR_CLEAR);
}

static struct pmu_hw_events *l2x0pmu_get_hw_events(void)
{