Example #1
static void rtas_event_scan(struct work_struct *w)
{
	unsigned int cpu;

	do_event_scan();

	get_online_cpus();

	cpu = next_cpu(smp_processor_id(), cpu_online_map);
	if (cpu == NR_CPUS) {
		cpu = first_cpu(cpu_online_map);

		if (first_pass) {
			first_pass = 0;
			event_scan_delay = 30*HZ/rtas_event_scan_rate;

			if (surveillance_timeout != -1) {
				pr_debug("rtasd: enabling surveillance\n");
				enable_surveillance(surveillance_timeout);
				pr_debug("rtasd: surveillance enabled\n");
			}
		}
	}

	schedule_delayed_work_on(cpu, &event_scan_work,
		__round_jiffies_relative(event_scan_delay, cpu));

	put_online_cpus();
}
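The pattern above — advance with next_cpu() and fall back to first_cpu() once the end of the online map is reached — is the round-robin idiom most of these examples share. Below is a minimal, self-contained userspace sketch of that idiom; NR_CPUS, next_cpu() and first_cpu() are toy re-implementations over a plain unsigned long bitmask, written purely for illustration, not the kernel definitions.

#include <stdio.h>

#define NR_CPUS 8

/* Toy re-implementation: first set CPU strictly after 'cpu',
 * or NR_CPUS when the mask holds no further CPU. */
static unsigned int next_cpu(unsigned int cpu, unsigned long map)
{
	unsigned int i;

	for (i = cpu + 1; i < NR_CPUS; i++)
		if (map & (1UL << i))
			return i;
	return NR_CPUS;
}

/* First set CPU in the mask, or NR_CPUS if the mask is empty. */
static unsigned int first_cpu(unsigned long map)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		if (map & (1UL << i))
			return i;
	return NR_CPUS;
}

int main(void)
{
	unsigned long online = 0x0D;	/* CPUs 0, 2 and 3 "online" */
	unsigned int cpu = first_cpu(online);
	int step;

	for (step = 0; step < 6; step++) {
		printf("scheduling work on CPU %u\n", cpu);
		cpu = next_cpu(cpu, online);
		if (cpu == NR_CPUS)	/* ran off the end: wrap around */
			cpu = first_cpu(online);
	}
	return 0;
}

This prints CPUs 0, 2, 3, 0, 2, 3 — the same wrap-around walk rtas_event_scan() performs across the online map.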
Example #2
static void run_ipi_test_tasklet(unsigned long ignore)
{
    cpumask_t mask;

    BUG_ON(!local_irq_is_enabled());

    if (!done_initialisation) {
	printk("Running initialisation; x2 apic enabled %d\n", x2apic_enabled);
	set_intr_gate(IPI_TEST_VECTOR, ipi_test_interrupt);
	test_cpu_x = 0;
	test_cpu_y = 1;
	done_initialisation = 1;
    } else {
	unsigned long time_taken = finish_time - start_time;
	printk("CPUs %d -> %d took %ld nanoseconds to perform %ld round trips; RTT %ldns\n",
	       test_cpu_x, test_cpu_y,
	       time_taken, nr_trips - INITIAL_DISCARD,
	       time_taken / (nr_trips - INITIAL_DISCARD));
	printk("%d -> %d send IPI time %ld nanoseconds (%ld each)\n",
	       test_cpu_x, test_cpu_y,
	       send_ipi_time,
	       send_ipi_time / (nr_trips - INITIAL_DISCARD));
	nr_trips = 0;
	test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
	if (test_cpu_y == test_cpu_x)
	    test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
	if (test_cpu_y == NR_CPUS) {
	    test_cpu_x = next_cpu(test_cpu_x, cpu_online_map);
	    if (test_cpu_x == NR_CPUS) {
		printk("Finished test\n");
		machine_restart(0);
	    }
	    test_cpu_y = 0;
	}
    }

    BUG_ON(test_cpu_x == test_cpu_y);

    if (test_cpu_x == smp_processor_id()) {
	local_irq_disable();
	__smp_ipi_test_interrupt();
	local_irq_enable();
    } else {
	mask = cpumask_of_cpu(test_cpu_x);
	send_IPI_mask(&mask, IPI_TEST_VECTOR);
    }
}
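The bookkeeping in the middle of Example #2 visits every ordered CPU pair: test_cpu_y advances first, and each time it wraps past the last CPU, test_cpu_x advances and y restarts at zero. Here is a standalone sketch of just that visiting order; it assumes all CPUs 0..NR_CPUS-1 are online, whereas the original skips offline CPUs via cpu_online_map.

#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	unsigned int x = 0, y = 1;

	for (;;) {
		printf("measure IPI round trip %u -> %u\n", x, y);
		y++;
		if (y == x)		/* never pair a CPU with itself */
			y++;
		if (y == NR_CPUS) {	/* y wrapped: advance x */
			x++;
			if (x == NR_CPUS)
				break;	/* all ordered pairs visited */
			y = 0;
		}
	}
	return 0;
}

With NR_CPUS = 4 this enumerates all twelve ordered pairs, 0->1 through 3->2, matching the order in which the tasklet measures round-trip times.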
Example #3
/* Round-robin: advance the persistent bind_cpu cursor through 'map',
 * wrapping back to the first set CPU once the end of the mask is hit. */
static unsigned int next_bind_cpu(cpumask_t map)
{
	static unsigned int bind_cpu;

	bind_cpu = next_cpu(bind_cpu, map);
	if (bind_cpu >= NR_CPUS)
		bind_cpu = first_cpu(map);
	return bind_cpu;
}
Example #4
static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
{
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;
	unsigned int irq_dirty;

	i = first_cpu(mask);
	if (next_cpu(i, mask) < NR_CPUS) {	/* more than one CPU in mask */
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return;
	}

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&desc->lock, flags);
	spin_lock(&bcm1480_imr_lock);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	spin_unlock(&bcm1480_imr_lock);
	spin_unlock_irqrestore(&desc->lock, flags);
}
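The guard at the top of Example #4 relies on the contract that next_cpu() returns NR_CPUS when no CPU after the first is set, so a strictly smaller result means the mask names more than one CPU — hence the test must be < rather than the always-true <=, as corrected above. A self-contained sketch of that single-CPU check, using the same toy next_cpu()/first_cpu() helpers as the sketch after Example #1 (not the kernel ones):

#include <stdio.h>

#define NR_CPUS 8

/* First set CPU strictly after 'cpu', or NR_CPUS when none is left. */
static unsigned int next_cpu(unsigned int cpu, unsigned long map)
{
	unsigned int i;

	for (i = cpu + 1; i < NR_CPUS; i++)
		if (map & (1UL << i))
			return i;
	return NR_CPUS;
}

static unsigned int first_cpu(unsigned long map)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		if (map & (1UL << i))
			return i;
	return NR_CPUS;
}

int main(void)
{
	unsigned long single = 1UL << 2;		/* only CPU 2 set */
	unsigned long multi  = (1UL << 1) | (1UL << 5);	/* CPUs 1 and 5 */

	printf("single: multiple CPUs? %s\n",
	       next_cpu(first_cpu(single), single) < NR_CPUS ? "yes" : "no");
	printf("multi:  multiple CPUs? %s\n",
	       next_cpu(first_cpu(multi), multi) < NR_CPUS ? "yes" : "no");
	return 0;
}

This prints "no" for the single-CPU mask and "yes" for the two-CPU mask, which is exactly the condition under which the driver rejects the affinity request.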
Example #5
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
	int cpu;
	unsigned long flags;

	WARN_ON_ONCE(!in_interrupt());
	if (ehca_debug_level >= 3)
		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	cpu = next_cpu(pool->last_cpu, cpu_online_map);
	if (cpu == NR_CPUS)
		cpu = first_cpu(cpu_online_map);
	pool->last_cpu = cpu;
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

	return cpu;
}
Example #6
static void do_event_scan_all_cpus(long delay)
{
	int cpu;

	lock_cpu_hotplug();
	cpu = first_cpu(cpu_online_map);
	for (;;) {
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		do_event_scan(rtas_token("event-scan"));
		set_cpus_allowed(current, CPU_MASK_ALL);

		/* Drop hotplug lock, and sleep for the specified delay */
		unlock_cpu_hotplug();
		msleep_interruptible(delay);
		lock_cpu_hotplug();

		cpu = next_cpu(cpu, cpu_online_map);
		if (cpu == NR_CPUS)
			break;
	}
	unlock_cpu_hotplug();
}
Example #7
static int rtasd(void *unused)
{
	unsigned int err_type;
	int cpu = 0;
	int event_scan = rtas_token("event-scan");
	int rc;

	daemonize("rtasd");

	if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1)
		goto error;

	rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER);
	if (!rtas_log_buf) {
		printk(KERN_ERR "rtasd: no memory\n");
		goto error;
	}

	printk(KERN_ERR "RTAS daemon started\n");

	DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2);

	/* See if we have any error stored in NVRAM */
	memset(logdata, 0, rtas_error_log_max);

	rc = nvram_read_error_log(logdata, rtas_error_log_max, &err_type);

	/* We can use rtas_log_buf now */
	no_logging = 0;

	if (!rc) {
		if (err_type != ERR_FLAG_ALREADY_LOGGED) {
			pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0);
		}
	}

	/* First pass. */
	lock_cpu_hotplug();
	for_each_online_cpu(cpu) {
		DEBUG("scheduling on %d\n", cpu);
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());

		do_event_scan(event_scan);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	unlock_cpu_hotplug();

	if (surveillance_timeout != -1) {
		DEBUG("enabling surveillance\n");
		enable_surveillance(surveillance_timeout);
		DEBUG("surveillance enabled\n");
	}

	lock_cpu_hotplug();
	cpu = first_cpu(cpu_online_map);
	for (;;) {
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		do_event_scan(event_scan);
		set_cpus_allowed(current, CPU_MASK_ALL);

		/* Drop hotplug lock, and sleep for a bit (at least
		 * one second since some machines have problems if we
		 * call event-scan too quickly). */
		unlock_cpu_hotplug();
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout((HZ*60/rtas_event_scan_rate) / 2);
		lock_cpu_hotplug();

		cpu = next_cpu(cpu, cpu_online_map);
		if (cpu == NR_CPUS)
			cpu = first_cpu(cpu_online_map);
	}

error:
	/* Should delete proc entries */
	return -EINVAL;
}
Example #8
int first_cpu(struct bitmask *bitmask)
{
    return next_cpu(bitmask, 0);
}
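Unlike the kernel snippets, this last example is userspace library code: first_cpu() is simply next_cpu() started at index 0, which implies this library's next_cpu() searches from its start index inclusively. Below is a hypothetical sketch of how such a pair might be implemented; the struct bitmask layout is invented for illustration and is not the library's real definition.

#include <stdio.h>

/* Invented stand-in for the library's bitmask type. */
struct bitmask {
	unsigned long bits;
	int size;		/* number of valid bit positions */
};

/* First set bit at or after 'start', or 'size' when none is left;
 * the inclusive search is what makes first_cpu() below work. */
int next_cpu(struct bitmask *bitmask, int start)
{
	int i;

	for (i = start; i < bitmask->size; i++)
		if (bitmask->bits & (1UL << i))
			return i;
	return bitmask->size;
}

int first_cpu(struct bitmask *bitmask)
{
	return next_cpu(bitmask, 0);
}

int main(void)
{
	struct bitmask online = { 0x2C, 8 };	/* CPUs 2, 3 and 5 */

	printf("first CPU: %d\n", first_cpu(&online));		/* prints 2 */
	printf("next from 4: %d\n", next_cpu(&online, 4));	/* prints 5 */
	return 0;
}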