Example #1
/*
 * Give every CPU its own copy of the irq_desc array: allocate pages on
 * the local node, seed each copy from the boot-time _irq_desc table,
 * and remember the pointer in _sn_irq_desc[].
 */
void
sn_init_irq_desc(void)
{
	int i;
	irq_desc_t *base_desc = _irq_desc, *p;

	for (i = 0; i < NR_CPUS; i++) {
		p = page_address(alloc_pages_node(local_cnodeid(), GFP_KERNEL,
				 get_order(sizeof(struct irq_desc) * NR_IRQS)));
		ASSERT(p);
		memcpy(p, base_desc, sizeof(struct irq_desc) * NR_IRQS);
		_sn_irq_desc[i] = p;
	}
}
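
The example above only builds the per-CPU tables. As a hedged illustration of how such a table might be consulted later, the lookup below indexes the copy for one CPU; the helper name sn_irq_desc_for() is invented here and is not part of the original code.

/*
 * Hypothetical helper (name invented for illustration): resolve the
 * descriptor for (cpu, irq) from the per-CPU copy built above.
 */
static inline irq_desc_t *
sn_irq_desc_for(int cpu, int irq)
{
	return _sn_irq_desc[cpu] + irq;	/* assumes 0 <= irq < NR_IRQS */
}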
Example #2
/*
 * One of these threads is started per cpu.  Each thread is responsible
 * for loading that cpu's bte interface and then writing to the
 * test buffer.  The transfers are set up in a round-robin fashion.
 * The end result is that each test buffer is being written into
 * by the previous node and both cpus at the same time as the
 * local bte is transferring it to the next node.
 */
static int
brt_notify_thrd(void *__bind_cpu)
{
	int bind_cpu = (long int)__bind_cpu;
	int cpu = cpu_logical_map(bind_cpu);
	nodepda_t *nxt_node;
	long tmout_itc_intvls;
	long tmout;
	long passes;
	long good_xfer_cnt;
	u64 src_phys, dst_phys;
	int i;
	volatile char *src_buf;
	u64 *notify;

	atomic_inc(&brt_thread_cnt);
	daemonize();
	set_user_nice(current, 19);
	sigfillset(&current->blocked);

	/* Migrate to the right CPU */
	set_cpus_allowed(current, 1UL << cpu);

	/* Calculate the uSec timeout itc offset. */
	tmout_itc_intvls = local_cpu_data->cyc_per_usec * hang_usec;

	if (local_cnodeid() == (numnodes - 1)) {
		nxt_node = NODEPDA(0);
	} else {
		nxt_node = NODEPDA(local_cnodeid() + 1);
	}

	src_buf = nodepda->bte_if[0].bte_test_buf;
	src_phys = __pa(src_buf);
	dst_phys = __pa(nxt_node->bte_if[0].bte_test_buf);

	notify = kmalloc(L1_CACHE_BYTES, GFP_KERNEL);
	ASSERT(!((u64) notify & L1_CACHE_MASK));

	printk("BTE Hang %d xfer 0x%lx -> 0x%lx, Notify=0x%lx\n",
	       smp_processor_id(), src_phys, dst_phys, (u64) notify);

	passes = 0;
	good_xfer_cnt = 0;

	/* Loop until signalled to exit. */
	while (!brt_exit_flag) {
		/*
		 * A hang will prevent further transfers.
		 * NOTE: Sometimes, it appears like a hang occurred and
		 * then transfers begin again.  This just means that
		 * there is NUMA congestion and the hang_usec param
		 * should be increased.
		 */
		if (!(*notify & IBLS_BUSY)) {
			if ((bte_copy(src_phys,
				      dst_phys,
				      4UL * L1_CACHE_BYTES,
				      BTE_NOTIFY,
				      (void *)notify)) != BTE_SUCCESS) {
				printk("<0>Cpu %d Could not "
				       "allocate a bte.\n",
				       smp_processor_id());
				continue;
			}

			tmout = ia64_get_itc() + tmout_itc_intvls;

			while ((*notify & IBLS_BUSY) &&
			       (ia64_get_itc() < tmout)) {
				/* Push data out with the processor. */
				for (i = 0; i < (4 * L1_CACHE_BYTES);
				     i += L1_CACHE_BYTES) {
					src_buf[i] = (passes % 128);
				}
			}

			if (*notify & IBLS_BUSY) {
				printk("<0>Cpu %d BTE appears to have "
				       "hung.\n", smp_processor_id());
			} else {
				good_xfer_cnt++;
			}
		}

		/* Every 40 passes, take a little break. */
		if (!(++passes % 40)) {
			passes = 0;
			schedule_timeout(HZ / 100);
		}
	}

	kfree(notify);

	printk("Cpu %d had %ld good passes\n",
	       smp_processor_id(), good_xfer_cnt);

	atomic_dec(&brt_thread_cnt);
	return (0);
}
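
The heart of the test above is the notification poll: bte_copy() is started with BTE_NOTIFY so that the BTE clears IBLS_BUSY in the caller-supplied notify word when the transfer completes, and the thread spins on that word until the bit clears or the ITC passes a deadline. Below is a minimal sketch of that idiom factored out of the loop, assuming the same IBLS_BUSY flag and ITC timebase; the helper name brt_wait_notify() is invented here, and the real loop also keeps writing into src_buf while it waits.

/*
 * Sketch only: wait for the BTE to clear IBLS_BUSY in *notify, giving
 * up once the ITC passes the supplied deadline.  Returns 0 on
 * completion, -1 if the transfer still appears busy (apparent hang).
 */
static int
brt_wait_notify(volatile u64 *notify, long tmout_itc_intvls)
{
	long tmout = ia64_get_itc() + tmout_itc_intvls;

	while ((*notify & IBLS_BUSY) && (ia64_get_itc() < tmout))
		;	/* the real test pushes data into src_buf here */

	return (*notify & IBLS_BUSY) ? -1 : 0;
}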