/* Example #1 */
/***********************************************/
/*   Send  interrupt 'vector' to the cpus in   */
/*   'mask'.  The  kernel  API for this moved  */
/*   repeatedly  across releases, so we gate   */
/*   on  LINUX_VERSION_CODE and, where the     */
/*   symbol  is not exported, resolve it at    */
/*   runtime  via  get_proc_addr() and cache   */
/*   the  result  in  a  static  function      */
/*   pointer (first call does the lookup).     */
/***********************************************/
static void
send_ipi_interrupt(cpumask_t *mask, int vector)
{
# if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
	/***********************************************/
	/*   Theres  'flat' and theres 'cluster'. The  */
	/*   cluster  functions  handle  more  than 8  */
	/*   cpus. The flat does not - since the APIC  */
	/*   only has room for an 8-bit cpu mask.      */
	/***********************************************/
	static void (*send_IPI_mask)(cpumask_t, int);
	if (send_IPI_mask == NULL)
	        send_IPI_mask = get_proc_addr("cluster_send_IPI_mask");
	if (send_IPI_mask == NULL) dtrace_printf("HELP ON send_ipi_interrupt!\n"); else
	        send_IPI_mask(*mask, vector);
# elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 28)
	/***********************************************/
	/*   Issue with GPL/inlined function.	       */
	/***********************************************/
	{
	/* Local prototype: the symbol exists in the kernel but has no
	   usable header declaration in this context. NOTE(review): the
	   looked-up pointer is called unchecked below — presumably the
	   symbol always resolves on 2.6.28; confirm. */
	void send_IPI_mask_sequence(cpumask_t mask, int vector);
	static void (*send_IPI_mask_sequence_ptr)(cpumask_t, int);
	if (send_IPI_mask_sequence_ptr == NULL)
		send_IPI_mask_sequence_ptr = get_proc_addr("send_IPI_mask_sequence");
	send_IPI_mask_sequence_ptr(*mask, vector);
	}
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
	/* Kernels in (2.6.18, 2.6.28) export send_IPI_mask directly. */
	send_IPI_mask(*mask, vector);
# else
	/* Modern kernels: go through the apic ops table (x_apic, set up
	   by xcall_init). If it was not found, fall back to the flat
	   driver's function, else give up loudly. */
	if (x_apic == NULL) {
		static void (*flat_send_IPI_mask)(cpumask_t *, int);
		if (flat_send_IPI_mask == NULL) 
			flat_send_IPI_mask = get_proc_addr("flat_send_IPI_mask");

		if (flat_send_IPI_mask)  {
			flat_send_IPI_mask(mask, vector);
			return;
		}
		dtrace_linux_panic("x_apic is null - giving up\n");
		return;
	}
	x_apic->send_IPI_mask(mask, vector);
# endif
}
/* Example #2 */
/***********************************************/
/*   One-time  setup  for  the cross-cpu call  */
/*   machinery:  locate  the  kernel's  apic   */
/*   ops  structure  (name  varies  between    */
/*   releases) and allocate the nr_cpus x      */
/*   nr_cpus xcalls mailbox matrix.            */
/*   					       */
/*   On  allocation  failure  we release the   */
/*   rows  already  allocated  so  we do not   */
/*   leak them (dtrace_linux_panic may return  */
/*   to us), then bail out.		       */
/***********************************************/
void
xcall_init(void)
{	int	i;

	if ((x_apic = get_proc_addr("apic")) == NULL &&
	    (x_apic = get_proc_addr("apic_ops")) == NULL) {
		/***********************************************/
		/*   This might be a problem. It might not.    */
		/***********************************************/
		printk("init_xcall: cannot locate 'apic'\n");
	}
	/* The symbol is a pointer to the ops struct - dereference it. */
	if (x_apic)
		x_apic = *(void **) x_apic;

	for (i = 0; i < nr_cpus; i++) {
		xcalls[i] = kzalloc(nr_cpus * sizeof (struct xcalls), GFP_KERNEL);
		if (xcalls[i] == NULL) {
			/***********************************************/
			/*   Dont  leak  the  rows  we  managed  to    */
			/*   allocate  before  this  one failed, and   */
			/*   leave  no  dangling  pointers  behind.    */
			/***********************************************/
			while (--i >= 0) {
				kfree(xcalls[i]);
				xcalls[i] = NULL;
			}
			dtrace_linux_panic("Cannot allocate xcalls[%d][%d] array.\n", nr_cpus, nr_cpus);
			return;
		}
	}
}
/* Example #3 */
/***********************************************/
/*   Spin  until  the xcall mailbox for cpu    */
/*   'c'  (owned  by the current cpu) drops    */
/*   back  to  XC_IDLE,  i.e.  the  target     */
/*   acknowledged the cross-call.              */
/*   					       */
/*   Returns  1  when the slot went idle, 0    */
/*   if  'attempts'  polls were exhausted      */
/*   first.  Bumps  the  cnt_xcall4/5/6        */
/*   diagnostic  counters  and  escalates to   */
/*   console  noise  (and  eventually  a       */
/*   panic)  if  the  ack  takes  far  too     */
/*   long.				       */
/***********************************************/
int
ack_wait(int c, int attempts)
{
	unsigned long cnt = 0;
	int	cnt1 = 0;	/* # of 1M-spin epochs without an ack */
	volatile struct xcalls *xc = &xcalls[smp_processor_id()][c];

	/***********************************************/
	/*   Avoid holding on to a stale cache line.   */
	/***********************************************/
	/* dtrace_cas32(ptr, WORKING, WORKING) is a no-op swap used as an
	   atomic read of xc_state; loop while it is not yet XC_IDLE. */
	while (dtrace_cas32((void *) &xc->xc_state, XC_WORKING, XC_WORKING) != XC_IDLE) {
		if (attempts-- <= 0)
			return 0;

		barrier();

		/***********************************************/
		/*   Be HT friendly.			       */
		/***********************************************/
//		smt_pause();

		cnt_xcall6++;
		/***********************************************/
		/*   Keep track of the max.		       */
		/***********************************************/
		if (cnt > cnt_xcall5)
			cnt_xcall5 = cnt;

		/***********************************************/
		/*   On  my  Dual Core 2.26GHz system, we are  */
		/*   seeing counters in the range of hundreds  */
		/*   to  maybe  2,000,000  for  more  extreme  */
		/*   cases.  (This  is  inside  a VM). During  */
		/*   debugging,  we  found  problems with the  */
		/*   two  cores  not  seeing  each  other  --  */
		/*   possibly because I wasnt doing the right  */
		/*   things to ensure memory barriers were in  */
		/*   place.				       */
		/*   					       */
		/*   We  dont  want  to  wait forever because  */
		/*   that  will  crash/hang your machine, but  */
		/*   we  do  need to give up if its taken far  */
		/*   too long.				       */
		/***********************************************/
//		if (cnt++ == 50 * 1000 * 1000UL) {
		if (cnt++ == 1 * 1000 * 1000UL) {
			/* One 'epoch' (1M spins) elapsed with no ack. */
			cnt = 0;
			cnt_xcall4++;

			if (cnt1 == 0) {
				/***********************************************/
				/*   Looks like we are having trouble getting  */
				/*   the interrupt, so try for an NMI.	       */
				/***********************************************/
				/* NMI escalation is currently disabled - the
				   mask is built but the send is commented out. */
				cpumask_t mask;
				cpus_clear(mask);
				cpu_set(c, mask);
//				nmi_masks[c] = 1;
//				send_ipi_interrupt(&mask, 2); //NMI_VECTOR);
			}

			if (1) {
//				set_console_on(1);
				dtrace_printf("ack_wait cpu=%d xcall %staking too long! c=%d [xcall1=%lu]\n", 
					smp_processor_id(), 
					cnt1 ? "STILL " : "",
					c, cnt_xcall1);
				//dump_stack();
//				set_console_on(0);
			}

			/* After ~4 epochs (~4M spins) give up entirely. */
			if (cnt1++ > 3) {
				dump_xcalls();
				dtrace_linux_panic("xcall taking too long");
				break;
			}
		}
	}

	if (xcall_debug) {
		dtrace_printf("[%d] ack_wait finished c=%d cnt=%lu (max=%lu)\n", smp_processor_id(), c, cnt, cnt_xcall5);
	}
	return 1;
}