Example #1
int rt_release_irq (unsigned irq)
{
	ipipe_free_irq(&rtai_domain, irq);
	if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
		rt_assign_irq_to_cpu(irq, rtai_realtime_irq[irq].cpumask);
	}
	return 0;
}
int init_module(void)
{
	RESET_COUNT = SECS*100;
	rt_mount_rtai();
	rt_assign_irq_to_cpu(TIMER_8254_IRQ, 1 << hard_cpu_id());
	rt_request_timer(just_ret, COUNT, 1);
	rt_request_global_irq(TIMER_8254_IRQ, calibrate);
	printk("\n->>> HERE WE GO (PRINTING EVERY %d SECONDS, 'make stop' to end calibrating) <<<-\n\n", RESET_COUNT/100);
	return 0;
}
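The excerpt only shows the setup side. A minimal sketch of a matching cleanup_module, assuming the usual RTAI counterparts (rt_free_timer, rt_free_global_irq, rt_reset_irq_to_sym_mode, rt_umount_rtai) and mirroring the END_FREQ_CAL teardown in Example #9:
void cleanup_module(void)
{
	/* sketch: undo init_module -- stop the calibration timer, detach the
	   handler from the 8254 IRQ, return the IRQ to Linux' symmetric
	   distribution and unmount RTAI */
	rt_free_timer();
	rt_free_global_irq(TIMER_8254_IRQ);
	rt_reset_irq_to_sym_mode(TIMER_8254_IRQ);
	rt_umount_rtai();
}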
Example #3
static int
__latency_init(void)
{

	/* XXX check option ranges here */

	/* register a proc entry */
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("rtai/latency_calibrate", /* name             */
	                       0,			 /* default mode     */
	                       NULL, 			 /* parent dir       */
			       proc_read, 		 /* function         */
			       NULL			 /* client data      */
	);
#endif

	rtf_create(DEBUG_FIFO, 16000);	/* create a fifo length: 16000 bytes */
	rt_linux_use_fpu(use_fpu);	/* declare if we use the FPU         */

	rt_task_init(			/* create our measuring task         */
			    &thread,	/* pointer to our RT_TASK            */
			    fun,	/* implementation of the task        */
			    0,		/* we could transfer data -> task    */
			    3000,	/* stack size                        */
			    0,		/* priority                          */
			    use_fpu,	/* do we use the FPU?                */
			    0		/* signal? XXX                       */
	);

	rt_set_runnable_on_cpus(	/* select on which CPUs the task is  */
		&thread,		/* allowed to run                    */
		RUN_ON_CPUS
	);

	/* Test if we have to start the timer                                */
	if (start_timer || (start_timer = !rt_is_hard_timer_running())) {
		if (timer_mode) {
			rt_set_periodic_mode();
		} else {
			rt_set_oneshot_mode();
		}
		rt_assign_irq_to_cpu(TIMER_8254_IRQ, TIMER_TO_CPU);
		period_counts = start_rt_timer(nano2count(period));
	} else {
		period_counts = nano2count(period);
	}

	loops = (1000000000*avrgtime)/period;

	/* Calculate the start time for the task. */
	/* We set this to "now plus 10 periods"   */
	expected = rt_get_time() + 10 * period_counts;
	rt_task_make_periodic(&thread, expected, period_counts);
	return 0;
}
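The corresponding exit routine is not part of the excerpt. A minimal sketch, assuming the usual RTAI counterparts (stop_rt_timer, rt_task_delete, rtf_destroy) and the kernel's remove_proc_entry:
static void
__latency_exit(void)
{
	/* sketch: stop the timer only if __latency_init() started it, then
	   delete the measuring task and release the fifo and proc entry */
	if (start_timer) {
		rt_reset_irq_to_sym_mode(TIMER_8254_IRQ);
		stop_rt_timer();
	}
	rt_task_delete(&thread);
	rtf_destroy(DEBUG_FIFO);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("rtai/latency_calibrate", NULL);
#endif
}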
Example #4
int rt_request_irq (unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode)
{
	int ret;
	/* hook the handler into the RTAI (I-pipe) domain for this irq */
	ret = ipipe_request_irq(&rtai_domain, irq, (void *)handler, cookie, NULL);
	if (!ret) {
		rtai_realtime_irq[irq].retmode = retmode ? 1 : 0;
		/* pin the irq to the isolated CPUs, remembering the previous
		   affinity so rt_release_irq() can restore it */
		if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
			rtai_realtime_irq[irq].cpumask = rt_assign_irq_to_cpu(irq, IsolCpusMask);
		}
		}
	}
	return ret;
}
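The caller's side is not shown. A minimal sketch of how a driver might use this entry point, assuming a hypothetical device IRQ number MY_DEV_IRQ and a trivial handler:
#define MY_DEV_IRQ  7	/* hypothetical device IRQ */

static int my_handler(unsigned irq, void *cookie)
{
	/* service the device here */
	return 0;
}

static int my_driver_init(void)
{
	/* retmode 0: the handler's return value is not interpreted */
	return rt_request_irq(MY_DEV_IRQ, my_handler, NULL, 0);
}

static void my_driver_exit(void)
{
	rt_release_irq(MY_DEV_IRQ);
}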
Example #5
int rt_release_irq (unsigned irq)
{
	unsigned long flags;
	if (irq >= RTAI_NR_IRQS || !rtai_realtime_irq[irq].handler) {
		return -EINVAL;
	}

	/* detach the handler and restore the root domain's acknowledge
	   routine under the global critical lock */
	flags = rtai_critical_enter(NULL);
	rtai_realtime_irq[irq].handler = NULL;
	rtai_realtime_irq[irq].irq_ack = (void *)hal_root_domain->irqs[irq].acknowledge;
	rtai_critical_exit(flags);

	if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
		rt_assign_irq_to_cpu(irq, rtai_realtime_irq[irq].cpumask);
	}

	return 0;
}
int init_module(void)
{
	rt_assign_irq_to_cpu(0, 0);
	rtf_create(CMDF, 10000);
	if (Mode) {
		rt_typed_sem_init(&sem, 0, SEM_TYPE);
	}
	rt_task_init_cpuid(&thread, intr_handler, 0, STACK_SIZE, 0, 0, 0, 0);
	rt_task_resume(&thread);
#ifdef IRQEXT
	rt_request_timer((void *)rt_timer_tick_ext, imuldiv(TICK, FREQ_8254, 1000000000), 0);
	rt_set_global_irq_ext(0, 1, 0);
#else
	rt_request_timer((void *)rt_timer_tick, imuldiv(TICK, FREQ_8254, 1000000000), 0);
#endif
	SETUP_8254_TSC_EMULATION;
	return 0;
}
int rt_lock_cpu(int cpu, int irqsl[], int nirqsl)
{
	int irq;
	if (cpu >= smp_num_cpus || cpu < 0) {
		return -EINVAL;
	}
	if (!idle_task.magic) {
		rt_task_init_cpuid(&idle_task, idle_fun, 0, STACK_SIZE, RT_LINUX_PRIORITY - 1, 0, 0, cpu);
	}
	rtai_cpu = cpu;
	if (irqsl && nirqsl < MAXIRQS && nirqsl > 0) {
		memcpy(&irqs, irqsl, (nirqs = nirqsl)*sizeof(int));
	} else if (!nirqs) {
		return -EINVAL;
	}
	for (irq = 0; irq < nirqs; irq++) {
		rt_assign_irq_to_cpu(irqs[irq], (1 << (1 - cpu)));
	}	
	return 0;
}
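A minimal usage sketch for rt_lock_cpu() on a two-CPU box, with illustrative IRQ numbers:
static int linux_irqs[] = { 0, 14 };	/* hypothetical: timer and IDE IRQs */

static int lock_rt_cpu(void)
{
	/* reserve CPU 1 for real time; rt_lock_cpu() pushes the listed
	   Linux IRQs onto the other CPU (1 << (1 - cpu)) */
	return rt_lock_cpu(1, linux_irqs, sizeof(linux_irqs)/sizeof(linux_irqs[0]));
}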
int init_module(void)
{
	int i;

#ifdef ONE_SHOT
	rt_set_oneshot_mode();
#endif
	rt_task_init(&thread[0], driver, 0, STACK_SIZE, 0, 0, 0);
	for (i = 1; i < NTASKS; i++) {
		rt_task_init(&thread[i], fun, i, STACK_SIZE, 0, 0, 0);
	}
	tick_period = start_rt_timer(nano2count(TICK_PERIOD));
	rt_assign_irq_to_cpu(TIMER_8254_IRQ, TIMER_TO_CPU);
	for (i = 0; i < NTASKS; i++) {
		rt_task_resume(&thread[i]);
	}
	for (i = 0; i < NTASKS; i++) {
		rt_set_runnable_on_cpus(&thread[i], RUN_ON_CPUS); 
	}
	return 0;
}
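A matching cleanup for this module is a sketch, using only calls that appear elsewhere in these examples:
void cleanup_module(void)
{
	int i;

	/* sketch: return the timer IRQ to Linux, stop the periodic timer
	   and delete every task created in init_module() */
	rt_reset_irq_to_sym_mode(TIMER_8254_IRQ);
	stop_rt_timer();
	for (i = 0; i < NTASKS; i++) {
		rt_task_delete(&thread[i]);
	}
}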
Example #9
int rt_request_irq(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode)
{
	unsigned long flags;

	if (handler == NULL || irq >= RTAI_NR_IRQS) {
		return -EINVAL;
	}
	if (rtai_realtime_irq[irq].handler != NULL) {
		return -EBUSY;
	}

	/* atomically install the handler, cookie and return mode, and record
	   the root domain's acknowledge routine for this irq */
	flags = rtai_critical_enter(NULL);
	rtai_realtime_irq[irq].handler = (void *)handler;
	rtai_realtime_irq[irq].cookie  = cookie;
	rtai_realtime_irq[irq].retmode = retmode ? 1 : 0;
	rtai_realtime_irq[irq].irq_ack = (void *)hal_root_domain->irqs[irq].acknowledge;
	rtai_critical_exit(flags);

	if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
		rtai_realtime_irq[irq].cpumask = rt_assign_irq_to_cpu(irq, IsolCpusMask);
	}

	return 0;
}
static long long user_srq(unsigned long whatever)
{
	extern int calibrate_8254(void);
	unsigned long args[MAXARGS];
	/* copy the argument block from user space; fail cleanly on a fault */
	if (copy_from_user(args, (unsigned long *)whatever, MAXARGS*sizeof(unsigned long))) {
		return -EFAULT;
	}

	switch (args[0]) {
		case CAL_8254: {
			return calibrate_8254();
		}

		case KTHREADS:
		case KLATENCY: {
			rt_set_oneshot_mode();
			period = start_rt_timer(nano2count(args[1]));
			if (args[0] == KLATENCY) {
				rt_task_init_cpuid(&rtask, spv, args[2], STACKSIZE, 0, 0, 0, hard_cpu_id());
			} else {
//				rt_kthread_init_cpuid(&rtask, spv, args[2], STACKSIZE, 0, 0, 0, hard_cpu_id());	
			}
			expected = rt_get_time() + 100*period;
			rt_task_make_periodic(&rtask, expected, period);
			break;
		}

		case END_KLATENCY: {
			stop_rt_timer();
			rt_task_delete(&rtask);
			break;
		}

		case FREQ_CAL: {
			times.intrs = -1;
			reset_count = args[1]*HZ;
			count = 0;
			rt_assign_irq_to_cpu(TIMER_8254_IRQ, 1 << hard_cpu_id());
			rt_request_timer(just_ret, COUNT, 1);
			rt_request_global_irq(TIMER_8254_IRQ, calibrate);
			break;
		}

		case END_FREQ_CAL: {
		        rt_free_timer();
		        rt_reset_irq_to_sym_mode(TIMER_8254_IRQ);
		        rt_free_global_irq(TIMER_8254_IRQ);
			break;
		}

		case BUS_CHECK: {
			loops = maxj = 0;
			bus_period = imuldiv(args[1], CPU_FREQ, 1000000000);
			bus_threshold = imuldiv(args[2], CPU_FREQ, 1000000000);
			use_parport = args[3];
			rt_assign_irq_to_cpu(TIMER_8254_IRQ, 1 << hard_cpu_id());
			rt_request_timer((void *)rt_timer_tick_ext, imuldiv(args[1], FREQ_8254, 1000000000), 0);
			rt_set_global_irq_ext(TIMER_8254_IRQ, 1, 0);
			break;
		}

		case END_BUS_CHECK: {
		        rt_free_timer();
		        rt_reset_irq_to_sym_mode(TIMER_8254_IRQ);
			break;
		}
		case GET_PARAMS: {
			rtf_put(0, &params, sizeof(params));
			break;
		}
	} 
	return 0;
}
Example #11
int init_rtai_proxy( void )
{
	rt_assign_irq_to_cpu(TIMER_IRQ, ALLOWED_CPUS);
	rt_request_global_irq(TIMER_IRQ, (void *)tmrisr);
	return 0;
}
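A sketch of the matching teardown, using only calls that appear in the other examples:
void cleanup_rtai_proxy(void)
{
	/* sketch: detach the timer ISR and give TIMER_IRQ back to Linux'
	   symmetric IRQ distribution */
	rt_free_global_irq(TIMER_IRQ);
	rt_reset_irq_to_sym_mode(TIMER_IRQ);
}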