Example #1
File: wd.c Project: cjecho/RTAI
static void fun(long none)
{
	volatile double s, a[MAXDIM], b[MAXDIM];
	long msg;
	RTIME period, wait_delay, sync_time, aim_time;
	*worst_lat = -2000000000;	/* worst_lat is defined elsewhere in wd.c; start well below any real latency */
	wait_delay = nano2count(WAIT_DELAY);
	period     = nano2count(PERIOD);
	for(msg = 0; msg < MAXDIM; msg++) {
		a[msg] = b[msg] = 3.141592;
	}
	rtai_cli();	/* hard-disable interrupts (RTAI) */
	aim_time  = rt_get_time();
	sync_time = aim_time + wait_delay;
	aim_time += period;
	while (!rt_receive_if(NULL, &msg)) {	/* run until any message is received */
		WAIT_AIM_TIME();
		sync_time = rt_get_time();
		msg = abs((long)(sync_time - aim_time));	/* deviation of this wakeup from the aimed time */
		sync_time = aim_time + wait_delay;
		aim_time  += period;
		if (msg > *worst_lat) {
			*worst_lat = msg;	/* keep the worst latency seen so far */
		}
		s = dot(a,b, MAXDIM);		/* some floating point work between samples */
		rt_busy_sleep(WORKING_TIME);	/* simulate a busy working period */
		rt_sleep_until(sync_time);	/* wait for the next activation point */
	}
	rtai_sti();	/* re-enable interrupts before returning */
}
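A minimal sketch of the module glue that typically drives a sampler like fun(): the task is created and started at insmod time, and the loop above ends as soon as any RTAI task rt_send()s it a message, which makes the rt_receive_if() check succeed. None of this is part of wd.c; wd_demo_init, wd_demo_exit, latency_task, the stack size and the priority are illustrative choices.

/* Hypothetical module glue; assumes it lives in the same file as fun(),
   which is static, and uses the standard RTAI in-kernel scheduler API. */
#include <linux/module.h>
#include <rtai_sched.h>

static RT_TASK latency_task;

static int __init wd_demo_init(void)
{
	rt_set_oneshot_mode();
	start_rt_timer(0);
	/* priority 0 (highest), 8 KiB stack, uses_fpu = 1 because fun() does FP work */
	rt_task_init(&latency_task, fun, 0, 8192, 0, 1, NULL);
	rt_task_resume(&latency_task);
	return 0;
}

static void __exit wd_demo_exit(void)
{
	/* in the original program some RTAI task stops fun() by sending it a
	   message; deleting the task here also works if that never happened */
	rt_task_delete(&latency_task);
	stop_rt_timer();
}

module_init(wd_demo_init);
module_exit(wd_demo_exit);
MODULE_LICENSE("GPL");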
Example #2
static void timer_handler(unsigned long data)
{
	static int first;
	if (!first) {
		first = 1;
		rtai_cli();	/* hard-disable interrupts on the very first activation only */
	}
	CHECK_FLAGS();
	while ((ovr = rt_irq_wait_if(TIMER_IRQ)) > 0) {	/* drain pending timer IRQs, if any */
		if (ovr == RT_IRQ_TASK_ERR) {
			return;
		}
		/* overrun processing, if any, goes here */
		rt_sem_signal(dspsem);
	}
	/* normal processing goes here */
	intcnt++;				/* count serviced ticks */
	rtc_enable_irq(TIMER_IRQ, TIMER_FRQ);	/* re-arm the RTC timer interrupt */
	rt_sem_signal(dspsem);			/* wake whoever waits on dspsem */
}
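The handler above only counts ticks and signals dspsem; the per-tick work is expected to run in a task blocked on that semaphore. A minimal sketch of such a consumer, assuming dspsem is the SEM pointer signalled above and that this code sits in the same module; dsp_loop is an illustrative name.

#include <rtai_sched.h>
#include <rtai_sem.h>

extern SEM *dspsem;	/* assumed: defined and initialized in the original file */

static void dsp_loop(long unused)
{
	for (;;) {
		rt_sem_wait(dspsem);	/* one wakeup per rt_sem_signal() in timer_handler() */
		/* per-tick processing goes here; error returns from rt_sem_wait()
		   (e.g. the semaphore being deleted at cleanup) are ignored in this sketch */
	}
}

Such a task would be created and resumed from module init in the same way as the sketch after Example #1.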
Example #3
File: hal.c Project: ArcEye/RTAI
int rt_request_timers(void *rtai_time_handler)
{
	int cpuid;

	if (!rt_linux_hrt_next_shot) {
		rt_linux_hrt_next_shot = _rt_linux_hrt_next_shot;
	}
	for (cpuid = 0; cpuid < num_active_cpus(); cpuid++) {
		struct rt_times *rtimes;
		int ret;
		ret = ipipe_timer_start(rtai_time_handler, rt_linux_hrt_set_mode, rt_linux_hrt_next_shot, cpuid);
		if (ret < 0 || ret == CLOCK_EVT_MODE_SHUTDOWN) {
			printk("THE TIMERS REQUEST FAILED RETURNING %d FOR CPUID %d, FREEING ALL TIMERS.\n", ret, cpuid);
			do {
				ipipe_timer_stop(cpuid);
			} while (--cpuid >= 0);
			return -1;
		}
		rtimes = &rt_smp_times[cpuid];
		if (ret == CLOCK_EVT_MODE_ONESHOT || ret == CLOCK_EVT_MODE_UNUSED) {
			rtimes->linux_tick = 0;
		} else {
			rtimes->linux_tick = rtai_llimd((1000000000 + HZ/2)/HZ, rtai_tunables.clock_freq, 1000000000);
		}
		rtimes->tick_time  = rtai_rdtsc();
		rtimes->intr_time  = rtimes->tick_time + rtimes->linux_tick;
		rtimes->linux_time = rtimes->tick_time + rtimes->linux_tick;
		rtimes->periodic_tick = rtimes->linux_tick;
	}
#if 0 // #ifndef CONFIG_X86_LOCAL_APIC, for calibrating 8254 with our set delay
	rtai_cli();
	outb(0x30, 0x43);
	rt_set_timer_delay(rtai_tunables.clock_freq/50000);
	rtai_sti();
#endif
	return 0;
}
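For reference, a sketch of how rt_request_timers() might be used. ipipe_timer_start() expects a plain void (*)(void) tick handler, so that is the shape assumed for the handler passed in through the void * parameter; teardown mirrors the failure path above by stopping the per-CPU timers. my_timer_tick, my_start_timers and my_release_timers are illustrative names, not RTAI API.

/* Illustrative only; assumes the same kernel/ipipe headers hal.c already includes. */
static void my_timer_tick(void)
{
	/* per-tick real time dispatching would go here */
}

static int my_start_timers(void)
{
	return rt_request_timers((void *)my_timer_tick);	/* 0 on success, -1 on failure */
}

static void my_release_timers(void)
{
	int cpuid;
	for (cpuid = 0; cpuid < num_active_cpus(); cpuid++) {
		ipipe_timer_stop(cpuid);	/* mirrors the failure path of rt_request_timers() */
	}
}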
Example #4
File: usi.c Project: Enextuse/RTAI
static void usi_cli(void)
{
	rtai_cli();
}
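usi_cli() is presumably exported through an LXRT function table so user space can hard-disable interrupts; a matching re-enable wrapper normally sits next to it. A sketch of that pair in use, assuming a usi_sti() counterpart exists in usi.c; touch_shared_state is an illustrative caller.

/* Sketch only; usi_sti() is assumed, not shown in the excerpt above. */
static void usi_sti(void)
{
	rtai_sti();
}

static void touch_shared_state(void)
{
	usi_cli();	/* hard-disable interrupts */
	/* ...touch data shared with interrupt context, keeping this window short... */
	usi_sti();	/* re-enable interrupts */
}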
Example #5
File: sys.c Project: ArcEye/RTAI
static inline long long handle_lxrt_request (unsigned int lxsrq, long *arg, RT_TASK *task)
{
#define larg ((struct arg *)arg)

	union {unsigned long name; RT_TASK *rt_task; SEM *sem; MBX *mbx; RWL *rwl; SPL *spl; int i; void *p; long long ll; } arg0;
	int srq;

	if (likely((srq = SRQ(lxsrq)) < MAX_LXRT_FUN)) {
		unsigned long type;
		struct rt_fun_entry *funcm;
/*
 * The next two lines of code do a lot. They make it possible to extend the
 * use of USP to any other real time module service in user space, both for
 * soft and hard real time. Concept contributed and copyrighted by:
 * Giuseppe Renoldi ([email protected]).
 */
		if (unlikely(!(funcm = rt_fun_ext[INDX(lxsrq)]))) {
			rt_printk("BAD: null rt_fun_ext, no module for extension %d?\n", INDX(lxsrq));
			return -ENOSYS;
		}
		if (!(type = funcm[srq].type)) {
			return ((RTAI_SYSCALL_MODE long long (*)(unsigned long, ...))funcm[srq].fun)(RTAI_FUN_ARGS);
		}
		if (unlikely(NEED_TO_RW(type))) {
			lxrt_fun_call_wbuf(task, funcm[srq].fun, LXRT_NARG(lxsrq), arg, type);
		} else {
			lxrt_fun_call(task, funcm[srq].fun, LXRT_NARG(lxsrq), arg);
		}
		return task->retval;
	}

	arg0.name = arg[0];
	switch (srq) {
		case LXRT_GET_ADR: {
			arg0.p = rt_get_adr(arg0.name);
			return arg0.ll;
		}

		case LXRT_GET_NAME: {
			arg0.name = rt_get_name(arg0.p);
			return arg0.ll;
		}

		case LXRT_TASK_INIT: {
			struct arg { unsigned long name; long prio, stack_size, max_msg_size, cpus_allowed; };
			arg0.rt_task = __task_init(arg0.name, larg->prio, larg->stack_size, larg->max_msg_size, larg->cpus_allowed);
			return arg0.ll;
		}

		case LXRT_TASK_DELETE: {
			arg0.i = __task_delete(arg0.rt_task ? arg0.rt_task : task);
			return arg0.ll;
		}

		case LXRT_SEM_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.sem = rt_malloc(sizeof(SEM)))) {
				struct arg { unsigned long name; long cnt; long typ; };
				lxrt_typed_sem_init(arg0.sem, larg->cnt, larg->typ);
				if (rt_register(larg->name, arg0.sem, IS_SEM, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.sem);
				}
			}
			return 0;
		}

		case LXRT_SEM_DELETE: {
			if (lxrt_sem_delete(arg0.sem)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.sem);
			arg0.i = rt_drg_on_adr(arg0.sem);
			return arg0.ll;
		}

		case LXRT_MBX_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.mbx = rt_malloc(sizeof(MBX)))) {
				struct arg { unsigned long name; long size; int qtype; };
				if (lxrt_typed_mbx_init(arg0.mbx, larg->size, larg->qtype) < 0) {
					rt_free(arg0.mbx);
					return 0;
				}
				if (rt_register(larg->name, arg0.mbx, IS_MBX, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.mbx);
				}
			}
			return 0;
		}

		case LXRT_MBX_DELETE: {
			if (lxrt_mbx_delete(arg0.mbx)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.mbx);
			arg0.i = rt_drg_on_adr(arg0.mbx);
			return arg0.ll;
		}

		case LXRT_RWL_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.rwl = rt_malloc(sizeof(RWL)))) {
				struct arg { unsigned long name; long type; };
				lxrt_typed_rwl_init(arg0.rwl, larg->type);
				if (rt_register(larg->name, arg0.rwl, IS_SEM, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.rwl);
				}
			}
			return 0;
		}

		case LXRT_RWL_DELETE: {
			if (lxrt_rwl_delete(arg0.rwl)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.rwl);
			arg0.i = rt_drg_on_adr(arg0.rwl);
			return arg0.ll;
		}

		case LXRT_SPL_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.spl = rt_malloc(sizeof(SPL)))) {
				struct arg { unsigned long name; };
				lxrt_spl_init(arg0.spl);
				if (rt_register(larg->name, arg0.spl, IS_SEM, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.spl);
				}
			}
			return 0;
		}

		case LXRT_SPL_DELETE: {
			if (lxrt_spl_delete(arg0.spl)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.spl);
			arg0.i = rt_drg_on_adr(arg0.spl);
			return arg0.ll;
		}

		case MAKE_HARD_RT: {
			rt_make_hard_real_time(task);
			return 0;
			/* NOTE: everything below this return is unreachable */
			if (!task || task->is_hard) {
				return 0;
			}
			steal_from_linux(task);
			return 0;
		}

		case MAKE_SOFT_RT: {
			rt_make_soft_real_time(task);
			return 0;
			/* NOTE: everything below this return is unreachable */
			if (!task || !task->is_hard) {
				return 0;
			}
			if (task->is_hard < 0) {
				task->is_hard = 0;
			} else {
				give_back_to_linux(task, 0);
			}
			return 0;
		}

		case PRINT_TO_SCREEN: {
			struct arg { char *display; long nch; };
			arg0.i = rtai_print_to_screen("%s", larg->display);
			return arg0.ll;
		}

		case PRINTK: {
			struct arg { char *display; long nch; };
			arg0.i = rt_printk("%s", larg->display);
			return arg0.ll;
		}

		case NONROOT_HRT: {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
			current->cap_effective |= ((1 << CAP_IPC_LOCK)  |
						   (1 << CAP_SYS_RAWIO) |
						   (1 << CAP_SYS_NICE));
#else
			set_lxrt_perm(CAP_IPC_LOCK);
			set_lxrt_perm(CAP_SYS_RAWIO);
			set_lxrt_perm(CAP_SYS_NICE);
#endif
			return 0;
		}

		case RT_BUDDY: {
			arg0.rt_task = task && rtai_tskext(current, TSKEXT1) == current ? task : NULL;
			return arg0.ll;
		}

		case HRT_USE_FPU: {
			struct arg { RT_TASK *task; long use_fpu; };
			if(!larg->use_fpu) {
				clear_lnxtsk_uses_fpu((larg->task)->lnxtsk);
			} else {
				init_fpu((larg->task)->lnxtsk);
			}
			return 0;
		}

		case GET_USP_FLAGS: {
			arg0.name = arg0.rt_task->usp_flags;
			return arg0.ll;
		}

		case SET_USP_FLAGS: {
			struct arg { RT_TASK *task; unsigned long flags; };
			arg0.rt_task->usp_flags = larg->flags;
			arg0.rt_task->force_soft = (arg0.rt_task->is_hard > 0) && (larg->flags & arg0.rt_task->usp_flags_mask & FORCE_SOFT);
			return 0;
		}

		case GET_USP_FLG_MSK: {
			arg0.name = arg0.rt_task->usp_flags_mask;
			return arg0.ll;
		}

		case SET_USP_FLG_MSK: {
			task->usp_flags_mask = arg0.name;
			task->force_soft = (task->is_hard > 0) && (task->usp_flags & arg0.name & FORCE_SOFT);
			return 0;
		}

		case HARD_SOFT_TOGGLER: {
			if (arg0.rt_task && arg0.rt_task->lnxtsk) {
				return (arg0.rt_task->lnxtsk)->pid;
			}
#ifdef CONFIG_RTAI_HARD_SOFT_TOGGLER
			else if (task) {
				rtai_cli();
				if (task->is_hard > 0) {
					rt_make_soft_real_time(task);
				} else {
					rt_make_hard_real_time(task);
				}
				rtai_sti();
			}
#endif
			return 0;
		}

		case IS_HARD: {
			arg0.i = arg0.rt_task || (arg0.rt_task = rtai_tskext_t(current, TSKEXT0)) ? arg0.rt_task->is_hard : 0;
			return arg0.ll;
		}
		case GET_EXECTIME: {
			struct arg { RT_TASK *task; RTIME *exectime; };
			rt_get_exectime(larg->task, larg->exectime);
			return 0;
		}
		case NEXT_PERIOD: {
			return rt_task_next_period();
		}
		case GET_TIMEORIG: {
			struct arg { RTIME *time_orig; };
			if (larg->time_orig) {
				RTIME time_orig[2];
				rt_gettimeorig(time_orig);
				rt_copy_to_user(larg->time_orig, time_orig, sizeof(time_orig));
			} else {
				rt_gettimeorig(NULL);
			}
			return 0;
		}

		case LINUX_SERVER: {
			struct arg { struct linux_syscalls_list syscalls; };
			if (larg->syscalls.nr) {
				if (larg->syscalls.task->linux_syscall_server) {
					RT_TASK *serv;
					rt_get_user(serv, &larg->syscalls.serv);
					rt_task_masked_unblock(serv, ~RT_SCHED_READY);
				}
				larg->syscalls.task->linux_syscall_server = larg->syscalls.serv;
				rtai_set_linux_task_priority(current, (larg->syscalls.task)->lnxtsk->policy, (larg->syscalls.task)->lnxtsk->rt_priority);
				arg0.rt_task = __task_init((unsigned long)larg->syscalls.task, larg->syscalls.task->base_priority >= BASE_SOFT_PRIORITY ? larg->syscalls.task->base_priority - BASE_SOFT_PRIORITY : larg->syscalls.task->base_priority, 0, 0, 1 << larg->syscalls.task->runnable_on_cpus);

				larg->syscalls.task->linux_syscall_server = arg0.rt_task;
				arg0.rt_task->linux_syscall_server = larg->syscalls.serv;

				return arg0.ll;
			} else {
				if (!larg->syscalls.task) {
					larg->syscalls.task = RT_CURRENT;
				}
				if ((arg0.rt_task = larg->syscalls.task->linux_syscall_server)) {
					larg->syscalls.task->linux_syscall_server = NULL;
					arg0.rt_task->suspdepth = -RTE_HIGERR;
					rt_task_masked_unblock(arg0.rt_task, ~RT_SCHED_READY);
				}
			}
			return 0;
		}

		case KERNEL_CALIBRATOR: {
			struct arg { long period, loops, Latency; };
#if CONFIG_RTAI_SCHED_LATENCY <= 1 || (RTAI_KERN_BUSY_ALIGN_RET_DELAY >= 0) || (RTAI_USER_BUSY_ALIGN_RET_DELAY >= 0) 
			extern int rt_smp_half_tick[];
			int cpu;
			rtai_tunables.sched_latency = rtai_imuldiv(abs((int)larg->Latency), rtai_tunables.clock_freq, 1000000000);
			if (rtai_tunables.sched_latency < rtai_tunables.setup_time_TIMER_CPUNIT) {
				rtai_tunables.sched_latency = rtai_tunables.setup_time_TIMER_CPUNIT;
			}
			for (cpu = 0; cpu < RTAI_NR_CPUS; cpu++) {
				rt_smp_half_tick[cpu] = rtai_tunables.sched_latency/2;
			}
#endif
			return larg->Latency < 0 ? 0 : kernel_calibrator_spv(larg->period, larg->loops, task);
		}

		case GET_CPU_FREQ: {
			extern struct calibration_data rtai_tunables;
			return rtai_tunables.clock_freq;
		}

		default: {
			rt_printk("RTAI/LXRT: Unknown srq #%d\n", srq);
			arg0.i = -ENOSYS;
			return arg0.ll;
		}
	}
	return 0;
}
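Two idioms carry most of handle_lxrt_request(): the local struct arg plus the larg cast, which reinterprets the flat user argument block as a per-call structure, and the arg0 union, which stores an int, a pointer or a name and still returns it through the single long long return value. A small stand-alone illustration of the same packing pattern in plain user-space C; task_init_args, pack_int and show are made-up names used only here.

#include <stdio.h>

/* return an int through a long long, as the arg0 union does above */
static long long pack_int(int value)
{
	union { int i; void *p; long long ll; } r;
	r.ll = 0;
	r.i  = value;
	return r.ll;
}

/* view a flat argument array as a per-call struct, as the larg macro does above */
struct task_init_args { unsigned long name; long prio, stack_size; };

static void show(long *arg)
{
#define targ ((struct task_init_args *)arg)
	printf("name=%lu prio=%ld stack=%ld\n", targ->name, targ->prio, targ->stack_size);
#undef targ
}

int main(void)
{
	long args[3] = { 0xABCD, 10, 8192 };
	long long packed = pack_int(-19);

	show(args);
	/* the caller recovers the int from the low part of the 64-bit return */
	printf("packed=%lld unpacked=%d\n", packed, (int)packed);
	return 0;
}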