Example #1
void _rtapi_task_wrapper(void * task_id_hack) {
    int ret;
    int task_id = (int)(long) task_id_hack; // ugly, but I ain't gonna fix it
    task_data *task = &task_array[task_id];

    /* clamp the task period to the base thread period and compute the integer ratio */
    if (task->period < period) task->period = period;
    task->ratio = task->period / period;
    rtapi_print_msg(RTAPI_MSG_DBG,
		    "rtapi_task_wrapper: task %p '%s' period=%d "
		    "prio=%d ratio=%d\n",
		    task, task->name, task->ratio * period,
		    task->prio, task->ratio);

    ostask_self[task_id]  = rt_task_self();

    // starting the thread with the TF_NOWAIT flag implies it is not periodic
    // https://github.com/machinekit/machinekit/issues/237#issuecomment-126590880
    // NB this assumes rtapi_wait() is NOT called on this thread any more
    // see thread_task() where this is handled for now

    if (!(task->flags & TF_NOWAIT)) {
	if ((ret = rt_task_set_periodic(NULL, TM_NOW, task->ratio * period)) < 0) {
	    rtapi_print_msg(RTAPI_MSG_ERR,
			    "ERROR: rt_task_set_periodic(%d,%s) failed %d %s\n",
			    task_id, task->name, ret, strerror(-ret));
	    // really nothing one can realistically do here,
	    // so just enable forensics
	    abort();
	}
    }
#ifdef USE_SIGXCPU
    // required to enable delivery of the SIGXCPU signal
    rt_task_set_mode(0, T_WARNSW, NULL);
#endif

    _rtapi_task_update_stats_hook(); // initial recording

#ifdef TRIGGER_SIGXCPU_ONCE
    // enable this for testing only:
    // this should cause a domain switch due to the write()
    // system call and hence a single SIGXCPU posted,
    // causing an XU_SIGXCPU exception
    // verifies _rtapi_task_update_stats_hook() works properly
    // and thread_status counters are updated
    printf("--- once in task_wrapper\n");
#endif

    /* call the task function with the task argument */
    (task->taskcode) (task->arg);
    
    /* if the task ever returns, we record that fact */
    task->state = ENDED;
    rtapi_print_msg(RTAPI_MSG_ERR,
		    "ERROR: reached end of wrapper for task %d '%s'\n", 
		    task_id, task->name);
}
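For context, the (int)(long) round-trip at the top of the wrapper only makes sense together with the call that starts the task and smuggles the small integer id through the void* cookie. A hedged sketch of that start call, which is not part of this example (ostask_array is an assumed RT_TASK handle array):

#include <native/task.h>

// Hedged sketch: pass the integer task id through rt_task_start()'s
// void* cookie, matching the (int)(long) cast inside _rtapi_task_wrapper().
static int start_task_sketch(int task_id, RT_TASK *ostask_array)
{
    return rt_task_start(&ostask_array[task_id], _rtapi_task_wrapper,
                         (void *)(long)task_id);
}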
Example #2
int _rtapi_task_update_stats(void)
{
#ifdef HAVE_RTAPI_TASK_UPDATE_STATS_HOOK
    extern int _rtapi_task_update_stats_hook(void);

    return _rtapi_task_update_stats_hook();
#else
    return -ENOSYS;  // not implemented in this flavor
#endif
}
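A hedged caller-side sketch (the function name below is illustrative): flavors built without the hook return -ENOSYS, which callers should treat as "no stats available" rather than a hard error.

#include <errno.h>

static void report_stats_sketch(void)
{
    int tid = _rtapi_task_update_stats();

    if (tid == -ENOSYS)
        return;   // this flavor has no stats support; nothing to report
    if (tid > -1) {
        // tid indexes global_data->thread_status[], as in the other examples
    }
}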
Example #3
// trampoline to current handler
static void signal_handler(int sig, siginfo_t *si, void *uctx)
{
    int task_id = _rtapi_task_update_stats_hook();
    if (task_id > -1) {
	rtapi_threadstatus_t *ts = &global_data->thread_status[task_id];

	rtapi_exception_detail_t detail = {0};
	detail.task_id = task_id;

	if (rt_exception_handler)
	    rt_exception_handler(XU_SIGXCPU, &detail, ts);
    } else {
	rtapi_print_msg(RTAPI_MSG_ERR, "BUG: SIGXCPU - cant identify task\n");
	if (rt_exception_handler)
	    rt_exception_handler(XU_SIGXCPU_BUG, NULL, NULL);
    }
}
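The example shows only the trampoline; how it gets installed is not part of it. A hedged sketch of the usual SA_SIGINFO registration for SIGXCPU:

#include <signal.h>
#include <string.h>

// Hedged sketch: register signal_handler() so the kernel delivers a
// siginfo_t (SA_SIGINFO) when SIGXCPU fires for this process.
static int install_sigxcpu_handler_sketch(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = signal_handler;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    return sigaction(SIGXCPU, &sa, NULL);
}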
Example #4
int _rtapi_wait_hook(void) {
    struct timespec ts;
    task_data *task = rtapi_this_task();

    if (extra_task_data[task_id(task)].deleted)
	pthread_exit(0);

    clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
		    &extra_task_data[task_id(task)].next_time, NULL);
    _rtapi_advance_time(&extra_task_data[task_id(task)].next_time,
		       task->period, 0);
    clock_gettime(CLOCK_MONOTONIC, &ts);
    if (ts.tv_sec > extra_task_data[task_id(task)].next_time.tv_sec
	|| (ts.tv_sec == extra_task_data[task_id(task)].next_time.tv_sec
	    && ts.tv_nsec > extra_task_data[task_id(task)].next_time.tv_nsec)) {

	// timing went wrong:

	// update stats counters in thread status
	_rtapi_task_update_stats_hook();

	rtapi_threadstatus_t *tstat =
	    &global_data->thread_status[task_id(task)];

	tstat->flavor.rtpreempt.wait_errors++;

	rtapi_exception_detail_t detail = {0};
	detail.task_id = task_id(task);

#ifndef RTAPI_POSIX
	if (rt_exception_handler)
	    rt_exception_handler(RTP_DEADLINE_MISSED, &detail, tstat);
#endif
    }
    return 0;
}
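_rtapi_advance_time() is called above but not shown. A minimal sketch of what such a deadline-advancing helper typically does, stated as an assumption rather than the actual RTAPI implementation:

#include <time.h>

// Assumed behavior: add 'nsec' (and 'sec') to an absolute deadline and
// renormalize so that 0 <= tv_nsec < 1e9.
static void advance_time_sketch(struct timespec *t, unsigned long nsec,
                                unsigned long sec)
{
    t->tv_sec  += sec;
    t->tv_nsec += nsec;
    while (t->tv_nsec >= 1000000000L) {
        t->tv_nsec -= 1000000000L;
        t->tv_sec++;
    }
}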
Example #5
void _rtapi_wait_hook(void) {
    unsigned long overruns = 0;
    int result =  rt_task_wait_period(&overruns);

    if (result) {
	// something went wrong:

	// update stats counters in thread status
	_rtapi_task_update_stats_hook();

	// inquire, fill in
	// exception descriptor, and call exception handler

	int task_id = _rtapi_task_self();

	// paranoid, but you never know; if this index is off,
	// things will go haywire really fast
	if ((task_id < 0) || (task_id >= RTAPI_MAX_TASKS)) {
	    rtapi_print_msg(RTAPI_MSG_ERR,
			    "_rtapi_wait_hook: BUG - task_id out of range: %d\n",
			    task_id);
	    // maybe should call a BUG exception here
	    return;
	}

	// task_data *task = &(task_array[task_id]);
	rtapi_exception_detail_t detail = {0};
	rtapi_threadstatus_t *ts = &global_data->thread_status[task_id];
	rtapi_exception_t type;

	// exception descriptor
	detail.task_id = task_id;
	detail.error_code = result;

	switch (result) {

	case -ETIMEDOUT:
	    // release point was missed
	    detail.flavor.xeno.overruns = overruns;

	    // update thread status in global_data
	    ts->flavor.xeno.wait_errors++;
	    ts->flavor.xeno.total_overruns += overruns;
	    type = XK_ETIMEDOUT;
	    break;

	case -EWOULDBLOCK:
	    // returned if rt_task_set_periodic() has not previously
	    // been called for the calling task. This is clearly
	    // a Xenomai API usage error.
	    ts->api_errors++;
	    type = XK_EWOULDBLOCK;
	    break;

	case -EINTR:
	    // returned if rt_task_unblock() has been called for
	    // the waiting task before the next periodic release
	    // point has been reached. In this case, the overrun
	    // counter is reset too.
	    // a Xenomai API usage error.
	    ts->api_errors++;
	    type = XK_EINTR;
	    break;

	case -EPERM:
	    // returned if this service was called from a
	    // context which cannot sleep (e.g. interrupt,
	    // non-realtime or scheduler locked).
	    // a Xenomai API usage error.
	    ts->api_errors++;
	    type = XK_EPERM;
	    break;

	default:
	    // the above should handle all possible returns
	    // as per manual, so at least leave a scent
	    // (or what Jeff calls a 'canary value')
	    ts->other_errors++;
	    type = XK_UNDOCUMENTED;
	}
	if (rt_exception_handler)
		rt_exception_handler(type, &detail, ts);
    }  // else: ok - no overruns;
}
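For reference, this wait hook services the standard Xenomai 2 native-skin pattern of rt_task_set_periodic() once at startup followed by a blocking wait at every cycle. A hedged sketch with illustrative names (running, do_cycle, period_ns are not from the examples):

// Hedged sketch: the periodic loop whose release points the hook above
// waits on; errors are classified and counted inside _rtapi_wait_hook().
static void periodic_loop_sketch(long long period_ns)
{
    rt_task_set_periodic(NULL, TM_NOW, period_ns);  // as in the task wrapper
    while (running) {
        _rtapi_wait_hook();   // block until the next release point
        do_cycle();           // the actual real-time work
    }
}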
Example #6
static void *realtime_thread(void *arg) {
    task_data *task = arg;

    rtapi_set_task(task);


    if (task->period < period)
	task->period = period;
    task->ratio = task->period / period;

    extra_task_data[task_id(task)].tid = (pid_t) syscall(SYS_gettid);

    rtapi_print_msg(RTAPI_MSG_INFO,
		    "RTAPI: task '%s' at %p period = %d ratio=%d id=%d TID=%d\n",
		    task->name,
		    task, task->period, task->ratio,
		    task_id(task), extra_task_data[task_id(task)].tid);

    if (realtime_set_affinity(task))
	goto error;
    if (realtime_set_priority(task)) {
#ifdef RTAPI_POSIX // This requires privs - tell user how to obtain them
	rtapi_print_msg(RTAPI_MSG_ERR, 
			"to get non-preemptive scheduling with POSIX threads,");
	rtapi_print_msg(RTAPI_MSG_ERR, 
			"you need to run 'sudo setcap cap_sys_nice=pe libexec/rtapi_app_posix'");
	rtapi_print_msg(RTAPI_MSG_ERR, 
			"your might have to install setcap (e.g.'sudo apt-get install libcap2-bin') to do this.");
#else
	goto error;
#endif
    }

    /* We're done initializing. Open the barrier. */
    pthread_barrier_wait(&extra_task_data[task_id(task)].thread_init_barrier);

    clock_gettime(CLOCK_MONOTONIC, &extra_task_data[task_id(task)].next_time);
    _rtapi_advance_time(&extra_task_data[task_id(task)].next_time,
		       task->period, 0);

    _rtapi_task_update_stats_hook(); // initial stats update

    /* The task should not pagefault at all. So record initial counts now.
     * Note that currently we _do_ receive a few pagefaults in the
     * taskcode init. This is noncritical and probably not worth
     * fixing. */
    {
	struct rusage ru;

	if (getrusage(RUSAGE_THREAD, &ru)) {
	    rtapi_print_msg(RTAPI_MSG_ERR,"getrusage(): %d - %s\n",
			    errno, strerror(-errno));
	} else {
	    rtapi_threadstatus_t *ts =
		&global_data->thread_status[task_id(task)];
	    ts->flavor.rtpreempt.startup_ru_nivcsw = ru.ru_nivcsw;
	    ts->flavor.rtpreempt.startup_ru_minflt = ru.ru_minflt;
	    ts->flavor.rtpreempt.startup_ru_majflt = ru.ru_majflt;
	}
    }

    /* call the task function with the task argument */
    task->taskcode(task->arg);

    rtapi_print_msg
	(RTAPI_MSG_ERR,
	 "ERROR: reached end of realtime thread for task %d\n",
	 task_id(task));
    extra_task_data[task_id(task)].deleted = 1;

    return NULL;
 error:
    /* Signal that we're dead and open the barrier. */
    extra_task_data[task_id(task)].deleted = 1;
    pthread_barrier_wait(&extra_task_data[task_id(task)].thread_init_barrier);
    return NULL;
}
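The creating side of thread_init_barrier is not part of this example. A hedged sketch of how a creator would start realtime_thread() and wait for its setup to finish or fail (the 2-party barrier count and local variable names are assumptions):

#include <pthread.h>

// Hedged sketch: realtime_thread() opens the barrier once affinity and
// priority are set, or after marking .deleted on failure, so the creator
// can block here until setup is decided.
static int start_realtime_thread_sketch(int id, task_data *task)
{
    pthread_t tid;

    pthread_barrier_init(&extra_task_data[id].thread_init_barrier, NULL, 2);
    if (pthread_create(&tid, NULL, realtime_thread, task))
        return -1;
    pthread_barrier_wait(&extra_task_data[id].thread_init_barrier);
    return extra_task_data[id].deleted ? -1 : 0;
}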