/*
 * Kernel-side worker task for the context-switch test.
 *
 * Registers itself with the test context, then loops forever, forcing
 * context switches to other test tasks in a repeating 3-phase pattern.
 * When RTTST_SWTEST_USE_FPU is set, it stamps the FPU registers with a
 * per-task, per-iteration value before each switch and verifies the
 * stamp afterwards, so any FPU state corruption across switches is
 * detected and reported.
 *
 * cookie: struct taskarg * carrying the test context and this task's
 *         descriptor (cast back from the opaque task-entry argument).
 */
static void rtswitch_ktask(void *cookie)
{
	struct taskarg *arg = (struct taskarg *) cookie;
	rtswitch_context_t *ctx = arg->ctx;
	rtswitch_task_t *task = arg->task;
	unsigned to, i = 0;

	/* Start the switch-target cursor at our own index. */
	to = task->base.index;

	/* Block until the test harness schedules us in. */
	rtswitch_pend_rt(ctx, task->base.index);

	for(;;) {
		if (task->base.flags & RTTST_SWTEST_USE_FPU)
			/* Stamp FPU registers with a value unique to this
			   task and iteration; checked after the switch. */
			fp_regs_set(task->base.index + i * 1000);

		/* Cycle through three switch patterns. */
		switch(i % 3) {
		case 0:
			/* to == from means "return to last task" */
			rtswitch_to_rt(ctx, task->base.index, task->base.index);
			break;
		case 1:
			/* Advance the target cursor, skipping ourselves
			   and wrapping at the end of the task array. */
			if (++to == task->base.index)
				++to;
			if (to > ctx->tasks_count - 1)
				to = 0;
			if (to == task->base.index)
				++to;
			/* Fall through. */
		case 2:
			/* Switch to the task currently under the cursor. */
			rtswitch_to_rt(ctx, task->base.index, to);
		}

		if (task->base.flags & RTTST_SWTEST_USE_FPU) {
			unsigned fp_val, expected;

			/* Verify the FPU stamp survived the switch(es). */
			expected = task->base.index + i * 1000;
			fp_val = fp_regs_check(expected);
			if (fp_val != expected) {
				if (task->base.flags & RTTST_SWTEST_FREEZE)
					/* Freeze the tracer so the trace
					   around the corruption is kept. */
					xntrace_user_freeze(0, 0);
				handle_ktask_error(ctx, fp_val);
			}
		}

		/* Wrap the iteration counter to keep the FPU stamp
		   (index + i * 1000) within unsigned range. */
		if (++i == 4000000)
			i = 0;
	}
}
/*
 * timer thread
 *
 * Modes:
 * - clock_nanosleep based
 * - cyclic timer based
 *
 * Clock:
 * - CLOCK_MONOTONIC
 * - CLOCK_REALTIME
 * - CLOCK_MONOTONIC_HR
 * - CLOCK_REALTIME_HR
 *
 */
/*
 * Per-CPU measurement thread (cyclictest-style): sleeps until each
 * periodic wakeup, measures the difference between the actual and the
 * intended wakeup time with calcdiff(), and accumulates min/max/avg
 * statistics in *par->stats until test_shutdown is raised or
 * par->max_cycles cycles have been measured.
 *
 * param: struct thread_param * describing interval, clock, mode,
 *        priority, signal number and the stats block to fill in.
 * Returns NULL on normal termination; (void *)1 if the scheduling
 * parameters could not be applied (Xenomai build only).
 */
static void *timerthread(void *param)
{
	struct thread_param *par = param;
	struct sched_param schedp;
	sigset_t sigset;
	struct timespec now, next, interval;
	struct thread_stat *stat = par->stats;
	/* Priority 0 means "not real-time": fall back to SCHED_OTHER. */
	int policy = par->prio ? SCHED_FIFO : SCHED_OTHER;
	int err;
#ifdef __UNSUPPORTED
	struct itimerval itimer;
	struct sigevent sigev;
	timer_t timer;
	struct itimerspec tspec;
#endif
#if (INGO_TRACE + TGLX_TRACE)
	int stopped = 0;
#endif

	/* par->interval is in microseconds; split into sec/nsec. */
	interval.tv_sec = par->interval / USEC_PER_SEC;
	interval.tv_nsec = (par->interval % USEC_PER_SEC) * 1000;

#if INGO_TRACE
	/* Configure the (Ingo Molnar) latency tracer via /proc for a
	   user-triggered, free-running trace. */
	system("echo 1 > /proc/sys/kernel/trace_all_cpus");
	system("echo 1 > /proc/sys/kernel/trace_enabled");
	system("echo 1 > /proc/sys/kernel/trace_freerunning");
	system("echo 0 > /proc/sys/kernel/trace_print_at_crash");
	system("echo 1 > /proc/sys/kernel/trace_user_triggered");
	system("echo 0 > /proc/sys/kernel/trace_user_trigger_irq");
	system("echo 0 > /proc/sys/kernel/trace_verbose");
	system("echo 0 > /proc/sys/kernel/preempt_thresh");
	system("echo 0 > /proc/sys/kernel/wakeup_timing");
	system("echo 0 > /proc/sys/kernel/preempt_max_latency");
#endif

	stat->tid = gettid();

	/* Block the wakeup signal so it can be consumed synchronously
	   with sigwait() in the timer-based modes. */
	sigemptyset(&sigset);
	sigaddset(&sigset, par->signal);
	sigprocmask(SIG_BLOCK, &sigset, NULL);

#ifdef __UNSUPPORTED
	if (par->mode == MODE_CYCLIC) {
		/* POSIX timer delivering par->signal to this thread. */
		sigev.sigev_notify = SIGEV_THREAD_ID | SIGEV_SIGNAL;
		sigev.sigev_signo = par->signal;
		sigev.sigev_notify_thread_id = stat->tid;
		timer_create(par->clock, &sigev, &timer);
		tspec.it_interval = interval;
	}
#endif

	memset(&schedp, 0, sizeof(schedp));
	schedp.sched_priority = par->prio;
	err = pthread_setschedparam(pthread_self(), policy, &schedp);
#ifdef __XENO__
	/* On Xenomai this typically fails when the POSIX skin module
	   is not loaded; abort the whole test in that case. */
	if (err) {
		fprintf(stderr, "pthread_setschedparam: %s\n"
			"(modprobe xeno_posix?)\n", strerror(err));
		test_shutdown = 1;
		return (void *) 1;
	}
#endif

	/* Get current time */
	/* First wakeup: one second after the shared start time
	   (`start` is a file-scope timespec set by the caller). */
	next = start;
	next.tv_sec++;

#ifdef __UNSUPPORTED
	if (par->mode == MODE_CYCLIC) {
		if (par->timermode == TIMER_ABSTIME)
			tspec.it_value = next;
		else {
			tspec.it_value.tv_nsec = 0;
			tspec.it_value.tv_sec = 1;
		}
		timer_settime(timer, par->timermode, &tspec, NULL);
	}

	if (par->mode == MODE_SYS_ITIMER) {
		/* setitimer() works in microseconds, not nanoseconds. */
		itimer.it_value.tv_sec = 1;
		itimer.it_value.tv_usec = 0;
		itimer.it_interval.tv_sec = interval.tv_sec;
		itimer.it_interval.tv_usec = interval.tv_nsec / 1000;
		setitimer (ITIMER_REAL, &itimer, NULL);
	}
#endif

	/* Signal the parent that this thread is up and measuring. */
	stat->threadstarted++;

#if (INGO_TRACE + TGLX_TRACE)
	/* Magic gettimeofday() call used as the tracer's user trigger
	   (arguments are sentinel values, not real pointers). */
	gettimeofday(0,(struct timezone *)1);
#endif

	while (!test_shutdown) {
		long diff;
#ifdef __UNSUPPORTED
		int sigs;
#endif

		/* Wait for next period */
		switch (par->mode) {
#ifdef __UNSUPPORTED
		case MODE_CYCLIC:
		case MODE_SYS_ITIMER:
			/* Timer-driven modes: wait for the signal. */
			if (sigwait(&sigset, &sigs) < 0)
				goto out;
			break;
#endif
		case MODE_CLOCK_NANOSLEEP:
			if (par->timermode == TIMER_ABSTIME)
				clock_nanosleep(par->clock, TIMER_ABSTIME, &next, NULL);
			else {
				/* Relative sleep: recompute the intended
				   wakeup from the time just before it. */
				clock_gettime(par->clock, &now);
				clock_nanosleep(par->clock, TIMER_RELTIME, &interval, NULL);
				next.tv_sec = now.tv_sec + interval.tv_sec;
				next.tv_nsec = now.tv_nsec + interval.tv_nsec;
				tsnorm(&next);
			}
			break;
#ifdef __UNSUPPORTED
		case MODE_SYS_NANOSLEEP:
			clock_gettime(par->clock, &now);
			nanosleep(&interval, NULL);
			next.tv_sec = now.tv_sec + interval.tv_sec;
			next.tv_nsec = now.tv_nsec + interval.tv_nsec;
			tsnorm(&next);
			break;
#endif
		}

		clock_gettime(par->clock, &now);

		/* Latency sample: actual wakeup minus intended wakeup. */
		diff = calcdiff(now, next);
		if (diff < stat->min)
			stat->min = diff;
		if (diff > stat->max) {
			stat->max = diff;
#if IPIPE_TRACE
			/* New worst case: freeze the I-pipe tracer so the
			   trace leading up to it is preserved. */
			if (stat->traced)
				xntrace_user_freeze(diff, 0);
#endif
		}
		stat->avg += (double) diff;
#if (INGO_TRACE + TGLX_TRACE)
		/* Latency above the trace threshold: stop the tracer
		   (second magic gettimeofday()) and end the test. */
		if (!stopped && (diff > tracelimit)) {
			stopped++;
			gettimeofday(0,0);
			test_shutdown++;
		}
#endif
		stat->act = diff;
		stat->cycles++;

		/* Ring buffer of recent samples, if one was allocated. */
		if (par->bufmsk)
			stat->values[stat->cycles & par->bufmsk] = diff;

		/* Advance the absolute deadline by one period. */
		next.tv_sec += interval.tv_sec;
		next.tv_nsec += interval.tv_nsec;
		tsnorm(&next);

		if (par->max_cycles && par->max_cycles == stat->cycles)
			break;
	}

#ifdef __UNSUPPORTED
out:
	if (par->mode == MODE_CYCLIC)
		timer_delete(timer);

	if (par->mode == MODE_SYS_ITIMER) {
		/* Disarm the interval timer. */
		itimer.it_value.tv_sec = 0;
		itimer.it_value.tv_usec = 0;
		itimer.it_interval.tv_sec = 0;
		itimer.it_interval.tv_usec = 0;
		setitimer (ITIMER_REAL, &itimer, NULL);
	}
#endif

	/* switch to normal */
	schedp.sched_priority = 0;
	pthread_setschedparam(pthread_self(), SCHED_OTHER, &schedp);

	/* -1 tells the parent this thread has terminated. */
	stat->threadstarted = -1;

	return NULL;
}
/*
 * Periodic latency-sampling task (Xenomai native skin).
 *
 * Makes the calling task periodic with period period_ns (starting one
 * millisecond from now), then loops forever collecting batches of
 * nsamples wakeup-jitter measurements in TSC units.  For each batch it
 * publishes min/max/avg jitter and the overrun count into the global
 * display variables and signals display_sem so the display loop can
 * print them.  The first WARMUP_TIME batches are discarded as warmup.
 *
 * cookie: unused task-entry argument.
 */
void latency(void *cookie)
{
	int err, count, nsamples, warmup = 1;
	RTIME expected_tsc, period_tsc, start_ticks, fault_threshold;
	RT_TIMER_INFO timer_info;
	unsigned old_relaxed = 0;

	/* Snapshot the current timer date and TSC to anchor the
	   periodic schedule. */
	err = rt_timer_inquire(&timer_info);
	if (err) {
		fprintf(stderr, "latency: rt_timer_inquire, code %d\n", err);
		return;
	}

	/* Jitter beyond this threshold is attributed to mode switches
	   (relaxed transitions) rather than normal scheduling noise. */
	fault_threshold = rt_timer_ns2tsc(CONFIG_XENO_DEFAULT_PERIOD);
	/* Samples per display batch, derived from the sampling period.
	   NOTE(review): the extra /1000 makes a batch 1 ms worth of
	   periods, not 1 s — confirm against the display loop. */
	nsamples = ONE_BILLION / period_ns / 1000;
	period_tsc = rt_timer_ns2tsc(period_ns);
	/* start time: one millisecond from now. */
	start_ticks = timer_info.date + rt_timer_ns2ticks(1000000);
	expected_tsc = timer_info.tsc + rt_timer_ns2tsc(1000000);

	err = rt_task_set_periodic(NULL, start_ticks, rt_timer_ns2ticks(period_ns));
	if (err) {
		fprintf(stderr, "latency: failed to set periodic, code %d\n", err);
		return;
	}

	for (;;) {
		/* Per-batch extrema, seeded with impossible values. */
		long minj = TEN_MILLION, maxj = -TEN_MILLION, dt;
		long overrun = 0;
		long long sumj;

		test_loops++;

		for (count = sumj = 0; count < nsamples; count++) {
			unsigned new_relaxed;
			unsigned long ov;

			/* Advance the ideal wakeup time, then sleep
			   until the actual one. */
			expected_tsc += period_tsc;
			err = rt_task_wait_period(&ov);

			/* Jitter = actual wakeup TSC - ideal wakeup TSC
			   (may be negative if we woke early). */
			dt = (long)(rt_timer_tsc() - expected_tsc);
			new_relaxed = sampling_relaxed;
			if (dt > maxj) {
				/* Large spikes coinciding with relaxed-mode
				   transitions are counted separately. */
				if (new_relaxed != old_relaxed && dt > fault_threshold)
					max_relaxed += new_relaxed - old_relaxed;
				maxj = dt;
			}
			old_relaxed = new_relaxed;
			if (dt < minj)
				minj = dt;
			sumj += dt;

			if (err) {
				if (err != -ETIMEDOUT) {
					fprintf(stderr, "latency: wait period failed, code %d\n", err);
					exit(EXIT_FAILURE); /* Timer stopped. */
				}
				/* -ETIMEDOUT: we missed ov periods; skip
				   the ideal time past them so the next
				   sample is measured against the right
				   deadline. */
				overrun += ov;
				expected_tsc += period_tsc * ov;
			}

			/* New all-time worst case: freeze the tracer so
			   the trace around it is preserved. */
			if (freeze_max && (dt > gmaxjitter) && !(finished || warmup)) {
				xntrace_user_freeze(rt_timer_tsc2ns(dt), 0);
				gmaxjitter = dt;
			}

			if (!(finished || warmup) && need_histo())
				add_histogram(histogram_avg, dt);
		}

		if (!warmup) {
			if (!finished && need_histo()) {
				add_histogram(histogram_max, maxj);
				add_histogram(histogram_min, minj);
			}

			/* Publish batch results and update the global
			   extrema, then wake the display loop. */
			minjitter = minj;
			if (minj < gminjitter)
				gminjitter = minj;

			maxjitter = maxj;
			if (maxj > gmaxjitter)
				gmaxjitter = maxj;

			avgjitter = sumj / nsamples;
			gavgjitter += avgjitter;
			goverrun += overrun;
			rt_sem_v(&display_sem);
		}

		/* Discard the warmup batches, then restart counting. */
		if (warmup && test_loops == WARMUP_TIME) {
			test_loops = 0;
			warmup = 0;
		}
	}
}
/*
 * Fatal-signal handler: freeze the tracer so the trace leading up to
 * the fault is preserved, then re-deliver the signal with its default
 * disposition so the process still terminates normally for it.
 */
void faulthand(int sig)
{
	pid_t me;

	/* Snapshot the trace before anything else runs. */
	xntrace_user_freeze(0, 1);

	/* Restore default handling and re-raise the same signal. */
	signal(sig, SIG_DFL);
	me = getpid();
	kill(me, sig);
}