int __thr_umtx_timedlock(volatile umtx_t *mtx, const struct timespec *timeout) { struct timespec ts, ts2, ts3; int timo, ret; if ((timeout->tv_sec < 0) || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)) return (ETIMEDOUT); /* XXX there should have MONO timer! */ clock_gettime(CLOCK_REALTIME, &ts); TIMESPEC_ADD(&ts, &ts, timeout); ts2 = *timeout; for (;;) { if (ts2.tv_nsec) { timo = (int)(ts2.tv_nsec / 1000); if (timo == 0) timo = 1; } else { timo = 1000000; } ret = __thr_umtx_lock(mtx, timo); if (ret != ETIMEDOUT) break; clock_gettime(CLOCK_REALTIME, &ts3); TIMESPEC_SUB(&ts2, &ts, &ts3); if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { ret = ETIMEDOUT; break; } } return (ret); }
/*
 * Regular umtx wait that cannot return EINTR
 *
 * Sleeps until *mtx no longer holds the expected value 'exp', the
 * timeout (if any) expires, or — for timed waits only — the sleep is
 * interrupted by a signal.
 *
 * mtx     - the userland mutex/condition word to watch
 * exp     - value *mtx is expected to hold while we sleep
 * timeout - relative timeout, or NULL for an unbounded wait
 * clockid - clock used to track the timed wait
 *
 * Returns 0 when the value changed (or might have; callers re-check),
 * ETIMEDOUT on expiry, or EINTR (timed waits only).
 */
int _thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout, int clockid)
{
	struct timespec ts, ts2, ts3;	/* deadline, remaining, now */
	int timo, errval, ret = 0;

	/* Compiler barrier before reading the shared word. */
	cpu_ccfence();
	if (*mtx != exp)
		return (0);

	if (timeout == NULL) {
		/*
		 * NOTE: If no timeout, EINTR cannot be returned.  Ignore
		 * EINTR.
		 */
		/* Re-sleep in ~10s slices; only EBUSY ends the loop. */
		while ((errval = _umtx_sleep_err(mtx, exp, 10000000)) > 0) {
			if (errval == EBUSY)
				break;
#if 0
			if (errval == ETIMEDOUT || errval == EWOULDBLOCK) {
				if (*mtx != exp) {
					fprintf(stderr,
					    "thr_umtx_wait: FAULT VALUE CHANGE "
					    "%d -> %d oncond %p\n",
					    exp, *mtx, mtx);
				}
			}
#endif
			/* Value changed while we were asleep — done. */
			if (*mtx != exp)
				return(0);
		}
		return (ret);
	}

	/*
	 * Timed waits can return EINTR
	 */
	/* A zero/negative relative timeout expires immediately. */
	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
		return (ETIMEDOUT);

	/* Convert the relative timeout into an absolute deadline. */
	clock_gettime(clockid, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);
	ts2 = *timeout;
	for (;;) {
		/*
		 * Sleep at most 1 second per iteration; sub-second
		 * remainders sleep for their microsecond equivalent
		 * (minimum 1 us).
		 */
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}
		if ((errval = _umtx_sleep_err(mtx, exp, timo)) > 0) {
			if (errval == EBUSY) {
				/* Value no longer matches: success. */
				ret = 0;
				break;
			}
			if (errval == EINTR) {
				/* Signal delivery aborts a timed wait. */
				ret = EINTR;
				break;
			}
		}
		/* Recompute remaining time; expire if none is left. */
		clock_gettime(clockid, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 ||
		    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}
/*
 * Common implementation for pthread_join() and its timed variant.
 *
 * Waits (optionally until 'abstime', measured on CLOCK_REALTIME) for
 * 'pthread' to reach PS_DEAD, then collects its exit value and moves it
 * to the garbage-collection list.
 *
 * Returns 0 on success, or EINVAL/EDEADLK/ESRCH/ENOTSUP/ETIMEDOUT.
 */
static int
join_common(pthread_t pthread, void **thread_return,
    const struct timespec *abstime)
{
	struct pthread *curthread = tls_get_curthread();
	struct timespec ts, ts2, *tsp;
	void *tmp;
	long state;
	int oldcancel;
	int ret = 0;

	if (pthread == NULL)
		return (EINVAL);

	/* Joining yourself would deadlock. */
	if (pthread == curthread)
		return (EDEADLK);

	/* Validate the target under the thread-list lock. */
	THREAD_LIST_LOCK(curthread);
	if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0) {
		ret = ESRCH;
	} else if ((pthread->tlflags & TLFLAGS_DETACHED) != 0) {
		/* Detached threads cannot be joined. */
		ret = ESRCH;
	} else if (pthread->joiner != NULL) {
		/* Multiple joiners are not supported. */
		ret = ENOTSUP;
	}
	if (ret) {
		THREAD_LIST_UNLOCK(curthread);
		return (ret);
	}
	/* Set the running thread to be the joiner: */
	pthread->joiner = curthread;

	THREAD_LIST_UNLOCK(curthread);

	/* Undo the joiner registration if we are cancelled mid-wait. */
	THR_CLEANUP_PUSH(curthread, backout_join, pthread);
	oldcancel = _thr_cancel_enter(curthread);

	/* Sleep on the target's state word until it reaches PS_DEAD. */
	while ((state = pthread->state) != PS_DEAD) {
		if (abstime != NULL) {
			/* Convert the absolute deadline to time left. */
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0) {
				ret = ETIMEDOUT;
				break;
			}
			tsp = &ts2;
		} else
			tsp = NULL;
		ret = _thr_umtx_wait(&pthread->state, state, tsp,
		    CLOCK_REALTIME);
		if (ret == ETIMEDOUT)
			break;
	}

	_thr_cancel_leave(curthread, oldcancel);
	THR_CLEANUP_POP(curthread, 0);

	if (ret == ETIMEDOUT) {
		/* Timed out: withdraw as joiner, leave the thread alone. */
		THREAD_LIST_LOCK(curthread);
		pthread->joiner = NULL;
		THREAD_LIST_UNLOCK(curthread);
	} else {
		/*
		 * Joined successfully: grab the exit value, detach the
		 * thread and hand it to the garbage collector.
		 */
		ret = 0;
		tmp = pthread->ret;
		THREAD_LIST_LOCK(curthread);
		pthread->tlflags |= TLFLAGS_DETACHED;
		pthread->joiner = NULL;
		THR_GCLIST_ADD(pthread);
		THREAD_LIST_UNLOCK(curthread);

		if (thread_return != NULL)
			*thread_return = tmp;
	}
	return (ret);
}
/*
 * Cancellation behavior:
 *   if the thread is canceled, joinee is not recycled.
 *
 * Common implementation for pthread_join() and its timed variant:
 * waits (optionally until 'abstime' on CLOCK_REALTIME) for the target's
 * tid to become TID_TERMINATED, then collects its exit value and offers
 * it to the garbage collector.
 *
 * Returns 0 on success, or EINVAL/EDEADLK/ESRCH/ENOTSUP/ETIMEDOUT.
 */
static int
join_common(pthread_t pthread, void **thread_return,
    const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	struct timespec ts, ts2, *tsp;
	void *tmp;
	long tid;
	int ret = 0;

	if (pthread == NULL)
		return (EINVAL);

	/* Joining yourself would deadlock. */
	if (pthread == curthread)
		return (EDEADLK);

	/* On success this also acquires the target's thread lock. */
	if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0)
		return (ESRCH);

	if ((pthread->flags & THR_FLAGS_DETACHED) != 0) {
		/* Detached threads cannot be joined. */
		ret = EINVAL;
	} else if (pthread->joiner != NULL) {
		/* Multiple joiners are not supported. */
		ret = ENOTSUP;
	}
	if (ret) {
		THR_THREAD_UNLOCK(curthread, pthread);
		return (ret);
	}
	/* Set the running thread to be the joiner: */
	pthread->joiner = curthread;

	THR_THREAD_UNLOCK(curthread, pthread);

	/* Undo the joiner registration if we are cancelled mid-wait. */
	THR_CLEANUP_PUSH(curthread, backout_join, pthread);
	_thr_cancel_enter(curthread);

	/* Sleep on the tid word until the target terminates. */
	tid = pthread->tid;
	while (pthread->tid != TID_TERMINATED) {
		_thr_testcancel(curthread);
		if (abstime != NULL) {
			/* Convert the absolute deadline to time left. */
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0) {
				ret = ETIMEDOUT;
				break;
			}
			tsp = &ts2;
		} else
			tsp = NULL;
		ret = _thr_umtx_wait(&pthread->tid, tid, tsp);
		if (ret == ETIMEDOUT)
			break;
	}

	_thr_cancel_leave(curthread, 0);
	THR_CLEANUP_POP(curthread, 0);

	if (ret == ETIMEDOUT) {
		/* Timed out: withdraw as joiner, leave the thread alone. */
		THR_THREAD_LOCK(curthread, pthread);
		pthread->joiner = NULL;
		THR_THREAD_UNLOCK(curthread, pthread);
	} else {
		/*
		 * Joined successfully: grab the exit value, detach the
		 * thread and let the GC reclaim it.
		 */
		ret = 0;
		tmp = pthread->ret;
		THR_THREAD_LOCK(curthread, pthread);
		pthread->flags |= THR_FLAGS_DETACHED;
		pthread->joiner = NULL;
		_thr_try_gc(curthread, pthread); /* thread lock released */

		if (thread_return != NULL)
			*thread_return = tmp;
	}
	return (ret);
}
void _main(int argc, char *argv[]) { (void)argc; (void)argv; syslog(LOG_INFO, "initializing core"); /* init SCL subsystem: */ syslog(LOG_INFO, "initializing signaling and communication link (SCL)"); if (scl_init("core") != 0) { syslog(LOG_CRIT, "could not init scl module"); exit(EXIT_FAILURE); } /* init params subsystem: */ syslog(LOG_INFO, "initializing opcd interface"); opcd_params_init("core.", 1); /* initialize logger: */ syslog(LOG_INFO, "opening logger"); if (logger_open() != 0) { syslog(LOG_CRIT, "could not open logger"); exit(EXIT_FAILURE); } syslog(LOG_CRIT, "logger opened"); sleep(1); /* give scl some time to establish a link between publisher and subscriber */ LOG(LL_INFO, "+------------------+"); LOG(LL_INFO, "| core startup |"); LOG(LL_INFO, "+------------------+"); LOG(LL_INFO, "initializing system"); /* set-up real-time scheduling: */ struct sched_param sp; sp.sched_priority = sched_get_priority_max(SCHED_FIFO); sched_setscheduler(getpid(), SCHED_FIFO, &sp); if (mlockall(MCL_CURRENT | MCL_FUTURE)) { LOG(LL_ERROR, "mlockall() failed"); exit(EXIT_FAILURE); } /* initialize hardware/drivers: */ omap_i2c_bus_init(); baro_altimeter_init(); ultra_altimeter_init(); ahrs_init(); motors_init(); voltage_reader_start(); //gps_init(); LOG(LL_INFO, "initializing model/controller"); model_init(); ctrl_init(); /* initialize command interface */ LOG(LL_INFO, "initializing cmd interface"); cmd_init(); /* prepare main loop: */ for (int i = 0; i < NUM_AVG; i++) { output_avg[i] = sliding_avg_create(OUTPUT_RATIO, 0.0f); } LOG(LL_INFO, "system up and running"); struct timespec ts_curr; struct timespec ts_prev; struct timespec ts_diff; clock_gettime(CLOCK_REALTIME, &ts_curr); /* run model and controller: */ while (1) { /* calculate dt: */ ts_prev = ts_curr; clock_gettime(CLOCK_REALTIME, &ts_curr); TIMESPEC_SUB(ts_diff, ts_curr, ts_prev); float dt = (float)ts_diff.tv_sec + (float)ts_diff.tv_nsec / (float)NSEC_PER_SEC; /* read sensor values into model input structure: */ 
model_input_t model_input; model_input.dt = dt; ahrs_read(&model_input.ahrs_data); gps_read(&model_input.gps_data); model_input.ultra_z = ultra_altimeter_read(); model_input.baro_z = baro_altimeter_read(); /* execute model step: */ model_state_t model_state; model_step(&model_state, &model_input); /* execute controller step: */ mixer_in_t mixer_in; ctrl_step(&mixer_in, dt, &model_state); /* set up mixer input: */ mixer_in.pitch = sliding_avg_calc(output_avg[AVG_PITCH], mixer_in.pitch); mixer_in.roll = sliding_avg_calc(output_avg[AVG_ROLL], mixer_in.roll); mixer_in.yaw = sliding_avg_calc(output_avg[AVG_YAW], mixer_in.yaw); mixer_in.gas = sliding_avg_calc(output_avg[AVG_GAS], mixer_in.gas); /* write data to motor mixer: */ EVERY_N_TIMES(OUTPUT_RATIO, motors_write(&mixer_in)); } }