/**
 * Merge dynamic dtm
 *
 * Folds every cell of the dynamic intermediate map (dyninter) into the
 * internal map. `it` walks `internal` in lockstep with the range-for over
 * `dyninter`; `index` is the matching offset into the ground-swap buffer
 * `gndinter`. Cells that merged get their TIME field stamped with the
 * current reference time.
 *
 * NOTE(review): assumes internal, dyninter and gndinter all have the same
 * cell count and ordering — TODO confirm against their construction.
 */
void atlaas::merge() {
    bool is_vertical;
    size_t index = 0;
    float time_ref = get_reference_time();
    auto it = internal.begin();
    for (auto& dyninfo : dyninter) {
        /* Only consider cells that accumulated at least one point, and
         * whose squared-distance score is not much worse than the stored
         * cell's (tolerance of 4; units are whatever DIST_SQ carries —
         * presumably squared meters, verify against the producer). */
        if ( dyninfo[N_POINTS] > 0 &&
             ( (*it)[N_POINTS] < 1 ||
               (*it)[DIST_SQ] - dyninfo[DIST_SQ] > -4 ) ) {
            /* compute the real variance (according to Knuth's bible)
             * VARIANCE holds the running sum of squared deviations until
             * divided by (n - 1) here; only meaningful for n > 2. */
            if (dyninfo[N_POINTS] > 2)
                dyninfo[VARIANCE] /= dyninfo[N_POINTS] - 1;
            is_vertical = dyninfo[VARIANCE] > variance_threshold;
            /* Overwrite outright when the stored cell is empty, or when the
             * new cell has enough samples and a clearly better DIST_SQ. */
            if ( (*it)[N_POINTS] < 1 ||
                 ( dyninfo[N_POINTS] > 2 &&
                   (*it)[DIST_SQ] - dyninfo[DIST_SQ] > 4 ) ) {
                // init
                *it = dyninfo;
            } else if (use_swap) {
                if ( is_vertical == ( (*it)[VARIANCE] > variance_threshold) ) {
                    // same state
                    // if the cells are flat and differ more than 10cm, swap
                    // (note: one-sided test — only when the stored mean is
                    // higher than the new one; confirm this is intended)
                    if (!is_vertical && (( (*it)[Z_MEAN] - dyninfo[Z_MEAN] ) > 0.1 )) {
                        gndinter[index] = *it;
                        *it = dyninfo;
                        // TODO (*it)[DYNAMIC] += 1.0;
                    } else {
                        merge(*it, dyninfo);
                    }
                } else if ( is_vertical ) {
                    // was flat, backup the cell in ground swap
                    gndinter[index] = *it;
                    *it = dyninfo;
                    // TODO (*it)[DYNAMIC] += 1.0;
                } else {
                    // was vertical, revert ground and merge
                    *it = gndinter[index];
                    merge(*it, dyninfo);
                    // TODO (*it)[DYNAMIC] += 1.0;
                    // TODO gndinter[index] = zeros; ???
                }
            } else {
                // swap disabled: plain statistical merge of the two cells
                merge(*it, dyninfo);
            }
            // stamp the cell with the time of this merge pass
            (*it)[TIME] = time_ref;
        }
        it++;
        index++;
    }
}
void *Task::run(void *arg) { Task *p = reinterpret_cast<Task *>(arg); struct sched_param sched_attr; int policy; pthread_t tid = pthread_self(); Scheduler &sched = Scheduler::get_instance(); m_thread_syncpoint.lock(); m_thread_syncpoint.condition_satisfied(); /* Set the scheduler parameters. Linux at least fails when the * parameters are set before creating the thread. The parameters must * be set BY the affected thread. Stupid little penguins!!! */ if(pthread_getschedparam(tid, &policy, &sched_attr) != 0) { m_thread_syncpoint.release(); m_thread_syncpoint.lock(); EPRINTF("Failed getting task schedule parameters\n"); return NULL; } policy = SCHED_FIFO; sched_attr.sched_priority = p->m_props.prio; if(pthread_setschedparam(tid, policy, &sched_attr) != 0) { m_thread_syncpoint.release(); m_thread_syncpoint.unlock(); EPRINTF("Failed setting task schedule parameters\n"); EPRINTF("Did you remember sudo?\n"); return NULL; } /* Make sure the thread is being scheduled at the right priority. Linux * requires that the application be run as root or permissions have * been set through PAM to allow use of real time thread priority * scheduling. Otherwise, the checks below will fail and we'll exit the * thread. 
*/ if(pthread_getschedparam(tid, &policy, &sched_attr) != 0) { m_thread_syncpoint.release(); m_thread_syncpoint.unlock(); EPRINTF("Failed getting task schedule parameters\n"); return NULL; } if(SCHED_FIFO != policy) { m_thread_syncpoint.release(); m_thread_syncpoint.unlock(); EPRINTF("Failed to set real time scheduling policy\n"); return NULL; } if(sched_attr.sched_priority != static_cast<int>(p->m_props.prio)) { m_thread_syncpoint.release(); m_thread_syncpoint.unlock(); EPRINTF("Failed to set real time priority for task %s\n", p->m_name); return NULL; } DPRINTF("Successfully registered task %s at priority %d\n", p->m_name, sched_attr.sched_priority); units::Nanoseconds ref_time(0); p->m_operational = true; m_thread_syncpoint.release(); m_thread_syncpoint.unlock(); while(true) { units::Nanoseconds start_time(0); get_time(start_time); /* If this is a periodic tasks, wait to be scheduled. */ if(p->m_impl->rategroup_sync) { if(p->m_impl->first_pass) { /* We grab the lock and hold it while we're executing so no * other task jumps in. One task at a time per * rategroup! The lock gets released once we get into the * wait call. */ p->m_impl->rategroup_sync->lock(); /* Compute a wake up time to be used on the first pass. */ p->m_impl->expected_wake_time = get_reference_time(); p->m_impl->expected_wake_time -= p->m_impl->expected_wake_time % p->m_props.period; p->m_impl->first_pass = false; } /* We use reference time as a post condition for the wait. The * reference time when we wake up should be later than the time * at which we went to sleep. Otherwise, we've experienced a * spurious wakeup. 
*/ p->m_impl->expected_wake_time += p->m_props.period; DPRINTF("%s: expected wake time = %" PRId64 "\n", p->m_name, int64_t(p->m_impl->expected_wake_time)); while((true == p->m_operational) && (ref_time = get_reference_time()) < p->m_impl->expected_wake_time) { DPRINTF("%s: Tref = %" PRId64 ", expected_wake_time = %" PRId64 "\n", p->m_name, int64_t(ref_time), int64_t(p->m_impl->expected_wake_time)); if(false == p->m_impl->rategroup_sync->wait()) { EPRINTF("%s: Error in periodic wait\n", p->m_name); p->m_impl->rategroup_sync->unlock(); return NULL; } DPRINTF("%s:Woke up\n", p->m_name); } DPRINTF("%s: Tref = %" PRId64 ", expected_wake_time = %" PRId64 "\n", p->m_name, int64_t(ref_time), int64_t(p->m_impl->expected_wake_time)); /* Wait for all of the tasks to be awoken and ready to run. * This relies on testing against an inverted wait condition * because we're not going to clear the condition until all the * tasks are awake. The end of frame task is allowed to begin * execution because it is the last task on the list (in * priority order). The end of frame task signals all of the * others that they may proceed. 
*/ if(false == p->m_is_eof_task) { p->m_impl->rategroup_sync->inverse_wait(); } } if(false == p->m_operational) { DPRINTF("%s: No longer operational\n", p->m_name); if(p->m_impl->rategroup_sync) { p->m_impl->rategroup_sync->unlock(); } return NULL; } if(p->m_props.is_present_in_schedule(sched.get_schedule()) || (0 == p->m_props.period)) { DPRINTF("Executing task %s\n", p->m_name); if(false == p->execute()) { DPRINTF("Task %s exiting\n", p->m_name); p->terminate(); if(p->m_impl->rategroup_sync) { p->m_impl->rategroup_sync->unlock(); } return NULL; } units::Nanoseconds end_time(0); get_time(end_time); p->m_props.last_runtime = end_time - start_time; if(p->m_props.last_runtime > p->m_props.max_runtime) { p->m_props.max_runtime = p->m_props.last_runtime; } DPRINTF("%s last_runtime = %" PRId64 ", max_runtime = %" PRId64 "\n", p->m_name, int64_t(p->m_props.last_runtime), int64_t(p->m_props.max_runtime)); } } return NULL; }