void olsr_print_neighbor_table(void) { /* The whole function doesn't do anything else. */ const int iplen = olsr_cnf->ip_version == AF_INET ? (INET_ADDRSTRLEN - 1) : (INET6_ADDRSTRLEN - 1); int idx; OLSR_PRINTF(1, "\n--- %s ------------------------------------------------ NEIGHBORS\n\n" "%*s\tHyst\tLQ\tETX\tSYM MPR MPRS will\n", olsr_wallclock_string(), iplen, "IP address"); for (idx = 0; idx < HASHSIZE; idx++) { struct neighbor_entry *neigh; for (neigh = neighbortable[idx].next; neigh != &neighbortable[idx]; neigh = neigh->next) { struct link_entry *lnk = get_best_link_to_neighbor(&neigh->neighbor_main_addr); if (lnk) { struct ipaddr_str buf; struct lqtextbuffer lqbuffer1, lqbuffer2; OLSR_PRINTF(1, "%-*s\t%5.3f\t%s\t%s\t%s %s %s %d\n", iplen, olsr_ip_to_string(&buf, &neigh->neighbor_main_addr), (double)lnk->L_link_quality, get_link_entry_text(lnk, '/', &lqbuffer1), get_linkcost_text(lnk->linkcost,false, &lqbuffer2), neigh->status == SYM ? "YES " : "NO ", neigh->is_mpr ? "YES " : "NO ", olsr_lookup_mprs_set(&neigh->neighbor_main_addr) == NULL ? "NO " : "YES ", neigh->willingness); } } } }
/**
 * Walk through the timer wheel and fire every timer that is ready.
 *
 * Each expired timer's callback is invoked with its stored context
 * pointer. Periodic timers are re-armed afterwards (unless the callback
 * stopped them); single-shot timers are stopped.
 *
 * @param last_run in/out: the clocktick up to which timers have already
 *        been processed; advanced to the current tick on return.
 */
static void walk_timers(uint32_t * last_run)
{
  unsigned int total_timers_walked = 0, total_timers_fired = 0;
  unsigned int wheel_slot_walks = 0;

  /*
   * Check the required wheel slots since the last time a timer walk was invoked,
   * or check *all* the wheel slots, whatever is less work.
   * The latter is meant as a safety belt if the scheduler falls behind.
   */
  while ((*last_run <= now_times) && (wheel_slot_walks < TIMER_WHEEL_SLOTS)) {
    struct list_node tmp_head_node;
    /* keep some statistics */
    unsigned int timers_walked = 0, timers_fired = 0;

    /* Get the hash slot for this clocktick */
    struct list_node *const timer_head_node = &timer_wheel[*last_run & TIMER_WHEEL_MASK];

    /* Walk all entries hanging off this hash bucket. We treat this basically as a stack
     * so that we always know if and where the next element is.
     */
    list_head_init(&tmp_head_node);

    while (!list_is_empty(timer_head_node)) {
      /* the top element */
      struct list_node *const timer_node = timer_head_node->next;
      struct timer_entry *const timer = list2timer(timer_node);

      /*
       * Dequeue and insert to a temporary list.
       * We do this to avoid losing our walking context when
       * multiple timers fire (a callback may add or remove timers,
       * mutating the bucket we are iterating over).
       */
      list_remove(timer_node);
      list_add_after(&tmp_head_node, timer_node);
      timers_walked++;

      /* Ready to fire ? */
      if (TIMED_OUT(timer->timer_clock)) {

        OLSR_PRINTF(7, "TIMER: fire %s timer %p, ctx %p, "
                    "at clocktick %u (%s)\n",
                    timer->timer_cookie->ci_name,
                    timer, timer->timer_cb_context, (unsigned int)*last_run, olsr_wallclock_string());

        /* This timer is expired, call into the provided callback function */
        timer->timer_cb(timer->timer_cb_context);

        /* Only act on actually running timers */
        if (timer->timer_flags & OLSR_TIMER_RUNNING) {
          /*
           * Don't restart the periodic timer if the callback function has
           * stopped the timer.
           */
          if (timer->timer_period) {
            /* For periodical timers, rehash the random number and restart */
            timer->timer_random = random();
            olsr_change_timer(timer, timer->timer_period, timer->timer_jitter_pct, OLSR_TIMER_PERIODIC);
          } else {
            /* Singleshot timers are stopped */
            olsr_stop_timer(timer);
          }
        }

        timers_fired++;
      }
    }

    /*
     * Now merge the temporary list back to the old bucket.
     */
    list_merge(timer_head_node, &tmp_head_node);

    /* keep some statistics */
    total_timers_walked += timers_walked;
    total_timers_fired += timers_fired;

    /* Increment the time slot and wheel slot walk iteration */
    (*last_run)++;
    wheel_slot_walks++;
  }

  OLSR_PRINTF(7, "TIMER: processed %4u/%d clockwheel slots, "
              "timers walked %4u/%u, timers fired %u\n",
              wheel_slot_walks, TIMER_WHEEL_SLOTS, total_timers_walked, timer_mem_cookie->ci_usage, total_timers_fired);

  /*
   * If the scheduler has slipped and we have walked all wheel slots,
   * reset the last timer run.
   */
  *last_run = now_times;
}