/**
 * Unlink, delete and free a nbr2_list entry.
 *
 * Also releases the referenced 2-hop neighbor entry when nothing points
 * at it anymore, and flags the MPR set / routing table for recalculation.
 */
static void
olsr_del_nbr2_list(struct neighbor_2_list_entry *nbr2_list)
{
  struct neighbor_2_entry *two_hop = nbr2_list->neighbor_2;

  /* No neighbor references the 2-hop entry anymore: drop it as well. */
  if (two_hop->neighbor_2_pointer < 1) {
    DEQUEUE_ELEM(two_hop);
    free(two_hop);
  }

  /*
   * Kill running timers.
   */
  olsr_stop_timer(nbr2_list->nbr2_list_timer);
  nbr2_list->nbr2_list_timer = NULL;

  /* Unlink the list entry itself and release it. */
  DEQUEUE_ELEM(nbr2_list);
  free(nbr2_list);

  /* Set flags to recalculate the MPR set and the routing table */
  changes_neighborhood = true;
  changes_topology = true;
}
/** * called at unload: free everything * * XXX: should I delete the hosts/services/resolv.conf files on exit? */ void name_destructor(void) { OLSR_PRINTF(2, "NAME PLUGIN: exit. cleaning up...\n"); free_name_entry_list(&my_names); free_name_entry_list(&my_services); free_name_entry_list(&my_forwarders); free_all_list_entries(name_list); free_all_list_entries(service_list); free_all_list_entries(forwarder_list); free_all_list_entries(latlon_list); olsr_stop_timer(write_file_timer); olsr_stop_timer(msg_gen_timer); regfree(®ex_t_name); regfree(®ex_t_service); mapwrite_exit(); }
/** * destructor - called at unload */ #ifdef _WRS_KERNEL void olsrd_dotdraw_exit(void) #else /* _WRS_KERNEL */ void olsr_plugin_exit(void) #endif /* _WRS_KERNEL */ { if (writetimer_entry) { close(outbuffer_socket); abuf_free(&outbuffer); olsr_stop_timer(writetimer_entry); } if (ipc_socket != -1) { CLOSE(ipc_socket); } }
/**
 * Stop and delete all timers.
 *
 * Visits every bucket of the timer wheel and drains it; each call to
 * olsr_stop_timer() unlinks the bucket's current head entry.
 */
void
olsr_flush_timers(void)
{
  unsigned int slot;

  for (slot = 0; slot < TIMER_WHEEL_SLOTS; slot++) {
    struct list_node *bucket = &timer_wheel[slot & TIMER_WHEEL_MASK];

    /* Kill every entry hanging off this hash bucket. */
    while (!list_is_empty(bucket)) {
      olsr_stop_timer(list2timer(bucket->next));
    }
  }
}
static bool olsr_delete_hna_net_entry(struct hna_net *net_to_delete) { #ifdef DEBUG struct ipaddr_str buf1, buf2; #endif /* DEBUG */ struct hna_entry *hna_gw; bool removed_entry = false; #ifdef __linux__ if (is_prefix_inetgw(&net_to_delete->hna_prefix)) { /* modify smart gateway entry if necessary */ olsr_delete_gateway_entry(&net_to_delete->hna_gw->A_gateway_addr, net_to_delete->hna_prefix.prefix_len, false); } #endif /* __linux__ */ olsr_stop_timer(net_to_delete->hna_net_timer); net_to_delete->hna_net_timer = NULL; /* be pedandic */ hna_gw = net_to_delete->hna_gw; #ifdef DEBUG OLSR_PRINTF(5, "HNA: timeout %s via hna-gw %s\n", olsr_ip_prefix_to_string(&net_to_delete->hna_prefix), olsr_ip_to_string(&buf2, &hna_gw->A_gateway_addr)); #endif /* DEBUG */ /* * Delete the rt_path for the entry. */ olsr_delete_routing_table(&net_to_delete->hna_prefix.prefix, net_to_delete->hna_prefix.prefix_len, &hna_gw->A_gateway_addr); DEQUEUE_ELEM(net_to_delete); /* Delete hna_gw if empty */ if (hna_gw->networks.next == &hna_gw->networks) { DEQUEUE_ELEM(hna_gw); olsr_cookie_free(hna_entry_mem_cookie, hna_gw); removed_entry = true; } olsr_cookie_free(hna_net_mem_cookie, net_to_delete); return removed_entry; }
/*
 * This is the one stop shop for all sorts of timer manipulation.
 * Depending on the passed in parameters a new timer is started,
 * an existing timer is changed, or an existing timer is terminated.
 *
 * rel_time == 0        -> stop the timer and clear *timer_ptr
 * *timer_ptr == NULL   -> start a fresh timer
 * otherwise            -> reschedule the running timer
 */
void
olsr_set_timer(struct timer_entry **timer_ptr, unsigned int rel_time, uint8_t jitter_pct, bool periodical, timer_cb_func cb_func,
               void *context, struct olsr_cookie_info *cookie)
{
  /* Bug fix: the test was inverted ("if (cookie)"), which clobbered every
   * caller-supplied cookie and left a missing one NULL. Substitute the
   * default cookie only when the caller did NOT provide one. */
  if (!cookie) {
    cookie = def_timer_ci;
  }

  if (rel_time == 0) {
    /* No good future time provided, kill it. */
    olsr_stop_timer(*timer_ptr);
    *timer_ptr = NULL;
  } else if ((*timer_ptr) == NULL) {
    /* No timer running, kick it. */
    *timer_ptr = olsr_start_timer(rel_time, jitter_pct, periodical, cb_func, context, cookie);
  } else {
    olsr_change_timer(*timer_ptr, rel_time, jitter_pct, periodical);
  }
}
/**
 * Walk through the timer list and check if any timer is ready to fire.
 * Callback the provided function with the context pointer.
 *
 * @param last_run in/out: the last clocktick that was processed; advanced
 *                 up to now_times (or by TIMER_WHEEL_SLOTS, whichever comes
 *                 first), then reset to now_times on return.
 */
static void
walk_timers(uint32_t * last_run)
{
  unsigned int total_timers_walked = 0, total_timers_fired = 0;
  unsigned int wheel_slot_walks = 0;

  /*
   * Check the required wheel slots since the last time a timer walk was invoked,
   * or check *all* the wheel slots, whatever is less work.
   * The latter is meant as a safety belt if the scheduler falls behind.
   */
  while ((*last_run <= now_times) && (wheel_slot_walks < TIMER_WHEEL_SLOTS)) {
    struct list_node tmp_head_node;
    /* keep some statistics */
    unsigned int timers_walked = 0, timers_fired = 0;

    /* Get the hash slot for this clocktick */
    struct list_node *const timer_head_node = &timer_wheel[*last_run & TIMER_WHEEL_MASK];

    /* Walk all entries hanging off this hash bucket. We treat this basically as a stack
     * so that we always know if and where the next element is.
     */
    list_head_init(&tmp_head_node);

    while (!list_is_empty(timer_head_node)) {
      /* the top element */
      struct list_node *const timer_node = timer_head_node->next;
      struct timer_entry *const timer = list2timer(timer_node);

      /*
       * Dequeue and insert to a temporary list.
       * We do this to avoid losing our walking context when
       * multiple timers fire.
       */
      list_remove(timer_node);
      list_add_after(&tmp_head_node, timer_node);
      timers_walked++;

      /* Ready to fire ? */
      if (TIMED_OUT(timer->timer_clock)) {

        OLSR_PRINTF(7, "TIMER: fire %s timer %p, ctx %p, " "at clocktick %u (%s)\n", timer->timer_cookie->ci_name, timer,
                    timer->timer_cb_context, (unsigned int)*last_run, olsr_wallclock_string());

        /* This timer is expired, call into the provided callback function */
        timer->timer_cb(timer->timer_cb_context);

        /* Only act on actually running timers: the callback may have
         * stopped (and thereby invalidated the schedule of) this timer */
        if (timer->timer_flags & OLSR_TIMER_RUNNING) {
          /*
           * Don't restart the periodic timer if the callback function has
           * stopped the timer.
           */
          if (timer->timer_period) {
            /* For periodical timers, rehash the random number and restart */
            timer->timer_random = random();
            olsr_change_timer(timer, timer->timer_period, timer->timer_jitter_pct, OLSR_TIMER_PERIODIC);
          } else {
            /* Singleshot timers are stopped */
            olsr_stop_timer(timer);
          }
        }

        timers_fired++;
      }
    }

    /*
     * Now merge the temporary list back to the old bucket.
     */
    list_merge(timer_head_node, &tmp_head_node);

    /* keep some statistics */
    total_timers_walked += timers_walked;
    total_timers_fired += timers_fired;

    /* Increment the time slot and wheel slot walk iteration */
    (*last_run)++;
    wheel_slot_walks++;
  }

  OLSR_PRINTF(7, "TIMER: processed %4u/%d clockwheel slots, " "timers walked %4u/%u, timers fired %u\n", wheel_slot_walks,
              TIMER_WHEEL_SLOTS, total_timers_walked, timer_mem_cookie->ci_usage, total_timers_fired);

  /*
   * If the scheduler has slipped and we have walked all wheel slots,
   * reset the last timer run.
   */
  *last_run = now_times;
}
/**
 * Look through the gateway list and select the best gateway
 * depending on the distance to this router.
 *
 * Skips nodes with broken paths or missing bandwidth, honours the
 * smart_gw_thresh-derived cost thresholds when configured, and — once both
 * an IPv4 and an IPv6 gateway have been settled and no threshold is set —
 * stops the periodic gw_def_timer.
 */
static void
gw_default_choose_gateway(void)
{
  uint64_t cost_ipv4_threshold = UINT64_MAX;
  uint64_t cost_ipv6_threshold = UINT64_MAX;
  bool eval_cost_ipv4_threshold = false;
  bool eval_cost_ipv6_threshold = false;
  struct gateway_entry *inet_ipv4 = NULL;
  struct gateway_entry *inet_ipv6 = NULL;
  uint64_t cost_ipv4 = UINT64_MAX;
  uint64_t cost_ipv6 = UINT64_MAX;
  struct gateway_entry *gw;
  struct tc_entry *tc;
  bool dual;

  if (olsr_cnf->smart_gw_thresh) {
    /* determine the path cost thresholds from the currently selected gateways */
    gw = olsr_get_ipv4_inet_gateway();
    if (gw) {
      tc = olsr_lookup_tc_entry(&gw->originator);
      if (tc) {
        uint64_t cost = gw_default_weigh_costs(tc->path_cost, gw->uplink, gw->downlink);
        cost_ipv4_threshold = gw_default_calc_threshold(cost);
        eval_cost_ipv4_threshold = true;
      }
    }
    gw = olsr_get_ipv6_inet_gateway();
    if (gw) {
      tc = olsr_lookup_tc_entry(&gw->originator);
      if (tc) {
        uint64_t cost = gw_default_weigh_costs(tc->path_cost, gw->uplink, gw->downlink);
        cost_ipv6_threshold = gw_default_calc_threshold(cost);
        eval_cost_ipv6_threshold = true;
      }
    }
  }

  OLSR_FOR_ALL_GATEWAY_ENTRIES(gw) {
    uint64_t path_cost;
    tc = olsr_lookup_tc_entry(&gw->originator);

    if (!tc) {
      /* gateways should not exist without tc entry */
      continue;
    }

    if (tc->path_cost == ROUTE_COST_BROKEN) {
      /* do not consider nodes with an infinite ETX */
      continue;
    }

    if (!gw->uplink || !gw->downlink) {
      /* do not consider nodes without bandwidth or with a uni-directional link */
      continue;
    }

    /* determine the path cost */
    path_cost = gw_default_weigh_costs(tc->path_cost, gw->uplink, gw->downlink);

    /* lower-cost IPv4 candidate (respecting NAT policy and threshold)? */
    if (!gw_def_finished_ipv4 && gw->ipv4 && gw->ipv4nat == olsr_cnf->smart_gw_allow_nat && path_cost < cost_ipv4
        && (!eval_cost_ipv4_threshold || (path_cost < cost_ipv4_threshold))) {
      inet_ipv4 = gw;
      cost_ipv4 = path_cost;
    }
    /* lower-cost IPv6 candidate (respecting threshold)? */
    if (!gw_def_finished_ipv6 && gw->ipv6 && path_cost < cost_ipv6
        && (!eval_cost_ipv6_threshold || (path_cost < cost_ipv6_threshold))) {
      inet_ipv6 = gw;
      cost_ipv6 = path_cost;
    }
  } OLSR_FOR_ALL_GATEWAY_ENTRIES_END(gw)

  /* determine if we found an IPv4 and IPv6 gateway */
  gw_def_finished_ipv4 |= (inet_ipv4 != NULL);
  gw_def_finished_ipv6 |= (inet_ipv6 != NULL);

  /* determine if we are dealing with a dual stack gateway */
  dual = (inet_ipv4 == inet_ipv6) && (inet_ipv4 != NULL);

  if (inet_ipv4) {
    /* we are dealing with an IPv4 or dual stack gateway */
    olsr_set_inet_gateway(&inet_ipv4->originator, true, dual);
  }
  if (inet_ipv6 && !dual) {
    /* we are dealing with an IPv6-only gateway */
    olsr_set_inet_gateway(&inet_ipv6->originator, false, true);
  }

  if ((olsr_cnf->smart_gw_thresh == 0) && gw_def_finished_ipv4 && gw_def_finished_ipv6) {
    /* stop looking for a better gateway */
    olsr_stop_timer(gw_def_timer);
    gw_def_timer = NULL;
  }
}
/** * Look through the gateway list and select the best gateway * depending on the distance to this router */ static void gw_default_choose_gateway(void) { uint64_t cost_ipv4_threshold = UINT64_MAX; uint64_t cost_ipv6_threshold = UINT64_MAX; bool cost_ipv4_threshold_valid = false; bool cost_ipv6_threshold_valid = false; struct gateway_entry *chosen_gw_ipv4 = NULL; struct gateway_entry *chosen_gw_ipv6 = NULL; uint64_t chosen_gw_ipv4_costs = UINT64_MAX; uint64_t chosen_gw_ipv6_costs = UINT64_MAX; struct gateway_entry *gw; bool dual = false; if (olsr_cnf->smart_gw_thresh) { /* determine the path cost thresholds */ uint64_t cost = gw_default_getcosts(olsr_get_inet_gateway(false)); if (cost != UINT64_MAX) { cost_ipv4_threshold = gw_default_calc_threshold(cost); cost_ipv4_threshold_valid = true; } cost = gw_default_getcosts(olsr_get_inet_gateway(true)); if (cost != UINT64_MAX) { cost_ipv6_threshold = gw_default_calc_threshold(cost); cost_ipv6_threshold_valid = true; } } OLSR_FOR_ALL_GATEWAY_ENTRIES(gw) { uint64_t gw_cost = gw_default_getcosts(gw); if (gw_cost == UINT64_MAX) { /* never select a node with infinite costs */ continue; } if (gw_def_choose_new_ipv4_gw) { bool gw_eligible_v4 = gw->ipv4 /* && (olsr_cnf->ip_version == AF_INET || olsr_cnf->use_niit) *//* contained in gw_def_choose_new_ipv4_gw */ && (olsr_cnf->smart_gw_allow_nat || !gw->ipv4nat); if (gw_eligible_v4 && gw_cost < chosen_gw_ipv4_costs && (!cost_ipv4_threshold_valid || (gw_cost < cost_ipv4_threshold))) { chosen_gw_ipv4 = gw; chosen_gw_ipv4_costs = gw_cost; } } if (gw_def_choose_new_ipv6_gw) { bool gw_eligible_v6 = gw->ipv6 /* && olsr_cnf->ip_version == AF_INET6 *//* contained in gw_def_choose_new_ipv6_gw */; if (gw_eligible_v6 && gw_cost < chosen_gw_ipv6_costs && (!cost_ipv6_threshold_valid || (gw_cost < cost_ipv6_threshold))) { chosen_gw_ipv6 = gw; chosen_gw_ipv6_costs = gw_cost; } } } OLSR_FOR_ALL_GATEWAY_ENTRIES_END(gw) /* determine if we should keep looking for IPv4 and/or IPv6 gateways */ 
gw_def_choose_new_ipv4_gw = gw_def_choose_new_ipv4_gw && (chosen_gw_ipv4 == NULL); gw_def_choose_new_ipv6_gw = gw_def_choose_new_ipv6_gw && (chosen_gw_ipv6 == NULL); /* determine if we are dealing with a dual stack gateway */ dual = chosen_gw_ipv4 && (chosen_gw_ipv4 == chosen_gw_ipv6); if (chosen_gw_ipv4) { /* we are dealing with an IPv4 or dual stack gateway */ olsr_set_inet_gateway(&chosen_gw_ipv4->originator, chosen_gw_ipv4_costs, true, dual); } if (chosen_gw_ipv6 && !dual) { /* we are dealing with an IPv6-only gateway */ olsr_set_inet_gateway(&chosen_gw_ipv6->originator, chosen_gw_ipv6_costs, false, true); } if ((olsr_cnf->smart_gw_thresh == 0) && !gw_def_choose_new_ipv4_gw && !gw_def_choose_new_ipv6_gw) { /* stop looking for a better gateway */ olsr_stop_timer(gw_def_timer); gw_def_timer = NULL; } }
/**
 * Remove an OLSR interface: drop its link entries and routes, unlink it
 * from the global interface list, stop its message-generation timers,
 * close its sockets and free it. Exits the daemon when the last interface
 * disappears and allow_no_interfaces is not set.
 *
 * @param iface the configured interface wrapper; its interf pointer is
 *              cleared and configured is reset to 0.
 */
void
olsr_remove_interface(struct olsr_if * iface)
{
  struct interface *ifp, *tmp_ifp;
  ifp = iface->interf;

  OLSR_PRINTF(1, "Removing interface %s (%d)\n", iface->name, ifp->if_index);
  olsr_syslog(OLSR_LOG_INFO, "Removing interface %s\n", iface->name);

  olsr_delete_link_entry_by_ip(&ifp->ip_addr);

  /*
   * Call possible ifchange functions registered by plugins
   */
  olsr_trigger_ifchange(ifp->if_index, ifp, IFCHG_IF_REMOVE);

  /* cleanup routes over this interface */
  olsr_delete_interface_routes(ifp->if_index);

  /* Dequeue from the singly-linked ifnet list (walk to the predecessor) */
  if (ifp == ifnet) {
    ifnet = ifp->int_next;
  } else {
    tmp_ifp = ifnet;
    while (tmp_ifp->int_next != ifp) {
      tmp_ifp = tmp_ifp->int_next;
    }
    tmp_ifp->int_next = ifp->int_next;
  }

  /* Remove output buffer */
  net_remove_buffer(ifp);

  /* Check main addr */
  /* deactivated to prevent change of originator IP */
#if 0
  if (ipequal(&olsr_cnf->main_addr, &ifp->ip_addr)) {
    if (ifnet == NULL) {
      /* No more interfaces */
      memset(&olsr_cnf->main_addr, 0, olsr_cnf->ipsize);
      OLSR_PRINTF(1, "No more interfaces...\n");
    } else {
      struct ipaddr_str buf;
      olsr_cnf->main_addr = ifnet->ip_addr;
      OLSR_PRINTF(1, "New main address: %s\n", olsr_ip_to_string(&buf, &olsr_cnf->main_addr));
      olsr_syslog(OLSR_LOG_INFO, "New main address: %s\n", olsr_ip_to_string(&buf, &olsr_cnf->main_addr));
    }
  }
#endif /* 0 */

  /*
   * Deregister functions for periodic message generation
   */
  olsr_stop_timer(ifp->hello_gen_timer);
  olsr_stop_timer(ifp->tc_gen_timer);
  olsr_stop_timer(ifp->mid_gen_timer);
  olsr_stop_timer(ifp->hna_gen_timer);

  iface->configured = 0;
  iface->interf = NULL;

  /* Close olsr socket: deregister from the scheduler first, then close */
  remove_olsr_socket(ifp->olsr_socket, &olsr_input, NULL);
  close(ifp->olsr_socket);
  remove_olsr_socket(ifp->send_socket, &olsr_input, NULL);
  close(ifp->send_socket);

  /* Free memory */
  free(ifp->int_name);
  free(ifp);

  if ((ifnet == NULL) && (!olsr_cnf->allow_no_interfaces)) {
    olsr_syslog(OLSR_LOG_INFO, "No more active interfaces - exiting.\n");
    olsr_exit("No more active interfaces - exiting.\n", EXIT_FAILURE);
  }
}