/* Handler for the GNRC_IPV6_NIB_SND_MC_RA event: send multicast router
 * advertisements on @p netif and re-schedule the next transmission.
 * NOTE(review): netif is dereferenced by gnrc_netif_acquire() before the
 * assert(netif != NULL) runs, so the assert cannot catch a NULL pointer —
 * consider moving it above the acquire. */
void _handle_snd_mc_ra(gnrc_netif_t *netif)
{
    gnrc_netif_acquire(netif);
    assert(netif != NULL);
    /* 6LoWPAN nodes (6LN) do not send multicast RAs */
    if (!gnrc_netif_is_6ln(netif)) {
        /* ra_sent saturates towards UINT8_MAX; the last NDP_MAX_FIN_RA_NUMOF
         * increments mark the "final" RAs of a deactivated router */
        bool final_ra = (netif->ipv6.ra_sent > (UINT8_MAX - NDP_MAX_FIN_RA_NUMOF));
        uint32_t next_ra_time = random_uint32_range(NDP_MIN_RA_INTERVAL_MS,
                                                    NDP_MAX_RA_INTERVAL_MS);
        uint32_t next_scheduled = _evtimer_lookup(netif, GNRC_IPV6_NIB_SND_MC_RA);

        /* router has router advertising interface or the RA is one of the
         * (now deactivated) routers final one (and there is no next
         * scheduled within the possible time for next_ra_time) */
        if ((final_ra && (next_scheduled > NDP_MAX_RA_INTERVAL_MS)) ||
            gnrc_netif_is_rtr_adv(netif)) {
            _snd_rtr_advs(netif, NULL, final_ra);
            /* record the send time in milliseconds, truncated to 32 bit */
            netif->ipv6.last_ra = (xtimer_now_usec64() / US_PER_MS) & UINT32_MAX;
            if ((netif->ipv6.ra_sent < NDP_MAX_INIT_RA_NUMOF) || final_ra) {
                /* the first NDP_MAX_INIT_RA_NUMOF RAs use a shorter maximum
                 * interval (initial advertisement phase) */
                if ((netif->ipv6.ra_sent < NDP_MAX_INIT_RA_NUMOF) &&
                    (next_ra_time > NDP_MAX_INIT_RA_INTERVAL)) {
                    next_ra_time = NDP_MAX_INIT_RA_INTERVAL;
                }
                netif->ipv6.ra_sent++;
            }
            /* netif->ipv6.ra_sent overflowed => this was our last final RA */
            if (netif->ipv6.ra_sent != 0) {
                _evtimer_add(netif, GNRC_IPV6_NIB_SND_MC_RA,
                             &netif->ipv6.snd_mc_ra, next_ra_time);
            }
        }
    }
    gnrc_netif_release(netif);
}
/* Start router discovery on @p iface: arm the router-solicitation counter and
 * schedule the first solicitation after a random desynchronization delay. */
void gnrc_ndp_host_init(gnrc_ipv6_netif_t *iface)
{
    /* random delay in microseconds so that hosts on the same link do not
     * solicit simultaneously */
    uint32_t delay_us = random_uint32_range(0, GNRC_NDP_MAX_RTR_SOL_DELAY * US_PER_SEC);

    mutex_lock(&iface->mutex);
    iface->rtr_sol_count = GNRC_NDP_MAX_RTR_SOL_NUMOF;
    DEBUG("ndp host: delayed initial router solicitation by %" PRIu32 " usec.\n",
          delay_us);
    _reschedule_rtr_sol(iface, delay_us);
    mutex_unlock(&iface->mutex);
}
/* Store @p mean as the base reachable time of @p if_entry and derive the
 * effective reachable time by scaling it with a random factor. */
static inline void _set_reach_time(gnrc_ipv6_netif_t *if_entry, uint32_t mean)
{
    /* random factor in [GNRC_NDP_MIN_RAND, GNRC_NDP_MAX_RAND); to avoid
     * floating point computation and have higher value entropy, these
     * boundaries are pre-multiplied by 10 — hence the division below */
    uint32_t factor = random_uint32_range(GNRC_NDP_MIN_RAND, GNRC_NDP_MAX_RAND);

    if_entry->reach_time_base = mean;
    if_entry->reach_time = (factor * if_entry->reach_time_base) / 10;
}
/* Send a router advertisement from every link-local unicast address of
 * @p iface (to @p dst, or multicast when dst selects that in the helper) and
 * re-arm the periodic advertisement timer.  An advertised lifetime of 0
 * (fin) marks the interface's final RA(s) before it stops advertising. */
static void _send_rtr_adv(gnrc_ipv6_netif_t *iface, ipv6_addr_t *dst)
{
    bool fin;
    uint32_t interval;

    mutex_lock(&iface->mutex);
    fin = (iface->adv_ltime == 0);
    assert((iface->min_adv_int != 0) && (iface->max_adv_int != 0));
    interval = random_uint32_range(iface->min_adv_int, iface->max_adv_int);
    /* BUGFIX: the flag tests used bitwise `|` (always non-zero), so this
     * "not an advertising interface" guard could never trigger; test the
     * flags with `&` instead */
    if (!fin && !((iface->flags & GNRC_IPV6_NETIF_FLAGS_ROUTER) &&
                  (iface->flags & GNRC_IPV6_NETIF_FLAGS_RTR_ADV))) {
        DEBUG("ndp rtr: interface %" PRIkernel_pid " is not an advertising interface\n",
              iface->pid);
        /* BUGFIX: release the mutex before the early return — previously the
         * interface mutex stayed locked, deadlocking the next caller */
        mutex_unlock(&iface->mutex);
        return;
    }
    if (iface->rtr_adv_count > 1) { /* regard for off-by-one error */
        iface->rtr_adv_count--;
        /* during the initial advertisement phase cap the interval */
        if (!fin && (interval > GNRC_NDP_MAX_INIT_RTR_ADV_INT)) {
            interval = GNRC_NDP_MAX_INIT_RTR_ADV_INT;
        }
    }
    if (!fin || (iface->rtr_adv_count > 1)) { /* regard for off-by-one-error */
        /* reset timer for next router advertisement */
        xtimer_remove(&iface->rtr_adv_timer);
        iface->rtr_adv_msg.type = GNRC_NDP_MSG_RTR_ADV_RETRANS;
        iface->rtr_adv_msg.content.ptr = (char *) iface;
        xtimer_set_msg(&iface->rtr_adv_timer, interval * SEC_IN_USEC,
                       &iface->rtr_adv_msg, gnrc_ipv6_pid);
    }
    mutex_unlock(&iface->mutex);
    for (int i = 0; i < GNRC_IPV6_NETIF_ADDR_NUMOF; i++) {
        ipv6_addr_t *src = &iface->addrs[i].addr;

        if (!ipv6_addr_is_unspecified(src) && ipv6_addr_is_link_local(src) &&
            !gnrc_ipv6_netif_addr_is_non_unicast(src)) {
            /* send one for every link local address (ideally there is only one) */
            gnrc_ndp_internal_send_rtr_adv(iface->pid, src, dst, fin);
        }
    }
}
/* Handle a received router solicitation (RFC 4861, section 6.2.6): validate
 * it, record the sender's link-layer address in the neighbor cache as STALE,
 * and schedule a (randomly delayed) router advertisement in response.
 * Non-router interfaces ignore the message silently.
 *
 * @param iface        PID of the receiving interface
 * @param pkt          full received packet (for option handling)
 * @param ipv6         parsed IPv6 header of the packet
 * @param rtr_sol      pointer to the router solicitation ICMPv6 payload
 * @param icmpv6_size  length of the ICMPv6 part (header + options) in bytes
 */
void gnrc_ndp_rtr_sol_handle(kernel_pid_t iface, gnrc_pktsnip_t *pkt,
                             ipv6_hdr_t *ipv6, ndp_rtr_sol_t *rtr_sol,
                             size_t icmpv6_size)
{
    gnrc_ipv6_netif_t *if_entry = gnrc_ipv6_netif_get(iface);

    /* NOTE(review): if_entry is dereferenced without a NULL check — assumes
     * @p iface always names a registered IPv6 interface; confirm callers */
    if (if_entry->flags & GNRC_IPV6_NETIF_FLAGS_ROUTER) {
        gnrc_ipv6_nc_t *nc_entry;
        int sicmpv6_size = (int)icmpv6_size, l2src_len = 0;
        uint8_t l2src[GNRC_IPV6_NC_L2_ADDR_MAX];
        uint16_t opt_offset = 0;
        /* options start directly behind the fixed RS header */
        uint8_t *buf = (uint8_t *)(rtr_sol + 1);

        /* check validity */
        if ((ipv6->hl != 255) || (rtr_sol->code != 0) ||
            (icmpv6_size < sizeof(ndp_rtr_sol_t))) {
            DEBUG("ndp: router solicitation was invalid\n");
            return;
        }
        sicmpv6_size -= sizeof(ndp_rtr_sol_t);
        /* walk the NDP options appended to the solicitation */
        while (sicmpv6_size > 0) {
            ndp_opt_t *opt = (ndp_opt_t *)(buf + opt_offset);

            switch (opt->type) {
                case NDP_OPT_SL2A:
                    l2src_len = gnrc_ndp_internal_sl2a_opt_handle(pkt, ipv6,
                                                                  rtr_sol->type,
                                                                  opt, l2src);
                    if (l2src_len < 0) {
                        /* -ENOTSUP can not happen */
                        /* invalid source link-layer address option */
                        return;
                    }
                    _stale_nc(iface, &ipv6->src, l2src, l2src_len);
                    break;

                default:
                    /* silently discard all other options */
                    break;
            }
            /* opt->len is given in units of 8 octets (RFC 4861, sec. 4.6) */
            opt_offset += (opt->len * 8);
            sicmpv6_size -= (opt->len * 8);

#if ENABLE_DEBUG
            if (sicmpv6_size < 0) {
                DEBUG("ndp: Option parsing out of sync.\n");
            }
#endif
        }
        /* send delayed */
        if (if_entry->flags & GNRC_IPV6_NETIF_FLAGS_RTR_ADV) {
            uint32_t delay;
            uint32_t ms = GNRC_NDP_MAX_RTR_ADV_DELAY;
#ifdef MODULE_GNRC_SIXLOWPAN_ND_ROUTER
            /* 6LoWPAN routers use a different maximum response delay */
            if (if_entry->flags & GNRC_IPV6_NETIF_FLAGS_SIXLOWPAN) {
                ms = GNRC_SIXLOWPAN_ND_MAX_RTR_ADV_DELAY;
            }
#endif
            delay = random_uint32_range(0, ms);
            xtimer_remove(&if_entry->rtr_adv_timer);
#ifdef MODULE_GNRC_SIXLOWPAN_ND_ROUTER
            /* in case of a 6LBR we have to check if the interface is actually
             * the 6lo interface */
            if (if_entry->flags & GNRC_IPV6_NETIF_FLAGS_SIXLOWPAN) {
                /* shadows the outer nc_entry on purpose: only used to address
                 * the delayed RA to the soliciting neighbor */
                gnrc_ipv6_nc_t *nc_entry = gnrc_ipv6_nc_get(iface, &ipv6->src);

                if (nc_entry != NULL) {
                    if_entry->rtr_adv_msg.type = GNRC_NDP_MSG_RTR_ADV_SIXLOWPAN_DELAY;
                    if_entry->rtr_adv_msg.content.ptr = nc_entry;
                    xtimer_set_msg(&if_entry->rtr_adv_timer, delay,
                                   &if_entry->rtr_adv_msg, gnrc_ipv6_pid);
                }
            }
#elif defined(MODULE_GNRC_NDP_ROUTER) || defined(MODULE_GNRC_SIXLOWPAN_ND_BORDER_ROUTER)
            if (ipv6_addr_is_unspecified(&ipv6->src)) {
                /* either multicast, if source unspecified */
                if_entry->rtr_adv_msg.type = GNRC_NDP_MSG_RTR_ADV_RETRANS;
                if_entry->rtr_adv_msg.content.ptr = if_entry;
                xtimer_set_msg(&if_entry->rtr_adv_timer, delay,
                               &if_entry->rtr_adv_msg, gnrc_ipv6_pid);
            }
            else {
                /* or unicast, if source is known */
                /* XXX: can't just use GNRC_NETAPI_MSG_TYPE_SND, since the next retransmission
                 * must also be set. */
                nc_entry = gnrc_ipv6_nc_get(iface, &ipv6->src);
                if (nc_entry) {
                    xtimer_set_msg(&nc_entry->rtr_adv_timer, delay,
                                   &nc_entry->rtr_adv_msg, gnrc_ipv6_pid);
                }
            }
#endif
        }
        nc_entry = gnrc_ipv6_nc_get(iface, &ipv6->src);
        if (nc_entry != NULL) {
            /* unset isRouter flag
             * (https://tools.ietf.org/html/rfc4861#section-6.2.6) */
            nc_entry->flags &= ~GNRC_IPV6_NC_IS_ROUTER;
        }
    }
    /* otherwise ignore silently */
}
/* LWMAC SLEEPING-state handler: decide whether — and when — to leave sleep
 * in order to transmit queued packets.  Broadcasts and neighbors with an
 * unknown wake-up phase are served immediately; for neighbors with a known
 * phase a wake-up timeout is armed so the write request (WR) hits their
 * listening window. */
static void _sleep_management(gnrc_netif_t *netif)
{
    /* If a packet is scheduled, no other (possible earlier) packet can be
     * sent before the first one is handled, even no broadcast
     */
    if (!gnrc_lwmac_timeout_is_running(netif, GNRC_LWMAC_TIMEOUT_WAIT_DEST_WAKEUP)) {
        gnrc_mac_tx_neighbor_t *neighbour;

        /* Check if there is packet remaining for retransmission */
        if (netif->mac.tx.current_neighbor != NULL) {
            neighbour = netif->mac.tx.current_neighbor;
        }
        else {
            /* Check if there are broadcasts to send and transmit immediately */
            /* NOTE(review): neighbors[0] appears to hold the broadcast queue —
             * confirm against the gnrc_mac neighbor table layout */
            if (gnrc_priority_pktqueue_length(&(netif->mac.tx.neighbors[0].queue)) > 0) {
                netif->mac.tx.current_neighbor = &(netif->mac.tx.neighbors[0]);
                lwmac_set_state(netif, GNRC_LWMAC_TRANSMITTING);
                return;
            }
            neighbour = _next_tx_neighbor(netif);
        }

        if (neighbour != NULL) {
            /* if phase is unknown, send immediately. */
            /* (a recorded phase can never exceed one wake-up interval, so a
             * larger value marks "phase unknown") */
            if (neighbour->phase > RTT_TICKS_TO_US(GNRC_LWMAC_WAKEUP_INTERVAL_US)) {
                netif->mac.tx.current_neighbor = neighbour;
                gnrc_lwmac_set_tx_continue(netif, false);
                netif->mac.tx.tx_burst_count = 0;
                lwmac_set_state(netif, GNRC_LWMAC_TRANSMITTING);
                return;
            }

            /* Offset in microseconds when the earliest (phase) destination
             * node wakes up that we have packets for.
             */
            uint32_t time_until_tx =
                RTT_TICKS_TO_US(_gnrc_lwmac_ticks_until_phase(neighbour->phase));

            /* If there's not enough time to prepare a WR to catch the phase
             * postpone to next interval */
            if (time_until_tx < GNRC_LWMAC_WR_PREPARATION_US) {
                time_until_tx += GNRC_LWMAC_WAKEUP_INTERVAL_US;
            }
            /* wake up early enough to have the WR ready in time */
            time_until_tx -= GNRC_LWMAC_WR_PREPARATION_US;

            /* add a random time before goto TX, for avoiding one node for
             * always holding the medium (if the receiver's phase is recorded earlier in this
             * particular node) */
            uint32_t random_backoff;
            random_backoff = random_uint32_range(0, GNRC_LWMAC_TIME_BETWEEN_WR_US);
            time_until_tx = time_until_tx + random_backoff;

            gnrc_lwmac_set_timeout(netif, GNRC_LWMAC_TIMEOUT_WAIT_DEST_WAKEUP, time_until_tx);

            /* Register neighbour to be the next */
            netif->mac.tx.current_neighbor = neighbour;

            /* Stop dutycycling, we're preparing to send. This prevents the
             * timeout arriving late, so that the destination phase would
             * be missed. */
            /* TODO: bad for power savings */
            rtt_handler(GNRC_LWMAC_EVENT_RTT_PAUSE, netif);
        }
    }
    else if (gnrc_lwmac_timeout_is_expired(netif, GNRC_LWMAC_TIMEOUT_WAIT_DEST_WAKEUP)) {
        /* the armed wake-up timeout fired: start the actual transmission */
        LOG_DEBUG("[LWMAC] Got timeout for dest wakeup, ticks: %" PRIu32 "\n",
                  rtt_get_counter());
        gnrc_lwmac_set_tx_continue(netif, false);
        netif->mac.tx.tx_burst_count = 0;
        lwmac_set_state(netif, GNRC_LWMAC_TRANSMITTING);
    }
}
/* Switch the LWMAC state machine of @p netif to @p newstate: run the exit
 * actions of the old state, then the entry actions of the new state, and
 * finally schedule a state-machine update (unless the new state handles its
 * own scheduling and returns early).  Invalid or unchanged states are
 * ignored. */
void lwmac_set_state(gnrc_netif_t *netif, gnrc_lwmac_state_t newstate)
{
    gnrc_lwmac_state_t oldstate = netif->mac.lwmac.state;

    if (newstate == oldstate) {
        return;
    }

    if (newstate >= GNRC_LWMAC_STATE_COUNT) {
        LOG_ERROR("ERROR: [LWMAC] Trying to set invalid state %u\n", newstate);
        return;
    }

    /* Already change state, but might be reverted to oldstate when needed */
    netif->mac.lwmac.state = newstate;

    /* Actions when leaving old state */
    switch (oldstate) {
        case GNRC_LWMAC_RECEIVING:
        case GNRC_LWMAC_TRANSMITTING: {
            /* Enable duty cycling again */
            rtt_handler(GNRC_LWMAC_EVENT_RTT_RESUME, netif);
#if (GNRC_LWMAC_ENABLE_DUTYCYLE_RECORD == 1)
            /* Output duty-cycle ratio */
            uint64_t duty;
            duty = (uint64_t) rtt_get_counter();
            duty = ((uint64_t) netif->mac.lwmac.awake_duration_sum_ticks) * 100 /
                   (duty - (uint64_t)netif->mac.lwmac.system_start_time_ticks);
            printf("[LWMAC]: achieved duty-cycle: %lu %% \n", (uint32_t)duty);
#endif
            break;
        }
        case GNRC_LWMAC_SLEEPING: {
            gnrc_lwmac_clear_timeout(netif, GNRC_LWMAC_TIMEOUT_WAKEUP_PERIOD);
            break;
        }
        default:
            break;
    }

    /* Actions when entering new state */
    switch (newstate) {
        /*********************** Operation states *********************************/
        case GNRC_LWMAC_LISTENING: {
            _gnrc_lwmac_set_netdev_state(netif, NETOPT_STATE_IDLE);
            break;
        }
        case GNRC_LWMAC_SLEEPING: {
            /* Put transceiver to sleep */
            _gnrc_lwmac_set_netdev_state(netif, NETOPT_STATE_SLEEP);
            /* We may have come here through RTT handler, so timeout may still be active */
            gnrc_lwmac_clear_timeout(netif, GNRC_LWMAC_TIMEOUT_WAKEUP_PERIOD);

            if (gnrc_lwmac_get_phase_backoff(netif)) {
                gnrc_lwmac_set_phase_backoff(netif, false);
                uint32_t alarm;

                rtt_clear_alarm();
                /* shift our own wake-up phase by a random amount that keeps a
                 * guard distance of 1.5 wake-up durations from both interval
                 * boundaries */
                alarm = random_uint32_range(RTT_US_TO_TICKS((3 * GNRC_LWMAC_WAKEUP_DURATION_US / 2)),
                                            RTT_US_TO_TICKS(GNRC_LWMAC_WAKEUP_INTERVAL_US -
                                                            (3 * GNRC_LWMAC_WAKEUP_DURATION_US / 2)));
                LOG_WARNING("WARNING: [LWMAC] phase backoffed: %lu us\n",
                            RTT_TICKS_TO_US(alarm));
                netif->mac.lwmac.last_wakeup = netif->mac.lwmac.last_wakeup + alarm;
                alarm = _next_inphase_event(netif->mac.lwmac.last_wakeup,
                                            RTT_US_TO_TICKS(GNRC_LWMAC_WAKEUP_INTERVAL_US));
                rtt_set_alarm(alarm, rtt_cb, (void *) GNRC_LWMAC_EVENT_RTT_WAKEUP_PENDING);
            }

            /* Return immediately, so no rescheduling */
            return;
        }
        /* Trying to send data */
        case GNRC_LWMAC_TRANSMITTING: {
            rtt_handler(GNRC_LWMAC_EVENT_RTT_PAUSE, netif);          /**< No duty cycling while TXing */
            _gnrc_lwmac_set_netdev_state(netif, NETOPT_STATE_IDLE);  /**< Power up netdev */
            break;
        }
        /* Receiving incoming data */
        case GNRC_LWMAC_RECEIVING: {
            rtt_handler(GNRC_LWMAC_EVENT_RTT_PAUSE, netif);          /**< No duty cycling while RXing */
            _gnrc_lwmac_set_netdev_state(netif, NETOPT_STATE_IDLE);  /**< Power up netdev */
            break;
        }
        case GNRC_LWMAC_STOPPED: {
            _gnrc_lwmac_set_netdev_state(netif, NETOPT_STATE_OFF);
            break;
        }
        /*********************** Control states ***********************************/
        case GNRC_LWMAC_START: {
            rtt_handler(GNRC_LWMAC_EVENT_RTT_START, netif);
            lwmac_set_state(netif, GNRC_LWMAC_LISTENING);
            break;
        }
        case GNRC_LWMAC_STOP: {
            rtt_handler(GNRC_LWMAC_EVENT_RTT_STOP, netif);
            lwmac_set_state(netif, GNRC_LWMAC_STOPPED);
            break;
        }
        case GNRC_LWMAC_RESET: {
            LOG_WARNING("WARNING: [LWMAC] Reset not yet implemented\n");
            lwmac_set_state(netif, GNRC_LWMAC_STOP);
            lwmac_set_state(netif, GNRC_LWMAC_START);
            break;
        }
        /**************************************************************************/
        default: {
            LOG_DEBUG("[LWMAC] No actions for entering state %u\n", newstate);
            return;
        }
    }

    lwmac_schedule_update(netif);
}
/* Event/Message loop for gcoap _pid thread.
 * Creates the UDP socket on GCOAP_PORT, then alternates between draining the
 * thread's message queue (request retransmission timeouts) and blocking on
 * the socket for incoming CoAP traffic via _listen(). */
static void *_event_loop(void *arg)
{
    msg_t msg_rcvd;
    (void)arg;

    msg_init_queue(_msg_queue, GCOAP_MSG_QUEUE_SIZE);

    /* bind to the CoAP port on any IPv6 address / any interface */
    sock_udp_ep_t local;
    memset(&local, 0, sizeof(sock_udp_ep_t));
    local.family = AF_INET6;
    local.netif = SOCK_ADDR_ANY_NETIF;
    local.port = GCOAP_PORT;

    int res = sock_udp_create(&_sock, &local, NULL, 0);
    if (res < 0) {
        DEBUG("gcoap: cannot create sock: %d\n", res);
        return 0;
    }

    while (1) {
        /* non-blocking: process at most one pending message per iteration */
        res = msg_try_receive(&msg_rcvd);
        if (res > 0) {
            switch (msg_rcvd.type) {
                case GCOAP_MSG_TYPE_TIMEOUT: {
                    gcoap_request_memo_t *memo =
                        (gcoap_request_memo_t *)msg_rcvd.content.ptr;

                    /* no retries remaining */
                    if ((memo->send_limit == GCOAP_SEND_LIMIT_NON) ||
                        (memo->send_limit == 0)) {
                        _expire_request(memo);
                    }
                    /* reduce retries remaining, double timeout and resend */
                    else {
                        memo->send_limit--;
                        /* exponential backoff with random variance, as in
                         * RFC 7252 retransmission timing */
                        unsigned i = COAP_MAX_RETRANSMIT - memo->send_limit;
                        uint32_t timeout = ((uint32_t)COAP_ACK_TIMEOUT << i) * US_PER_SEC;
                        uint32_t variance = ((uint32_t)COAP_ACK_VARIANCE << i) * US_PER_SEC;
                        timeout = random_uint32_range(timeout, timeout + variance);

                        ssize_t bytes = sock_udp_send(&_sock, memo->msg.data.pdu_buf,
                                                      memo->msg.data.pdu_len,
                                                      &memo->remote_ep);
                        if (bytes > 0) {
                            /* re-arm the response timer for the next retry */
                            xtimer_set_msg(&memo->response_timer, timeout,
                                           &memo->timeout_msg, _pid);
                        }
                        else {
                            DEBUG("gcoap: sock resend failed: %d\n", (int)bytes);
                            _expire_request(memo);
                        }
                    }
                    break;
                }
                default:
                    break;
            }
        }

        /* wait for / handle incoming CoAP messages on the socket */
        _listen(&_sock);
    }

    return 0;
}
/* Process a received GoMacH preamble-ACK: deduce the sender's wake-up phase
 * (CP) and public-channel sequence from the ACK so future unicast
 * transmissions can be timed to its listening period.  If the neighbor's
 * phase is too close to our own, trigger a random phase backoff. */
void gnrc_gomach_process_preamble_ack(gnrc_netif_t *netif, gnrc_pktsnip_t *pkt)
{
    assert(netif != NULL);
    assert(pkt != NULL);

    gnrc_gomach_frame_preamble_ack_t *gomach_preamble_ack_hdr = NULL;

    gnrc_pktsnip_t *gomach_snip = gnrc_pktsnip_search_type(pkt, GNRC_NETTYPE_GOMACH);
    if (gomach_snip == NULL) {
        LOG_ERROR("[GOMACH]: No gomach_snip found in gnrc_gomach_process_preamble_ack().\n");
        return;
    }
    else {
        gomach_preamble_ack_hdr = gomach_snip->data;
    }

    if (gomach_preamble_ack_hdr == NULL) {
        LOG_ERROR("[GOMACH]: preamble_ack_hdr is null.\n");
        return;
    }

    /* Mark the neighbor as phase-known */
    /* NOTE(review): current_neighbor is dereferenced without a NULL check —
     * assumes a TX procedure is ongoing whenever a preamble-ACK arrives;
     * confirm with the calling state machine */
    netif->mac.tx.current_neighbor->mac_type = GNRC_GOMACH_TYPE_KNOWN;

    /* Fetch and deduce the exact wake-up phase of the neighbor. */
    long int phase_us = gnrc_gomach_phase_now(netif) -
                        gomach_preamble_ack_hdr->phase_in_us;

    /* wrap a negative difference back into the current superframe */
    if (phase_us < 0) {
        phase_us += GNRC_GOMACH_SUPERFRAME_DURATION_US;
    }

    /* if the neighbor's CP lies within the minimum gap of our own phase,
     * schedule a random backoff of our phase to avoid persistent overlap */
    if (((uint32_t)phase_us > (GNRC_GOMACH_SUPERFRAME_DURATION_US - GNRC_GOMACH_CP_MIN_GAP_US)) ||
        ((uint32_t)phase_us < GNRC_GOMACH_CP_MIN_GAP_US)) {
        LOG_DEBUG("[GOMACH] t2u: own phase is close to the neighbor's.\n");
        gnrc_gomach_set_phase_backoff(netif, true);
        /* Set a random phase-backoff value. */
        netif->mac.prot.gomach.backoff_phase_us =
            random_uint32_range(GNRC_GOMACH_CP_MIN_GAP_US,
                                (GNRC_GOMACH_SUPERFRAME_DURATION_US - GNRC_GOMACH_CP_MIN_GAP_US));
    }

    netif->mac.tx.current_neighbor->cp_phase = phase_us;

    /* Record the public-channel phase of the neighbor.
     */
    /* if the neighbor is already in the next cycle relative to us, its
     * channel sequence is one hop ahead of ours */
    if (gnrc_gomach_get_enter_new_cycle(netif) &&
        ((uint32_t)phase_us > gnrc_gomach_phase_now(netif))) {
        if (gnrc_gomach_get_on_pubchan_1(netif)) {
            netif->mac.tx.current_neighbor->pub_chanseq = netif->mac.prot.gomach.pub_channel_2;
        }
        else {
            netif->mac.tx.current_neighbor->pub_chanseq = netif->mac.prot.gomach.pub_channel_1;
        }
    }
    else {
        if (gnrc_gomach_get_on_pubchan_1(netif)) {
            netif->mac.tx.current_neighbor->pub_chanseq = netif->mac.prot.gomach.pub_channel_1;
        }
        else {
            netif->mac.tx.current_neighbor->pub_chanseq = netif->mac.prot.gomach.pub_channel_2;
        }
    }
}