void handle_synchro_timeout(socket_internal_t *current_socket)
{
    msg_t send;

    if (thread_getstatus(current_socket->recv_pid) == STATUS_RECEIVE_BLOCKED) {
        timex_t now;
        vtimer_now(&now);

        if ((current_socket->socket_values.tcp_control.no_of_retries == 0) &&
            (timex_uint64(timex_sub(now, current_socket->socket_values.tcp_control.last_packet_time)) >
             TCP_SYN_INITIAL_TIMEOUT)) {
            current_socket->socket_values.tcp_control.no_of_retries++;
            net_msg_send(&send, current_socket->recv_pid, 0, TCP_RETRY);
        }
        else if ((current_socket->socket_values.tcp_control.no_of_retries > 0) &&
                 (timex_uint64(timex_sub(now, current_socket->socket_values.tcp_control.last_packet_time)) >
                  (current_socket->socket_values.tcp_control.no_of_retries * TCP_SYN_TIMEOUT +
                   TCP_SYN_INITIAL_TIMEOUT))) {
            current_socket->socket_values.tcp_control.no_of_retries++;

            if (current_socket->socket_values.tcp_control.no_of_retries > TCP_MAX_SYN_RETRIES) {
                net_msg_send(&send, current_socket->recv_pid, 0, TCP_TIMEOUT);
            }
            else {
                net_msg_send(&send, current_socket->recv_pid, 0, TCP_RETRY);
            }
        }
    }
}
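/*
 * Illustration only (syn_retry_deadline_us is a hypothetical helper, not part
 * of the module): both branches above test the same linear schedule. With n
 * retries already sent, the next retry is due once the time since the last
 * packet exceeds n * TCP_SYN_TIMEOUT + TCP_SYN_INITIAL_TIMEOUT, until
 * TCP_MAX_SYN_RETRIES is exhausted.
 */
static uint64_t syn_retry_deadline_us(uint8_t no_of_retries)
{
    return (uint64_t)no_of_retries * TCP_SYN_TIMEOUT + TCP_SYN_INITIAL_TIMEOUT;
}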
/**
 * Function executed by NHDP thread receiving messages in an endless loop
 */
static void *_nhdp_runner(void *arg)
{
    nhdp_if_entry_t *if_entry;
    msg_t msg_rcvd, msg_queue[NHDP_MSG_QUEUE_SIZE];

    (void)arg;
    msg_init_queue(msg_queue, NHDP_MSG_QUEUE_SIZE);

    while (1) {
        msg_receive(&msg_rcvd);

        switch (msg_rcvd.type) {
            case HELLO_TIMER:
                mutex_lock(&send_rcv_mutex);
                if_entry = msg_rcvd.content.ptr;

                nhdp_writer_send_hello(if_entry);

                /* TODO: Add jitter */

                /* Schedule next sending */
                xtimer_set_msg64(&if_entry->if_timer, timex_uint64(if_entry->hello_interval),
                                 &msg_rcvd, thread_getpid());
                mutex_unlock(&send_rcv_mutex);
                break;
#if (NHDP_METRIC_NEEDS_TIMER)
            case NHDP_METRIC_TIMER:
                mutex_lock(&send_rcv_mutex);
                /* Process necessary metric computations */
                iib_process_metric_refresh();

                /* Schedule next metric refresh */
                metric_msg.type = NHDP_METRIC_TIMER;
                metric_msg.content.ptr = NULL;
                xtimer_set_msg64(&metric_timer, timex_uint64(metric_interval),
                                 &metric_msg, thread_getpid());
                mutex_unlock(&send_rcv_mutex);
                break;
#endif
            default:
                break;
        }
    }

    return 0;
}
void handle_established(socket_internal_t *current_socket)
{
    msg_t send;
    double current_timeout = current_socket->socket_values.tcp_control.rto;

    if (current_timeout < SECOND) {
        current_timeout = SECOND;
    }

    uint8_t i;

    if ((current_socket->socket_values.tcp_control.send_nxt >
         current_socket->socket_values.tcp_control.send_una) &&
        (thread_getstatus(current_socket->send_pid) == STATUS_RECEIVE_BLOCKED)) {
        for (i = 0; i < current_socket->socket_values.tcp_control.no_of_retries; i++) {
            current_timeout *= 2;
        }

        timex_t now;
        vtimer_now(&now);

        if (current_timeout > TCP_ACK_MAX_TIMEOUT) {
            net_msg_send(&send, current_socket->send_pid, 0, TCP_TIMEOUT);
        }
        else if (timex_uint64(timex_sub(now, current_socket->socket_values.tcp_control.last_packet_time)) >
                 current_timeout) {
            current_socket->socket_values.tcp_control.no_of_retries++;
            net_msg_send(&send, current_socket->send_pid, 0, TCP_RETRY);
        }
    }
}
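/*
 * Illustration only (backed_off_rto is a hypothetical helper, not part of the
 * module): the timeout used above is the connection's RTO, floored at one
 * second and doubled once per previous retry (binary exponential backoff);
 * the caller gives up once it exceeds TCP_ACK_MAX_TIMEOUT.
 */
static double backed_off_rto(double rto, uint8_t no_of_retries)
{
    double timeout = (rto < SECOND) ? SECOND : rto;

    for (uint8_t i = 0; i < no_of_retries; i++) {
        timeout *= 2; /* double per retransmission already attempted */
    }

    return timeout;
}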
/* router-only functions from net/gnrc/sixlowpan/nd.h */
void gnrc_sixlowpan_nd_opt_abr_handle(kernel_pid_t iface, ndp_rtr_adv_t *rtr_adv,
                                      int sicmpv6_size, sixlowpan_nd_opt_abr_t *abr_opt)
{
    uint16_t opt_offset = 0;
    uint8_t *buf = (uint8_t *)(rtr_adv + 1);
    gnrc_sixlowpan_nd_router_abr_t *abr;
    timex_t t = { 0, 0 };

    if (_is_me(&abr_opt->braddr)) {
        return;
    }

    /* validity and version was checked in previously called
     * gnrc_sixlowpan_nd_router_abr_older() */
    abr = _get_abr(&abr_opt->braddr);

    if (abr == NULL) {
        return;
    }

    abr->ltime = byteorder_ntohs(abr_opt->ltime);

    if (abr->ltime == 0) {
        abr->ltime = GNRC_SIXLOWPAN_ND_BORDER_ROUTER_DEFAULT_LTIME;
        return;
    }

    sicmpv6_size -= sizeof(ndp_rtr_adv_t);

    while (sicmpv6_size > 0) {
        ndp_opt_t *opt = (ndp_opt_t *)(buf + opt_offset);

        switch (opt->type) {
            case NDP_OPT_PI:
                _add_prefix(iface, abr, (ndp_opt_pi_t *)opt);
                break;

            case NDP_OPT_6CTX:
                _add_ctx(abr, (sixlowpan_nd_opt_6ctx_t *)opt);
                break;

            default:
                break;
        }

        opt_offset += (opt->len * 8);
        sicmpv6_size -= (opt->len * 8);
    }

    abr->version = (uint32_t)byteorder_ntohs(abr_opt->vlow);
    abr->version |= ((uint32_t)byteorder_ntohs(abr_opt->vhigh)) << 16;
    abr->addr.u64[0] = abr_opt->braddr.u64[0];
    abr->addr.u64[1] = abr_opt->braddr.u64[1];
    memset(abr->ctxs, 0, sizeof(abr->ctxs));
    abr->prfs = NULL;

    t.seconds = abr->ltime * 60;

    xtimer_remove(&abr->ltimer);
    abr->ltimer_msg.type = GNRC_SIXLOWPAN_ND_MSG_ABR_TIMEOUT;
    abr->ltimer_msg.content.ptr = (char *)abr;
    xtimer_set_msg(&abr->ltimer, (uint32_t)timex_uint64(t), &abr->ltimer_msg, gnrc_ipv6_pid);
}
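/*
 * Side note (abr_ltimer_set64 is a hypothetical sketch, not a verified fix):
 * (uint32_t)timex_uint64(t) above truncates offsets beyond 2^32 us (~4295 s),
 * i.e. ABR lifetimes longer than roughly 71 minutes. The 64-bit setter
 * already used elsewhere in this code base would avoid the truncation:
 */
static void abr_ltimer_set64(gnrc_sixlowpan_nd_router_abr_t *abr, timex_t t)
{
    abr->ltimer_msg.type = GNRC_SIXLOWPAN_ND_MSG_ABR_TIMEOUT;
    abr->ltimer_msg.content.ptr = (char *)abr;
    xtimer_set_msg64(&abr->ltimer, timex_uint64(t), &abr->ltimer_msg, gnrc_ipv6_pid);
}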
static void add_seq_entry(uint8_t src, uint8_t id)
{
    /* Remove all entries with the given source to avoid a short-time overflow
     * of the one-byte sequence counter of the source node; otherwise a valid
     * packet could get lost (especially important in constant RX mode). */
    int i;

    for (i = 0; i < MAX_SEQ_BUFFER_SIZE; i++) {
        if (seq_buffer[i].source == src) {
            seq_buffer[i].source = 0; /* Reset */
        }
    }

    /* Add new entry */
    seq_buffer[seq_buffer_pos].source = src;
    seq_buffer[seq_buffer_pos].identification = id;
    timex_t now;
    vtimer_now(&now);
    seq_buffer[seq_buffer_pos].m_ticks = timex_uint64(now);

    /* Store 16 bit sequence number of layer 0 for speedup */
    last_seq_num = src;
    last_seq_num <<= 8;
    last_seq_num += id;

    seq_buffer_pos++;

    if (seq_buffer_pos == MAX_SEQ_BUFFER_SIZE) {
        seq_buffer_pos = 0;
    }
}
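/*
 * Illustration only (hypothetical helpers, not part of the driver):
 * last_seq_num above packs the 8-bit source address and the 8-bit
 * identification into one 16-bit value, (src << 8) | id, so the most recent
 * sequence number can be compared with a single 16-bit operation.
 */
static uint16_t seq_pack(uint8_t src, uint8_t id)
{
    return (uint16_t)(((uint16_t)src << 8) | id);
}

static uint8_t seq_src(uint16_t seq)
{
    return (uint8_t)(seq >> 8);
}

static uint8_t seq_id(uint16_t seq)
{
    return (uint8_t)(seq & 0xff);
}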
gnrc_ipv6_nc_t *gnrc_ipv6_nc_still_reachable(const ipv6_addr_t *ipv6_addr)
{
    gnrc_ipv6_nc_t *entry = gnrc_ipv6_nc_get(KERNEL_PID_UNDEF, ipv6_addr);

    if (entry == NULL) {
        DEBUG("ipv6_nc: No entry found for %s\n",
              ipv6_addr_to_str(addr_str, ipv6_addr, sizeof(addr_str)));
        return NULL;
    }

    if ((gnrc_ipv6_nc_get_state(entry) != GNRC_IPV6_NC_STATE_INCOMPLETE) &&
        (gnrc_ipv6_nc_get_state(entry) != GNRC_IPV6_NC_STATE_UNMANAGED)) {
#if defined(MODULE_GNRC_IPV6_NETIF) && defined(MODULE_VTIMER) && defined(MODULE_GNRC_IPV6)
        gnrc_ipv6_netif_t *iface = gnrc_ipv6_netif_get(entry->iface);
        timex_t t = iface->reach_time;

        gnrc_ndp_internal_reset_nbr_sol_timer(entry, (uint32_t)timex_uint64(t),
                                              GNRC_NDP_MSG_NC_STATE_TIMEOUT, gnrc_ipv6_pid);
#endif
        DEBUG("ipv6_nc: Marking entry %s as reachable\n",
              ipv6_addr_to_str(addr_str, ipv6_addr, sizeof(addr_str)));
        entry->flags &= ~(GNRC_IPV6_NC_STATE_MASK >> GNRC_IPV6_NC_STATE_POS);
        entry->flags |= (GNRC_IPV6_NC_STATE_REACHABLE >> GNRC_IPV6_NC_STATE_POS);
    }

    return entry;
}
static void calc_rtt(void)
{
    timex_t rtt = timex_sub(end, start);

    rtt_sum = timex_add(rtt_sum, rtt);
    l2_ping_stats.last_rtt = rtt;
    l2_ping_stats.avg_rtt = timex_from_uint64(timex_uint64(rtt_sum) / l2_ping_stats.pong_count);

    if (timex_cmp(rtt, l2_ping_stats.max_rtt) > 0) {
        l2_ping_stats.max_rtt = rtt;
    }

    if (timex_cmp(rtt, l2_ping_stats.min_rtt) < 0) {
        l2_ping_stats.min_rtt = rtt;
    }
}
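/*
 * Illustration only (timex_div is a hypothetical helper, not part of the
 * timex API): timex_t offers no division operator, so the running average
 * above round-trips through the 64-bit microsecond domain via timex_uint64()
 * and timex_from_uint64().
 */
static timex_t timex_div(timex_t sum, uint32_t count)
{
    /* convert to microseconds, divide, convert back to (seconds, microseconds) */
    return timex_from_uint64(timex_uint64(sum) / count);
}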
cv_status condition_variable::wait_until(unique_lock<mutex>& lock,
                                         const time_point& timeout_time) {
    xtimer_t timer;
    // todo: use function to wait for absolute timepoint once available
    timex_t before;
    xtimer_now_timex(&before);
    auto diff = timex_sub(timeout_time.native_handle(), before);
    // timex_uint64() yields a uint64_t offset, so use the 64-bit setter to
    // avoid truncating long timeouts
    xtimer_set_wakeup64(&timer, timex_uint64(diff), sched_active_pid);
    wait(lock);
    timex_t after;
    xtimer_now_timex(&after);
    xtimer_remove(&timer);
    auto cmp = timex_cmp(after, timeout_time.native_handle());
    return cmp < 1 ? cv_status::no_timeout : cv_status::timeout;
}
kernel_pid_t nhdp_start(void)
{
    if (nhdp_pid == KERNEL_PID_UNDEF) {
        /* Init destination address for NHDP's packets */

        /* Start the NHDP thread */
        nhdp_pid = thread_create(nhdp_stack, sizeof(nhdp_stack), THREAD_PRIORITY_MAIN - 1,
                                 THREAD_CREATE_STACKTEST, _nhdp_runner, NULL, "NHDP");

#if (NHDP_METRIC_NEEDS_TIMER)
        /* Configure periodic timer message to refresh metric values */
        if (nhdp_pid != KERNEL_PID_UNDEF) {
            metric_interval = timex_from_uint64(DAT_REFRESH_INTERVAL * SEC_IN_USEC);
            metric_msg.type = NHDP_METRIC_TIMER;
            metric_msg.content.ptr = NULL;
            xtimer_set_msg64(&metric_timer, timex_uint64(metric_interval),
                             &metric_msg, nhdp_pid);
        }
#endif
    }

    return nhdp_pid;
}
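/*
 * Usage sketch (example_init is hypothetical, not part of the module):
 * nhdp_start() is idempotent; a second call returns the PID of the already
 * running thread instead of spawning another one. This assumes
 * thread_create() returns a negative value on failure, as usual in RIOT.
 */
static void example_init(void)
{
    kernel_pid_t pid = nhdp_start();

    if (pid <= KERNEL_PID_UNDEF) {
        /* thread creation failed; NHDP is not running */
    }
}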
static bool contains_seq_entry(uint8_t src, uint8_t id)
{
    int i;
    uint32_t cmp;
    timex_t now_timex;
    vtimer_now(&now_timex);

    for (i = 0; i < MAX_SEQ_BUFFER_SIZE; i++) {
        if ((seq_buffer[i].source == src) && (seq_buffer[i].identification == id)) {
            /* Check if time stamp is OK */
            cmp = (radio_mode == CC1100_MODE_WOR) ? cc1100_wor_config.rx_interval
                                                  : 16000; /* constant RX ~16ms */

            if (timex_uint64(now_timex) - seq_buffer[i].m_ticks < cmp) {
                return true;
            }
            else {
                seq_buffer[i].source = 0; /* Reset */
            }
        }
    }

    return false;
}
/*---------------------------------------------------------------------------*/
int cc1100_send_csmaca(radio_address_t address, protocol_t protocol, int priority,
                       char *payload, radio_packet_length_t payload_len)
{
    uint16_t min_window_size;
    uint16_t max_window_size;
    uint16_t difs;
    uint16_t slottime;

    switch (priority) {
        case PRIORITY_ALARM:
            min_window_size = PRIO_ALARM_MIN_WINDOW_SIZE;
            max_window_size = PRIO_ALARM_MAX_WINDOW_SIZE;
            difs = PRIO_ALARM_DIFS;
            slottime = PRIO_ALARM_SLOTTIME;
            break;

        case PRIORITY_WARNING:
            min_window_size = PRIO_WARN_MIN_WINDOW_SIZE;
            max_window_size = PRIO_WARN_MAX_WINDOW_SIZE;
            difs = PRIO_WARN_DIFS;
            slottime = PRIO_WARN_SLOTTIME;
            break;

        default:
            min_window_size = PRIO_DATA_MIN_WINDOW_SIZE;
            max_window_size = PRIO_DATA_MAX_WINDOW_SIZE;
            difs = PRIO_DATA_DIFS;
            slottime = PRIO_DATA_SLOTTIME;
    }

    /* Calculate collisions per second */
    if (collision_state == COLLISION_STATE_INITIAL) {
        vtimer_now(&collision_measurement_start);
        collision_count = 0;
        collisions_per_sec = 0;
        collision_state = COLLISION_STATE_MEASURE;
    }
    else if (collision_state == COLLISION_STATE_MEASURE) {
        timex_t now;
        vtimer_now(&now);
        timex_t timespan = timex_sub(now, collision_measurement_start);

        if (timex_cmp(timespan, timex_set(1, 0)) > 0) {
            collisions_per_sec = (collision_count * 1000000) / (double)timex_uint64(timespan);

            if (collisions_per_sec > 0.5 && collisions_per_sec <= 2.2) {
                vtimer_now(&collision_measurement_start);
                collision_state = COLLISION_STATE_KEEP;
            }
            else if (collisions_per_sec > 2.2) {
                vtimer_now(&collision_measurement_start);
                collision_state = COLLISION_STATE_KEEP;
            }
            else {
                collision_state = COLLISION_STATE_INITIAL;
            }
        }
    }
    else if (collision_state == COLLISION_STATE_KEEP) {
        timex_t now;
        vtimer_now(&now);
        timex_t timespan = timex_sub(now, collision_measurement_start);

        if (timex_cmp(timespan, timex_set(5, 0)) > 0) {
            collision_state = COLLISION_STATE_INITIAL;
        }
    }

    /* Adjust initial window size according to collision rate */
    if (collisions_per_sec > 0.5 && collisions_per_sec <= 2.2) {
        min_window_size *= 2;
    }
    else if (collisions_per_sec > 2.2) {
        min_window_size *= 4;
    }

    uint16_t windowSize = min_window_size;  /* Start with window size of PRIO_XXX_MIN_WINDOW_SIZE */
    uint16_t backoff = 0;                   /* Backoff between 1 and windowSize */
    uint32_t total;                         /* Holds the total wait time before send try */
    uint32_t cs_timeout;                    /* Current carrier sense timeout value */

    if (protocol == 0) {
        return RADIO_INVALID_PARAM;         /* Not allowed, protocol id must be greater zero */
    }

    cc1100_phy_mutex_lock();                /* Lock radio for exclusive access */

    /* Get carrier sense timeout based on overall error rate till now */
    send_csmaca_calls++;
    int fail_percentage = (send_csmaca_calls_cs_timeout * 100) / send_csmaca_calls;

    if (fail_percentage == 0) {
        fail_percentage = 1;
    }

    cs_timeout = CARRIER_SENSE_TIMEOUT / fail_percentage;

    if (cs_timeout < CARRIER_SENSE_TIMEOUT_MIN) {
        cs_timeout = CARRIER_SENSE_TIMEOUT_MIN;
    }

    cc1100_cs_init();                       /* Initialize carrier sensing */

window:

    if (backoff != 0) {
        goto cycle;                         /* If backoff was 0 */
    }

    windowSize *= 2;                        /* ...double the current window size */

    if (windowSize > max_window_size) {
        windowSize = max_window_size;       /* This is the maximum size allowed */
    }

    backoff = rand() % windowSize;          /* ...and choose new backoff */
    backoff += (uint16_t)1;

cycle:

    cs_timeout_flag = 0;                    /* Carrier sense timeout flag */
    cs_hwtimer_id = hwtimer_set(cs_timeout, /* Set hwtimer to set CS timeout flag */
                                cs_timeout_cb, NULL);

    while (cc1100_cs_read()) {              /* Wait until air is free */
        if (cs_timeout_flag) {
            send_csmaca_calls_cs_timeout++;
#ifndef CSMACA_MAC_AGGRESSIVE_MODE
            cc1100_phy_mutex_unlock();
            cc1100_go_after_tx();           /* Go from RX to default mode */
            return RADIO_CS_TIMEOUT;        /* Return immediately */
#endif
#ifdef CSMACA_MAC_AGGRESSIVE_MODE
            goto send;                      /* Send anyway */
#endif
        }
    }

    hwtimer_remove(cs_hwtimer_id);          /* Remove hwtimer */
    cc1100_cs_write_cca(1);                 /* Air is free now */
    cc1100_cs_set_enabled(true);

    if (cc1100_cs_read()) {
        goto window;                        /* GDO0 triggers on rising edge, so */
    }                                       /* test once after interrupt is enabled */

    if (backoff > 0) {
        backoff--;                          /* Decrement backoff counter */
    }

    total = slottime;                       /* Calculate total wait time */
    total *= (uint32_t)backoff;             /* Slot vector set */
    total += difs;                          /* ...and standard DIFS wait time */

    cs_timeout_flag = 0;                    /* Carrier sense timeout flag */
    cs_hwtimer_id = hwtimer_set(total,      /* Set hwtimer to set CS timeout flag */
                                cs_timeout_cb, NULL);

    while (!cs_timeout_flag
           || !cc1100_cs_read_cca()) {      /* Wait until timeout is finished */
        if (cc1100_cs_read_cca() == 0) {    /* Is the air still free? */
            hwtimer_remove(cs_hwtimer_id);
            goto window;                    /* No. Go back to new wait period. */
        }
    }

    cc1100_cs_set_enabled(false);
#ifdef CSMACA_MAC_AGGRESSIVE_MODE
send:
#endif
    int res = cc1100_send(address, protocol, priority, payload, payload_len);

    if (res < 0) {
        collision_count++;
    }

    return res;
}
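/*
 * Illustration only (csmaca_total_wait is a hypothetical helper, not part of
 * the driver): the total pre-send wait computed above is one contention slot
 * per remaining backoff step plus the fixed DIFS period, e.g. backoff = 3
 * yields 3 * slottime + difs microseconds.
 */
static uint32_t csmaca_total_wait(uint16_t slottime, uint16_t backoff, uint16_t difs)
{
    uint32_t total = slottime;

    total *= (uint32_t)backoff; /* one slot per remaining backoff step */
    total += difs;              /* plus the standard DIFS wait time */
    return total;
}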
void gnrc_ndp_internal_send_rtr_adv(kernel_pid_t iface, ipv6_addr_t *src, ipv6_addr_t *dst,
                                    bool fin)
{
    gnrc_pktsnip_t *hdr, *pkt = NULL;
    ipv6_addr_t all_nodes = IPV6_ADDR_ALL_NODES_LINK_LOCAL;
    gnrc_ipv6_netif_t *ipv6_iface = gnrc_ipv6_netif_get(iface);
    uint32_t reach_time = 0, retrans_timer = 0;
    uint16_t adv_ltime = 0;
    uint8_t cur_hl = 0;

    if (dst == NULL) {
        dst = &all_nodes;
    }

    DEBUG("ndp internal: send router advertisement (iface: %" PRIkernel_pid ", dst: %s%s\n",
          iface, ipv6_addr_to_str(addr_str, dst, sizeof(addr_str)), fin ? ", final" : "");

    mutex_lock(&ipv6_iface->mutex);

    hdr = _add_pios(ipv6_iface, pkt);

    if (hdr == NULL) {
        /* pkt already released in _add_pios */
        mutex_unlock(&ipv6_iface->mutex);
        return;
    }

    pkt = hdr;

    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_MTU) {
        if ((hdr = gnrc_ndp_opt_mtu_build(ipv6_iface->mtu, pkt)) == NULL) {
            DEBUG("ndp rtr: no space left in packet buffer\n");
            mutex_unlock(&ipv6_iface->mutex);
            gnrc_pktbuf_release(pkt);
            return;
        }

        pkt = hdr;
    }

    if (src == NULL) {
        /* get address from source selection algorithm */
        src = gnrc_ipv6_netif_find_best_src_addr(iface, dst);
    }

    /* add SL2A for source address */
    if (src != NULL) {
        DEBUG(" - SL2A\n");
        uint8_t l2src[8];
        size_t l2src_len;
        /* optimization note: MAY also be omitted to facilitate in-bound load balancing over
         * replicated interfaces.
         * source: https://tools.ietf.org/html/rfc4861#section-6.2.3 */
        l2src_len = _get_l2src(iface, l2src, sizeof(l2src));

        if (l2src_len > 0) {
            /* add source address link-layer address option */
            hdr = gnrc_ndp_opt_sl2a_build(l2src, l2src_len, pkt);

            if (hdr == NULL) {
                DEBUG("ndp internal: error allocating Source Link-layer address option.\n");
                mutex_unlock(&ipv6_iface->mutex);
                gnrc_pktbuf_release(pkt);
                return;
            }

            pkt = hdr;
        }
    }

    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_CUR_HL) {
        cur_hl = ipv6_iface->cur_hl;
    }

    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_REACH_TIME) {
        uint64_t tmp = timex_uint64(ipv6_iface->reach_time) / MS_IN_USEC;

        if (tmp > (3600 * SEC_IN_MS)) { /* tmp > 1 hour */
            tmp = (3600 * SEC_IN_MS);
        }

        reach_time = (uint32_t)tmp;
    }

    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_RETRANS_TIMER) {
        uint64_t tmp = timex_uint64(ipv6_iface->retrans_timer) / MS_IN_USEC;

        if (tmp > UINT32_MAX) {
            tmp = UINT32_MAX;
        }

        retrans_timer = (uint32_t)tmp;
    }

    if (!fin) {
        adv_ltime = ipv6_iface->adv_ltime;
    }

    mutex_unlock(&ipv6_iface->mutex);

    hdr = gnrc_ndp_rtr_adv_build(cur_hl,
                                 (ipv6_iface->flags & (GNRC_IPV6_NETIF_FLAGS_OTHER_CONF |
                                                       GNRC_IPV6_NETIF_FLAGS_MANAGED)) >> 8,
                                 adv_ltime, reach_time, retrans_timer, pkt);

    if (hdr == NULL) {
        DEBUG("ndp internal: error allocating router advertisement.\n");
        gnrc_pktbuf_release(pkt);
        return;
    }

    pkt = hdr;
    hdr = _build_headers(iface, pkt, dst, src);

    if (hdr == NULL) {
        DEBUG("ndp internal: error adding lower-layer headers.\n");
        gnrc_pktbuf_release(pkt);
        return;
    }

    gnrc_netapi_send(gnrc_ipv6_pid, hdr);
}
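/*
 * Illustration only (clamp_reach_time_ms is a hypothetical helper, not part
 * of the module): the Reachable Time computation above, factored out. RFC
 * 4861 carries the advertised value in milliseconds and caps it at one hour
 * (3,600,000 ms).
 */
static uint32_t clamp_reach_time_ms(uint64_t reach_time_us)
{
    uint64_t ms = reach_time_us / MS_IN_USEC;

    if (ms > (3600 * SEC_IN_MS)) { /* more than 1 hour */
        ms = (3600 * SEC_IN_MS);
    }

    return (uint32_t)ms;
}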