/**
 * @brief   Re-enable the external interrupt for a previously configured pin.
 *
 * Looks up the pin's slot in the ISR map and, when the pin was configured
 * via the interrupt API, re-applies the stored rising/falling edge flags
 * through _gpio_configure(). Unconfigured pins are ignored (debug message
 * only).
 */
void gpio_irq_enable(gpio_t pin)
{
    const int map_entry = _isr_map_entry(pin);
    const int state = _gpio_isr_map[map_entry];

    /* 0xff marks a map slot that was never set up for interrupts */
    if (state != 0xff) {
        _gpio_configure(pin,
                        bf_isset(_gpio_rising, state),
                        bf_isset(_gpio_falling, state));
    }
    else {
        DEBUG("gpio_irq_enable(): trying to enable unconfigured pin.\n");
    }
}
void icn_initInterest(uint16_t seq) { if (WANT_CONTENT) { uint32_t tmp1; uint16_t tmp2; tmp1 = _getSmallestMissing(); LOG_DEBUG("Smallest missing is %lu\n", tmp1); tmp2 = seq; if ((tmp1 < NUMBER_OF_CHUNKS) && (tmp1 >= 0)) { LOG_INFO("Scheduling retransmission for %lu\n", tmp1); vtimer_remove(&retry_vt); vtimer_set_msg(&retry_vt, retry_interval, thread_getpid(), ICN_RESEND_INTEREST, &tmp1); } if (bf_isset(received_chunks, seq)) { LOG_INFO("Already received a chunk for %u, not sending again\n", seq); return; } #if FLOW_CONTROL if (seq > (receive_counter + FLOW_THR)) { LOG_INFO("Flow control, seq is %u, receive counter is %u\n", seq, receive_counter); return; } #endif /* create packet */ ng_pktsnip_t *pkt; icn_pkt_t icn_pkt; icn_pkt.type = ICN_INTEREST; icn_pkt.seq = seq; memcpy(icn_pkt.payload, interest, strlen(interest) + 1); pkt = ng_pktbuf_add(NULL, &icn_pkt, sizeof(icn_pkt_t), NG_NETTYPE_UNDEF); // send interest packet if (tmp2 < NUMBER_OF_CHUNKS) { LOG_INFO("Sending Interest for %u to %s\n", seq, ng_netif_addr_to_str(l2addr_str, sizeof(l2addr_str), CONTENT_STORE->uint8, ADDR_LEN_64B)); icn_send(CONTENT_STORE, pkt); } if (tmp2 < NUMBER_OF_CHUNKS) { tmp2++; #if TIMED_SENDING vtimer_remove(&periodic_vt); vtimer_set_msg(&periodic_vt, interval, thread_getpid(), ICN_SEND_INTEREST, &tmp2); #else icn_initInterest(tmp2); #endif } } else { LOG_DEBUG("nothing to do\n"); } }
/**
 * @brief   Return the lowest chunk number that was not received yet.
 *
 * @return  index of the first unset bit in `received_chunks`,
 *          -1 if all NUMBER_OF_CHUNKS chunks have been received
 */
static inline int32_t _getSmallestMissing(void)
{
    int32_t missing = -1;

    for (unsigned idx = 0; idx < NUMBER_OF_CHUNKS; idx++) {
        if (!bf_isset(received_chunks, idx)) {
            missing = idx;
            break;
        }
    }
    return missing;
}
/**
 * @brief   Find the configured address on @p iface sharing the longest prefix
 *          with @p addr (caller must hold the interface lock — "unsafe").
 *
 * @param[out] res   if non-NULL, set to the best-matching address; only
 *                   written when a match is found, so the caller should
 *                   initialize `*res` to NULL beforehand
 * @param[in] iface  interface whose addresses are searched
 * @param[in] addr   address to match against
 * @param[in] only   optional bitfield restricting which address slots are
 *                   considered (used for source-address candidate sets)
 *
 * @return  number of matching prefix bits of the best match (0 if none)
 */
static uint8_t _find_by_prefix_unsafe(ipv6_addr_t **res, gnrc_ipv6_netif_t *iface,
                                      const ipv6_addr_t *addr, uint8_t *only)
{
    uint8_t best_match = 0;

    for (int i = 0; i < GNRC_IPV6_NETIF_ADDR_NUMOF; i++) {
        uint8_t match;

        /* honor the candidate restriction, if any */
        if ((only != NULL) && !(bf_isset(only, i))) {
            continue;
        }
        /* skip unusable entries: non-unicast (only relevant with a filter)
         * and unconfigured (unspecified) slots */
        if (((only != NULL) && gnrc_ipv6_netif_addr_is_non_unicast(&(iface->addrs[i].addr))) ||
            ipv6_addr_is_unspecified(&(iface->addrs[i].addr))) {
            continue;
        }
        match = ipv6_addr_match_prefix(&(iface->addrs[i].addr), addr);
        if ((only == NULL) && !ipv6_addr_is_multicast(addr) &&
            (match < iface->addrs[i].prefix_len)) {
            /* match but not of same subnet */
            continue;
        }
        if (match > best_match) {
            if (res != NULL) {
                *res = &(iface->addrs[i].addr);
            }
            best_match = match;
        }
    }
#if ENABLE_DEBUG
    /* BUG FIX: the loop explicitly tolerates res == NULL, but this debug
     * block dereferenced *res unconditionally — guard the pointer first */
    if ((res != NULL) && (*res != NULL)) {
        DEBUG("ipv6 netif: Found %s on interface %" PRIkernel_pid " matching ",
              ipv6_addr_to_str(addr_str, *res, sizeof(addr_str)), iface->pid);
        DEBUG("%s by %" PRIu8 " bits (used as source address = %s)\n",
              ipv6_addr_to_str(addr_str, addr, sizeof(addr_str)), best_match,
              (only != NULL) ? "true" : "false");
    }
    else {
        DEBUG("ipv6 netif: Did not found any address on interface %" PRIkernel_pid
              " matching %s (used as source address = %s)\n", iface->pid,
              ipv6_addr_to_str(addr_str, addr, sizeof(addr_str)),
              (only != NULL) ? "true" : "false");
    }
#endif
    return best_match;
}
/**
 * @brief   Check whether @p addr is contained in the IPv6 whitelist.
 *
 * @param[in] addr  address to look up
 *
 * @return  true if an occupied whitelist slot equals @p addr, false otherwise
 */
bool gnrc_ipv6_whitelisted(const ipv6_addr_t *addr)
{
    for (int idx = 0; idx < GNRC_IPV6_WHITELIST_SIZE; idx++) {
        /* only occupied slots carry valid addresses */
        bool in_use = bf_isset(gnrc_ipv6_whitelist_set, idx);

        if (in_use && ipv6_addr_equal(addr, &gnrc_ipv6_whitelist[idx])) {
            return true;
        }
    }
    return false;
}
/**
 * @brief   Add @p addr to the first free slot of the IPv6 whitelist.
 *
 * @param[in] addr  address to whitelist (copied into the table)
 *
 * @return  0 on success, -1 if the whitelist is full
 */
int gnrc_ipv6_whitelist_add(const ipv6_addr_t *addr)
{
    int slot = 0;

    while (slot < GNRC_IPV6_WHITELIST_SIZE) {
        if (bf_isset(gnrc_ipv6_whitelist_set, slot)) {
            /* slot already occupied — keep searching */
            slot++;
            continue;
        }
        bf_set(gnrc_ipv6_whitelist_set, slot);
        /* copy the address as two 64-bit halves */
        gnrc_ipv6_whitelist[slot].u64[0].u64 = addr->u64[0].u64;
        gnrc_ipv6_whitelist[slot].u64[1].u64 = addr->u64[1].u64;
        DEBUG("IPv6 whitelist: whitelisted %s\n",
              ipv6_addr_to_str(addr_str, addr, sizeof(addr_str)));
        return 0;
    }
    return -1;
}
static unsigned _match(const gnrc_netif_t *netif, const ipv6_addr_t *addr, const uint8_t *filter, int *idx) { unsigned best_match = 0; assert(idx != NULL); *idx = -1; for (int i = 0; i < GNRC_NETIF_IPV6_ADDRS_NUMOF; i++) { unsigned match; if ((netif->ipv6.addrs_flags[i] == 0) || ((filter != NULL) && _addr_anycast(netif, i)) || /* discard const intentionally */ ((filter != NULL) && !(bf_isset((uint8_t *)filter, i)))) { continue; } match = ipv6_addr_match_prefix(&(netif->ipv6.addrs[i]), addr); if (((match > 64U) || !ipv6_addr_is_link_local(&(netif->ipv6.addrs[i]))) && (match >= best_match)) { if (idx != NULL) { *idx = i; } best_match = match; } } if (*idx >= 0) { DEBUG("gnrc_netif: Found %s on interface %" PRIkernel_pid " matching ", ipv6_addr_to_str(addr_str, &netif->ipv6.addrs[*idx], sizeof(addr_str)), netif->pid); DEBUG("%s by %u bits (used as source address = %s)\n", ipv6_addr_to_str(addr_str, addr, sizeof(addr_str)), best_match, (filter != NULL) ? "true" : "false"); } else { DEBUG("gnrc_netif: Did not found any address on interface %" PRIkernel_pid " matching %s (used as source address = %s)\n", netif->pid, ipv6_addr_to_str(addr_str, addr, sizeof(addr_str)), (filter != NULL) ? "true" : "false"); } return best_match; }
/**
 * @brief   Tear down all state associated with a 6LoWPAN border router.
 *
 * Removes every compression context registered for @p abr, unlinks and
 * clears all of its prefix entries (removing the prefix address from the
 * owning interface), and finally resets the ABR's address and version.
 */
void gnrc_sixlowpan_nd_router_abr_remove(gnrc_sixlowpan_nd_router_abr_t *abr)
{
    /* drop all compression contexts this border router registered */
    for (int cid = 0; cid < GNRC_SIXLOWPAN_CTX_SIZE; cid++) {
        if (!bf_isset(abr->ctxs, cid)) {
            continue;
        }
        gnrc_sixlowpan_ctx_remove(cid);
        bf_unset(abr->ctxs, cid);
    }

    /* unlink and clear every prefix entry */
    gnrc_sixlowpan_nd_router_prf_t *prefix;
    while ((prefix = abr->prfs) != NULL) {
        LL_DELETE(abr->prfs, prefix);
        gnrc_ipv6_netif_remove_addr(prefix->iface->pid, &prefix->prefix->addr);
        prefix->next = NULL;
        prefix->iface = NULL;
        prefix->prefix = NULL;
    }

    /* reset the border router record itself */
    ipv6_addr_set_unspecified(&abr->addr);
    abr->version = 0;
}
/* initializes "out" */
/* Try to assemble, into `out`, one candidate pair per active component
 * (1..ag->active_components) such that all pairs share the foundation and
 * local address of a component-1 "anchor" pair and all have `flag` set in
 * their pair_flags. On failure `out` ends up empty; `out` is always
 * (re)initialized. */
static void __get_complete_components(GQueue *out, struct ice_agent *ag, GTree *t, unsigned int flag) {
	GQueue compo1 = G_QUEUE_INIT;
	GList *l;
	struct ice_candidate_pair *pair1, *pairX;
	struct ice_candidate *cand;
	unsigned int i;

	/* component-1 pairs serve as anchors for the search */
	__get_pairs_by_component(&compo1, t, 1);

	g_queue_init(out);
	for (l = compo1.head; l; l = l->next) {
		pair1 = l->data;
		/* restart the output set for each anchor pair */
		g_queue_clear(out);
		g_queue_push_tail(out, pair1);
		/* for every remaining component, require a pair with the same
		 * foundation and local address that carries `flag` */
		for (i = 2; i <= ag->active_components; i++) {
			cand = __foundation_lookup(ag, &pair1->remote_candidate->foundation, i);
			if (!cand)
				goto next_foundation;
			pairX = __pair_lookup(ag, cand, pair1->local_address);
			if (!pairX)
				goto next_foundation;
			if (!bf_isset(&pairX->pair_flags, flag))
				goto next_foundation;
			g_queue_push_tail(out, pairX);
		}
		/* all components covered — this set is complete, keep it */
		goto found;
next_foundation:
		;
	}

	/* nothing found */
	g_queue_clear(out);

found:
	/* compo1 only held borrowed pointers; clear the queue itself */
	g_queue_clear(&compo1);
}
/**
 * @brief   Atomically claim the first unset bit in a bitfield.
 *
 * Runs with interrupts disabled so the find-and-set is atomic with respect
 * to ISRs.
 *
 * @param[in,out] field  the bitfield to scan
 * @param[in]     size   number of valid bits in @p field
 *
 * @return  index of the bit that was found and set, or -1 if all bits are set
 */
int bf_get_unset(uint8_t field[], int size)
{
    int claimed = -1;
    const int nbytes = (size + 7) / 8;
    int bit = 0;

    unsigned irq_state = irq_disable();

    /* fast-forward over bytes that are completely occupied */
    for (int byte = 0; (byte < nbytes) && (field[byte] == 0xff); byte++) {
        bit += 8;
    }
    /* scan the remaining bits individually */
    while (bit < size) {
        if (!bf_isset(field, bit)) {
            bf_set(field, bit);
            claimed = bit;
            break;
        }
        bit++;
    }

    irq_restore(irq_state);

    return claimed;
}
/** @brief Find the best candidate among the configured addresses
 *          for a certain destination address according to the 8 rules
 *          specified in RFC 6724, section 5.
 * @see <a href="http://tools.ietf.org/html/rfc6724#section-5">
 *          RFC6724, section 5
 *      </a>
 *
 * @param[in] iface              The interface for sending.
 * @param[in] dst                The destination IPv6 address.
 * @param[in, out] candidate_set The preselected set of candidate addresses as
 *                               a bitfield.
 *
 * @pre @p dst is not unspecified.
 *
 * @return The best matching candidate found on @p iface, may be NULL if none
 *         is found.
 */
static ipv6_addr_t *_source_address_selection(gnrc_ipv6_netif_t *iface, const ipv6_addr_t *dst,
                                              uint8_t *candidate_set)
{
    /* create temporary set for assigning "points" to candidates winning in the
     * corresponding rules. */
    uint8_t winner_set[GNRC_IPV6_NETIF_ADDR_NUMOF];
    memset(winner_set, 0, GNRC_IPV6_NETIF_ADDR_NUMOF);
    uint8_t max_pts = 0;
    /* _create_candidate_set() assures that `dest` is not unspecified and if
     * `dst` is loopback rule 1 will fire anyway. */
    uint8_t dst_scope = _get_scope(dst, true);

    DEBUG("finding the best match within the source address candidates\n");
    for (int i = 0; i < GNRC_IPV6_NETIF_ADDR_NUMOF; i++) {
        gnrc_ipv6_netif_addr_t *iter = &(iface->addrs[i]);
        DEBUG("Checking address: %s\n",
              ipv6_addr_to_str(addr_str, &(iter->addr), sizeof(addr_str)));
        /* entries which are not part of the candidate set can be ignored */
        if (!(bf_isset(candidate_set, i))) {
            DEBUG("Not part of the candidate set - skipping\n");
            continue;
        }
        /* Rule 1: if we have an address configured that equals the destination
         * use this one as source */
        if (ipv6_addr_equal(&(iter->addr), dst)) {
            DEBUG("Ease one - rule 1\n");
            return &(iter->addr);
        }
        /* Rule 2: Prefer appropriate scope. */
        /* both link local */
        uint8_t candidate_scope = _get_scope(&(iter->addr), false);
        if (candidate_scope == dst_scope) {
            DEBUG("winner for rule 2 (same scope) found\n");
            winner_set[i] += RULE_2A_PTS;
            if (winner_set[i] > max_pts) {
                /* CONSISTENCY FIX: track the accumulated score like every
                 * other rule does (was `max_pts = RULE_2A_PTS`, which would
                 * silently drop previously accumulated points should a rule
                 * ever award points before rule 2) */
                max_pts = winner_set[i];
            }
        }
        else if (candidate_scope < dst_scope) {
            DEBUG("winner for rule 2 (smaller scope) found\n");
            winner_set[i] += RULE_2B_PTS;
            if (winner_set[i] > max_pts) {
                max_pts = winner_set[i];
            }
        }
        /* Rule 3: Avoid deprecated addresses. */
        if (iter->preferred > 0) {
            DEBUG("winner for rule 3 found\n");
            winner_set[i] += RULE_3_PTS;
            if (winner_set[i] > max_pts) {
                max_pts = winner_set[i];
            }
        }
        /* Rule 4: Prefer home addresses.
         * Does not apply, gnrc does not support Mobile IP.
         * TODO: update as soon as gnrc supports Mobile IP */
        /* Rule 5: Prefer outgoing interface.
         * RFC 6724 says:
         * "It is RECOMMENDED that the candidate source addresses be the set of
         *  unicast addresses assigned to the interface that will be used to
         *  send to the destination (the "outgoing" interface).  On routers,
         *  the candidate set MAY include unicast addresses assigned to any
         *  interface that forwards packets, subject to the restrictions
         *  described below."
         * Currently this implementation uses ALWAYS source addresses assigned
         * to the outgoing interface. Hence, Rule 5 is always fulfilled. */
        /* Rule 6: Prefer matching label.
         * Flow labels are currently not supported by gnrc.
         * TODO: update as soon as gnrc supports flow labels */
        /* Rule 7: Prefer temporary addresses.
         * Temporary addresses are currently not supported by gnrc.
         * TODO: update as soon as gnrc supports temporary addresses */
    }

    /* reset candidate set to mark winners */
    memset(candidate_set, 0, (GNRC_IPV6_NETIF_ADDR_NUMOF / 8) + 1);
    /* check if we have a clear winner */
    /* collect candidates with maximum points */
    for (int i = 0; i < GNRC_IPV6_NETIF_ADDR_NUMOF; i++) {
        if (winner_set[i] == max_pts) {
            bf_set(candidate_set, i);
        }
    }

    /* otherwise apply rule 8: Use longest matching prefix. */
    ipv6_addr_t *res = NULL;
    _find_by_prefix_unsafe(&res, iface, dst, candidate_set);
    return res;
}
/**
 * @brief   Build and send a router advertisement on interface @p iface.
 *
 * Assembles the RA option chain (PIOs — or 6LoWPAN prefix/context/ABR
 * options on 6LoWPAN interfaces — MTU, SL2A), then the RA header and
 * lower-layer headers, and hands the packet to the IPv6 thread.
 *
 * Fixes over the previous version: the error paths for PIO/6CO/ABRO
 * allocation failures inside the 6LoWPAN branch returned without releasing
 * the interface mutex (deadlocking later users of the interface), and the
 * M/O flags were read from @p ipv6_iface after the mutex was released.
 *
 * @param[in] iface  PID of the interface to advertise on
 * @param[in] src    source address, or NULL to run source selection
 * @param[in] dst    destination address, or NULL for all-nodes multicast
 * @param[in] fin    true to send a final RA (router lifetime 0)
 */
void gnrc_ndp_internal_send_rtr_adv(kernel_pid_t iface, ipv6_addr_t *src, ipv6_addr_t *dst,
                                    bool fin)
{
    gnrc_pktsnip_t *hdr, *pkt = NULL;
    ipv6_addr_t all_nodes = IPV6_ADDR_ALL_NODES_LINK_LOCAL;
    gnrc_ipv6_netif_t *ipv6_iface = gnrc_ipv6_netif_get(iface);
    uint32_t reach_time = 0, retrans_timer = 0;
    uint16_t adv_ltime = 0;
    uint8_t cur_hl = 0;

    if (dst == NULL) {
        dst = &all_nodes;
    }
    DEBUG("ndp internal: send router advertisement (iface: %" PRIkernel_pid ", dst: %s%s\n",
          iface, ipv6_addr_to_str(addr_str, dst, sizeof(addr_str)), fin ? ", final" : "");
    mutex_lock(&ipv6_iface->mutex);
#ifdef MODULE_GNRC_SIXLOWPAN_ND_ROUTER
    if (!(ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_SIXLOWPAN)) {
#endif
        hdr = _add_pios(ipv6_iface, pkt);
        if (hdr == NULL) {
            /* pkt already released in _add_pios */
            mutex_unlock(&ipv6_iface->mutex);
            return;
        }
        pkt = hdr;
#ifdef MODULE_GNRC_SIXLOWPAN_ND_ROUTER
    }
    else {
        gnrc_sixlowpan_nd_router_abr_t *abr = gnrc_sixlowpan_nd_router_abr_get();
        if (abr != NULL) {
            gnrc_sixlowpan_nd_router_prf_t *prf = abr->prfs;
            /* add prefixes from border router */
            while (prf) {
                bool processed_before = false;
                /* skip if prefix does not belong to iface */
                if (prf->iface != ipv6_iface) {
                    prf = prf->next;
                    continue;
                }
                /* skip if prefix has been processed already */
                for (gnrc_sixlowpan_nd_router_prf_t *tmp = abr->prfs; tmp != prf;
                     tmp = tmp->next) {
                    if ((processed_before = _check_prefixes(prf->prefix, tmp->prefix))) {
                        break;
                    }
                }
                if (processed_before) {
                    prf = prf->next;
                    continue;
                }
                if (_pio_from_iface_addr(&hdr, prf->prefix, pkt)) {
                    if (hdr != NULL) {
                        pkt = hdr;
                    }
                    else {
                        DEBUG("ndp rtr: error allocating PIO\n");
                        /* BUG FIX: release the interface mutex before bailing
                         * out (was missing — left the mutex locked forever) */
                        mutex_unlock(&ipv6_iface->mutex);
                        gnrc_pktbuf_release(pkt);
                        return;
                    }
                }
                prf = prf->next;
            }
            /* advertise all compression contexts of the border router */
            for (unsigned int i = 0; i < GNRC_SIXLOWPAN_CTX_SIZE; i++) {
                gnrc_sixlowpan_ctx_t *ctx;
                if (!bf_isset(abr->ctxs, i)) {
                    continue;
                }
                ctx = gnrc_sixlowpan_ctx_lookup_id(i);
                hdr = gnrc_sixlowpan_nd_opt_6ctx_build(ctx->prefix_len, ctx->flags_id,
                                                       ctx->ltime, &ctx->prefix, pkt);
                if (hdr == NULL) {
                    DEBUG("ndp rtr: error allocating 6CO\n");
                    /* BUG FIX: missing unlock on this error path */
                    mutex_unlock(&ipv6_iface->mutex);
                    gnrc_pktbuf_release(pkt);
                    return;
                }
                pkt = hdr;
            }
            hdr = gnrc_sixlowpan_nd_opt_abr_build(abr->version, abr->ltime, &abr->addr, pkt);
            if (hdr == NULL) {
                DEBUG("ndp internal: error allocating ABRO.\n");
                /* BUG FIX: missing unlock on this error path */
                mutex_unlock(&ipv6_iface->mutex);
                gnrc_pktbuf_release(pkt);
                return;
            }
            pkt = hdr;
        }
    }
#endif /* MODULE_GNRC_SIXLOWPAN_ND_ROUTER */
    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_MTU) {
        if ((hdr = gnrc_ndp_opt_mtu_build(ipv6_iface->mtu, pkt)) == NULL) {
            DEBUG("ndp rtr: no space left in packet buffer\n");
            mutex_unlock(&ipv6_iface->mutex);
            gnrc_pktbuf_release(pkt);
            return;
        }
        pkt = hdr;
    }
    if (src == NULL) {
        mutex_unlock(&ipv6_iface->mutex);
        /* get address from source selection algorithm */
        src = gnrc_ipv6_netif_find_best_src_addr(iface, dst);
        mutex_lock(&ipv6_iface->mutex);
    }
    /* add SL2A for source address */
    if (src != NULL) {
        DEBUG(" - SL2A\n");
        uint8_t l2src[8];
        size_t l2src_len;
        /* optimization note: MAY also be omitted to facilitate in-bound load balancing over
         * replicated interfaces.
         * source: https://tools.ietf.org/html/rfc4861#section-6.2.3 */
        l2src_len = _get_l2src(iface, l2src, sizeof(l2src));
        if (l2src_len > 0) {
            /* add source address link-layer address option */
            hdr = gnrc_ndp_opt_sl2a_build(l2src, l2src_len, pkt);
            if (hdr == NULL) {
                DEBUG("ndp internal: error allocating Source Link-layer address option.\n");
                mutex_unlock(&ipv6_iface->mutex);
                gnrc_pktbuf_release(pkt);
                return;
            }
            pkt = hdr;
        }
    }
    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_CUR_HL) {
        cur_hl = ipv6_iface->cur_hl;
    }
    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_REACH_TIME) {
        if (ipv6_iface->reach_time > (3600 * SEC_IN_USEC)) { /* reach_time > 1 hour */
            reach_time = (3600 * SEC_IN_MS);
        }
        else {
            reach_time = ipv6_iface->reach_time / MS_IN_USEC;
        }
    }
    if (ipv6_iface->flags & GNRC_IPV6_NETIF_FLAGS_ADV_RETRANS_TIMER) {
        retrans_timer = ipv6_iface->retrans_timer / MS_IN_USEC;
    }
    if (!fin) {
        adv_ltime = ipv6_iface->adv_ltime;
    }
    /* snapshot the M/O flags while the mutex is still held (they were
     * previously read from ipv6_iface after unlocking) */
    uint8_t mo_flags = (ipv6_iface->flags & (GNRC_IPV6_NETIF_FLAGS_OTHER_CONF |
                                             GNRC_IPV6_NETIF_FLAGS_MANAGED)) >> 8;
    mutex_unlock(&ipv6_iface->mutex);
    hdr = gnrc_ndp_rtr_adv_build(cur_hl, mo_flags, adv_ltime, reach_time, retrans_timer, pkt);
    if (hdr == NULL) {
        DEBUG("ndp internal: error allocating router advertisement.\n");
        gnrc_pktbuf_release(pkt);
        return;
    }
    pkt = hdr;
    hdr = _build_headers(iface, pkt, dst, src);
    if (hdr == NULL) {
        DEBUG("ndp internal: error adding lower-layer headers.\n");
        gnrc_pktbuf_release(pkt);
        return;
    }
    else if (gnrc_netapi_send(gnrc_ipv6_pid, hdr) < 1) {
        DEBUG("ndp internal: unable to send router advertisement\n");
        gnrc_pktbuf_release(hdr);
    }
}
/**
 * @brief   Handle a received ICN packet (Interest, Content, or Background).
 *
 * Dispatches on the ICN packet type: answers Interests when this node has
 * content (or forwards them to the content store and records a PIT entry),
 * forwards Content along the PIT entry or records it locally, and discards
 * background traffic.
 *
 * Fixes over the previous version: the packet is now released when the
 * first snip is not a NETIF header (was leaked), and the dead initialization
 * of `netif_pkt` (immediately overwritten) was removed.
 *
 * @param[in] pkt  received packet; ownership is taken (released on all paths)
 */
static void rcv(ng_pktsnip_t *pkt)
{
    ng_netif_hdr_t *netif_hdr;
    ng_pktsnip_t *netif_pkt = pkt->next;

    if (netif_pkt->type != NG_NETTYPE_NETIF) {
        LOG_ERROR("unknown snippet\n");
        /* BUG FIX: the packet was leaked on this path */
        ng_pktbuf_release(pkt);
        return;
    }

    netif_hdr = netif_pkt->data;
    LOG_DEBUG("Received packet from %s\n",
              ng_netif_addr_to_str(l2addr_str, sizeof(l2addr_str),
                                   ng_netif_hdr_get_src_addr(netif_hdr),
                                   netif_hdr->src_l2addr_len));

    icn_pkt_t *icn_pkt = (icn_pkt_t *)pkt->data;
    //od_hex_dump(pkt->data, pkt->size, OD_WIDTH_DEFAULT);

    switch (icn_pkt->type) {
        case ICN_INTEREST:
            if (HAS_CONTENT) {
                LOG_INFO("received interest, have content, sequence number is %u\n",
                         icn_pkt->seq);
                icn_initContent((eui64_t *)ng_netif_hdr_get_src_addr(netif_hdr),
                                icn_pkt->seq);
                ng_pktbuf_release(pkt);
            }
            else {
                /* remember the requester so the content can be sent back */
                memcpy(&pit_entry, ng_netif_hdr_get_src_addr(netif_hdr), ADDR_LEN_64B);
                pit_ctr++;
                /* forward to CS node; detach and drop the netif header first */
                pkt->next = NULL;
                ng_pktbuf_release(netif_pkt);
                icn_send(CONTENT_STORE, pkt);
            }
            break;
        case ICN_CONTENT:
            if (pit_ctr) {
                LOG_DEBUG("Forwarding chunk to PIT entry: %s\n",
                          ng_netif_addr_to_str(l2addr_str, sizeof(l2addr_str),
                                               pit_entry.uint8, ADDR_LEN_64B));
                pkt->next = NULL;
                ng_pktbuf_release(netif_pkt);
                icn_send(&pit_entry, pkt);
                if (--pit_ctr <= 0) {
                    pit_ctr = 0;
                }
            }
            else if (WANT_CONTENT) {
                uint16_t seq = icn_pkt->seq;
                if (bf_isset(received_chunks, seq)) {
                    LOG_ERROR("Duplicate chunk number %u\n", seq);
                }
                else {
                    LOG_INFO("received content that I was waiting for with sequence number %u\n",
                             seq);
                    LOG_INFO("Current send counter is %u\n", send_counter);
                    bf_set(received_chunks, seq);
                }
                /* track the highest sequence number seen so far */
                receive_counter = (receive_counter < seq) ? seq : receive_counter;
                ng_pktbuf_release(pkt);
            }
            else {
                LOG_ERROR("Received content, but no PIT entry is available\n");
                ng_pktbuf_release(pkt);
            }
            break;
        case ICN_BACKGROUND:
            ng_pktbuf_release(pkt);
            break;
        default:
            LOG_ERROR("unexpected packet received\n");
            ng_pktbuf_release(pkt);
    }
}