/**
 * Wrap an EAP payload buffer into a PANA EAP-Payload AVP.
 *
 * Prepends an 8-byte AVP base header (AVP_EAP_PAYLOAD_CODE) and, because
 * PANA AVPs must be 32-bit aligned (RFC 5191), appends zero padding so the
 * total AVP length is a multiple of 4.
 *
 * \param buf buffer holding the raw EAP payload; ownership is taken
 * \return the (possibly reallocated) buffer, or NULL if headroom
 *         allocation failed (buffer_headroom frees the buffer on failure —
 *         NOTE(review): assumed from usage pattern, confirm in buffer API)
 */
static buffer_t *pana_eap_payload_to_avp(buffer_t *buf)
{
    uint8_t *ptr;
    uint16_t eap_len, padding;
    eap_len = buffer_data_length(buf);
    padding = eap_len;
    if ((buf = buffer_headroom(buf, 8)) == NULL) {
        return NULL;
    }
    buffer_data_reserve_header(buf, 8);
    ptr = buffer_data_pointer(buf);
    ptr = pana_avp_base_write(AVP_EAP_PAYLOAD_CODE, eap_len, ptr, 0, 0);
    /* AVP value must be padded to a 32-bit boundary. The pad bytes are
     * appended at the tail: grow the headroom, shift the AVP forward, then
     * zero the trailing pad so uninitialized memory never reaches the wire. */
    padding %= 4;
    if (padding) {
        padding = 4 - padding;
        if ((buf = buffer_headroom(buf, padding)) == NULL) {
            return NULL;
        }
        buffer_data_reserve_header(buf, padding);
        uint8_t *dst = buffer_data_pointer(buf);
        /* Move AVP header + payload (8 + eap_len bytes) to the new start;
         * regions overlap, so memmove is required. */
        memmove(dst, dst + padding, eap_len + 8);
        /* Zero the tail padding (was left uninitialized before this fix). */
        memset(dst + 8 + eap_len, 0, padding);
    }
    return buf;
}
/**
 * Wrap a PANA message buffer for relaying: prepend a PaC-Information AVP
 * (address + port of the PANA client) and a Relay-Message AVP header.
 *
 * Layout prepended (36 bytes total):
 *   PAC_INFO AVP header (8) + address (16) + port (2) + PAC padding (2)
 *   + RELAY_MSG AVP header (8); the relayed message data follows.
 *
 * \param buf   buffer holding the message to relay; ownership is taken
 * \param suite security suite; user_server selects server-side relaying
 * \return the (possibly reallocated) buffer, or NULL on headroom failure
 */
buffer_t *pana_relay_avp_build(buffer_t *buf, sec_suite_t *suite)
{
    uint8_t *ptr, *adr_ptr;
    uint16_t relay_len, padding;
    relay_len = buffer_data_length(buf);
    padding = relay_len;
    buf->socket = socket_dereference(buf->socket);
    buf->session_ptr = NULL;
    if ((buf = buffer_headroom(buf, 36)) == NULL) {
        return buf;
    } else {
        buffer_data_reserve_header(buf, 36);
        ptr = buffer_data_pointer(buf);
        ptr = pana_avp_base_write(AVP_PAC_INFO_CODE, 18, ptr, 0, 0);
        //SET Relay IPV6 address
        if (suite->pana_session.user_server) {
            /* Server side: PaC info comes from the stored session, and the
             * packet is addressed to the relay node. */
            memcpy(ptr, suite->session_address, 16);
            ptr += 16;
            ptr = common_write_16_bit(suite->session_port, ptr);
            adr_ptr = protocol_6lowpan_nd_border_router_address_get(buf->interface->nwk_id);
            if (adr_ptr) {
                memcpy(buf->src_sa.address, adr_ptr, 16);
                memcpy(buf->dst_sa.address, suite->pana_session.session_relay_address, 16);
                buf->dst_sa.port = suite->pana_session.relay_port;
            }
        } else {
            /* Relay side: PaC info is the packet's own source address/port. */
            memcpy(ptr, buf->src_sa.address, 16);
            ptr += 16;
            ptr = common_write_16_bit(buf->src_sa.port, ptr);
        }
        //PADDING for PAC
        ptr = common_write_16_bit(0, ptr);
        //PANA Relay AVP header Write data is already there
        ptr = pana_avp_base_write(AVP_RELAY_MSG_CODE, relay_len, ptr, 0, 0);
    }
    //Enable security for relay allways by Default
    buf->options.ll_security_bypass_tx = false;
    /* Pad the Relay-Message AVP value to a 32-bit boundary (the 36-byte
     * prefix is already 4-aligned, so only relay_len matters). */
    padding %= 4;
    if (padding) {
        padding = 4 - padding;
        if ((buf = buffer_headroom(buf, padding)) != NULL) {
            uint8_t *ptr2;
            buffer_data_reserve_header(buf, padding);
            ptr = buffer_data_pointer(buf);
            ptr2 = ptr;
            ptr += padding;
            memmove(ptr2, ptr, relay_len + 36);
            /* Zero the tail padding so uninitialized headroom bytes are
             * never transmitted (was left unwritten before this fix). */
            memset(ptr2 + relay_len + 36, 0, padding);
        }
    }
    return buf;
}
/**
 * Prepend a PANA header to a payload buffer and prime the buffer's
 * addressing/socket fields so it can be pushed down to UDP.
 *
 * \param buf    payload buffer (may be a received buffer being reused)
 * \param header PANA header to write; payload_len is filled in here
 * \param suite  security suite providing destination port and interface
 * \return the prepared buffer, or NULL if headroom allocation failed
 */
buffer_t *build_pana_base(buffer_t *buf, pana_header_t *header, sec_suite_t *suite)
{
    buf->session_ptr = NULL;
    buf = buffer_headroom(buf, PANA_HEADER_LENGTH);
    if (!buf) {
        return NULL;
    }

    /* The buffer may be a received one being turned around for reply. */
    buf = buffer_turnaround(buf);
    uint8_t *hdr_ptr = buffer_data_reserve_header(buf, PANA_HEADER_LENGTH);
    header->payload_len = buffer_data_length(buf);
    pana_header_write(hdr_ptr, header);

    buf->src_sa.port = UDP_PORT_PANA;
    buf->dst_sa.port = suite->session_port;
    buf->info = (buffer_info_t)(B_DIR_DOWN + B_FROM_APP + B_TO_UDP);

    /* Relay messages sent by a non-server node carry no session/socket
     * reference; everything else is bound to the PANA socket + session. */
    bool detach_session = (header->type == PANA_MSG_RELAY) && !suite->pana_session.user_server;
    if (detach_session) {
        buf->socket = socket_dereference(buf->socket);
        buf->session_ptr = NULL;
    } else {
        buffer_socket_set(buf, socket_pointer_get(pana_socket));
        buf->session_ptr = suite;
    }

    buf->interface = suite->interface;
    tr_debug("PANA len: %d", header->payload_len);
    return buf;
}
/**
 * Wrap an EAP payload in its AVP, optionally prepend a Nonce AVP, and
 * push the result down the PANA stack.
 *
 * \param buf   EAP payload buffer; ownership is taken (dropped on failure)
 * \param nonce optional 16-byte nonce; when non-NULL a Nonce AVP is added
 * \param suite security suite the message belongs to
 */
void pana_eap_payload_down(buffer_t *buf, const uint8_t *nonce, sec_suite_t *suite)
{
    buf = pana_eap_payload_to_avp(buf);
    if (!buf) {
        return;
    }
    if (nonce) {
        /* Nonce AVP: 8-byte AVP header + 16-byte nonce value = 24 bytes. */
        if ((buf = buffer_headroom(buf, 24)) == NULL) {
            return;
        }
        buffer_data_reserve_header(buf, 24);
        uint8_t *ptr = buffer_data_pointer(buf);
        /* Return value (advanced pointer) intentionally unused — the AVP
         * exactly fills the 24 reserved bytes. (Dead store removed.) */
        pana_avp_write_n_bytes(AVP_NONCE_CODE, 16, nonce, ptr);
    }
    pana_down(buf, suite);
}
/* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Option Type | Opt Data Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |O|R|F|0|0|0|0|0| RPLInstanceID | SenderRank | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | (sub-TLVs) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Figure 1: RPL Option */ static buffer_t *rpl_data_exthdr_provider_hbh_2(buffer_t *buf, rpl_instance_t *instance, rpl_neighbour_t *neighbour, ipv6_exthdr_stage_t stage, int16_t *result) { ipv6_route_info_t *route_info = &buf->route->route_info; /* This can be called both for routes which only use HbH headers (eg DIO) * as well as one-hop DAO_SR routes which would normally use source routing * headers, if there was more than one hop. For DAO_SR, neighbour will be * NULL. */ rpl_dodag_t *dodag = rpl_instance_current_dodag(instance); if (!dodag) { *result = -1; return buf; } bool destination_in_instance = false; uint16_t ext_size = 0; if (addr_ipv6_equal(route_info->next_hop_addr, buf->dst_sa.address) || addr_ipv6_equal(buf->dst_sa.address, dodag->id)) { destination_in_instance = true; if (buf->rpl_option) { /* Forwarding an existing option - preserve it */ uint8_t opt_size = buf->rpl_option[0]; ext_size = 2 + opt_size; ext_size = (ext_size + 7) &~ 7; } else { /* Generating our own option - fixed size, no TLVs */ ext_size = 8; } } switch (stage) { case IPV6_EXTHDR_SIZE: *result = ext_size; return buf; case IPV6_EXTHDR_INSERT: { if (!destination_in_instance) { /* We don't add a header - we'll do it on the tunnel */ *result = 0; return buf; } buf = buffer_headroom(buf, ext_size); if (!buf) { return NULL; } uint8_t *ext = buffer_data_reserve_header(buf, ext_size); ext[0] = buf->options.type; buf->options.type = IPV6_NH_HOP_BY_HOP; ext[1] = ext_size / 8 - 1; uint8_t *opt = ext + 2; opt[0] = IPV6_OPTION_RPL; if (buf->rpl_option) { /* Get back the RPL option we 
stripped off an outer IP header */ memcpy(opt + 1, buf->rpl_option, 1 + buf->rpl_option[0]); ns_dyn_mem_free(buf->rpl_option); buf->rpl_option = NULL; } else { opt[1] = 4; // option length opt[2] = 0; // placeholder opt[3] = instance->id; /* For upwards routes we can deduce that DODAGID must be * the destination, so set the D flag. */ if (rpl_instance_id_is_local(instance->id) && !rpl_data_is_rpl_downward_route(route_info->source)) { opt[3] |= RPL_INSTANCE_DEST; } common_write_16_bit(RPL_RANK_INFINITE, opt + 4); // SenderRank (placeholder) } /* Pad HbH header if necessary. */ uint8_t pad_len = ext + ext_size - (opt + 2 + opt[1]); if (pad_len == 1) { opt[0] = IPV6_OPTION_PAD1; } else if (pad_len > 1) { opt[0] = IPV6_OPTION_PADN; opt[1] = pad_len - 2; memset(opt + 2, 0, pad_len - 2); } // don't forget to set the "RPL option present" marker buf->options.ip_extflags |= IPEXT_HBH_RPL; *result = 0; return buf; } case IPV6_EXTHDR_MODIFY: { uint8_t *opt; uint16_t sender_rank; rpl_data_locate_info(buf, &opt, NULL); if (!opt) { *result = IPV6_EXTHDR_MODIFY_TUNNEL; // Tunnel to next hop in general case, but if going to DODAGID, // it can tunnel all the way (and it HAS to if it is a local // DODAG). if (!addr_ipv6_equal(buf->dst_sa.address, dodag->id)) { memcpy(buf->dst_sa.address, route_info->next_hop_addr, 16); } buf->src_sa.addr_type = ADDR_NONE; // force auto-selection return buf; } if (buf->ip_routed_up) { /* Check for rank errors - RFC 6550 11.2.2.2. */ /* Note that RPL spec does not say that packets from nodes of * equal rank are errors, but we treat them as such to get * reliable sibling loop detection - we require sender rank to be * strictly less for Down packets and strictly greater for Up. */ sender_rank = common_read_16_bit(opt + 4); rpl_cmp_t cmp = rpl_rank_compare_dagrank_rank(dodag, sender_rank, instance->current_rank); rpl_cmp_t expected_cmp = (opt[2] & RPL_OPT_DOWN) ? 
RPL_CMP_LESS : RPL_CMP_GREATER; if (cmp != expected_cmp) { /* Set the Rank-Error bit; if already set, drop */ if (opt[2] & RPL_OPT_RANK_ERROR) { protocol_stats_update(STATS_RPL_ROUTELOOP, 1); tr_info("Forwarding inconsistency R"); rpl_instance_inconsistency(instance); *result = -1; return buf; } else { opt[2] |= RPL_OPT_RANK_ERROR; } } } if (buf->rpl_flag_error & RPL_OPT_FWD_ERROR) { opt[2] |= RPL_OPT_FWD_ERROR; } else if (rpl_data_is_rpl_downward_route(route_info->source)) { opt[2] |= RPL_OPT_DOWN; } else { opt[2] &= ~RPL_OPT_DOWN; } /* Set the D flag for local instances */ if (rpl_instance_id_is_local(instance->id)) { if (addr_ipv6_equal(dodag->id, buf->dst_sa.address)) { opt[3] |= RPL_INSTANCE_DEST; } else if (addr_ipv6_equal(dodag->id, buf->src_sa.address)) { opt[3] &=~ RPL_INSTANCE_DEST; } else { tr_error("Local instance invalid %s[%d]: %s -> %s", trace_ipv6(dodag->id), instance->id, trace_ipv6(buf->src_sa.address), trace_ipv6(buf->dst_sa.address)); *result = -1; return buf; } } /* RPL 11.2.2.2. says we set SenderRank to infinite when forwarding * across a version discontinuity. (Must be up - we don't know versions * of downward routes). */ if ((buf->rpl_flag_error & RPL_OPT_FWD_ERROR) || rpl_data_is_rpl_downward_route(route_info->source) || !neighbour || neighbour->dodag_version == instance->current_dodag_version) { sender_rank = nrpl_dag_rank(dodag, instance->current_rank); } else { sender_rank = RPL_RANK_INFINITE; } common_write_16_bit(sender_rank, opt + 4); *result = 0; return buf; } default: return buffer_free(buf); } }