static int start_https(struct http_client_ctx *ctx) { struct k_sem startup_sync; /* Start the thread that handles HTTPS traffic. */ if (ctx->https.tid) { return -EALREADY; } NET_DBG("Starting HTTPS thread for %p", ctx); k_sem_init(&startup_sync, 0, 1); ctx->https.tid = k_thread_create(&ctx->https.thread, ctx->https.stack, ctx->https.stack_size, (k_thread_entry_t)https_handler, ctx, &startup_sync, 0, K_PRIO_COOP(7), 0, 0); /* Wait until we know that the HTTPS thread startup was ok */ if (k_sem_take(&startup_sync, HTTPS_STARTUP_TIMEOUT) < 0) { https_shutdown(ctx); return -ECANCELED; } NET_DBG("HTTPS thread %p started for %p", ctx->https.tid, ctx); return 0; }
/* http_parser callback invoked for each chunk of response body data.
 *
 * Accounts the processed length, remembers where the body starts in the
 * response buffer and, if the user registered a callback, hands over the
 * partial data and resets the fill level of the result buffer.
 *
 * @return 0 always (parser continues).
 */
static int on_body(struct http_parser *parser, const char *at, size_t length)
{
	struct http_client_ctx *ctx = CONTAINER_OF(parser,
						   struct http_client_ctx,
						   parser);

	ctx->rsp.body_found = 1;
	ctx->rsp.processed += length;

	NET_DBG("Processed %zd length %zd", ctx->rsp.processed, length);

	if (ctx->rsp.body_start == NULL) {
		/* First body chunk: record where the body begins. */
		ctx->rsp.body_start = (u8_t *)at;
	}

	if (ctx->rsp.cb != NULL) {
		NET_DBG("Calling callback for partitioned %zd len data",
			ctx->rsp.data_len);

		ctx->rsp.cb(ctx, ctx->rsp.response_buf,
			    ctx->rsp.response_buf_len, ctx->rsp.data_len,
			    HTTP_DATA_MORE, ctx->req.user_data);

		/* Re-use the result buffer and start to fill it again */
		ctx->rsp.data_len = 0;
	}

	return 0;
}
/* Bluetooth L2 receive handler: 6LoWPAN-decompress an incoming buffer and
 * re-base its link-layer address pointers onto the new first fragment.
 *
 * @return NET_CONTINUE on success, NET_DROP if decompression failed.
 */
static enum net_verdict net_bt_recv(struct net_if *iface, struct net_buf *buf)
{
	uint32_t src;
	uint32_t dst;

	NET_DBG("iface %p buf %p len %u", iface, buf, net_buf_frags_len(buf));

	/* Uncompress will drop the current fragment. Buf ll src/dst address
	 * will then be wrong and must be updated according to the new fragment.
	 */
	/* Save the ll addresses as byte offsets from the ll header start. */
	src = net_nbuf_ll_src(buf)->addr ?
		net_nbuf_ll_src(buf)->addr - net_nbuf_ll(buf) : 0;
	dst = net_nbuf_ll_dst(buf)->addr ?
		net_nbuf_ll_dst(buf)->addr - net_nbuf_ll(buf) : 0;

	if (!net_6lo_uncompress(buf)) {
		NET_DBG("Packet decompression failed");
		return NET_DROP;
	}

	/* Offset 0 doubles as "address was NULL" here.
	 * NOTE(review): a genuine zero offset would be indistinguishable
	 * from NULL - presumably ll addresses never sit at offset 0 of the
	 * ll header; confirm against net_nbuf layout.
	 */
	net_nbuf_ll_src(buf)->addr = src ? net_nbuf_ll(buf) + src : NULL;
	net_nbuf_ll_dst(buf)->addr = dst ? net_nbuf_ll(buf) + dst : NULL;

	return NET_CONTINUE;
}
static void ipsp_recv(struct bt_l2cap_chan *chan, struct net_buf *buf) { struct bt_context *ctxt = CHAN_CTXT(chan); struct net_buf *nbuf; NET_DBG("Incoming data channel %p len %u", chan, net_buf_frags_len(buf)); /* Get buffer for bearer / protocol related data */ nbuf = net_nbuf_get_reserve_rx(0); /* Set destination address */ net_nbuf_ll_dst(nbuf)->addr = ctxt->src.val; net_nbuf_ll_dst(nbuf)->len = sizeof(ctxt->src); /* Set source address */ net_nbuf_ll_src(nbuf)->addr = ctxt->dst.val; net_nbuf_ll_src(nbuf)->len = sizeof(ctxt->dst); /* Add data buffer as fragment of RX buffer, take a reference while * doing so since L2CAP will unref the buffer after return. */ net_buf_frag_add(nbuf, net_buf_ref(buf)); if (net_recv_data(ctxt->iface, nbuf) < 0) { NET_DBG("Packet dropped by NET stack"); net_nbuf_unref(nbuf); } }
/* TX fiber main loop: pulls application packets from the TX queue, hands
 * them to the uIP stack and runs pending Contiki events until drained.
 * Never returns.
 */
static void net_tx_fiber(void)
{
	NET_DBG("Starting TX fiber\n");

	while (1) {
		struct net_buf *buf;
		uint8_t run;

		/* Get next packet from application - wait if necessary */
		buf = nano_fifo_get_wait(&netdev.tx_queue);

		NET_DBG("Sending (buf %p, len %u) to IP stack\n",
			buf, buf->len);

		if (check_and_send_packet(buf) < 0) {
			/* Release buffer on error */
			/* NOTE(review): net_buf_put() is used here as the
			 * release primitive - confirm it returns the buffer
			 * to its free pool rather than re-queueing it.
			 */
			net_buf_put(buf);
			continue;
		}

		NET_BUF_CHECK_IF_NOT_IN_USE(buf);

		/* Check for any events that we might need to process */
		do {
			run = process_run(buf);
		} while (run > 0);

		/* Check stack usage (no-op if not enabled) */
		analyze_stacks(buf, &buf);
	}
}
/* Allocate an L2 buffer from the free pool with @reserve_head bytes of
 * headroom reserved; clears the packet buffer attributes before returning.
 *
 * @return the buffer, or NULL when the pool is exhausted.
 *
 * NOTE(review): the #endif below closes a DEBUG_L2_BUFS #if (declaring an
 * alternate signature with caller/line parameters) that starts outside
 * this view.
 */
struct net_buf *l2_buf_get_reserve(uint16_t reserve_head)
#endif
{
	struct net_buf *buf;

	buf = net_buf_get(&free_l2_bufs, reserve_head);
	if (!buf) {
#ifdef DEBUG_L2_BUFS
		NET_ERR("Failed to get free L2 buffer (%s():%d)\n",
			caller, line);
#else
		NET_ERR("Failed to get free L2 buffer\n");
#endif
		return NULL;
	}

	/* Book-keeping for the free-buffer counter. */
	dec_free_l2_bufs(buf);

	NET_BUF_CHECK_IF_NOT_IN_USE(buf);

#ifdef DEBUG_L2_BUFS
	NET_DBG("[%d] buf %p reserve %u ref %d (%s():%d)\n",
		get_free_l2_bufs(), buf, reserve_head, buf->ref,
		caller, line);
#else
	NET_DBG("buf %p reserve %u ref %d\n", buf, reserve_head, buf->ref);
#endif

	packetbuf_clear(buf);

	return buf;
}
/* net_app TCP receive callback: verifies echoed data against what was
 * sent, tracks how many bytes have come back, and triggers the next send
 * once the full expected amount has been received.
 */
static void tcp_received(struct net_app_ctx *ctx, struct net_pkt *pkt,
			 int status, void *user_data)
{
	struct data *data = ctx->user_data;

	ARG_UNUSED(user_data);
	ARG_UNUSED(status);

	/* Nothing to verify for NULL or empty packets. */
	if (!pkt || net_pkt_appdatalen(pkt) == 0) {
		if (pkt) {
			net_pkt_unref(pkt);
		}

		return;
	}

	NET_DBG("Sent %d bytes, received %u bytes",
		data->expecting_tcp, net_pkt_appdatalen(pkt));

	if (compare_tcp_data(pkt, data->expecting_tcp, data->received_tcp)) {
		data->received_tcp += net_pkt_appdatalen(pkt);
	} else {
		NET_DBG("Data mismatch");
	}

	if (data->expecting_tcp <= data->received_tcp) {
		/* Send more data */
		send_tcp_data(ctx, data);
	}

	net_pkt_unref(pkt);
}
static void ipsp_recv(struct bt_l2cap_chan *chan, struct net_buf *buf) { struct bt_context *ctxt = CHAN_CTXT(chan); struct net_pkt *pkt; NET_DBG("Incoming data channel %p len %zu", chan, net_buf_frags_len(buf)); /* Get packet for bearer / protocol related data */ pkt = net_pkt_get_reserve_rx(0, K_FOREVER); /* Set destination address */ net_pkt_ll_dst(pkt)->addr = ctxt->src.val; net_pkt_ll_dst(pkt)->len = sizeof(ctxt->src); net_pkt_ll_dst(pkt)->type = NET_LINK_BLUETOOTH; /* Set source address */ net_pkt_ll_src(pkt)->addr = ctxt->dst.val; net_pkt_ll_src(pkt)->len = sizeof(ctxt->dst); net_pkt_ll_src(pkt)->type = NET_LINK_BLUETOOTH; /* Add data buffer as fragment of RX buffer, take a reference while * doing so since L2CAP will unref the buffer after return. */ net_pkt_frag_add(pkt, net_buf_ref(buf)); if (net_recv_data(ctxt->iface, pkt) < 0) { NET_DBG("Packet dropped by NET stack"); net_pkt_unref(pkt); } }
static int net_rpl_mrhof_update_mc(struct net_rpl_instance *instance) { #if defined(CONFIG_NET_RPL_MC_NONE) instance->mc.type = NET_RPL_MC_NONE; return 0; #else u16_t path_metric; struct net_rpl_dag *dag; #if defined(CONFIG_NET_RPL_MC_ENERGY) u8_t type; instance->mc.type = NET_RPL_MC_ENERGY; #else instance->mc.type = NET_RPL_MC_ETX; #endif instance->mc.flags = NET_RPL_MC_FLAG_P; instance->mc.aggregated = NET_RPL_MC_A_ADDITIVE; instance->mc.precedence = 0; dag = instance->current_dag; if (!net_rpl_dag_is_joined(dag)) { NET_DBG("Cannot update the metric container when not joined."); return -EINVAL; } if (dag->rank == NET_RPL_ROOT_RANK(instance)) { path_metric = 0; } else { path_metric = calculate_path_metric(dag->preferred_parent); } #if defined(CONFIG_NET_RPL_MC_ETX) instance->mc.length = sizeof(instance->mc.obj.etx); instance->mc.obj.etx = path_metric; NET_DBG("My path ETX to the root is %u.%u\n", instance->mc.obj.etx / RPL_DAG_MC_ETX_DIVISOR, (instance->mc.obj.etx % RPL_DAG_MC_ETX_DIVISOR * 100) / NET_RPL_DAG_MC_ETX_DIVISOR); #elif defined(CONFIG_NET_RPL_MC_ENERGY) instance->mc.length = sizeof(instance->mc.obj.energy); if (dag->rank == NET_RPL_ROOT_RANK(instance)) { type = NET_RPL_MC_NODE_TYPE_MAINS; } else { type = NET_RPL_MC_NODE_TYPE_BATTERY; } instance->mc.obj.energy.flags = type << NET_RPL_MC_ENERGY_TYPE; instance->mc.obj.energy.estimation = path_metric; #endif return 0; #endif /* CONFIG_NET_RPL_MC_NONE */ }
/* Run a received packet through L2 processing (unless it is loopback or
 * locally routed) and then dispatch it to the IPv4/IPv6 stack based on
 * the version nibble of the IP header.
 *
 * @param pkt         Packet to process; must have at least one fragment.
 * @param is_loopback True for packets we sent to ourselves (skips L2).
 *
 * @return verdict from L2 or the IP stack; NET_DROP on corrupt or
 *         unrecognized packets.
 */
static inline enum net_verdict process_data(struct net_pkt *pkt,
					    bool is_loopback)
{
	int ret;
	bool locally_routed = false;

#if defined(CONFIG_NET_IPV6_FRAGMENT)
	/* If the packet is routed back to us when we have reassembled
	 * an IPv6 packet, then do not pass it to L2 as the packet does
	 * not have link layer headers in it.
	 */
	if (net_pkt_ipv6_fragment_start(pkt)) {
		locally_routed = true;
	}
#endif

	/* If there is no data, then drop the packet. */
	if (!pkt->frags) {
		NET_DBG("Corrupted packet (frags %p)", pkt->frags);
		net_stats_update_processing_error();
		return NET_DROP;
	}

	if (!is_loopback && !locally_routed) {
		/* Anything other than NET_CONTINUE ends processing here. */
		ret = net_if_recv_data(net_pkt_iface(pkt), pkt);
		if (ret != NET_CONTINUE) {
			if (ret == NET_DROP) {
				NET_DBG("Packet %p discarded by L2", pkt);
				net_stats_update_processing_error();
			}
			return ret;
		}
	}

	/* IP version and header length. */
	switch (NET_IPV6_HDR(pkt)->vtc & 0xf0) {
#if defined(CONFIG_NET_IPV6)
	case 0x60:
		net_stats_update_ipv6_recv();
		net_pkt_set_family(pkt, PF_INET6);
		return net_ipv6_process_pkt(pkt);
#endif
#if defined(CONFIG_NET_IPV4)
	case 0x40:
		net_stats_update_ipv4_recv();
		net_pkt_set_family(pkt, PF_INET);
		return net_ipv4_process_pkt(pkt);
#endif
	}

	NET_DBG("Unknown IP family packet (0x%x)",
		NET_IPV6_HDR(pkt)->vtc & 0xf0);
	net_stats_update_ip_errors_protoerr();
	net_stats_update_ip_errors_vhlerr();

	return NET_DROP;
}
/* Allocate an RX or TX IP buffer and record @reserve_head as the header
 * size so that the application-data pointer can be computed.
 *
 * @return the buffer, or NULL when the selected pool is empty.
 *
 * NOTE(review): the #endif below closes a debug #if (alternate signature
 * with caller/line parameters) that starts outside this view.
 */
static struct net_buf *ip_buf_get_reserve(enum ip_buf_type type,
					  uint16_t reserve_head)
#endif
{
	struct net_buf *buf = NULL;

	/* Note that we do not reserve any space in front of the
	 * buffer so buf->data points to first byte of the IP header.
	 * This is done like this so that IP stack works the same
	 * way as BT and 802.15.4 stacks.
	 *
	 * The reserve_head variable in the function will tell
	 * the size of the IP + other headers if there are any.
	 * That variable is only used to calculate the pointer
	 * where the application data starts.
	 */
	switch (type) {
	case IP_BUF_RX:
		buf = net_buf_get(&free_rx_bufs, 0);
		/* NOTE(review): called before the NULL check below -
		 * presumably dec_free_rx_bufs() tolerates NULL; confirm.
		 */
		dec_free_rx_bufs(buf);
		break;
	case IP_BUF_TX:
		buf = net_buf_get(&free_tx_bufs, 0);
		/* NOTE(review): same NULL-tolerance assumption as above. */
		dec_free_tx_bufs(buf);
		break;
	}

	if (!buf) {
#ifdef DEBUG_IP_BUFS
		NET_ERR("Failed to get free %s buffer (%s():%d)\n",
			type2str(type), caller, line);
#else
		NET_ERR("Failed to get free %s buffer\n", type2str(type));
#endif
		return NULL;
	}

	/* Initialize the buffer metadata and claim the header area. */
	ip_buf_type(buf) = type;
	ip_buf_appdata(buf) = buf->data + reserve_head;
	ip_buf_appdatalen(buf) = 0;
	ip_buf_reserve(buf) = reserve_head;
	net_buf_add(buf, reserve_head);

	NET_BUF_CHECK_IF_NOT_IN_USE(buf);

#ifdef DEBUG_IP_BUFS
	NET_DBG("%s [%d] buf %p reserve %u ref %d (%s():%d)\n",
		type2str(type), get_frees(type), buf, reserve_head,
		buf->ref, caller, line);
#else
	NET_DBG("%s buf %p reserve %u ref %d\n", type2str(type), buf,
		reserve_head, buf->ref);
#endif

	return buf;
}
/* Bluetooth L2 receive handler (net_pkt API): 6LoWPAN-decompress the
 * packet in place.
 *
 * @return NET_CONTINUE on success, NET_DROP if decompression failed.
 */
static enum net_verdict net_bt_recv(struct net_if *iface, struct net_pkt *pkt)
{
	NET_DBG("iface %p pkt %p len %zu", iface, pkt, net_pkt_get_len(pkt));

	if (net_6lo_uncompress(pkt)) {
		return NET_CONTINUE;
	}

	NET_DBG("Packet decompression failed");

	return NET_DROP;
}
/* Take a reference on a neighbor entry and return it.
 *
 * NOTE(review): the #endif below closes a debug #if (alternate signature
 * providing the caller/line arguments used in the debug print) that
 * starts outside this view.
 */
struct net_nbr *net_nbr_ref(struct net_nbr *nbr)
#endif
{
#if defined(CONFIG_NET_DEBUG_IPV6_NBR_CACHE)
	NET_DBG("nbr %p ref %u (%s():%d)", nbr, nbr->ref + 1, caller, line);
#else
	NET_DBG("nbr %p ref %u", nbr, nbr->ref + 1);
#endif

	nbr->ref++;

	return nbr;
}
/* Process a packet and free it unless the stack consumed it (NET_OK). */
static void processing_data(struct net_pkt *pkt, bool is_loopback)
{
	enum net_verdict verdict = process_data(pkt, is_loopback);

	if (verdict == NET_OK) {
		NET_DBG("Consumed pkt %p", pkt);
		return;
	}

	/* NET_DROP and any unexpected verdict: release the packet. */
	NET_DBG("Dropping pkt %p", pkt);
	net_pkt_unref(pkt);
}
static void dhcpv4_timeout(struct k_work *work) { struct net_if *iface = CONTAINER_OF(work, struct net_if, dhcpv4_timeout); if (!iface) { NET_DBG("Invalid iface"); return; } switch (iface->dhcpv4.state) { case NET_DHCPV4_DISCOVER: /* Failed to get OFFER message, send DISCOVER again */ send_discover(iface); break; case NET_DHCPV4_REQUEST: /* * Maximum number of renewal attempts failed, so start * from the beginning. */ if (iface->dhcpv4.attempts >= DHCPV4_MAX_NUMBER_OF_ATTEMPTS) { send_discover(iface); } else { /* Repeat requests until max number of attempts */ send_request(iface, false); } break; case NET_DHCPV4_RENEWAL: if (iface->dhcpv4.attempts >= DHCPV4_MAX_NUMBER_OF_ATTEMPTS) { if (!net_if_ipv4_addr_rm(iface, &iface->dhcpv4.requested_ip)) { NET_DBG("Failed to remove addr from iface"); } /* * Maximum number of renewal attempts failed, so start * from the beginning. */ send_discover(iface); } else { /* Repeat renewal request for max number of attempts */ send_request(iface, true); } break; default: break; } }
/* Internal function to send network data to uIP stack */
/* Registers a UDP receiver on first use and pushes @buf to the uIP
 * stack.  TCP and ICMPv6 are not implemented.
 *
 * @return 0 on success, -EINVAL for missing driver/tuple or unsupported
 *         protocol, -ENOENT if the UDP connection could not be created.
 */
static int check_and_send_packet(struct net_buf *buf)
{
	struct net_tuple *tuple;
	struct simple_udp_connection *udp;
	int ret = 0;

	if (!netdev.drv) {
		return -EINVAL;
	}

	tuple = net_context_get_tuple(buf->context);
	if (!tuple) {
		return -EINVAL;
	}

	switch (tuple->ip_proto) {
	case IPPROTO_UDP:
		udp = net_context_get_udp_connection(buf->context);
		if (!net_context_get_receiver_registered(buf->context)) {
			/* Contiki's simple_udp_register() returns 0 on
			 * failure, non-zero on success.
			 */
			ret = simple_udp_register(udp, tuple->local_port,
#ifdef CONFIG_NETWORKING_WITH_IPV6
				(uip_ip6addr_t *)&tuple->remote_addr->in6_addr,
#else
				(uip_ip4addr_t *)&tuple->remote_addr->in_addr,
#endif
				tuple->remote_port, udp_packet_reply, buf);
			if (!ret) {
				NET_DBG("UDP connection creation failed\n");
				ret = -ENOENT;
				break;
			}
			net_context_set_receiver_registered(buf->context);
		}

		simple_udp_send(buf, udp, buf->data, buf->len);
		ret = 0;
		break;
	case IPPROTO_TCP:
		NET_DBG("TCP not yet supported\n");
		ret = -EINVAL;
		break;
	case IPPROTO_ICMPV6:
		NET_DBG("ICMPv6 not yet supported\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Choose the better of two RPL parents by path metric, applying
 * hysteresis so that the current preferred parent is kept when the two
 * metrics are within min_diff of each other.
 *
 * Both parents are in the same DAG; parent1's DAG is used for the
 * preferred-parent check.
 *
 * NOTE(review): p2_metric - min_diff is unsigned (u16_t) arithmetic; if
 * p2_metric < min_diff the lower hysteresis bound wraps around and the
 * comparison may misbehave - confirm metrics always exceed min_diff.
 */
static struct net_rpl_parent *
net_rpl_mrhof_best_parent(struct net_if *iface,
			  struct net_rpl_parent *parent1,
			  struct net_rpl_parent *parent2)
{
	struct net_rpl_dag *dag;
	u16_t min_diff;
	u16_t p1_metric;
	u16_t p2_metric;

	dag = parent1->dag; /* Both parents are in the same DAG. */

	min_diff = NET_RPL_MC_ETX_DIVISOR / MRHOF_PARENT_SWITCH_THRESHOLD_DIV;

	p1_metric = calculate_path_metric(parent1);
	p2_metric = calculate_path_metric(parent2);

	/* Maintain stability of the preferred parent in case of similar
	 * ranks.
	 */
	if (parent1 == dag->preferred_parent ||
	    parent2 == dag->preferred_parent) {
		if (p1_metric < p2_metric + min_diff &&
		    p1_metric > p2_metric - min_diff) {
			NET_DBG("MRHOF hysteresis %u <= %u <= %u",
				p2_metric - min_diff,
				p1_metric,
				p2_metric + min_diff);
			return dag->preferred_parent;
		}
	}

	return p1_metric < p2_metric ? parent1 : parent2;
}
void net_dhcpv4_start(struct net_if *iface) { int ret; iface->dhcpv4.state = NET_DHCPV4_INIT; iface->dhcpv4.attempts = 0; iface->dhcpv4.lease_time = 0; iface->dhcpv4.renewal_time = 0; /* A DHCP client MUST choose xid's in such a way as to * minimize the change of using and xid identical to one used * by another client. Choose a random xid st startup and * increment it on each new request. */ iface->dhcpv4.xid = sys_rand32_get(); /* * Register UDP input callback on * DHCPV4_SERVER_PORT(67) and DHCPV4_CLIENT_PORT(68) for * all dhcpv4 related incoming packets. */ ret = net_udp_register(NULL, NULL, DHCPV4_SERVER_PORT, DHCPV4_CLIENT_PORT, net_dhcpv4_input, NULL, NULL); if (ret < 0) { NET_DBG("UDP callback registration failed"); return; } send_discover(iface); }
void ieee802154_init(struct net_if *iface) { struct ieee802154_context *ctx = net_if_l2_data(iface); const struct ieee802154_radio_api *radio = iface->dev->driver_api; const u8_t *mac = iface->link_addr.addr; u8_t long_addr[8]; NET_DBG("Initializing IEEE 802.15.4 stack on iface %p", iface); ieee802154_mgmt_init(iface); #ifdef CONFIG_NET_L2_IEEE802154_SECURITY if (ieee802154_security_init(&ctx->sec_ctx)) { NET_ERR("Initializing link-layer security failed"); } #endif sys_memcpy_swap(long_addr, mac, 8); radio->set_ieee_addr(iface->dev, long_addr); memcpy(ctx->ext_addr, long_addr, 8); if (!radio->set_txpower(iface->dev, CONFIG_NET_L2_IEEE802154_RADIO_DFLT_TX_POWER)) { ctx->tx_power = CONFIG_NET_L2_IEEE802154_RADIO_DFLT_TX_POWER; } radio->start(iface->dev); }
/* One-shot initializer for the network stack: brings up the shell,
 * packet/context pools, L2/L3 layers, management events and the RX queue,
 * then (optionally) DHCPv4.
 *
 * @param unused Device pointer required by the init signature; not used.
 *
 * @return 0 on success, or the dhcpv4_init() error code.
 */
static int net_init(struct device *unused)
{
	int status = 0;

	NET_DBG("Priority %d", CONFIG_NET_INIT_PRIO);

	net_shell_init();

	net_pkt_init();

	net_context_init();

	l2_init();
	l3_init();

	net_mgmt_event_init();

	init_rx_queue();

#if CONFIG_NET_DHCPV4
	status = dhcpv4_init();
	if (status) {
		return status;
	}
#endif

	return status;
}
/* Check whether a packet's fragment chain is fully compacted, i.e. the
 * total amount of data equals the usable capacity of the chain.
 *
 * NOTE(review): calc multiplies the LAST fragment's size and headroom by
 * the fragment count - this assumes all fragments come from a pool with
 * uniform size and headroom; confirm that assumption holds.
 *
 * @return true when compact, false otherwise (or on an empty chain).
 */
static bool net_pkt_is_compact(struct net_pkt *pkt)
{
	struct net_buf *frag, *last;
	size_t total = 0, calc;
	int count = 0;

	last = NULL;
	frag = pkt->frags;

	/* Sum up the data length and remember the last fragment. */
	while (frag) {
		total += frag->len;
		count++;

		last = frag;
		frag = frag->frags;
	}

	NET_ASSERT(last);

	if (!last) {
		return false;
	}

	/* Usable capacity: count full buffers minus the tailroom still
	 * free in the last fragment and the headroom of each buffer.
	 */
	calc = count * (last->size) - net_buf_tailroom(last) -
		count * (net_buf_headroom(last));

	if (total == calc) {
		return true;
	}

	NET_DBG("Not compacted total %zu real %zu", total, calc);

	return false;
}
/* Validate and skip over an 802.15.4 address field.
 *
 * Sets *p_buf past the (PAN id +) address bytes according to @mode and
 * @pan_id_compression and returns a pointer to the address field, or
 * NULL when no address is present.
 */
static inline struct ieee802154_address_field *
validate_addr(uint8_t *buf, uint8_t **p_buf,
	      enum ieee802154_addressing_mode mode,
	      bool pan_id_compression)
{
	*p_buf = buf;

	NET_DBG("Buf %p - mode %d - pan id comp %d",
		buf, mode, pan_id_compression);

	if (mode == IEEE802154_ADDR_MODE_NONE) {
		return NULL;
	}

	/* PAN id precedes the address unless compressed away. */
	if (!pan_id_compression) {
		*p_buf += IEEE802154_PAN_ID_LENGTH;
	}

	*p_buf += (mode == IEEE802154_ADDR_MODE_SHORT) ?
		IEEE802154_SHORT_ADDR_LENGTH : IEEE802154_EXT_ADDR_LENGTH;

	return (struct ieee802154_address_field *)buf;
}
/* Drop a reference on a neighbor entry; when the count reaches zero the
 * entry's remove callback (if any) is invoked.
 *
 * NOTE(review): the #endif below closes a debug #if (alternate signature
 * providing the caller/line arguments used in the debug print) that
 * starts outside this view.
 */
void net_nbr_unref(struct net_nbr *nbr)
#endif
{
#if defined(CONFIG_NET_DEBUG_IPV6_NBR_CACHE)
	NET_DBG("nbr %p ref %u (%s():%d)", nbr, nbr->ref - 1, caller, line);
#else
	NET_DBG("nbr %p ref %u", nbr, nbr->ref - 1);
#endif

	if (--nbr->ref) {
		/* Still referenced elsewhere. */
		return;
	}

	if (nbr->remove) {
		nbr->remove(nbr);
	}
}
uint16_t ieee802154_compute_header_size(struct net_if *iface, struct in6_addr *dst) { uint16_t hdr_len = sizeof(struct ieee802154_fcf_seq); /** if dst is NULL, we'll consider it as a brodcast header */ if (!dst || net_is_ipv6_addr_mcast(dst) || net_is_ipv6_addr_unspecified(dst)) { /* 4 dst pan/addr + 8 src addr */ hdr_len += IEEE802154_PAN_ID_LENGTH + IEEE802154_SHORT_ADDR_LENGTH + IEEE802154_EXT_ADDR_LENGTH; } else { struct net_nbr *nbr; nbr = net_ipv6_nbr_lookup(iface, dst); if (nbr) { /* ToDo: handle short addresses */ /* dst pan/addr + src addr */ hdr_len += IEEE802154_PAN_ID_LENGTH + (IEEE802154_EXT_ADDR_LENGTH * 2); } else { /* src pan/addr only */ hdr_len += IEEE802154_PAN_ID_LENGTH + IEEE802154_EXT_ADDR_LENGTH; } } /* Todo: handle security aux header */ NET_DBG("Computed size of %u", hdr_len); return hdr_len; }
/* Validate a raw 802.15.4 frame of @length bytes and fill @mpdu with
 * pointers into it (frame control/seq, dst/src addresses, payload/MFR).
 *
 * @return true when the frame is well-formed, false otherwise.
 */
bool ieee802154_validate_frame(uint8_t *buf, uint8_t length,
			       struct ieee802154_mpdu *mpdu)
{
	uint8_t *cursor;

	if (length > IEEE802154_MTU || length < IEEE802154_MIN_LENGTH) {
		NET_DBG("Wrong packet length: %d", length);
		return false;
	}

	mpdu->mhr.fs = validate_fc_seq(buf, &cursor);
	if (!mpdu->mhr.fs) {
		return false;
	}

	/* ToDo: Support later version's frame types */
	if (mpdu->mhr.fs->fc.frame_type > IEEE802154_FRAME_TYPE_MAC_COMMAND) {
		return false;
	}

	/* Destination field never carries PAN id compression. */
	mpdu->mhr.dst_addr = validate_addr(cursor, &cursor,
					   mpdu->mhr.fs->fc.dst_addr_mode,
					   false);

	mpdu->mhr.src_addr = validate_addr(cursor, &cursor,
					   mpdu->mhr.fs->fc.src_addr_mode,
					   mpdu->mhr.fs->fc.pan_id_comp);

	return validate_payload_and_mfr(mpdu, buf, cursor, length);
}
/* Set the node's link-layer (MAC) address and, with IPv6 enabled, derive
 * and auto-configure the tentative link-local address from it.
 *
 * @param mac Pointer to the address bytes.
 * @param len Address length; must be 6 or 8 and within UIP_LLADDR_LEN.
 *
 * @return 0 on success, -EINVAL on a bad length.
 */
int net_set_mac(uint8_t *mac, uint8_t len)
{
	if ((len > UIP_LLADDR_LEN) || (len != 6 && len != 8)) {
		NET_ERR("Wrong ll addr len, len %d, max %d\n",
			len, UIP_LLADDR_LEN);
		return -EINVAL;
	}

	linkaddr_set_node_addr((linkaddr_t *)mac);

#ifdef CONFIG_NETWORKING_WITH_IPV6
	{
		uip_ds6_addr_t *lladdr;

		uip_ds6_set_lladdr((uip_lladdr_t *)mac);

		lladdr = uip_ds6_get_link_local(-1);

		NET_DBG("Tentative link-local IPv6 address ");
		PRINT6ADDR(&lladdr->ipaddr);
		PRINTF("\n");

		/* Mark the derived address as auto-configured. */
		lladdr->state = ADDR_AUTOCONF;
	}
#endif
	return 0;
}
/* L2CAP IPSP channel connected callback.
 *
 * Captures both connection addresses (byte-swapped, since the net APIs
 * expect big endian), sets our address as the interface link address and
 * brings the interface up.
 */
static void ipsp_connected(struct bt_l2cap_chan *chan)
{
	struct bt_context *ctxt = CHAN_CTXT(chan);
	struct bt_conn_info info;
#if defined(CONFIG_NET_DEBUG_L2_BLUETOOTH)
	char src[BT_ADDR_LE_STR_LEN];
	char dst[BT_ADDR_LE_STR_LEN];
#endif

	bt_conn_get_info(chan->conn, &info);

#if defined(CONFIG_NET_DEBUG_L2_BLUETOOTH)
	bt_addr_le_to_str(info.le.src, src, sizeof(src));
	bt_addr_le_to_str(info.le.dst, dst, sizeof(dst));

	NET_DBG("Channel %p Source %s connected to Destination %s",
		chan, src, dst);
#endif

	/* Swap bytes since net APIs expect big endian address */
	sys_memcpy_swap(ctxt->src.val, info.le.src->a.val, sizeof(ctxt->src));
	sys_memcpy_swap(ctxt->dst.val, info.le.dst->a.val, sizeof(ctxt->dst));

	net_if_set_link_addr(ctxt->iface, ctxt->src.val,
			     sizeof(ctxt->src.val));

	/* Set iface up */
	net_if_up(ctxt->iface);
}
/* Called by application when it wants to receive network data */
/* Ensures a UDP receiver is registered for @context (first call only)
 * and returns the next buffered packet from the context's RX queue, or
 * NULL when none is available (nano_fifo_get() does not block).
 *
 * NOTE(review): ret is computed for error tracking but never returned -
 * callers cannot distinguish "no data" from "registration failed";
 * presumably intentional for this legacy API, confirm.
 */
struct net_buf *net_receive(struct net_context *context)
{
	struct nano_fifo *rx_queue = net_context_get_queue(context);
	struct net_tuple *tuple;
	int ret = 0;

	tuple = net_context_get_tuple(context);
	if (!tuple) {
		return NULL;
	}

	switch (tuple->ip_proto) {
	case IPPROTO_UDP:
		if (!net_context_get_receiver_registered(context)) {
			struct simple_udp_connection *udp =
				net_context_get_udp_connection(context);

			/* simple_udp_register() returns 0 on failure. */
			ret = simple_udp_register(udp, tuple->local_port,
#ifdef CONFIG_NETWORKING_WITH_IPV6
				(uip_ip6addr_t *)&tuple->remote_addr->in6_addr,
#else
				(uip_ip4addr_t *)&tuple->remote_addr->in_addr,
#endif
				tuple->remote_port,
				udp_packet_receive,
				context);
			if (!ret) {
				NET_DBG("UDP connection listener failed\n");
				ret = -ENOENT;
				break;
			}
		}
		net_context_set_receiver_registered(context);
		ret = 0;
		break;
	case IPPROTO_TCP:
		NET_DBG("TCP not yet supported\n");
		ret = -EINVAL;
		break;
	case IPPROTO_ICMPV6:
		NET_DBG("ICMPv6 not yet supported\n");
		ret = -EINVAL;
		break;
	}

	return nano_fifo_get(rx_queue);
}
/* Driver init hook: register the IPSP L2CAP server so incoming
 * connections can be accepted.
 *
 * @return 0 always.
 */
static int net_bt_init(struct device *dev)
{
	NET_DBG("dev %p driver_data %p", dev, dev->driver_data);

	bt_l2cap_server_register(&server);

	return 0;
}
/* Initialize the RX and TX buffer pools used by the IP stack. */
void ip_buf_init(void)
{
	NET_DBG("Allocating %d RX and %d TX buffers for IP stack\n",
		IP_BUF_RX_SIZE, IP_BUF_TX_SIZE);

	net_buf_pool_init(rx_buffers);

	net_buf_pool_init(tx_buffers);
}