static void udp_received(struct net_context *context, struct net_pkt *pkt, int status, void *user_data) { struct net_pkt *reply_pkt; struct sockaddr dst_addr; sa_family_t family = net_pkt_family(pkt); static char dbg[MAX_DBG_PRINT + 1]; int ret; snprintf(dbg, MAX_DBG_PRINT, "UDP IPv%c", family == AF_INET6 ? '6' : '4'); set_dst_addr(family, pkt, &dst_addr); reply_pkt = build_reply_pkt(dbg, context, pkt); net_pkt_unref(pkt); ret = net_context_sendto(reply_pkt, &dst_addr, family == AF_INET6 ? sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in), pkt_sent, 0, UINT_TO_POINTER(net_pkt_get_len(reply_pkt)), user_data); if (ret < 0) { printk("Cannot send data to peer (%d)", ret); net_pkt_unref(reply_pkt); } }
/* Send encrypted data */
static int ssl_tx(void *context, const unsigned char *buf, size_t size)
{
	struct http_client_ctx *ctx = context;
	struct net_pkt *pkt;
	int ret;

	pkt = net_pkt_get_tx(ctx->tcp.ctx, BUF_ALLOC_TIMEOUT);
	if (!pkt) {
		return MBEDTLS_ERR_SSL_ALLOC_FAILED;
	}

	if (!net_pkt_append_all(pkt, size, (u8_t *)buf, BUF_ALLOC_TIMEOUT)) {
		/* Cannot append data */
		net_pkt_unref(pkt);
		return MBEDTLS_ERR_SSL_ALLOC_FAILED;
	}

	ret = net_context_send(pkt, ssl_sent, K_NO_WAIT, NULL, ctx);
	if (ret < 0) {
		net_pkt_unref(pkt);
		return ret;
	}

	/* Block until ssl_sent() signals that the packet went out. */
	k_sem_take(&ctx->https.mbedtls.ssl_ctx.tx_sem, K_FOREVER);

	return (int)size;
}
/* TCP RX callback: verify the echoed data and, once the full amount
 * has arrived, kick off the next transmission.
 */
static void tcp_received(struct net_app_ctx *ctx,
			 struct net_pkt *pkt,
			 int status,
			 void *user_data)
{
	struct data *data = ctx->user_data;

	ARG_UNUSED(user_data);
	ARG_UNUSED(status);

	/* Nothing to do for missing or empty packets. */
	if (!pkt || net_pkt_appdatalen(pkt) == 0) {
		if (pkt) {
			net_pkt_unref(pkt);
		}

		return;
	}

	NET_DBG("Sent %d bytes, received %u bytes",
		data->expecting_tcp, net_pkt_appdatalen(pkt));

	if (compare_tcp_data(pkt, data->expecting_tcp, data->received_tcp)) {
		data->received_tcp += net_pkt_appdatalen(pkt);
	} else {
		NET_DBG("Data mismatch");
	}

	if (data->expecting_tcp <= data->received_tcp) {
		/* Send more data */
		send_tcp_data(ctx, data);
	}

	net_pkt_unref(pkt);
}
static void tcp_received(struct net_context *context, struct net_pkt *pkt, int status, void *user_data) { static char dbg[MAX_DBG_PRINT + 1]; sa_family_t family = net_pkt_family(pkt); struct net_pkt *reply_pkt; int ret; snprintf(dbg, MAX_DBG_PRINT, "TCP IPv%c", family == AF_INET6 ? '6' : '4'); reply_pkt = build_reply_pkt(dbg, context, pkt); net_pkt_unref(pkt); ret = net_context_send(reply_pkt, pkt_sent, K_NO_WAIT, UINT_TO_POINTER(net_pkt_get_len(reply_pkt)), NULL); if (ret < 0) { printk("Cannot send data to peer (%d)", ret); net_pkt_unref(reply_pkt); quit(); } }
/*
 * Reset TX queue when errors are detected
 *
 * Flushes every in-flight frame, rebuilds the TX descriptor list and
 * re-enables the transmitter. Must only be called from the error path;
 * any queued packets are dropped, not retransmitted.
 */
static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
{
	struct net_pkt *pkt;
	struct ring_buf *tx_frames = &queue->tx_frames;

	queue->err_tx_flushed_count++;

	/* Stop transmission, clean transmit pipeline and control registers */
	gmac->GMAC_NCR &= ~GMAC_NCR_TXEN;

	/* Free all pkt resources in the TX path */
	while (tx_frames->tail != tx_frames->head) {
		/* Release net buffer to the buffer pool */
		pkt = UINT_TO_POINTER(tx_frames->buf[tx_frames->tail]);
		net_pkt_unref(pkt);
		/* NOTE(review): pkt is logged after the unref; only the
		 * pointer value is printed, the packet is not accessed.
		 */
		SYS_LOG_DBG("Dropping pkt %p", pkt);
		MODULO_INC(tx_frames->tail, tx_frames->len);
	}

	/* Reinitialize TX descriptor list */
	k_sem_reset(&queue->tx_desc_sem);
	tx_descriptors_init(gmac, queue);
	/* Give len - 1 counts; presumably one descriptor is kept unused so
	 * the list never appears full — confirm against tx_descriptors_init().
	 */
	for (int i = 0; i < queue->tx_desc_list.len - 1; i++) {
		k_sem_give(&queue->tx_desc_sem);
	}

	/* Restart transmission */
	gmac->GMAC_NCR |= GMAC_NCR_TXEN;
}
/*
 * Process successfully sent packets
 *
 * Walks the TX descriptors the hardware has consumed, returning one
 * tx_desc_sem count per descriptor, and releases the packet once the
 * descriptor marked LASTBUFFER (end of the frame) is reached.
 */
static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
{
	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
	struct gmac_desc *tx_desc;
	struct net_pkt *pkt;

	__ASSERT(tx_desc_list->buf[tx_desc_list->tail].w1 & GMAC_TXW1_USED,
		 "first buffer of a frame is not marked as own by GMAC");

	while (tx_desc_list->tail != tx_desc_list->head) {
		tx_desc = &tx_desc_list->buf[tx_desc_list->tail];

		MODULO_INC(tx_desc_list->tail, tx_desc_list->len);

		/* One descriptor reclaimed — wake a waiting sender. */
		k_sem_give(&queue->tx_desc_sem);

		if (tx_desc->w1 & GMAC_TXW1_LASTBUFFER) {
			/* Release net buffer to the buffer pool */
			pkt = UINT_TO_POINTER(ring_buf_get(&queue->tx_frames));
			net_pkt_unref(pkt);
			/* NOTE(review): only the pointer value is logged
			 * after the unref; the packet is not dereferenced.
			 */
			SYS_LOG_DBG("Dropping pkt %p", pkt);

			/* Only one completed frame is handled per call. */
			break;
		}
	}
}
/* Build a UDP packet from @buf and send it to the remote peer.
 * Returns the number of bytes queued, or -EIO on any failure.
 */
int udp_tx(void *context, const unsigned char *buf, size_t size)
{
	struct udp_context *ctx = context;
	struct net_context *net_ctx;
	struct net_pkt *send_pkt;
	int rc, len;

	net_ctx = ctx->net_ctx;

	send_pkt = net_pkt_get_tx(net_ctx, K_FOREVER);
	if (!send_pkt) {
		printk("cannot create pkt\n");
		return -EIO;
	}

	rc = net_pkt_append_all(send_pkt, size, (u8_t *) buf, K_FOREVER);
	if (!rc) {
		printk("cannot write buf\n");
		/* Fix: release the packet on append failure; the original
		 * code returned without unref and leaked it.
		 */
		net_pkt_unref(send_pkt);
		return -EIO;
	}

	len = net_pkt_get_len(send_pkt);

	/* NOTE(review): addrlen is not declared in this function —
	 * presumably a file-scope variable sized per address family;
	 * confirm it matches net_ctx->remote.
	 */
	rc = net_context_sendto(send_pkt, &net_ctx->remote, addrlen,
				NULL, K_FOREVER, NULL, NULL);
	if (rc < 0) {
		printk("Cannot send data to peer (%d)\n", rc);
		net_pkt_unref(send_pkt);
		return -EIO;
	}

	return len;
}
static inline void ieee802154_acknowledge(struct net_if *iface, struct ieee802154_mpdu *mpdu) { struct net_pkt *pkt; struct net_buf *frag; if (!mpdu->mhr.fs->fc.ar) { return; } pkt = net_pkt_get_reserve_tx(IEEE802154_ACK_PKT_LENGTH, K_FOREVER); if (!pkt) { return; } frag = net_pkt_get_frag(pkt, K_FOREVER); net_pkt_frag_insert(pkt, frag); if (ieee802154_create_ack_frame(iface, pkt, mpdu->mhr.fs->sequence)) { const struct ieee802154_radio_api *radio = iface->dev->driver_api; net_buf_add(frag, IEEE802154_ACK_PKT_LENGTH); radio->tx(iface->dev, pkt, frag); } net_pkt_unref(pkt); return; }
static void send_tcp_data(struct net_app_ctx *ctx, struct data *data) { struct net_pkt *pkt; size_t len; int ret; do { data->expecting_tcp = sys_rand32_get() % ipsum_len; } while (data->expecting_tcp == 0); data->received_tcp = 0; pkt = prepare_send_pkt(ctx, data->proto, data->expecting_tcp); if (!pkt) { return; } len = net_pkt_get_len(pkt); NET_ASSERT_INFO(data->expecting_tcp == len, "%s data to send %d bytes, real len %zu", data->proto, data->expecting_tcp, len); ret = net_app_send_pkt(ctx, pkt, NULL, 0, K_FOREVER, UINT_TO_POINTER(len)); if (ret < 0) { NET_ERR("Cannot send %s data to peer (%d)", data->proto, ret); net_pkt_unref(pkt); } }
static int sender_iface(struct net_if *iface, struct net_pkt *pkt) { if (!pkt->frags) { DBG("No data to send!\n"); return -ENODATA; } if (test_started) { struct net_if_test *data = iface->dev->driver_data; DBG("Sending at iface %d %p\n", net_if_get_by_iface(iface), iface); if (net_pkt_iface(pkt) != iface) { DBG("Invalid interface %p, expecting %p\n", net_pkt_iface(pkt), iface); test_failed = true; } if (net_if_get_by_iface(iface) != data->idx) { DBG("Invalid interface %d index, expecting %d\n", data->idx, net_if_get_by_iface(iface)); test_failed = true; } } net_pkt_unref(pkt); k_sem_give(&wait_data); return 0; }
/* Allocate and send data to USB Host */
static void send_data(u8_t *cfg, u8_t *data, size_t len)
{
	struct net_pkt *pkt;
	struct net_buf *buf;

	pkt = net_pkt_get_reserve_rx(0, K_NO_WAIT);
	if (!pkt) {
		SYS_LOG_DBG("No pkt available");
		return;
	}

	buf = net_pkt_get_frag(pkt, K_NO_WAIT);
	if (!buf) {
		SYS_LOG_DBG("No fragment available");
		net_pkt_unref(pkt);
		return;
	}

	net_pkt_frag_insert(pkt, buf);

	/* Fix: len is size_t, so print it with %zu — %u is a format/argument
	 * mismatch on targets where size_t is not unsigned int.
	 */
	SYS_LOG_DBG("queue pkt %p buf %p len %zu", pkt, buf, len);

	/* Add configuration id */
	memcpy(net_buf_add(buf, 2), cfg, 2);
	memcpy(net_buf_add(buf, len), data, len);

	/* simulate LQI */
	net_buf_add(buf, 1);
	/* simulate FCS */
	net_buf_add(buf, 2);

	k_fifo_put(&tx_queue, pkt);
}
/* RX worker: pull packets from rx_queue forever and dispatch them by
 * their one-byte message specifier.
 */
static void rx_thread(void)
{
	SYS_LOG_INF("RX thread started");

	for (;;) {
		struct net_pkt *pkt;
		struct net_buf *last;
		u8_t tag;

		pkt = k_fifo_get(&rx_queue, K_FOREVER);
		last = net_buf_frag_last(pkt->frags);

		SYS_LOG_DBG("Got pkt %p buf %p", pkt, last);

		hexdump("SLIP >", last->data, last->len);

		/* TODO: process */
		tag = net_buf_pull_u8(last);

		if (tag == '?') {
			process_request(last);
		} else if (tag == '!') {
			process_config(pkt);
		} else {
			SYS_LOG_ERR("Unknown message specifier %c", tag);
		}

		net_pkt_unref(pkt);

		k_yield();
	}
}
static void process_msg(struct slip_context *slip) { u16_t vlan_tag = NET_VLAN_TAG_UNSPEC; struct net_pkt *pkt; pkt = slip_poll_handler(slip); if (!pkt || !pkt->frags) { return; } #if defined(CONFIG_NET_VLAN) { struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) { struct net_eth_vlan_hdr *hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt); net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci)); vlan_tag = net_pkt_vlan_tag(pkt); } } #endif if (net_recv_data(get_iface(slip, vlan_tag), pkt) < 0) { net_pkt_unref(pkt); } slip->rx = NULL; slip->last = NULL; }
static void ipsp_recv(struct bt_l2cap_chan *chan, struct net_buf *buf) { struct bt_context *ctxt = CHAN_CTXT(chan); struct net_pkt *pkt; NET_DBG("Incoming data channel %p len %zu", chan, net_buf_frags_len(buf)); /* Get packet for bearer / protocol related data */ pkt = net_pkt_get_reserve_rx(0, K_FOREVER); /* Set destination address */ net_pkt_ll_dst(pkt)->addr = ctxt->src.val; net_pkt_ll_dst(pkt)->len = sizeof(ctxt->src); net_pkt_ll_dst(pkt)->type = NET_LINK_BLUETOOTH; /* Set source address */ net_pkt_ll_src(pkt)->addr = ctxt->dst.val; net_pkt_ll_src(pkt)->len = sizeof(ctxt->dst); net_pkt_ll_src(pkt)->type = NET_LINK_BLUETOOTH; /* Add data buffer as fragment of RX buffer, take a reference while * doing so since L2CAP will unref the buffer after return. */ net_pkt_frag_add(pkt, net_buf_ref(buf)); if (net_recv_data(ctxt->iface, pkt) < 0) { NET_DBG("Packet dropped by NET stack"); net_pkt_unref(pkt); } }
/* Build an RX net_pkt from a received VLAN-tagged Ethernet frame.
 * When VLAN tag stripping is enabled the 4-byte tag is removed from the
 * copy; the TCI is always recorded on the packet and reported via
 * @vlan_tag. On failure NULL is returned and *status holds the error.
 */
static struct net_pkt *prepare_vlan_pkt(struct eth_context *ctx, int count,
					u16_t *vlan_tag, int *status)
{
	struct net_eth_vlan_hdr *hdr = (struct net_eth_vlan_hdr *)ctx->recv;
	struct net_pkt *pkt;
	u8_t pos;

	/* Stripped packets are NET_ETH_VLAN_HDR_SIZE (4) bytes shorter. */
	if (IS_ENABLED(CONFIG_ETH_NATIVE_POSIX_VLAN_TAG_STRIP)) {
		count -= NET_ETH_VLAN_HDR_SIZE;
	}

	pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, count,
					   AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		*status = -ENOMEM;
		return NULL;
	}

	/* Record the VLAN TCI from the received header. */
	net_pkt_set_vlan_tci(pkt, ntohs(hdr->vlan.tci));
	*vlan_tag = net_pkt_vlan_tag(pkt);

	pos = 0;

	if (IS_ENABLED(CONFIG_ETH_NATIVE_POSIX_VLAN_TAG_STRIP)) {
		/* Copy dst + src MAC addresses (12 bytes), then continue
		 * from just past the VLAN header so the tag is dropped
		 * from the copied frame.
		 */
		if (net_pkt_write(pkt, ctx->recv,
				  2 * sizeof(struct net_eth_addr))) {
			goto error;
		}

		pos = (2 * sizeof(struct net_eth_addr)) +
			NET_ETH_VLAN_HDR_SIZE;
		count -= (2 * sizeof(struct net_eth_addr));
	}

	/* Copy the (remaining) payload. */
	if (net_pkt_write(pkt, ctx->recv + pos, count)) {
		goto error;
	}

#if CONFIG_NET_TC_RX_COUNT > 1
	{
		enum net_priority prio;

		/* Map the VLAN priority onto a packet priority so RX
		 * traffic classes apply.
		 */
		prio = net_vlan2priority(net_pkt_vlan_priority(pkt));
		net_pkt_set_priority(pkt, prio);
	}
#endif

	*status = 0;

	LOG_DBG("Recv pkt %p len %d", pkt, count);

	return pkt;

error:
	net_pkt_unref(pkt);
	*status = -ENOBUFS;
	return NULL;
}
/* Loopback driver send: swap src/dst IP addresses and feed a clone of
 * the packet back into the RX path.
 */
static int loopback_send(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_pkt *cloned;
	int res;

	if (!pkt->frags) {
		SYS_LOG_ERR("No data to send");
		return -ENODATA;
	}

	/* We need to swap the IP addresses because otherwise
	 * the packet will be dropped.
	 */
	if (net_pkt_family(pkt) == AF_INET6) {
		struct in6_addr addr;

		net_ipaddr_copy(&addr, &NET_IPV6_HDR(pkt)->src);
		net_ipaddr_copy(&NET_IPV6_HDR(pkt)->src,
				&NET_IPV6_HDR(pkt)->dst);
		net_ipaddr_copy(&NET_IPV6_HDR(pkt)->dst, &addr);
	} else {
		struct in_addr addr;

		net_ipaddr_copy(&addr, &NET_IPV4_HDR(pkt)->src);
		net_ipaddr_copy(&NET_IPV4_HDR(pkt)->src,
				&NET_IPV4_HDR(pkt)->dst);
		net_ipaddr_copy(&NET_IPV4_HDR(pkt)->dst, &addr);
	}

	/* We should simulate normal driver meaning that if the packet is
	 * properly sent (which is always in this driver), then the packet
	 * must be dropped. This is very much needed for TCP packets where
	 * the packet is reference counted in various stages of sending.
	 */
	cloned = net_pkt_clone(pkt, K_MSEC(100));
	if (!cloned) {
		res = -ENOMEM;
		goto out;
	}

	res = net_recv_data(iface, cloned);
	if (res < 0) {
		SYS_LOG_ERR("Data receive failed.");
		goto out;
	}

	/* Success path only: the original is released to mimic a real
	 * driver consuming the packet. NOTE(review): on the error paths
	 * above the original pkt is intentionally kept — presumably the
	 * caller still owns it when send fails; confirm against net core.
	 */
	net_pkt_unref(pkt);

out:
	/* Let the receiving thread run now */
	k_yield();

	return res;
}
static inline struct net_pkt *prepare_arp_request(struct net_if *iface, struct net_pkt *req, struct net_eth_addr *addr) { struct net_pkt *pkt; struct net_buf *frag; struct net_arp_hdr *hdr, *req_hdr; struct net_eth_hdr *eth, *eth_req; pkt = net_pkt_get_reserve_rx(sizeof(struct net_eth_hdr), K_FOREVER); if (!pkt) { goto fail; } frag = net_pkt_get_frag(pkt, K_FOREVER); if (!frag) { goto fail; } net_pkt_frag_add(pkt, frag); net_pkt_set_iface(pkt, iface); hdr = NET_ARP_HDR(pkt); eth = NET_ETH_HDR(pkt); req_hdr = NET_ARP_HDR(req); eth_req = NET_ETH_HDR(req); eth->type = htons(NET_ETH_PTYPE_ARP); memset(ð->dst.addr, 0xff, sizeof(struct net_eth_addr)); memcpy(ð->src.addr, addr, sizeof(struct net_eth_addr)); hdr->hwtype = htons(NET_ARP_HTYPE_ETH); hdr->protocol = htons(NET_ETH_PTYPE_IP); hdr->hwlen = sizeof(struct net_eth_addr); hdr->protolen = sizeof(struct in_addr); hdr->opcode = htons(NET_ARP_REQUEST); memset(&hdr->dst_hwaddr.addr, 0x00, sizeof(struct net_eth_addr)); memcpy(&hdr->src_hwaddr.addr, addr, sizeof(struct net_eth_addr)); net_ipaddr_copy(&hdr->src_ipaddr, &req_hdr->src_ipaddr); net_ipaddr_copy(&hdr->dst_ipaddr, &req_hdr->dst_ipaddr); net_buf_add(frag, sizeof(struct net_arp_hdr)); return pkt; fail: net_pkt_unref(pkt); return NULL; }
/* Copy the application data of the pending RX packet into @buf.
 * Blocks until udp_received() has stored a packet in ctx->rx_pkt.
 * Returns the number of bytes copied, -ENOMEM if @buf is too small,
 * or -EIO if the copied length disagrees with the advertised length.
 */
int udp_rx(void *context, unsigned char *buf, size_t size)
{
	struct udp_context *ctx = context;
	struct net_context *net_ctx = ctx->net_ctx;
	struct net_pkt *rx_pkt = NULL;
	struct net_buf *rx_buf;
	u16_t read_bytes;
	u8_t *ptr;
	int pos;
	int len;
	int rc;

	/* Wait until the RX callback has delivered a packet. */
	k_sem_take(&ctx->rx_sem, K_FOREVER);

	read_bytes = net_pkt_appdatalen(ctx->rx_pkt);
	if (read_bytes > size) {
		/* NOTE(review): the pending packet stays in ctx->rx_pkt and
		 * is not unreffed on this path — presumably the caller
		 * retries with a larger buffer; confirm against callers.
		 */
		return -ENOMEM;
	}

	rx_pkt = ctx->rx_pkt;

	set_client_address(&net_ctx->remote, rx_pkt);

	/* appdata points inside the first fragment: copy from there to the
	 * end of that fragment, then each following fragment in full.
	 */
	ptr = net_pkt_appdata(rx_pkt);
	rx_buf = rx_pkt->frags;
	len = rx_buf->len - (ptr - rx_buf->data);

	pos = 0;
	while (rx_buf) {
		memcpy(buf + pos, ptr, len);

		pos += len;

		rx_buf = rx_buf->frags;
		if (!rx_buf) {
			break;
		}

		ptr = rx_buf->data;
		len = rx_buf->len;
	}

	net_pkt_unref(ctx->rx_pkt);
	ctx->rx_pkt = NULL;

	/* Sanity check: bytes copied must match the advertised app data. */
	if (read_bytes != pos) {
		return -EIO;
	}

	rc = read_bytes;
	ctx->remaining = 0;

	return rc;
}
/* Receive encrypted data from network. Put that data into fifo * that will be read by https thread. */ static void ssl_received(struct net_context *context, struct net_pkt *pkt, int status, void *user_data) { struct http_client_ctx *http_ctx = user_data; struct rx_fifo_block *rx_data = NULL; struct k_mem_block block; int ret; ARG_UNUSED(context); ARG_UNUSED(status); if (pkt && !net_pkt_appdatalen(pkt)) { net_pkt_unref(pkt); return; } ret = k_mem_pool_alloc(http_ctx->https.pool, &block, sizeof(struct rx_fifo_block), BUF_ALLOC_TIMEOUT); if (ret < 0) { if (pkt) { net_pkt_unref(pkt); } return; } rx_data = block.data; rx_data->pkt = pkt; /* For freeing memory later */ memcpy(&rx_data->block, &block, sizeof(struct k_mem_block)); k_fifo_put(&http_ctx->https.mbedtls.ssl_ctx.rx_fifo, (void *)rx_data); /* Let the ssl_rx() to run */ k_yield(); }
/* Build a CoAP ACK carrying @response_code for @request and push it out
 * through the TLS session. Returns 0 on success or a negative errno.
 */
static int send_response(struct zoap_packet *request, u8_t response_code)
{
	struct net_pkt *pkt;
	struct net_buf *frag;
	struct zoap_packet response;
	u8_t code, type;
	u16_t id;
	int r;

	code = zoap_header_get_code(request);
	type = zoap_header_get_type(request);
	id = zoap_header_get_id(request);

	printk("*******\n");
	printk("type: %u code %u id %u\n", type, code, id);
	printk("*******\n");

	pkt = net_pkt_get_reserve(&zoap_pkt_slab, 0, K_NO_WAIT);
	if (!pkt) {
		return -ENOMEM;
	}

	frag = net_buf_alloc(&zoap_data_pool, K_NO_WAIT);
	if (!frag) {
		/* Fix: do not leak the packet when the fragment allocation
		 * fails; the original returned without releasing it.
		 */
		net_pkt_unref(pkt);
		return -ENOMEM;
	}

	net_pkt_frag_add(pkt, frag);

	r = zoap_packet_init(&response, pkt);
	if (r < 0) {
		/* Fix: release the packet (and its attached fragment) on
		 * init failure instead of leaking both.
		 */
		net_pkt_unref(pkt);
		return -EINVAL;
	}

	zoap_header_set_version(&response, 1);
	zoap_header_set_type(&response, ZOAP_TYPE_ACK);
	zoap_header_set_code(&response, response_code);
	zoap_header_set_id(&response, id);

	/* Retry while mbed TLS asks for more I/O. */
	do {
		r = mbedtls_ssl_write(curr_ctx, frag->data, frag->len);
	} while (r == MBEDTLS_ERR_SSL_WANT_READ ||
		 r == MBEDTLS_ERR_SSL_WANT_WRITE);

	if (r >= 0) {
		r = 0;
	}

	net_pkt_unref(pkt);

	return r;
}
/* Run a packet through process_data() and free it unless the stack
 * consumed it.
 */
static void processing_data(struct net_pkt *pkt, bool is_loopback)
{
	if (process_data(pkt, is_loopback) == NET_OK) {
		NET_DBG("Consumed pkt %p", pkt);
		return;
	}

	/* NET_DROP or anything unexpected: release the packet. */
	NET_DBG("Dropping pkt %p", pkt);
	net_pkt_unref(pkt);
}
/* Read one frame from the host fd, wrap it in a net_pkt (VLAN-aware
 * when CONFIG_NET_VLAN is set) and feed it to the network stack.
 * Returns 0 on success or when nothing was read; a negative status
 * when packet preparation fails.
 */
static int read_data(struct eth_context *ctx, int fd)
{
	u16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
	struct net_if *iface;
	struct net_pkt *pkt = NULL;
	int status;
	int count;

	count = eth_read_data(fd, ctx->recv, sizeof(ctx->recv));
	if (count <= 0) {
		return 0;
	}

#if defined(CONFIG_NET_VLAN)
	{
		struct net_eth_hdr *hdr = (struct net_eth_hdr *)(ctx->recv);

		/* Dispatch on the Ethernet type: tagged frames keep their
		 * VLAN info, untagged frames get a zero TCI.
		 */
		if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
			pkt = prepare_vlan_pkt(ctx, count, &vlan_tag, &status);
			if (!pkt) {
				return status;
			}
		} else {
			pkt = prepare_non_vlan_pkt(ctx, count, &status);
			if (!pkt) {
				return status;
			}

			net_pkt_set_vlan_tci(pkt, 0);
		}
	}
#else
	{
		pkt = prepare_non_vlan_pkt(ctx, count, &status);
		if (!pkt) {
			return status;
		}
	}
#endif

	/* Select the interface matching the VLAN tag (or the default). */
	iface = get_iface(ctx, vlan_tag);

	update_gptp(iface, pkt, false);

	/* On failure the stack did not take the packet; release it here. */
	if (net_recv_data(iface, pkt) < 0) {
		net_pkt_unref(pkt);
	}

	return 0;
}
/* Test driver send hook: consume the packet and record success. */
static int tester_send(struct net_if *iface, struct net_pkt *pkt)
{
	if (!pkt->frags) {
		DBG("No data to send!\n");
		return -ENODATA;
	}

	DBG("Data was sent successfully\n");

	net_pkt_unref(pkt);

	send_status = 0;

	return 0;
}
/* Poll the SLIP driver for a completed frame and push it up the stack. */
static void process_msg(struct slip_context *slip)
{
	struct net_pkt *pkt = slip_poll_handler(slip);

	if (pkt && pkt->frags) {
		/* The stack did not take the packet; drop it ourselves. */
		if (net_recv_data(slip->iface, pkt) < 0) {
			net_pkt_unref(pkt);
		}

		slip->rx = NULL;
		slip->last = NULL;
	}
}
/* Tear down the current telnet client: restore the printk hook, stop
 * the send timer, drop the connection and reset the output state.
 */
static void telnet_end_client_connection(void)
{
	__printk_hook_install(orig_printk_hook);
	orig_printk_hook = NULL;

	k_timer_stop(&send_timer);

	net_context_put(client_cnx);
	client_cnx = NULL;

	if (out_pkt) {
		net_pkt_unref(out_pkt);
		/* Fix: clear the stale pointer so a later call cannot
		 * unref the same packet a second time.
		 */
		out_pkt = NULL;
	}

	telnet_rb_init();
}
static void https_shutdown(struct http_client_ctx *ctx) { if (!ctx->https.tid) { return; } /* Empty the fifo just in case there is any received packets * still there. */ while (1) { struct rx_fifo_block *rx_data; rx_data = k_fifo_get(&ctx->https.mbedtls.ssl_ctx.rx_fifo, K_NO_WAIT); if (!rx_data) { break; } net_pkt_unref(rx_data->pkt); k_mem_pool_free(&rx_data->block); } k_fifo_cancel_wait(&ctx->https.mbedtls.ssl_ctx.rx_fifo); /* Let the ssl_rx() run if there is anything there waiting */ k_yield(); mbedtls_ssl_close_notify(&ctx->https.mbedtls.ssl); mbedtls_ssl_free(&ctx->https.mbedtls.ssl); mbedtls_ssl_config_free(&ctx->https.mbedtls.conf); mbedtls_ctr_drbg_free(&ctx->https.mbedtls.ctr_drbg); mbedtls_entropy_free(&ctx->https.mbedtls.entropy); #if defined(MBEDTLS_X509_CRT_PARSE_C) mbedtls_x509_crt_free(&ctx->https.mbedtls.ca_cert); #endif tcp_disconnect(ctx); NET_DBG("HTTPS thread %p stopped for %p", ctx->https.tid, ctx); k_thread_abort(ctx->https.tid); ctx->https.tid = 0; }
/* Telnet RX callback: feed valid input to the handler, otherwise tear
 * down the client connection.
 */
static void telnet_recv(struct net_context *client, struct net_pkt *pkt,
			int status, void *user_data)
{
	if (pkt && !status) {
		telnet_handle_input(pkt);
		net_pkt_unref(pkt);
		return;
	}

	/* NULL packet or error status means the client went away. */
	telnet_end_client_connection();

	SYS_LOG_DBG("Telnet client dropped (AF_INET%s) status %d",
		    net_context_get_family(client) == AF_INET ? "" : "6",
		    status);
}
static void eth_rx(struct gmac_queue *queue) { struct eth_sam_dev_data *dev_data = CONTAINER_OF(queue, struct eth_sam_dev_data, queue_list); struct net_pkt *rx_frame; /* More than one frame could have been received by GMAC, get all * complete frames stored in the GMAC RX descriptor list. */ rx_frame = frame_get(queue); while (rx_frame) { SYS_LOG_DBG("ETH rx"); if (net_recv_data(dev_data->iface, rx_frame) < 0) { net_pkt_unref(rx_frame); } rx_frame = frame_get(queue); } }
static int bt_iface_send(struct net_if *iface, struct net_pkt *pkt) { struct bt_context *ctxt = net_if_get_device(iface)->driver_data; struct net_buf *frags; int ret; NET_DBG("iface %p pkt %p len %zu", iface, pkt, net_pkt_get_len(pkt)); /* Dettach data fragments for packet */ frags = pkt->frags; pkt->frags = NULL; net_pkt_unref(pkt); ret = bt_l2cap_chan_send(&ctxt->ipsp_chan.chan, frags); if (ret < 0) { return ret; } return ret; }
/* TCP RX callback for the HTTP client: forward data packets to the
 * registered receive callback, and unblock a waiting request when the
 * peer closes the connection.
 */
static void recv_cb(struct net_context *net_ctx, struct net_pkt *pkt,
		    int status, void *data)
{
	struct http_client_ctx *ctx = data;

	ARG_UNUSED(net_ctx);

	if (status) {
		return;
	}

	if (pkt && net_pkt_appdatalen(pkt) > 0) {
		/* receive_cb must take ownership of the received packet */
		if (ctx->tcp.receive_cb) {
			ctx->tcp.receive_cb(ctx, pkt);
			return;
		}

		net_pkt_unref(pkt);
		return;
	}

	/*
	 * This block most likely handles a TCP_FIN message.
	 * (this means the connection is now closed)
	 * If we get here, and req.wait.count is still 0 this means
	 * http client is still waiting to parse a response body.
	 * This will will never happen now. Instead of generating
	 * an ETIMEDOUT error in the future, let's unlock the
	 * req.wait semaphore and let the app deal with whatever
	 * data was parsed in the header (IE: http status, etc).
	 */
	if (ctx->req.wait.count == 0) {
		k_sem_give(&ctx->req.wait);
	}

	if (pkt) {
		net_pkt_unref(pkt);
	}
}