static void rx_thread(void)
{
	SYS_LOG_INF("RX thread started");

	while (1) {
		struct net_pkt *pkt;
		struct net_buf *buf;
		u8_t specifier;

		pkt = k_fifo_get(&rx_queue, K_FOREVER);
		buf = net_buf_frag_last(pkt->frags);

		SYS_LOG_DBG("Got pkt %p buf %p", pkt, buf);

		hexdump("SLIP >", buf->data, buf->len);

		/* TODO: process */
		specifier = net_buf_pull_u8(buf);

		switch (specifier) {
		case '?':
			process_request(buf);
			break;
		case '!':
			process_config(pkt);
			break;
		default:
			SYS_LOG_ERR("Unknown message specifier %c", specifier);
			break;
		}

		net_pkt_unref(pkt);

		k_yield();
	}
}
/* The actual printk hook */
static int telnet_console_out(int c)
{
	int key = irq_lock();
	struct line_buf *lb = telnet_rb_get_line_in();
	bool yield = false;

	lb->buf[lb->len++] = (char)c;

	if (c == '\n' || lb->len == TELNET_LINE_SIZE - 1) {
		lb->buf[lb->len - 1] = NVT_CR;
		lb->buf[lb->len++] = NVT_LF;
		telnet_rb_switch();
		yield = true;
	}

	irq_unlock(key);

#ifdef CONFIG_TELNET_CONSOLE_DEBUG_DEEP
	/* This is ugly, but if one wants to debug telnet, it
	 * will also output the character to original console
	 */
	orig_printk_hook(c);
#endif

	if (yield) {
		k_yield();
	}

	return c;
}
static void thread_helper(void *arg1, void *arg2, void *arg3)
{
	k_tid_t self_thread_id;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	/*
	 * This thread starts off at a higher priority than thread_entry().
	 * Thus, it should execute immediately.
	 */
	thread_evidence++;

	/* Test that helper will yield to a thread of equal priority */
	self_thread_id = k_current_get();

	/* Lower priority to that of thread_entry() */
	k_thread_priority_set(self_thread_id, self_thread_id->base.prio + 1);

	k_yield();	/* Yield to thread of equal priority */

	thread_evidence++;
	/* <thread_evidence> should now be 2 */
}
static void rx_thread(void)
{
	BT_DBG("");

	while (true) {
		struct net_buf *buf;

		buf = net_buf_get(&h5.rx_queue, K_FOREVER);

		hexdump("=> ", buf->data, buf->len);

		if (!memcmp(buf->data, sync_req, sizeof(sync_req))) {
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			h5_send(sync_rsp, HCI_3WIRE_LINK_PKT, sizeof(sync_rsp));
		} else if (!memcmp(buf->data, sync_rsp, sizeof(sync_rsp))) {
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			h5.link_state = INIT;
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_req, 2)) {
			/*
			 * The Host sends Config Response messages without a
			 * Configuration Field.
			 */
			h5_send(conf_rsp, HCI_3WIRE_LINK_PKT, sizeof(conf_rsp));

			/* Then send Config Request with Configuration Field */
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_rsp, 2)) {
			h5.link_state = ACTIVE;
			if (buf->len > 2) {
				/* Configuration field present */
				h5.tx_win = (buf->data[2] & 0x07);
			}

			BT_DBG("Finished H5 configuration, tx_win %u",
			       h5.tx_win);
		} else {
			BT_ERR("Not handled yet %x %x",
			       buf->data[0], buf->data[1]);
		}

		net_buf_unref(buf);

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}
static int loopback_send(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_pkt *cloned;
	int res;

	if (!pkt->frags) {
		SYS_LOG_ERR("No data to send");
		return -ENODATA;
	}

	/* We need to swap the IP addresses because otherwise
	 * the packet will be dropped.
	 */
	if (net_pkt_family(pkt) == AF_INET6) {
		struct in6_addr addr;

		net_ipaddr_copy(&addr, &NET_IPV6_HDR(pkt)->src);
		net_ipaddr_copy(&NET_IPV6_HDR(pkt)->src,
				&NET_IPV6_HDR(pkt)->dst);
		net_ipaddr_copy(&NET_IPV6_HDR(pkt)->dst, &addr);
	} else {
		struct in_addr addr;

		net_ipaddr_copy(&addr, &NET_IPV4_HDR(pkt)->src);
		net_ipaddr_copy(&NET_IPV4_HDR(pkt)->src,
				&NET_IPV4_HDR(pkt)->dst);
		net_ipaddr_copy(&NET_IPV4_HDR(pkt)->dst, &addr);
	}

	/* We should simulate normal driver meaning that if the packet is
	 * properly sent (which is always in this driver), then the packet
	 * must be dropped. This is very much needed for TCP packets where
	 * the packet is reference counted in various stages of sending.
	 */
	cloned = net_pkt_clone(pkt, K_MSEC(100));
	if (!cloned) {
		res = -ENOMEM;
		goto out;
	}

	res = net_recv_data(iface, cloned);
	if (res < 0) {
		SYS_LOG_ERR("Data receive failed.");
		goto out;
	}

	net_pkt_unref(pkt);

out:
	/* Let the receiving thread run now */
	k_yield();

	return res;
}
static void rx_thread(void *p1, void *p2, void *p3)
{
	struct net_buf *buf;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	BT_DBG("started");

	while (1) {
		BT_DBG("rx.buf %p", rx.buf);

		/* We can only do the allocation if we know the initial
		 * header, since Command Complete/Status events must use the
		 * original command buffer (if available).
		 */
		if (rx.have_hdr && !rx.buf) {
			rx.buf = get_rx(K_FOREVER);
			BT_DBG("Got rx.buf %p", rx.buf);
			if (rx.remaining > net_buf_tailroom(rx.buf)) {
				BT_ERR("Not enough space in buffer");
				rx.discard = rx.remaining;
				reset_rx();
			} else {
				copy_hdr(rx.buf);
			}
		}

		/* Let the ISR continue receiving new packets */
		uart_irq_rx_enable(h4_dev);

		buf = net_buf_get(&rx.fifo, K_FOREVER);
		do {
			uart_irq_rx_enable(h4_dev);

			BT_DBG("Calling bt_recv(%p)", buf);
			bt_recv(buf);

			/* Give other threads a chance to run if the ISR
			 * is receiving data so fast that rx.fifo never
			 * or very rarely goes empty.
			 */
			k_yield();

			uart_irq_rx_disable(h4_dev);
			buf = net_buf_get(&rx.fifo, K_NO_WAIT);
		} while (buf);
	}
}
static void tx_thread(void) { SYS_LOG_DBG("Tx thread started"); while (1) { uint8_t cmd; struct net_buf *pkt, *buf; pkt = net_buf_get(&tx_queue, K_FOREVER); buf = net_buf_frag_last(pkt); cmd = net_buf_pull_u8(buf); hexdump(">", buf->data, buf->len); switch (cmd) { case RESET: SYS_LOG_DBG("Reset device"); break; case TX: tx(pkt); break; case START: start(); break; case STOP: stop(); break; case SET_CHANNEL: set_channel(buf->data, buf->len); break; case SET_IEEE_ADDR: set_ieee_addr(buf->data, buf->len); break; case SET_SHORT_ADDR: set_short_addr(buf->data, buf->len); break; case SET_PAN_ID: set_pan_id(buf->data, buf->len); break; default: SYS_LOG_ERR("%x: Not handled for now", cmd); break; } net_nbuf_unref(pkt); k_yield(); } }
static void https_shutdown(struct http_client_ctx *ctx)
{
	if (!ctx->https.tid) {
		return;
	}

	/* Empty the fifo just in case there are any received packets
	 * still there.
	 */
	while (1) {
		struct rx_fifo_block *rx_data;

		rx_data = k_fifo_get(&ctx->https.mbedtls.ssl_ctx.rx_fifo,
				     K_NO_WAIT);
		if (!rx_data) {
			break;
		}

		net_pkt_unref(rx_data->pkt);

		k_mem_pool_free(&rx_data->block);
	}

	k_fifo_cancel_wait(&ctx->https.mbedtls.ssl_ctx.rx_fifo);

	/* Let the ssl_rx() run if there is anything there waiting */
	k_yield();

	mbedtls_ssl_close_notify(&ctx->https.mbedtls.ssl);
	mbedtls_ssl_free(&ctx->https.mbedtls.ssl);
	mbedtls_ssl_config_free(&ctx->https.mbedtls.conf);
	mbedtls_ctr_drbg_free(&ctx->https.mbedtls.ctr_drbg);
	mbedtls_entropy_free(&ctx->https.mbedtls.entropy);

#if defined(MBEDTLS_X509_CRT_PARSE_C)
	mbedtls_x509_crt_free(&ctx->https.mbedtls.ca_cert);
#endif

	tcp_disconnect(ctx);

	NET_DBG("HTTPS thread %p stopped for %p", ctx->https.tid, ctx);

	k_thread_abort(ctx->https.tid);
	ctx->https.tid = 0;
}
static void net_rx_thread(void)
{
	struct net_pkt *pkt;

	NET_DBG("Starting RX thread (stack %zu bytes)", sizeof(rx_stack));

	/* Starting TX side. The ordering is important here and the TX
	 * can only be started when RX side is ready to receive packets.
	 * We synchronize the startup of the device so that both RX and TX
	 * are only started fully when both are ready to receive or send
	 * data.
	 */
	net_if_init(&startup_sync);

	k_sem_take(&startup_sync, K_FOREVER);

	/* This will take the interface up and start everything. */
	net_if_post_init();

	while (1) {
#if defined(CONFIG_NET_STATISTICS) || defined(CONFIG_NET_DEBUG_CORE)
		size_t pkt_len;
#endif

		pkt = k_fifo_get(&rx_queue, K_FOREVER);

		net_analyze_stack("RX thread", rx_stack, sizeof(rx_stack));

#if defined(CONFIG_NET_STATISTICS) || defined(CONFIG_NET_DEBUG_CORE)
		pkt_len = net_pkt_get_len(pkt);
#endif

		NET_DBG("Received pkt %p len %zu", pkt, pkt_len);

		net_stats_update_bytes_recv(pkt_len);

		processing_data(pkt, false);

		net_print_statistics();
		net_pkt_print();

		k_yield();
	}
}
/**
 * TX - transmit to SLIP interface
 */
static void tx_thread(void)
{
	SYS_LOG_DBG("TX thread started");

	/* Allow to send one TX */
	k_sem_give(&tx_sem);

	while (1) {
		struct net_pkt *pkt;
		struct net_buf *buf;
		size_t len;

		k_sem_take(&tx_sem, K_FOREVER);
		pkt = k_fifo_get(&tx_queue, K_FOREVER);
		buf = net_buf_frag_last(pkt->frags);
		len = net_pkt_get_len(pkt);

		SYS_LOG_DBG("Send pkt %p buf %p len %d", pkt, buf, len);

		hexdump("SLIP <", buf->data, buf->len);

		/* Remove LQI */
		/* TODO: Reuse get_lqi() */
		buf->len -= 1;

		/* remove FCS 2 bytes */
		buf->len -= 2;

		/* SLIP encode and send */
		len = slip_buffer(slip_buf, buf);
		uart_fifo_fill(uart_dev, slip_buf, len);

		net_pkt_unref(pkt);

#if 0
		k_yield();
#endif
	}
}
/* Receive encrypted data from network. Put that data into fifo
 * that will be read by https thread.
 */
static void ssl_received(struct net_context *context,
			 struct net_pkt *pkt,
			 int status,
			 void *user_data)
{
	struct http_client_ctx *http_ctx = user_data;
	struct rx_fifo_block *rx_data = NULL;
	struct k_mem_block block;
	int ret;

	ARG_UNUSED(context);
	ARG_UNUSED(status);

	if (pkt && !net_pkt_appdatalen(pkt)) {
		net_pkt_unref(pkt);
		return;
	}

	ret = k_mem_pool_alloc(http_ctx->https.pool, &block,
			       sizeof(struct rx_fifo_block),
			       BUF_ALLOC_TIMEOUT);
	if (ret < 0) {
		if (pkt) {
			net_pkt_unref(pkt);
		}

		return;
	}

	rx_data = block.data;
	rx_data->pkt = pkt;

	/* For freeing memory later */
	memcpy(&rx_data->block, &block, sizeof(struct k_mem_block));

	k_fifo_put(&http_ctx->https.mbedtls.ssl_ctx.rx_fifo, (void *)rx_data);

	/* Let ssl_rx() run */
	k_yield();
}
void http_client_release(struct http_client_ctx *ctx)
{
	if (!ctx) {
		return;
	}

#if defined(CONFIG_HTTPS)
	if (ctx->is_https) {
		https_shutdown(ctx);
	}
#endif /* CONFIG_HTTPS */

	/* https_shutdown() might have released the context already */
	if (ctx->tcp.ctx) {
		net_context_put(ctx->tcp.ctx);
		ctx->tcp.ctx = NULL;
	}

	ctx->tcp.receive_cb = NULL;
	ctx->rsp.cb = NULL;

	k_sem_give(&ctx->req.wait);

#if defined(CONFIG_DNS_RESOLVER)
	if (ctx->dns_id) {
		dns_cancel_addr_info(ctx->dns_id);
	}
#endif

	/* Let all the pending waiters run */
	k_yield();

	/* Coverity tells in CID 170742 that the next memset() is
	 * overwriting the ctx struct. This is a false positive as
	 * the struct is initialized with proper size.
	 */
	memset(ctx, 0, sizeof(*ctx));
}
static bool run_tests(void)
{
	struct net_pkt *pkt, *pkt2;
	struct net_buf *frag;
	struct net_if *iface;
	struct net_if_addr *ifaddr;
	struct net_arp_hdr *arp_hdr;
	struct net_ipv4_hdr *ipv4;
	struct net_eth_hdr *eth_hdr;
	int len;

	struct in_addr dst = { { { 192, 168, 0, 2 } } };
	struct in_addr dst_far = { { { 10, 11, 12, 13 } } };
	struct in_addr dst_far2 = { { { 172, 16, 14, 186 } } };
	struct in_addr src = { { { 192, 168, 0, 1 } } };
	struct in_addr netmask = { { { 255, 255, 255, 0 } } };
	struct in_addr gw = { { { 192, 168, 0, 42 } } };

	net_arp_init();

	iface = net_if_get_default();

	net_if_ipv4_set_gw(iface, &gw);
	net_if_ipv4_set_netmask(iface, &netmask);

	/* Unicast test */
	ifaddr = net_if_ipv4_addr_add(iface, &src, NET_ADDR_MANUAL, 0);
	ifaddr->addr_state = NET_ADDR_PREFERRED;

	/* Application data for testing */
	pkt = net_pkt_get_reserve_tx(sizeof(struct net_eth_hdr), K_FOREVER);
	if (!pkt) {
		printk("Out of mem TX\n");
		return false;
	}

	frag = net_pkt_get_frag(pkt, K_FOREVER);
	if (!frag) {
		printk("Out of mem DATA\n");
		return false;
	}

	net_pkt_frag_add(pkt, frag);

	net_pkt_set_iface(pkt, iface);

	setup_eth_header(iface, pkt, &hwaddr, NET_ETH_PTYPE_IP);

	len = strlen(app_data);

	if (net_pkt_ll_reserve(pkt) != sizeof(struct net_eth_hdr)) {
		printk("LL reserve invalid, should be %zd was %d\n",
		       sizeof(struct net_eth_hdr),
		       net_pkt_ll_reserve(pkt));
		return false;
	}

	ipv4 = (struct net_ipv4_hdr *)net_buf_add(frag,
						  sizeof(struct net_ipv4_hdr));
	net_ipaddr_copy(&ipv4->src, &src);
	net_ipaddr_copy(&ipv4->dst, &dst);

	memcpy(net_buf_add(frag, len), app_data, len);

	pkt2 = net_arp_prepare(pkt);

	/* pkt2 is the ARP packet and pkt is the IPv4 packet and it was
	 * stored in ARP table.
	 */
	if (pkt2 == pkt) {
		/* The packets cannot be the same as the ARP cache has
		 * still room for the pkt.
		 */
		printk("ARP cache should still have free space\n");
		return false;
	}

	if (!pkt2) {
		printk("ARP pkt is empty\n");
		return false;
	}

	/* The ARP cache should now have a link to pending net_pkt
	 * that is to be sent after we have got an ARP reply.
	 */
	if (!pkt->frags) {
		printk("Pending pkt fragment is NULL\n");
		return false;
	}

	pending_pkt = pkt;

	/* pkt2 should contain the arp header, verify it */
	if (memcmp(net_pkt_ll(pkt2), net_eth_broadcast_addr(),
		   sizeof(struct net_eth_addr))) {
		printk("ARP ETH dest address invalid\n");
		net_hexdump("ETH dest wrong ", net_pkt_ll(pkt2),
			    sizeof(struct net_eth_addr));
		net_hexdump("ETH dest correct",
			    (u8_t *)net_eth_broadcast_addr(),
			    sizeof(struct net_eth_addr));
		return false;
	}

	if (memcmp(net_pkt_ll(pkt2) + sizeof(struct net_eth_addr),
		   iface->link_addr.addr,
		   sizeof(struct net_eth_addr))) {
		printk("ARP ETH source address invalid\n");
		net_hexdump("ETH src correct",
			    iface->link_addr.addr,
			    sizeof(struct net_eth_addr));
		net_hexdump("ETH src wrong ",
			    net_pkt_ll(pkt2) + sizeof(struct net_eth_addr),
			    sizeof(struct net_eth_addr));
		return false;
	}

	arp_hdr = NET_ARP_HDR(pkt2);
	eth_hdr = NET_ETH_HDR(pkt2);

	if (eth_hdr->type != htons(NET_ETH_PTYPE_ARP)) {
		printk("ETH type 0x%x, should be 0x%x\n",
		       eth_hdr->type, htons(NET_ETH_PTYPE_ARP));
		return false;
	}

	if (arp_hdr->hwtype != htons(NET_ARP_HTYPE_ETH)) {
		printk("ARP hwtype 0x%x, should be 0x%x\n",
		       arp_hdr->hwtype, htons(NET_ARP_HTYPE_ETH));
		return false;
	}

	if (arp_hdr->protocol != htons(NET_ETH_PTYPE_IP)) {
		printk("ARP protocol 0x%x, should be 0x%x\n",
		       arp_hdr->protocol, htons(NET_ETH_PTYPE_IP));
		return false;
	}

	if (arp_hdr->hwlen != sizeof(struct net_eth_addr)) {
		printk("ARP hwlen 0x%x, should be 0x%zx\n",
		       arp_hdr->hwlen, sizeof(struct net_eth_addr));
		return false;
	}

	if (arp_hdr->protolen != sizeof(struct in_addr)) {
		printk("ARP IP addr len 0x%x, should be 0x%zx\n",
		       arp_hdr->protolen, sizeof(struct in_addr));
		return false;
	}

	if (arp_hdr->opcode != htons(NET_ARP_REQUEST)) {
		printk("ARP opcode 0x%x, should be 0x%x\n",
		       arp_hdr->opcode, htons(NET_ARP_REQUEST));
		return false;
	}

	if (!net_ipv4_addr_cmp(&arp_hdr->dst_ipaddr,
			       &NET_IPV4_HDR(pkt)->dst)) {
		char out[sizeof("xxx.xxx.xxx.xxx")];

		snprintk(out, sizeof(out), "%s",
			 net_sprint_ipv4_addr(&arp_hdr->dst_ipaddr));
		printk("ARP IP dest invalid %s, should be %s", out,
		       net_sprint_ipv4_addr(&NET_IPV4_HDR(pkt)->dst));
		return false;
	}

	if (!net_ipv4_addr_cmp(&arp_hdr->src_ipaddr,
			       &NET_IPV4_HDR(pkt)->src)) {
		char out[sizeof("xxx.xxx.xxx.xxx")];

		snprintk(out, sizeof(out), "%s",
			 net_sprint_ipv4_addr(&arp_hdr->src_ipaddr));
		printk("ARP IP src invalid %s, should be %s", out,
		       net_sprint_ipv4_addr(&NET_IPV4_HDR(pkt)->src));
		return false;
	}

	/* We could have sent the new ARP request but for this test we
	 * just free it.
	 */
	net_pkt_unref(pkt2);

	if (pkt->ref != 2) {
		printk("ARP cache should own the original packet\n");
		return false;
	}

	/* Then a case where target is not in the same subnet */
	net_ipaddr_copy(&ipv4->dst, &dst_far);

	pkt2 = net_arp_prepare(pkt);

	if (pkt2 == pkt) {
		printk("ARP cache should not find anything\n");
		return false;
	}

	if (!pkt2) {
		printk("ARP pkt2 is empty\n");
		return false;
	}

	arp_hdr = NET_ARP_HDR(pkt2);

	if (!net_ipv4_addr_cmp(&arp_hdr->dst_ipaddr, &iface->ipv4.gw)) {
		char out[sizeof("xxx.xxx.xxx.xxx")];

		snprintk(out, sizeof(out), "%s",
			 net_sprint_ipv4_addr(&arp_hdr->dst_ipaddr));
		printk("ARP IP dst invalid %s, should be %s\n", out,
		       net_sprint_ipv4_addr(&iface->ipv4.gw));
		return false;
	}

	net_pkt_unref(pkt2);

	/* Try to find the same destination again, this should fail as there
	 * is a pending request in ARP cache.
	 */
	net_ipaddr_copy(&ipv4->dst, &dst_far);

	/* Make sure prepare will not free the pkt because it will be
	 * needed in the later test case.
	 */
	net_pkt_ref(pkt);

	pkt2 = net_arp_prepare(pkt);
	if (!pkt2) {
		printk("ARP cache is not sending the request again\n");
		return false;
	}

	net_pkt_unref(pkt2);

	/* Try to find the different destination, this should fail too
	 * as the cache table should be full.
	 */
	net_ipaddr_copy(&ipv4->dst, &dst_far2);

	/* Make sure prepare will not free the pkt because it will be
	 * needed in the next test case.
	 */
	net_pkt_ref(pkt);

	pkt2 = net_arp_prepare(pkt);
	if (!pkt2) {
		printk("ARP cache did not send a req\n");
		return false;
	}

	/* Restore the original address so that the following test case can
	 * work properly.
	 */
	net_ipaddr_copy(&ipv4->dst, &dst);

	/* The arp request packet is now verified, create an arp reply.
	 * The previous value of pkt is stored in arp table and is not lost.
	 */
	pkt = net_pkt_get_reserve_rx(sizeof(struct net_eth_hdr), K_FOREVER);
	if (!pkt) {
		printk("Out of mem RX reply\n");
		return false;
	}
	printk("%d pkt %p\n", __LINE__, pkt);

	frag = net_pkt_get_frag(pkt, K_FOREVER);
	if (!frag) {
		printk("Out of mem DATA reply\n");
		return false;
	}
	printk("%d frag %p\n", __LINE__, frag);

	net_pkt_frag_add(pkt, frag);

	net_pkt_set_iface(pkt, iface);

	arp_hdr = NET_ARP_HDR(pkt);
	net_buf_add(frag, sizeof(struct net_arp_hdr));

	net_ipaddr_copy(&arp_hdr->dst_ipaddr, &dst);
	net_ipaddr_copy(&arp_hdr->src_ipaddr, &src);

	pkt2 = prepare_arp_reply(iface, pkt, &hwaddr);
	if (!pkt2) {
		printk("ARP reply generation failed.");
		return false;
	}

	/* The pending packet should now be sent */
	switch (net_arp_input(pkt2)) {
	case NET_OK:
	case NET_CONTINUE:
		break;
	case NET_DROP:
		break;
	}

	/* Yielding so that network interface TX thread can proceed. */
	k_yield();

	if (send_status < 0) {
		printk("ARP reply was not sent\n");
		return false;
	}

	if (pkt->ref != 1) {
		printk("ARP cache should no longer own the original packet\n");
		return false;
	}

	net_pkt_unref(pkt);

	/* Then feed in ARP request */
	pkt = net_pkt_get_reserve_rx(sizeof(struct net_eth_hdr), K_FOREVER);
	if (!pkt) {
		printk("Out of mem RX request\n");
		return false;
	}

	frag = net_pkt_get_frag(pkt, K_FOREVER);
	if (!frag) {
		printk("Out of mem DATA request\n");
		return false;
	}

	net_pkt_frag_add(pkt, frag);

	net_pkt_set_iface(pkt, iface);
	send_status = -EINVAL;

	arp_hdr = NET_ARP_HDR(pkt);
	net_buf_add(frag, sizeof(struct net_arp_hdr));

	net_ipaddr_copy(&arp_hdr->dst_ipaddr, &src);
	net_ipaddr_copy(&arp_hdr->src_ipaddr, &dst);

	setup_eth_header(iface, pkt, &hwaddr, NET_ETH_PTYPE_ARP);

	pkt2 = prepare_arp_request(iface, pkt, &hwaddr);
	if (!pkt2) {
		printk("ARP request generation failed.");
		return false;
	}

	req_test = true;

	switch (net_arp_input(pkt2)) {
	case NET_OK:
	case NET_CONTINUE:
		break;
	case NET_DROP:
		break;
	}

	/* Yielding so that network interface TX thread can proceed. */
	k_yield();

	if (send_status < 0) {
		printk("ARP req was not sent\n");
		return false;
	}

	net_pkt_unref(pkt);

	printk("Network ARP checks passed\n");

	return true;
}
int http_client_send_req(struct http_client_ctx *ctx,
			 struct http_client_request *req,
			 http_response_cb_t cb,
			 u8_t *response_buf,
			 size_t response_buf_len,
			 void *user_data,
			 s32_t timeout)
{
	int ret;

	if (!response_buf || response_buf_len == 0) {
		return -EINVAL;
	}

	ctx->rsp.response_buf = response_buf;
	ctx->rsp.response_buf_len = response_buf_len;

	client_reset(ctx);

	/* HTTPS connection is established in https_handler() */
	if (!ctx->is_https) {
		ret = tcp_connect(ctx);
		if (ret < 0 && ret != -EALREADY) {
			NET_DBG("TCP connect error (%d)", ret);
			return ret;
		}
	}

	if (!req->host) {
		req->host = ctx->server;
	}

	ctx->req.host = req->host;
	ctx->req.method = req->method;
	ctx->req.user_data = user_data;

	ctx->rsp.cb = cb;

#if defined(CONFIG_HTTPS)
	if (ctx->is_https) {
		struct tx_fifo_block *tx_data;
		struct k_mem_block block;

		ret = start_https(ctx);
		if (ret != 0 && ret != -EALREADY) {
			NET_ERR("HTTPS init failed (%d)", ret);
			goto out;
		}

		ret = k_mem_pool_alloc(ctx->https.pool, &block,
				       sizeof(struct tx_fifo_block),
				       BUF_ALLOC_TIMEOUT);
		if (ret < 0) {
			goto out;
		}

		tx_data = block.data;
		tx_data->req = req;

		memcpy(&tx_data->block, &block, sizeof(struct k_mem_block));

		/* We need to pass the HTTPS request to HTTPS thread because
		 * of the mbedtls API stack size requirements.
		 */
		k_fifo_put(&ctx->https.mbedtls.ssl_ctx.tx_fifo,
			   (void *)tx_data);

		/* Let the https_handler() start processing the message.
		 *
		 * Note that if the timeout > 0 or is K_FOREVER, then this
		 * yield is not really necessary as the k_sem_take() will
		 * let the https handler thread run. But if the timeout
		 * is K_NO_WAIT, then we need to let the https handler
		 * run now.
		 */
		k_yield();
	} else
#endif /* CONFIG_HTTPS */
	{
		print_info(ctx, ctx->req.method);

		ret = http_request(ctx, req, BUF_ALLOC_TIMEOUT);
		if (ret < 0) {
			NET_DBG("Send error (%d)", ret);
			goto out;
		}
	}

	if (timeout != 0 && k_sem_take(&ctx->req.wait, timeout)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (timeout == 0) {
		return -EINPROGRESS;
	}

	return 0;

out:
	tcp_disconnect(ctx);

	return ret;
}
static int eswifi_spi_request(struct eswifi_dev *eswifi, char *cmd,
			      size_t clen, char *rsp, size_t rlen)
{
	struct eswifi_spi_data *spi = eswifi->bus_data;
	unsigned int offset = 0, to_read = SPI_READ_CHUNK_SIZE;
	char tmp[2];
	int err;

	LOG_DBG("cmd=%p (%u byte), rsp=%p (%u byte)", cmd, clen, rsp, rlen);

	/*
	 * CMD/DATA protocol:
	 * 1. Module raises data-ready when ready for **command phase**
	 * 2. Host announces command start by lowering chip-select (csn)
	 * 3. Host writes the command (possibly several spi transfers)
	 * 4. Host announces end of command by raising chip-select
	 * 5. Module lowers data-ready signal
	 * 6. Module raises data-ready to signal start of the **data phase**
	 * 7. Host lowers chip-select
	 * 8. Host fetches data as long as data-ready pin is up
	 * 9. Module lowers data-ready to signal the end of the data phase
	 * 10. Host raises chip-select
	 *
	 * Note:
	 * All commands to the eS-WiFi module must be post-padded with
	 * 0x0A (Line Feed) to an even number of bytes.
	 * All data from the eS-WiFi module are post-padded with 0x15 (NAK)
	 * to an even number of bytes.
	 */

	if (!cmd) {
		goto data;
	}

	/* CMD/DATA READY signals the Command Phase */
	err = eswifi_spi_wait_cmddata_ready(spi);
	if (err) {
		LOG_ERR("CMD ready timeout\n");
		return err;
	}

	if (clen % 2) { /* Add post-padding if necessary */
		/* cmd is a string so cmd[clen] is 0x00 */
		cmd[clen] = 0x0a;
		clen++;
	}

	eswifi_spi_write(eswifi, cmd, clen);

	/* Our device is flagged with SPI_HOLD_ON_CS|SPI_LOCK_ON, release */
	spi_release(spi->spi_dev, &spi->spi_cfg);

data:
	/* CMD/DATA READY signals the Data Phase */
	err = eswifi_spi_wait_cmddata_ready(spi);
	if (err) {
		LOG_ERR("DATA ready timeout\n");
		return err;
	}

	while (eswifi_spi_cmddata_ready(spi) && to_read) {
		to_read = MIN(rlen - offset, to_read);
		memset(rsp + offset, 0, to_read);
		eswifi_spi_read(eswifi, rsp + offset, to_read);
		offset += to_read;
		k_yield();
	}

	/* Flush remaining data if receiving buffer not large enough */
	while (eswifi_spi_cmddata_ready(spi)) {
		eswifi_spi_read(eswifi, tmp, 2);
		k_sleep(1);
	}

	/* Our device is flagged with SPI_HOLD_ON_CS|SPI_LOCK_ON, release */
	spi_release(spi->spi_dev, &spi->spi_cfg);

	LOG_DBG("success");

	return offset;
}
/**
 *
 * @brief Test the k_yield() routine
 *
 * This routine tests the k_yield() routine. It starts another thread
 * (thus also testing k_thread_spawn()) and checks the behaviour of
 * k_yield() against the cases of there being a higher priority thread,
 * a lower priority thread, and another thread of equal priority.
 *
 * On error, it may set <thread_detected_error> to one of the following values:
 *   10 - helper thread ran prematurely
 *   11 - k_yield() did not yield to a higher priority thread
 *   12 - k_yield() did not yield to an equal priority thread
 *   13 - k_yield() yielded to a lower priority thread
 *
 * @return TC_PASS on success
 * @return TC_FAIL on failure
 */
static int test_k_yield(void)
{
	k_tid_t self_thread_id;

	/*
	 * Start a thread of higher priority. Note that since the new thread is
	 * being started from a thread, it will not automatically switch to the
	 * thread as it would if done from a task.
	 */
	self_thread_id = k_current_get();
	thread_evidence = 0;

	k_thread_spawn(thread_stack2, THREAD_STACKSIZE, thread_helper,
		       NULL, NULL, NULL,
		       K_PRIO_COOP(THREAD_PRIORITY - 1), 0, 0);

	if (thread_evidence != 0) {
		/* ERROR! Helper spawned at higher priority ran prematurely. */
		thread_detected_error = 10;
		return TC_FAIL;
	}

	/*
	 * Test that the thread will yield to the higher priority helper.
	 * <thread_evidence> is still 0.
	 */
	k_yield();

	if (thread_evidence == 0) {
		/* ERROR! Did not yield to higher priority thread. */
		thread_detected_error = 11;
		return TC_FAIL;
	}

	if (thread_evidence > 1) {
		/* ERROR! Helper did not yield to equal priority thread. */
		thread_detected_error = 12;
		return TC_FAIL;
	}

	/*
	 * Raise the priority of thread_entry(). Calling k_yield() should
	 * not result in switching to the helper.
	 */
	k_thread_priority_set(self_thread_id, self_thread_id->base.prio - 1);
	k_yield();

	if (thread_evidence != 1) {
		/* ERROR! Context switched to a lower priority thread! */
		thread_detected_error = 13;
		return TC_FAIL;
	}

	/*
	 * Block on <sem_thread>. This will allow the helper thread to
	 * complete. The main task will wake this thread.
	 */
	k_sem_take(&sem_thread, K_FOREVER);

	return TC_PASS;
}
static void lwm2m_rd_client_service(void)
{
	int index;

	while (true) {
		for (index = 0; index < client_count; index++) {
			switch (get_sm_state(index)) {
			case ENGINE_INIT:
				sm_do_init(index);
				break;
			case ENGINE_DO_BOOTSTRAP:
				sm_do_bootstrap(index);
				break;
			case ENGINE_BOOTSTRAP_SENT:
				/* wait for bootstrap to be done */
				break;
			case ENGINE_BOOTSTRAP_DONE:
				sm_bootstrap_done(index);
				break;
			case ENGINE_DO_REGISTRATION:
				sm_do_registration(index);
				break;
			case ENGINE_REGISTRATION_SENT:
				/* wait for registration to be done */
				break;
			case ENGINE_REGISTRATION_DONE:
				sm_registration_done(index);
				break;
			case ENGINE_UPDATE_SENT:
				/* wait for update to be done */
				break;
			case ENGINE_DEREGISTER:
				sm_do_deregister(index);
				break;
			case ENGINE_DEREGISTER_SENT:
				break;
			case ENGINE_DEREGISTER_FAILED:
				break;
			case ENGINE_DEREGISTERED:
				break;
			default:
				SYS_LOG_ERR("Unhandled state: %d",
					    get_sm_state(index));
			}

			k_yield();
		}

		/*
		 * TODO: calculate the diff between the start of the loop
		 * and subtract that from the update interval
		 */
		k_sleep(K_MSEC(STATE_MACHINE_UPDATE_INTERVAL));
	}
}