/* RX thread: consumes H5 link-control packets from h5.rx_queue and drives
 * the 3-wire link-establishment state machine (SYNC -> CONF -> ACTIVE).
 *
 * Runs forever; blocks on the queue (K_FOREVER), so it only spins while
 * packets keep arriving.  Each buffer is matched against the well-known
 * two-byte link-control message patterns (sync_req/sync_rsp/conf_req/
 * conf_rsp defined elsewhere in this file) and answered per the Bluetooth
 * Three-wire UART transport rules.  The buffer is always unreferenced
 * before looping.
 */
static void rx_thread(void)
{
	BT_DBG("");

	while (true) {
		struct net_buf *buf;

		buf = net_buf_get(&h5.rx_queue, K_FOREVER);

		hexdump("=> ", buf->data, buf->len);

		if (!memcmp(buf->data, sync_req, sizeof(sync_req))) {
			/* Peer (re)started sync: a SYNC while ACTIVE means
			 * the peer reset — full H5 reset is still TODO.
			 */
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}
			h5_send(sync_rsp, HCI_3WIRE_LINK_PKT, sizeof(sync_rsp));
		} else if (!memcmp(buf->data, sync_rsp, sizeof(sync_rsp))) {
			if (h5.link_state == ACTIVE) {
				/* TODO Reset H5 */
			}

			/* SYNC acknowledged: advance to INIT and start the
			 * configuration exchange, advertising our tx window.
			 */
			h5.link_state = INIT;
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_req, 2)) {
			/*
			 * The Host sends Config Response messages without a
			 * Configuration Field.
			 */
			h5_send(conf_rsp, HCI_3WIRE_LINK_PKT, sizeof(conf_rsp));

			/* Then send Config Request with Configuration Field */
			h5_set_txwin(conf_req);
			h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req));
		} else if (!memcmp(buf->data, conf_rsp, 2)) {
			/* Only the first two bytes identify the message; the
			 * optional third byte carries the peer's config field
			 * (low 3 bits = sliding-window size).
			 */
			h5.link_state = ACTIVE;
			if (buf->len > 2) {
				/* Configuration field present */
				h5.tx_win = (buf->data[2] & 0x07);
			}

			BT_DBG("Finished H5 configuration, tx_win %u",
			       h5.tx_win);
		} else {
			BT_ERR("Not handled yet %x %x",
			       buf->data[0], buf->data[1]);
		}

		net_buf_unref(buf);

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}
/* TX fiber (legacy microkernel/nanokernel API variant): sends the initial
 * SYNC request, then loops on h5.link_state.  While the link is not yet
 * ACTIVE it just sleeps (periodic sync/conf resend is still a FIXME);
 * once ACTIVE it dequeues outgoing buffers, transmits them, and parks each
 * sent buffer on the unack queue until the peer acknowledges it, (re)arming
 * the retransmit fiber with H5_TX_ACK_TIMEOUT after every send.
 */
static void tx_fiber(void)
{
	BT_DBG("");

	/* FIXME: make periodic sending */
	h5_send(sync_req, HCI_3WIRE_LINK_PKT, sizeof(sync_req));

	while (true) {
		struct net_buf *buf;
		uint8_t type;

		BT_DBG("link_state %u", h5.link_state);

		switch (h5.link_state) {
		case UNINIT:
			/* FIXME: send sync */
			fiber_sleep(10);
			break;
		case INIT:
			/* FIXME: send conf */
			fiber_sleep(10);
			break;
		case ACTIVE:
			/* Blocks until a buffer is queued for transmission */
			buf = nano_fifo_get(&h5.tx_queue, TICKS_UNLIMITED);
			type = h5_get_type(buf);

			h5_send(buf->data, type, buf->len);

			/* buf is dequeued from tx_queue and queued to unack
			 * queue.
			 */
			nano_fifo_put(&h5.unack_queue, buf);
			unack_queue_len++;

			/* Restart the ack timeout from the latest send:
			 * cancel any pending retx fiber before re-arming.
			 */
			if (h5.retx_to) {
				fiber_delayed_start_cancel(h5.retx_to);
			}

			h5.retx_to = fiber_delayed_start(retx_stack,
							 sizeof(retx_stack),
							 retx_fiber, 0, 0, 7, 0,
							 H5_TX_ACK_TIMEOUT);
			break;
		}
	}
}
/* Ack timeout work handler: the peer's data went unacknowledged long
 * enough, so send a standalone H5 ACK packet (NULL payload, length 0).
 * Also dumps TX/RX thread stack usage for debugging.
 *
 * NOTE(review): this chunk contains a second ack_timeout definition (using
 * lowercase stack_analyze()) — these look like two historical revisions of
 * the same function; confirm only one is actually compiled.
 */
static void ack_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	BT_DBG("");

	h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);

	/* Analyze stacks */
	STACK_ANALYZE("tx_stack", tx_stack);
	STACK_ANALYZE("rx_stack", rx_stack);
}
/* Ack timeout work handler: emits a standalone H5 ACK packet (NULL
 * payload, length 0) when the pending-ack timer fires, then reports
 * TX/RX stack usage via stack_analyze() for debugging.
 *
 * NOTE(review): an earlier ack_timeout definition (using the
 * STACK_ANALYZE macro) also appears in this chunk — apparently two
 * historical revisions; confirm only one is actually compiled.
 */
static void ack_timeout(struct k_work *work)
{
	ARG_UNUSED(work);

	BT_DBG("");

	h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);

	/* Analyze stacks */
	stack_analyze("tx_stack", tx_stack, sizeof(tx_stack));
	stack_analyze("rx_stack", rx_stack, sizeof(rx_stack));
}
static void tx_thread(void) { BT_DBG(""); /* FIXME: make periodic sending */ h5_send(sync_req, HCI_3WIRE_LINK_PKT, sizeof(sync_req)); while (true) { struct net_buf *buf; u8_t type; BT_DBG("link_state %u", h5.link_state); switch (h5.link_state) { case UNINIT: /* FIXME: send sync */ k_sleep(100); break; case INIT: /* FIXME: send conf */ k_sleep(100); break; case ACTIVE: buf = net_buf_get(&h5.tx_queue, K_FOREVER); type = h5_get_type(buf); h5_send(buf->data, type, buf->len); /* buf is dequeued from tx_queue and queued to unack * queue. */ net_buf_put(&h5.unack_queue, buf); unack_queue_len++; k_delayed_work_submit(&retx_work, H5_TX_ACK_TIMEOUT); break; } } }
/* Ack fiber (legacy API variant): one-shot fiber started to acknowledge
 * received packets.  Clears the pending-ack handle (h5.ack_to) so a new
 * ack fiber can be scheduled, sends a standalone H5 ACK packet (NULL
 * payload, length 0), then dumps all driver fiber stacks for debugging.
 */
static void ack_fiber(int arg1, int arg2)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	BT_DBG("");

	/* Mark the delayed ack as no longer pending before sending */
	h5.ack_to = NULL;

	h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);

	/* Analyze stacks */
	stack_analyze("ack_stack", ack_stack, sizeof(ack_stack));
	stack_analyze("tx_stack", tx_stack, sizeof(tx_stack));
	stack_analyze("rx_stack", rx_stack, sizeof(rx_stack));
	stack_analyze("retx_stack", retx_stack, sizeof(retx_stack));
}