/* Delayed work taking care about retransmitting packets */ static void retx_timeout(struct k_work *work) { ARG_UNUSED(work); BT_DBG("unack_queue_len %u", unack_queue_len); if (unack_queue_len) { struct k_fifo tmp_queue; struct net_buf *buf; k_fifo_init(&tmp_queue); /* Queue to temperary queue */ while ((buf = net_buf_get(&h5.tx_queue, K_NO_WAIT))) { net_buf_put(&tmp_queue, buf); } /* Queue unack packets to the beginning of the queue */ while ((buf = net_buf_get(&h5.unack_queue, K_NO_WAIT))) { /* include also packet type */ net_buf_push(buf, sizeof(u8_t)); net_buf_put(&h5.tx_queue, buf); h5.tx_seq = (h5.tx_seq - 1) & 0x07; unack_queue_len--; } /* Queue saved packets from temp queue */ while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) { net_buf_put(&h5.tx_queue, buf); } } }
/* Allocate an IP buffer of the given type (RX or TX) from the matching
 * free-buffer FIFO and initialize its bookkeeping fields.
 *
 * NOTE(review): the stray #endif below closes an #ifdef DEBUG_IP_BUFS
 * conditional that starts before this chunk — in debug builds an
 * alternate signature carrying `caller` and `line` is compiled instead,
 * which is why those names appear in the DEBUG paths here. Confirm
 * against the full file.
 */
static struct net_buf *ip_buf_get_reserve(enum ip_buf_type type,
					  uint16_t reserve_head)
#endif
{
	struct net_buf *buf = NULL;

	/* Note that we do not reserve any space in front of the
	 * buffer so buf->data points to first byte of the IP header.
	 * This is done like this so that IP stack works the same
	 * way as BT and 802.15.4 stacks.
	 *
	 * The reserve_head variable in the function will tell
	 * the size of the IP + other headers if there are any.
	 * That variable is only used to calculate the pointer
	 * where the application data starts.
	 */
	switch (type) {
	case IP_BUF_RX:
		buf = net_buf_get(&free_rx_bufs, 0);
		dec_free_rx_bufs(buf);
		break;
	case IP_BUF_TX:
		buf = net_buf_get(&free_tx_bufs, 0);
		dec_free_tx_bufs(buf);
		break;
	}

	/* Pool exhausted: log (with caller info in debug builds) and bail. */
	if (!buf) {
#ifdef DEBUG_IP_BUFS
		NET_ERR("Failed to get free %s buffer (%s():%d)\n",
			type2str(type), caller, line);
#else
		NET_ERR("Failed to get free %s buffer\n", type2str(type));
#endif
		return NULL;
	}

	/* Record type and where application data will start; the header
	 * area itself is claimed via net_buf_add() below.
	 */
	ip_buf_type(buf) = type;
	ip_buf_appdata(buf) = buf->data + reserve_head;
	ip_buf_appdatalen(buf) = 0;
	ip_buf_reserve(buf) = reserve_head;
	net_buf_add(buf, reserve_head);

	NET_BUF_CHECK_IF_NOT_IN_USE(buf);

#ifdef DEBUG_IP_BUFS
	NET_DBG("%s [%d] buf %p reserve %u ref %d (%s():%d)\n",
		type2str(type), get_frees(type), buf, reserve_head,
		buf->ref, caller, line);
#else
	NET_DBG("%s buf %p reserve %u ref %d\n", type2str(type), buf,
		reserve_head, buf->ref);
#endif
	return buf;
}
/* RX thread: allocates HCI buffers for packets whose header the ISR has
 * already parsed, then drains rx.fifo and hands each packet to the host
 * stack via bt_recv(). UART RX interrupts are toggled around bt_recv()
 * so the ISR never races this thread for rx.buf.
 */
static void rx_thread(void *p1, void *p2, void *p3)
{
	struct net_buf *buf;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	BT_DBG("started");

	while (1) {
		BT_DBG("rx.buf %p", rx.buf);

		/* We can only do the allocation if we know the initial
		 * header, since Command Complete/Status events must use the
		 * original command buffer (if available).
		 */
		if (rx.have_hdr && !rx.buf) {
			/* May block until a buffer is freed elsewhere. */
			rx.buf = get_rx(K_FOREVER);
			BT_DBG("Got rx.buf %p", rx.buf);
			if (rx.remaining > net_buf_tailroom(rx.buf)) {
				/* Payload can never fit: drop the whole
				 * packet by discarding the remaining bytes.
				 */
				BT_ERR("Not enough space in buffer");
				rx.discard = rx.remaining;
				reset_rx();
			} else {
				copy_hdr(rx.buf);
			}
		}

		/* Let the ISR continue receiving new packets */
		uart_irq_rx_enable(h4_dev);

		buf = net_buf_get(&rx.fifo, K_FOREVER);
		do {
			uart_irq_rx_enable(h4_dev);

			BT_DBG("Calling bt_recv(%p)", buf);
			bt_recv(buf);

			/* Give other threads a chance to run if the ISR
			 * is receiving data so fast that rx.fifo never
			 * or very rarely goes empty.
			 */
			k_yield();

			/* Disable RX interrupts while checking the FIFO so
			 * the ISR cannot enqueue between get and re-enable.
			 */
			uart_irq_rx_disable(h4_dev);

			buf = net_buf_get(&rx.fifo, K_NO_WAIT);
		} while (buf);
	}
}
/* TX-ready ISR path: feeds the current TX buffer into the UART FIFO,
 * prefixing it with the one-byte H:4 packet-type indicator. Keeps state
 * in tx.buf/tx.type across interrupts since the FIFO may accept only
 * part of the data each time.
 */
static inline void process_tx(void)
{
	int bytes;

	if (!tx.buf) {
		tx.buf = net_buf_get(&tx.fifo, K_NO_WAIT);
		if (!tx.buf) {
			/* TX IRQ fired with nothing to send — mask it. */
			BT_ERR("TX interrupt but no pending buffer!");
			uart_irq_tx_disable(h4_dev);
			return;
		}
	}

	/* tx.type == H4_NONE means the type byte hasn't been sent yet. */
	if (!tx.type) {
		switch (bt_buf_get_type(tx.buf)) {
		case BT_BUF_ACL_OUT:
			tx.type = H4_ACL;
			break;
		case BT_BUF_CMD:
			tx.type = H4_CMD;
			break;
		default:
			BT_ERR("Unknown buffer type");
			goto done;
		}

		bytes = uart_fifo_fill(h4_dev, &tx.type, 1);
		if (bytes != 1) {
			/* FIFO full: retry the type byte on the next IRQ. */
			BT_WARN("Unable to send H:4 type");
			tx.type = H4_NONE;
			return;
		}
	}

	/* Push as much payload as the FIFO accepts; pull only what was
	 * actually written so the rest goes out on the next interrupt.
	 */
	bytes = uart_fifo_fill(h4_dev, tx.buf->data, tx.buf->len);
	net_buf_pull(tx.buf, bytes);

	if (tx.buf->len) {
		return;
	}

done:
	/* Current buffer fully sent (or dropped): release it and try to
	 * start the next one; mask TX IRQ if the queue is empty.
	 */
	tx.type = H4_NONE;
	net_buf_unref(tx.buf);
	tx.buf = net_buf_get(&tx.fifo, K_NO_WAIT);
	if (!tx.buf) {
		uart_irq_tx_disable(h4_dev);
	}
}
/* Allocate an outgoing PDU from the given pool, reserving headroom for
 * the HCI ACL header and the driver's configured send reserve on top of
 * the caller-requested reserve.
 */
struct net_buf *bt_conn_create_pdu(struct nano_fifo *fifo, size_t reserve)
{
	return net_buf_get(fifo, reserve + sizeof(struct bt_hci_acl_hdr) +
			   CONFIG_BLUETOOTH_HCI_SEND_RESERVE);
}
/*---------------------------------------------------------------------------*/
/* Send a UDP datagram, fragmenting the payload across multiple buffers
 * when it exceeds one MTU's worth of data.
 *
 * NOTE(review): the implementation is disabled (#if 0) and currently a
 * stub that always returns GOOD; ipDst and ipTos are unused even in the
 * disabled code — presumably consumed by a later IP layer. Confirm
 * before enabling.
 *
 * Fix applied to the disabled code: the cpySize selection was inverted —
 * it copied dataSize when the remainder was smaller (over-read past the
 * caller's data) and the full remainder when it exceeded one datagram
 * (overflowing the buffer). Each chunk must be
 * min(remaining, dataSize).
 */
usys udp_send (u16 size, void *data, u16 srcPort, u16 dstPort,
               u32 ipDst, u8 ipTos)
{
#if 0
   usys cpySize, sizeProcessed ;
   usys offset, dataSize ;
   u8 *src ;
   buf_t *buf ;

   /* Calc the data size of the buffer */
   dataSize = net.mtu - fnsSD.netHeaderSize - MAX_IP_HEADER_SIZE ;

   /* Calc the data offset in the buffer */
   offset = fnsSD.netHeaderSize + MAX_IP_HEADER_SIZE + UDP_HEADER_SIZE ;

   /* Start building datagrams of size that matches the MTU */
   sizeProcessed = 0 ;
   src = data ;

   do
   {
      /* Get a new buffer */
      buf = net_buf_get () ;
      if (buf == NULL_PTR)
         return E_MEM ;

      /* Copy at most one datagram's worth of payload:
       * min(remaining, dataSize).
       */
      if ((size - sizeProcessed) <= dataSize)
      {
         cpySize = size - sizeProcessed ;
      } else {
         cpySize = dataSize ;
      }

      memcpy (&buf->data [offset], src, cpySize) ;

      /* Set src port */
      SET_SRC (&buf->data [fnsSD.netHeaderSize + MAX_IP_HEADER_SIZE],
               srcPort) ;

      /* Set dst port */
      SET_DST (&buf->data [fnsSD.netHeaderSize + MAX_IP_HEADER_SIZE],
               dstPort) ;

      /* Set length */
      SET_LEN (&buf->data [fnsSD.netHeaderSize + MAX_IP_HEADER_SIZE],
               cpySize) ;

      src += cpySize ;
      sizeProcessed += cpySize ;

   } while (sizeProcessed != size) ;
#endif

   return GOOD ;
} /* End of function udp_send () */
/* Allocate an L2 buffer from the free pool with the requested headroom
 * and return it cleared, or NULL if the pool is exhausted.
 *
 * NOTE(review): the stray #endif below closes an #ifdef DEBUG_L2_BUFS
 * conditional that starts before this chunk — in debug builds an
 * alternate signature carrying `caller` and `line` is compiled instead,
 * which is why those names appear in the DEBUG paths here. Confirm
 * against the full file.
 */
struct net_buf *l2_buf_get_reserve(uint16_t reserve_head)
#endif
{
	struct net_buf *buf;

	buf = net_buf_get(&free_l2_bufs, reserve_head);
	if (!buf) {
#ifdef DEBUG_L2_BUFS
		NET_ERR("Failed to get free L2 buffer (%s():%d)\n",
			caller, line);
#else
		NET_ERR("Failed to get free L2 buffer\n");
#endif
		return NULL;
	}

	/* Account for the buffer leaving the free pool. */
	dec_free_l2_bufs(buf);
	NET_BUF_CHECK_IF_NOT_IN_USE(buf);

#ifdef DEBUG_L2_BUFS
	NET_DBG("[%d] buf %p reserve %u ref %d (%s():%d)\n",
		get_free_l2_bufs(), buf, reserve_head, buf->ref,
		caller, line);
#else
	NET_DBG("buf %p reserve %u ref %d\n", buf, reserve_head, buf->ref);
#endif

	/* Hand the buffer out with a clean packetbuf state. */
	packetbuf_clear(buf);

	return buf;
}
static void rx_thread(void) { BT_DBG(""); while (true) { struct net_buf *buf; buf = net_buf_get(&h5.rx_queue, K_FOREVER); hexdump("=> ", buf->data, buf->len); if (!memcmp(buf->data, sync_req, sizeof(sync_req))) { if (h5.link_state == ACTIVE) { /* TODO Reset H5 */ } h5_send(sync_rsp, HCI_3WIRE_LINK_PKT, sizeof(sync_rsp)); } else if (!memcmp(buf->data, sync_rsp, sizeof(sync_rsp))) { if (h5.link_state == ACTIVE) { /* TODO Reset H5 */ } h5.link_state = INIT; h5_set_txwin(conf_req); h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req)); } else if (!memcmp(buf->data, conf_req, 2)) { /* * The Host sends Config Response messages without a * Configuration Field. */ h5_send(conf_rsp, HCI_3WIRE_LINK_PKT, sizeof(conf_rsp)); /* Then send Config Request with Configuration Field */ h5_set_txwin(conf_req); h5_send(conf_req, HCI_3WIRE_LINK_PKT, sizeof(conf_req)); } else if (!memcmp(buf->data, conf_rsp, 2)) { h5.link_state = ACTIVE; if (buf->len > 2) { /* Configuration field present */ h5.tx_win = (buf->data[2] & 0x07); } BT_DBG("Finished H5 configuration, tx_win %u", h5.tx_win); } else { BT_ERR("Not handled yet %x %x", buf->data[0], buf->data[1]); } net_buf_unref(buf); /* Make sure we don't hog the CPU if the rx_queue never * gets empty. */ k_yield(); } }
/* Fetch the next buffer to transmit on the channel: the stashed
 * in-progress buffer takes priority; otherwise poll the TX queue
 * without blocking (may return NULL).
 */
static struct net_buf *l2cap_chan_le_get_tx_buf(struct bt_l2cap_le_chan *ch)
{
	struct net_buf *pending = ch->tx_buf;

	/* Hand back the stashed buffer first, clearing the stash so it is
	 * only returned once.
	 */
	if (pending) {
		ch->tx_buf = NULL;
		return pending;
	}

	return net_buf_get(&ch->tx_queue, K_NO_WAIT);
}
static void tx_thread(void) { SYS_LOG_DBG("Tx thread started"); while (1) { uint8_t cmd; struct net_buf *pkt, *buf; pkt = net_buf_get(&tx_queue, K_FOREVER); buf = net_buf_frag_last(pkt); cmd = net_buf_pull_u8(buf); hexdump(">", buf->data, buf->len); switch (cmd) { case RESET: SYS_LOG_DBG("Reset device"); break; case TX: tx(pkt); break; case START: start(); break; case STOP: stop(); break; case SET_CHANNEL: set_channel(buf->data, buf->len); break; case SET_IEEE_ADDR: set_ieee_addr(buf->data, buf->len); break; case SET_SHORT_ADDR: set_short_addr(buf->data, buf->len); break; case SET_PAN_ID: set_pan_id(buf->data, buf->len); break; default: SYS_LOG_ERR("%x: Not handled for now", cmd); break; } net_nbuf_unref(pkt); k_yield(); } }
/* Process an incoming acknowledgement: walk the sequence numbers back
 * from tx_seq toward rx_ack to work out how many packets in the unack
 * queue the peer has confirmed, then drop that many from the queue.
 */
static void process_unack(void)
{
	u8_t next_seq = h5.tx_seq;
	/* Start assuming everything is acked; the loop below subtracts one
	 * for each still-unacked packet.
	 */
	u8_t number_removed = unack_queue_len;

	if (!unack_queue_len) {
		return;
	}

	BT_DBG("rx_ack %u tx_ack %u tx_seq %u unack_queue_len %u",
	       h5.rx_ack, h5.tx_ack, h5.tx_seq, unack_queue_len);

	while (unack_queue_len > 0) {
		if (next_seq == h5.rx_ack) {
			/* Next sequence number is the same as last received
			 * ack number
			 */
			break;
		}

		number_removed--;
		/* Similar to (n - 1) % 8 with unsigned conversion */
		next_seq = (next_seq - 1) & 0x07;
	}

	/* If we never reached rx_ack the peer acked something outside our
	 * window — log it (the removal below still uses number_removed).
	 */
	if (next_seq != h5.rx_ack) {
		BT_ERR("Wrong sequence: rx_ack %u tx_seq %u next_seq %u",
		       h5.rx_ack, h5.tx_seq, next_seq);
	}

	BT_DBG("Need to remove %u packet from the queue", number_removed);

	while (number_removed) {
		struct net_buf *buf = net_buf_get(&h5.unack_queue, K_NO_WAIT);

		if (!buf) {
			BT_ERR("Unack queue is empty");
			break;
		}

		/* TODO: print or do something with packet */
		BT_DBG("Remove buf from the unack_queue");

		net_buf_unref(buf);
		unack_queue_len--;
		number_removed--;
	}
}
/* TX thread: drives the H:5 link-state machine. Until the link is
 * ACTIVE it (currently just once — see FIXMEs) sends sync/config
 * messages; once ACTIVE it transmits queued packets and parks each on
 * the unack queue pending acknowledgement, arming the retransmit timer.
 */
static void tx_thread(void)
{
	BT_DBG("");

	/* FIXME: make periodic sending */
	h5_send(sync_req, HCI_3WIRE_LINK_PKT, sizeof(sync_req));

	while (true) {
		struct net_buf *buf;
		u8_t type;

		BT_DBG("link_state %u", h5.link_state);

		switch (h5.link_state) {
		case UNINIT:
			/* FIXME: send sync */
			k_sleep(100);
			break;
		case INIT:
			/* FIXME: send conf */
			k_sleep(100);
			break;
		case ACTIVE:
			buf = net_buf_get(&h5.tx_queue, K_FOREVER);
			type = h5_get_type(buf);

			h5_send(buf->data, type, buf->len);

			/* buf is dequeued from tx_queue and queued to unack
			 * queue.
			 */
			net_buf_put(&h5.unack_queue, buf);
			unack_queue_len++;

			/* Arm retransmission in case no ack arrives. */
			k_delayed_work_submit(&retx_work, H5_TX_ACK_TIMEOUT);

			break;
		}
	}
}
static void l2cap_chan_destroy(struct bt_l2cap_chan *chan) { struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan); struct net_buf *buf; BT_DBG("chan %p cid 0x%04x", ch, ch->rx.cid); /* Cancel ongoing work */ k_delayed_work_cancel(&chan->rtx_work); /* Remove buffers on the TX queue */ while ((buf = net_buf_get(&ch->tx_queue, K_NO_WAIT))) { net_buf_unref(buf); } /* Destroy segmented SDU if it exists */ if (ch->_sdu) { net_buf_unref(ch->_sdu); ch->_sdu = NULL; ch->_sdu_len = 0; } }
/* UART ISR for the H:5 transport: a byte-at-a-time SLIP decoder driving
 * the RX state machine START -> HEADER -> PAYLOAD -> END. State that
 * must persist across interrupts (header bytes, byte countdown) is kept
 * in function-level statics.
 */
static void bt_uart_isr(struct device *unused)
{
	static int remaining;
	uint8_t byte;
	int ret;
	static uint8_t hdr[4];

	ARG_UNUSED(unused);

	while (uart_irq_update(h5_dev) &&
	       uart_irq_is_pending(h5_dev)) {

		if (!uart_irq_rx_ready(h5_dev)) {
			if (uart_irq_tx_ready(h5_dev)) {
				BT_DBG("transmit ready");
			} else {
				BT_DBG("spurious interrupt");
			}
			continue;
		}

		ret = uart_fifo_read(h5_dev, &byte, sizeof(byte));
		if (!ret) {
			continue;
		}

		switch (h5.rx_state) {
		case START:
			/* Wait for a SLIP delimiter marking packet start. */
			if (byte == SLIP_DELIMITER) {
				h5.rx_state = HEADER;
				remaining = sizeof(hdr);
			}
			break;
		case HEADER:
			/* In a case we confuse ending slip delimeter
			 * with starting one.
			 */
			if (byte == SLIP_DELIMITER) {
				remaining = sizeof(hdr);
				continue;
			}

			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			memcpy(&hdr[sizeof(hdr) - remaining], &byte, 1);
			remaining--;

			if (remaining) {
				break;
			}

			/* Header complete: countdown now tracks payload. */
			remaining = H5_HDR_LEN(hdr);

			/* Pick an RX buffer based on the packet type. */
			switch (H5_HDR_PKT_TYPE(hdr)) {
			case HCI_EVENT_PKT:
				h5.rx_buf = bt_buf_get_evt();
				if (!h5.rx_buf) {
					BT_WARN("No available event buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			case HCI_ACLDATA_PKT:
				h5.rx_buf = bt_buf_get_acl();
				if (!h5.rx_buf) {
					BT_WARN("No available data buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			case HCI_3WIRE_LINK_PKT:
			case HCI_3WIRE_ACK_PKT:
				h5.rx_buf = net_buf_get(&h5_sig, 0);
				if (!h5.rx_buf) {
					BT_WARN("No available signal buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			default:
				/* Unknown type: skip to trailing delimiter. */
				BT_ERR("Wrong packet type %u",
				       H5_HDR_PKT_TYPE(hdr));
				h5.rx_state = END;
				break;
			}
			break;
		case PAYLOAD:
			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			memcpy(net_buf_add(h5.rx_buf, sizeof(byte)), &byte,
			       sizeof(byte));
			remaining--;
			if (!remaining) {
				h5.rx_state = END;
			}
			break;
		case END:
			if (byte != SLIP_DELIMITER) {
				BT_ERR("Missing ending SLIP_DELIMITER");
				h5_reset_rx();
				break;
			}

			BT_DBG("Received full packet: type %u",
			       H5_HDR_PKT_TYPE(hdr));

			/* Check when full packet is received, it can be done
			 * when parsing packet header but we need to receive
			 * full packet anyway to clear UART.
			 */
			if (H5_HDR_RELIABLE(hdr) &&
			    H5_HDR_SEQ(hdr) != h5.tx_ack) {
				BT_ERR("Seq expected %u got %u. Drop packet",
				       h5.tx_ack, H5_HDR_SEQ(hdr));
				h5_reset_rx();
				break;
			}

			h5_process_complete_packet(hdr);
			h5.rx_state = START;
			break;
		}
	}
}
/* Transition a connection to a new state, performing the exit actions
 * of the old state and the entry actions of the new one. Reference
 * counting is load-bearing here: a ref is taken on the first transition
 * out of DISCONNECTED and dropped on reaching DISCONNECTED again, with
 * extra refs held by the TX and timeout fibers while they run.
 */
void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
{
	bt_conn_state_t old_state;

	BT_DBG("%s -> %s", state2str(conn->state), state2str(state));

	if (conn->state == state) {
		BT_WARN("no transition");
		return;
	}

	old_state = conn->state;
	conn->state = state;

	/* Actions needed for exiting the old state */
	switch (old_state) {
	case BT_CONN_DISCONNECTED:
		/* Take a reference for the first state transition after
		 * bt_conn_add_le() and keep it until reaching DISCONNECTED
		 * again.
		 */
		bt_conn_ref(conn);
		break;
	case BT_CONN_CONNECT:
		/* Leaving CONNECT: the Create Connection timeout (if armed)
		 * is no longer needed.
		 */
		if (conn->timeout) {
			fiber_delayed_start_cancel(conn->timeout);
			conn->timeout = NULL;

			/* Drop the reference taken by timeout fiber */
			bt_conn_unref(conn);
		}
		break;
	default:
		break;
	}

	/* Actions needed for entering the new state */
	switch (conn->state) {
	case BT_CONN_CONNECTED:
		/* Start the per-connection TX fiber (it holds its own ref)
		 * and notify upper layers.
		 */
		nano_fifo_init(&conn->tx_queue);
		fiber_start(conn->stack, sizeof(conn->stack), conn_tx_fiber,
			    (int)bt_conn_ref(conn), 0, 7, 0);

		bt_l2cap_connected(conn);
		notify_connected(conn);
		break;
	case BT_CONN_DISCONNECTED:
		/* Notify disconnection and queue a dummy buffer to wake
		 * up and stop the tx fiber for states where it was
		 * running.
		 */
		if (old_state == BT_CONN_CONNECTED ||
		    old_state == BT_CONN_DISCONNECT) {
			bt_l2cap_disconnected(conn);
			notify_disconnected(conn);

			nano_fifo_put(&conn->tx_queue,
				      net_buf_get(&dummy, 0));
		} else if (old_state == BT_CONN_CONNECT) {
			/* conn->err will be set in this case */
			notify_connected(conn);
		} else if (old_state == BT_CONN_CONNECT_SCAN && conn->err) {
			/* this indicate LE Create Connection failed */
			notify_connected(conn);
		}

		/* Release the reference we took for the very first
		 * state transition.
		 */
		bt_conn_unref(conn);

		break;
	case BT_CONN_CONNECT_SCAN:
		break;
	case BT_CONN_CONNECT:
		/*
		 * Timer is needed only for LE. For other link types controller
		 * will handle connection timeout.
		 */
		if (conn->type != BT_CONN_TYPE_LE) {
			break;
		}

		/* Add LE Create Connection timeout */
		conn->timeout = fiber_delayed_start(conn->stack,
						    sizeof(conn->stack),
						    timeout_fiber,
						    (int)bt_conn_ref(conn),
						    0, 7, 0, CONN_TIMEOUT);
		break;
	case BT_CONN_DISCONNECT:
		break;
	default:
		BT_WARN("no valid (%u) state was set", state);

		break;
	}
}