/* Broadcast one Unprovisioned Device beacon (and, when a URI is configured,
 * a companion URI advertisement) over PB-ADV.
 *
 * Beacon layout: type (1) | device UUID (16) | OOB info (2, BE) | URI hash (4).
 * Only the first four bytes of s1(URI) are carried in the beacon.
 *
 * Returns 0 on success (also when PB-ADV is compiled out), -EINVAL if no
 * provisioning context is available, -ENOBUFS on buffer exhaustion.
 */
static int unprovisioned_beacon_send(void)
{
#if defined(CONFIG_BT_MESH_PB_ADV)
	const struct bt_mesh_prov *prov;
	struct net_buf *buf;
	u8_t uri_hash[16] = { 0 };
	u16_t oob_info;

	BT_DBG("");

	/* Fetch the provisioning context before allocating so a missing
	 * context neither crashes on dereference nor leaks a buffer.
	 */
	prov = bt_mesh_prov_get();
	if (!prov) {
		BT_ERR("No provisioning context available");
		return -EINVAL;
	}

	buf = bt_mesh_adv_create(BT_MESH_ADV_BEACON, UNPROV_XMIT_COUNT,
				 UNPROV_XMIT_INT, K_NO_WAIT);
	if (!buf) {
		BT_ERR("Unable to allocate beacon buffer");
		return -ENOBUFS;
	}

	net_buf_add_u8(buf, BEACON_TYPE_UNPROVISIONED);
	net_buf_add_mem(buf, prov->uuid, 16);

	/* For the tmall BLE profile the OOB info defaults to 0x0000 and the
	 * URI hash is optional; advertise the URI bit only when hashing the
	 * URI succeeded.
	 */
	if (prov->uri && bt_mesh_s1(prov->uri, uri_hash) == 0) {
		oob_info = prov->oob_info | BT_MESH_PROV_OOB_URI;
	} else {
		oob_info = prov->oob_info;
	}

	net_buf_add_be16(buf, oob_info);
	/* URI hash field is the first 4 bytes of the s1 output. */
	net_buf_add_mem(buf, uri_hash, 4);

	bt_mesh_adv_send(buf, NULL, NULL);
	net_buf_unref(buf);

	if (prov->uri) {
		size_t len;

		buf = bt_mesh_adv_create(BT_MESH_ADV_URI, UNPROV_XMIT_COUNT,
					 UNPROV_XMIT_INT, K_NO_WAIT);
		if (!buf) {
			BT_ERR("Unable to allocate URI buffer");
			return -ENOBUFS;
		}

		len = strlen(prov->uri);
		if (net_buf_tailroom(buf) < len) {
			/* The URI is dropped but the beacon above was still
			 * sent; not treated as an error.
			 */
			BT_WARN("Too long URI to fit advertising data");
		} else {
			net_buf_add_mem(buf, prov->uri, len);
			bt_mesh_adv_send(buf, NULL, NULL);
		}

		net_buf_unref(buf);
	}
#endif /* CONFIG_BT_MESH_PB_ADV */

	return 0;
}
/* Populate @pkt with an IPv4 + UDP header pair and append the file-scope
 * "payload" string as UDP data. The packet is built from the remote peer's
 * point of view: @remote_addr/@remote_port become the source and
 * @local_addr/@local_port the destination.
 *
 * NOTE(review): IP/UDP checksums and TTL are not filled in — presumably the
 * consumer of these test packets does not verify them.
 */
static void setup_ipv4_udp(struct net_pkt *pkt,
			   struct in_addr *remote_addr,
			   struct in_addr *local_addr,
			   u16_t remote_port, u16_t local_port)
{
	/* IPv4 total length = IP header + UDP header + payload. */
	u16_t total_len = NET_UDPH_LEN + sizeof(struct net_ipv4_hdr) +
			  strlen(payload);

	NET_IPV4_HDR(pkt)->vhl = 0x45; /* IPv4, 5-word (20-byte) header */
	NET_IPV4_HDR(pkt)->tos = 0;
	/* Store the 16-bit total length big-endian. The original code
	 * hard-coded the high byte to 0, truncating any packet >= 256 bytes.
	 */
	NET_IPV4_HDR(pkt)->len[0] = total_len >> 8;
	NET_IPV4_HDR(pkt)->len[1] = total_len & 0xFF;

	NET_IPV4_HDR(pkt)->proto = IPPROTO_UDP;

	net_ipaddr_copy(&NET_IPV4_HDR(pkt)->src, remote_addr);
	net_ipaddr_copy(&NET_IPV4_HDR(pkt)->dst, local_addr);

	net_pkt_set_ip_hdr_len(pkt, sizeof(struct net_ipv4_hdr));
	/* Clear any stale extension-header length left in the pkt metadata. */
	net_pkt_set_ipv6_ext_len(pkt, 0);

	/* Reserve space for the headers written above, then append data. */
	net_buf_add(pkt->frags,
		    net_pkt_ip_hdr_len(pkt) + sizeof(struct net_udp_hdr));

	NET_UDP_HDR(pkt)->src_port = htons(remote_port);
	NET_UDP_HDR(pkt)->dst_port = htons(local_port);

	net_buf_add_mem(pkt->frags, payload, strlen(payload));
}
/* Populate @pkt with an IPv6 + UDP header pair and append the file-scope
 * "payload" string as UDP data. Built from the remote peer's point of view:
 * @remote_addr/@remote_port are the source, @local_addr/@local_port the
 * destination.
 *
 * NOTE(review): the UDP length field and checksum are not filled in —
 * presumably the consumer of these test packets does not verify them.
 */
static void setup_ipv6_udp(struct net_pkt *pkt,
			   struct in6_addr *remote_addr,
			   struct in6_addr *local_addr,
			   u16_t remote_port, u16_t local_port)
{
	/* IPv6 payload length = UDP header + payload (IP header excluded). */
	u16_t payload_len = NET_UDPH_LEN + strlen(payload);

	NET_IPV6_HDR(pkt)->vtc = 0x60; /* version 6, traffic class 0 */
	NET_IPV6_HDR(pkt)->tcflow = 0;
	NET_IPV6_HDR(pkt)->flow = 0;
	/* Store the 16-bit payload length big-endian. The original code
	 * hard-coded the high byte to 0, truncating payloads >= 256 bytes.
	 */
	NET_IPV6_HDR(pkt)->len[0] = payload_len >> 8;
	NET_IPV6_HDR(pkt)->len[1] = payload_len & 0xFF;

	NET_IPV6_HDR(pkt)->nexthdr = IPPROTO_UDP;
	NET_IPV6_HDR(pkt)->hop_limit = 255;

	net_ipaddr_copy(&NET_IPV6_HDR(pkt)->src, remote_addr);
	net_ipaddr_copy(&NET_IPV6_HDR(pkt)->dst, local_addr);

	net_pkt_set_ip_hdr_len(pkt, sizeof(struct net_ipv6_hdr));
	/* No extension headers in this packet. */
	net_pkt_set_ipv6_ext_len(pkt, 0);

	/* Reserve space for the headers written above, then append data. */
	net_buf_add(pkt->frags,
		    net_pkt_ip_hdr_len(pkt) + sizeof(struct net_udp_hdr));

	NET_UDP_HDR(pkt)->src_port = htons(remote_port);
	NET_UDP_HDR(pkt)->dst_port = htons(local_port);

	net_buf_add_mem(pkt->frags, payload, strlen(payload));
}
/* Produce the next transmit segment for channel @ch from SDU buffer @buf.
 *
 * Fast path: when the whole remaining SDU fits in one MPS, the buffer has
 * enough user_data and headroom, and it is unfragmented, the original buffer
 * is reused (with an extra reference) instead of copying. Otherwise a fresh
 * PDU is allocated from le_data_pool and up to one MPS worth of data is
 * copied out of @buf, which is pulled forward accordingly.
 *
 * @sdu_hdr_len is non-zero only for the first segment, where the 2-byte SDU
 * length prefix must be emitted.
 *
 * Returns a referenced buffer owned by the caller.
 */
static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
					     struct net_buf *buf,
					     size_t sdu_hdr_len)
{
	struct net_buf *seg;
	uint16_t headroom;
	uint16_t len;

	/* Segment if data (+ data headroom) is bigger than MPS */
	if (buf->len + sdu_hdr_len > ch->tx.mps) {
		goto segment;
	}

	/* Segment if there is no space in the user_data */
	if (buf->pool->user_data_size < BT_BUF_USER_DATA_MIN) {
		BT_WARN("Too small buffer user_data_size %u",
			buf->pool->user_data_size);
		goto segment;
	}

	headroom = sizeof(struct bt_hci_acl_hdr) +
		   sizeof(struct bt_l2cap_hdr) + sdu_hdr_len;

	/* Check if original buffer has enough headroom and don't have any
	 * fragments.
	 */
	if (net_buf_headroom(buf) >= headroom && !buf->frags) {
		if (sdu_hdr_len) {
			/* Push SDU length if set */
			net_buf_push_le16(buf, net_buf_frags_len(buf));
		}
		/* Extra reference so both caller and TX path hold the buf. */
		return net_buf_ref(buf);
	}

segment:
	seg = bt_l2cap_create_pdu(&le_data_pool, 0);

	if (sdu_hdr_len) {
		net_buf_add_le16(seg, net_buf_frags_len(buf));
	}

	/* Don't send more than TX MPS including SDU length */
	len = min(net_buf_tailroom(seg), ch->tx.mps - sdu_hdr_len);
	/* Limit if original buffer is smaller than the segment */
	len = min(buf->len, len);
	net_buf_add_mem(seg, buf->data, len);
	net_buf_pull(buf, len);

	BT_DBG("ch %p seg %p len %u", ch, seg, seg->len);

	return seg;
}
/* Append the payload of one received LE L2CAP PDU (@buf) to the SDU being
 * reassembled in chan->_sdu.
 *
 * The peer declared the total SDU size in chan->_sdu_len; if this PDU would
 * overflow that, or a reassembly fragment cannot be allocated, the channel
 * is disconnected. When the SDU is complete it is handed to the channel's
 * recv() callback and the reassembly state is reset. In every case the
 * channel's RX credits are replenished at the end.
 */
static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf)
{
	struct net_buf *frag;
	uint16_t len;

	BT_DBG("chan %p len %u sdu %zu", chan, buf->len,
	       net_buf_frags_len(chan->_sdu));

	if (net_buf_frags_len(chan->_sdu) + buf->len > chan->_sdu_len) {
		/* Peer sent more data than the announced SDU length. */
		BT_ERR("SDU length mismatch");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Jump to last fragment */
	frag = net_buf_frag_last(chan->_sdu);

	while (buf->len) {
		/* Check if there is any space left in the current fragment */
		if (!net_buf_tailroom(frag)) {
			frag = l2cap_alloc_frag(chan);
			if (!frag) {
				BT_ERR("Unable to store SDU");
				bt_l2cap_chan_disconnect(&chan->chan);
				return;
			}
		}

		/* Copy as much as fits in this fragment, then advance. */
		len = min(net_buf_tailroom(frag), buf->len);
		net_buf_add_mem(frag, buf->data, len);
		net_buf_pull(buf, len);

		BT_DBG("frag %p len %u", frag, frag->len);
	}

	if (net_buf_frags_len(chan->_sdu) == chan->_sdu_len) {
		/* Receiving complete SDU, notify channel and reset SDU buf */
		chan->chan.ops->recv(&chan->chan, chan->_sdu);
		net_buf_unref(chan->_sdu);
		chan->_sdu = NULL;
		chan->_sdu_len = 0;
	}

	l2cap_chan_update_credits(chan);
}
static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident, uint16_t reason, void *data, uint8_t data_len) { struct bt_l2cap_cmd_reject *rej; struct net_buf *buf; buf = l2cap_create_le_sig_pdu(BT_L2CAP_CMD_REJECT, ident, sizeof(*rej) + data_len); rej = net_buf_add(buf, sizeof(*rej)); rej->reason = sys_cpu_to_le16(reason); if (data) { net_buf_add_mem(buf, data, data_len); } bt_l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf); }
/* RX thread for the SPI HCI transport.
 *
 * Waits for the controller's IRQ line (sem_request), reads the 5-byte slave
 * header to learn how many bytes are pending, transfers the packet, then
 * allocates the matching HCI buffer type and hands it to the host stack.
 * sem_busy serializes bus access against the TX path; the IRQ callback is
 * masked during the transfer to avoid spurious wakeups.
 *
 * NOTE(review): buffers obtained with K_FOREVER are assumed non-NULL.
 */
static void bt_spi_rx_thread(void)
{
	struct net_buf *buf;
	u8_t header_master[5] = { SPI_READ, 0x00, 0x00, 0x00, 0x00 };
	u8_t header_slave[5];
	struct bt_hci_acl_hdr acl_hdr;
	u8_t size;

	memset(&txmsg, 0xFF, SPI_MAX_MSG_LEN);
	while (true) {
		k_sem_take(&sem_request, K_FOREVER);
		/* Disable IRQ pin callback to avoid spurious IRQs */
		gpio_pin_disable_callback(irq_dev, GPIO_IRQ_PIN);
		k_sem_take(&sem_busy, K_FOREVER);

		/* Poll the slave header until it reports readable bytes
		 * (0x00 and 0xFF both mean "not ready" here).
		 */
		do {
#if defined(CONFIG_BLUETOOTH_SPI_BLUENRG)
			/* BlueNRG needs a CS toggle to latch the header. */
			gpio_pin_write(cs_dev, GPIO_CS_PIN, 1);
			gpio_pin_write(cs_dev, GPIO_CS_PIN, 0);
#endif /* CONFIG_BLUETOOTH_SPI_BLUENRG */
			spi_transceive(spi_dev,
				       header_master, 5, header_slave, 5);
		} while (header_slave[STATUS_HEADER_TOREAD] == 0 ||
			 header_slave[STATUS_HEADER_TOREAD] == 0xFF);

		size = header_slave[STATUS_HEADER_TOREAD];
		/* Retry the payload transfer until a non-zero first byte
		 * (packet type) arrives.
		 */
		do {
			spi_transceive(spi_dev, &txmsg, size, &rxmsg, size);
		} while (rxmsg[0] == 0);

		/* Transfer complete: re-arm the IRQ and release the bus. */
		gpio_pin_enable_callback(irq_dev, GPIO_IRQ_PIN);
#if defined(CONFIG_BLUETOOTH_SPI_BLUENRG)
		gpio_pin_write(cs_dev, GPIO_CS_PIN, 1);
#endif /* CONFIG_BLUETOOTH_SPI_BLUENRG */
		k_sem_give(&sem_busy);

		spi_dump_message("RX:ed", rxmsg, size);

		switch (rxmsg[PACKET_TYPE]) {
		case HCI_EVT:
			switch (rxmsg[EVT_HEADER_EVENT]) {
			case BT_HCI_EVT_VENDOR:
				/* Vendor events are currently unsupported */
				bt_spi_handle_vendor_evt(rxmsg);
				continue;
			case BT_HCI_EVT_CMD_COMPLETE:
			case BT_HCI_EVT_CMD_STATUS:
				buf = bt_buf_get_cmd_complete(K_FOREVER);
				break;
			default:
				buf = bt_buf_get_rx(K_FOREVER);
				break;
			}
			bt_buf_set_type(buf, BT_BUF_EVT);
			/* +2 covers the event header preceding the payload. */
			net_buf_add_mem(buf, &rxmsg[1],
					rxmsg[EVT_HEADER_SIZE] + 2);
			break;
		case HCI_ACL:
			buf = bt_buf_get_rx(K_FOREVER);
			bt_buf_set_type(buf, BT_BUF_ACL_IN);
			/* Copy the header out first to read its length field
			 * without unaligned access.
			 */
			memcpy(&acl_hdr, &rxmsg[1], sizeof(acl_hdr));
			net_buf_add_mem(buf, &acl_hdr, sizeof(acl_hdr));
			net_buf_add_mem(buf, &rxmsg[5],
					sys_le16_to_cpu(acl_hdr.len));
			break;
		default:
			BT_ERR("Unknown BT buf type %d", rxmsg[0]);
			continue;
		}

		/* Priority events bypass the normal RX queue. */
		if (rxmsg[PACKET_TYPE] == HCI_EVT &&
		    bt_hci_evt_is_prio(rxmsg[EVT_HEADER_EVENT])) {
			bt_recv_prio(buf);
		} else {
			bt_recv(buf);
		}
	}
}
/* Append the packet header accumulated in the file-scope "rx" state
 * (rx.hdr, rx.hdr_len bytes) to @buf.
 */
static inline void copy_hdr(struct net_buf *buf)
{
	net_buf_add_mem(buf, rx.hdr, rx.hdr_len);
}
/* UART ISR implementing the receive side of the H:5 (three-wire) protocol.
 *
 * Bytes are consumed one at a time through a SLIP-framed state machine:
 *   START   -> wait for the opening SLIP delimiter
 *   HEADER  -> collect the 4-byte H5 header (un-SLIPped)
 *   PAYLOAD -> collect H5_HDR_LEN(hdr) payload bytes into h5.rx_buf
 *   END     -> expect the closing delimiter, then validate seq and process
 *
 * "remaining" and "hdr" are static so the parse state survives across ISR
 * invocations; h5_reset_rx() returns the machine to START on any error.
 */
static void bt_uart_isr(struct device *unused)
{
	static int remaining;
	u8_t byte;
	int ret;
	static u8_t hdr[4];

	ARG_UNUSED(unused);

	while (uart_irq_update(h5_dev) &&
	       uart_irq_is_pending(h5_dev)) {

		if (!uart_irq_rx_ready(h5_dev)) {
			if (uart_irq_tx_ready(h5_dev)) {
				BT_DBG("transmit ready");
			} else {
				BT_DBG("spurious interrupt");
			}
			/* Only the UART RX path is interrupt-enabled */
			break;
		}

		ret = uart_fifo_read(h5_dev, &byte, sizeof(byte));
		if (!ret) {
			continue;
		}

		switch (h5.rx_state) {
		case START:
			if (byte == SLIP_DELIMITER) {
				h5.rx_state = HEADER;
				remaining = sizeof(hdr);
			}
			break;
		case HEADER:
			/* In a case we confuse ending slip delimeter
			 * with starting one.
			 */
			if (byte == SLIP_DELIMITER) {
				remaining = sizeof(hdr);
				continue;
			}

			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			memcpy(&hdr[sizeof(hdr) - remaining], &byte, 1);
			remaining--;

			if (remaining) {
				break;
			}

			remaining = H5_HDR_LEN(hdr);

			switch (H5_HDR_PKT_TYPE(hdr)) {
			case HCI_EVENT_PKT:
				/* The buffer is allocated only once we know
				 * the exact event type.
				 */
				h5.rx_state = PAYLOAD;
				break;
			case HCI_ACLDATA_PKT:
				h5.rx_buf = bt_buf_get_rx(BT_BUF_ACL_IN,
							  K_NO_WAIT);
				if (!h5.rx_buf) {
					BT_WARN("No available data buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			case HCI_3WIRE_LINK_PKT:
			case HCI_3WIRE_ACK_PKT:
				h5.rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
				if (!h5.rx_buf) {
					BT_WARN("No available signal buffers");
					h5_reset_rx();
					continue;
				}

				h5.rx_state = PAYLOAD;
				break;
			default:
				BT_ERR("Wrong packet type %u",
				       H5_HDR_PKT_TYPE(hdr));
				h5.rx_state = END;
				break;
			}
			break;
		case PAYLOAD:
			if (h5_unslip_byte(&byte) < 0) {
				h5_reset_rx();
				continue;
			}

			/* Allocate HCI event buffer now that we know the
			 * exact event type.
			 */
			if (!h5.rx_buf) {
				h5.rx_buf = get_evt_buf(byte);
				if (!h5.rx_buf) {
					BT_WARN("No available event buffers");
					h5_reset_rx();
					continue;
				}
			}

			net_buf_add_mem(h5.rx_buf, &byte, sizeof(byte));
			remaining--;
			if (!remaining) {
				h5.rx_state = END;
			}
			break;
		case END:
			if (byte != SLIP_DELIMITER) {
				BT_ERR("Missing ending SLIP_DELIMITER");
				h5_reset_rx();
				break;
			}

			BT_DBG("Received full packet: type %u",
			       H5_HDR_PKT_TYPE(hdr));

			/* Check when full packet is received, it can be done
			 * when parsing packet header but we need to receive
			 * full packet anyway to clear UART.
			 */
			if (H5_HDR_RELIABLE(hdr) &&
			    H5_HDR_SEQ(hdr) != h5.tx_ack) {
				BT_ERR("Seq expected %u got %u. Drop packet",
				       h5.tx_ack, H5_HDR_SEQ(hdr));
				h5_reset_rx();
				break;
			}

			h5_process_complete_packet(hdr);
			h5.rx_state = START;
			break;
		}
	}
}