/* Broadcast a single Unprovisioned Device beacon and, when a URI is
 * configured, a follow-up URI advertisement carrying the full URI string.
 *
 * Beacon layout: type octet, 16-byte device UUID, 16-bit OOB info and a
 * 4-byte URI hash (first four bytes of s1(uri), zeroes when no URI).
 * For the tmall BLE profile the OOB info defaults to 0x0000; the URI
 * hash field is optional per the mesh spec.
 *
 * Compiled to an empty success stub unless PB-ADV support is enabled.
 *
 * Return: 0 on success, -ENOBUFS if an advertising buffer cannot be
 * allocated.
 */
static int unprovisioned_beacon_send(void)
{
#if defined(CONFIG_BT_MESH_PB_ADV)
	const struct bt_mesh_prov *prov;
	struct net_buf *adv;
	u8_t hash[16] = { 0 };
	u16_t oob;

	BT_DBG("");

	adv = bt_mesh_adv_create(BT_MESH_ADV_BEACON, UNPROV_XMIT_COUNT,
				 UNPROV_XMIT_INT, K_NO_WAIT);
	if (!adv) {
		BT_ERR("Unable to allocate beacon buffer");
		return -ENOBUFS;
	}

	prov = bt_mesh_prov_get();

	net_buf_add_u8(adv, BEACON_TYPE_UNPROVISIONED);
	net_buf_add_mem(adv, prov->uuid, 16);

	/* Advertise the URI flag only when the URI actually hashes. */
	if (prov->uri && bt_mesh_s1(prov->uri, hash) == 0) {
		oob = prov->oob_info | BT_MESH_PROV_OOB_URI;
	} else {
		oob = prov->oob_info;
	}

	net_buf_add_be16(adv, oob);
	/* Only the first 4 bytes of the s1() output go on the air. */
	net_buf_add_mem(adv, hash, 4);

	bt_mesh_adv_send(adv, NULL, NULL);
	net_buf_unref(adv);

	if (prov->uri) {
		size_t uri_len = strlen(prov->uri);

		adv = bt_mesh_adv_create(BT_MESH_ADV_URI, UNPROV_XMIT_COUNT,
					 UNPROV_XMIT_INT, K_NO_WAIT);
		if (!adv) {
			BT_ERR("Unable to allocate URI buffer");
			return -ENOBUFS;
		}

		if (net_buf_tailroom(adv) < uri_len) {
			/* Oversized URIs are dropped, not truncated. */
			BT_WARN("Too long URI to fit advertising data");
		} else {
			net_buf_add_mem(adv, prov->uri, uri_len);
			bt_mesh_adv_send(adv, NULL, NULL);
		}

		net_buf_unref(adv);
	}
#endif /* CONFIG_BT_MESH_PB_ADV */

	return 0;
}
static int set_advertise_disable(void) { #if 0 struct net_buf *buf; int err; #endif if (!atomic_test_bit(bt_dev.flags, BT_DEV_ADVERTISING)) { return 0; } #if 0 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_ENABLE, 1); if (!buf) { return -ENOBUFS; } net_buf_add_u8(buf, BT_HCI_LE_ADV_DISABLE); err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_ADV_ENABLE, buf, NULL); if (err) { return err; } #endif nble_gap_stop_adv_req(NULL); atomic_clear_bit(bt_dev.flags, BT_DEV_ADVERTISING); return 0; }
/** * Vendor handler is executed in the ISR context, queue data for * later processing */ static int wpanusb_vendor_handler(struct usb_setup_packet *setup, int32_t *len, uint8_t **data) { struct net_buf *pkt, *buf; pkt = net_nbuf_get_reserve_tx(0); buf = net_nbuf_get_reserve_data(0); net_buf_frag_insert(pkt, buf); net_buf_add_u8(buf, setup->bRequest); /* Add seq to TX */ if (setup->bRequest == TX) { net_buf_add_u8(buf, setup->wIndex); } memcpy(net_buf_add(buf, *len), *data, *len); SYS_LOG_DBG("len %u seq %u", *len, setup->wIndex); net_buf_put(&tx_queue, pkt); return 0; }
/* Allocate an RX buffer appropriate for the given HCI event code and
 * store the event code as the buffer's first byte.
 *
 * Command Complete / Command Status events come from the dedicated
 * command-complete pool; every other event uses the generic event pool.
 * Allocation never blocks (K_NO_WAIT).
 *
 * Bug fix: the event code is now written into the freshly allocated
 * buffer rather than into the global h5.rx_buf.  At this point
 * h5.rx_buf still refers to the *previous* buffer — the caller assigns
 * h5.rx_buf from our return value only after we return — so the old
 * code tagged the wrong (possibly stale) buffer.
 *
 * Return: the allocated buffer with the event code appended, or NULL
 * if no buffer is available.
 */
static inline struct net_buf *get_evt_buf(u8_t evt)
{
	struct net_buf *buf;

	switch (evt) {
	case BT_HCI_EVT_CMD_COMPLETE:
	case BT_HCI_EVT_CMD_STATUS:
		buf = bt_buf_get_cmd_complete(K_NO_WAIT);
		break;
	default:
		buf = bt_buf_get_rx(BT_BUF_EVT, K_NO_WAIT);
		break;
	}

	if (buf) {
		net_buf_add_u8(buf, evt);
	}

	return buf;
}
/* Feed one received byte into the per-context SLIP decoder.
 *
 * Decodes SLIP framing (END / ESC / ESC_END / ESC_ESC) and accumulates
 * payload bytes into a net_pkt fragment chain hanging off slip->rx,
 * growing it fragment by fragment as tailroom runs out.
 *
 * @param slip  Decoder context holding the state machine and the packet
 *              under construction (rx / last / ptr / first).
 * @param c     The raw byte just received from the wire.
 *
 * @return 1 when an END byte completes a non-empty packet (slip->rx is
 *         ready for the caller to consume), 0 otherwise — including on
 *         allocation failure, where the partial packet is dropped and
 *         the decoder keeps running.
 */
static inline int slip_input_byte(struct slip_context *slip, unsigned char c)
{
	switch (slip->state) {
	case STATE_GARBAGE:
		/* Resynchronize: discard everything until a frame END. */
		if (c == SLIP_END) {
			slip->state = STATE_OK;
		}
		return 0;
	case STATE_ESC:
		/* Only the two defined escape codes are legal after ESC;
		 * anything else poisons the rest of the frame.
		 */
		if (c == SLIP_ESC_END) {
			c = SLIP_END;
		} else if (c == SLIP_ESC_ESC) {
			c = SLIP_ESC;
		} else {
			slip->state = STATE_GARBAGE;
			SLIP_STATS(slip->garbage++);
			return 0;
		}
		slip->state = STATE_OK;
		break;
	case STATE_OK:
		if (c == SLIP_ESC) {
			/* Next byte is an escape code; emit nothing yet. */
			slip->state = STATE_ESC;
			return 0;
		}
		if (c == SLIP_END) {
			/* Frame boundary: report a completed packet only if
			 * one was actually being accumulated.
			 */
			slip->state = STATE_OK;
			slip->first = false;
			if (slip->rx) {
				return 1;
			}
			return 0;
		}
		if (slip->first && !slip->rx) {
			/* Must have missed buffer allocation on first byte. */
			return 0;
		}
		if (!slip->first) {
			/* First payload byte of a new frame: allocate the
			 * packet and its first data fragment.  NOTE: 'first'
			 * means "first byte already seen", so false here.
			 */
			slip->first = true;
			slip->rx = net_pkt_get_reserve_rx(0, K_NO_WAIT);
			if (!slip->rx) {
				SYS_LOG_ERR("[%p] cannot allocate pkt", slip);
				return 0;
			}
			slip->last = net_pkt_get_frag(slip->rx, K_NO_WAIT);
			if (!slip->last) {
				SYS_LOG_ERR("[%p] cannot allocate 1st data frag", slip);
				net_pkt_unref(slip->rx);
				slip->rx = NULL;
				return 0;
			}
			net_pkt_frag_add(slip->rx, slip->last);
			/* Start writing at the IP data offset, which may sit
			 * before last->data (inside the ll header reserve).
			 */
			slip->ptr = net_pkt_ip_data(slip->rx);
		}
		break;
	}
	/* It is possible that slip->last is not set during the startup
	 * of the device. If this happens do not continue and overwrite
	 * some random memory.
	 */
	if (!slip->last) {
		return 0;
	}
	if (!net_buf_tailroom(slip->last)) {
		/* We need to allocate a new fragment */
		struct net_buf *frag;
		frag = net_pkt_get_reserve_rx_data(0, K_NO_WAIT);
		if (!frag) {
			SYS_LOG_ERR("[%p] cannot allocate next data frag", slip);
			/* Drop the partial packet; clear all cursors so the
			 * next frame starts from a clean slate.
			 */
			net_pkt_unref(slip->rx);
			slip->rx = NULL;
			slip->last = NULL;
			return 0;
		}
		net_buf_frag_insert(slip->last, frag);
		slip->last = frag;
		slip->ptr = slip->last->data;
	}
	/* The net_buf_add_u8() cannot add data to ll header so we need
	 * a way to do it.
	 */
	if (slip->ptr < slip->last->data) {
		/* Still inside the reserved ll-header area: raw store,
		 * the fragment's len is not advanced for these bytes.
		 */
		*slip->ptr = c;
	} else {
		slip->ptr = net_buf_add_u8(slip->last, c);
	}
	slip->ptr++;
	return 0;
}
static int slip_process_byte(unsigned char c) { struct net_buf *buf; #ifdef VERBOSE_DEBUG SYS_LOG_DBG("recv: state %u byte %x", slip_state, c); #endif switch (slip_state) { case STATE_GARBAGE: if (c == SLIP_END) { slip_state = STATE_OK; } SYS_LOG_DBG("garbage: discard byte %x", c); return 0; case STATE_ESC: if (c == SLIP_ESC_END) { c = SLIP_END; } else if (c == SLIP_ESC_ESC) { c = SLIP_ESC; } else { slip_state = STATE_GARBAGE; return 0; } slip_state = STATE_OK; break; case STATE_OK: if (c == SLIP_ESC) { slip_state = STATE_ESC; return 0; } else if (c == SLIP_END) { return 1; } break; } #ifdef VERBOSE_DEBUG SYS_LOG_DBG("processed: state %u byte %x", slip_state, c); #endif if (!pkt_curr) { pkt_curr = net_pkt_get_reserve_rx(0, K_NO_WAIT); if (!pkt_curr) { SYS_LOG_ERR("No more buffers"); return 0; } buf = net_pkt_get_frag(pkt_curr, K_NO_WAIT); if (!buf) { SYS_LOG_ERR("No more buffers"); net_pkt_unref(pkt_curr); return 0; } net_pkt_frag_insert(pkt_curr, buf); } else { buf = net_buf_frag_last(pkt_curr->frags); } if (!net_buf_tailroom(buf)) { SYS_LOG_ERR("No more buf space: buf %p len %u", buf, buf->len); net_pkt_unref(pkt_curr); pkt_curr = NULL; return 0; } net_buf_add_u8(buf, c); return 0; }