/*
 * Reassemble HCI event packets that arrive split across USB interrupt
 * transfers.  A pending partial packet is held in data->evt_skb with its
 * expected total length in data->evt_len.
 *
 * Returns 0 on success or a negative errno for a malformed block.
 */
static int bpa10x_recv_event(struct bpa10x_data *data, unsigned char *buf, int size)
{
    BT_DBG("data %p buf %p size %d", data, buf, size);

    if (data->evt_skb) {
        /* Continuation block: append to the pending packet. */
        struct sk_buff *skb = data->evt_skb;

        /* NOTE(review): size is not bounded against the tailroom of the
         * pending skb (allocated for evt_len bytes); an oversized
         * continuation would overrun — confirm the caller guarantees
         * block sizes never exceed the announced packet length. */
        memcpy(skb_put(skb, size), buf, size);

        if (skb->len == data->evt_len) {
            /* Packet complete: hand it off to the HCI core. */
            data->evt_skb = NULL;
            data->evt_len = 0;
            hci_recv_frame(skb);
        }
    } else {
        /* Start block: must carry the type byte plus a full event header. */
        struct sk_buff *skb;
        struct hci_event_hdr *hdr;
        unsigned char pkt_type;
        int pkt_len = 0;

        if (size < HCI_EVENT_HDR_SIZE + 1) {
            BT_ERR("%s event packet block with size %d is too short",
                   data->hdev->name, size);
            return -EILSEQ;
        }

        pkt_type = *buf++;
        size--;

        if (pkt_type != HCI_EVENT_PKT) {
            BT_ERR("%s unexpected event packet start byte 0x%02x",
                   data->hdev->name, pkt_type);
            return -EPROTO;
        }

        /* Total packet length = event header + payload length field. */
        hdr = (struct hci_event_hdr *) buf;
        pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;

        skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
        if (!skb) {
            BT_ERR("%s no memory for new event packet",
                   data->hdev->name);
            return -ENOMEM;
        }

        skb->dev = (void *) data->hdev;
        bt_cb(skb)->pkt_type = pkt_type;

        memcpy(skb_put(skb, size), buf, size);

        if (pkt_len == size) {
            /* The whole packet arrived in this one block. */
            hci_recv_frame(skb);
        } else {
            /* Park the partial packet and wait for continuations. */
            data->evt_skb = skb;
            data->evt_len = pkt_len;
        }
    }

    return 0;
}
/*
 * Split a bulk-pipe buffer into individual ACL/SCO/vendor packets and
 * forward each one to the HCI core.  Every packet in the buffer is
 * prefixed with a one-byte HCI packet type indicator.
 */
static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int count)
{
    struct hci_acl_hdr *ah;
    struct hci_sco_hdr *sh;
    struct hci_vendor_hdr *vh;
    struct sk_buff *skb;
    int len;

    while (count) {
        /* The first byte of each packet is its type; advance past it. */
        switch (*buf++) {
        case HCI_ACLDATA_PKT:
            ah = (struct hci_acl_hdr *) buf;
            len = HCI_ACL_HDR_SIZE + __le16_to_cpu(ah->dlen);
            /* NOTE(review): len is not validated against the remaining
             * count before the memcpy below — a corrupted length field
             * would over-read the buffer; confirm the transport only
             * delivers whole, well-formed packets. */
            skb = bt_skb_alloc(len, GFP_ATOMIC);
            if (skb) {
                memcpy(skb_put(skb, len), buf, len);
                skb->dev = (void *) data->hdev;
                bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                hci_recv_frame(skb);
            }
            break;

        case HCI_SCODATA_PKT:
            sh = (struct hci_sco_hdr *) buf;
            len = HCI_SCO_HDR_SIZE + sh->dlen;
            skb = bt_skb_alloc(len, GFP_ATOMIC);
            if (skb) {
                memcpy(skb_put(skb, len), buf, len);
                skb->dev = (void *) data->hdev;
                bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
                hci_recv_frame(skb);
            }
            break;

        case HCI_VENDOR_PKT:
            vh = (struct hci_vendor_hdr *) buf;
            len = HCI_VENDOR_HDR_SIZE + __le16_to_cpu(vh->dlen);
            skb = bt_skb_alloc(len, GFP_ATOMIC);
            if (skb) {
                memcpy(skb_put(skb, len), buf, len);
                skb->dev = (void *) data->hdev;
                bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
                hci_recv_frame(skb);
            }
            break;

        default:
            /* Unknown packet type: skip everything that is left. */
            len = count - 1;
            break;
        }

        /* Advance past this packet (+1 for the type byte). */
        buf += len;
        count -= (len + 1);
    }
}
/*
 * Copy one HCI frame from user space into an skb and inject it into the
 * HCI core.  The first byte of the buffer is the HCI packet type.
 *
 * Returns the number of bytes consumed or a negative errno.
 */
static inline ssize_t vhci_get_user(struct vhci_data *data, const char __user *buf, size_t count)
{
    struct sk_buff *skb;

    /* Fix: a zero-length write previously allocated an empty skb, then
     * read skb->data[0] (out of bounds) and pulled one byte past the
     * end.  Require at least the packet-type byte. */
    if (count < 1 || count > HCI_MAX_FRAME_SIZE)
        return -EINVAL;

    skb = bt_skb_alloc(count, GFP_KERNEL);
    if (!skb)
        return -ENOMEM;

    if (copy_from_user(skb_put(skb, count), buf, count)) {
        kfree_skb(skb);
        return -EFAULT;
    }

    skb->dev = (void *) data->hdev;
    /* First byte is the packet type; strip it off the payload. */
    bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
    skb_pull(skb, 1);

    hci_recv_frame(skb);

    return count;
}
/*
 * Parse one user-space write: a packet-type byte followed by either an
 * HCI frame (forwarded to the attached hdev) or a vendor packet whose
 * single opcode byte triggers creation of the virtual device.
 *
 * Returns the number of bytes consumed or a negative errno.
 */
static inline ssize_t vhci_get_user(struct vhci_data *data,
                                    struct iov_iter *from)
{
    size_t len = iov_iter_count(from);
    struct sk_buff *skb;
    __u8 pkt_type, opcode;
    int ret;

    /* Need at least type byte + one byte, at most a full HCI frame. */
    if (len < 2 || len > HCI_MAX_FRAME_SIZE)
        return -EINVAL;

    skb = bt_skb_alloc(len, GFP_KERNEL);
    if (!skb)
        return -ENOMEM;

    if (copy_from_iter(skb_put(skb, len), len, from) != len) {
        kfree_skb(skb);
        return -EFAULT;
    }

    /* Strip the leading packet-type byte from the payload. */
    pkt_type = *((__u8 *) skb->data);
    skb_pull(skb, 1);

    switch (pkt_type) {
    case HCI_EVENT_PKT:
    case HCI_ACLDATA_PKT:
    case HCI_SCODATA_PKT:
        /* Regular HCI traffic requires an already-created device. */
        if (!data->hdev) {
            kfree_skb(skb);
            return -ENODEV;
        }

        hci_skb_pkt_type(skb) = pkt_type;
        ret = hci_recv_frame(data->hdev, skb);
        break;

    case HCI_VENDOR_PKT:
        /* Explicit device creation: cancel the fallback timer that
         * would otherwise auto-create the device. */
        cancel_delayed_work_sync(&data->open_timeout);

        opcode = *((__u8 *) skb->data);
        skb_pull(skb, 1);

        /* Exactly one opcode byte is allowed after the type byte. */
        if (skb->len > 0) {
            kfree_skb(skb);
            return -EINVAL;
        }

        kfree_skb(skb);

        ret = vhci_create_device(data, opcode);
        break;

    default:
        kfree_skb(skb);
        return -EINVAL;
    }

    return (ret < 0) ? ret : len;
}
/*
 * Callback invoked by the Shared Transport layer whenever received data
 * is available.  Ownership of the skb passes to the HCI core on success;
 * on failure hci_recv_frame() frees it itself, so we still report
 * success to ST (it must not free the skb either way).
 */
static long st_receive(void *priv_data, struct sk_buff *skb)
{
    struct ti_st *hst = priv_data;
    int err;

    if (!skb)
        return -EFAULT;
    if (!hst) {
        kfree_skb(skb);
        return -EFAULT;
    }

    skb->dev = (void *) hst->hdev;

    /* Push the frame up into the HCI core. */
    err = hci_recv_frame(skb);
    if (err < 0)
        BT_ERR("Unable to push skb to HCI core(%d)", err);
    else
        hst->hdev->stat.byte_rx += skb->len;

    return 0;
}
/*
 * Decide what to do once a packet header has been fully received:
 * deliver immediately (zero-length payload), drop (payload larger than
 * the pre-allocated skb), or switch to payload collection.
 *
 * Returns len when payload collection should continue, 0 otherwise.
 */
static inline int h4_check_data_len(struct h4_struct *h4, int len)
{
    register int room = skb_tailroom(h4->rx_skb);

    BT_DBG("len %d room %d", len, room);

    if (!len) {
        /* No payload: the frame is already complete. */
        hci_recv_frame(h4->rx_skb);
    } else if (len > room) {
        BT_ERR("Data length is too large");
        kfree_skb(h4->rx_skb);
    } else {
        /* Collect len payload bytes next. */
        h4->rx_state = H4_W4_DATA;
        h4->rx_count = len;
        return len;
    }

#ifdef CONFIG_BT_HCIBCM4325
    /* Vendor quirk: allow the BCM4325 to drop back to sleep between
     * frames (runs on both the deliver and the error path). */
    bcm4325_sleep(1);
#endif

    h4->rx_state = H4_W4_PACKET_TYPE;
    h4->rx_skb = NULL;
    h4->rx_count = 0;

    return 0;
}
/*
 * Shared Transport receive callback.  Hands the frame to the HCI core;
 * the skb is consumed by hci_recv_frame() whether or not it succeeds,
 * so ST is always told the receive was handled.
 */
static long st_receive(void *priv_data, struct sk_buff *skb)
{
    struct ti_st *hst = priv_data;
    int err;

    if (!skb)
        return -EFAULT;

    if (!hst) {
        kfree_skb(skb);
        return -EFAULT;
    }

    skb->dev = (void *) hst->hdev;

    /* Forward the frame to the HCI core layer. */
    err = hci_recv_frame(skb);
    if (err < 0) {
        BT_ERR("Unable to push skb to HCI core(%d)", err);
        return 0;
    }

    hst->hdev->stat.byte_rx += skb->len;

    return 0;
}
/*
 * Drain the SMD event channel: read every pending event packet into an
 * skb and push it to the HCI core.  Holds the RX wake lock while
 * draining and re-arms the RX-queue monitor timer after each packet.
 */
static void hci_smd_recv_event(void)
{
    int len = 0;
    int rc = 0;
    struct sk_buff *skb = NULL;
    struct hci_smd_data *hsmd = &hs;

    wake_lock(&hs.wake_lock_rx);

    len = smd_read_avail(hsmd->event_channel);
    if (len > HCI_MAX_FRAME_SIZE) {
        /* Oversized frame: drain and discard it. */
        BT_ERR("Frame larger than the allowed size, flushing frame");
        rc = smd_read(hsmd->event_channel, NULL, len);
        goto out_event;
    }

    while (len > 0) {
        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
            /* Drop the pending data so the channel does not stall. */
            BT_ERR("Error in allocating socket buffer");
            smd_read(hsmd->event_channel, NULL, len);
            goto out_event;
        }

        rc = smd_read(hsmd->event_channel, skb_put(skb, len), len);
        if (rc < len) {
            /* NOTE(review): if smd_read() returns 0 here, rc is zero
             * and the cleanup below will not free skb — possible leak;
             * verify smd_read()'s short-read semantics. */
            BT_ERR("Error in reading from the event channel");
            goto out_event;
        }

        skb->dev = (void *)hsmd->hdev;
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

        skb_orphan(skb);

        rc = hci_recv_frame(skb);
        if (rc < 0) {
            BT_ERR("Error in passing the packet to HCI Layer");
            /*
             * skb is getting freed in hci_recv_frame, making it
             * to null to avoid multiple access
             */
            skb = NULL;
            goto out_event;
        }

        len = smd_read_avail(hsmd->event_channel);
        /*
         * Start the timer to monitor whether the Rx queue is
         * empty for releasing the Rx wake lock
         */
        BT_DBG("Rx Timer is starting");
        mod_timer(&hsmd->rx_q_timer,
                  jiffies + msecs_to_jiffies(RX_Q_MONITOR));
    }

out_event:
    release_lock();
    /* NOTE(review): on the flush path above, rc holds smd_read()'s
     * byte count while skb is still NULL, so kfree_skb(NULL) is a
     * harmless no-op — but the coupling between rc and skb ownership
     * here is fragile; confirm before changing. */
    if (rc)
        kfree_skb(skb);
}
static int nokia_recv_radio(struct hci_dev *hdev, struct sk_buff *skb)
{
    int err;

    /* Frames on the dedicated radio channel are plain HCI events, so
     * relabel them and feed them back into the core. */
    hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
    err = hci_recv_frame(hdev, skb);

    return err;
}
/*
 * Hand a received packet from the vendor glue layer to the Bluetooth
 * core.  Validates the minimum header length per packet type, copies
 * the raw bytes into a freshly allocated skb, then calls
 * hci_recv_frame().
 *
 * Returns 0/hci_recv_frame() status on success or a negative errno.
 *
 * NOTE(review): skb->dev is set from the global g_hdev while the
 * bt_dev parameter is only used for its name in the OOM message —
 * confirm these always refer to the same device.
 */
int rtbt_hci_dev_receive(void *bt_dev, int pkt_type, char *buf, int len)
{
    //struct hci_event_hdr hdr;
    //struct hci_dev *hdev = (struct hci_dev *)skb->dev;
    struct hci_dev *hdev = 0; /* NOTE(review): prefer NULL for pointers */
    struct sk_buff *skb;
    int status;
    //int pkt_len;

    //printk("-->%s(): receive info: pkt_type=%d(%s), len=%d!\n", __FUNCTION__, pkt_type, pkt_type <= 5 ? pkt_type_str[pkt_type] : "ErrPktType", len);

    /* Reject buffers too short to hold the declared type's header.
     * (Unknown types fall through unchecked.) */
    switch (pkt_type) {
    case HCI_EVENT_PKT:
        if (len < HCI_EVENT_HDR_SIZE) {
            BT_ERR("event block is too short");
            return -EILSEQ;
        }
        break;
    case HCI_ACLDATA_PKT:
        if (len < HCI_ACL_HDR_SIZE) {
            BT_ERR("data block is too short");
            return -EILSEQ;
        }
        break;
    case HCI_SCODATA_PKT:
        if (len < HCI_SCO_HDR_SIZE) {
            BT_ERR("audio block is too short");
            return -EILSEQ;
        }
        break;
    }

    skb = bt_skb_alloc(len, GFP_ATOMIC);
    if (!skb) {
        printk("%s no memory for the packet", ((struct hci_dev *)bt_dev)->name);
        return -ENOMEM;
    }

    skb->dev = g_hdev;
    rtbt_set_pkt_type(skb, pkt_type);
    memcpy(skb_put(skb, len), buf, len);

    if (pkt_type == HCI_SCODATA_PKT)
        printk("-->%s(): send sco data to OS, time=0x%lx\n", __FUNCTION__, jiffies);

    hdev = (struct hci_dev *)skb->dev;
    if (hdev) {
        hdev->stat.byte_rx += len;
    }

    status = hci_recv_frame(hdev, skb);

    //printk("<--%s()\n", __FUNCTION__);
    return status;
}
/*
 * URB completion handler for the bulk-in (ACL data) endpoint.  Each
 * completed transfer is expected to hold exactly one complete ACL
 * packet; it is copied into an skb and pushed to the HCI core, then
 * the URB is re-submitted.
 */
static void hci_usb_bulk_read(struct urb *urb)
{
    struct hci_usb *husb = (struct hci_usb *) urb->context;
    unsigned char *data = urb->transfer_buffer;
    int count = urb->actual_length, status;
    struct sk_buff *skb;
    hci_acl_hdr *ah;
    register __u16 dlen;

    if (!husb)
        return;

    DBG("%s status %d, count %d, flags %x", husb->hdev.name, urb->status,
        count, urb->transfer_flags);

    if (urb->status) {
        /* Do not re-submit URB on critical errors */
        switch (urb->status) {
        case -ENOENT:
            /* URB was unlinked: device going away. */
            return;
        default:
            goto resubmit;
        };
    }

    if (!count)
        goto resubmit;

    DMP(data, count);

    ah = (hci_acl_hdr *) data;
    dlen = le16_to_cpu(ah->dlen);

    /* Verify frame len and completeness */
    if ((count - HCI_ACL_HDR_SIZE) != dlen) {
        ERR("%s corrupted ACL packet: count %d, plen %d", husb->hdev.name,
            count, dlen);
        goto resubmit;
    }

    /* Allocate packet */
    if (!(skb = bluez_skb_alloc(count, GFP_ATOMIC))) {
        ERR("Can't allocate mem for new packet");
        goto resubmit;
    }

    memcpy(skb_put(skb, count), data, count);
    skb->dev = (void *) &husb->hdev;
    skb->pkt_type = HCI_ACLDATA_PKT;

    husb->hdev.stat.byte_rx += skb->len;

    hci_recv_frame(skb);

resubmit:
    husb->read_urb->dev = husb->udev;
    if ((status = usb_submit_urb(husb->read_urb)))
        DBG("%s read URB submit failed %d", husb->hdev.name, status);

    /* NOTE(review): logged unconditionally, even when the submit above
     * just failed. */
    DBG("%s read URB re-submited", husb->hdev.name);
}
/*
 * The controller emits its debug log records as ACL packets on a
 * reserved connection handle.  Route those to the diagnostic channel
 * instead of decoding them as ACL data; everything else goes through
 * the normal receive path.
 */
static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
{
    if (get_unaligned_le16(skb->data) != QCA_DEBUG_HANDLE)
        return hci_recv_frame(hdev, skb);

    /* Debug-log packet: push it up as a diagnostic packet. */
    return hci_recv_diag(hdev, skb);
}
/*
 * Dispatch a received frame: while the device is up it goes to the HCI
 * core; before that (during firmware download) it is handled by the
 * firmware-event parser.
 */
static inline void hci_h4p_recv_frame(struct hci_h4p_info *info,
                                      struct sk_buff *skb)
{
    if (likely(test_bit(HCI_RUNNING, &info->hdev->flags))) {
        hci_recv_frame(skb);
        NBT_DBG("Frame sent to upper layer\n");
    } else {
        /* Device not running yet: treat as a firmware event. */
        NBT_DBG("fw_event\n");
        hci_h4p_parse_fw_event(info, skb);
    }
}
static int hci_smd_recv_frame(struct hci_dev *hdev, int type) { int len; struct sk_buff *skb; struct smd_channel *channel; unsigned char *buf; switch (type) { case BT_CMD: channel = cmd_channel; break; case BT_DATA: channel = data_channel; break; default: return -EINVAL; } len = smd_cur_packet_size(cmd_channel); if (len > HCI_MAX_FRAME_SIZE) return -EINVAL; while (len) { skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; buf = kmalloc(len, GFP_KERNEL); smd_read(channel, (void *)buf, len); if (memcpy(skb_put(skb, len), buf, len)) { kfree_skb(skb); return -EFAULT; } skb->dev = (void *)hdev; bt_cb(skb)->pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); hci_recv_frame(skb); kfree(skb); kfree(buf); len = smd_cur_packet_size(cmd_channel); if (len > HCI_MAX_FRAME_SIZE) return -EINVAL; } return 0; }
/*
 * Read one pending ACL data packet from the SMD data channel into an
 * skb and push it to the HCI core.  Holds the RX wake lock while
 * reading and re-arms the RX-queue monitor timer on success.
 */
static void hci_smd_recv_data(void)
{
    int len = 0;
    int rc = 0;
    struct sk_buff *skb = NULL;
    struct hci_smd_data *hsmd = &hs;

    wake_lock(&hs.wake_lock_rx);

    len = smd_read_avail(hsmd->data_channel);
    if (len > HCI_MAX_FRAME_SIZE) {
        /* Oversized frame: drain and discard it. */
        BT_ERR("Frame larger than the allowed size, flushing frame");
        smd_read(hsmd->data_channel, NULL, len);
        goto out_data;
    }
    if (len <= 0)
        goto out_data;

    skb = bt_skb_alloc(len, GFP_ATOMIC);
    if (!skb) {
        /* Drop the pending data so the channel does not stall. */
        BT_ERR("Error in allocating socket buffer");
        smd_read(hsmd->data_channel, NULL, len);
        goto out_data;
    }

    rc = smd_read(hsmd->data_channel, skb_put(skb, len), len);
    if (rc < len) {
        /* NOTE(review): if smd_read() returns 0, rc is zero and the
         * cleanup below will not free skb — possible leak; verify
         * smd_read()'s short-read semantics. */
        BT_ERR("Error in reading from the channel");
        goto out_data;
    }

    skb->dev = (void *)hsmd->hdev;
    bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

    skb_orphan(skb);

    rc = hci_recv_frame(skb);
    if (rc < 0) {
        /* hci_recv_frame() freed the skb; clear the pointer so the
         * cleanup below cannot free it again. */
        BT_ERR("Error in passing the packet to HCI Layer");
        skb = NULL;
        goto out_data;
    }

    /* Re-arm the timer that releases the RX wake lock once the queue
     * stays empty. */
    BT_DBG("Rx Timer is starting");
    mod_timer(&hsmd->rx_q_timer,
              jiffies + msecs_to_jiffies(RX_Q_MONITOR));

out_data:
    release_lock();
    if (rc)
        kfree_skb(skb);
}
static inline void brf6150_recv_frame(struct brf6150_info *info, struct sk_buff *skb) { if (unlikely(!test_bit(HCI_RUNNING, &info->hdev->flags))) { NBT_DBG("fw_event\n"); brf6150_parse_fw_event(info); kfree_skb(skb); } else { hci_recv_frame(skb); if (!(brf6150_inb(info, UART_LSR) & UART_LSR_DR)) brf6150_enable_pm_rx(info); NBT_DBG("Frame sent to upper layer\n"); } }
/*
 * During BC4 firmware download, vendor events (first byte 0xff) report
 * command status: bytes 11/12 non-zero means the command failed.
 * Anything else is ordinary HCI traffic and goes to the core.
 */
void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
{
    if (skb->data[0] == 0xff) {
        /* Firmware (vendor) event: check the status bytes. */
        if (skb->data[11] || skb->data[12]) {
            dev_err(info->dev, "Firmware sending command failed\n");
            info->fw_error = -EPROTO;
        }

        kfree_skb(skb);
        complete(&info->fw_completion);
        return;
    }

    /* Not a firmware packet: pass it through to the HCI core. */
    hci_recv_frame(info->hdev, skb);
}
/* Called by Shared Transport layer when receive data is * available */ static long hci_st_receive(void *priv_data, struct sk_buff *skb) { int err; int len; struct hci_st *lhst = (struct hci_st *)priv_data; BTDRV_API_START(); err = 0; len = 0; if (skb == NULL) { BT_DRV_ERR("Invalid SKB received from ST"); BTDRV_API_EXIT(-EFAULT); return -EFAULT; } if (!lhst) { kfree_skb(skb); BT_DRV_ERR("Invalid hci_st memory,freeing SKB"); BTDRV_API_EXIT(-EFAULT); return -EFAULT; } if (!test_bit(BT_DRV_RUNNING, &lhst->flags)) { kfree_skb(skb); BT_DRV_ERR("Device is not running,freeing SKB"); BTDRV_API_EXIT(-EINVAL); return -EINVAL; } len = skb->len; skb->dev = (struct net_device *)lhst->hdev; /* Forward skb to HCI CORE layer */ err = hci_recv_frame(skb); if (err) { kfree_skb(skb); BT_DRV_ERR("Unable to push skb to HCI CORE(%d),freeing SKB", err); BTDRV_API_EXIT(err); return err; } lhst->hdev->stat.byte_rx += len; BTDRV_API_EXIT(0); return 0; }
/* Get packet from user space buffer(already verified) */
/*
 * Copies one HCI frame from user space into an skb (first byte is the
 * packet type) and injects it into the HCI core.  Returns the number
 * of bytes consumed or a negative errno.
 */
static inline ssize_t hci_vhci_get_user(struct hci_vhci_struct *hci_vhci, const char *buf, size_t count)
{
    struct sk_buff *skb;

    /* Fix: also reject empty writes — with count == 0 the code below
     * read an uninitialized byte and pulled past the end of the skb. */
    if (count < 1 || count > HCI_MAX_FRAME_SIZE)
        return -EINVAL;

    if (!(skb = bluez_skb_alloc(count, GFP_KERNEL)))
        return -ENOMEM;

    /* Fix: the return value of copy_from_user() was ignored, so a
     * faulting user buffer silently injected garbage into the stack. */
    if (copy_from_user(skb_put(skb, count), buf, count)) {
        kfree_skb(skb);
        return -EFAULT;
    }

    skb->dev = (void *) &hci_vhci->hdev;
    /* First byte is the packet type; strip it off the payload. */
    skb->pkt_type = *((__u8 *) skb->data);
    skb_pull(skb, 1);

    hci_recv_frame(skb);

    return count;
}
/*
 * Wrap a packet received from the SMD channel in an skb and hand it to
 * the HCI core.  Runs in IRQ context, and the source buffer may be
 * ioremapped memory.
 */
static int btqcomsmd_recv(struct hci_dev *hdev, unsigned type, const void *data,
                          size_t count)
{
    struct sk_buff *skb;

    /* IRQ context: atomic allocation only. */
    skb = bt_skb_alloc(count, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;

    bt_cb(skb)->pkt_type = type;

    /* The payload may live in ioremapped memory, so use the io-safe
     * copy helper rather than plain memcpy. */
    memcpy_fromio(skb_put(skb, count), data, count);

    return hci_recv_frame(hdev, skb);
}
static inline int h4_check_data_len(struct h4_struct *h4, int len) { register int room = skb_tailroom(h4->rx_skb); BT_DBG("len %d room %d", len, room); if (!len) { BT_DMP(h4->rx_skb->data, h4->rx_skb->len); hci_recv_frame(h4->rx_skb); } else if (len > room) { BT_ERR("Data length is too large"); kfree_skb(h4->rx_skb); } else { h4->rx_state = H4_W4_DATA; h4->rx_count = len; return len; } h4->rx_state = H4_W4_PACKET_TYPE; h4->rx_skb = NULL; h4->rx_count = 0; return 0; }
static inline int n_hci_check_data_len(struct n_hci *n_hci, int len) { register int room = skb_tailroom(n_hci->rx_skb); DBG("len %d room %d", len, room); if (!len) { DMP(n_hci->rx_skb->data, n_hci->rx_skb->len); hci_recv_frame(n_hci->rx_skb); } else if (len > room) { ERR("Data length is to large"); kfree_skb(n_hci->rx_skb); n_hci->hdev.stat.err_rx++; } else { n_hci->rx_state = WAIT_DATA; n_hci->rx_count = len; return len; } n_hci->rx_state = WAIT_PACKET_TYPE; n_hci->rx_skb = NULL; n_hci->rx_count = 0; return 0; }
static inline int ll_check_data_len(struct hci_dev *hdev, struct ll_struct *ll, int len) { int room = skb_tailroom(ll->rx_skb); BT_DBG("len %d room %d", len, room); if (!len) { hci_recv_frame(hdev, ll->rx_skb); } else if (len > room) { BT_ERR("Data length is too large"); kfree_skb(ll->rx_skb); } else { ll->rx_state = HCILL_W4_DATA; ll->rx_count = len; return len; } ll->rx_state = HCILL_W4_PACKET_TYPE; ll->rx_skb = NULL; ll->rx_count = 0; return 0; }
static inline int brcm_check_data_len(struct brcm_struct *brcm, int len) { register int room = skb_tailroom(brcm->rx_skb); BT_DBG("len %d room %d", len, room); if (!len) { hci_recv_frame(brcm->rx_skb); } else if (len > room) { BT_ERR("Data length is too large"); kfree_skb(brcm->rx_skb); } else { brcm->rx_state = HCIBRCM_W4_DATA; brcm->rx_count = len; return len; } brcm->rx_state = HCIBRCM_W4_PACKET_TYPE; brcm->rx_skb = NULL; brcm->rx_count = 0; return 0; }
static inline int ibs_check_data_len(struct ibs_struct *ibs, int len) { register int room = skb_tailroom(ibs->rx_skb); BT_DBG("len %d room %d", len, room); if (!len) { hci_recv_frame(ibs->rx_skb); } else if (len > room) { BT_ERR("Data length is too large"); kfree_skb(ibs->rx_skb); } else { ibs->rx_state = HCI_IBS_W4_DATA; ibs->rx_count = len; return len; } ibs->rx_state = HCI_IBS_W4_PACKET_TYPE; ibs->rx_skb = NULL; ibs->rx_count = 0; return 0; }
/*
 * Reassemble HCI packets from BlueFRITZ! USB blocks.  Header flag bits:
 * 0x10 = error, 0x04 = first block of a packet (carries the packet-type
 * byte), 0x08 = last block.  A partial packet is accumulated in
 * data->reassembly.  Returns 0 on success or a negative errno.
 */
static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned char *buf, int len)
{
    BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len);

    if (hdr & 0x10) {
        /* Error block: abandon any in-progress reassembly. */
        BT_ERR("%s error in block", data->hdev->name);
        kfree_skb(data->reassembly);
        data->reassembly = NULL;
        return -EIO;
    }

    if (hdr & 0x04) {
        /* Start block: begins a new packet. */
        struct sk_buff *skb;
        unsigned char pkt_type;
        int pkt_len = 0;

        if (data->reassembly) {
            /* A previous packet never finished; drop it. */
            BT_ERR("%s unexpected start block", data->hdev->name);
            kfree_skb(data->reassembly);
            data->reassembly = NULL;
        }

        if (len < 1) {
            BT_ERR("%s no packet type found", data->hdev->name);
            return -EPROTO;
        }

        pkt_type = *buf++;
        len--;

        /* Compute the expected total length from the type's header.
         * NOTE(review): an unknown pkt_type falls through with
         * pkt_len == 0, allocating a zero-size skb that later
         * continuation blocks would overflow — confirm upstream
         * filtering guarantees known types only. */
        switch (pkt_type) {
        case HCI_EVENT_PKT:
            if (len >= HCI_EVENT_HDR_SIZE) {
                struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf;
                pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;
            } else {
                BT_ERR("%s event block is too short", data->hdev->name);
                return -EILSEQ;
            }
            break;

        case HCI_ACLDATA_PKT:
            if (len >= HCI_ACL_HDR_SIZE) {
                struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf;
                pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen);
            } else {
                BT_ERR("%s data block is too short", data->hdev->name);
                return -EILSEQ;
            }
            break;

        case HCI_SCODATA_PKT:
            if (len >= HCI_SCO_HDR_SIZE) {
                struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf;
                pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen;
            } else {
                BT_ERR("%s audio block is too short", data->hdev->name);
                return -EILSEQ;
            }
            break;
        }

        skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
        if (!skb) {
            BT_ERR("%s no memory for the packet", data->hdev->name);
            return -ENOMEM;
        }

        skb->dev = (void *) data->hdev;
        bt_cb(skb)->pkt_type = pkt_type;

        data->reassembly = skb;
    } else {
        /* Continuation block must belong to an in-progress packet. */
        if (!data->reassembly) {
            BT_ERR("%s unexpected continuation block", data->hdev->name);
            return -EIO;
        }
    }

    if (len > 0)
        memcpy(skb_put(data->reassembly, len), buf, len);

    if (hdr & 0x08) {
        /* Last block: packet complete, hand it to the HCI core. */
        hci_recv_frame(data->reassembly);
        data->reassembly = NULL;
    }

    return 0;
}
/*
 * Reassemble HCI frames from a USB transfer for one of two queues
 * (0/1).  A partial frame is kept in data->rx_skb[queue]; the expected
 * remaining byte count and the packet type live in the skb control
 * buffer.  Returns 0 on success or a negative errno.
 */
static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
{
    struct bpa10x_data *data = hdev->driver_data;

    BT_DBG("%s queue %d buffer %p count %d", hdev->name, queue, buf, count);

    if (queue < 0 || queue > 1)
        return -EILSEQ;

    hdev->stat.byte_rx += count;

    while (count) {
        struct sk_buff *skb = data->rx_skb[queue];
        /* Per-skb reassembly state stored in skb->cb. */
        struct { __u8 type; int expect; } *scb;
        int type, len = 0;

        if (!skb) {
            /* Start of the frame */
            type = *((__u8 *) buf);
            count--; buf++;

            /* Derive the full frame length from the type's header.
             * NOTE(review): an unknown type leaves len == 0, producing
             * an empty "complete" frame that is still delivered —
             * confirm the hardware only sends the four known types. */
            switch (type) {
            case HCI_EVENT_PKT:
                if (count >= HCI_EVENT_HDR_SIZE) {
                    struct hci_event_hdr *h = buf;
                    len = HCI_EVENT_HDR_SIZE + h->plen;
                } else
                    return -EILSEQ;
                break;

            case HCI_ACLDATA_PKT:
                if (count >= HCI_ACL_HDR_SIZE) {
                    struct hci_acl_hdr *h = buf;
                    len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                } else
                    return -EILSEQ;
                break;

            case HCI_SCODATA_PKT:
                if (count >= HCI_SCO_HDR_SIZE) {
                    struct hci_sco_hdr *h = buf;
                    len = HCI_SCO_HDR_SIZE + h->dlen;
                } else
                    return -EILSEQ;
                break;

            case HCI_VENDOR_PKT:
                if (count >= HCI_VENDOR_HDR_SIZE) {
                    struct hci_vendor_hdr *h = buf;
                    len = HCI_VENDOR_HDR_SIZE + __le16_to_cpu(h->dlen);
                } else
                    return -EILSEQ;
                break;
            }

            skb = bt_skb_alloc(len, GFP_ATOMIC);
            if (!skb) {
                BT_ERR("%s no memory for packet", hdev->name);
                return -ENOMEM;
            }

            skb->dev = (void *) hdev;

            data->rx_skb[queue] = skb;

            scb = (void *) skb->cb;
            scb->type = type;
            scb->expect = len;
        } else {
            /* Continuation */
            scb = (void *) skb->cb;
            len = scb->expect;
        }

        /* Copy as much of this transfer as the frame still needs. */
        len = min(len, count);

        memcpy(skb_put(skb, len), buf, len);

        scb->expect -= len;

        if (scb->expect == 0) {
            /* Complete frame */
            data->rx_skb[queue] = NULL;

            bt_cb(skb)->pkt_type = scb->type;
            hci_recv_frame(skb);
        }

        count -= len; buf += len;
    }

    return 0;
}
/*
 * Byte-at-a-time receive state machine for the BlueCard PCMCIA buffer
 * at the given register offset.  Parses packet type, then the
 * type-specific header to learn the payload length, then the payload;
 * complete frames go to the HCI core.  Type 0x00 is a card "init"
 * packet handled locally.
 */
static void bluecard_receive(struct bluecard_info *info, unsigned int offset)
{
    unsigned int iobase;
    unsigned char buf[31];
    int i, len;

    if (!info) {
        BT_ERR("Unknown device");
        return;
    }

    iobase = info->p_dev->resource[0]->start;

    if (test_bit(XMIT_SENDING_READY, &(info->tx_state)))
        bluecard_enable_activity_led(info);

    len = bluecard_read(iobase, offset, buf, sizeof(buf));

    for (i = 0; i < len; i++) {

        /* Allocate packet */
        if (info->rx_skb == NULL) {
            info->rx_state = RECV_WAIT_PACKET_TYPE;
            info->rx_count = 0;
            info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
            if (!info->rx_skb) {
                BT_ERR("Can't allocate mem for new packet");
                return;
            }
        }

        if (info->rx_state == RECV_WAIT_PACKET_TYPE) {

            /* First byte of a frame selects the header to expect. */
            bt_cb(info->rx_skb)->pkt_type = buf[i];

            switch (bt_cb(info->rx_skb)->pkt_type) {

            case 0x00:
                /* init packet */
                if (offset != 0x00) {
                    set_bit(XMIT_BUF_ONE_READY, &(info->tx_state));
                    set_bit(XMIT_BUF_TWO_READY, &(info->tx_state));
                    set_bit(XMIT_SENDING_READY, &(info->tx_state));
                    bluecard_write_wakeup(info);
                }

                kfree_skb(info->rx_skb);
                info->rx_skb = NULL;
                break;

            case HCI_EVENT_PKT:
                info->rx_state = RECV_WAIT_EVENT_HEADER;
                info->rx_count = HCI_EVENT_HDR_SIZE;
                break;

            case HCI_ACLDATA_PKT:
                info->rx_state = RECV_WAIT_ACL_HEADER;
                info->rx_count = HCI_ACL_HDR_SIZE;
                break;

            case HCI_SCODATA_PKT:
                info->rx_state = RECV_WAIT_SCO_HEADER;
                info->rx_count = HCI_SCO_HDR_SIZE;
                break;

            default:
                /* unknown packet */
                BT_ERR("Unknown HCI packet with type 0x%02x received",
                       bt_cb(info->rx_skb)->pkt_type);
                info->hdev->stat.err_rx++;

                kfree_skb(info->rx_skb);
                info->rx_skb = NULL;
                break;

            }

        } else {

            /* Accumulate header/payload bytes until rx_count drains. */
            *skb_put(info->rx_skb, 1) = buf[i];
            info->rx_count--;

            if (info->rx_count == 0) {

                int dlen;
                struct hci_event_hdr *eh;
                struct hci_acl_hdr *ah;
                struct hci_sco_hdr *sh;

                /* Header complete: read the payload length from it,
                 * or (RECV_WAIT_DATA) the whole frame is done. */
                switch (info->rx_state) {

                case RECV_WAIT_EVENT_HEADER:
                    eh = hci_event_hdr(info->rx_skb);
                    info->rx_state = RECV_WAIT_DATA;
                    info->rx_count = eh->plen;
                    break;

                case RECV_WAIT_ACL_HEADER:
                    ah = hci_acl_hdr(info->rx_skb);
                    dlen = __le16_to_cpu(ah->dlen);
                    info->rx_state = RECV_WAIT_DATA;
                    info->rx_count = dlen;
                    break;

                case RECV_WAIT_SCO_HEADER:
                    sh = hci_sco_hdr(info->rx_skb);
                    info->rx_state = RECV_WAIT_DATA;
                    info->rx_count = sh->dlen;
                    break;

                case RECV_WAIT_DATA:
                    /* Frame complete: the core takes ownership. */
                    hci_recv_frame(info->hdev, info->rx_skb);
                    info->rx_skb = NULL;
                    break;

                }

            }

        }

    }

    info->hdev->stat.byte_rx += len;
}
/* Recv data */
/*
 * HCILL line-discipline receive state machine.  Consumes count bytes:
 * the first byte of each frame selects the packet type (or an HCILL
 * power-management signal handled inline); the header then yields the
 * payload length via ll_check_data_len(); complete frames go to the
 * HCI core.
 *
 * NOTE(review): the trailing "return count;" and closing brace of this
 * function appear to be missing from this view of the file — verify
 * against the full source before building.
 */
static int ll_recv(struct hci_uart *hu, const void *data, int count)
{
    struct ll_struct *ll = hu->priv;
    const char *ptr;
    struct hci_event_hdr *eh;
    struct hci_acl_hdr *ah;
    struct hci_sco_hdr *sh;
    int len, type, dlen;

    BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count,
           ll->rx_state, ll->rx_count);

    ptr = data;
    while (count) {
        if (ll->rx_count) {
            /* Collecting header or payload bytes for the current frame. */
            len = min_t(unsigned int, ll->rx_count, count);
            memcpy(skb_put(ll->rx_skb, len), ptr, len);
            ll->rx_count -= len; count -= len; ptr += len;

            if (ll->rx_count)
                continue;

            switch (ll->rx_state) {
            case HCILL_W4_DATA:
                BT_DBG("Complete data");
                hci_recv_frame(hu->hdev, ll->rx_skb);

                ll->rx_state = HCILL_W4_PACKET_TYPE;
                ll->rx_skb = NULL;
                continue;

            case HCILL_W4_EVENT_HDR:
                eh = hci_event_hdr(ll->rx_skb);

                BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);

                ll_check_data_len(hu->hdev, ll, eh->plen);
                continue;

            case HCILL_W4_ACL_HDR:
                ah = hci_acl_hdr(ll->rx_skb);
                dlen = __le16_to_cpu(ah->dlen);

                BT_DBG("ACL header: dlen %d", dlen);

                ll_check_data_len(hu->hdev, ll, dlen);
                continue;

            case HCILL_W4_SCO_HDR:
                sh = hci_sco_hdr(ll->rx_skb);

                BT_DBG("SCO header: dlen %d", sh->dlen);

                ll_check_data_len(hu->hdev, ll, sh->dlen);
                continue;
            }
        }

        /* HCILL_W4_PACKET_TYPE */
        switch (*ptr) {
        case HCI_EVENT_PKT:
            BT_DBG("Event packet");
            ll->rx_state = HCILL_W4_EVENT_HDR;
            ll->rx_count = HCI_EVENT_HDR_SIZE;
            type = HCI_EVENT_PKT;
            break;

        case HCI_ACLDATA_PKT:
            BT_DBG("ACL packet");
            ll->rx_state = HCILL_W4_ACL_HDR;
            ll->rx_count = HCI_ACL_HDR_SIZE;
            type = HCI_ACLDATA_PKT;
            break;

        case HCI_SCODATA_PKT:
            BT_DBG("SCO packet");
            ll->rx_state = HCILL_W4_SCO_HDR;
            ll->rx_count = HCI_SCO_HDR_SIZE;
            type = HCI_SCODATA_PKT;
            break;

        /* HCILL signals */
        case HCILL_GO_TO_SLEEP_IND:
            BT_DBG("HCILL_GO_TO_SLEEP_IND packet");
            ll_device_want_to_sleep(hu);
            ptr++; count--;
            continue;

        case HCILL_GO_TO_SLEEP_ACK:
            /* shouldn't happen */
            BT_ERR("received HCILL_GO_TO_SLEEP_ACK (in state %ld)",
                   ll->hcill_state);
            ptr++; count--;
            continue;

        case HCILL_WAKE_UP_IND:
            BT_DBG("HCILL_WAKE_UP_IND packet");
            ll_device_want_to_wakeup(hu);
            ptr++; count--;
            continue;

        case HCILL_WAKE_UP_ACK:
            BT_DBG("HCILL_WAKE_UP_ACK packet");
            ll_device_woke_up(hu);
            ptr++; count--;
            continue;

        default:
            BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr);
            hu->hdev->stat.err_rx++;
            ptr++; count--;
            continue;
        }

        ptr++; count--;

        /* Allocate packet */
        ll->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
        if (!ll->rx_skb) {
            BT_ERR("Can't allocate mem for new packet");
            ll->rx_state = HCILL_W4_PACKET_TYPE;
            ll->rx_count = 0;
            return -ENOMEM;
        }

        hci_skb_pkt_type(ll->rx_skb) = type;
    }
/*
 * Drain the UART RX FIFO one byte at a time through a receive state
 * machine: packet type -> type-specific header -> payload.  Complete
 * frames are handed to the HCI core.  Bails out after ~16 iterations
 * so the interrupt handler cannot spin forever.
 */
static void btuart_receive(btuart_info_t *info)
{
    unsigned int iobase;
    int boguscount = 0;

    if (!info) {
        BT_ERR("Unknown device");
        return;
    }

    iobase = info->p_dev->io.BasePort1;

    do {
        info->hdev->stat.byte_rx++;

        /* Allocate packet */
        if (info->rx_skb == NULL) {
            info->rx_state = RECV_WAIT_PACKET_TYPE;
            info->rx_count = 0;
            if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
                BT_ERR("Can't allocate mem for new packet");
                return;
            }
        }

        if (info->rx_state == RECV_WAIT_PACKET_TYPE) {

            /* First byte of a frame selects the header to expect. */
            info->rx_skb->dev = (void *) info->hdev;
            bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);

            switch (bt_cb(info->rx_skb)->pkt_type) {

            case HCI_EVENT_PKT:
                info->rx_state = RECV_WAIT_EVENT_HEADER;
                info->rx_count = HCI_EVENT_HDR_SIZE;
                break;

            case HCI_ACLDATA_PKT:
                info->rx_state = RECV_WAIT_ACL_HEADER;
                info->rx_count = HCI_ACL_HDR_SIZE;
                break;

            case HCI_SCODATA_PKT:
                info->rx_state = RECV_WAIT_SCO_HEADER;
                info->rx_count = HCI_SCO_HDR_SIZE;
                break;

            default:
                /* Unknown packet */
                BT_ERR("Unknown HCI packet with type 0x%02x received",
                       bt_cb(info->rx_skb)->pkt_type);
                info->hdev->stat.err_rx++;
                /* NOTE(review): clearing HCI_RUNNING on a single bad
                 * byte effectively takes the device down — confirm
                 * this is intentional. */
                clear_bit(HCI_RUNNING, &(info->hdev->flags));

                kfree_skb(info->rx_skb);
                info->rx_skb = NULL;
                break;

            }

        } else {

            /* Accumulate header/payload bytes until rx_count drains. */
            *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
            info->rx_count--;

            if (info->rx_count == 0) {

                int dlen;
                struct hci_event_hdr *eh;
                struct hci_acl_hdr *ah;
                struct hci_sco_hdr *sh;

                /* Header complete: learn the payload length, or
                 * (RECV_WAIT_DATA) the whole frame is done. */
                switch (info->rx_state) {

                case RECV_WAIT_EVENT_HEADER:
                    eh = hci_event_hdr(info->rx_skb);
                    info->rx_state = RECV_WAIT_DATA;
                    info->rx_count = eh->plen;
                    break;

                case RECV_WAIT_ACL_HEADER:
                    ah = hci_acl_hdr(info->rx_skb);
                    dlen = __le16_to_cpu(ah->dlen);
                    info->rx_state = RECV_WAIT_DATA;
                    info->rx_count = dlen;
                    break;

                case RECV_WAIT_SCO_HEADER:
                    sh = hci_sco_hdr(info->rx_skb);
                    info->rx_state = RECV_WAIT_DATA;
                    info->rx_count = sh->dlen;
                    break;

                case RECV_WAIT_DATA:
                    /* Frame complete: the core takes ownership. */
                    hci_recv_frame(info->rx_skb);
                    info->rx_skb = NULL;
                    break;

                }

            }

        }

        /* Make sure we don't stay here too long */
        if (boguscount++ > 16)
            break;

    } while (inb(iobase + UART_LSR) & UART_LSR_DR);
}