static void qmi_read_work(struct work_struct *ws) { struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work); struct smd_channel *ch = ctxt->ch; unsigned char buf[QMI_MAX_PACKET]; int sz; for (;;) { sz = smd_cur_packet_size(ch); if (sz == 0) break; if (sz < smd_read_avail(ch)) break; if (sz > QMI_MAX_PACKET) { smd_read(ch, 0, sz); continue; } if (smd_read(ch, buf, sz) != sz) { printk(KERN_ERR "qmi: not enough data?!\n"); continue; } /* interface selector must be 1 */ if (buf[0] != 0x01) continue; qmi_process_qmux(ctxt, buf + 1, sz - 1); } }
static void nmea_work_func(struct work_struct *ws) { int sz; for (;;) { sz = smd_cur_packet_size(nmea_devp->ch); if (sz == 0) break; if (sz > smd_read_avail(nmea_devp->ch)) break; if (sz > MAX_BUF_SIZE) { smd_read(nmea_devp->ch, 0, sz); continue; } mutex_lock(&nmea_rx_buf_lock); if (smd_read(nmea_devp->ch, nmea_devp->rx_buf, sz) != sz) { mutex_unlock(&nmea_rx_buf_lock); printk(KERN_ERR "nmea: not enough data?!\n"); continue; } nmea_devp->bytes_read = sz; mutex_unlock(&nmea_rx_buf_lock); wake_up_interruptible(&nmea_wait_queue); } }
static void hci_smd_recv_event(void) { int len = 0; int rc = 0; struct sk_buff *skb = NULL; struct hci_smd_data *hsmd = &hs; wake_lock(&hs.wake_lock_rx); len = smd_read_avail(hsmd->event_channel); if (len > HCI_MAX_FRAME_SIZE) { BT_ERR("Frame larger than the allowed size, flushing frame"); rc = smd_read(hsmd->event_channel, NULL, len); goto out_event; } while (len > 0) { skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) { BT_ERR("Error in allocating socket buffer"); smd_read(hsmd->event_channel, NULL, len); goto out_event; } rc = smd_read(hsmd->event_channel, skb_put(skb, len), len); if (rc < len) { BT_ERR("Error in reading from the event channel"); goto out_event; } skb->dev = (void *)hsmd->hdev; bt_cb(skb)->pkt_type = HCI_EVENT_PKT; skb_orphan(skb); rc = hci_recv_frame(skb); if (rc < 0) { BT_ERR("Error in passing the packet to HCI Layer"); /* * skb is getting freed in hci_recv_frame, making it * to null to avoid multiple access */ skb = NULL; goto out_event; } len = smd_read_avail(hsmd->event_channel); /* * Start the timer to monitor whether the Rx queue is * empty for releasing the Rx wake lock */ BT_DBG("Rx Timer is starting"); mod_timer(&hsmd->rx_q_timer, jiffies + msecs_to_jiffies(RX_Q_MONITOR)); } out_event: release_lock(); if (rc) kfree_skb(skb); }
static void qmi_read_work(struct work_struct *ws) { //struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work); //struct smd_channel *ch = ctxt->ch; unsigned char buf[QMI_MAX_PACKET]; struct qmi_ctxt *ctxt; int sz; uint32_t chnum; for (;;) { sz = smd_cur_packet_size(ctrl_ch); if (sz == 0) break; if (sz < smd_read_avail(ctrl_ch)) break; if (sz > QMI_MAX_PACKET) { smd_read(ctrl_ch, NULL, sz); continue; } if (smd_read(ctrl_ch, buf, sz) != sz) { printk(KERN_ERR "qmi: not enough data?!\n"); continue; } DBG("packet: %d\n", sz); // print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, sz); if (sz <= 4) { DBG("packet size less 4\n"); continue; } chnum = *(uint32_t*)&buf[sz - 4]; DBG("chnum = %d\n", chnum); /* interface selector must be 1 */ if (buf[0] != 0x01) continue; if (qmi_device0.ch_num == chnum) ctxt = &qmi_device0; else if (qmi_device1.ch_num == chnum) ctxt = &qmi_device1; else if (qmi_device2.ch_num == chnum) ctxt = &qmi_device2; else { DBG("bad chnum %d\n", chnum); continue; } qmi_process_qmux(ctxt, buf + 1, sz - 1 - 4); } }
static void wcnssctrl_rx_handler(struct work_struct *worker) { int len = 0; int rc = 0; unsigned char buf[WCNSS_MAX_FRAME_SIZE]; struct smd_msg_hdr *phdr; struct wcnss_version *pversion; len = smd_read_avail(penv->smd_ch); if (len > WCNSS_MAX_FRAME_SIZE) { pr_err("wcnss: frame larger than the allowed size\n"); smd_read(penv->smd_ch, NULL, len); return; } if (len <= 0) return; rc = smd_read(penv->smd_ch, buf, len); if (rc < len) { pr_err("wcnss: incomplete data read from smd\n"); return; } phdr = (struct smd_msg_hdr *)buf; switch (phdr->msg_type) { case WCNSS_VERSION_RSP: pversion = (struct wcnss_version *)buf; if (len != sizeof(struct wcnss_version)) { pr_err("wcnss: invalid version data from wcnss %d\n", len); return; } snprintf(penv->wcnss_version, WCNSS_VERSION_LEN, "%02x%02x%02x%02x", pversion->major, pversion->minor, pversion->version, pversion->revision); pr_info("wcnss: version %s\n", penv->wcnss_version); /* * schedule work to download nvbin to riva ccpu, * only if riva major >= 1 and minor >= 4. */ if ((pversion->major >= 1) && (pversion->minor >= 4)) { pr_info("wcnss: schedule dnld work for riva\n"); schedule_work(&penv->wcnssctrl_nvbin_dnld_work); } break; case WCNSS_NVBIN_DNLD_RSP: pr_info("wcnss: received WCNSS_NVBIN_DNLD_RSP from ccpu\n"); break; default: pr_err("wcnss: invalid message type %d\n", phdr->msg_type); } return; }
static void smd_net_data_handler(unsigned long arg) { struct net_device *dev = (struct net_device *) arg; struct smm6260net_private *p = netdev_priv(dev); struct sk_buff *skb; void *ptr = 0; int sz; int err; for (;;) { sz = smd_cur_packet_size(p->ch); if (sz == 0) break; if (smd_read_avail(p->ch) < sz) break; if (sz > SMM6260_NET_DEFAULT_MTU) { ptr = 0; //pr_err("rmnet_recv() discarding %d len\n", sz); }else{ skb = dev_alloc_skb(sz); if (skb == NULL) { //pr_err("smm6260net_recv() cannot allocate skb\n"); } else { skb->dev = dev; ptr = skb_put(skb, sz); wake_lock_timeout(&p->wake_lock, HZ / 2); if (smd_read(p->ch, ptr, sz) != sz) { //pr_err("smm6260net_recv() smd lied about avail?!"); ptr = 0; dev_kfree_skb_irq(skb); } else { skb->protocol = htons(ETH_P_IP);//eth_type_trans(skb, dev); if(count_this_packet(skb)) { /* update out statistics */ #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += smm6260net_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } skb_reset_mac_header(skb); netif_rx(skb); //pr_info("%s: smm6260net_recv() size=%d", p->chname, skb->len); } continue; } } if (smd_read(p->ch, ptr, sz) != sz) pr_err("rmnet_recv() smd lied about avail?!"); } }
/* Called in soft-irq context */ static void smd_net_data_handler(unsigned long arg) { struct net_device *dev = (struct net_device *) arg; struct rmnet_private *p = netdev_priv(dev); struct sk_buff *skb; void *ptr = 0; int sz; for (;;) { sz = smd_cur_packet_size(p->ch); if (sz == 0) break; if (smd_read_avail(p->ch) < sz) break; if (sz > 1514) { pr_err("rmnet_recv() discarding %d len\n", sz); ptr = 0; } else { skb = dev_alloc_skb(sz + NET_IP_ALIGN); if (skb == NULL) { pr_err("rmnet_recv() cannot allocate skb\n"); /* out of memory, reschedule a later attempt */ smd_net_data_tasklet.data = (unsigned long)dev; tasklet_schedule(&smd_net_data_tasklet); break; } else { skb->dev = dev; skb_reserve(skb, NET_IP_ALIGN); ptr = skb_put(skb, sz); wake_lock_timeout(&p->wake_lock, HZ / 2); if (smd_read(p->ch, ptr, sz) != sz) { pr_err("rmnet_recv() smd lied about avail?!"); ptr = 0; dev_kfree_skb_irq(skb); } else { skb->protocol = eth_type_trans(skb, dev); if (count_this_packet(ptr, skb->len)) { #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } netif_rx(skb); } continue; } } if (smd_read(p->ch, ptr, sz) != sz) pr_err("rmnet_recv() smd lied about avail?!"); } }
static void hci_smd_recv_data(void) { int len = 0; int rc = 0; struct sk_buff *skb = NULL; struct hci_smd_data *hsmd = &hs; wake_lock(&hs.wake_lock_rx); len = smd_read_avail(hsmd->data_channel); if (len > HCI_MAX_FRAME_SIZE) { BT_ERR("Frame larger than the allowed size, flushing frame"); smd_read(hsmd->data_channel, NULL, len); goto out_data; } if (len <= 0) goto out_data; skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) { BT_ERR("Error in allocating socket buffer"); smd_read(hsmd->data_channel, NULL, len); goto out_data; } rc = smd_read(hsmd->data_channel, skb_put(skb, len), len); if (rc < len) { BT_ERR("Error in reading from the channel"); goto out_data; } skb->dev = (void *)hsmd->hdev; bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; skb_orphan(skb); rc = hci_recv_frame(skb); if (rc < 0) { BT_ERR("Error in passing the packet to HCI Layer"); skb = NULL; goto out_data; } BT_DBG("Rx Timer is starting"); mod_timer(&hsmd->rx_q_timer, jiffies + msecs_to_jiffies(RX_Q_MONITOR)); out_data: release_lock(); if (rc) kfree_skb(skb); }
/*
 * Work handler: drain complete control packets from the SMD control
 * channel and forward each one to the USB side of the rmnet bridge via
 * the port's send_cpkt_response() callback.
 */
static void grmnet_ctrl_smd_read_w(struct work_struct *w)
{
	struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w);
	struct rmnet_ctrl_port *port = c->port;
	int sz;
	struct rmnet_ctrl_pkt *cpkt;
	unsigned long flags;

	while (1) {
		/* size of the next complete packet; 0 means none pending */
		sz = smd_cur_packet_size(c->ch);
		if (sz == 0)
			break;

		/* packet not fully arrived yet -- retry on next wakeup */
		if (smd_read_avail(c->ch) < sz)
			break;

		cpkt = rmnet_alloc_ctrl_pkt(sz, GFP_KERNEL);
		if (IS_ERR(cpkt)) {
			pr_err("%s: unable to allocate rmnet control pkt\n",
					__func__);
			return;
		}
		/* NOTE(review): smd_read()'s return value is stored
		 * unchecked; a short read would leave cpkt->len smaller
		 * than sz with no error reported -- confirm whether SMD
		 * guarantees a full packet read here. */
		cpkt->len = smd_read(c->ch, cpkt->buf, sz);

		/* send it to USB here */
		spin_lock_irqsave(&port->port_lock, flags);
		if (port->port_usb && port->port_usb->send_cpkt_response) {
			port->port_usb->send_cpkt_response(
							port->port_usb,
							cpkt);
			c->to_host++;
		}
		/* NOTE(review): if port_usb is gone (e.g. cable detach),
		 * cpkt appears never to be freed on this path -- looks
		 * like a memory leak.  Confirm the ownership contract of
		 * send_cpkt_response() and free cpkt in an else branch if
		 * the callee owns it only when called. */
		spin_unlock_irqrestore(&port->port_lock, flags);
	}
}
/*
 * Read the RPM acknowledgement message from the SMD channel.
 *
 * Returns sizeof(rpm_gen_hdr) + sizeof(kvp_data) when a well-formed
 * success ACK (command or request magic with the expected length) was
 * received, or 1 on an error/unrecognized ACK.
 */
uint32_t rpm_smd_recv_data(uint32_t* len)
{
	rpm_ack_msg *resp;
	uint32_t ret = 0;
	/* As per the current design rpm response does not exceed 20 bytes */
	uint32_t response[5];

	smd_read(&ch, len, SMD_APPS_RPM, response);

	resp = (rpm_ack_msg *)response;
	/* make sure we look at what the RPM actually wrote, not stale cache */
	arch_invalidate_cache_range((addr_t)resp, sizeof(rpm_gen_hdr));

	/* a command ACK and a request ACK of the expected length are both
	 * treated as success */
	if ((resp->hdr.type == RPM_CMD_MAGIC ||
	     resp->hdr.type == RPM_REQ_MAGIC) &&
	    resp->hdr.len == ACK_MSG_LENGTH) {
		dprintf(SPEW, "Received SUCCESS CMD ACK\n");
	} else {
		ret = 1;
		dprintf(CRITICAL, "Received ERROR ACK \n");
	}

	if (!ret)
		ret = sizeof(rpm_gen_hdr) + sizeof(kvp_data);

	return ret;
}
void __diag_smd_qdsp_send_req(int context) { void *buf; if (driver->chqdsp && (!driver->in_busy_qdsp)) { int r = smd_read_avail(driver->chqdsp); if (r > USB_MAX_IN_BUF) { printk(KERN_INFO "diag dropped num bytes = %d\n", r); return; } if (r > 0) { buf = driver->usb_buf_in_qdsp; if (!buf) { printk(KERN_INFO "Out of diagmem for q6\n"); } else { APPEND_DEBUG('l'); if (context == SMD_CONTEXT) smd_read_from_cb( driver->chqdsp, buf, r); else smd_read(driver->chqdsp, buf, r); APPEND_DEBUG('m'); driver->usb_write_ptr_qdsp->length = r; driver->in_busy_qdsp = 1; diag_device_write(buf, QDSP_DATA); } } } }
/*
 * Copy pending diagnostic data from the PHU SMD channel into the ring
 * buffer, provided the ring buffer can accept that many bytes.
 */
void phudiagfwd_read_data_from_smd(void)
{
	int avail = smd_read_avail(phudriver->ch);

	if (avail > PHU_MAX_BUF_SIZE) {
		/* payload exceeds the ring buffer capacity: leave it */
		printk(KERN_ALERT "\n diag: SMD sending in "
			"packets more than %d bytes", PHU_MAX_BUF_SIZE);
		return;
	}
	if (avail <= 0)
		return;

	/* reserve space in the ring buffer before reading */
	if (phudiagfwd_ring_buf_set_data_before_process(phudriver->in_buf,
							avail) != avail) {
		printk(KERN_INFO
		       "phudiagfwd_read_data_from_smd write out of memory !\n");
		return;
	}

	smd_read(phudriver->ch, phudriver->in_buf->end, avail);
	phudriver->in_buf->end += avail;
}
void __diag_smd_wcnss_send_req(void) { void *buf = driver->buf_in_wcnss; int *in_busy_wcnss_ptr = &(driver->in_busy_wcnss); struct diag_request *write_ptr_wcnss = driver->write_ptr_wcnss; if ((!driver->in_busy_wcnss) && driver->ch_wcnss && buf) { int r = smd_read_avail(driver->ch_wcnss); if (r > IN_BUF_SIZE) { if (r < MAX_IN_BUF_SIZE) { pr_err("diag: wcnss packets > %d bytes", r); buf = krealloc(buf, r, GFP_KERNEL); } else { pr_err("diag: wcnss pkt > %d", MAX_IN_BUF_SIZE); return; } } if (r > 0) { if (!buf) { pr_err("Out of diagmem for wcnss\n"); } else { APPEND_DEBUG('i'); smd_read(driver->ch_wcnss, buf, r); APPEND_DEBUG('j'); write_ptr_wcnss->length = r; *in_busy_wcnss_ptr = 1; diag_device_write(buf, WCNSS_DATA, write_ptr_wcnss); } } } }