static int hci_smd_recv_frame(struct hci_dev *hdev, int type) { int len; struct sk_buff *skb; struct smd_channel *channel; unsigned char *buf; switch (type) { case BT_CMD: channel = cmd_channel; break; case BT_DATA: channel = data_channel; break; default: return -EINVAL; } len = smd_cur_packet_size(cmd_channel); if (len > HCI_MAX_FRAME_SIZE) return -EINVAL; while (len) { skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; buf = kmalloc(len, GFP_KERNEL); smd_read(channel, (void *)buf, len); if (memcpy(skb_put(skb, len), buf, len)) { kfree_skb(skb); return -EFAULT; } skb->dev = (void *)hdev; bt_cb(skb)->pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); hci_recv_frame(skb); kfree(skb); kfree(buf); len = smd_cur_packet_size(cmd_channel); if (len > HCI_MAX_FRAME_SIZE) return -EINVAL; } return 0; }
static void nmea_work_func(struct work_struct *ws) { int sz; for (;;) { sz = smd_cur_packet_size(nmea_devp->ch); if (sz == 0) break; if (sz > smd_read_avail(nmea_devp->ch)) break; if (sz > MAX_BUF_SIZE) { smd_read(nmea_devp->ch, 0, sz); continue; } mutex_lock(&nmea_rx_buf_lock); if (smd_read(nmea_devp->ch, nmea_devp->rx_buf, sz) != sz) { mutex_unlock(&nmea_rx_buf_lock); printk(KERN_ERR "nmea: not enough data?!\n"); continue; } nmea_devp->bytes_read = sz; mutex_unlock(&nmea_rx_buf_lock); wake_up_interruptible(&nmea_wait_queue); } }
/*
 * SMD event callback for the LGE SMD port.  Runs in SMD notify context
 * with the channel handle held in the global psmd_device.
 */
static void smd_lge_notify(void *priv, unsigned event)
{
	switch (event) {
	case SMD_EVENT_DATA:
	{
		int sz;
		/* A late DATA event can arrive after the channel handle has
		 * been torn down. */
		if (psmd_device->ch == 0) {
			SMD_LGE_INFO("%s : psmd_device->ch is NULL \n",
					__func__);
			break;
		}
		sz = smd_cur_packet_size(psmd_device->ch);
		SMD_LGE_DBG("%s : SMD_EVENT_DATA current_packet_size = %d\n",
				__func__, sz);
		/* Only wake the reader once a complete packet is in the
		 * FIFO. */
		if ((sz > 0) && (sz <= smd_read_avail(psmd_device->ch)))
#if 0
			queue_work(smd_lge_wq, &psmd_device->read_work);
#else
			/* reader-thread path used instead of the disabled
			 * workqueue path above */
			wake_up(&smd_lge_wait_queue);
#endif
		break;
	}
	case SMD_EVENT_OPEN:
		SMD_LGE_INFO("%s : SMD_EVENT_OPEN\n", __func__);
		break;
	case SMD_EVENT_CLOSE:
		SMD_LGE_INFO("%s : SMD_EVENT_CLOSE\n", __func__);
		break;
	}
}
static void sns_ocmem_smd_read(struct work_struct *ws) { struct smd_channel *ch = sns_ctl.smd_ch; unsigned char *buf = NULL; int sz, len; for (;;) { sz = smd_cur_packet_size(ch); BUG_ON(sz > SMD_BUF_SIZE); len = smd_read_avail(ch); pr_debug("%s: sz=%d, len=%d\n", __func__, sz, len); if (len == 0 || len < sz) break; buf = kzalloc(SMD_BUF_SIZE, GFP_KERNEL); if (buf == NULL) { pr_err("%s: malloc failed", __func__); break; } if (smd_read(ch, buf, sz) != sz) { pr_err("%s: not enough data?!\n", __func__); kfree(buf); continue; } sns_ocmem_smd_process((struct sns_ocmem_hdr_s *)buf, (void *)((char *)buf + sizeof(struct sns_ocmem_hdr_s))); kfree(buf); } }
static void qmi_read_work(struct work_struct *ws) { struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work); struct smd_channel *ch = ctxt->ch; unsigned char buf[QMI_MAX_PACKET]; int sz; for (;;) { sz = smd_cur_packet_size(ch); if (sz == 0) break; if (sz < smd_read_avail(ch)) break; if (sz > QMI_MAX_PACKET) { smd_read(ch, 0, sz); continue; } if (smd_read(ch, buf, sz) != sz) { printk(KERN_ERR "qmi: not enough data?!\n"); continue; } /* interface selector must be 1 */ if (buf[0] != 0x01) continue; qmi_process_qmux(ctxt, buf + 1, sz - 1); } }
static void qmi_notify(void *priv, unsigned event) { //struct qmi_ctxt *ctxt = priv; switch (event) { case SMD_EVENT_DATA: { int sz; sz = smd_cur_packet_size(ctrl_ch); if ((sz > 0) && (sz <= smd_read_avail(ctrl_ch))) { wake_lock_timeout(&wakelock, HZ / 2); queue_work(qmi_wq, &read_work); } break; } case SMD_EVENT_OPEN: printk(KERN_INFO "qmi: smd opened\n"); queue_work(qmi_wq, &open_work); break; case SMD_EVENT_CLOSE: printk(KERN_INFO "qmi: smd closed\n"); break; } }
static void grmnet_ctrl_smd_read_w(struct work_struct *w) { struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w); struct rmnet_ctrl_port *port = c->port; int sz; size_t len; void *buf; unsigned long flags; while (1) { sz = smd_cur_packet_size(c->ch); if (sz == 0) break; if (smd_read_avail(c->ch) < sz) break; buf = kmalloc(sz, GFP_KERNEL); if (!buf) return; len = smd_read(c->ch, buf, sz); /* send it to USB here */ spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb && port->port_usb->send_cpkt_response) { port->port_usb->send_cpkt_response(port->port_usb, buf, len); c->to_host++; } kfree(buf); spin_unlock_irqrestore(&port->port_lock, flags); } }
static void grmnet_ctrl_smd_read_w(struct work_struct *w) { struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w); struct rmnet_ctrl_port *port = c->port; int sz; struct rmnet_ctrl_pkt *cpkt; unsigned long flags; while (1) { sz = smd_cur_packet_size(c->ch); if (sz == 0) break; if (smd_read_avail(c->ch) < sz) break; cpkt = rmnet_alloc_ctrl_pkt(sz, GFP_KERNEL); if (IS_ERR(cpkt)) { pr_err("%s: unable to allocate rmnet control pkt\n", __func__); return; } cpkt->len = smd_read(c->ch, cpkt->buf, sz); /* send it to USB here */ spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb && port->port_usb->send_cpkt_response) { port->port_usb->send_cpkt_response( port->port_usb, cpkt); c->to_host++; } spin_unlock_irqrestore(&port->port_lock, flags); } }
static void smd_vt_notify(void *priv, unsigned event) { unsigned long flags; switch (event) { case SMD_EVENT_DATA: { int sz; if( (smd_vt_devp->open_count != 0) && (smd_vt_devp->ch != 0)){ // SMD bug fix. 0811 sz = smd_cur_packet_size(smd_vt_devp->ch); if ((sz > 0) && (sz <= smd_read_avail(smd_vt_devp->ch))) { /* queue_work(smd_vt_wq, &smd_vt_work); */ spin_lock_irqsave(&smd_vt_read_lock, flags); if (smd_vt_devp->read_avail == 0) { smd_vt_devp->read_avail = sz; wake_up_interruptible(&smd_vt_wait_queue); } spin_unlock_irqrestore(&smd_vt_read_lock, flags); } } break; } case SMD_EVENT_OPEN: smd_vt_devp->open_flag =1; printk(KERN_INFO "smd_vt: smd opened\n"); break; case SMD_EVENT_CLOSE: printk(KERN_INFO "smd_vt: smd closed\n"); break; } }
static void wcn36xx_msm_smd_work(struct work_struct *work) { int avail; int msg_len; void *msg; int ret; struct wcn36xx_msm *wmsm_priv = container_of(work, struct wcn36xx_msm, smd_work); while (1) { msg_len = smd_cur_packet_size(wmsm_priv->smd_ch); if (0 == msg_len) { return; } avail = smd_read_avail(wmsm_priv->smd_ch); if (avail < msg_len) { return; } msg = kmalloc(msg_len, GFP_KERNEL); if (NULL == msg) { return; } ret = smd_read(wmsm_priv->smd_ch, msg, msg_len); if (ret != msg_len) { return; } wmsm_priv->rsp_cb(wmsm_priv->drv_priv, msg, msg_len); kfree(msg); } }
/*
 * SMD notify callback for an APR channel.  On DATA events it drains
 * everything available from the channel into apr_ch->data (under the
 * channel spinlock) and invokes the registered callback per chunk;
 * OPEN sets smd_state and wakes waiters.
 *
 * NOTE(review): 'len' from smd_read_avail() is passed straight to
 * smd_read_from_cb() without being checked against the size of
 * apr_ch->data — confirm the buffer is at least as large as the SMD
 * FIFO, or add an explicit bound check.
 */
static void apr_tal_notify(void *priv, unsigned event)
{
	struct apr_svc_ch_dev *apr_ch = priv;
	int len, r_len, sz;
	int pkt_cnt = 0;
	unsigned long flags;

	pr_debug("event = %d\n", event);
	switch (event) {
	case SMD_EVENT_DATA:
		pkt_cnt = 0;
		spin_lock_irqsave(&apr_ch->lock, flags);
check_pending:
		len = smd_read_avail(apr_ch->ch);
		if (len < 0) {
			pr_err("apr_tal: Invalid Read Event :%d\n", len);
			spin_unlock_irqrestore(&apr_ch->lock, flags);
			return;
		}
		sz = smd_cur_packet_size(apr_ch->ch);
		if (sz < 0) {
			/* NOTE(review): the message says "zero" but this
			 * branch actually fires on a negative size. */
			pr_debug("pkt size is zero\n");
			spin_unlock_irqrestore(&apr_ch->lock, flags);
			return;
		}
		/* Nothing consumed yet and nothing pending: fall through to
		 * the write-availability wakeup below. */
		if (!len && !sz && !pkt_cnt)
			goto check_write_avail;
		if (!len) {
			pr_debug("len = %d pkt_cnt = %d\n", len, pkt_cnt);
			spin_unlock_irqrestore(&apr_ch->lock, flags);
			return;
		}
		r_len = smd_read_from_cb(apr_ch->ch, apr_ch->data, len);
		if (len != r_len) {
			pr_err("apr_tal: Invalid Read\n");
			spin_unlock_irqrestore(&apr_ch->lock, flags);
			return;
		}
		pkt_cnt++;
		pr_debug("%d %d %d\n", len, sz, pkt_cnt);
		if (apr_ch->func)
			apr_ch->func(apr_ch->data, r_len, apr_ch->priv);
		/* Loop until the channel is fully drained. */
		goto check_pending;
check_write_avail:
		if (smd_write_avail(apr_ch->ch))
			wake_up(&apr_ch->wait);
		spin_unlock_irqrestore(&apr_ch->lock, flags);
		break;
	case SMD_EVENT_OPEN:
		pr_debug("apr_tal: SMD_EVENT_OPEN\n");
		apr_ch->smd_state = 1;
		wake_up(&apr_ch->wait);
		break;
	case SMD_EVENT_CLOSE:
		pr_debug("apr_tal: SMD_EVENT_CLOSE\n");
		break;
	}
}
static void qmi_read_work(struct work_struct *ws) { //struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work); //struct smd_channel *ch = ctxt->ch; unsigned char buf[QMI_MAX_PACKET]; struct qmi_ctxt *ctxt; int sz; uint32_t chnum; for (;;) { sz = smd_cur_packet_size(ctrl_ch); if (sz == 0) break; if (sz < smd_read_avail(ctrl_ch)) break; if (sz > QMI_MAX_PACKET) { smd_read(ctrl_ch, NULL, sz); continue; } if (smd_read(ctrl_ch, buf, sz) != sz) { printk(KERN_ERR "qmi: not enough data?!\n"); continue; } DBG("packet: %d\n", sz); // print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, sz); if (sz <= 4) { DBG("packet size less 4\n"); continue; } chnum = *(uint32_t*)&buf[sz - 4]; DBG("chnum = %d\n", chnum); /* interface selector must be 1 */ if (buf[0] != 0x01) continue; if (qmi_device0.ch_num == chnum) ctxt = &qmi_device0; else if (qmi_device1.ch_num == chnum) ctxt = &qmi_device1; else if (qmi_device2.ch_num == chnum) ctxt = &qmi_device2; else { DBG("bad chnum %d\n", chnum); continue; } qmi_process_qmux(ctxt, buf + 1, sz - 1 - 4); } }
/* Called in soft-irq context */
/*
 * Pull complete downlink packets off the rmnet SMD channel and inject
 * them into the network stack.  Packets larger than 1514 bytes are
 * discarded via a NULL-buffer smd_read() at the bottom of the loop
 * (ptr == 0 means "discard").
 */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0)
			break;
		/* Packet not fully arrived yet; wait for next notify. */
		if (smd_read_avail(p->ch) < sz)
			break;
		if (sz > 1514) {
			pr_err("rmnet_recv() discarding %d len\n", sz);
			/* Fall through with ptr == 0 so the read below
			 * discards the oversized packet. */
			ptr = 0;
		} else {
			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
			if (skb == NULL) {
				pr_err("rmnet_recv() cannot allocate skb\n");
				/* out of memory, reschedule a later attempt */
				smd_net_data_tasklet.data = (unsigned long)dev;
				tasklet_schedule(&smd_net_data_tasklet);
				break;
			} else {
				skb->dev = dev;
				skb_reserve(skb, NET_IP_ALIGN);
				ptr = skb_put(skb, sz);
				wake_lock_timeout(&p->wake_lock, HZ / 2);
				if (smd_read(p->ch, ptr, sz) != sz) {
					pr_err("rmnet_recv() smd lied about avail?!");
					ptr = 0;
					dev_kfree_skb_irq(skb);
				} else {
					skb->protocol = eth_type_trans(skb, dev);
					if (count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
						p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
						p->stats.rx_packets++;
						p->stats.rx_bytes += skb->len;
					}
					/* Deliver to the network stack. */
					netif_rx(skb);
				}
				continue;
			}
		}
		/* ptr == 0 here: discard the packet from the FIFO. */
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("rmnet_recv() smd lied about avail?!");
	}
}
static void smd_net_data_handler(unsigned long arg) { struct net_device *dev = (struct net_device *) arg; struct smm6260net_private *p = netdev_priv(dev); struct sk_buff *skb; void *ptr = 0; int sz; int err; for (;;) { sz = smd_cur_packet_size(p->ch); if (sz == 0) break; if (smd_read_avail(p->ch) < sz) break; if (sz > SMM6260_NET_DEFAULT_MTU) { ptr = 0; //pr_err("rmnet_recv() discarding %d len\n", sz); }else{ skb = dev_alloc_skb(sz); if (skb == NULL) { //pr_err("smm6260net_recv() cannot allocate skb\n"); } else { skb->dev = dev; ptr = skb_put(skb, sz); wake_lock_timeout(&p->wake_lock, HZ / 2); if (smd_read(p->ch, ptr, sz) != sz) { //pr_err("smm6260net_recv() smd lied about avail?!"); ptr = 0; dev_kfree_skb_irq(skb); } else { skb->protocol = htons(ETH_P_IP);//eth_type_trans(skb, dev); if(count_this_packet(skb)) { /* update out statistics */ #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += smm6260net_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } skb_reset_mac_header(skb); netif_rx(skb); //pr_info("%s: smm6260net_recv() size=%d", p->chname, skb->len); } continue; } } if (smd_read(p->ch, ptr, sz) != sz) pr_err("rmnet_recv() smd lied about avail?!"); } }
/*
 * All SMD notifications and messages from Sensors on ADSP are
 * received by this function
 */
void sns_ocmem_smd_notify_data(void *data, unsigned int event)
{
	int sz;

	switch (event) {
	case SMD_EVENT_DATA:
		pr_debug("%s: Received SMD event Data\n", __func__);
		sz = smd_cur_packet_size(sns_ctl.smd_ch);
		/* Kick the reader only when a whole packet has arrived. */
		if (sz > 0 && smd_read_avail(sns_ctl.smd_ch) >= sz)
			queue_work(sns_ctl.smd_wq, &sns_ctl.smd_read_work);
		break;
	case SMD_EVENT_OPEN:
		pr_debug("%s: Received SMD event Open\n", __func__);
		break;
	case SMD_EVENT_CLOSE:
		pr_debug("%s: Received SMD event Close\n", __func__);
		break;
	}
}
static void nmea_notify(void *priv, unsigned event) { switch (event) { case SMD_EVENT_DATA: { int sz; sz = smd_cur_packet_size(nmea_devp->ch); if ((sz > 0) && (sz <= smd_read_avail(nmea_devp->ch))) queue_work(nmea_wq, &nmea_work); break; } case SMD_EVENT_OPEN: printk(KERN_INFO "nmea: smd opened\n"); break; case SMD_EVENT_CLOSE: printk(KERN_INFO "nmea: smd closed\n"); break; } }
static int smd_vt_release(struct inode *ip, struct file *fp) { int r = 0; int sz; unsigned long flags; printk(KERN_INFO "smd_vt_release\n"); mutex_lock(&smd_vt_ch_lock); smd_vt_devp->open_count--; if ((smd_vt_devp->open_count == 0) && (smd_vt_devp->ch != 0)) { mutex_lock(&smd_vt_rx_buf_lock); spin_lock_irqsave(&smd_vt_read_lock, flags); smd_vt_devp->read_avail = 0; spin_unlock_irqrestore(&smd_vt_read_lock, flags); sz = smd_cur_packet_size(smd_vt_devp->ch); while((sz != 0) && (sz <= smd_read_avail(smd_vt_devp->ch))) { if (sz > MAX_RX_BUF_SIZE) { smd_read(smd_vt_devp->ch, smd_vt_devp->rx_buf, MAX_RX_BUF_SIZE); sz= sz -MAX_RX_BUF_SIZE; } else{ smd_read(smd_vt_devp->ch, smd_vt_devp->rx_buf, sz); sz =0; } } mutex_unlock(&smd_vt_rx_buf_lock); r = smd_close(smd_vt_devp->ch); smd_vt_devp->ch = 0; smd_vt_devp->open_flag=0; } mutex_unlock(&smd_vt_ch_lock); return r; }
static void smd_net_notify(void *_dev, unsigned event) { struct rmnet_private *p = netdev_priv((struct net_device *)_dev); if (event != SMD_EVENT_DATA) return; spin_lock(&p->lock); if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) tasklet_hi_schedule(&p->tsklt); spin_unlock(&p->lock); if (smd_read_avail(p->ch) && (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) { smd_net_data_tasklet.data = (unsigned long) _dev; tasklet_schedule(&smd_net_data_tasklet); } }
static void wcn36xx_smd_work(struct work_struct *work) { int msg_len; int avail; void *msg; int ret; struct wcn36xx *wcn = container_of(work, struct wcn36xx, smd_work); if (!wcn) return; while (1) { msg_len = smd_cur_packet_size(wcn->smd_ch); if (0 == msg_len) { complete(&wcn->smd_compl); return; } avail = smd_read_avail(wcn->smd_ch); if (avail < msg_len) { complete(&wcn->smd_compl); return; } msg = kmalloc(msg_len, GFP_KERNEL); if (NULL == msg) { complete(&wcn->smd_compl); return; } ret = smd_read(wcn->smd_ch, msg, msg_len); if (ret != msg_len) { complete(&wcn->smd_compl); return; } wcn36xx_smd_rsp_process(wcn, msg, msg_len); kfree(msg); } }
/*
 * SMD notify callback for the WCNSS control channel used by diag:
 * schedules the reader for complete packets and the mask-update work
 * on channel open.
 */
void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event)
{
	int avail, pkt_sz;

	if (!(driver->ch_wcnss_cntl))
		return;

	switch (event) {
	case SMD_EVENT_DATA:
		avail = smd_read_avail(driver->ch_wcnss_cntl);
		pkt_sz = smd_cur_packet_size(driver->ch_wcnss_cntl);
		/* Read only when the whole packet has arrived. */
		if (avail > 0 && avail == pkt_sz)
			queue_work(driver->diag_wq,
				   &(driver->diag_read_smd_wcnss_cntl_work));
		else
			pr_debug("diag: incomplete pkt on WCNSS CNTL ch\n");
		break;
	case SMD_EVENT_OPEN:
		queue_work(driver->diag_cntl_wq,
			   &(driver->diag_wcnss_mask_update_work));
		break;
	}
}
static void smd_net_notify(void *_dev, unsigned event) { struct rmnet_private *p = netdev_priv((struct net_device *)_dev); switch (event) { case SMD_EVENT_DATA: spin_lock(&p->lock); if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) { smd_disable_read_intr(p->ch); tasklet_hi_schedule(&p->tsklt); } spin_unlock(&p->lock); if (smd_read_avail(p->ch) && (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) { smd_net_data_tasklet.data = (unsigned long) _dev; tasklet_schedule(&smd_net_data_tasklet); } break; case SMD_EVENT_OPEN: DBG0("%s: opening SMD port\n", __func__); netif_carrier_on(_dev); if (netif_queue_stopped(_dev)) { DBG0("%s: re-starting if queue\n", __func__); netif_wake_queue(_dev); } break; case SMD_EVENT_CLOSE: DBG0("%s: closing SMD port\n", __func__); netif_carrier_off(_dev); break; } }
static void smd_net_notify(void *_dev, unsigned event) { struct rmnet_private *p = netdev_priv((struct net_device *)_dev); int space; if (event != SMD_EVENT_DATA) return; spin_lock(&p->lock); if (p->skb && ((space=smd_write_avail(p->ch)) >= p->skb->len)) { smd_disable_read_intr(p->ch); pr_warn("warn: notify write resources on ch %s, available %d, needed %d\n", p->chname, space, p->skb->len); tasklet_hi_schedule(&p->tsklt); } spin_unlock(&p->lock); if (smd_read_avail(p->ch) && (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) { smd_net_data_tasklet.data = (unsigned long) _dev; tasklet_schedule(&smd_net_data_tasklet); } }
/*
 * Work function: copy each complete packet from the LGE SMD channel
 * into psmd_device->rx_buff and wake the blocked reader.
 *
 * NOTE(review): sz is never checked against the size of rx_buff before
 * the smd_read() below; if the remote end can send a packet larger
 * than rx_buff this overflows the buffer — confirm the buffer size and
 * add a bound check.
 */
static void smd_lge_read_work(struct work_struct *ws)
{
	int sz;

	SMD_LGE_DBG("%s \n", __func__);
	for (;;) {
		/* The channel may be torn down while work is pending. */
		if (psmd_device->ch == 0) {
			SMD_LGE_INFO("%s : psmd_device->ch is NULL \n",
					__func__);
			break;
		}
		sz = smd_cur_packet_size(psmd_device->ch);
		SMD_LGE_DBG("%s : current packet size = %d\n", __func__, sz);
		if (sz == 0) {
			/* Nothing pending. */
			SMD_LGE_DBG("%s : current packet size = %d\n",
					__func__, sz);
			break;
		}
		if (sz > smd_read_avail(psmd_device->ch)) {
			/* Packet incomplete; wait for the next notify. */
			SMD_LGE_DBG("%s : current packet size > read_avail \n",
					__func__);
			break;
		}
		SMD_LGE_DBG("%s : smd_read_avail = %d\n", __func__,
				smd_read_avail(psmd_device->ch));
		if (smd_read(psmd_device->ch, psmd_device->rx_buff, sz)
				!= sz) {
			SMD_LGE_DBG("%s : read failed\n", __func__);
			continue;
		}
		/* Publish the byte count and wake the reader. */
		psmd_device->read_byte = sz;
		wake_up_interruptible(&smd_lge_wait_queue);
	}
}
static void ssm_app_modem_work_fn(struct work_struct *work) { int sz, rc; struct ssm_common_msg pkt; struct ssm_driver *ssm; ssm = container_of(work, struct ssm_driver, ipc_work); mutex_lock(&ssm->mutex); sz = smd_cur_packet_size(ssm->ch); if ((sz < SSM_MSG_FIELD_LEN) || (sz > ATOM_MSG_LEN)) { dev_dbg(ssm_drv->dev, "Garbled message size\n"); goto unlock; } if (smd_read_avail(ssm->ch) < sz) { dev_err(ssm_drv->dev, "SMD error data in channel\n"); goto unlock; } if (smd_read(ssm->ch, ssm->smd_buffer, sz) != sz) { dev_err(ssm_drv->dev, "Incomplete data\n"); goto unlock; } rc = decode_packet(ssm->smd_buffer, &pkt); if (rc < 0) { dev_err(ssm_drv->dev, "Corrupted header\n"); goto unlock; } process_message(pkt, ssm); unlock: mutex_unlock(&ssm->mutex); }
/* Called in soft-irq context */
/*
 * Pull complete downlink packets off the rmnet SMD channel, tag them
 * for IP or Ethernet mode based on p->operation_mode, and inject them
 * into the network stack.
 */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0)
			break;
		/* Packet not fully arrived yet; wait for next notify. */
		if (smd_read_avail(p->ch) < sz)
			break;
		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
			pr_err("[%s] rmnet_recv() cannot allocate skb\n",
					dev->name);
			/* out of memory, reschedule a later attempt */
			smd_net_data_tasklet.data = (unsigned long)dev;
			tasklet_schedule(&smd_net_data_tasklet);
			break;
		} else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);
			wake_lock_timeout(&p->wake_lock, HZ / 2);
			if (smd_read(p->ch, ptr, sz) != sz) {
				pr_err("[%s] rmnet_recv() smd lied about avail?!",
						dev->name);
				ptr = 0;
				dev_kfree_skb_irq(skb);
			} else {
				/* Handle Rx frame format */
				/* Re-read the mode under the lock; it can be
				 * changed concurrently via ioctl. */
				spin_lock_irqsave(&p->lock, flags);
				opmode = p->operation_mode;
				spin_unlock_irqrestore(&p->lock, flags);
				if (RMNET_IS_MODE_IP(opmode)) {
					/* Driver in IP mode */
					skb->protocol =
						rmnet_ip_type_trans(skb, dev);
				} else {
					/* Driver in Ethernet mode */
					skb->protocol =
						eth_type_trans(skb, dev);
				}
				if (RMNET_IS_MODE_IP(opmode) ||
						count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
					p->wakeups_rcv +=
						rmnet_cause_wakeup(p);
#endif
					p->stats.rx_packets++;
					p->stats.rx_bytes += skb->len;
				}
				DBG1("[%s] Rx packet #%lu len=%d\n",
						dev->name, p->stats.rx_packets,
						skb->len);

				/* Deliver to network stack */
				netif_rx(skb);
			}
			continue;
		}
		/* NOTE(review): unreachable — the if-branch above breaks and
		 * the else-branch continues, so this discard read never
		 * runs; it is kept byte-identical for reference. */
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("[%s] rmnet_recv() smd lied about avail?!",
					dev->name);
	}
}
/* Called in soft-irq context */
/*
 * ZTE variant of the rmnet rx handler: pull complete downlink packets
 * off the SMD channel, enforce an MTU-based size limit and inject the
 * packets into the network stack.  ptr == 0 at the trailing smd_read()
 * means "discard the packet".
 */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
//	unsigned long flags;
//	int max_package_size;
	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0)
			break;
		/* Packet not fully arrived yet; wait for next notify. */
		if (smd_read_avail(p->ch) < sz)
			break;
//ZTE_RIL_WANGCHENG_20110425 start
#ifdef CONFIG_ZTE_PLATFORM
		/* ZTE platforms allow packets up to max(mtu,
		 * RMNET_DEFAULT_MTU_LEN) (+ETH_HLEN in Ethernet mode). */
		if (RMNET_IS_MODE_IP(opmode) ?
		    (sz > ((dev->mtu > RMNET_DEFAULT_MTU_LEN) ?
			   dev->mtu : RMNET_DEFAULT_MTU_LEN)) :
		    (sz > (((dev->mtu > RMNET_DEFAULT_MTU_LEN) ?
			    dev->mtu : RMNET_DEFAULT_MTU_LEN) + ETH_HLEN))) {
#else
		if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
		    (sz > (dev->mtu + ETH_HLEN))) {
#endif
			pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
			       sz, RMNET_IS_MODE_IP(opmode) ?
			       dev->mtu : (dev->mtu + ETH_HLEN));
			/* Discard via the trailing read below. */
			ptr = 0;
		} else {
			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
			if (skb == NULL) {
				/* NOTE(review): ptr is not reset here, so the
				 * trailing discard read reuses the pointer
				 * from a previous iteration — which may point
				 * into an skb already given to netif_rx().
				 * Confirm and set ptr = 0 on this path. */
				pr_err("rmnet_recv() cannot allocate skb\n");
			} else {
				skb->dev = dev;
				skb_reserve(skb, NET_IP_ALIGN);
				ptr = skb_put(skb, sz);
				wake_lock_timeout(&p->wake_lock, HZ / 2);
				if (smd_read(p->ch, ptr, sz) != sz) {
					pr_err("rmnet_recv() smd lied about avail?!");
					ptr = 0;
					dev_kfree_skb_irq(skb);
				} else {
					/* Handle Rx frame format */
					//spin_lock_irqsave(&p->lock, flags);
					//opmode = p->operation_mode;
					//spin_unlock_irqrestore(&p->lock, flags);
					if (RMNET_IS_MODE_IP(opmode)) {
						/* Driver in IP mode */
						skb->protocol =
							rmnet_ip_type_trans(skb, dev);
					} else {
						/* Driver in Ethernet mode */
						skb->protocol =
							eth_type_trans(skb, dev);
					}
					if (RMNET_IS_MODE_IP(opmode) ||
					    count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
						p->wakeups_rcv +=
							rmnet_cause_wakeup(p);
#endif
						p->stats.rx_packets++;
						p->stats.rx_bytes += skb->len;
					}
					netif_rx(skb);
				}
				continue;
			}
		}
		/* ptr == 0 here: discard the packet from the FIFO. */
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("rmnet_recv() smd lied about avail?!");
	}
}
//ZTE_RIL_RJG_20101103 end

/* Tasklet used to run the rx handler outside interrupt context. */
static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

/*
 * Write one uplink skb to the SMD channel, prepending a QMI QoS header
 * when the driver is in QoS mode.  Always consumes the skb.  Returns 0.
 */
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	int smd_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	smd_ret = smd_write(ch, skb->data, skb->len);
	if (smd_ret != skb->len) {
		pr_err("%s: smd_write returned error %d", __func__, smd_ret);
		goto xmit_out;
	}

	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}

xmit_out:
	/* data xmited, safe to release skb */
	dev_kfree_skb_irq(skb);
	return 0;
}

/*
 * Tasklet body: retransmit the parked skb and restart the tx queue
 * once the SMD FIFO has room for it.
 */
static void _rmnet_resume_flow(unsigned long param)
{
	struct net_device *dev = (struct net_device *)param;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned long flags;

	/* xmit and enable the flow only once even if
	   multiple tasklets were scheduled by smd_net_notify */
	spin_lock_irqsave(&p->lock, flags);
	if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
		skb = p->skb;
		p->skb = NULL;
		spin_unlock_irqrestore(&p->lock, flags);
		_rmnet_xmit(skb, dev);
		netif_wake_queue(dev);
	} else
		spin_unlock_irqrestore(&p->lock, flags);
}

/* Drop the PIL reference taken by msm_rmnet_load_modem(). */
static void msm_rmnet_unload_modem(void *pil)
{
	if (pil)
		pil_put(pil);
}

/*
 * Load the modem firmware via PIL and optionally wait (up to
 * msm_rmnet_modem_wait seconds) for the rmnet port to come up.
 * Returns the PIL handle or an ERR_PTR on failure.
 */
static void *msm_rmnet_load_modem(struct net_device *dev)
{
	void *pil;
	int rc;
	struct rmnet_private *p = netdev_priv(dev);

	pil = pil_get("modem");
	if (IS_ERR(pil))
		pr_err("%s: modem load failed\n", __func__);
	else if (msm_rmnet_modem_wait) {
		rc = wait_for_completion_interruptible_timeout(
			&p->complete,
			msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
		if (!rc)
			rc = -ETIMEDOUT;
		if (rc < 0) {
			pr_err("%s: wait for rmnet port failed %d\n",
			       __func__, rc);
			msm_rmnet_unload_modem(pil);
			pil = ERR_PTR(rc);
		}
	}

	return pil;
}
/* Called in soft-irq context */ static void smd_net_data_handler(unsigned long arg) { struct net_device *dev = (struct net_device *) arg; struct rmnet_private *p = netdev_priv(dev); struct sk_buff *skb; void *ptr = 0; int sz; u32 opmode = p->operation_mode; unsigned long flags; for (;;) { sz = smd_cur_packet_size(p->ch); if (sz == 0) break; if (smd_read_avail(p->ch) < sz) break; if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) : (sz > (dev->mtu + ETH_HLEN))) { pr_err("rmnet_recv() discarding %d len (%d mtu)\n", sz, RMNET_IS_MODE_IP(opmode) ? dev->mtu : (dev->mtu + ETH_HLEN)); ptr = 0; } else { skb = dev_alloc_skb(sz + NET_IP_ALIGN); if (skb == NULL) { pr_err("rmnet_recv() cannot allocate skb\n"); } else { skb->dev = dev; skb_reserve(skb, NET_IP_ALIGN); ptr = skb_put(skb, sz); wake_lock_timeout(&p->wake_lock, HZ / 2); if (smd_read(p->ch, ptr, sz) != sz) { pr_err("rmnet_recv() smd lied about avail?!"); ptr = 0; dev_kfree_skb_irq(skb); } else { /* Handle Rx frame format */ spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_IP(opmode)) { /* Driver in IP mode */ skb->protocol = rmnet_ip_type_trans(skb, dev); } else { /* Driver in Ethernet mode */ skb->protocol = eth_type_trans(skb, dev); } if (RMNET_IS_MODE_IP(opmode) || count_this_packet(ptr, skb->len)) { #if 0 p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } netif_rx(skb); } continue; } } if (smd_read(p->ch, ptr, sz) != sz) pr_err("rmnet_recv() smd lied about avail?!"); } }
/*
 * Read as many complete diag packets as fit into 'buf' from the SMD
 * channel behind 'ctxt', waiting (interruptibly) for data to arrive.
 * On success the bytes are reported via diagfwd_channel_read_done();
 * all failure paths report a zero-length read.
 *
 * @ctxt:    opaque pointer to the diag_smd_info for the channel
 * @buf:     destination buffer
 * @buf_len: capacity of 'buf' in bytes
 *
 * Returns 0 on success, -EIO/-ERESTARTSYS/-EINVAL on failure.
 */
static int diag_smd_read(void *ctxt, unsigned char *buf, int buf_len)
{
	int pkt_len = 0;
	int err = 0;
	int total_recd_partial = 0;
	int total_recd = 0;
	uint8_t buf_full = 0;
	unsigned char *temp_buf = NULL;
	uint32_t read_len = 0;
	struct diag_smd_info *smd_info = NULL;

	if (!ctxt || !buf || buf_len <= 0)
		return -EIO;

	smd_info = (struct diag_smd_info *)ctxt;
	if (!smd_info->hdl || !smd_info->inited ||
	    !atomic_read(&smd_info->opened))
		return -EIO;

	/*
	 * Always try to read the data if notification is received from smd
	 * In case if packet size is 0 release the wake source hold earlier
	 */
	err = wait_event_interruptible(smd_info->read_wait_q,
				       (smd_info->hdl != NULL) &&
				       (atomic_read(&smd_info->opened) == 1));
	if (err) {
		/* Interrupted by a signal: report an empty read. */
		diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
		return -ERESTARTSYS;
	}

	/*
	 * In this case don't reset the buffers as there is no need to further
	 * read over peripherals. Also release the wake source hold earlier.
	 */
	if (atomic_read(&smd_info->diag_state) == 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "%s closing read thread. diag state is closed\n",
			 smd_info->name);
		diag_ws_release();
		return 0;
	}

	if (!smd_info->hdl || !atomic_read(&smd_info->opened)) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "%s stopping read, hdl: %p, opened: %d\n",
			 smd_info->name, smd_info->hdl,
			 atomic_read(&smd_info->opened));
		goto fail_return;
	}

	/* Outer loop: one iteration per SMD packet; inner loop gathers a
	 * single packet that may arrive in several partial chunks. */
	do {
		total_recd_partial = 0;
		temp_buf = buf + total_recd;
		pkt_len = smd_cur_packet_size(smd_info->hdl);
		if (pkt_len <= 0)
			break;
		/* Next packet would overflow the caller's buffer; stop and
		 * flag so the remainder is picked up by the work queue. */
		if (total_recd + pkt_len > buf_len) {
			buf_full = 1;
			break;
		}
		while (total_recd_partial < pkt_len) {
			read_len = smd_read_avail(smd_info->hdl);
			if (!read_len) {
				/* Nothing buffered yet: block until more of
				 * the packet arrives or the channel closes. */
				wait_event_interruptible(smd_info->read_wait_q,
					((atomic_read(&smd_info->opened)) &&
					 smd_read_avail(smd_info->hdl)));
				if (!smd_info->hdl ||
				    !atomic_read(&smd_info->opened)) {
					DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
						 "%s exiting from wait",
						 smd_info->name);
					goto fail_return;
				}
			}
			/* More data buffered than the declared packet size
			 * indicates channel corruption; bail out. */
			if (pkt_len < read_len)
				goto fail_return;
			smd_read(smd_info->hdl, temp_buf, read_len);
			total_recd_partial += read_len;
			total_recd += read_len;
			temp_buf += read_len;
		}
	} while (pkt_len > 0);

	/* Leftover data (or a full buffer) is handed to the read worker. */
	if ((smd_info->type == TYPE_DATA && pkt_len) || buf_full)
		err = queue_work(smd_info->wq, &(smd_info->read_work));

	if (total_recd > 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
			 smd_info->name, total_recd);
		diagfwd_channel_read_done(smd_info->fwd_ctxt,
					  buf, total_recd);
	} else {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
			 smd_info->name, total_recd);
		goto fail_return;
	}
	return 0;

fail_return:
	diagfwd_channel_read_done(smd_info->fwd_ctxt, buf, 0);
	return -EINVAL;
}
/**
 @brief Callback function for serializing WCTS Read processing in the
        control context

 @param pWCTSCb WCTS Control Block

 @see
 @return void
*/
static void
WCTS_PALReadCallback
(
   WCTS_ControlBlockType*  pWCTSCb
)
{
   void* pPacket;
   int pktLen;
   int availLen;
   int readLen;

   /*--------------------------------------------------------------------
     Sanity check the control block before touching it
     --------------------------------------------------------------------*/
   if ((NULL == pWCTSCb) || (WCTS_CB_MAGIC != pWCTSCb->wctsMagic)) {
      WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "WCTS_PALReadCallback: Invalid parameter received.");
      return;
   }

   /* drain every packet that has fully arrived on the channel */
   while (1) {
      pktLen = smd_cur_packet_size(pWCTSCb->wctsChannel);
      if (0 == pktLen) {
         /* nothing pending */
         return;
      }

      availLen = smd_read_avail(pWCTSCb->wctsChannel);
      if (availLen < pktLen) {
         /* packet incomplete; SMD will notify again when it is ready */
         return;
      }

      pPacket = wpalMemoryAllocate(pktLen);
      if (NULL == pPacket) {
         WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
                    "WCTS_PALReadCallback: Memory allocation failure");
         WPAL_ASSERT(0);
         return;
      }

      readLen = smd_read(pWCTSCb->wctsChannel, pPacket, pktLen);
      if (readLen != pktLen) {
         /*Some problem, do not forward it to WDI.*/
         WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
                    "WCTS_PALReadCallback: Failed to read data from SMD");
         wpalMemoryFree(pPacket);
         WPAL_ASSERT(0);
         return;
      }

      /* forward the complete message to the registered handler */
      pWCTSCb->wctsRxMsgCB((WCTS_HandleType)pWCTSCb, pPacket, pktLen,
                           pWCTSCb->wctsRxMsgCBData);

      /* the handler has consumed the message; release the buffer */
      wpalMemoryFree(pPacket);
   }
} /*WCTS_PALReadCallback*/
/*
 * Read handler for the video-telephony device node: waits (up to 5 s)
 * for a complete packet on the SMD channel and copies it to the
 * caller, discarding oversized or inconsistent data.
 *
 * NOTE(review): smd_read() is called directly on the __user pointer
 * 'buf' — confirm an smd_read_user_buffer()/copy_to_user() path is not
 * required here; writing through an unverified user pointer from
 * kernel context is unsafe.
 */
static ssize_t smd_vt_read(struct file *fp, char __user *buf,
			size_t count, loff_t *pos)
{
	int r = 0;
	int bytes_read = 0;
	unsigned long flags;
	int sz, avail;

	/* Clamp the request to the driver's chunk size. */
	if(count > MAX_RX_BUF_SIZE)
		count = MAX_RX_BUF_SIZE;

	for (;;) {
		mutex_lock(&smd_vt_rx_buf_lock);
		/* Device closed or torn down while we were waiting. */
		if( (smd_vt_devp->open_count == 0) ||
			(smd_vt_devp->ch == 0) ||
			(smd_vt_devp->open_flag==0) ||
			(count <=0)){
			mutex_unlock(&smd_vt_rx_buf_lock);
			return 0;
		}
		/* Consume the notify flag before inspecting the channel. */
		spin_lock_irqsave(&smd_vt_read_lock, flags);
		smd_vt_devp->read_avail = 0;
		spin_unlock_irqrestore(&smd_vt_read_lock, flags);

		sz = smd_cur_packet_size(smd_vt_devp->ch);
		if (sz > MAX_RX_BUF_SIZE) { // garbage data
			smd_read(smd_vt_devp->ch, 0, sz);
			mutex_unlock(&smd_vt_rx_buf_lock);
			continue;
		}
		avail = smd_read_avail(smd_vt_devp->ch);
		if((sz == avail) && (sz != 0)){
			/* Complete packet ready: copy out and finish. */
			if( sz > count)
				sz = count; // output bffer size check.
			bytes_read = smd_read(smd_vt_devp->ch, buf, sz);
			break;
		}else if( sz < avail ){ // garbage data
			smd_read(smd_vt_devp->ch, 0, avail);
			mutex_unlock(&smd_vt_rx_buf_lock);
			continue;
		}
		// else is data not ready wait data.
		mutex_unlock(&smd_vt_rx_buf_lock);
		r = wait_event_interruptible_timeout(smd_vt_wait_queue,
				smd_vt_devp->read_avail,HZ*5);
		if (r <= 0) { // timeout or error
			/* qualify error message */
			if (r != -ERESTARTSYS) {
				/* we get this anytime a signal comes in */
				printk(KERN_ERR "ERROR:%s:%i:%s: "
					"wait_event_interruptible ret %i\n",
					__FILE__,
					__LINE__,
					__func__,
					r
					);
			}
			return 0; //r;
		}
	}
	/* Release the lock still held from the successful-read break. */
	mutex_unlock(&smd_vt_rx_buf_lock);

	return bytes_read;
}