static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}
static void sdio_mux_read_data(struct work_struct *work) { struct sk_buff *skb_mux; void *ptr = 0; int sz, rc, len = 0; struct sdio_mux_hdr *hdr; DBG("[lte] %s: reading\n", __func__); /* should probably have a separate read lock */ mutex_lock(&sdio_mux_lock); sz = sdio_read_avail(sdio_mux_ch); DBG("[lte] %s: read avail %d\n", __func__, sz); if (sz <= 0) { if (sz) pr_err("[lte] Error - %s: read avail failed %d\n", __func__, sz); mutex_unlock(&sdio_mux_lock); return; } /* net_ip_aling is probably not required */ if (sdio_partial_pkt.valid) len = sdio_partial_pkt.skb->len; /* If allocation fails attempt to get a smaller chunk of mem */ do { skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL); if (skb_mux) break; DBG("[lte] %s: cannot allocate skb of size:%d + " "%d (NET_SKB_PAD)\n", __func__, sz + NET_IP_ALIGN + len, NET_SKB_PAD); /* the skb structure adds NET_SKB_PAD bytes to the memory * request, which may push the actual request above PAGE_SIZE * in that case, we need to iterate one more time to make sure * we get the memory request under PAGE_SIZE */ if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) { pr_info("[lte] %s: allocation failed. retry later\n", __func__); mutex_unlock(&sdio_mux_lock); queue_delayed_work(sdio_mux_workqueue, &work_sdio_mux_read, msecs_to_jiffies(SDIO_OOM_RETRY_DELAY_MS)); return; } sz /= 2; } while (1); skb_reserve(skb_mux, NET_IP_ALIGN + len); ptr = skb_put(skb_mux, sz); /* half second wakelock is fine? */ wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2); rc = sdio_read(sdio_mux_ch, ptr, sz); DBG("[lte] %s: read %d\n", __func__, rc); if (rc) { pr_err("[lte] Error - %s: sdio read failed %d\n", __func__, rc); dev_kfree_skb_any(skb_mux); mutex_unlock(&sdio_mux_lock); queue_delayed_work(sdio_mux_workqueue, &work_sdio_mux_read, 0); return; } mutex_unlock(&sdio_mux_lock); DBG_INC_READ_CNT(sz); DBG("[lte] %s: head %p data %p tail %p end %p len %d\n", __func__, skb_mux->head, skb_mux->data, skb_mux->tail, skb_mux->end, skb_mux->len); /* HTC */ dbg_dump_buf("SDIO_RMNET->RD#", skb_mux->data, skb_mux->len); /* move to a separate function */ /* probably do skb_pull instead of pointer adjustment */ hdr = handle_sdio_partial_pkt(skb_mux); while ((void *)hdr < (void *)skb_mux->tail) { if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) { /* handle partial header */ sdio_mux_save_partial_pkt(hdr, skb_mux); break; } if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) { pr_err("[lte] Error - %s: packet error\n", __func__); break; } hdr = handle_sdio_mux_command(hdr, skb_mux); } dev_kfree_skb_any(skb_mux); DBG("[lte] %s: read done\n", __func__); queue_delayed_work(sdio_mux_workqueue, &work_sdio_mux_read, 0); }
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;
	/* [[ 2011.10.06 leecy: add Qualcomm patch */
	static int workqueue_pinned;

	if (!workqueue_pinned) {
		struct cpumask cpus;

		cpumask_clear(&cpus);
		cpumask_set_cpu(0, &cpus);

		if (sched_setaffinity(current->pid, &cpus))
			pr_err("%s: sdio_dmux set CPU affinity failed\n",
				__func__);
		workqueue_pinned = 1;
	}
	/* 2011.10.06 leecy: add Qualcomm patch ]] */

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* if allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
		skb_mux->head, skb_mux->data, skb_mux->tail,
		skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {
		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
static void sdio_mux_read_data(struct work_struct *work) { struct sk_buff *skb_mux; void *ptr = 0; int sz, rc, len = 0; struct sdio_mux_hdr *hdr; DBG("%s: reading\n", __func__); /* should probably have a separate read lock */ mutex_lock(&sdio_mux_lock); sz = sdio_read_avail(sdio_mux_ch); DBG("%s: read avail %d\n", __func__, sz); if (sz <= 0) { if (sz) pr_err("%s: read avail failed %d\n", __func__, sz); mutex_unlock(&sdio_mux_lock); return; } /* net_ip_aling is probably not required */ if (sdio_partial_pkt.valid) len = sdio_partial_pkt.skb->len; /* if allocation fails attempt to get a smaller chunk of mem */ do { skb_mux = dev_alloc_skb(sz + NET_IP_ALIGN + len); if (skb_mux) break; pr_err("%s: cannot allocate skb of size:%d\n", __func__, sz + NET_IP_ALIGN + len); if (sz + NET_IP_ALIGN + len <= PAGE_SIZE) { pr_err("%s: allocation failed\n", __func__); mutex_unlock(&sdio_mux_lock); return; } sz /= 2; } while (1); skb_reserve(skb_mux, NET_IP_ALIGN + len); ptr = skb_put(skb_mux, sz); /* half second wakelock is fine? */ wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2); rc = sdio_read(sdio_mux_ch, ptr, sz); DBG("%s: read %d\n", __func__, rc); if (rc) { pr_err("%s: sdio read failed %d\n", __func__, rc); dev_kfree_skb_any(skb_mux); mutex_unlock(&sdio_mux_lock); queue_work(sdio_mux_workqueue, &work_sdio_mux_read); return; } mutex_unlock(&sdio_mux_lock); DBG_INC_READ_CNT(sz); DBG("%s: head %p data %p tail %p end %p len %d\n", __func__, skb_mux->head, skb_mux->data, skb_mux->tail, skb_mux->end, skb_mux->len); /* move to a separate function */ /* probably do skb_pull instead of pointer adjustment */ hdr = handle_sdio_partial_pkt(skb_mux); while ((void *)hdr < (void *)skb_mux->tail) { if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) { /* handle partial header */ sdio_mux_save_partial_pkt(hdr, skb_mux); break; } if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) { pr_err("%s: packet error\n", __func__); break; } hdr = handle_sdio_mux_command(hdr, skb_mux); } dev_kfree_skb_any(skb_mux); DBG("%s: read done\n", __func__); queue_work(sdio_mux_workqueue, &work_sdio_mux_read); }