static void sdio_mux_write_data(struct work_struct *work)
{
	int i, rc, reschedule = 0;
	struct sk_buff *skb;
	unsigned long flags;

	for (i = 0; i < 8; i++) {
		spin_lock_irqsave(&sdio_ch[i].lock, flags);
		if (sdio_ch_is_local_open(i) && sdio_ch[i].skb) {
			skb = sdio_ch[i].skb;
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
			DBG("[lte] %s: writing for ch %d\n", __func__, i);
			rc = sdio_mux_write(skb);
			if (rc == -EAGAIN) {
				reschedule = 1;
			} else if (!rc) {
				spin_lock_irqsave(&sdio_ch[i].lock, flags);
				sdio_ch[i].skb = NULL;
				sdio_ch[i].write_done(sdio_ch[i].priv, skb);
				spin_unlock_irqrestore(&sdio_ch[i].lock,
						       flags);
			}
		} else {
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
		}
	}

	/* probably should use delayed work */
	if (reschedule)
		queue_work(sdio_mux_workqueue, &work_sdio_mux_write);
}
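/*
 * For context: a sketch of the per-channel bookkeeping the version above
 * implies. The field names (lock, skb, priv, write_done) come from the
 * code itself, but the exact layout of the driver's channel struct is an
 * assumption for illustration only.
 */
struct sdio_ch_info {
	spinlock_t lock;	/* guards the fields below */
	struct sk_buff *skb;	/* at most one pending write per channel */
	void *priv;		/* client cookie passed back in callbacks */
	void (*write_done)(void *priv, struct sk_buff *skb);
};

static struct sdio_ch_info sdio_ch[8];	/* matches the hard-coded loop bound */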
static void sdio_mux_write_data(struct work_struct *work)
{
	int i, rc, reschedule = 0;
	struct sk_buff *skb;
	unsigned long flags;

	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, flags);
		if (sdio_ch_is_local_open(i) && sdio_ch[i].skb) {
			skb = sdio_ch[i].skb;
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
			DBG("%s: writing for ch %d\n", __func__, i);
			rc = sdio_mux_write(skb);
			if (rc == -EAGAIN || rc == -ENOMEM) {
				reschedule = 1;
			} else if (!rc) {
				spin_lock_irqsave(&sdio_ch[i].lock, flags);
				sdio_ch[i].skb = NULL;
				sdio_ch[i].write_done(sdio_ch[i].priv, skb);
				spin_unlock_irqrestore(&sdio_ch[i].lock,
						       flags);
			}
		} else {
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
		}
	}

	if (reschedule)
		queue_delayed_work(sdio_mux_write_workqueue,
				   &delayed_work_sdio_mux_write,
				   msecs_to_jiffies(250));
}
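/*
 * The delayed-work variant above assumes a workqueue and a delayed_work
 * object declared elsewhere in the file. A minimal sketch of those
 * declarations, assuming <linux/workqueue.h>: only the two identifiers come
 * from the snippet; the single-threaded queue and the init-time creation
 * shown here are illustrative assumptions.
 */
static struct workqueue_struct *sdio_mux_write_workqueue;
static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);

static int sdio_mux_write_wq_init(void)
{
	/* single-threaded, so writes for all channels stay serialized */
	sdio_mux_write_workqueue =
		create_singlethread_workqueue("sdio_dmux_write");
	return sdio_mux_write_workqueue ? 0 : -ENOMEM;
}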
static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);
			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {
			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			/* notify the client that its write completed */
			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			do {
				ch_id = ((struct sdio_mux_hdr *)
					 skb->data)->ch_id;
				spin_lock(&sdio_ch[ch_id].lock);
				sdio_ch[ch_id].num_tx_pkts--;
				spin_unlock(&sdio_ch[ch_id].lock);
				dev_kfree_skb_any(skb);
			} while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the skb and
			 * reschedule for the other skbs
			 */
			pr_err("%s: sdio_mux_write error %d for ch %d, skb=%p\n",
			       __func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250));
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		/* hand the dropped skb back to the client */
		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
				sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}
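/*
 * A hedged sketch of the client side of the write_done contract enforced by
 * sdio_mux_write_data() above: once the worker has written (or dropped) a
 * queued skb, ownership returns to the client through its write_done
 * callback. The msm_sdio_dmux_open()/msm_sdio_dmux_write() entry points and
 * their signatures are assumptions here, not confirmed by the code above.
 */
struct demo_client {
	uint32_t ch_id;
	atomic_t in_flight;	/* skbs handed to the mux, not yet returned */
};

static void demo_write_done(void *priv, struct sk_buff *skb)
{
	struct demo_client *c = priv;

	/* the mux hands the skb back exactly once; the client frees it */
	dev_kfree_skb_any(skb);
	atomic_dec(&c->in_flight);
}

static void demo_receive(void *priv, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);	/* this demo ignores inbound data */
}

static int demo_send(struct demo_client *c, struct sk_buff *skb)
{
	int rc;

	/* count the skb as in flight before the mux can complete it */
	atomic_inc(&c->in_flight);
	rc = msm_sdio_dmux_write(c->ch_id, skb);
	if (rc)
		atomic_dec(&c->in_flight);	/* never queued; still ours */
	return rc;
}

static int demo_open(struct demo_client *c)
{
	return msm_sdio_dmux_open(c->ch_id, c, demo_receive, demo_write_done);
}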