/*
 * debugfs read handler: dumps the header and frame contents of every
 * active SBD ring buffer (both directions per channel) as text.
 *
 * Returns the number of bytes copied to @user_buf, or 0 when the file's
 * private data is missing or the temporary buffer cannot be allocated.
 */
static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char *buf;
	ssize_t size;
	u32 i, dir, len = 0;
	struct mem_link_device *mld;
	struct sbd_link_device *sl;

	/*
	 * Validate private_data BEFORE dereferencing it.  The previous
	 * code computed &mld->sbd_link_dev first, which defeated the NULL
	 * test (and "!sl" was dead anyway: address-of is never NULL).
	 */
	mld = file->private_data;
	if (!mld)
		return 0;
	sl = &mld->sbd_link_dev;

	buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
	if (!buf) {
		mif_err("not enough memory...\n");
		return 0;
	}

	for (i = 0; i < sl->num_channels; i++) {
		for (dir = UL; dir <= DL; dir++) {
			struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, dir);

			/* Skip the rest of this channel once it is invalid */
			if (!rb || !sipc_major_ch(rb->ch))
				break;

			/*
			 * scnprintf (unlike snprintf) returns the number of
			 * bytes actually stored, so len can never grow past
			 * DEBUGFS_BUF_SIZE and the unsigned remaining-space
			 * argument cannot underflow once the buffer fills.
			 */
			len += scnprintf((buf + len),
					 (DEBUGFS_BUF_SIZE - len),
					 ">> ch:%d len:%d size:%d [%s w:%d r:%d]\n",
					 rb->ch, rb->len, rb->buff_size,
					 udl_str(rb->dir), *rb->wp, *rb->rp);

			len += dump_rb_frame((buf + len),
					     (DEBUGFS_BUF_SIZE - len), rb);

			len += scnprintf((buf + len),
					 (DEBUGFS_BUF_SIZE - len), "\n");
		}
	}

	len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");

	mif_info("Total output length = %d\n", len);

	size = simple_read_from_buffer(user_buf, count, ppos, buf, len);

	kfree(buf);
	return size;
}
/*
 * Drain every non-empty SBD RX ring buffer.  PS (packet-service)
 * channels take the network RX path; everything else takes the generic
 * IPC RX path.  @mst is unused here — presumably kept so this handler
 * matches a common callback signature (TODO confirm against callers).
 */
static void recv_sbd_ipc_frames(struct mem_link_device *mld,
				struct mem_snapshot *mst)
{
	struct sbd_link_device *sl = &mld->sbd_link_dev;
	int id;

	for (id = 0; id < sl->num_channels; id++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, id, RX);

		/* Nothing queued on this ring — move on */
		if (unlikely(rb_empty(rb)))
			continue;

		if (unlikely(!sipc_ps_ch(rb->ch)))
			rx_ipc_frames_from_rb(rb);
		else
			rx_net_frames_from_rb(rb);
	}
}
/*
 * Drop all pending TX skbs so no stale frames survive across a link
 * reset: the skb_q of every SBD TX ring (when SBD IPC is enabled) and
 * the skb_txq of every legacy IPC device (IPC_FMT, IPC_RAW, ...).
 */
static inline void purge_txq(struct mem_link_device *mld)
{
	struct link_device *ld = &mld->link_dev;
	int idx;

	/* SBD mode: flush each TX ring buffer's pending-skb queue */
	if (ld->sbd_ipc) {
		struct sbd_link_device *sl = &mld->sbd_link_dev;

		for (idx = 0; idx < sl->num_channels; idx++)
			skb_queue_purge(&sbd_id2rb(sl, idx, TX)->skb_q);
	}

	/* Legacy mode: flush each IPC device's TX queue */
	for (idx = 0; idx < MAX_SIPC5_DEVICES; idx++)
		skb_queue_purge(mld->dev[idx]->skb_txq);
}
/**
 * @brief Receive all pending IPC frames from every SBD RX ring buffer.
 *
 * Iterates over all SBD channels and drains each non-empty RX ring:
 * - PS (packet-service/network) channels are handed to the net RX path,
 *   or scheduled onto the channel's per-iod NAPI context when
 *   CONFIG_LINK_DEVICE_NAPI is enabled.
 * - All other channels go through the generic IPC RX path.
 *
 * NOTE(review): the previous comment here described a 4-step
 * REQ_ACK/RES_ACK handshake and an @p mst parameter; this function does
 * neither — it only drains the RX rings.  Also note a same-named static
 * variant exists elsewhere in this file (different signature); verify
 * the two are never compiled into the same translation unit.
 *
 * @param mld the pointer to a mem_link_device instance
 */
void recv_sbd_ipc_frames(struct mem_link_device *mld)
{
	struct sbd_link_device *sl = &mld->sbd_link_dev;
	int i;

	for (i = 0; i < sl->num_channels; i++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, RX);

		/* Skip rings with no pending data */
		if (unlikely(rb_empty(rb)))
			continue;

		if (likely(sipc_ps_ch(rb->ch))) {
#ifdef CONFIG_LINK_DEVICE_NAPI
			/* Defer net RX to the channel's NAPI poller */
			//mld->link_dev.disable_irq(&mld->link_dev);
			if (napi_schedule_prep(&rb->iod->napi))
				__napi_schedule(&rb->iod->napi);
#else
			rx_net_frames_from_rb(rb, 0);
#endif
		} else {
			rx_ipc_frames_from_rb(rb);
		}
	}
}
/*
 * hrtimer callback that flushes queued TX frames into every SBD TX ring.
 *
 * Behavior:
 *  - Aborts (without re-arming) if the IPC link is not active; the
 *    ipc_active() check is made under mc->lock, released immediately.
 *  - If a link_active hook exists and reports the link down, the timer
 *    re-arms to retry later without touching the rings.
 *  - For each channel, tx_frames_to_rb() moves skbs into the ring:
 *      * -EBUSY/-ENOSPC: ring temporarily full — re-arm and remember to
 *        signal CP (MASK_SEND_DATA) for what was already queued;
 *      * any other negative error: ring is corrupt — notify modemctl of
 *        MDM_CRASH_INVALID_RB and stop rescheduling;
 *      * > 0 frames sent: CP must be interrupted (MASK_SEND_DATA).
 *    Leftover skbs in rb->skb_q also force a re-arm.
 *  - If nothing forced a re-arm, a second pass re-arms anyway while any
 *    TX ring still holds unconsumed data (CP hasn't drained it yet).
 *  - The IRQ to CP is sent under mc->lock, re-checking ipc_active()
 *    in case the link died between the two locked sections.
 *
 * Always returns HRTIMER_NORESTART; rescheduling is done explicitly via
 * hrtimer_start() with a TX_PERIOD_MS relative timeout.
 */
static enum hrtimer_restart sbd_tx_timer_func(struct hrtimer *timer)
{
	struct mem_link_device *mld;
	struct link_device *ld;
	struct modem_ctl *mc;
	struct sbd_link_device *sl;
	int i;
	bool need_schedule;
	u16 mask;
	unsigned long flags = 0;

	mld = container_of(timer, struct mem_link_device, sbd_tx_timer);
	ld = &mld->link_dev;
	mc = ld->mc;
	sl = &mld->sbd_link_dev;

	need_schedule = false;
	mask = 0;

	/* Bail out early if the IPC link is down (checked under mc->lock) */
	spin_lock_irqsave(&mc->lock, flags);
	if (unlikely(!ipc_active(mld))) {
		spin_unlock_irqrestore(&mc->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&mc->lock, flags);

	/* Optional hook: link not yet usable — retry on the next tick */
	if (mld->link_active) {
		if (!mld->link_active(mld)) {
			need_schedule = true;
			goto exit;
		}
	}

	for (i = 0; i < sl->num_channels; i++) {
		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, TX);
		int ret;

		ret = tx_frames_to_rb(rb);

		if (unlikely(ret < 0)) {
			if (ret == -EBUSY || ret == -ENOSPC) {
				/* Ring full: retry later, but still kick CP
				 * for frames already placed in the ring.
				 */
				need_schedule = true;
				mask = MASK_SEND_DATA;
				continue;
			} else {
				/* Unrecoverable ring state: report crash and
				 * stop the retry loop entirely.
				 */
				modemctl_notify_event(MDM_CRASH_INVALID_RB);
				need_schedule = false;
				goto exit;
			}
		}

		if (ret > 0)
			mask = MASK_SEND_DATA;

		/* Frames still waiting in the software queue */
		if (!skb_queue_empty(&rb->skb_q))
			need_schedule = true;
	}

	if (!need_schedule) {
		/* Keep the timer alive while CP still has unread TX data */
		for (i = 0; i < sl->num_channels; i++) {
			struct sbd_ring_buffer *rb;

			rb = sbd_id2rb(sl, i, TX);
			if (!rb_empty(rb)) {
				need_schedule = true;
				break;
			}
		}
	}

	if (mask) {
		/* Re-check link state under the lock before interrupting CP */
		spin_lock_irqsave(&mc->lock, flags);
		if (unlikely(!ipc_active(mld))) {
			spin_unlock_irqrestore(&mc->lock, flags);
			need_schedule = false;
			goto exit;
		}
		send_ipc_irq(mld, mask2int(mask));
		spin_unlock_irqrestore(&mc->lock, flags);
	}

exit:
	if (need_schedule) {
		ktime_t ktime = ktime_set(0, ms2ns(TX_PERIOD_MS));

		hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
	}

	return HRTIMER_NORESTART;
}