/**
@brief		function for the @b init_comm method in a link_device instance

@param ld	the pointer to a link_device instance
@param iod	the pointer to an io_device instance
*/
static int mem_init_comm(struct link_device *ld, struct io_device *iod)
{
	struct mem_link_device *mld = to_mem_link_device(ld);
	struct modem_ctl *mc = ld->mc;
	struct io_device *check_iod;
	int id = iod->id;
	int fmt2rfs = (SIPC5_CH_ID_RFS_0 - SIPC5_CH_ID_FMT_0);
	int rfs2fmt = (SIPC5_CH_ID_FMT_0 - SIPC5_CH_ID_RFS_0);

	if (atomic_read(&mld->cp_boot_done))
		return 0;

#ifdef CONFIG_LINK_CONTROL_MSG_IOSM
	if (mld->iosm) {
		struct sbd_link_device *sl = &mld->sbd_link_dev;
		struct sbd_ipc_device *sid = sbd_ch2dev(sl, iod->id);

		if (atomic_read(&sid->config_done)) {
			tx_iosm_message(mld, IOSM_A2C_OPEN_CH, (u32 *)&id);
			return 0;
		} else {
			mif_err("%s is not a configured channel\n", iod->name);
			return -ENODEV;
		}
	}
#endif

	switch (id) {
	case SIPC5_CH_ID_FMT_0 ... SIPC5_CH_ID_FMT_9:
		check_iod = link_get_iod_with_channel(ld, (id + fmt2rfs));
		if (check_iod ? atomic_read(&check_iod->opened) : true) {
			mif_err("%s: %s->INIT_END->%s\n",
				ld->name, iod->name, mc->name);
			send_ipc_irq(mld, cmd2int(CMD_INIT_END));
			atomic_set(&mld->cp_boot_done, 1);
		} else {
			mif_err("%s is not opened yet\n", check_iod->name);
		}
		break;

	case SIPC5_CH_ID_RFS_0 ... SIPC5_CH_ID_RFS_9:
		check_iod = link_get_iod_with_channel(ld, (id + rfs2fmt));
		if (check_iod) {
			if (atomic_read(&check_iod->opened)) {
				mif_err("%s: %s->INIT_END->%s\n",
					ld->name, iod->name, mc->name);
				send_ipc_irq(mld, cmd2int(CMD_INIT_END));
				atomic_set(&mld->cp_boot_done, 1);
			} else {
				mif_err("%s is not opened yet\n",
					check_iod->name);
			}
		}
		break;

	default:
		break;
	}

	return 0;
}
static bool rild_ready(struct link_device *ld)
{
	struct io_device *fmt_iod;
	struct io_device *rfs_iod;
	int fmt_opened;
	int rfs_opened;

	fmt_iod = link_get_iod_with_channel(ld, SIPC5_CH_ID_FMT_0);
	if (!fmt_iod) {
		mif_err("%s: No FMT io_device\n", ld->name);
		return false;
	}

	rfs_iod = link_get_iod_with_channel(ld, SIPC5_CH_ID_RFS_0);
	if (!rfs_iod) {
		mif_err("%s: No RFS io_device\n", ld->name);
		return false;
	}

	fmt_opened = atomic_read(&fmt_iod->opened);
	rfs_opened = atomic_read(&rfs_iod->opened);
	mif_info("%s: %s.opened=%d, %s.opened=%d\n",
		 ld->name, fmt_iod->name, fmt_opened, rfs_iod->name, rfs_opened);

	if (fmt_opened > 0 && rfs_opened > 0)
		return true;

	return false;
}
void mif_netif_stop(struct link_device *ld)
{
	struct io_device *iod;

	if (ld->ipc_version < SIPC_VER_50)
		iod = link_get_iod_with_channel(ld, 0x20 | RMNET0_CH_ID);
	else
		iod = link_get_iod_with_channel(ld, RMNET0_CH_ID);

	if (iod)
		iodevs_for_each(iod->msd, iodev_netif_stop, 0);
}
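/*
 * For reference: a minimal sketch of the per-device callbacks that
 * iodevs_for_each() is assumed to invoke for every netdev-backed io_device
 * bound to the same modem_shared data (msd). The callback signature and the
 * io_typ/ndev fields are assumptions made for illustration; the real
 * iodev_netif_stop()/iodev_netif_wake() live elsewhere in the driver.
 */
static void iodev_netif_stop_sketch(struct io_device *iod, void *args)
{
	/* Pause TX on this io_device's network interface, if it has one. */
	if (iod->io_typ == IODEV_NET && iod->ndev)
		netif_stop_queue(iod->ndev);
}

static void iodev_netif_wake_sketch(struct io_device *iod, void *args)
{
	/* Resume TX on this io_device's network interface, if it has one. */
	if (iod->io_typ == IODEV_NET && iod->ndev)
		netif_wake_queue(iod->ndev);
}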
/**
@brief		pass a socket buffer to the DEMUX layer

Invokes the recv_skb_single method in the io_device instance to deliver the
IPC message carried in each skb.

@param mld	the pointer to a mem_link_device instance
@param skb	the pointer to an sk_buff instance
*/
static void pass_skb_to_demux(struct mem_link_device *mld, struct sk_buff *skb)
{
	struct link_device *ld = &mld->link_dev;
	struct io_device *iod;
	int ch;
	int ret;

	ch = sipc5_get_ch_id(skb->data);
	iod = link_get_iod_with_channel(ld, ch);
	if (unlikely(!iod)) {
		mif_err("%s: ERR! No IO device for Ch.%d\n", ld->name, ch);
		dev_kfree_skb_any(skb);
		mem_forced_cp_crash(mld);
		return;
	}

	/* Record the RX IO device into the "iod" field in &skb->cb */
	skbpriv(skb)->iod = iod;

	/* Record the RX link device into the "ld" field in &skb->cb */
	skbpriv(skb)->ld = ld;

#ifdef DEBUG_MODEM_IF_LINK_RX
	log_ipc_pkt(sipc5_get_ch_id(skb->data), LINK, RX, skb, true, true);
#endif

	ret = iod->recv_skb_single(iod, ld, skb);
	if (unlikely(ret < 0)) {
		mif_err("%s: ERR! %s->recv_skb_single fail (%d)\n",
			ld->name, iod->name, ret);
		dev_kfree_skb_any(skb);
	}
}
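/*
 * For reference: a minimal sketch of what skbpriv() is assumed to resolve to.
 * The driver stores per-skb routing metadata in the 48-byte skb->cb control
 * block; the struct name, macro name, and field set shown here are
 * illustrative, not taken from this file.
 */
struct mif_skb_priv_sketch {
	struct io_device *iod;		/* RX IO device that will consume the skb */
	struct link_device *ld;		/* link device the skb arrived on */
	bool lnk_hdr;			/* whether an SIPC link header is present */
	u8 sipc_ch;			/* SIPC channel ID parsed from the frame */
};

#define skbpriv_sketch(skb)	((struct mif_skb_priv_sketch *)&((skb)->cb))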
/**
@brief		function for the @b init_comm method in a link_device instance

@param ld	the pointer to a link_device instance
@param iod	the pointer to an io_device instance
*/
static int mem_init_comm(struct link_device *ld, struct io_device *iod)
{
	struct mem_link_device *mld = to_mem_link_device(ld);
	struct modem_ctl *mc = ld->mc;
	struct io_device *check_iod;
	int id = iod->id;
	int fmt2rfs = (SIPC5_CH_ID_RFS_0 - SIPC5_CH_ID_FMT_0);
	int rfs2fmt = (SIPC5_CH_ID_FMT_0 - SIPC5_CH_ID_RFS_0);

	if (atomic_read(&mld->cp_boot_done))
		return 0;

	switch (id) {
	case SIPC5_CH_ID_FMT_0 ... SIPC5_CH_ID_FMT_9:
		check_iod = link_get_iod_with_channel(ld, (id + fmt2rfs));
		if (check_iod ? atomic_read(&check_iod->opened) : true) {
			mif_err("%s: %s->%s: Send 0xC2 (INIT_END)\n",
				ld->name, iod->name, mc->name);
			send_ipc_irq(mld, cmd2int(CMD_INIT_END));
			atomic_set(&mld->cp_boot_done, 1);
		} else {
			mif_err("%s is not opened yet\n", check_iod->name);
		}
		break;

	case SIPC5_CH_ID_RFS_0 ... SIPC5_CH_ID_RFS_9:
		check_iod = link_get_iod_with_channel(ld, (id + rfs2fmt));
		if (check_iod) {
			if (atomic_read(&check_iod->opened)) {
				mif_err("%s: %s->%s: Send 0xC2 (INIT_END)\n",
					ld->name, iod->name, mc->name);
				send_ipc_irq(mld, cmd2int(CMD_INIT_END));
				atomic_set(&mld->cp_boot_done, 1);
			} else {
				mif_err("%s is not opened yet\n",
					check_iod->name);
			}
		}
		break;

	default:
		break;
	}

	return 0;
}
static int rx_frames_from_dev(struct mem_link_device *mld,
			      struct mem_ipc_device *dev)
{
	struct link_device *ld = &mld->link_dev;
	struct sk_buff_head *skb_rxq = dev->skb_rxq;
	unsigned int qsize = get_rxq_buff_size(dev);
	unsigned int in = get_rxq_head(dev);
	unsigned int out = get_rxq_tail(dev);
	unsigned int size = circ_get_usage(qsize, in, out);
	int rcvd = 0;

	if (unlikely(circ_empty(in, out)))
		return 0;

	while (rcvd < size) {
		struct sk_buff *skb;
		u8 ch;
		struct io_device *iod;

		skb = rxq_read(mld, dev, in);
		if (!skb)
			break;

		ch = sipc5_get_ch(skb->data);
		iod = link_get_iod_with_channel(ld, ch);
		if (!iod) {
			mif_err("%s: ERR! No IOD for CH.%d\n", ld->name, ch);
			dev_kfree_skb_any(skb);
			mem_forced_cp_crash(mld);
			break;
		}

		/* Record the IO device and the link device into &skb->cb */
		skbpriv(skb)->iod = iod;
		skbpriv(skb)->ld = ld;

		skbpriv(skb)->lnk_hdr = iod->link_header;
		skbpriv(skb)->sipc_ch = ch;

		/*
		 * $rcvd must be accumulated here, because $skb can be freed
		 * in pass_skb_to_demux().
		 */
		rcvd += skb->len;

		if (likely(sipc_ps_ch(ch)))
			skb_queue_tail(skb_rxq, skb);
		else
			pass_skb_to_demux(mld, skb);
	}

	if (rcvd < size)
		mif_err("%s: WARN! rcvd %d < size %d\n", ld->name, rcvd, size);

	return rcvd;
}
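/*
 * For reference: a plausible implementation of the circular-queue helpers
 * used in rx_frames_from_dev(), assuming "in"/"out" are byte offsets into a
 * ring of qsize bytes. These definitions are illustrative sketches; the real
 * circ_empty()/circ_get_usage() helpers live elsewhere in the driver.
 */
static inline int circ_empty_sketch(unsigned int in, unsigned int out)
{
	/* The ring is empty when the head and tail offsets coincide. */
	return (in == out);
}

static inline unsigned int circ_get_usage_sketch(unsigned int qsize,
						 unsigned int in,
						 unsigned int out)
{
	/* Bytes available to read: head minus tail, wrapping at qsize. */
	return (in >= out) ? (in - out) : (qsize - out + in);
}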
void mif_netif_wake(struct link_device *ld)
{
	struct io_device *iod;

	/*
	 * If ld->suspend_netif_tx is true, a SUSPEND flow-control command was
	 * received from CP, so MIF must wait for a RESUME command from CP.
	 */
	if (ld->suspend_netif_tx) {
		mif_info("%s: waiting for FLOW_CTRL_RESUME\n", ld->name);
		return;
	}

	if (ld->ipc_version < SIPC_VER_50)
		iod = link_get_iod_with_channel(ld, 0x20 | RMNET0_CH_ID);
	else
		iod = link_get_iod_with_channel(ld, RMNET0_CH_ID);

	if (iod)
		iodevs_for_each(iod->msd, iodev_netif_wake, 0);
}
void resume_net_ifaces(struct link_device *ld)
{
	struct io_device *iod;
	unsigned long flags;

	spin_lock_irqsave(&ld->netif_lock, flags);

	if (atomic_read(&ld->netif_stopped) == 0)
		goto exit;

	iod = link_get_iod_with_channel(ld, SIPC_CH_ID_PDP_0);
	if (iod)
		iodevs_for_each(iod->msd, iodev_netif_wake, 0);

	atomic_set(&ld->netif_stopped, 0);

exit:
	spin_unlock_irqrestore(&ld->netif_lock, flags);
}
void resume_net_iface(struct link_device *ld, unsigned int channel)
{
	struct io_device *iod;
	unsigned long flags;

	spin_lock_irqsave(&ld->netif_lock, flags);

	if (!test_bit(channel, &ld->netif_stop_mask)) {
		mif_err("channel %u was already resumed!\n", channel);
		goto exit;
	}

	iod = link_get_iod_with_channel(ld, channel);
	if (iod)
		iodev_netif_wake(iod, 0);

	clear_bit(channel, &ld->netif_stop_mask);

exit:
	spin_unlock_irqrestore(&ld->netif_lock, flags);
}
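/*
 * For reference: a sketch of the suspend counterpart implied by
 * resume_net_iface() above. The function name and the exact placement in the
 * flow-control path are assumptions for illustration; it only shows how
 * netif_stop_mask is expected to be used on the stop side (set the
 * per-channel bit, then stop that channel's interface).
 */
static void suspend_net_iface_sketch(struct link_device *ld,
				     unsigned int channel)
{
	struct io_device *iod;
	unsigned long flags;

	spin_lock_irqsave(&ld->netif_lock, flags);

	if (test_bit(channel, &ld->netif_stop_mask))
		goto exit;	/* already suspended */

	iod = link_get_iod_with_channel(ld, channel);
	if (iod)
		iodev_netif_stop(iod, 0);

	set_bit(channel, &ld->netif_stop_mask);

exit:
	spin_unlock_irqrestore(&ld->netif_lock, flags);
}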
static void dpram_trigger_crash(struct dpram_link_device *dpld)
{
	struct link_device *ld = &dpld->ld;
	struct io_device *iod;
	int i;

	for (i = 0; i < dpld->max_ipc_dev; i++) {
		mif_info("%s: purging %s_skb_txq\n",
			 ld->name, get_dev_name(i));
		skb_queue_purge(ld->skb_txq[i]);
	}

	iod = link_get_iod_with_format(ld, IPC_FMT);
	iod->modem_state_changed(iod, STATE_CRASH_EXIT);

	iod = link_get_iod_with_format(ld, IPC_BOOT);
	iod->modem_state_changed(iod, STATE_CRASH_EXIT);

	iod = link_get_iod_with_channel(ld, PS_DATA_CH_0);
	if (iod)
		iodevs_for_each(iod->msd, iodev_netif_stop, 0);
}
/**
@brief		set up an SBD RB

(1) build an SBD RB instance in the kernel space\n
(2) allocate an SBD array in SHMEM\n
(3) allocate a data buffer array in SHMEM if possible\n
*/
static int setup_sbd_rb(struct sbd_link_device *sl, struct sbd_ring_buffer *rb,
			enum direction dir, struct sbd_link_attr *link_attr)
{
	unsigned int alloc_size;
	int i;

	rb->sl = sl;
	rb->lnk_hdr = link_attr->lnk_hdr;
	rb->more = false;
	rb->total = 0;
	rb->rcvd = 0;

	/* Initialize an SBD RB instance in the kernel space. */
	rb->id = link_attr->id;
	rb->ch = link_attr->ch;
	rb->dir = dir;
	rb->len = link_attr->rb_len[dir];
	rb->buff_size = link_attr->buff_size[dir];
	rb->payload_offset = 0;

	/* Prepare the array of pointers to the data buffer for each SBD. */
	alloc_size = (rb->len * sizeof(u8 *));
	rb->buff = kmalloc(alloc_size, GFP_ATOMIC);
	if (!rb->buff)
		return -ENOMEM;

	/*
	 * (1) Allocate an array of data buffers in SHMEM.
	 * (2) Register the address of each data buffer.
	 */
	alloc_size = (rb->len * rb->buff_size);
	rb->buff_rgn = (u8 *)buff_alloc(sl, alloc_size);
	if (!rb->buff_rgn)
		return -ENOMEM;

	for (i = 0; i < rb->len; i++)
		rb->buff[i] = rb->buff_rgn + (i * rb->buff_size);

#if 0
	mif_err("RB[%d:%d][%s] buff_rgn {addr:0x%08X offset:%d size:%d}\n",
		rb->id, rb->ch, udl_str(dir), (int)rb->buff_rgn,
		calc_offset(rb->buff_rgn, sl->shmem), alloc_size);
#endif

	/* Prepare the SBD array in SHMEM. */
	rb->rp = &sl->rp[rb->dir][rb->id];
	rb->wp = &sl->wp[rb->dir][rb->id];

	alloc_size = (rb->len * sizeof(u32));

	rb->addr_v = (u32 *)desc_alloc(sl, alloc_size);
	if (!rb->addr_v)
		return -ENOMEM;

	rb->size_v = (u32 *)desc_alloc(sl, alloc_size);
	if (!rb->size_v)
		return -ENOMEM;

	/* Register each data buffer to the corresponding SBD. */
	for (i = 0; i < rb->len; i++) {
		rb->addr_v[i] = calc_offset(rb->buff[i], sl->shmem);
		rb->size_v[i] = 0;
	}

	rb->iod = link_get_iod_with_channel(sl->ld, rb->ch);
	rb->ld = sl->ld;

	return 0;
}
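/*
 * For reference: a minimal sketch of how a producer might enqueue one frame
 * into an SBD ring buffer prepared by setup_sbd_rb(). The helper name, the
 * modulo read/write-pointer convention, and the "keep one slot empty" full
 * check are assumptions made for illustration; the real TX/RX paths for SBD
 * rings live elsewhere in the driver.
 */
static int sbd_rb_enqueue_sketch(struct sbd_ring_buffer *rb,
				 const void *data, unsigned int len)
{
	unsigned int wp = *rb->wp;
	unsigned int next = (wp + 1) % rb->len;

	if (len > rb->buff_size)
		return -EINVAL;		/* frame does not fit into one SBD buffer */

	if (next == *rb->rp)
		return -ENOSPC;		/* ring is full: one slot is kept empty */

	/* Copy the payload into the pre-registered SHMEM buffer for slot wp. */
	memcpy(rb->buff[wp], data, len);
	rb->size_v[wp] = len;		/* publish the payload length for this SBD */

	*rb->wp = next;			/* advance the write pointer last */
	return 0;
}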