static int port_net_recv_req(struct ccci_port *port, struct ccci_request* req) { struct sk_buff *skb = req->skb; struct net_device *dev = ((struct netdev_entity *)port->private_data)->ndev; unsigned int packet_type; int skb_len = req->skb->len; CCCI_DBG_MSG(port->modem->index, NET, "incomming on CH%d\n", port->rx_ch); list_del(&req->entry); // dequeue from queue's list skb_pull(skb, sizeof(struct ccci_header)); packet_type = skb->data[0] & 0xF0; ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, packet_type); skb_set_mac_header(skb, -ETH_HLEN); skb->dev = dev; if(packet_type == 0x60) { skb->protocol = htons(ETH_P_IPV6); } else { skb->protocol = htons(ETH_P_IP); } skb->ip_summed = CHECKSUM_NONE; #if defined(CCCI_USE_NAPI) || defined(DUM_NAPI) netif_receive_skb(skb); #else netif_rx(skb); #endif dev->stats.rx_packets++; dev->stats.rx_bytes += skb_len; req->policy = NOOP; ccci_free_req(req); wake_lock_timeout(&port->rx_wakelock, HZ); return 0; }
/*
 * ccmni_v2_receive - copy one raw IP packet from the modem buffer into a
 * fresh skb and deliver it to the kernel network stack.
 * @ccmni:     CCMNI instance the packet belongs to (provides dev and channel).
 * @ccmni_ptr: pointer to the raw IP payload received from the modem.
 * @ccmni_len: payload length in bytes; must be positive.
 *
 * Returns 0 on success, -1 on invalid arguments, or
 * -CCCI_ERR_MEM_CHECK_FAIL when skb allocation fails.
 */
static int ccmni_v2_receive(ccmni_v2_instance_t *ccmni, const unsigned char *ccmni_ptr, int ccmni_len)
{
	int packet_type, ret = 0;
	struct sk_buff *skb;
	ccmni_v2_ctl_block_t *ctl_b;
	int md_id;

	/* BUG FIX: the original dereferenced ccmni->owner (for md_id) and
	 * ccmni->channel (in the log message) before/while checking
	 * ccmni == NULL, and after logging it fell through and used the
	 * invalid arguments anyway.  Validate first, then bail out. */
	if (ccmni == NULL)
		return -1;
	ctl_b = (ccmni_v2_ctl_block_t *)ccmni->owner;
	md_id = ctl_b->m_md_id;
	if ((ccmni_ptr == NULL) || (ccmni_len <= 0)) {
		CCCI_MSG_INF(md_id, "net", "CCMNI%d_receive: invalid private data\n", ccmni->channel);
		return -1;
	}

	skb = dev_alloc_skb(ccmni_len);
	if (skb == NULL) {
		CCCI_MSG_INF(md_id, "net", "CCMNI%d Socket buffer allocate fail\n", ccmni->channel);
		return -CCCI_ERR_MEM_CHECK_FAIL;
	}

	/* High nibble of the first byte is the IP version field. */
	packet_type = ccmni_ptr[0] & 0xF0;
	memcpy(skb_put(skb, ccmni_len), ccmni_ptr, ccmni_len);
	/* Fake an ethernet header in the headroom so the stack accepts the frame. */
	ccmni_make_etherframe(skb->data - ETH_HLEN, ccmni->dev->dev_addr, packet_type);
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = ccmni->dev;
	if (packet_type == IPV6_VERSION)
		skb->protocol = htons(ETH_P_IPV6);
	else
		skb->protocol = htons(ETH_P_IP);
	/* Modem already validated the payload; let the stack re-checksum nothing. */
	skb->ip_summed = CHECKSUM_NONE;
	ret = netif_rx(skb);
	CCCI_CCMNI_MSG(md_id, "CCMNI%d invoke netif_rx()=%d\n", ccmni->channel, ret);
	ccmni->dev->stats.rx_packets++;
	ccmni->dev->stats.rx_bytes += ccmni_len;
	CCCI_CCMNI_MSG(md_id, "CCMNI%d rx_pkts=%d, stats_rx_bytes=%d\n", ccmni->channel, \
		ccmni->dev->stats.rx_packets, ccmni->dev->stats.rx_bytes);
	return 0;
}
/*
 * port_net_recv_req - deliver one received modem network packet to the stack
 * (sequence-checking variant).
 * @port: CCCI port the packet arrived on; private_data is a netdev_entity.
 * @req:  CCCI request wrapping the skb; consumed (dequeued and freed) here.
 *
 * Optionally verifies the per-port RX sequence number carried in the
 * ccci_header, strips that header, fakes an ethernet header so the IP stack
 * accepts the frame, delivers via NAPI or netif_rx depending on modem
 * capability, updates RX counters and frees the request.  Returns 0 always.
 */
static int port_net_recv_req(struct ccci_port *port, struct ccci_request* req)
{
	struct sk_buff *skb = req->skb;
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
	struct net_device *dev = nent->ndev;
	unsigned int packet_type;
	/* NOTE(review): measured before the ccci_header is pulled, so rx_bytes
	 * includes the header size — matches the sibling variant's accounting. */
	int skb_len = req->skb->len;
#ifndef FEATURE_SEQ_CHECK_EN
	/* NOTE(review): the software sequence check is compiled in when
	 * FEATURE_SEQ_CHECK_EN is NOT defined — looks inverted; confirm the
	 * macro's meaning before relying on it. */
	/* ccci_h must be read before skb_pull() below discards the header. */
	struct ccci_header *ccci_h = (struct ccci_header*)req->skb->data;
	CCCI_DBG_MSG(port->modem->index, NET, "recv on %s, curr_seq=%d\n", port->name, ccci_h->reserved);
	/* rx_seq_num==0 means "no packet seen yet"; otherwise consecutive
	 * packets must differ by exactly 1 or a loss is reported. */
	if(unlikely(nent->rx_seq_num!=0 && (ccci_h->reserved-nent->rx_seq_num)!=1)) {
		CCCI_ERR_MSG(port->modem->index, NET, "possible packet lost on %s %d->%d\n",
			port->name, nent->rx_seq_num, ccci_h->reserved);
	}
	nent->rx_seq_num = ccci_h->reserved;
#else
	CCCI_DBG_MSG(port->modem->index, NET, "recv on %s\n", port->name);
#endif
	list_del(&req->entry); // dequeue from queue's list
	skb_pull(skb, sizeof(struct ccci_header));
	/* High nibble of the first payload byte is the IP version field. */
	packet_type = skb->data[0] & 0xF0;
	/* Fake an ethernet header in the headroom so the stack accepts the frame. */
	ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, packet_type);
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = dev;
	if(packet_type == 0x60) {
		skb->protocol = htons(ETH_P_IPV6);
	} else {
		skb->protocol = htons(ETH_P_IP);
	}
	skb->ip_summed = CHECKSUM_NONE;
	/* NAPI-capable modems deliver in softirq context; otherwise pick the
	 * context-appropriate netif_rx flavor. */
	if(likely(port->modem->capability & MODEM_CAP_NAPI)) {
		netif_receive_skb(skb);
	} else {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
		if(!in_interrupt()) {
			netif_rx_ni(skb);
		} else {
			netif_rx(skb);
		}
#else
		netif_rx(skb);
#endif
	}
	/* The skb belongs to the network stack from here on; do not touch it. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb_len;
	req->policy = NOOP;
	/* Clear the skb reference: it was handed to the stack, and the freed
	 * request must not keep a dangling pointer to it. */
	req->skb = NULL;
	ccci_free_req(req);
	wake_lock_timeout(&port->rx_wakelock, HZ);
	return 0;
}
/*
 * ccmni_receive - unframe and deliver modem RX data for one CCMNI channel
 * (CCCI v1 path).
 * @ccmni:  CCMNI instance; supplies read/decode buffers, channel and netdev.
 * @length: number of bytes available in ccmni->read_buffer.
 *
 * Runs pfp_unframe() over the raw byte stream, turning it into a list of
 * complete IP packets, and hands each one to the network stack inside a
 * freshly allocated skb.  When pfp_unframe() runs out of complete-packet
 * entries (try_decode_again == 1) the loop re-invokes it on the remaining
 * input.  Returns the total number of input bytes consumed.
 *
 * NOTE(review): pfp_unframe()'s contract (meaning of consumed_length and
 * try_decode_again, ownership of pkt_list entries) is assumed from usage
 * here — confirm against its definition.
 */
static int ccmni_receive(struct ccmni_instance_t *ccmni, int length)
{
	int counter, ret;
	struct packet_info_t packet_info;
	struct complete_ippkt_t *packet;
	struct complete_ippkt_t *processed_packet;
	struct sk_buff *skb;
	struct complete_ippkt_t last_packet = { 0 };
	int offset_put_pkt = 0;      /* write offset into decode_buffer */
	int offset_parse_frame = 0;  /* read offset into read_buffer */
	int packet_type;
	struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
	int md_id = ctl_b->m_md_id;

	CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive() invoke pfp_unframe()\n", ccmni->channel);
	do {
		packet_info = pfp_unframe(ccmni->decode_buffer + offset_put_pkt,
			CCCI1_CCMNI_BUF_SIZE - offset_put_pkt,
			ccmni->read_buffer + offset_parse_frame, length, ccmni->channel);
		packet = packet_info.pkt_list;
		CCCI_CCMNI_MSG(md_id, "CCMNI%d num_complete_pkt=%d after pfp_unframe\n",
			ccmni->channel, packet_info.num_complete_packets);
		/* Deliver every complete packet produced by this unframe pass. */
		for (counter = 0; counter < packet_info.num_complete_packets; counter++) {
			skb = dev_alloc_skb(packet->pkt_size);
			if (skb) {
				/* High nibble of the first byte is the IP version field. */
				packet_type = packet->pkt_data[0] & 0xF0;
				memcpy(skb_put(skb, packet->pkt_size), packet->pkt_data, packet->pkt_size);
				/* Fake an ethernet header in the headroom so the
				 * stack accepts the frame. */
				ccmni_make_etherframe(skb->data - ETH_HLEN, ccmni->dev->dev_addr, packet_type);
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = ccmni->dev;
				if (packet_type == IPV6_VERSION)
					skb->protocol = htons(ETH_P_IPV6);
				else
					skb->protocol = htons(ETH_P_IP);
				skb->ip_summed = CHECKSUM_NONE;
				ret = netif_rx(skb);
				CCCI_CCMNI_MSG(md_id, "CCMNI%d invoke netif_rx()=%d\n", ccmni->channel, ret);
				ccmni->dev->stats.rx_packets++;
				ccmni->dev->stats.rx_bytes += packet->pkt_size;
				CCCI_CCMNI_MSG(md_id, "CCMNI%d rx_pkts=%ld, stats_rx_bytes=%ld\n", ccmni->channel,
					ccmni->dev->stats.rx_packets, ccmni->dev->stats.rx_bytes);
			} else {
				/* Allocation failure drops this packet; the loop
				 * still releases its entry below. */
				CCCI_DBG_MSG(md_id, "net", "CCMNI%d Socket buffer allocate fail\n", ccmni->channel);
			}
			processed_packet = packet;
			/* Remember the last packet: its position sizes the next
			 * decode_buffer write offset on a retry pass. */
			last_packet = *processed_packet;
			packet = packet->next;
			/* Only clear the entry_used flag as 0 */
			release_one_used_complete_ippkt_entry(processed_packet);
		};
		/* Check whether pfp_unframe() must run again because it ran out
		 * of available complete_ippkt entries mid-stream; if so, advance
		 * both offsets past what this pass produced/consumed. */
		if (packet_info.try_decode_again == 1) {
			offset_put_pkt += (last_packet.pkt_data - ccmni->decode_buffer + last_packet.pkt_size);
			offset_parse_frame += packet_info.consumed_length;
		}
	} while (packet_info.try_decode_again == 1);
	/* Account for the final (non-retried) pass, whose consumed_length was
	 * not added inside the loop. */
	offset_parse_frame += packet_info.consumed_length;
	return offset_parse_frame;
}
static int ccmni_rx_callback(int md_id, int rx_ch, struct sk_buff *skb, void *priv_data) { ccmni_ctl_block_t *ctlb = ccmni_ctl_blk[md_id]; // struct ccci_header *ccci_h = (struct ccci_header*)skb->data; ccmni_instance_t *ccmni = NULL; struct net_device *dev = NULL; int pkt_type, skb_len, ccmni_idx; if (unlikely(ctlb == NULL || ctlb->ccci_ops == NULL)) { CCMNI_ERR_MSG(md_id, "invalid CCMNI ctrl/ops struct for RX_CH(%d)\n", rx_ch); dev_kfree_skb(skb); return -1; } ccmni_idx = get_ccmni_idx_from_ch(md_id, rx_ch); if (unlikely(ccmni_idx < 0)) { CCMNI_ERR_MSG(md_id, "CCMNI rx(%d) skb ch error\n", rx_ch); dev_kfree_skb(skb); return -1; } ccmni = ctlb->ccmni_inst[ccmni_idx]; dev = ccmni->dev; // skb_pull(skb, sizeof(struct ccci_header)); pkt_type = skb->data[0] & 0xF0; ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, pkt_type); skb_set_mac_header(skb, -ETH_HLEN); skb->dev = dev; if(pkt_type == 0x60) { skb->protocol = htons(ETH_P_IPV6); } else { skb->protocol = htons(ETH_P_IP); } skb->ip_summed = CHECKSUM_NONE; skb_len = skb->len; if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX)) { CCMNI_INF_MSG(md_id, "[RX]CCMNI%d(rx_ch=%d) recv data_len=%d\n", ccmni_idx, rx_ch, skb->len); } if (unlikely(ccmni_debug_level&CCMNI_DBG_LEVEL_RX_SKB)) { ccmni_dbg_skb_header(ccmni->md_id, false, skb); } if(likely(ctlb->ccci_ops->md_ability & MODEM_CAP_NAPI)) { netif_receive_skb(skb); } else { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) if(!in_interrupt()) { netif_rx_ni(skb); } else { netif_rx(skb); } #else netif_rx(skb); #endif } dev->stats.rx_packets++; dev->stats.rx_bytes += skb_len; wake_lock_timeout(&ctlb->ccmni_wakelock, HZ); return 0; }