static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev)); struct ccci_header *ccci_h; int ret; int skb_len = skb->len; static int tx_busy_retry_cnt; int tx_queue, tx_channel; #ifdef PORT_NET_TRACE unsigned long long send_time = 0; unsigned long long total_time = 0; total_time = sched_clock(); #endif #ifndef FEATURE_SEQ_CHECK_EN struct netdev_entity *nent = (struct netdev_entity *)port->private_data; CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n", port->name, skb_headroom(skb), skb->len, nent->tx_seq_num); #else CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n", port->name, skb_headroom(skb), skb->len); #endif if (unlikely(skb->len > CCCI_NET_MTU)) { CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCCI_NET_MTU, dev->mtu, skb->len); dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } if (unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) { CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n", port->name, skb->len, skb_headroom(skb), dev->hard_header_len); dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } if (unlikely(port->modem->md_state != READY)) { dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } if (likely((port->rx_ch == CCCI_CCMNI1_RX) || (port->rx_ch == CCCI_CCMNI2_RX))) { /* only use on ccmni0 && ccmni1 */ if (unlikely(skb_is_ack(skb))) { tx_channel = port->tx_ch == CCCI_CCMNI1_TX ? 
CCCI_CCMNI1_DL_ACK : CCCI_CCMNI2_DL_ACK; tx_queue = NET_ACK_TXQ_INDEX(port); } else { tx_channel = port->tx_ch; tx_queue = NET_DAT_TXQ_INDEX(port); } } else { tx_channel = port->tx_ch; tx_queue = NET_DAT_TXQ_INDEX(port); } ccci_h = (struct ccci_header *)skb_push(skb, sizeof(struct ccci_header)); ccci_h->channel = tx_channel; ccci_h->data[0] = 0; ccci_h->data[1] = skb->len; /* as skb->len already included ccci_header after skb_push */ #ifndef FEATURE_SEQ_CHECK_EN ccci_h->reserved = nent->tx_seq_num++; #else ccci_h->reserved = 0; #endif #ifdef PORT_NET_TRACE send_time = sched_clock(); #endif ret = port->modem->ops->send_request(port->modem, tx_queue, NULL, skb); #ifdef PORT_NET_TRACE send_time = sched_clock() - send_time; #endif if (ret) { skb_pull(skb, sizeof(struct ccci_header)); /* undo header, in next retry, we'll reserve header again */ goto tx_busy; } dev->stats.tx_packets++; dev->stats.tx_bytes += skb_len; tx_busy_retry_cnt = 0; #ifdef PORT_NET_TRACE total_time = sched_clock() - total_time; trace_port_net_tx(port->modem->index, tx_queue, port->tx_ch, 0, (unsigned int)send_time, (unsigned int)(total_time)); #endif return NETDEV_TX_OK; tx_busy: if (unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) { if ((tx_busy_retry_cnt) % 20000 == 0) CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n", port->name, tx_busy_retry_cnt); tx_busy_retry_cnt++; } else { port->tx_busy_count++; } #ifdef PORT_NET_TRACE trace_port_net_error(port->modem->index, tx_queue, port->tx_ch, port->tx_busy_count, __LINE__); #endif return NETDEV_TX_BUSY; }
int ccmni_send_pkt(int md_id, int tx_ch, void *data) { struct ccci_modem *md = ccci_get_modem_by_id(md_id); struct ccci_port *port = NULL; /* struct ccci_request *req = NULL; */ struct ccci_header *ccci_h; struct sk_buff *skb = (struct sk_buff *)data; int tx_ch_to_port, tx_queue; int ret; #ifdef PORT_NET_TRACE unsigned long long send_time = 0; unsigned long long get_port_time = 0; unsigned long long total_time = 0; total_time = sched_clock(); #endif if (!md) return CCMNI_ERR_TX_INVAL; if (unlikely(md->md_state != READY)) return CCMNI_ERR_MD_NO_READY; if (tx_ch == CCCI_CCMNI1_DL_ACK) tx_ch_to_port = CCCI_CCMNI1_TX; else if (tx_ch == CCCI_CCMNI2_DL_ACK) tx_ch_to_port = CCCI_CCMNI2_TX; else if (tx_ch == CCCI_CCMNI3_DL_ACK) tx_ch_to_port = CCCI_CCMNI3_TX; else tx_ch_to_port = tx_ch; #ifdef PORT_NET_TRACE get_port_time = sched_clock(); #endif port = md->ops->get_port_by_channel(md, tx_ch_to_port); #ifdef PORT_NET_TRACE get_port_time = sched_clock() - get_port_time; #endif if (!port) { CCCI_ERR_MSG(0, NET, "port==NULL\n"); return CCMNI_ERR_TX_INVAL; } /* req_alloc_time=sched_clock(); */ /* req = ccci_alloc_req(OUT, -1, 1, 0); */ /* req_alloc_time=sched_clock()-req_alloc_time; */ /* if(!req) { */ /* return CCMNI_ERR_TX_BUSY; */ /* } */ if (tx_ch == CCCI_CCMNI1_DL_ACK || tx_ch == CCCI_CCMNI2_DL_ACK || tx_ch == CCCI_CCMNI3_DL_ACK) tx_queue = NET_ACK_TXQ_INDEX(port); else tx_queue = NET_DAT_TXQ_INDEX(port); /* req->skb = skb; */ /* req->policy = FREE; */ ccci_h = (struct ccci_header *)skb_push(skb, sizeof(struct ccci_header)); ccci_h = (struct ccci_header *)skb->data; ccci_h->channel = tx_ch; ccci_h->data[0] = 0; ccci_h->data[1] = skb->len; /* as skb->len already included ccci_header after skb_push */ /* #ifndef FEATURE_SEQ_CHECK_EN */ /* ccci_h->reserved = nent->tx_seq_num++; */ /* #else */ ccci_h->reserved = 0; /* #endif */ CCCI_DBG_MSG(md_id, NET, "port %s send txq=%d: %08X, %08X, %08X, %08X\n", port->name, tx_queue, ccci_h->data[0], ccci_h->data[1], ccci_h->channel, 
ccci_h->reserved); #ifdef PORT_NET_TRACE send_time = sched_clock(); #endif ret = port->modem->ops->send_request(port->modem, tx_queue, NULL, skb); #ifdef PORT_NET_TRACE send_time = sched_clock() - send_time; #endif if (ret) { skb_pull(skb, sizeof(struct ccci_header)); /* undo header, in next retry, we'll reserve header again */ ret = CCMNI_ERR_TX_BUSY; } else { ret = CCMNI_ERR_TX_OK; } #ifdef PORT_NET_TRACE if (ret == CCMNI_ERR_TX_OK) { total_time = sched_clock() - total_time; trace_port_net_tx(md_id, tx_queue, tx_ch, (unsigned int)get_port_time, (unsigned int)send_time, (unsigned int)(total_time)); } else { trace_port_net_error(port->modem->index, tx_queue, port->tx_ch, port->tx_busy_count, __LINE__); } #endif return ret; }
/*
 * ccmni_start_xmit - legacy hard_start_xmit variant that wraps the skb in a
 * struct ccci_request before handing it to the modem TX queue.
 *
 * NOTE(review): this file also contains a request-less ccmni_start_xmit with
 * the same name; presumably only one variant is compiled per configuration —
 * confirm against the build config.
 *
 * Ownership: on success the request (policy FREE) owns the skb; on TX-busy
 * the policy is flipped to NOOP before freeing the request so the network
 * stack keeps ownership of the skb and can retry it.
 *
 * Returns NETDEV_TX_OK when the packet was consumed (sent or dropped), or
 * NETDEV_TX_BUSY when the queue is full / no request could be allocated.
 */
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h;
	int ret;
	int skb_len = skb->len;	/* payload length before header push, for stats */
	/* NOTE(review): static, so shared across all CCMNI devices */
	static int tx_busy_retry_cnt = 0;
	int tx_queue, tx_channel;
#ifndef FEATURE_SEQ_CHECK_EN
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
		port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n",
		port->name, skb_headroom(skb), skb->len);
#endif
	/* drop oversized packets */
	if(unlikely(skb->len > CCMNI_MTU)) {
		CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n",
			CCMNI_MTU, dev->mtu, skb->len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* need headroom to push the CCCI header in front of the payload */
	if(unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
		CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n",
			port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* silently drop while the modem is not ready */
	if(unlikely(port->modem->md_state != READY)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	req = ccci_alloc_req(OUT, -1, 1, 0);
	if(req) {
		/* non-CCMNI3 ports may steer pure-ACK packets to the ACK queue */
		if(likely(port->rx_ch != CCCI_CCMNI3_RX)) {
			if(unlikely(skb_is_ack(skb))) {
				tx_channel = port->tx_ch==CCCI_CCMNI1_TX?CCCI_CCMNI1_DL_ACK:CCCI_CCMNI2_DL_ACK;
				tx_queue = NET_ACK_TXQ_INDEX(port);
			} else {
				tx_channel = port->tx_ch;
				tx_queue = NET_DAT_TXQ_INDEX(port);
			}
		} else {
			tx_channel = port->tx_ch;
			tx_queue = NET_DAT_TXQ_INDEX(port);
		}
		req->skb = skb;
		req->policy = FREE;	/* request layer frees the skb on completion */
		ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
		ccci_h->channel = tx_channel;
		ccci_h->data[0] = 0;
		ccci_h->data[1] = skb->len; // as skb->len already included ccci_header after skb_push
#ifndef FEATURE_SEQ_CHECK_EN
		ccci_h->reserved = nent->tx_seq_num++;
#else
		ccci_h->reserved = 0;
#endif
		ret = port->modem->ops->send_request(port->modem, tx_queue, req);
		if(ret) {
			skb_pull(skb, sizeof(struct ccci_header)); // undo header, in next retry, we'll reserve header again
			req->policy = NOOP; // if you return busy, do NOT free skb as network may still use it
			ccci_free_req(req);
			goto tx_busy;
		}
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_len;
		tx_busy_retry_cnt = 0;
	} else {
		CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
		goto tx_busy;
	}
	return NETDEV_TX_OK;
 tx_busy:
	if(unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
		/* rate-limit the busy log to once per 20000 retries */
		if((++tx_busy_retry_cnt)%20000 == 0)
			CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n",
				port->name, tx_busy_retry_cnt);
	} else {
		port->tx_busy_count++;
	}
	return NETDEV_TX_BUSY;
}