static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
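	/* ndo_start_xmit hook for the CCMNI virtual interface: prepend a
	 * ccci_header and hand the skb to the CCCI port layer */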
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h;
	int ret;
	static unsigned int ut_seq_num;	/* statics are zero-initialized */
	int skb_len = skb->len;

	CCCI_DBG_MSG(port->modem->index, NET, "tx skb %p on CH%d, len=%d/%d\n", skb, port->tx_ch, skb_headroom(skb), skb->len);
	if (skb->len > CCMNI_MTU) {
		CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCMNI_MTU, dev->mtu, skb->len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < sizeof(struct ccci_header)) {
		CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on CH%d, len=%d header=%d hard_header=%d\n",
			port->tx_ch, skb->len, skb_headroom(skb), dev->hard_header_len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

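	/* wrap the skb in a CCCI request; the FREE policy tells the core to
	 * release the skb once the modem has consumed it */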
	req = ccci_alloc_req(OUT, -1, 1, 0);
	if(req) {
		req->skb = skb;
		req->policy = FREE;
		ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
		ccci_h->channel = port->tx_ch;
		ccci_h->data[0] = 0;
		ccci_h->data[1] = skb->len; // skb->len already includes the ccci_header after skb_push
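		/* stamp a driver-side sequence number into the reserved field so the
		 * receiver can spot dropped packets; "ut_" suggests a unit-test/debug
		 * counter (the multi-queue variant below sequences per netdev) */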
		ccci_h->reserved = ut_seq_num++;
		ret = ccci_port_send_request(port, req);
		if(ret) {
			skb_pull(skb, sizeof(struct ccci_header)); // undo the header push; the next retry will reserve it again
			req->policy = NOOP; // when returning busy, do NOT free the skb: the network stack still owns it
			ccci_free_req(req);
			return NETDEV_TX_BUSY;
		}
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_len;
	} else {
		CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
		return NETDEV_TX_BUSY;
	}
	return NETDEV_TX_OK;
}
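
/* Workqueue handler: copy one ccci_header-sized message out of the shared
 * CCIF SRAM, rebuild it in a freshly allocated request, and feed it to the
 * CCCI port layer. */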
static void md_ccif_sram_rx_work(struct work_struct *work)
{
    struct md_ccif_ctrl *md_ctrl = container_of(work, struct md_ccif_ctrl, ccif_sram_work);
    struct ccci_modem *md = md_ctrl->rxq[0].modem;
    struct ccci_header* dl_pkg = &md_ctrl->ccif_sram_layout->dl_header;
    struct ccci_header *ccci_h;
    struct ccci_request *new_req = NULL;
    int pkg_size, ret = 0, retry_cnt = 0;
    //md_ccif_dump("md_ccif_sram_rx_work",md);
    pkg_size = sizeof(struct ccci_header);
    new_req = ccci_alloc_req(IN, pkg_size, 1, 0);
    if (new_req == NULL || new_req->skb == NULL) {
        CCCI_ERR_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_alloc_req pkg_size=%d failed\n", pkg_size);
        return;
    }
    INIT_LIST_HEAD(&new_req->entry); // the port layer will run list_del on this entry
    skb_put(new_req->skb, pkg_size);
    ccci_h = (struct ccci_header *)new_req->skb->data;
    ccci_h->data[0] = ccif_read32(&dl_pkg->data[0], 0);
    ccci_h->data[1] = ccif_read32(&dl_pkg->data[1], 0);
    /* 'channel' is a bit-field, so its address cannot be taken for ccif_read32;
     * copy the whole third 32-bit word instead */
    *(((u32 *)ccci_h) + 2) = ccif_read32((((u32 *)dl_pkg) + 2), 0);
    ccci_h->reserved = ccif_read32(&dl_pkg->reserved, 0);
    if(atomic_cmpxchg(&md->wakeup_src, 1, 0) == 1)
        CCCI_INF_MSG(md->index, TAG, "CCIF_MD wakeup source:(SRX_IDX/%d)\n", *(((u32 *)ccci_h)+2));
    
RETRY:
    ret = ccci_port_recv_request(md, new_req);
    CCCI_INF_MSG(md->index, TAG, "Rx msg %x %x %x %x ret=%d\n", ccci_h->data[0], ccci_h->data[1], *(((u32 *)ccci_h)+2), ccci_h->reserved,ret);
    if (ret >= 0 || ret == -CCCI_ERR_DROP_PACKET) {
        CCCI_INF_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_port_recv_request ret=%d\n", ret);
    } else {
        /* minimal sketch of the truncated error path, inferred from the
         * retry_cnt/RETRY scaffolding above: back off, retry a bounded
         * number of times, then drop the message and free the request */
        if (retry_cnt < 20) {
            retry_cnt++;
            udelay(5);
            goto RETRY;
        }
        CCCI_ERR_MSG(md->index, TAG, "md_ccif_sram_rx_work: drop pkg, ret=%d\n", ret);
        new_req->policy = FREE; // FREE: ccci_free_req also releases the skb
        ccci_free_req(new_req);
    }
}
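
/* Multi-queue variant of ccmni_start_xmit: checks modem state before sending,
 * steers skbs classified as ACKs onto a dedicated ACK channel/TX queue, and
 * throttles the "TX busy" logging. */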
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
    struct ccci_request *req = NULL;
    struct ccci_header *ccci_h;
    int ret;
    int skb_len = skb->len;
    static int tx_busy_retry_cnt;
    int tx_queue, tx_channel;

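    /* when FEATURE_SEQ_CHECK_EN is not set, this driver stamps its own TX
     * sequence numbers (from the netdev entity) into ccci_header.reserved;
     * otherwise the field is left zero */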
#ifndef FEATURE_SEQ_CHECK_EN
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
                 port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n", port->name, skb_headroom(skb), skb->len);
#endif

    if(unlikely(skb->len > CCMNI_MTU)) {
        CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCMNI_MTU, dev->mtu, skb->len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
        CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n",
                     port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(port->modem->md_state != READY)) {
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    req = ccci_alloc_req(OUT, -1, 1, 0);
    if(req) {
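        /* pick channel and queue: skbs classified as ACKs by skb_is_ack() go
         * out on a dedicated ACK channel/queue so they are not queued behind
         * bulk uplink data */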
        if(likely(port->rx_ch != CCCI_CCMNI3_RX)) {
            if(unlikely(skb_is_ack(skb))) {
                tx_channel = port->tx_ch == CCCI_CCMNI1_TX ? CCCI_CCMNI1_DL_ACK : CCCI_CCMNI2_DL_ACK;
                tx_queue = NET_ACK_TXQ_INDEX(port);
            } else {
                tx_channel = port->tx_ch;
                tx_queue = NET_DAT_TXQ_INDEX(port);
            }
        } else {
            tx_channel = port->tx_ch;
            tx_queue = NET_DAT_TXQ_INDEX(port);
        }

        req->skb = skb;
        req->policy = FREE;
        ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
        ccci_h->channel = tx_channel;
        ccci_h->data[0] = 0;
        ccci_h->data[1] = skb->len; // skb->len already includes the ccci_header after skb_push
#ifndef FEATURE_SEQ_CHECK_EN
        ccci_h->reserved = nent->tx_seq_num++;
#else
        ccci_h->reserved = 0;
#endif
        ret = port->modem->ops->send_request(port->modem, tx_queue, req);
        if(ret) {
            skb_pull(skb, sizeof(struct ccci_header)); // undo the header push; the next retry will reserve it again
            req->policy = NOOP; // when returning busy, do NOT free the skb: the network stack still owns it
            ccci_free_req(req);
            goto tx_busy;
        }
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb_len;
        tx_busy_retry_cnt = 0;
    } else {
        CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
        goto tx_busy;
    }
    return NETDEV_TX_OK;

tx_busy:
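    /* without MODEM_CAP_TXBUSY_STOP the stack just keeps retrying, so the log
     * is throttled to one line per 20000 attempts; with the capability, only a
     * counter is bumped (the netdev queue is presumably stopped/woken elsewhere) */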
    if(unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
        if((++tx_busy_retry_cnt) % 20000 == 0)
            CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n", port->name, tx_busy_retry_cnt);
    } else {
        port->tx_busy_count++;
    }
    return NETDEV_TX_BUSY;
}