static int port_net_recv_req(struct ccci_port *port, struct ccci_request* req)
{
	struct sk_buff *skb = req->skb;
	struct net_device *dev = ((struct netdev_entity *)port->private_data)->ndev;
	unsigned int packet_type;
	int skb_len = req->skb->len;
	
	CCCI_DBG_MSG(port->modem->index, NET, "incoming on CH%d\n", port->rx_ch);
	list_del(&req->entry); // dequeue from queue's list
	skb_pull(skb, sizeof(struct ccci_header));
	packet_type = skb->data[0] & 0xF0;
	ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, packet_type);
	skb_set_mac_header(skb, -ETH_HLEN);
	skb->dev = dev;
	if(packet_type == 0x60) {
		skb->protocol = htons(ETH_P_IPV6);
	} else {
		skb->protocol = htons(ETH_P_IP);
	}
	skb->ip_summed = CHECKSUM_NONE;
#if defined(CCCI_USE_NAPI) || defined(DUM_NAPI)
	netif_receive_skb(skb);
#else
	netif_rx(skb);
#endif
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb_len;
	req->policy = NOOP;
	req->skb = NULL; // skb has been handed to the network stack, drop our reference
	ccci_free_req(req);
	wake_lock_timeout(&port->rx_wakelock, HZ);
	return 0;
}
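ccmni_make_etherframe() is not defined in this listing. A hypothetical sketch of what such a helper plausibly does (the real one is driver-specific): fabricate an Ethernet header in the ETH_HLEN bytes of headroom in front of the raw IP payload, so the stack sees the packet as if it arrived on an Ethernet device.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* Hypothetical sketch only -- not the real ccmni_make_etherframe(). */
static void make_etherframe_sketch(void *eth_hdr, const u8 *dev_addr,
				   unsigned int packet_type)
{
	struct ethhdr *eth = (struct ethhdr *)eth_hdr;

	memcpy(eth->h_dest, dev_addr, ETH_ALEN);	/* deliver to our own ndev */
	memset(eth->h_source, 0, ETH_ALEN);		/* no real peer MAC exists */
	eth->h_proto = htons(packet_type == 0x60 ? ETH_P_IPV6 : ETH_P_IP);
}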
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * the 1st is decided by @blk1, and the 2nd and 3rd by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when you set blk1=1.
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain
 * request to be completed. this is because requests are recycled and their state is reset,
 * so if a request is completed and then reused, the poor guy who is waiting for it may never
 * see the state transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
    struct ccci_request *req = NULL;

retry:
    req = ccci_req_dequeue(&req_pool);
    if(req) {
        if(size>0) {
            req->skb = ccci_alloc_skb(size, blk1);
            req->policy = RECYCLE;
            if(req->skb)
                CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
        } else {
            req->skb = NULL;
            req->policy = NOOP;
        }
        req->blocking = blk2;
    } else {
        if(blk1) {
            wait_event_interruptible(req_pool.req_wq, (req_pool.count>0));
            goto retry;
        }
        CCCI_INF_MSG(-1, BM, "fail to alloc req for %ps, no retry\n", __builtin_return_address(0));
    }
    if(unlikely(req && size>0 && !req->skb)) { // guard against NULL req from the non-blocking path
        CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
        req->policy = NOOP;
        ccci_free_req(req);
        req = NULL;
    }
    return req;
}
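A minimal caller sketch, assuming only the contract described in the comment above. The payload handling below is illustrative; ccci_port_send_request appears later in this listing, and -ENOMEM is an assumed error convention.

/* Caller sketch (hypothetical usage): blocking request allocation with a
 * non-blocking submit. blk1=1 only waits for a request slot; NULL is still
 * possible when the skb allocation itself fails, so always check. */
static int ccci_send_example(struct ccci_port *port, const void *data, int len)
{
	struct ccci_request *req = ccci_alloc_req(OUT, len, 1, 0);

	if (!req) /* possible even with blk1=1 */
		return -ENOMEM;
	memcpy(skb_put(req->skb, len), data, len);
	return ccci_port_send_request(port, req);
}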
Example #3
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * the 1st is decided by @blk1, and the 2nd and 3rd by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when you set blk1=1.
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain
 * request to be completed. this is because requests are recycled and their state is reset,
 * so if a request is completed and then reused, the poor guy who is waiting for it may never
 * see the state transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
	int i;
	struct ccci_request *req = NULL;
	unsigned long flags;

retry:
	spin_lock_irqsave(&req_pool_lock, flags);
	for(i=0; i<BM_POOL_SIZE; i++) {
		if(req_pool[i].state == IDLE) {
			// important check when a request is passed cross-layer: make sure it is no longer on any list
			if(req_pool[i].entry.next == LIST_POISON1 && req_pool[i].entry.prev == LIST_POISON2) {
				req = &req_pool[i];
				CCCI_DBG_MSG(-1, BM, "%ps alloc req=%p, i=%d size=%d\n", __builtin_return_address(0), req, i, size);
				req->state = FLYING;
				break;
			} else {
				// should not happen
				CCCI_ERR_MSG(-1, BM, "idle but in list i=%d, from %ps\n", i, __builtin_return_address(0));
				list_del(&req_pool[i].entry);
			}
		}
	}
	if(req) {
		req->dir = dir;
		req_pool_cnt--;
		CCCI_DBG_MSG(-1, BM, "pool count-=%d\n", req_pool_cnt);
	}
	spin_unlock_irqrestore(&req_pool_lock, flags);
	if(req) {
		if(size>0) {
			req->skb = ccci_alloc_skb(size, blk1);
			req->policy = RECYCLE;
			if(req->skb)
				CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
		} else {
			req->skb = NULL;
			req->policy = NOOP;
		}
		req->blocking = blk2;
	} else {
		if(blk1) {
			wait_event_interruptible(req_pool_wq, (req_pool_cnt>0));
			goto retry;
		}
		CCCI_INF_MSG(-1, BM, "fail to alloc req for %ps, no retry\n", __builtin_return_address(0));
	}
	if(unlikely(req && size>0 && !req->skb)) { // guard against NULL req from the non-blocking path
		CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
		req->policy = NOOP;
		ccci_free_req(req);
		req = NULL;
	}
	return req;
}
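The IDLE-but-off-list check above leans on standard <linux/list.h> behavior: list_del() poisons the entry's pointers, so "detached from every list" is detectable. A minimal sketch of the idiom:

#include <linux/list.h>

/* Sketch: list_del() sets entry->next/prev to LIST_POISON1/LIST_POISON2,
 * so a poisoned entry is provably off every list. An entry that claims to
 * be IDLE but is not poisoned is still linked somewhere -- the error case
 * logged above. */
static bool entry_is_off_list(const struct list_head *e)
{
	return e->next == LIST_POISON1 && e->prev == LIST_POISON2;
}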
Example #4
static int port_net_recv_req(struct ccci_port *port, struct ccci_request* req)
{
    struct sk_buff *skb = req->skb;
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    struct net_device *dev = nent->ndev;
    unsigned int packet_type;
    int skb_len = req->skb->len;

#ifndef FEATURE_SEQ_CHECK_EN
    struct ccci_header *ccci_h = (struct ccci_header*)req->skb->data;
    CCCI_DBG_MSG(port->modem->index, NET, "recv on %s, curr_seq=%d\n", port->name, ccci_h->reserved);
    if(unlikely(nent->rx_seq_num!=0 && (ccci_h->reserved-nent->rx_seq_num)!=1)) {
        CCCI_ERR_MSG(port->modem->index, NET, "possible packet lost on %s %d->%d\n",
                     port->name, nent->rx_seq_num, ccci_h->reserved);
    }
    nent->rx_seq_num = ccci_h->reserved;
#else
    CCCI_DBG_MSG(port->modem->index, NET, "recv on %s\n", port->name);
#endif

    list_del(&req->entry); // dequeue from queue's list
    skb_pull(skb, sizeof(struct ccci_header));
    packet_type = skb->data[0] & 0xF0;
    ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, packet_type);
    skb_set_mac_header(skb, -ETH_HLEN);
    skb->dev = dev;
    if(packet_type == 0x60) {
        skb->protocol  = htons(ETH_P_IPV6);
    } else {
        skb->protocol  = htons(ETH_P_IP);
    }
    skb->ip_summed = CHECKSUM_NONE;
    if(likely(port->modem->capability & MODEM_CAP_NAPI)) {
        netif_receive_skb(skb);
    } else {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
        if(!in_interrupt()) {
            netif_rx_ni(skb);
        } else {
            netif_rx(skb);
        }
#else
        netif_rx(skb);
#endif
    }
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += skb_len;
    req->policy = NOOP;
    req->skb = NULL;
    ccci_free_req(req);
    wake_lock_timeout(&port->rx_wakelock, HZ);
    return 0;
}
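The RX hand-off above picks between three delivery paths; a condensed sketch of the dispatch rule, assuming pre-5.18 kernel APIs (netif_rx_ni() was folded into netif_rx() in 5.18):

#include <linux/netdevice.h>

/* Sketch of the RX hand-off rule (pre-5.18 APIs assumed):
 *  - NAPI poll (softirq) context: deliver synchronously to the stack.
 *  - process context: netif_rx_ni() also kicks any pending softirqs.
 *  - hard-IRQ context: netif_rx() just queues to the per-CPU backlog. */
static void ccmni_rx_deliver(struct sk_buff *skb, bool napi) /* hypothetical helper */
{
	if (napi)
		netif_receive_skb(skb);
	else if (!in_interrupt())
		netif_rx_ni(skb);
	else
		netif_rx(skb);
}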
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h;
	int ret;
	static unsigned int ut_seq_num = 0;
	int skb_len = skb->len;

	CCCI_DBG_MSG(port->modem->index, NET, "tx skb %p on CH%d, len=%d/%d\n", skb, port->tx_ch, skb_headroom(skb), skb->len);
	if (skb->len > CCMNI_MTU) {
		CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCMNI_MTU, dev->mtu, skb->len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < sizeof(struct ccci_header)) {
		CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on CH%d, len=%d header=%d hard_header=%d\n",
			port->tx_ch, skb->len, skb_headroom(skb), dev->hard_header_len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	req = ccci_alloc_req(OUT, -1, 1, 0);
	if(req) {
		req->skb = skb;
		req->policy = FREE;
		ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
		ccci_h->channel = port->tx_ch;
		ccci_h->data[0] = 0;
		ccci_h->data[1] = skb->len; // skb->len already includes the ccci_header after skb_push
		ccci_h->reserved = ut_seq_num++;
		ret = ccci_port_send_request(port, req);
		if(ret) {
			skb_pull(skb, sizeof(struct ccci_header)); // undo header; on the next retry we'll reserve the header again
			req->policy = NOOP; // if you return busy, do NOT free skb as network may still use it
			ccci_free_req(req);
			return NETDEV_TX_BUSY;
		}
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_len;
	} else {
		CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
		return NETDEV_TX_BUSY;
	}
	return NETDEV_TX_OK;
}
Example #6
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
    struct ccci_request *req = NULL;
    struct ccci_header *ccci_h;
    int ret;
    int skb_len = skb->len;
    static int tx_busy_retry_cnt = 0;
    int tx_queue, tx_channel;

#ifndef FEATURE_SEQ_CHECK_EN
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
                 port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n", port->name, skb_headroom(skb), skb->len);
#endif

    if(unlikely(skb->len > CCMNI_MTU)) {
        CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCMNI_MTU, dev->mtu, skb->len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
        CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n",
                     port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(port->modem->md_state != READY)) {
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    req = ccci_alloc_req(OUT, -1, 1, 0);
    if(req) {
        if(likely(port->rx_ch != CCCI_CCMNI3_RX)) {
            if(unlikely(skb_is_ack(skb))) {
                tx_channel = port->tx_ch==CCCI_CCMNI1_TX?CCCI_CCMNI1_DL_ACK:CCCI_CCMNI2_DL_ACK;
                tx_queue = NET_ACK_TXQ_INDEX(port);
            } else {
                tx_channel = port->tx_ch;
                tx_queue = NET_DAT_TXQ_INDEX(port);
            }
        } else {
            tx_channel = port->tx_ch;
            tx_queue = NET_DAT_TXQ_INDEX(port);
        }

        req->skb = skb;
        req->policy = FREE;
        ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
        ccci_h->channel = tx_channel;
        ccci_h->data[0] = 0;
        ccci_h->data[1] = skb->len; // skb->len already includes the ccci_header after skb_push
#ifndef FEATURE_SEQ_CHECK_EN
        ccci_h->reserved = nent->tx_seq_num++;
#else
        ccci_h->reserved = 0;
#endif
        ret = port->modem->ops->send_request(port->modem, tx_queue, req);
        if(ret) {
            skb_pull(skb, sizeof(struct ccci_header)); // undo header; on the next retry we'll reserve the header again
            req->policy = NOOP; // if you return busy, do NOT free skb as network may still use it
            ccci_free_req(req);
            goto tx_busy;
        }
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb_len;
        tx_busy_retry_cnt = 0;
    } else {
        CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
        goto tx_busy;
    }
    return NETDEV_TX_OK;

tx_busy:
    if(unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
        if((++tx_busy_retry_cnt)%20000 == 0)
            CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n", port->name, tx_busy_retry_cnt);
    } else {
        port->tx_busy_count++;
    }
    return NETDEV_TX_BUSY;
}
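skb_is_ack() is not defined in this listing. A hypothetical sketch of such a predicate (the real one is driver-specific): treat a zero-payload IPv4/TCP segment with the ACK flag set as a pure ACK worth steering to the dedicated low-latency ACK queue.

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>

/* Hypothetical sketch only -- not the real skb_is_ack(). A "pure ACK" is
 * a TCP segment with the ACK flag set and no payload, i.e. the IP total
 * length equals IP header + TCP header. skb->data points at the raw IP
 * header here, since no ccci_header has been pushed yet. */
static bool skb_is_pure_tcp_ack(const struct sk_buff *skb)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const struct tcphdr *th;

	if ((skb->data[0] & 0xF0) != 0x40 || iph->protocol != IPPROTO_TCP)
		return false; /* IPv4 + TCP only, for brevity */
	th = (const struct tcphdr *)(skb->data + iph->ihl * 4);
	return th->ack && ntohs(iph->tot_len) == iph->ihl * 4 + th->doff * 4;
}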