Example #1
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * the 1st is decided by @blk1, and the 2nd and 3rd are decided by @blk2, waiting on @wq.
 * NULL may still be returned if no skb is available, even when blk1=1 (see the caller sketch after this function).
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain request to
 * be completed. this is because requests are recycled and their state is reset, so if a request
 * is completed and then used again, the poor guy who is waiting for it may never see the state
 * transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
	int i;
	struct ccci_request *req = NULL;
	unsigned long flags;

retry:
	spin_lock_irqsave(&req_pool_lock, flags);
	for(i=0; i<BM_POOL_SIZE; i++) {
		if(req_pool[i].state == IDLE) {
			// important check when a request is passed cross-layer: make sure this request is no longer on any list
			if(req_pool[i].entry.next == LIST_POISON1 && req_pool[i].entry.prev == LIST_POISON2) {
				req = &req_pool[i];
				CCCI_DBG_MSG(-1, BM, "%ps alloc req=%p, i=%d size=%d\n", __builtin_return_address(0), req, i, size);
				req->state = FLYING;
				break;
			} else {
				// should not happen
				CCCI_ERR_MSG(-1, BM, "idle but in list i=%d, from %ps\n", i, __builtin_return_address(0));
				list_del(&req_pool[i].entry);
			}
		}
	}
	if(req) {
		req->dir = dir;
		req_pool_cnt--;
		CCCI_DBG_MSG(-1, BM, "pool count-=%d\n", req_pool_cnt);
	}
	spin_unlock_irqrestore(&req_pool_lock, flags);
	if(req) {
		if(size>0) {
			req->skb = ccci_alloc_skb(size, blk1);
			req->policy = RECYCLE;
			if(req->skb)
				CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
		} else {
			req->skb = NULL;
			req->policy = NOOP;
		}
		req->blocking = blk2;
	} else {
		if(blk1) {
			wait_event_interruptible(req_pool_wq, (req_pool_cnt>0));
			goto retry;
		}
		CCCI_INF_MSG(-1, BM, "fail to allock req for %ps, no retry\n", __builtin_return_address(0));
	}
	if(unlikely(req && size>0 && !req->skb)) {
		CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
		req->policy = NOOP;
		ccci_free_req(req);
		req = NULL;
	}
	return req;
}
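A minimal caller sketch for the allocator above. The helper name example_send_skb is hypothetical; the body simply mirrors the Tx pattern used by ccmni_start_xmit() and ccci_port_send_request() in the later examples, and illustrates the blk1/policy contract rather than the driver's actual API.

/* hypothetical helper, sketched from the ccmni_start_xmit() pattern shown later in this listing */
static int example_send_skb(struct ccci_port *port, struct sk_buff *skb)
{
	/* size=-1: attach our own skb instead of allocating one; blk1=1: block until a request is free */
	struct ccci_request *req = ccci_alloc_req(OUT, -1, 1, 0);

	if (!req)	/* defensive; with blk1=1 and no skb allocation this should not happen */
		return -EAGAIN;
	req->skb = skb;
	req->policy = FREE;	/* on success, the free path releases the skb for us */
	if (ccci_port_send_request(port, req)) {
		req->policy = NOOP;	/* on failure, the caller keeps ownership of the skb */
		ccci_free_req(req);
		return -EBUSY;
	}
	return 0;
}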
Example #2
int ccci_ringbuf_readable(int md_id,struct ccci_ringbuf * ringbuf)
{
    unsigned char *rx_buffer, *outptr;
    unsigned int read, write, ccci_pkg_len, ccif_pkg_len;
    unsigned int header, footer, footer_pos, length;
    int size;	/* signed: may go negative before wrapping around the ring */
    if(ringbuf==NULL)
    {
        CCCI_ERR_MSG(md_id, TAG, "rbrdb param error,ringbuf==NULL\n");
        return -CCCI_RINGBUF_PARAM_ERR;
    }
    read  = (unsigned int)(ringbuf->rx_control.read);
    write = (unsigned int)(ringbuf->rx_control.write);
    length = (unsigned int)(ringbuf->rx_control.length);    
    rx_buffer=ringbuf->buffer;
    size = (int)(write - read);
    if (size < 0)
        size += length;
    
    CCCI_DBG_MSG(md_id, TAG, "rbrdb:rbf=%p,rx_buf=0x%p,read=%d,write=%d,len=%d\n",ringbuf,rx_buffer,read,write,length);
    if(size < CCIF_HEADER_LEN + CCIF_FOOTER_LEN + CCCI_HEADER_LEN)
    {
        return -CCCI_RINGBUF_EMPTY;
    }
    outptr=(unsigned char *)&header;
    CCIF_RBF_READ(rx_buffer,outptr,sizeof(unsigned int),read,length);
    ccci_pkg_len = header & 0x0000FFFF;
    if((header&0xFFFF0000)!= CCIF_PKG_HEADER)
    {
        CCCI_ERR_MSG(md_id, TAG, "rbrdb:header 0x%x!=0xAABBxxxx\n",header);
        return -CCCI_RINGBUF_BAD_HEADER;    
    }
    ccif_pkg_len = CCIF_HEADER_LEN+ccci_pkg_len+CCIF_FOOTER_LEN;
    if(ccif_pkg_len>size)
    {
        CCCI_ERR_MSG(md_id, TAG, "rbrdb:header ccif_pkg_len(%d) > all data size(%d)\n",ccif_pkg_len,size);
        return -CCCI_RINGBUF_NOT_COMPLETE;
    }    
    footer_pos = read + ccif_pkg_len-CCIF_FOOTER_LEN;
    if(footer_pos >= length)
    {
        footer_pos -= length;
    }
    outptr=(unsigned char *)&footer;
    
    CCIF_RBF_READ(rx_buffer,outptr,sizeof(unsigned int),footer_pos,length);
    if(footer!=CCIF_PKG_FOOTER)
    {
        CCCI_ERR_MSG(md_id, TAG, "rbrdb:ccif_pkg_len=0x%x,footer_pos=0x%x, footer 0x%x!=0xCCDDEEFF\n",ccif_pkg_len,footer_pos,footer);
        ccci_ringbuf_dump(md_id,"readable",rx_buffer, read, length, ccif_pkg_len+8);
        return -CCCI_RINGBUF_BAD_FOOTER;
    }
    return ccci_pkg_len;
}
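For reference, the layout that the check above expects is: a 32-bit header word whose upper half must be 0xAABB and whose lower half is the CCCI payload length, then the payload, then a 32-bit 0xCCDDEEFF footer. The snippet below writes one such frame into a flat buffer purely as an illustration; it assumes CCIF_HEADER_LEN and CCIF_FOOTER_LEN are each one 32-bit word, and it ignores ring-buffer wrapping, which CCIF_RBF_READ handles in the real code.

#include <stdint.h>
#include <string.h>

#define EX_CCIF_PKG_HEADER 0xAABB0000u	/* upper half checked by ccci_ringbuf_readable() */
#define EX_CCIF_PKG_FOOTER 0xCCDDEEFFu

/* illustration only: emit one framed packet at the start of a flat (non-wrapping) buffer */
static size_t ex_frame_packet(uint8_t *buf, const void *payload, uint16_t ccci_pkg_len)
{
	uint32_t header = EX_CCIF_PKG_HEADER | ccci_pkg_len;
	uint32_t footer = EX_CCIF_PKG_FOOTER;
	size_t off = 0;

	memcpy(buf + off, &header, sizeof(header));
	off += sizeof(header);
	memcpy(buf + off, payload, ccci_pkg_len);
	off += ccci_pkg_len;
	memcpy(buf + off, &footer, sizeof(footer));
	off += sizeof(footer);
	return off;	/* ccif_pkg_len = CCIF_HEADER_LEN + ccci_pkg_len + CCIF_FOOTER_LEN */
}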
Example #3
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h;
	int ret;
	static unsigned int ut_seq_num = 0;
	int skb_len = skb->len;

	CCCI_DBG_MSG(port->modem->index, NET, "tx skb %p on CH%d, len=%d/%d\n", skb, port->tx_ch, skb_headroom(skb), skb->len);
	if (skb->len > CCMNI_MTU) {
		CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCMNI_MTU, dev->mtu, skb->len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < sizeof(struct ccci_header)) {
		CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on CH%d, len=%d header=%d hard_header=%d\n",
			port->tx_ch, skb->len, skb_headroom(skb), dev->hard_header_len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	req = ccci_alloc_req(OUT, -1, 1, 0);
	if(req) {
		req->skb = skb;
		req->policy = FREE;
		ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
		ccci_h->channel = port->tx_ch;
		ccci_h->data[0] = 0;
		ccci_h->data[1] = skb->len; // as skb->len already included ccci_header after skb_push
		ccci_h->reserved = ut_seq_num++;
		ret = ccci_port_send_request(port, req);
		if(ret) {
			skb_pull(skb, sizeof(struct ccci_header)); // undo header; in the next retry, we'll reserve the header again
			req->policy = NOOP; // if you return busy, do NOT free skb as network may still use it
			ccci_free_req(req);
			return NETDEV_TX_BUSY;
		}
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_len;
	} else {
		CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
		return NETDEV_TX_BUSY;
	}
	return NETDEV_TX_OK;
}
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * the 1st is decided by @blk1, and the 2nd and 3rd are decided by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when blk1=1.
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain request to
 * be completed. this is because requests are recycled and their state is reset, so if a request
 * is completed and then used again, the poor guy who is waiting for it may never see the state
 * transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
    struct ccci_request *req = NULL;

retry:
    req = ccci_req_dequeue(&req_pool);
    if(req) {
        if(size>0) {
            req->skb = ccci_alloc_skb(size, blk1);
            req->policy = RECYCLE;
            if(req->skb)
                CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
        } else {
            req->skb = NULL;
            req->policy = NOOP;
        }
        req->blocking = blk2;
    } else {
        if(blk1) {
            wait_event_interruptible(req_pool.req_wq, (req_pool.count>0));
            goto retry;
        }
        CCCI_INF_MSG(-1, BM, "fail to alloc req for %ps, no retry\n", __builtin_return_address(0));
    }
    if(unlikely(req && size>0 && !req->skb)) {
        CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
        req->policy = NOOP;
        ccci_free_req(req);
        req = NULL;
    }
    return req;
}
Example #5
void ccci_skb_enqueue(struct ccci_skb_queue *queue, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->skb_list.lock, flags);
	if (queue->skb_list.qlen < queue->max_len) {
		__skb_queue_tail(&queue->skb_list, newsk);
		if (queue->skb_list.qlen > queue->max_history)
			queue->max_history = queue->skb_list.qlen;
	} else {
#if 0
		if (queue->pre_filled) {
			CCCI_ERR_MSG(0, BM, "skb queue too long, max=%d\n", queue->max_len);
#else
		if (1) {
#endif

#ifdef CCCI_MEM_BM_DEBUG
			if (ccci_skb_addr_checker(newsk)) {
				CCCI_INF_MSG(-1, BM, "ccci_skb_enqueue:ccci_skb_addr_checker failed!\n");
				ccci_mem_dump(-1, queue, sizeof(struct ccci_skb_queue));
				dump_stack();
			}
#endif
			dev_kfree_skb_any(newsk);
		} else {
			__skb_queue_tail(&queue->skb_list, newsk);
		}
	}
	spin_unlock_irqrestore(&queue->skb_list.lock, flags);
}

void ccci_skb_queue_init(struct ccci_skb_queue *queue, unsigned int skb_size, unsigned int max_len,
	char fill_now)
{
	int i;

	queue->magic_header = SKB_MAGIC_HEADER;
	queue->magic_footer = SKB_MAGIC_FOOTER;
#ifdef CCCI_WP_DEBUG
	if (((unsigned long)queue) == ((unsigned long)(&skb_pool_16))) {
		CCCI_INF_MSG(-1, BM, "ccci_skb_queue_init: add hwp skb_pool_16.magic_footer=%p!\n",
			&queue->magic_footer);
		enable_watchpoint(&queue->magic_footer);
	}
#endif
	skb_queue_head_init(&queue->skb_list);
	queue->max_len = max_len;
	if (fill_now) {
		for (i = 0; i < queue->max_len; i++) {
			struct sk_buff *skb = __alloc_skb_from_kernel(skb_size, GFP_KERNEL);
			if (skb != NULL)
				skb_queue_tail(&queue->skb_list, skb);
		}
		queue->pre_filled = 1;
	} else {
		queue->pre_filled = 0;
	}
	queue->max_history = 0;
}
Example #6
void ccci_free_req(struct ccci_request *req)
{
	unsigned long flags;

	CCCI_DBG_MSG(-1, BM, "%ps free req=%p, policy=%d, skb=%p, len=%d\n", __builtin_return_address(0),
		req, req->policy, req->skb, skb_size(req->skb));
	if(req->skb)
		ccci_free_skb(req->skb, req->policy);
	spin_lock_irqsave(&req_pool_lock, flags);
	// 1. reset the request
	req->state = IDLE;
	req->skb = NULL;
	/*
	 * do NOT reset req->entry here; always maintain it via the list API (list_del).
	 * Tx requests are never put on any queue, so no extra effort is needed when deleting them.
	 * but for Rx requests, we must make sure list_del is called once before we free them
	 * (see the Rx-side sketch after this function).
	 */
	if(req->entry.next != LIST_POISON1 || req->entry.prev != LIST_POISON2) {
		CCCI_ERR_MSG(-1, BM, "req %p entry not deleted yet, from %ps\n", req, __builtin_return_address(0));
		list_del(&req->entry);
	}
	// 2. wake up pending allocation
	req_pool_cnt++;
	CCCI_DBG_MSG(-1, BM, "pool count+=%d\n", req_pool_cnt);
	spin_unlock_irqrestore(&req_pool_lock, flags);
	wake_up_all(&req_pool_wq);

}
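A hedged Rx-side sketch of the contract described in the comment above; example_rx_done is a hypothetical name, and the body mirrors the tail of port_net_recv_req() shown in a later example.

/* hypothetical Rx completion handler: a request that sat on a queue's list must be
 * list_del'ed exactly once before it is handed back to ccci_free_req() */
static void example_rx_done(struct ccci_request *req)
{
	list_del(&req->entry);	/* poisons entry, which ccci_free_req() verifies */
	/* the skb has been handed off to its consumer, so do not let the free path touch it */
	req->policy = NOOP;
	req->skb = NULL;
	ccci_free_req(req);	/* resets state to IDLE and wakes ccci_alloc_req() waiters */
}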
Example #7
// may return NULL, caller should check
struct sk_buff *ccci_alloc_skb(int size, char blocking)
{
	int count = 0;
	struct sk_buff *skb = NULL;

	if(size>SKB_4K || size<0)
		goto err_exit;
	skb = blocking?NULL:__alloc_skb_from_kernel(size);
	
	if(!skb) {
slow_retry:
		skb = __alloc_skb_from_pool(size);
	}
	if(unlikely(!skb)) {
		if(blocking) {
			CCCI_INF_MSG(-1, BM, "skb pool is empty! size=%d (%d)\n", size, count++);
			msleep(100);
			goto slow_retry;
		} else {
fast_retry:
			skb = __alloc_skb_from_kernel(size);
			if(!skb && count++<20)
				goto fast_retry;
		}
	}
err_exit:
	if(unlikely(!skb))
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb fail, size=%d\n", __builtin_return_address(0), size);
	else
		CCCI_DBG_MSG(-1, BM, "%ps alloc skb %p, size=%d\n", __builtin_return_address(0), skb, size);
	return skb;
}
Example #8
static int port_net_recv_req(struct ccci_port *port, struct ccci_request* req)
{
    struct sk_buff *skb = req->skb;
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    struct net_device *dev = nent->ndev;
    unsigned int packet_type;
    int skb_len = req->skb->len;

#ifndef FEATURE_SEQ_CHECK_EN
    struct ccci_header *ccci_h = (struct ccci_header*)req->skb->data;
    CCCI_DBG_MSG(port->modem->index, NET, "recv on %s, curr_seq=%d\n", port->name, ccci_h->reserved);
    if(unlikely(nent->rx_seq_num!=0 && (ccci_h->reserved-nent->rx_seq_num)!=1)) {
        CCCI_ERR_MSG(port->modem->index, NET, "possible packet lost on %s %d->%d\n",
                     port->name, nent->rx_seq_num, ccci_h->reserved);
    }
    nent->rx_seq_num = ccci_h->reserved;
#else
    CCCI_DBG_MSG(port->modem->index, NET, "recv on %s\n", port->name);
#endif

    list_del(&req->entry); // dequeue from queue's list
    skb_pull(skb, sizeof(struct ccci_header));
    packet_type = skb->data[0] & 0xF0;
    ccmni_make_etherframe(skb->data-ETH_HLEN, dev->dev_addr, packet_type);
    skb_set_mac_header(skb, -ETH_HLEN);
    skb->dev = dev;
    if(packet_type == 0x60) {
        skb->protocol  = htons(ETH_P_IPV6);
    } else {
        skb->protocol  = htons(ETH_P_IP);
    }
    skb->ip_summed = CHECKSUM_NONE;
    if(likely(port->modem->capability & MODEM_CAP_NAPI)) {
        netif_receive_skb(skb);
    } else {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
        if(!in_interrupt()) {
            netif_rx_ni(skb);
        } else {
            netif_rx(skb);
        }
#else
        netif_rx(skb);
#endif
    }
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += skb_len;
    req->policy = NOOP;
    req->skb = NULL;
    ccci_free_req(req);
    wake_lock_timeout(&port->rx_wakelock, HZ);
    return 0;
}
Example #9
static void __16_reload_work(struct work_struct *work)
{
	struct sk_buff *skb;

	CCCI_DBG_MSG(-1, BM, "refill 16B skb pool\n");
	while (skb_pool_16.skb_list.qlen < SKB_POOL_SIZE_16) {
		skb = __alloc_skb_from_kernel(SKB_16, GFP_KERNEL);
		if (skb)
			skb_queue_tail(&skb_pool_16.skb_list, skb);
		else {
			CCCI_ERR_MSG(-1, BM, "fail to reload 16B pool\n");
			break;	/* give up for now rather than spin in the worker on persistent allocation failure */
		}
	}
}
Example #10
static inline struct sk_buff *__alloc_skb_from_kernel(int size)
{
	struct sk_buff *skb = NULL;
	if(size > SKB_1_5K) {
		skb = dev_alloc_skb(SKB_4K);
	} else if(size > SKB_16) {
		skb = dev_alloc_skb(SKB_1_5K);
	} else if(size > 0) {
		skb = dev_alloc_skb(SKB_16);
	}
	if(!skb)
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb from kernel fail, size=%d\n", __builtin_return_address(0), size);
	return skb;
}
Example #11
static inline struct sk_buff *__alloc_skb_from_kernel(int size, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;

	if (size > SKB_1_5K)
		skb = __dev_alloc_skb(SKB_4K, gfp_mask);
	else if (size > SKB_16)
		skb = __dev_alloc_skb(SKB_1_5K, gfp_mask);
	else if (size > 0)
		skb = __dev_alloc_skb(SKB_16, gfp_mask);
	if (!skb)
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb from kernel fail, size=%d\n", __builtin_return_address(0), size);
	return skb;
}
void ccci_free_req(struct ccci_request *req)
{
    CCCI_DBG_MSG(-1, BM, "%ps free req=%p, policy=%d, skb=%p\n", __builtin_return_address(0),
                 req, req->policy, req->skb);
    if(req->skb)
        ccci_free_skb(req->skb, req->policy);
    if(req->entry.next != LIST_POISON1 || req->entry.prev != LIST_POISON2) {
        CCCI_ERR_MSG(-1, BM, "req %p entry not deleted yet, from %ps\n", req, __builtin_return_address(0));
        list_del(&req->entry);
    }
    ccci_req_enqueue(&req_pool, req);
    wake_up_all(&req_pool.req_wq);

}
Example #13
int ccci_ringbuf_writeable(int md_id, struct ccci_ringbuf * ringbuf,unsigned int write_size)
{
    int read, write, size, length;
    if(ringbuf == NULL)
    {
        CCCI_ERR_MSG(md_id, TAG, "rbwb param error,ringbuf == NULL\n");
        return -CCCI_RINGBUF_PARAM_ERR;
    }

    read   = (unsigned int)(ringbuf->tx_control.read);
    write  = (unsigned int)(ringbuf->tx_control.write);
    length = (unsigned int)(ringbuf->tx_control.length);
    if(write_size > length)
    {
        CCCI_ERR_MSG(md_id, TAG, "rbwb param error,writesize(%d) > length(%d)\n",write_size,length);
        return -CCCI_RINGBUF_PARAM_ERR;
    }
    write_size += CCIF_HEADER_LEN+CCIF_FOOTER_LEN;
    if (read == write)
    {
        size = length - 1;
    }
    else if (read < write)
    {
        size =  length - write;
        size += read;
    }
    else
    {
        size = read - write - 1;
    }        
    if(write_size>size)
    {
        //CCCI_INF_MSG(-1, TAG, "rbwb:rbf=%p write_size(%d)>size(%d) r=%d,w=%d\n",ringbuf,write_size,size,read,write);
    }
    return ((write_size<size)?write_size:-(write_size-size));
}
Example #14
static void __4K_reload_work(struct work_struct *work)
{
	struct sk_buff *skb;

	CCCI_DBG_MSG(-1, BM, "refill 4KB skb pool\n");
	while(skb_pool_4K.skb_list.qlen < SKB_POOL_SIZE_4K) {
		skb = dev_alloc_skb(SKB_4K);
		if(!skb)
			skb = __dev_alloc_skb(SKB_4K, GFP_KERNEL);
		if(skb)
			skb_queue_tail(&skb_pool_4K.skb_list, skb);
		else {
			CCCI_ERR_MSG(-1, BM, "fail to reload 4KB pool\n");
			break;	/* give up for now rather than spin in the worker on persistent allocation failure */
		}
	}
}
int ccci_sysfs_add_modem(struct ccci_modem *md)
{
	int ret;
	
	if(!ccci_sys_info)
		return -CCCI_ERR_SYSFS_NOT_READY;

	memset(&md->kobj, 0, sizeof(struct kobject));
	ret = kobject_init_and_add(&md->kobj, &ccci_md_ktype, &ccci_sys_info->kobj, "MDSYS%d", md->index+1);
	if (ret < 0) {
		kobject_put(&md->kobj);
		CCCI_ERR_MSG(md->index, SYSFS, "fail to add md kobject\n");
	}
	return ret;
}
Example #16
int ccci_subsys_sysfs_init(void)
{
	int ret = 0;

	ccci_sys_info = kmalloc(sizeof(struct ccci_info), GFP_KERNEL);
	if (!ccci_sys_info)
		return -ENOMEM;

	memset(ccci_sys_info, 0, sizeof(struct ccci_info));

	ret = kobject_init_and_add(&ccci_sys_info->kobj, &ccci_ktype, kernel_kobj, CCCI_KOBJ_NAME);
	if (ret < 0) {
		kobject_put(&ccci_sys_info->kobj);
		CCCI_ERR_MSG(-1, SYSFS, "fail to add ccci kobject\n");
		return ret;
	}

	ccci_sys_info->ccci_attr_count = ARRAY_SIZE(ccci_default_attrs) - 1;
	CCCI_DBG_MSG(-1, SYSFS, "ccci attr cnt %d\n", ccci_sys_info->ccci_attr_count);
	return ret;
}
Example #17
/* may return NULL, so the caller should check. network should always use blocking, as we do not want it to consume our own pool (see the sketch after this function) */
struct sk_buff *ccci_alloc_skb(int size, char from_pool, char blocking)
{
	int count = 0;
	struct sk_buff *skb = NULL;

#ifdef CCCI_MEM_BM_DEBUG
	ccci_magic_checker();
#endif
	if (size > SKB_4K || size < 0)
		goto err_exit;

	if (from_pool) {
 slow_retry:
		skb = __alloc_skb_from_pool(size);
		if (unlikely(!skb && blocking)) {
			CCCI_INF_MSG(-1, BM, "skb pool is empty! size=%d (%d)\n", size, count++);
			msleep(100);
			goto slow_retry;
		}
	} else {
		if (blocking) {
			skb = __alloc_skb_from_kernel(size, GFP_KERNEL);
		} else {
 fast_retry:
			skb = __alloc_skb_from_kernel(size, GFP_ATOMIC);
			if (!skb && count++ < 20)
				goto fast_retry;
		}
	}
 err_exit:
	if (unlikely(!skb))
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb fail, size=%d\n", __builtin_return_address(0), size);
	else
		CCCI_DBG_MSG(-1, BM, "%ps alloc skb %p, size=%d\n", __builtin_return_address(0), skb, size);
	return skb;
}
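A hedged illustration of the two allocation paths above, based only on the comment at the top of the function; the wrapper names are hypothetical, and whether a given call site may actually sleep depends on its context and is not settled here.

/* hypothetical wrappers around ccci_alloc_skb(); names and the split of call sites are illustrative */
static struct sk_buff *example_pool_alloc(int size)
{
	/* driver-internal refill path: draw from the pre-filled pool and wait if it is momentarily empty */
	return ccci_alloc_skb(size, 1 /* from_pool */, 1 /* blocking */);
}

static struct sk_buff *example_net_alloc(int size)
{
	/* network path per the comment above: skip the driver pools and allocate from the kernel */
	return ccci_alloc_skb(size, 0 /* from_pool */, 1 /* blocking -> GFP_KERNEL */);
}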
Example #18
struct sk_buff *ccci_skb_dequeue(struct ccci_skb_queue *queue)
{
	unsigned long flags;
	struct sk_buff *result;

#ifdef CCCI_MEM_BM_DEBUG
	if (queue->magic_header != SKB_MAGIC_HEADER || queue->magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_ERR_MSG(-1, BM,
			"ccci_skb_dequeue: queue=%lx, skb_pool_16=%lx,  skb_pool_1_5K=%lx, skb_pool_4K=%lx, req_pool=%lx!\n",
			(unsigned long)queue, (unsigned long)&skb_pool_16, (unsigned long)&skb_pool_1_5K,
			(unsigned long)&skb_pool_4K,
			(unsigned long)&req_pool);
		ccci_mem_dump(-1, queue, sizeof(struct ccci_skb_queue));
		dump_stack();
	}
#endif

	spin_lock_irqsave(&queue->skb_list.lock, flags);
	result = __skb_dequeue(&queue->skb_list);
	if (queue->pre_filled && queue->skb_list.qlen < queue->max_len / RELOAD_TH)
		queue_work(pool_reload_work_queue, &queue->reload_work);
	spin_unlock_irqrestore(&queue->skb_list.lock, flags);
	return result;
}
Example #19
static void md_ccif_sram_rx_work(struct work_struct *work)
{
    struct md_ccif_ctrl *md_ctrl = container_of(work, struct md_ccif_ctrl, ccif_sram_work);
    struct ccci_modem *md = md_ctrl->rxq[0].modem;
    struct ccci_header* dl_pkg = &md_ctrl->ccif_sram_layout->dl_header;
    struct ccci_header *ccci_h;
    struct ccci_request *new_req = NULL;
    struct ccci_request *req;
    int pkg_size,ret=0,retry_cnt =0;
    //md_ccif_dump("md_ccif_sram_rx_work",md);
    pkg_size = sizeof(struct ccci_header);
    new_req = ccci_alloc_req(IN, pkg_size, 1, 0);
    INIT_LIST_HEAD(&new_req->entry); // as port will run list_del        
    if(new_req->skb==NULL)
    {
        CCCI_ERR_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_alloc_req pkg_size=%d failed\n", pkg_size);            
        return;
    }
    skb_put(new_req->skb, pkg_size);
    ccci_h = (struct ccci_header *)new_req->skb->data;
    ccci_h->data[0] = ccif_read32(&dl_pkg->data[0],0);
    ccci_h->data[1] = ccif_read32(&dl_pkg->data[1],0);
	//ccci_h->channel = ccif_read32(&dl_pkg->channel,0);
    *(((u32 *)ccci_h)+2) = ccif_read32((((u32 *)dl_pkg)+2),0);
    ccci_h->reserved= ccif_read32(&dl_pkg->reserved,0);
    if(atomic_cmpxchg(&md->wakeup_src, 1, 0) == 1)
        CCCI_INF_MSG(md->index, TAG, "CCIF_MD wakeup source:(SRX_IDX/%d)\n", *(((u32 *)ccci_h)+2));
    
RETRY:    
    ret = ccci_port_recv_request(md, new_req);
    CCCI_INF_MSG(md->index, TAG, "Rx msg %x %x %x %x ret=%d\n", ccci_h->data[0], ccci_h->data[1], *(((u32 *)ccci_h)+2), ccci_h->reserved,ret);
    if(ret>=0 || ret==-CCCI_ERR_DROP_PACKET) {
        CCCI_INF_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_port_recv_request ret=%d\n", ret);
        // step forward
        req = list_entry(req->entry.next, struct ccci_request, entry);
    } else{
Example #20
int md_port_cfg(struct ccci_modem *md)
{
    switch(md->index)
    {
#ifdef  MTK_ENABLE_MD1    
    case MD_SYS1:
        md->ports = md1_ccci_ports;
        md->port_number = ARRAY_SIZE(md1_ccci_ports);
        break;
#endif
#ifdef  MTK_ENABLE_MD2 
    case MD_SYS2:
        md->ports = md2_ccci_ports;
        md->port_number = ARRAY_SIZE(md2_ccci_ports);
        break;
#endif
    default:
        md->ports = NULL;
        md->port_number =0;
        CCCI_ERR_MSG(md->index, TAG, "md_port_cfg:no md enable\n");
        return -1;
    }
    return 0;
}
int md_ccif_get_modem_hw_info(struct platform_device *dev_ptr,
			      struct ccci_dev_cfg *dev_cfg,
			      struct md_hw_info *hw_info)
{
	struct device_node *node = NULL;

	memset(dev_cfg, 0, sizeof(struct ccci_dev_cfg));
	memset(hw_info, 0, sizeof(struct md_hw_info));

#ifdef CONFIG_OF
	if (dev_ptr->dev.of_node == NULL) {
		CCCI_ERR_MSG(dev_cfg->index, TAG, "modem OF node NULL\n");
		return -1;
	}

	of_property_read_u32(dev_ptr->dev.of_node, "cell-index",
			     &dev_cfg->index);
	CCCI_INF_MSG(dev_cfg->index, TAG, "modem hw info get idx:%d\n",
		     dev_cfg->index);
	if (!get_modem_is_enabled(dev_cfg->index)) {
		CCCI_ERR_MSG(dev_cfg->index, TAG, "modem %d not enable, exit\n",
			     dev_cfg->index + 1);
		return -1;
	}
#else
	struct ccci_dev_cfg *dev_cfg_ptr =
	    (struct ccci_dev_cfg *)dev_ptr->dev.platform_data;
	dev_cfg->index = dev_cfg_ptr->index;

	CCCI_INF_MSG(dev_cfg->index, TAG, "modem hw info get idx:%d\n",
		     dev_cfg->index);
	if (!get_modem_is_enabled(dev_cfg->index)) {
		CCCI_ERR_MSG(dev_cfg->index, TAG, "modem %d not enable, exit\n",
			     dev_cfg->index + 1);
		return -1;
	}
#endif

	switch (dev_cfg->index) {
	case 1:		/*MD_SYS2 */
#ifdef CONFIG_OF
		of_property_read_u32(dev_ptr->dev.of_node, "ccif,major",
				     &dev_cfg->major);
		of_property_read_u32(dev_ptr->dev.of_node, "ccif,minor_base",
				     &dev_cfg->minor_base);
		of_property_read_u32(dev_ptr->dev.of_node, "ccif,capability",
				     &dev_cfg->capability);

		hw_info->ap_ccif_base = of_iomap(dev_ptr->dev.of_node, 0);
		/*hw_info->md_ccif_base = hw_info->ap_ccif_base+0x1000; */
		node = of_find_compatible_node(NULL, NULL, "mediatek,MD_CCIF1");
		hw_info->md_ccif_base = of_iomap(node, 0);

		hw_info->ap_ccif_irq_id =
		    irq_of_parse_and_map(dev_ptr->dev.of_node, 0);
		hw_info->md_wdt_irq_id =
		    irq_of_parse_and_map(dev_ptr->dev.of_node, 1);

		/* register the IRQ with no trigger flags; the trigger type was already set by irq_of_parse_and_map() */
		hw_info->ap_ccif_irq_flags = IRQF_TRIGGER_NONE;
		hw_info->md_wdt_irq_flags = IRQF_TRIGGER_NONE;
#endif

		hw_info->sram_size = CCIF_SRAM_SIZE;
		hw_info->md_rgu_base = MD2_RGU_BASE;
		hw_info->md_boot_slave_Vector = MD2_BOOT_VECTOR;
		hw_info->md_boot_slave_Key = MD2_BOOT_VECTOR_KEY;
		hw_info->md_boot_slave_En = MD2_BOOT_VECTOR_EN;

#if !defined(CONFIG_MTK_LEGACY)
		clk_scp_sys_md2_main =
		    devm_clk_get(&dev_ptr->dev, "scp-sys-md2-main");
		if (IS_ERR(clk_scp_sys_md2_main)) {
			CCCI_ERR_MSG(dev_cfg->index, TAG,
				     "modem %d get scp-sys-md2-main failed\n",
				     dev_cfg->index + 1);
			return -1;
		}
#endif
		break;
	case 2:		/*MD_SYS3 */
#ifdef CONFIG_OF
		of_property_read_u32(dev_ptr->dev.of_node, "ccif,major",
				     &dev_cfg->major);
		of_property_read_u32(dev_ptr->dev.of_node, "ccif,minor_base",
				     &dev_cfg->minor_base);
		of_property_read_u32(dev_ptr->dev.of_node, "ccif,capability",
				     &dev_cfg->capability);

		hw_info->ap_ccif_base = of_iomap(dev_ptr->dev.of_node, 0);
		/*hw_info->md_ccif_base = hw_info->ap_ccif_base+0x1000; */
		node = of_find_compatible_node(NULL, NULL, "mediatek,MD_CCIF1");
		hw_info->md_ccif_base = of_iomap(node, 0);

		hw_info->ap_ccif_irq_id =
		    irq_of_parse_and_map(dev_ptr->dev.of_node, 0);
		hw_info->md_wdt_irq_id =
		    irq_of_parse_and_map(dev_ptr->dev.of_node, 1);

		/* register the IRQ with no trigger flags; the trigger type was already set by irq_of_parse_and_map() */
		hw_info->ap_ccif_irq_flags = IRQF_TRIGGER_NONE;
		hw_info->md_wdt_irq_flags = IRQF_TRIGGER_NONE;

		hw_info->md1_pccif_base =
		    (unsigned long)of_iomap(dev_ptr->dev.of_node, 1);
		hw_info->md3_pccif_base =
		    (unsigned long)of_iomap(dev_ptr->dev.of_node, 2);

		node =
		    of_find_compatible_node(NULL, NULL, "mediatek,INFRACFG_AO");
		hw_info->infra_ao_base = (unsigned long)of_iomap(node, 0);

		node = of_find_compatible_node(NULL, NULL, "mediatek,SLEEP");
		hw_info->sleep_base = (unsigned long)of_iomap(node, 0);

		node = of_find_compatible_node(NULL, NULL, "mediatek,TOPRGU");
		hw_info->toprgu_base = (unsigned long)of_iomap(node, 0);

		CCCI_INF_MSG(dev_cfg->index, TAG,
			     "infra_ao_base=0x%lx, sleep_base=0x%lx, toprgu_base=0x%lx\n",
			     hw_info->infra_ao_base, hw_info->sleep_base,
			     hw_info->toprgu_base);
#endif

		hw_info->sram_size = CCIF_SRAM_SIZE;
		hw_info->md_rgu_base = MD3_RGU_BASE;

#if !defined(CONFIG_MTK_LEGACY)
		clk_scp_sys_md3_main =
		    devm_clk_get(&dev_ptr->dev, "scp-sys-md2-main");
		if (IS_ERR(clk_scp_sys_md3_main)) {
			CCCI_ERR_MSG(dev_cfg->index, TAG,
				     "modem %d get scp-sys-md2-main failed\n",
				     dev_cfg->index + 1);
			return -1;
		}
#endif

		/*no boot slave for md3 */
		/*
		   hw_info->md_boot_slave_Vector = MD3_BOOT_VECTOR;
		   hw_info->md_boot_slave_Key = MD3_BOOT_VECTOR_KEY;
		   hw_info->md_boot_slave_En = MD3_BOOT_VECTOR_EN;
		 */
		break;
	default:
		return -1;
	}

	CCCI_INF_MSG(dev_cfg->index, TAG,
		     "modem ccif of node get dev_major:%d\n", dev_cfg->major);
	CCCI_INF_MSG(dev_cfg->index, TAG,
		     "modem ccif of node get minor_base:%d\n",
		     dev_cfg->minor_base);
	CCCI_INF_MSG(dev_cfg->index, TAG,
		     "modem ccif of node get capability:%d\n",
		     dev_cfg->capability);

	CCCI_INF_MSG(dev_cfg->index, TAG, "ap_ccif_base:0x%p\n",
		     (void *)hw_info->ap_ccif_base);
	CCCI_INF_MSG(dev_cfg->index, TAG, "ccif_irq_id:%d\n",
		     hw_info->ap_ccif_irq_id);
	CCCI_INF_MSG(dev_cfg->index, TAG, "md_wdt_irq_id:%d\n",
		     hw_info->md_wdt_irq_id);

	return 0;
}
Example #22
static void napi_polling_timer_func(unsigned long data)
{
    struct ccci_port *port = (struct ccci_port *)data;
    CCCI_ERR_MSG(port->modem->index, NET, "lost NAPI polling on %s\n", port->name);
}
Example #23
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct ccci_header *ccci_h;
	int ret;
	int skb_len = skb->len;
	static int tx_busy_retry_cnt;
	int tx_queue, tx_channel;
#ifdef PORT_NET_TRACE
	unsigned long long send_time = 0;
	unsigned long long total_time = 0;

	total_time = sched_clock();
#endif
#ifndef FEATURE_SEQ_CHECK_EN
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;

	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
		     port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n", port->name, skb_headroom(skb), skb->len);
#endif

	if (unlikely(skb->len > CCCI_NET_MTU)) {
		CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCCI_NET_MTU, dev->mtu, skb->len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
		CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n",
			     port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (unlikely(port->modem->md_state != READY)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (likely((port->rx_ch == CCCI_CCMNI1_RX) || (port->rx_ch == CCCI_CCMNI2_RX))) {
		/* only use on ccmni0 && ccmni1 */
		if (unlikely(skb_is_ack(skb))) {
			tx_channel = port->tx_ch == CCCI_CCMNI1_TX ? CCCI_CCMNI1_DL_ACK : CCCI_CCMNI2_DL_ACK;
			tx_queue = NET_ACK_TXQ_INDEX(port);
		} else {
			tx_channel = port->tx_ch;
			tx_queue = NET_DAT_TXQ_INDEX(port);
		}
	} else {
		tx_channel = port->tx_ch;
		tx_queue = NET_DAT_TXQ_INDEX(port);
	}

	ccci_h = (struct ccci_header *)skb_push(skb, sizeof(struct ccci_header));
	ccci_h->channel = tx_channel;
	ccci_h->data[0] = 0;
	ccci_h->data[1] = skb->len;	/* as skb->len already included ccci_header after skb_push */
#ifndef FEATURE_SEQ_CHECK_EN
	ccci_h->reserved = nent->tx_seq_num++;
#else
	ccci_h->reserved = 0;
#endif
#ifdef PORT_NET_TRACE
	send_time = sched_clock();
#endif
	ret = port->modem->ops->send_request(port->modem, tx_queue, NULL, skb);
#ifdef PORT_NET_TRACE
	send_time = sched_clock() - send_time;
#endif
	if (ret) {
		skb_pull(skb, sizeof(struct ccci_header));
		/* undo header, in next retry, we'll reserve header again */
		goto tx_busy;
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;

	tx_busy_retry_cnt = 0;
#ifdef PORT_NET_TRACE
	total_time = sched_clock() - total_time;
	trace_port_net_tx(port->modem->index, tx_queue, port->tx_ch, 0,
			  (unsigned int)send_time, (unsigned int)(total_time));
#endif
	return NETDEV_TX_OK;

 tx_busy:
	if (unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
		if ((tx_busy_retry_cnt) % 20000 == 0)
			CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n", port->name,
				     tx_busy_retry_cnt);
		tx_busy_retry_cnt++;
	} else {
		port->tx_busy_count++;
	}
#ifdef PORT_NET_TRACE
	trace_port_net_error(port->modem->index, tx_queue, port->tx_ch, port->tx_busy_count, __LINE__);
#endif
	return NETDEV_TX_BUSY;
}
Example #24
int ccmni_send_pkt(int md_id, int tx_ch, void *data)
{
	struct ccci_modem *md = ccci_get_modem_by_id(md_id);
	struct ccci_port *port = NULL;
	/* struct ccci_request *req = NULL; */
	struct ccci_header *ccci_h;
	struct sk_buff *skb = (struct sk_buff *)data;
	int tx_ch_to_port, tx_queue;
	int ret;
#ifdef PORT_NET_TRACE
	unsigned long long send_time = 0;
	unsigned long long get_port_time = 0;
	unsigned long long total_time = 0;

	total_time = sched_clock();
#endif
	if (!md)
		return CCMNI_ERR_TX_INVAL;
	if (unlikely(md->md_state != READY))
		return CCMNI_ERR_MD_NO_READY;

	if (tx_ch == CCCI_CCMNI1_DL_ACK)
		tx_ch_to_port = CCCI_CCMNI1_TX;
	else if (tx_ch == CCCI_CCMNI2_DL_ACK)
		tx_ch_to_port = CCCI_CCMNI2_TX;
	else if (tx_ch == CCCI_CCMNI3_DL_ACK)
		tx_ch_to_port = CCCI_CCMNI3_TX;
	else
		tx_ch_to_port = tx_ch;
#ifdef PORT_NET_TRACE
	get_port_time = sched_clock();
#endif
	port = md->ops->get_port_by_channel(md, tx_ch_to_port);
#ifdef PORT_NET_TRACE
	get_port_time = sched_clock() - get_port_time;
#endif
	if (!port) {
		CCCI_ERR_MSG(0, NET, "port==NULL\n");
		return CCMNI_ERR_TX_INVAL;
	}
	/* req_alloc_time=sched_clock(); */
	/* req = ccci_alloc_req(OUT, -1, 1, 0); */
	/* req_alloc_time=sched_clock()-req_alloc_time; */
	/* if(!req) { */
	/* return CCMNI_ERR_TX_BUSY; */
	/* } */
	if (tx_ch == CCCI_CCMNI1_DL_ACK || tx_ch == CCCI_CCMNI2_DL_ACK || tx_ch == CCCI_CCMNI3_DL_ACK)
		tx_queue = NET_ACK_TXQ_INDEX(port);
	else
		tx_queue = NET_DAT_TXQ_INDEX(port);

	/* req->skb = skb; */
	/* req->policy = FREE; */
	ccci_h = (struct ccci_header *)skb_push(skb, sizeof(struct ccci_header));
	ccci_h = (struct ccci_header *)skb->data;
	ccci_h->channel = tx_ch;
	ccci_h->data[0] = 0;
	ccci_h->data[1] = skb->len;	/* as skb->len already included ccci_header after skb_push */
/* #ifndef FEATURE_SEQ_CHECK_EN */
/* ccci_h->reserved = nent->tx_seq_num++; */
/* #else */
	ccci_h->reserved = 0;
/* #endif */
	CCCI_DBG_MSG(md_id, NET, "port %s send txq=%d: %08X, %08X, %08X, %08X\n", port->name, tx_queue,
		     ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->reserved);
#ifdef PORT_NET_TRACE
	send_time = sched_clock();
#endif
	ret = port->modem->ops->send_request(port->modem, tx_queue, NULL, skb);
#ifdef PORT_NET_TRACE
	send_time = sched_clock() - send_time;
#endif
	if (ret) {
		skb_pull(skb, sizeof(struct ccci_header));
			/* undo header, in next retry, we'll reserve header again */
		ret = CCMNI_ERR_TX_BUSY;
	} else {
		ret = CCMNI_ERR_TX_OK;
	}
#ifdef PORT_NET_TRACE
	if (ret == CCMNI_ERR_TX_OK) {
		total_time = sched_clock() - total_time;
		trace_port_net_tx(md_id, tx_queue, tx_ch, (unsigned int)get_port_time, (unsigned int)send_time,
				  (unsigned int)(total_time));
	} else {
		trace_port_net_error(port->modem->index, tx_queue, port->tx_ch, port->tx_busy_count, __LINE__);
	}
#endif
	return ret;
}
Example #25
int ccci_get_ccmni_channel(int md_id, int ccmni_idx, struct ccmni_ch *channel)
{
	int ret = 0;

	switch (ccmni_idx) {
	case 0:
		channel->rx = CCCI_CCMNI1_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI1_TX;
		channel->tx_ack = 0xFF;
		break;
	case 1:
		channel->rx = CCCI_CCMNI2_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI2_TX;
		channel->tx_ack = 0xFF;
		break;
	case 2:
		channel->rx = CCCI_CCMNI3_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI3_TX;
		channel->tx_ack = 0xFF;
		break;
	case 3:
		channel->rx = CCCI_CCMNI4_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI4_TX;
		channel->tx_ack = 0xFF;
		break;
	case 4:
		channel->rx = CCCI_CCMNI5_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI5_TX;
		channel->tx_ack = 0xFF;
		break;
	case 5:
		channel->rx = CCCI_CCMNI6_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI6_TX;
		channel->tx_ack = 0xFF;
		break;
	case 6:
		channel->rx = CCCI_CCMNI7_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI7_TX;
		channel->tx_ack = 0xFF;
		break;
	case 7:
		channel->rx = CCCI_CCMNI8_RX;
		channel->rx_ack = 0xFF;
		channel->tx = CCCI_CCMNI8_TX;
		channel->tx_ack = 0xFF;
		break;
	default:
		CCCI_ERR_MSG(md_id, NET, "invalid ccmni index=%d\n", ccmni_idx);
		ret = -1;
		break;
	}

	return ret;
}
Example #26
int md_ccif_get_modem_hw_info(struct platform_device *dev_ptr, struct ccci_dev_cfg *dev_cfg, struct md_hw_info *hw_info)
{
    struct device_node *node=NULL;
    memset(dev_cfg, 0, sizeof(struct ccci_dev_cfg));
    memset(hw_info, 0, sizeof(struct md_hw_info));

    #ifdef CONFIG_OF
    if(dev_ptr->dev.of_node == NULL) {
        CCCI_ERR_MSG(dev_cfg->index, TAG, "modem OF node NULL\n");
        return -1;
    }

    of_property_read_u32(dev_ptr->dev.of_node, "cell-index", &dev_cfg->index);
    CCCI_INF_MSG(dev_cfg->index, TAG, "modem hw info get idx:%d\n", dev_cfg->index);
    if(!get_modem_is_enabled(dev_cfg->index)) {
        CCCI_ERR_MSG(dev_cfg->index, TAG, "modem %d not enable, exit\n", dev_cfg->index + 1);
        return -1;
    }
    #else
    struct ccci_dev_cfg* dev_cfg_ptr = (struct ccci_dev_cfg*)dev_ptr->dev.platform_data;
    dev_cfg->index = dev_cfg_ptr->index;

    CCCI_INF_MSG(dev_cfg->index, TAG, "modem hw info get idx:%d\n", dev_cfg->index);
    if(!get_modem_is_enabled(dev_cfg->index)) {
        CCCI_ERR_MSG(dev_cfg->index, TAG, "modem %d not enable, exit\n", dev_cfg->index + 1);
        return -1;
    }
    #endif

    switch(dev_cfg->index) {
    case 1: //MD_SYS2
        #ifdef CONFIG_OF
        of_property_read_u32(dev_ptr->dev.of_node, "ccif,major", &dev_cfg->major);
        of_property_read_u32(dev_ptr->dev.of_node, "ccif,minor_base", &dev_cfg->minor_base);
        of_property_read_u32(dev_ptr->dev.of_node, "ccif,capability", &dev_cfg->capability);

        hw_info->ap_ccif_base = of_iomap(dev_ptr->dev.of_node, 0);
        //hw_info->md_ccif_base = hw_info->ap_ccif_base+0x1000;
        node = of_find_compatible_node(NULL, NULL, "mediatek,MD_CCIF1");
        hw_info->md_ccif_base = of_iomap(node, 0);

        hw_info->ap_ccif_irq_id = irq_of_parse_and_map(dev_ptr->dev.of_node, 0);
        hw_info->md_wdt_irq_id = irq_of_parse_and_map(dev_ptr->dev.of_node, 1);

        // register the IRQ with no trigger flags; the trigger type was already set by "irq_of_parse_and_map"
        hw_info->ap_ccif_irq_flags = IRQF_TRIGGER_NONE;
        hw_info->md_wdt_irq_flags = IRQF_TRIGGER_NONE;
        #endif

        hw_info->sram_size = CCIF_SRAM_SIZE;
        hw_info->md_rgu_base = MD2_RGU_BASE;
        hw_info->md_boot_slave_Vector = MD2_BOOT_VECTOR;
        hw_info->md_boot_slave_Key = MD2_BOOT_VECTOR_KEY;
        hw_info->md_boot_slave_En = MD2_BOOT_VECTOR_EN;
        
        break;
    default:
        return -1;
    }

    CCCI_INF_MSG(dev_cfg->index, TAG, "modem ccif of node get dev_major:%d\n", dev_cfg->major);
    CCCI_INF_MSG(dev_cfg->index, TAG, "modem ccif of node get minor_base:%d\n", dev_cfg->minor_base);
    CCCI_INF_MSG(dev_cfg->index, TAG, "modem ccif of node get capability:%d\n", dev_cfg->capability);

    CCCI_INF_MSG(dev_cfg->index, TAG, "ap_ccif_base:0x%p\n",(void*) hw_info->ap_ccif_base);
    CCCI_INF_MSG(dev_cfg->index, TAG, "ccif_irq_id:%d\n", hw_info->ap_ccif_irq_id);
    CCCI_INF_MSG(dev_cfg->index, TAG, "md_wdt_irq_id:%d\n", hw_info->md_wdt_irq_id);

    return 0;
}
Example #27
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
    struct ccci_request *req = NULL;
    struct ccci_header *ccci_h;
    int ret;
    int skb_len = skb->len;
    static int tx_busy_retry_cnt = 0;
    int tx_queue, tx_channel;

#ifndef FEATURE_SEQ_CHECK_EN
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
                 port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n", port->name, skb_headroom(skb), skb->len);
#endif

    if(unlikely(skb->len > CCMNI_MTU)) {
        CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCMNI_MTU, dev->mtu, skb->len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
        CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n",
                     port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(port->modem->md_state != READY)) {
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    req = ccci_alloc_req(OUT, -1, 1, 0);
    if(req) {
        if(likely(port->rx_ch != CCCI_CCMNI3_RX)) {
            if(unlikely(skb_is_ack(skb))) {
                tx_channel = port->tx_ch==CCCI_CCMNI1_TX?CCCI_CCMNI1_DL_ACK:CCCI_CCMNI2_DL_ACK;
                tx_queue = NET_ACK_TXQ_INDEX(port);
            } else {
                tx_channel = port->tx_ch;
                tx_queue = NET_DAT_TXQ_INDEX(port);
            }
        } else {
            tx_channel = port->tx_ch;
            tx_queue = NET_DAT_TXQ_INDEX(port);
        }

        req->skb = skb;
        req->policy = FREE;
        ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
        ccci_h->channel = tx_channel;
        ccci_h->data[0] = 0;
        ccci_h->data[1] = skb->len; // as skb->len already included ccci_header after skb_push
#ifndef FEATURE_SEQ_CHECK_EN
        ccci_h->reserved = nent->tx_seq_num++;
#else
        ccci_h->reserved = 0;
#endif
        ret = port->modem->ops->send_request(port->modem, tx_queue, req);
        if(ret) {
            skb_pull(skb, sizeof(struct ccci_header)); // undo header, in next retry, we'll reserve header again
            req->policy = NOOP; // if you return busy, do NOT free skb as network may still use it
            ccci_free_req(req);
            goto tx_busy;
        }
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb_len;
        tx_busy_retry_cnt = 0;
    } else {
        CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
        goto tx_busy;
    }
    return NETDEV_TX_OK;

tx_busy:
    if(unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
        if((++tx_busy_retry_cnt)%20000 == 0)
            CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n", port->name, tx_busy_retry_cnt);
    } else {
        port->tx_busy_count++;
    }
    return NETDEV_TX_BUSY;
}
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned
 * the 1st is decided by @blk1, and the 2nd and 3rd are decided by @blk2, waiting on @wq.
 *
 * we removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain request to
 * be completed. this is because requests are recycled and their state is reset, so if a request
 * is completed and then used again, the poor guy who is waiting for it may never see the state
 * transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
	int i;
	struct ccci_request *req = NULL;
	struct sk_buff *skb = NULL;
	unsigned long flags;

#ifdef CCCI_STATISTIC
	core_statistic_data.alloc_count++;
#endif

retry:
	spin_lock_irqsave(&req_pool_lock, flags);
	for(i=0; i<BM_POOL_SIZE; i++) {
		if(req_pool[i].state == IDLE) {
			// important check when a request is passed cross-layer: make sure this request is no longer on any list
			if(req_pool[i].entry.next == LIST_POISON1 && req_pool[i].entry.prev == LIST_POISON2) {
				req = &req_pool[i];
				CCCI_DBG_MSG(-1, BM, "%ps alloc req=%p, i=%d size=%d\n", __builtin_return_address(0), req, i, size);
				req->state = FLYING;
				break;
			} else {
				// should not happen
				CCCI_ERR_MSG(-1, BM, "idle but in list i=%d\n", i);
			}
		}
	}
	if(req) {
		req->dir = dir;
#ifdef CCCI_STATISTIC
		req->time_step = 0;
		req->time_stamp = ktime_get_real();
		memset(req->time_trace, 0, sizeof(req->time_trace));
#endif
		req_pool_cnt--;
		CCCI_DBG_MSG(-1, BM, "pool count-=%d\n", req_pool_cnt);
	}
	spin_unlock_irqrestore(&req_pool_lock, flags);
	if(req) {
		if(size>0) {
			skb = ccci_alloc_skb(size);
			req->skb = skb;
			if(!skb) // should not happen
				CCCI_ERR_MSG(-1, BM, "NULL skb for req %p size %d\n", req, size);
			CCCI_DBG_MSG(-1, BM, "req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
		} else {
			req->skb = NULL;
		}
		req->blocking = blk2;
	} else {
#ifdef CCCI_STATISTIC
		core_statistic_data.alloc_empty_count++;
#endif
		if(blk1) {
			wait_event_interruptible(req_pool_wq, (req_pool_cnt>0));
			goto retry;
		}
		CCCI_INF_MSG(-1, BM, "%ps alloc req fail, no retry\n", __builtin_return_address(0));
	}
	
	return req;
}
int md_cd_get_modem_hw_info(struct platform_device *dev_ptr, struct ccci_dev_cfg *dev_cfg, struct md_hw_info *hw_info)
{
    struct device_node *node=NULL;
    memset(dev_cfg, 0, sizeof(struct ccci_dev_cfg));
    memset(hw_info, 0, sizeof(struct md_hw_info));

    if(dev_ptr->dev.of_node == NULL) {
        CCCI_ERR_MSG(dev_cfg->index, TAG, "modem OF node NULL\n");
        return -1;
    }

    of_property_read_u32(dev_ptr->dev.of_node, "cell-index", &dev_cfg->index);
    CCCI_INF_MSG(dev_cfg->index, TAG, "modem hw info get idx:%d\n", dev_cfg->index);
    if(!get_modem_is_enabled(dev_cfg->index)) {
        CCCI_ERR_MSG(dev_cfg->index, TAG, "modem %d not enable, exit\n", dev_cfg->index + 1);
        return -1;
    }

    switch(dev_cfg->index) {
    case 0: //MD_SYS1
        of_property_read_u32(dev_ptr->dev.of_node, "cldma,major", &dev_cfg->major);
        of_property_read_u32(dev_ptr->dev.of_node, "cldma,minor_base", &dev_cfg->minor_base);
        of_property_read_u32(dev_ptr->dev.of_node, "cldma,capability", &dev_cfg->capability);

        hw_info->cldma_ap_ao_base = of_iomap(dev_ptr->dev.of_node, 0);
        hw_info->cldma_md_ao_base = of_iomap(dev_ptr->dev.of_node, 1);
        hw_info->cldma_ap_pdn_base = of_iomap(dev_ptr->dev.of_node, 2);
        hw_info->cldma_md_pdn_base = of_iomap(dev_ptr->dev.of_node, 3);
        hw_info->ap_ccif_base = of_iomap(dev_ptr->dev.of_node, 4);        
        hw_info->md_ccif_base = of_iomap(dev_ptr->dev.of_node, 5);
        hw_info->cldma_irq_id = irq_of_parse_and_map(dev_ptr->dev.of_node, 0);
        hw_info->ap_ccif_irq_id = irq_of_parse_and_map(dev_ptr->dev.of_node, 1);
        hw_info->md_wdt_irq_id = irq_of_parse_and_map(dev_ptr->dev.of_node, 2);

        // register the IRQ with no trigger flags; the trigger type was already set by "irq_of_parse_and_map"
        hw_info->cldma_irq_flags = IRQF_TRIGGER_NONE;
        hw_info->ap_ccif_irq_flags = IRQF_TRIGGER_NONE;
        hw_info->md_wdt_irq_flags = IRQF_TRIGGER_NONE;
        hw_info->ap2md_bus_timeout_irq_flags = IRQF_TRIGGER_NONE;

        hw_info->sram_size = CCIF_SRAM_SIZE;
        hw_info->md_rgu_base = MD_RGU_BASE;
        hw_info->md_boot_slave_Vector = MD_BOOT_VECTOR;
        hw_info->md_boot_slave_Key = MD_BOOT_VECTOR_KEY;
        hw_info->md_boot_slave_En = MD_BOOT_VECTOR_EN;
#if !defined(CONFIG_MTK_LEGACY)
        clk_scp_sys_md1_main = devm_clk_get(&dev_ptr->dev,"scp-sys-md1-main");
        if(IS_ERR(clk_scp_sys_md1_main)){
            CCCI_ERR_MSG(dev_cfg->index, TAG, "modem %d get scp-sys-md1-main failed\n", dev_cfg->index + 1);
            return -1;
        }
#endif
        break;
    default:
        return -1;
    }

    CCCI_INF_MSG(dev_cfg->index, TAG, "modem cldma of node get dev_major:%d\n", dev_cfg->major);
    CCCI_INF_MSG(dev_cfg->index, TAG, "modem cldma of node get minor_base:%d\n", dev_cfg->minor_base);
    CCCI_INF_MSG(dev_cfg->index, TAG, "modem cldma of node get capability:%d\n", dev_cfg->capability);
    CCCI_INF_MSG(dev_cfg->index, TAG, "ap_cldma: ao_base=0x%p, pdn_base=0x%p\n", (void*)hw_info->cldma_ap_ao_base,(void*)hw_info->cldma_ap_pdn_base);
    CCCI_INF_MSG(dev_cfg->index, TAG, "md_cldma: ao_base=0x%p, pdn_base=0x%p\n",(void*) hw_info->cldma_md_ao_base,(void*) hw_info->cldma_md_pdn_base);

    CCCI_INF_MSG(dev_cfg->index, TAG, "ap_ccif_base:0x%p, md_ccif_base:0x%p\n",(void*) hw_info->ap_ccif_base,(void*) hw_info->md_ccif_base);
    CCCI_INF_MSG(dev_cfg->index, TAG, "cldma_irq_id:%d\n", hw_info->cldma_irq_id);
    CCCI_INF_MSG(dev_cfg->index, TAG, "ccif_irq_id:%d\n", hw_info->ap_ccif_irq_id);
    CCCI_INF_MSG(dev_cfg->index, TAG, "md_wdt_irq_id:%d\n", hw_info->md_wdt_irq_id);

    return 0;
}