Example #1
0
void ccci_skb_enqueue(struct ccci_skb_queue *queue, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->skb_list.lock, flags);
	if (queue->skb_list.qlen < queue->max_len) {
		__skb_queue_tail(&queue->skb_list, newsk);
		if (queue->skb_list.qlen > queue->max_history)
			queue->max_history = queue->skb_list.qlen;
	} else {
		/* queue is full: drop the new skb */
#ifdef CCCI_MEM_BM_DEBUG
		if (ccci_skb_addr_checker(newsk)) {
			CCCI_INF_MSG(-1, BM, "ccci_skb_enqueue:ccci_skb_addr_checker failed!\n");
			ccci_mem_dump(-1, queue, sizeof(struct ccci_skb_queue));
			dump_stack();
		}
#endif
		dev_kfree_skb_any(newsk);
	}
	spin_unlock_irqrestore(&queue->skb_list.lock, flags);
}

void ccci_skb_queue_init(struct ccci_skb_queue *queue, unsigned int skb_size, unsigned int max_len,
	char fill_now)
{
	int i;

	queue->magic_header = SKB_MAGIC_HEADER;
	queue->magic_footer = SKB_MAGIC_FOOTER;
#ifdef CCCI_WP_DEBUG
	if (((unsigned long)queue) == ((unsigned long)(&skb_pool_16))) {
		CCCI_INF_MSG(-1, BM, "ccci_skb_queue_init: add hwp skb_pool_16.magic_footer=%p!\n",
			&queue->magic_footer);
		enable_watchpoint(&queue->magic_footer);
	}
#endif
	skb_queue_head_init(&queue->skb_list);
	queue->max_len = max_len;
	if (fill_now) {
		for (i = 0; i < queue->max_len; i++) {
			struct sk_buff *skb = __alloc_skb_from_kernel(skb_size, GFP_KERNEL);
			if (skb != NULL)
				skb_queue_tail(&queue->skb_list, skb);
		}
		queue->pre_filled = 1;
	} else {
		queue->pre_filled = 0;
	}
	queue->max_history = 0;
}
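A minimal usage sketch for the pool API above, assuming the CCCI buffer-manager declarations are in scope; skb_pool_demo, DEMO_SKB_SIZE and DEMO_POOL_LEN are illustrative names, not identifiers from the driver:

/* hypothetical caller: pre-fill a pool at init time, recycle skbs into it later */
#define DEMO_SKB_SIZE 1664
#define DEMO_POOL_LEN 16

static struct ccci_skb_queue skb_pool_demo;

static void demo_pool_setup(void)
{
	/* fill_now=1: allocate DEMO_POOL_LEN skbs of DEMO_SKB_SIZE bytes up front */
	ccci_skb_queue_init(&skb_pool_demo, DEMO_SKB_SIZE, DEMO_POOL_LEN, 1);
}

static void demo_pool_recycle(struct sk_buff *skb)
{
	/* if the pool is already at max_len, the skb is freed instead of queued */
	ccci_skb_enqueue(&skb_pool_demo, skb);
}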
Example #2
0
static int ccmni_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));

    switch(cmd) {
    case SIOCSTXQSTATE:
        if(!ifr->ifr_ifru.ifru_ivalue) {
            if(atomic_read(&port->usage_cnt) > 0) {
                atomic_dec(&port->usage_cnt);
                netif_stop_queue(dev);
                dev->watchdog_timeo = 60*HZ; // stopping the queue does not stop the Tx watchdog (ndo_tx_timeout)
            }
        } else {
            if (atomic_read(&port->usage_cnt) <= 0) {
                if(netif_running(dev) && netif_queue_stopped(dev))
                    netif_wake_queue(dev);
                dev->watchdog_timeo = 1*HZ;
                atomic_inc(&port->usage_cnt);
            }
        }
        CCCI_INF_MSG(port->modem->index, NET, "SIOCSTXQSTATE request=%d on %s %d\n", ifr->ifr_ifru.ifru_ivalue, port->name, atomic_read(&port->usage_cnt));
        break;
    default:
        CCCI_INF_MSG(port->modem->index, NET, "unknown ioctl cmd=%d on %s\n", cmd, port->name);
        break;
    }
    return 0;
}
Example #3
0
struct ccci_ringbuf *ccci_create_ringbuf(int md_id, unsigned char *buf, int buf_size, int rx_size, int tx_size)
{
    int buflen;
    struct ccci_ringbuf *ringbuf = NULL;

    buflen = CCCI_RINGBUF_CTL_LEN + rx_size + tx_size;
    CCCI_INF_MSG(md_id, TAG, "crb:buf=0x%p, buf_size=%d,buflen=%d,rx_size=%d,tx_size=%d,ctr_len=%zu\n",
        buf, buf_size, buflen, rx_size, tx_size, CCCI_RINGBUF_CTL_LEN);
    if (buf_size < buflen)
        return NULL;
    memset(buf, 0x0, buflen);
    // set ccif header
    *((unsigned int *)buf) = CCCI_RBF_HEADER;
    CCCI_DBG_MSG(md_id, TAG, "crb:Header(0x%p)=0x%x\n", buf, *((unsigned int *)buf));
    // set ccif footer
    *((unsigned int *)(buf + buflen - sizeof(int))) = CCCI_RBF_FOOTER;
    CCCI_DBG_MSG(md_id, TAG, "crb:Footer(0x%p)=0x%x\n", buf + buflen - sizeof(int),
        *((unsigned int *)(buf + buflen - sizeof(int))));
    buf += sizeof(int);
    ringbuf = (struct ccci_ringbuf *)buf;
    ringbuf->rx_control.length = rx_size;
    ringbuf->rx_control.read   = 0;
    ringbuf->rx_control.write  = 0;

    ringbuf->tx_control.length = tx_size;
    ringbuf->tx_control.read   = 0;
    ringbuf->tx_control.write  = 0;
    CCCI_INF_MSG(md_id, TAG, "crb:rbf=0x%p\n",ringbuf);
    return ringbuf;
}
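For reference, a sketch of the buffer layout ccci_create_ringbuf writes; the placement of the rx/tx data regions inside the remaining space is an assumption inferred from the code above, since only the control fields are initialized here:

/*
 * buf (buflen = CCCI_RINGBUF_CTL_LEN + rx_size + tx_size):
 *
 *   +------------------+ <- buf
 *   | CCCI_RBF_HEADER  |   4 bytes
 *   +------------------+ <- buf + sizeof(int), returned as struct ccci_ringbuf *
 *   | rx/tx_control    |   read/write/length indices for both directions
 *   +------------------+
 *   | rx data region   |   rx_size bytes (placement assumed)
 *   +------------------+
 *   | tx data region   |   tx_size bytes (placement assumed)
 *   +------------------+ <- buf + buflen - sizeof(int)
 *   | CCCI_RBF_FOOTER  |   4 bytes
 *   +------------------+
 */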
int md_cd_power_on(struct ccci_modem *md)
{
    int ret = 0;
    struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
#ifdef FEATURE_RF_CLK_BUF
    //config RFICx as BSI
    mutex_lock(&clk_buf_ctrl_lock); // fixme,clkbuf, ->down(&clk_buf_ctrl_lock_2);
    CCCI_INF_MSG(md->index, TAG, "clock buffer, BSI mode\n"); 
    mt_set_gpio_mode(GPIO_RFIC0_BSI_CK,  GPIO_MODE_01); 
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D0,  GPIO_MODE_01);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D1,  GPIO_MODE_01);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D2,  GPIO_MODE_01);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_CS,  GPIO_MODE_01);
#endif
    // power on MD_INFRA and MODEM_TOP
    switch (md->index) {
    case MD_SYS1:
        CCCI_INF_MSG(md->index, TAG, "Call start md_power_on()\n");
        ret = md_power_on(SYS_MD1);
        CCCI_INF_MSG(md->index, TAG, "Call end md_power_on() ret=%d\n", ret);
        break;
    }
#ifdef FEATURE_RF_CLK_BUF
    mutex_unlock(&clk_buf_ctrl_lock); // fixme,clkbuf, ->delete
#endif
    if (ret)
        return ret;
    // disable MD WDT
    cldma_write32(md_ctrl->md_rgu_base, WDT_MD_MODE, WDT_MD_MODE_KEY);
    return 0;
}
Example #5
0
void ccci_magic_checker(void)
{
	if (req_pool.magic_header != REQ_MAGIC_HEADER || req_pool.magic_footer != REQ_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "req_pool magic error!\n");
		ccci_mem_dump(-1, &req_pool, sizeof(struct ccci_req_queue));
		dump_stack();
	}

	if (skb_pool_16.magic_header != SKB_MAGIC_HEADER || skb_pool_16.magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "skb_pool_16 magic error!\n");
		ccci_mem_dump(-1, &skb_pool_16, sizeof(struct ccci_skb_queue));
		dump_stack();
	}

	if (skb_pool_1_5K.magic_header != SKB_MAGIC_HEADER || skb_pool_1_5K.magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "skb_pool_1_5K magic error!\n");
		ccci_mem_dump(-1, &skb_pool_1_5K, sizeof(struct ccci_skb_queue));
		dump_stack();
	}

	if (skb_pool_4K.magic_header != SKB_MAGIC_HEADER || skb_pool_4K.magic_footer != SKB_MAGIC_FOOTER) {
		CCCI_INF_MSG(-1, BM, "skb_pool_4K magic error!\n");
		ccci_mem_dump(-1, &skb_pool_4K, sizeof(struct ccci_skb_queue));
		dump_stack();
	}
}
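The checks above rely on guard words inside each pool struct; a sketch of the assumed shape (the real field order of struct ccci_skb_queue is not shown in these snippets, so this is illustrative only):

/* corruption that overruns the struct from either side lands on one of the
 * magic words, which ccci_magic_checker then detects */
struct demo_guarded_pool {
	unsigned int magic_header;	/* must stay SKB_MAGIC_HEADER */
	struct sk_buff_head skb_list;
	unsigned int max_len;
	unsigned int magic_footer;	/* must stay SKB_MAGIC_FOOTER */
};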
int ccci_modem_suspend(struct platform_device *dev, pm_message_t state)
{
	struct ccci_modem *md = (struct ccci_modem *)dev->dev.platform_data;
	struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;

	CCCI_INF_MSG(md->index, TAG, "AP_BUSY(%p)=%x\n", md_ctrl->ap_ccif_base+APCCIF_BUSY, cldma_read32(md_ctrl->ap_ccif_base, APCCIF_BUSY));
	CCCI_INF_MSG(md->index, TAG, "MD_BUSY(%p)=%x\n", md_ctrl->md_ccif_base+APCCIF_BUSY, cldma_read32(md_ctrl->md_ccif_base, APCCIF_BUSY));

	return 0;
}
int ccci_modem_resume(struct platform_device *dev)
{
    struct ccci_modem *md = (struct ccci_modem *)dev->dev.platform_data;
    struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
    CCCI_INF_MSG(md->index, TAG, "ccci_modem_resume\n");
    return 0;
}
int ccci_modem_suspend(struct platform_device *dev, pm_message_t state)
{
    struct ccci_modem *md = (struct ccci_modem *)dev->dev.platform_data;
    struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
    CCCI_INF_MSG(md->index, TAG, "ccci_modem_suspend\n");
    return 0;
}
Example #9
0
void ccci_free_skb(struct sk_buff *skb, DATA_POLICY policy)
{
	CCCI_DBG_MSG(-1, BM, "%ps free skb %p, policy=%d, len=%d\n", __builtin_return_address(0),
		     skb, policy, skb_size(skb));
	switch (policy) {
	case RECYCLE:
		/* 1. reset sk_buff (take __alloc_skb as ref.) */
		skb->data = skb->head;
		skb->len = 0;
		skb_reset_tail_pointer(skb);
		/* 2. enqueue */
		if (skb_size(skb) < SKB_1_5K)
			ccci_skb_enqueue(&skb_pool_16, skb);
		else if (skb_size(skb) < SKB_4K)
			ccci_skb_enqueue(&skb_pool_1_5K, skb);
		else
			ccci_skb_enqueue(&skb_pool_4K, skb);
		break;
	case FREE:
#ifdef CCCI_MEM_BM_DEBUG
		if (ccci_skb_addr_checker(skb)) {
			CCCI_INF_MSG(-1, BM, "ccci_skb_addr_checker failed\n");
			dump_stack();
		}
#endif
		dev_kfree_skb_any(skb);
		break;
	case NOOP:
	default:
		break;
	}
}
int md_cd_power_off(struct ccci_modem *md, unsigned int timeout)
{
    int ret = 0;
#ifdef FEATURE_RF_CLK_BUF
    mutex_lock(&clk_buf_ctrl_lock);
#endif
    // power off MD_INFRA and MODEM_TOP
    switch (md->index) {
    case MD_SYS1:
        ret = md_power_off(SYS_MD1, timeout);
        break;
    }
#ifdef FEATURE_RF_CLK_BUF
    // config RFICx as GPIO
    CCCI_INF_MSG(md->index, TAG, "clock buffer, GPIO mode\n"); 
    mt_set_gpio_mode(GPIO_RFIC0_BSI_CK,  GPIO_MODE_GPIO); 
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D0,  GPIO_MODE_GPIO);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D1,  GPIO_MODE_GPIO);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_D2,  GPIO_MODE_GPIO);
    mt_set_gpio_mode(GPIO_RFIC0_BSI_CS,  GPIO_MODE_GPIO);
	mutex_unlock(&clk_buf_ctrl_lock);
#endif

    return ret;
}
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * The 1st is decided by @blk1; the 2nd and 3rd are decided by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when you set blk1=1.
 *
 * We removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain
 * request to be completed. This is because a request will be recycled and its state reset,
 * so if a request is completed and then used again, the poor guy who is waiting for it may
 * never see the state transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
    struct ccci_request *req = NULL;

retry:
    req = ccci_req_dequeue(&req_pool);
    if (req) {
        if (size > 0) {
            req->skb = ccci_alloc_skb(size, blk1);
            req->policy = RECYCLE;
            if (req->skb)
                CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
        } else {
            req->skb = NULL;
            req->policy = NOOP;
        }
        req->blocking = blk2;
    } else {
        if (blk1) {
            wait_event_interruptible(req_pool.req_wq, (req_pool.count > 0));
            goto retry;
        }
        CCCI_INF_MSG(-1, BM, "fail to alloc req for %ps, no retry\n", __builtin_return_address(0));
    }
    if (unlikely(req && size > 0 && !req->skb)) { /* req may be NULL here */
        CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
        req->policy = NOOP;
        ccci_free_req(req);
        req = NULL;
    }
    return req;
}
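A hedged caller sketch for the contract described in the comment above; demo_write and the OUT direction value are assumptions for illustration, and even with blk1=1 the result must be checked for NULL:

static int demo_write(int payload_len)
{
	/* blk1=1: may sleep until a request and skb are available;
	 * blk2=1: the TX path may later sleep on the queue threshold */
	struct ccci_request *req = ccci_alloc_req(OUT, payload_len, 1, 1);

	if (!req) /* still possible, per the comment above */
		return -ENOMEM;
	/* ... fill req->skb, then hand the request to the port's TX path ... */
	return 0;
}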
Example #12
0
int md_ccif_let_md_go(struct ccci_modem *md)
{
    struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

    if (MD_IN_DEBUG(md)) {
        CCCI_INF_MSG(md->index, TAG, "DBG_FLAG_JTAG is set\n");
        return -1;
    }
    CCCI_INF_MSG(md->index, TAG, "md_ccif_let_md_go\n");
    // set the start address to let the modem run
    ccif_write32(md_ctrl->md_boot_slave_Key, 0, MD2_BOOT_VECTOR_KEY_VALUE); // make boot vector programmable
    ccif_write32(md_ctrl->md_boot_slave_Vector, 0, MD2_BOOT_VECTOR_VALUE); // after remap, MD ROM address is 0 from MD's view
    ccif_write32(md_ctrl->md_boot_slave_En, 0, MD2_BOOT_VECTOR_EN_VALUE); // make boot vector take effect
    return 0;
}
Example #13
0
// may return NULL, caller should check
struct sk_buff *ccci_alloc_skb(int size, char blocking)
{
	int count = 0;
	struct sk_buff *skb = NULL;

	if (size > SKB_4K || size < 0)
		goto err_exit;
	/* when blocking, skip the kernel allocator and go straight to the pools */
	skb = blocking ? NULL : __alloc_skb_from_kernel(size);

	if (!skb) {
slow_retry:
		skb = __alloc_skb_from_pool(size);
	}
	if (unlikely(!skb)) {
		if (blocking) {
			CCCI_INF_MSG(-1, BM, "skb pool is empty! size=%d (%d)\n", size, count++);
			msleep(100);
			goto slow_retry;
		} else {
fast_retry:
			skb = __alloc_skb_from_kernel(size);
			if (!skb && count++ < 20)
				goto fast_retry;
		}
	}
err_exit:
	if(unlikely(!skb))
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb fail, size=%d\n", __builtin_return_address(0), size);
	else
		CCCI_DBG_MSG(-1, BM, "%ps alloc skb %p, size=%d\n", __builtin_return_address(0), skb, size);
	return skb;
}
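A short caller sketch for the "may return NULL" warning above ccci_alloc_skb; demo_rx_refill is an illustrative name, and the rule shown (non-blocking in atomic context, since blocking may msleep) is an assumption about typical usage:

static struct sk_buff *demo_rx_refill(int size, int atomic_ctx)
{
	/* blocking=1 may msleep() on an empty pool, so atomic callers pass 0;
	 * either way the result can be NULL and has to be checked */
	struct sk_buff *skb = ccci_alloc_skb(size, atomic_ctx ? 0 : 1);

	if (unlikely(!skb))
		return NULL;
	return skb;
}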
Example #14
0
void ccci_cmpt_mem_dump(int md_id, void *start_addr, int len)
{
	unsigned int *curr_p = (unsigned int *)start_addr;
	unsigned char *curr_ch_p;
	int _64_fix_num = len / 64;
	int tail_num = len % 64;
	char buf[64];
	int i, j;

	if (curr_p == NULL) {
		CCCI_INF_MSG(md_id, BM, "NULL pointer to dump!\n");
		return;
	}
	if (len == 0) {
		CCCI_INF_MSG(md_id, BM, "No need to dump\n");
		return;
	}

	/* Fix section */
	for (i = 0; i < _64_fix_num; i++) {
		CCCI_INF_MSG(md_id, BM, "%03X: %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X\n",
		       i * 64,
		       *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3),
		       *(curr_p + 4), *(curr_p + 5), *(curr_p + 6), *(curr_p + 7),
		       *(curr_p + 8), *(curr_p + 9), *(curr_p + 10), *(curr_p + 11),
		       *(curr_p + 12), *(curr_p + 13), *(curr_p + 14), *(curr_p + 15));
		curr_p += 64/4;
	}

	/* Tail section */
	if (tail_num > 0) {
		curr_ch_p = (unsigned char *)curr_p;
		for (j = 0; j < tail_num; j++) {
			buf[j] = *curr_ch_p;
			curr_ch_p++;
		}
		for (; j < 64; j++)
			buf[j] = 0;
		curr_p = (unsigned int *)buf;
		CCCI_INF_MSG(md_id, BM, "%03X: %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X %X\n",
		       i * 64,
		       *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3),
		       *(curr_p + 4), *(curr_p + 5), *(curr_p + 6), *(curr_p + 7),
		       *(curr_p + 8), *(curr_p + 9), *(curr_p + 10), *(curr_p + 11),
		       *(curr_p + 12), *(curr_p + 13), *(curr_p + 14), *(curr_p + 15));
	}
}
void md_cd_dump_debug_register(struct ccci_modem *md)
{
	struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
	
	md_cd_lock_cldma_clock_src(1);
	CCCI_INF_MSG(md->index, TAG, "Dump MD Bus status %x\n", MD_BUS_STATUS_BASE);
	ccci_mem_dump(md->index, md_ctrl->md_bus_status, MD_BUS_STATUS_LENGTH);
	CCCI_INF_MSG(md->index, TAG, "Dump MD PC monitor %x\n", MD_PC_MONITOR_BASE);
	ccci_write32(md_ctrl->md_pc_monitor, 0, 0x80000000); // stop MD PCMon
	ccci_mem_dump(md->index, md_ctrl->md_pc_monitor, MD_PC_MONITOR_LENGTH);
	ccci_write32(md_ctrl->md_pc_monitor, 0, 0x1); // restart MD PCMon
	CCCI_INF_MSG(md->index, TAG, "Dump MD TOPSM status %x\n", MD_TOPSM_STATUS_BASE);
	ccci_mem_dump(md->index, md_ctrl->md_topsm_status, MD_TOPSM_STATUS_LENGTH);
	CCCI_INF_MSG(md->index, TAG, "Dump MD OST status %x\n", MD_OST_STATUS_BASE);
	ccci_mem_dump(md->index, md_ctrl->md_ost_status, MD_OST_STATUS_LENGTH);
	md_cd_lock_cldma_clock_src(0);
}
/* callback for system power off */
void ccci_power_off(void)
{
	/* ALPS02057700 workaround:
	 * power on VLTE so the backlight works normally during system power off
	 */
	CCCI_INF_MSG(-1, CORE, "ccci_power_off:set VLTE on,bit0,1\n");
	pmic_config_interface(0x04D6, 0x1, 0x1, 0); /* bit[0] =>1'b1 */
	udelay(200);
}
void md_cldma_hw_reset(struct ccci_modem *md)
{
    unsigned int reg_value;
    struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
    CCCI_INF_MSG(md->index, TAG, "md_cldma_hw_reset:rst cldma\n");
    //reset cldma hw
    reg_value = ccci_read32(infra_ao_base,INFRA_RST0_REG);
    reg_value &=~(CLDMA_AO_RST_MASK|CLDMA_PD_RST_MASK);
    reg_value |=(CLDMA_AO_RST_MASK|CLDMA_PD_RST_MASK);
    ccci_write32(infra_ao_base,INFRA_RST0_REG,reg_value);
    CCCI_INF_MSG(md->index, TAG, "md_cldma_hw_reset:clear reset\n");
    //reset cldma clr
    reg_value = ccci_read32(infra_ao_base,INFRA_RST1_REG);
    reg_value &=~(CLDMA_AO_RST_MASK|CLDMA_PD_RST_MASK);
    reg_value |=(CLDMA_AO_RST_MASK|CLDMA_PD_RST_MASK);
    ccci_write32(infra_ao_base,INFRA_RST1_REG,reg_value);
    CCCI_INF_MSG(md->index, TAG, "md_cldma_hw_reset:done\n");
}
int ccci_store_sim_switch_mode(struct ccci_modem *md, int simmode)
{
    if (ccci_cfg_setting.sim_mode != simmode) {
        ccci_cfg_setting.sim_mode = simmode;
        ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_CFG_UPDATE, 1);
    } else {
        CCCI_INF_MSG(md->index, CORE, "same sim mode as last time(0x%x)\n", simmode);
    }
    return 0;
}
int ccci_modem_sysresume(void)
{
    struct ccci_modem *md;

    CCCI_INF_MSG(0, TAG, "ccci_modem_sysresume\n");
    md = ccci_get_modem_by_id(0);
    if (md != NULL)
        ccci_modem_restore_reg(md);
    return 0;
}
Example #20
0
/*
 * a write operation may block at 3 stages:
 * 1. ccci_alloc_req
 * 2. wait until the queue has an available slot (threshold check)
 * 3. wait until the SDIO transfer is complete --> abandoned, see the reason below.
 * The 1st is decided by @blk1; the 2nd and 3rd are decided by @blk2, waiting on @wq.
 * NULL is returned if no skb is available, even when you set blk1=1.
 *
 * We removed the wait_queue_head_t in ccci_request, so users can NOT wait for a certain
 * request to be completed. This is because a request will be recycled and its state reset,
 * so if a request is completed and then used again, the poor guy who is waiting for it may
 * never see the state transition (FLYING->IDLE/COMPLETE->FLYING) and wait forever.
 */
struct ccci_request *ccci_alloc_req(DIRECTION dir, int size, char blk1, char blk2)
{
	int i;
	struct ccci_request *req = NULL;
	unsigned long flags;

retry:
	spin_lock_irqsave(&req_pool_lock, flags);
	for (i = 0; i < BM_POOL_SIZE; i++) {
		if (req_pool[i].state == IDLE) {
			// important check when a request is passed across layers: make sure it is no longer in any list
			if (req_pool[i].entry.next == LIST_POISON1 && req_pool[i].entry.prev == LIST_POISON2) {
				req = &req_pool[i];
				CCCI_DBG_MSG(-1, BM, "%ps alloc req=%p, i=%d size=%d\n", __builtin_return_address(0), req, i, size);
				req->state = FLYING;
				break;
			} else {
				// should not happen
				CCCI_ERR_MSG(-1, BM, "idle but in list i=%d, from %ps\n", i, __builtin_return_address(0));
				list_del(&req_pool[i].entry);
			}
		}
	}
	if (req) {
		req->dir = dir;
		req_pool_cnt--;
		CCCI_DBG_MSG(-1, BM, "pool count-=%d\n", req_pool_cnt);
	}
	spin_unlock_irqrestore(&req_pool_lock, flags);
	if (req) {
		if (size > 0) {
			req->skb = ccci_alloc_skb(size, blk1);
			req->policy = RECYCLE;
			if (req->skb)
				CCCI_DBG_MSG(-1, BM, "alloc ok, req=%p skb=%p, len=%d\n", req, req->skb, skb_size(req->skb));
		} else {
			req->skb = NULL;
			req->policy = NOOP;
		}
		req->blocking = blk2;
	} else {
		if (blk1) {
			wait_event_interruptible(req_pool_wq, (req_pool_cnt > 0));
			goto retry;
		}
		CCCI_INF_MSG(-1, BM, "fail to alloc req for %ps, no retry\n", __builtin_return_address(0));
	}
	if (unlikely(req && size > 0 && !req->skb)) { /* req may be NULL here */
		CCCI_ERR_MSG(-1, BM, "fail to alloc skb for %ps, size=%d\n", __builtin_return_address(0), size);
		req->policy = NOOP;
		ccci_free_req(req);
		req = NULL;
	}
	return req;
}
Example #21
0
static int ccmni_close(struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));

	atomic_dec(&port->usage_cnt);
	CCCI_INF_MSG(port->modem->index, NET, "port %s close %d\n", port->name, atomic_read(&port->usage_cnt));
	netif_stop_queue(dev);
	if (likely(port->modem->capability & MODEM_CAP_NAPI))
		napi_disable(&((struct netdev_entity *)port->private_data)->napi);
	return 0;
}
Example #22
0
static int my_wp_handler(phys_addr_t addr)
{
	CCCI_INF_MSG(-1, BM, "[ccci/WP_LCH_DEBUG] access from 0x%p, call bug\n", (void *)addr);
	dump_stack();
	/*BUG();*/

	/* re-enable the watchpoint, since the auto-disable is not working */
	del_hw_watchpoint(&wp_event);
#if 0
	wp_err = add_hw_watchpoint(&wp_event);
	if (wp_err != 0)
		/* error */
		CCCI_INF_MSG(-1, BM, "[mydebug]watchpoint init fail\n");
	else
		/* success */
		CCCI_INF_MSG(-1, BM, "[mydebug]watchpoint init done\n");
#endif
	return 0;
}
int md_cd_let_md_go(struct ccci_modem *md)
{
	struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
	if(MD_IN_DEBUG(md))
		return -1;
	CCCI_INF_MSG(md->index, TAG, "set MD boot slave\n"); 
	// set the start address to let modem to run
	cldma_write32(md_ctrl->md_boot_slave_Key, 0, 0x3567C766); // make boot vector programmable
	cldma_write32(md_ctrl->md_boot_slave_Vector, 0, 0x00000001); // after remap, MD ROM address is 0 from MD's view, MT6595 uses Thumb code
	cldma_write32(md_ctrl->md_boot_slave_En, 0, 0xA3B66175); // make boot vector take effect
	return 0;
}
Example #24
0
int md_ccif_power_off(struct ccci_modem *md, unsigned int timeout)
{
    int ret = 0;
    switch (md->index) {
    case MD_SYS2:
        ret = md_power_off(SYS_MD2, timeout);
        break;
    }
    CCCI_INF_MSG(md->index, TAG, "md_ccif_power_off:ret=%d\n", ret);
    return ret;
}
Example #25
0
void ccci_mem_dump(int md_id, void *start_addr, int len)
{
	unsigned int *curr_p = (unsigned int *)start_addr;
	unsigned char *curr_ch_p;
	int _16_fix_num = len / 16;
	int tail_num = len % 16;
	char buf[16];
	int i, j;

	if (curr_p == NULL) {
		CCCI_INF_MSG(md_id, BM, "NULL pointer to dump!\n");
		return;
	}
	if (len == 0) {
		CCCI_INF_MSG(md_id, BM, "No need to dump\n");
		return;
	}

	CCCI_EXP_INF_MSG(md_id, BM, "Base: %p\n", start_addr);
	/* Fix section */
	for (i = 0; i < _16_fix_num; i++) {
		CCCI_DUMP_MSG2(md_id, BM, "%03X: %08X %08X %08X %08X\n",
		       i * 16, *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3));
		curr_p += 4;
	}

	/* Tail section */
	if (tail_num > 0) {
		curr_ch_p = (unsigned char *)curr_p;
		for (j = 0; j < tail_num; j++) {
			buf[j] = *curr_ch_p;
			curr_ch_p++;
		}
		for (; j < 16; j++)
			buf[j] = 0;
		curr_p = (unsigned int *)buf;
		CCCI_DUMP_MSG2(md_id, BM, "%03X: %08X %08X %08X %08X\n",
		       i * 16, *curr_p, *(curr_p + 1), *(curr_p + 2), *(curr_p + 3));
	}
}
Example #26
0
static void ccci_ringbuf_dump(int md_id, unsigned char *title, unsigned char *buffer,
    unsigned int read, unsigned int length, int dump_size)
{
    int i, j, len;
    unsigned char tmp_buf[256];
    unsigned int write = read + dump_size;

    if (write >= length)
        write -= length;
    CCCI_INF_MSG(md_id, TAG, "%s rbdump: buf=0x%p, read=%d, write=%d\n", title, buffer, read, write);

    read = (read >> 2) << 2; // align down to 4 bytes
    write = ((write + 3) >> 2) << 2; // align up to 4 bytes
    if (write >= length)
        write -= length;
    CCCI_INF_MSG(md_id, TAG, "rbdump:aligned read=%d,write=%d\n", read, write);
    i = read;
    while (1) {
        memset(tmp_buf, 0, sizeof(tmp_buf));
        len = snprintf(tmp_buf, sizeof(tmp_buf), "%08X:", i);
        for (j = 0; j < 4; j++) {
            // append at the current offset; passing tmp_buf to itself via %s is undefined behavior
            len += snprintf(tmp_buf + len, sizeof(tmp_buf) - len, " %02X%02X%02X%02X",
                *(buffer + i), *(buffer + i + 1), *(buffer + i + 2), *(buffer + i + 3));
            i += sizeof(unsigned int);
            if (i >= length)
                i -= length;
            if (i == write)
                goto OUT;
        }
        CCCI_INF_MSG(md_id, TAG, "%s\n", tmp_buf);
    }
OUT:
    CCCI_INF_MSG(md_id, TAG, "%s\n", tmp_buf);
}
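The alignment and wrap-around arithmetic in ccci_ringbuf_dump can be checked in isolation; a standalone userspace sketch with arbitrary example values:

#include <stdio.h>

int main(void)
{
	unsigned int length = 64, read = 6, dump_size = 10;
	unsigned int write = read + dump_size; /* 16 */

	if (write >= length)
		write -= length; /* wrap past the end of the ring */
	read = (read >> 2) << 2; /* align down: 6 -> 4 */
	write = ((write + 3) >> 2) << 2; /* align up: 16 -> 16 */
	if (write >= length)
		write -= length;
	printf("aligned read=%u write=%u\n", read, write); /* read=4 write=16 */
	return 0;
}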
Example #27
0
/*
static void disable_watchpoint(void)
{
	if (atomic_read(&hwp_enable)) {
		del_hw_watchpoint(&wp_event);
		atomic_set(&hwp_enable, 0);
	}
}
*/
static void enable_watchpoint(void *address)
{
	int wp_err;

	if (atomic_read(&hwp_enable) == 0) {
		init_wp_event(&wp_event, (phys_addr_t) address, (phys_addr_t) address,
			WP_EVENT_TYPE_WRITE, my_wp_handler);
		atomic_set(&hwp_enable, 1);
		wp_err = add_hw_watchpoint(&wp_event);
		if (wp_err)
			CCCI_INF_MSG(-1, BM, "[mydebug]watchpoint init fail,addr=%p\n", address);
	}
}
void md_cd_dump_debug_register(struct ccci_modem *md)
{
	struct md_cd_ctrl *md_ctrl = (struct md_cd_ctrl *)md->private_data;
	unsigned int reg_value;
	md_cd_lock_modem_clock_src(1);
	CCCI_INF_MSG(md->index, TAG, "Dump MD Bus status %x\n", MD_BUS_STATUS_BASE);
	ccci_mem_dump(md->index, md_ctrl->md_bus_status, MD_BUS_STATUS_LENGTH);
	CCCI_INF_MSG(md->index, TAG, "Dump MD PC monitor %x\n", MD_PC_MONITOR_BASE);
	// stop MD PCMon
	reg_value = ccci_read32(md_ctrl->md_pc_monitor, 0);
	reg_value &= ~(0x1 << 21);
	ccci_write32(md_ctrl->md_pc_monitor, 0, reg_value); // clear bit[21]
	ccci_write32((md_ctrl->md_pc_monitor + 4), 0, 0x80000000); // stop MD PCMon
	ccci_mem_dump(md->index, md_ctrl->md_pc_monitor, MD_PC_MONITOR_LENGTH);
	ccci_write32(md_ctrl->md_pc_monitor + 4, 0, 0x1); // restart MD PCMon
	CCCI_INF_MSG(md->index, TAG, "Dump MD TOPSM status %x\n", MD_TOPSM_STATUS_BASE);
	ccci_mem_dump(md->index, md_ctrl->md_topsm_status, MD_TOPSM_STATUS_LENGTH);
	CCCI_INF_MSG(md->index, TAG, "Dump MD OST status %x\n", MD_OST_STATUS_BASE);
	ccci_mem_dump(md->index, md_ctrl->md_ost_status, MD_OST_STATUS_LENGTH);
	CCCI_INF_MSG(md->index, TAG, "Dump MD PLL %x\n", MD_PLL_BASE);
	ccci_mem_dump(md->index, md_ctrl->md_pll, MD_PLL_LENGTH);
	md_cd_lock_modem_clock_src(0);
}
Example #29
0
static int ccmni_open(struct net_device *dev)
{
    struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;

    atomic_inc(&port->usage_cnt);
    CCCI_INF_MSG(port->modem->index, NET, "port %s open %d cap=%X", port->name, atomic_read(&port->usage_cnt), port->modem->capability);
    netif_start_queue(dev);
    if(likely(port->modem->capability & MODEM_CAP_NAPI)) {
        napi_enable(&nent->napi);
        napi_schedule(&nent->napi);
    }
    return 0;
}
int md_ccif_power_on(struct ccci_modem *md)
{
	int ret = 0;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	switch (md->index) {
	case MD_SYS2:
#if defined(CONFIG_MTK_LEGACY)
		CCCI_INF_MSG(md->index, TAG, "Call start md_power_on()\n");
		ret = md_power_on(SYS_MD2);
		CCCI_INF_MSG(md->index, TAG, "Call end md_power_on() ret=%d\n",
			     ret);
#else
		CCCI_INF_MSG(md->index, TAG,
			     "Call start clk_prepare_enable()\n");
		clk_prepare_enable(clk_scp_sys_md2_main);
		CCCI_INF_MSG(md->index, TAG, "Call end clk_prepare_enable()\n");
#endif
		break;
	case MD_SYS3:
#if defined(CONFIG_MTK_LEGACY)
		CCCI_INF_MSG(md->index, TAG, "Call start md_power_on()\n");
		ret = md_power_on(SYS_MD2);
		CCCI_INF_MSG(md->index, TAG, "Call end md_power_on() ret=%d\n",
			     ret);
#else
		CCCI_INF_MSG(md->index, TAG,
			     "Call start clk_prepare_enable()\n");
		clk_prepare_enable(clk_scp_sys_md3_main);
		CCCI_INF_MSG(md->index, TAG, "Call end clk_prepare_enable()\n");
#endif

		break;
	}
	CCCI_INF_MSG(md->index, TAG, "md_ccif_power_on:ret=%d\n", ret);
	if (ret == 0 && md->index != MD_SYS3) {
		/*disable MD WDT */
		ccif_write32(md_ctrl->md_rgu_base, WDT_MD_MODE,
			     WDT_MD_MODE_KEY);
	}
	return ret;
}