Example #1
static void
sdio_set_clock(unsigned int clock)
{
	unsigned int val;
	unsigned int val2;

	val = sdio_read(SDIO_CLK_TOUT_RST);
	val &= ~SDIO_CLK_MASK;
	sdio_write(val, SDIO_CLK_TOUT_RST);	// Stop clock

	if (clock) {
		if (clock == MMC_CLK_HIGH) {
			val |= SDIO_CLK_CLKDIV1 | SDIO_CLK_CLKEN;
			val2 = sdio_read(SDIO_GIO0) | 0x80000000;
			sdio_write(val2, SDIO_GIO0);
			val2 = sdio_read(SDIO_HP_BW) | SDIO_HOST_HS;
			sdio_write(val2, SDIO_HP_BW);
		} else {
			val |= SDIO_CLK_CLKDIV2 | SDIO_CLK_CLKEN;
		}
		sdio_write(val, SDIO_CLK_TOUT_RST);
		while (!((val = sdio_read(SDIO_CLK_TOUT_RST)) & SDIO_CLK_CLKSTA)) {
		}
		val |= SDIO_CLK_SDCLKEN;
		sdio_write(val, SDIO_CLK_TOUT_RST);
	}
}
Example #2
static int
sdio_wait_response(void)
{
	int ret;
	unsigned int int_status;

	while (!(sdio_read(SDIO_INT_STS) & SDIO_INT_CMDCOMP)) {
		if (sdio_read(SDIO_INT_STS) & SDIO_INT_ALLERR) {
			break;
		}
	}
	int_status = sdio_read(SDIO_INT_STS);
	ret = int_status & SDIO_INT_ALLERR;
	if (ret != 0) {
		sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
		if (ret & SDIO_INT_CMD_TOUT) {
			ret = MMC_RES_TIMEOUT;
		}
		return ret;
	}

	if (sdio_read(SDIO_MODE_CMD) & SDIO_CMD_DATA) {
		sdio_write(SDIO_INT_CMDCOMP, SDIO_INT_STS);
	} else {
		sdio_write(int_status, SDIO_INT_STS);
	}
	return 0;
}
Example #3
static int
sdio_single_write(int addr, unsigned char *buf)
{
	int i;
	int ret;
	unsigned int address, int_status;
	unsigned int *tmp_buf = (unsigned int *)buf;

	if (*acc_mode) {
		address = addr;
	} else {
		address = addr * MMC_BLOCKLEN_VAL;
	}

	/* CMD24 : WRITE_SINGLE_BLOCK */
	ret = sdio_send_cmd(24, address, MMC_RSP_R1, NULL);
	if (ret != 0){
		return ret;
	}
	while (1) {
		/* wait write enable */
		int_status = sdio_read(SDIO_INT_STS);
		if (int_status & (SDIO_INT_WREADY | SDIO_INT_ALLERR)) {
			break;
		}
	}
	sdio_write(SDIO_INT_WREADY, SDIO_INT_STS);
	if (int_status & SDIO_INT_ALLERR) {
		sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
		return -1;
	}

	if (buf != NULL) {
		/* 1block write */
		for (i = 0; i < MMC_BLOCKLEN_VAL / 4; i++) {
			sdio_write(*tmp_buf++, SDIO_BUF);
		}
	} else {
		/* 1block clear */
		for (i = 0; i < MMC_BLOCKLEN_VAL / 4; i++) {
			sdio_write(0, SDIO_BUF);
		}
	}

	/* wait for data write end */
	int_status = sdio_read(SDIO_INT_STS);
	while (!(int_status & SDIO_INT_TRANCOMP)) {
		int_status = sdio_read(SDIO_INT_STS);
		if (int_status & SDIO_INT_ALLERR){
			sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
			return -1;
		}
	}
	sdio_write(int_status, SDIO_INT_STS);
	return 0;
}
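
The single-block write above pairs naturally with a thin wrapper that writes a larger buffer one block at a time. The sketch below is hypothetical and only relies on what Example #3 shows: sdio_single_write() returns 0 on success, addr is a block number, and each block is MMC_BLOCKLEN_VAL bytes.

/* Hypothetical helper: write `count` consecutive blocks starting at `addr`
 * by looping over the single-block primitive shown above. Error handling
 * mirrors Example #3: any non-zero return aborts the transfer.
 */
static int
sdio_write_blocks(int addr, unsigned char *buf, int count)
{
	int i;
	int ret;

	for (i = 0; i < count; i++) {
		/* buf may be NULL to clear blocks, exactly as in Example #3 */
		ret = sdio_single_write(addr + i,
					buf ? buf + i * MMC_BLOCKLEN_VAL : NULL);
		if (ret != 0) {
			return ret;	/* propagate the controller error */
		}
	}
	return 0;
}
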
Example #4
static void
sdio_reset(unsigned int mask)
{
	unsigned int val;

	val = sdio_read(SDIO_CLK_TOUT_RST);
	sdio_write(val | mask, SDIO_CLK_TOUT_RST);

	while (sdio_read(SDIO_CLK_TOUT_RST) & mask) {
	}
}
Example #5
void __diag_sdio_send_req(void)
{
	int r = 0;
	void *buf = driver->buf_in_sdio;

	if (driver->sdio_ch && (!driver->in_busy_sdio)) {
		r = sdio_read_avail(driver->sdio_ch);

		if (r > IN_BUF_SIZE) {
			if (r < MAX_IN_BUF_SIZE) {
				pr_err("diag: SDIO sending"
					  " packets more than %d bytes\n", r);
				buf = krealloc(buf, r, GFP_KERNEL);
			} else {
				pr_err("diag: SDIO sending"
			  " in packets more than %d bytes\n", MAX_IN_BUF_SIZE);
				return;
			}
		}
		if (r > 0) {
			if (!buf)
				printk(KERN_INFO "Out of diagmem for SDIO\n");
			else {
				APPEND_DEBUG('i');
				sdio_read(driver->sdio_ch, buf, r);
				APPEND_DEBUG('j');
				driver->write_ptr_mdm->length = r;
				driver->in_busy_sdio = 1;
				diag_device_write(buf, SDIO_DATA,
						 driver->write_ptr_mdm);
			}
		}
	}
}
Example #6
/**
 * Loopback Test
 */
static void loopback_test(void)
{
	int ret = 0;
	u32 read_avail = 0;
	u32 write_avail = 0;

	while (1) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		pr_info(TEST_MODULE_NAME "--LOOPBACK WAIT FOR EVENT--.\n");
		/* wait for data ready event */
		wait_event(test_ctx->wait_q,
			   atomic_read(&test_ctx->rx_notify_count));
		atomic_dec(&test_ctx->rx_notify_count);

		read_avail = sdio_read_avail(test_ctx->ch);
		if (read_avail == 0)
			continue;


		write_avail = sdio_write_avail(test_ctx->ch);
		if (write_avail < read_avail) {
			pr_info(TEST_MODULE_NAME
				":not enough write avail.\n");
			continue;
		}

		ret = sdio_read(test_ctx->ch, test_ctx->buf, read_avail);
		if (ret) {
			pr_info(TEST_MODULE_NAME
			       ":worker, sdio_read err=%d.\n", -ret);
			continue;
		}
		test_ctx->rx_bytes += read_avail;

		pr_debug(TEST_MODULE_NAME ":worker total rx bytes = 0x%x.\n",
			 test_ctx->rx_bytes);


		ret = sdio_write(test_ctx->ch,
				 test_ctx->buf, read_avail);
		if (ret) {
			pr_info(TEST_MODULE_NAME
				":loopback sdio_write err=%d.\n",
				-ret);
			continue;
		}
		test_ctx->tx_bytes += read_avail;

		pr_debug(TEST_MODULE_NAME
			 ":loopback total tx bytes = 0x%x.\n",
			 test_ctx->tx_bytes);
	} /* end of while */
}
Example #7
static int
sdio_multi_read(int addr, unsigned char *buf, int num)
{
	unsigned int address, int_status, val;
	int ret, i, j;
	unsigned int *tmp_buf = (unsigned int *)buf;

	val = sdio_read(SDIO_BLOCK) & 0xffff;
	sdio_write(val | (num << 16), SDIO_BLOCK);

	if (*acc_mode) {
		address = addr;
	} else {
		address = addr * MMC_BLOCKLEN_VAL;
	}

	/* CMD18 : READ_MULTI_BLOCK */
	ret = sdio_send_cmd(18, address, MMC_RSP_R1, NULL);
	if (ret != 0) {
		return ret;
	}

	while (1) {
		/* wait read enable */
		int_status = sdio_read(SDIO_INT_STS);
		if (int_status & (SDIO_INT_RREADY | SDIO_INT_ALLERR)) {
			break;
		}
	}
	sdio_write(SDIO_INT_RREADY, SDIO_INT_STS);
	if (int_status & SDIO_INT_ALLERR) {
		sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
		return int_status;
	}

	for (i = 0 ; i < num; i++) {
		while ((sdio_read(SDIO_STATE) & SDIO_STATE_RDEN) == 0) {
			int_status = sdio_read(SDIO_INT_STS);
			if (int_status & (SDIO_INT_ALLERR)) {
				sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
				return int_status;
			}
		}
		for (j = 0; j < MMC_BLOCKLEN_VAL / 4; j++) {
			*tmp_buf++ = sdio_read(SDIO_BUF);
		}
	}

	/* wait for data read end */
	int_status = sdio_read(SDIO_INT_STS);
	while (!(int_status & SDIO_INT_TRANCOMP)) {
		int_status = sdio_read(SDIO_INT_STS);
		if (int_status & SDIO_INT_ALLERR){
			sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
			return int_status;
		}
	}
	sdio_write(int_status, SDIO_INT_STS);
	return 0;
}
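
A caller of the multi-block read above has to respect the 16-bit block-count field that Example #7 programs into SDIO_BLOCK (num << 16). A hypothetical wrapper that splits a large request into chunks is sketched below; the 0xffff limit follows from that register layout, everything else is an assumption.

/* Hypothetical wrapper: read `count` blocks in chunks that fit the
 * 16-bit block-count field used by sdio_multi_read() above.
 */
static int
sdio_read_blocks(int addr, unsigned char *buf, int count)
{
	int chunk;
	int ret;

	while (count > 0) {
		chunk = (count > 0xffff) ? 0xffff : count;
		ret = sdio_multi_read(addr, buf, chunk);
		if (ret != 0) {
			return ret;	/* raw SDIO_INT_STS error bits, as in Example #7 */
		}
		addr += chunk;
		buf += chunk * MMC_BLOCKLEN_VAL;
		count -= chunk;
	}
	return 0;
}
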
Example #8
static int
sdio_send_extcsd(unsigned char *buf)
{
	int i;
	int ret;
	unsigned int int_status;
	unsigned int *tmp_buf = (unsigned int *)buf;

	/* CMD8 : SEND_EXT_CSD */
	ret = sdio_send_cmd(8, 0, MMC_RSP_R1, NULL);
	if (ret != 0) {
		return ret;
	}

	while (1) {
		/* wait read enable */
		int_status = sdio_read(SDIO_INT_STS);
		if (int_status & (SDIO_INT_RREADY | SDIO_INT_ALLERR)) {
			break;
		}
	}
	sdio_write(SDIO_INT_RREADY, SDIO_INT_STS);
	if (int_status & SDIO_INT_ALLERR) {
		sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
		return -1;
	}

	/* 1block read */
	for (i = 0; i < MMC_BLOCKLEN_VAL / 4; i++) {
		*tmp_buf++ = sdio_read(SDIO_BUF);
	}

	/* wait for data read end */
	int_status = sdio_read(SDIO_INT_STS);
	while (!(int_status & SDIO_INT_TRANCOMP)) {
		int_status = sdio_read(SDIO_INT_STS);
		if (int_status & SDIO_INT_ALLERR){
			sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);
			return int_status;
		}
	}
	sdio_write(int_status, SDIO_INT_STS);
	return 0;
}
Example #9
static void
sdio_set_buswidth(unsigned int width)
{
	unsigned int val;

	val = sdio_read(SDIO_HP_BW);
	if (width == 1) {
		sdio_write(val & ~SDIO_HOST_WIDTH, SDIO_HP_BW);
	} else {
		sdio_write(val | SDIO_HOST_WIDTH, SDIO_HP_BW);
	}
}
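
Examples #1, #4, #9 and #11 together cover the controller-level knobs: reset, clock and bus width. One plausible bring-up order is sketched below as a hypothetical composition of those helpers; MMC_CLK_HIGH comes from Example #1, while the reset mask, the 4-bit width value and the ordering itself are assumptions rather than anything the listing prescribes.

/* Hypothetical bring-up sequence composed from the helpers shown in
 * Examples #1, #4, #9 and #11. Card identification (CMD0/CMD2/CMD3 etc.)
 * would normally happen between starting the clock and switching to
 * high speed / 4-bit mode; it is omitted here.
 */
static void
sdio_controller_bringup(void)
{
	sdio_hw_init();						/* Example #11 */
	sdio_reset(SDIO_SOFTRST_CMD | SDIO_SOFTRST_DATA);	/* Example #4 */
	sdio_set_clock(MMC_CLK_HIGH);				/* Example #1: start SD clock */
	sdio_set_buswidth(4);					/* Example #9: 4-bit bus */
}
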
Example #10
STATIC_PREFIX int fw_load_extl(unsigned por_cfg,unsigned target,unsigned size)
{
    unsigned temp_addr;

#ifdef CONFIG_MESON_TRUSTZONE
	unsigned secure_addr;
	unsigned secure_size;
	unsigned *sram;
#endif
    
#if CONFIG_UCL
    temp_addr=target-0x800000;
#else
    temp_addr=target;
#endif	
    int rc=sdio_read(temp_addr,size,por_cfg);

#if defined(CONFIG_M6_SECU_BOOT)
	aml_m6_sec_boot_check((const unsigned char *)temp_addr);
#endif //CONFIG_M6_SECU_BOOT

#ifdef CONFIG_MESON_TRUSTZONE
	sram = (unsigned*)(AHB_SRAM_BASE + READ_SIZE-SECURE_OS_OFFSET_POSITION_IN_SRAM);
	secure_addr = (*sram) + temp_addr - READ_SIZE;
	sram = (unsigned*)(AHB_SRAM_BASE + READ_SIZE-SECURE_OS_SIZE_POSITION_IN_SRAM);
	secure_size = (*sram);
	secure_load(secure_addr, secure_size);	
#endif	

#if CONFIG_UCL
#ifndef CONFIG_IMPROVE_UCL_DEC
	unsigned len;
    if(!rc){
	    serial_puts("ucl decompress\n");
	    rc=uclDecompress((char*)target,&len,(char*)temp_addr);
        serial_puts("decompress finished\n");
    }
#endif    
#endif

#ifndef CONFIG_IMPROVE_UCL_DEC
    if(!rc)
        rc=check_sum((unsigned*)target,magic_info->crc[1],size);
#else
    if(!rc)
        rc=check_sum((unsigned*)temp_addr,magic_info->crc[1],size);
#endif        
    return rc;
}
Example #11
static void
sdio_hw_init(void)
{
#if defined(EMXX_MINIBOOT)
	sdio_write(SDIO_INT_MASK, SDIO_INT_STSEN);
	sdio_write(SDIO_AMBA0_TMODE_SINGLE, SDIO_AMBA0);
#endif
#if 0
	unsigned int val;

	/* initialize SDIO */
	sdio_write(SDIO_MODEN_ENABLE, SDIO_MODEN);
	sdio_write(SDIO_DELAY_REVERSE, SDIO_DELAY);

	val = sdio_read(SDIO_GIO0);
	sdio_write((val & ~SDIO_GIO0_DETECT), SDIO_GIO0);

	udelay(1500);

	sdio_reset(SDIO_SOFTRST_ALL);

	val = (SDIO_POWER_VOLT_30 | SDIO_POWER_POWER);
	sdio_write(val, SDIO_HP_BW);

	sdio_write(SDIO_INT_MASK, SDIO_INT_STSEN);

	val = SDIO_CLK_CLKDIV64 | SDIO_CLK_CLKEN | SDIO_TIMEOUT_COUNT_MAX;
	sdio_write(val, SDIO_CLK_TOUT_RST);
	while (!((val = sdio_read(SDIO_CLK_TOUT_RST)) & SDIO_CLK_CLKSTA)) {
	}
	val |= SDIO_CLK_SDCLKEN;
	sdio_write(val, SDIO_CLK_TOUT_RST);

	udelay(1000);
#endif
}
Example #12
static void sdio_smem_read(struct work_struct *work)
{
	int err;
	int read_avail;
	char *data = client.buf;

	if (!sdio_ch_opened)
		return;

	read_avail = sdio_read_avail(channel);
	if (read_avail > bytes_avail ||
		read_avail < 0) {
		pr_err("Error: read_avail=%d bytes_avail=%d\n",
			read_avail, bytes_avail);
		goto read_err;
	}

	if (read_avail == 0)
		return;

	err = sdio_read(channel,
			&data[client.size - bytes_avail],
			read_avail);
	if (err) {
		pr_err("sdio_read error (%d)", err);
		goto read_err;
	}

	bytes_avail -= read_avail;
	pr_debug("read %d bytes (bytes_avail = %d)\n",
			read_avail, bytes_avail);

	if (!bytes_avail) {
		bytes_avail = client.size;
		err = client.cb_func(SDIO_SMEM_EVENT_READ_DONE);
	}
	if (err)
		pr_err("error (%d) on callback\n", err);

	return;

read_err:
	if (sdio_ch_opened)
		client.cb_func(SDIO_SMEM_EVENT_READ_ERR);
	return;
}
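
Examples #5, #12, #15 and #17 all follow the same channel-level pattern: query sdio_read_avail() and then sdio_read() that many bytes into a buffer known to be large enough. A stripped-down, hypothetical version of the pattern is sketched below; my_ch, my_buf and my_buf_size are placeholder names, not identifiers from any of the listings.

/* Hypothetical minimal consumer of the channel API used throughout
 * these examples: read whatever is pending, capped to the local buffer.
 */
static int read_pending_bytes(struct sdio_channel *my_ch,
			      char *my_buf, int my_buf_size)
{
	int avail;
	int err;

	avail = sdio_read_avail(my_ch);
	if (avail <= 0)
		return avail;		/* nothing pending, or an error code */

	if (avail > my_buf_size)
		avail = my_buf_size;	/* never read past the local buffer */

	err = sdio_read(my_ch, my_buf, avail);
	if (err)
		return err;		/* propagate the channel error */

	return avail;			/* number of bytes consumed */
}
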
Example #13
void __diag_sdio_send_req(void)
{
    int r = 0;
    void *buf = driver->buf_in_sdio;

    if (driver->sdio_ch && (!driver->in_busy_sdio)) {
        r = sdio_read_avail(driver->sdio_ch);

        if (r > IN_BUF_SIZE) {
            if (r < MAX_IN_BUF_SIZE) {
                printk(KERN_ALERT "\n diag: SDIO sending"
                       " in packets more than %d bytes", r);
                buf = krealloc(buf, r, GFP_KERNEL);
            } else {
                printk(KERN_ALERT "\n diag: SDIO sending"
                       " in packets more than %d bytes", MAX_IN_BUF_SIZE);
                return;
            }
        }
        if (r > 0) {
            if (!buf)
                printk(KERN_INFO "Out of diagmem for SDIO\n");
            else {
                APPEND_DEBUG('i');
                sdio_read(driver->sdio_ch, buf, r);
                if (((!driver->usb_connected) &&
                     (driver->logging_mode == USB_MODE)) ||
                    (driver->logging_mode == NO_LOGGING_MODE)) {
                    /*Drop the diag payload */
                    driver->in_busy_sdio = 0;
                    return;
                }
                APPEND_DEBUG('j');
                driver->write_ptr_mdm->length = r;
                driver->in_busy_sdio = 1;
                diag_device_write(buf, SDIO_DATA,
                                  driver->write_ptr_mdm);
            }
        }
    }
}
Example #14
STATIC_PREFIX int fw_load_extl(unsigned por_cfg,unsigned target,unsigned size)
{
    unsigned temp_addr;
#if CONFIG_UCL
    temp_addr=target-0x800000;
#else
    temp_addr=target;
#endif
    int rc=sdio_read(temp_addr,size,por_cfg);   

#if defined(CONFIG_AML_SECU_BOOT_V2)
	if(aml_sec_boot_check((unsigned char *)temp_addr))
	{	
		AML_WATCH_DOG_START();
	}	
#endif //CONFIG_AML_SECU_BOOT_V2

#if CONFIG_UCL
#ifndef CONFIG_IMPROVE_UCL_DEC
	unsigned len;
    if(!rc){
	    serial_puts("ucl decompress...");
	    rc=uclDecompress((char*)target,&len,(char*)temp_addr);
        //serial_puts("decompress finished\n");
        serial_puts(rc?"fail\n":"pass\n");
    }
#endif    
#endif

#ifndef CONFIG_IMPROVE_UCL_DEC
    if(!rc)
        rc=check_sum((unsigned*)target,magic_info->crc[1],size);
#else
    if(!rc)
        rc=check_sum((unsigned*)temp_addr,magic_info->crc[1],size);
#endif        
    return rc;
}
Example #15
void gsdio_read_pending(struct gsdio_port *port)
{
	struct sdio_channel *ch;
	char buf[1024];
	int avail;

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return;
	}

	ch = port->sport_info->ch;

	if (!ch)
		return;

	while ((avail = sdio_read_avail(ch))) {
		if (avail > 1024)
			avail = 1024;
		sdio_read(ch, buf, avail);

		pr_debug("%s: flushed out %d bytes\n", __func__, avail);
	}
}
Example #16
/**
 * A2 Perf Test
 */
static void a2_performance_test(void)
{
	int ret = 0;
	u32 read_avail = 0;
	u32 write_avail = 0;
	int tx_packet_count = 0;
	int rx_packet_count = 0;
	int size = 0;
	u16 *buf16 = (u16 *) test_ctx->buf;
	int i;
	int total_bytes = 0;
	int max_packets = 10000;

	u64 start_jiffy, end_jiffy, delta_jiffies;
	unsigned int time_msec = 0;

	for (i = 0; i < test_ctx->buf_size / 2; i++)
		buf16[i] = (u16) (i & 0xFFFF);

	pr_info(TEST_MODULE_NAME "--A2 PERFORMANCE TEST START --.\n");

	sdio_set_write_threshold(test_ctx->ch, 2*1024);
	sdio_set_read_threshold(test_ctx->ch, 14*1024);
	sdio_set_poll_time(test_ctx->ch, 0);

	start_jiffy = get_jiffies_64(); /* read the current time */

	while (tx_packet_count < max_packets) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		/* wait for data ready event */
		/* use a func to avoid compiler optimizations */
		write_avail = sdio_write_avail(test_ctx->ch);
		read_avail = sdio_read_avail(test_ctx->ch);
		if ((write_avail == 0) && (read_avail == 0)) {
			ret = wait_any_notify();
			if (ret)
				goto exit_err;
		}

		write_avail = sdio_write_avail(test_ctx->ch);
		if (write_avail > 0) {
			size = min(test_ctx->buf_size, write_avail);
			pr_debug(TEST_MODULE_NAME ":tx size = %d.\n", size);
			if (atomic_read(&test_ctx->tx_notify_count) > 0)
				atomic_dec(&test_ctx->tx_notify_count);
			test_ctx->buf[0] = tx_packet_count;
			test_ctx->buf[(size/4)-1] = tx_packet_count;

			ret = sdio_write(test_ctx->ch, test_ctx->buf, size);
			if (ret) {
				pr_info(TEST_MODULE_NAME
					":sdio_write err=%d.\n",
					-ret);
				goto exit_err;
			}
			tx_packet_count++;
			test_ctx->tx_bytes += size;
		}

		read_avail = sdio_read_avail(test_ctx->ch);
		if (read_avail > 0) {
			size = min(test_ctx->buf_size, read_avail);
			pr_debug(TEST_MODULE_NAME ":rx size = %d.\n", size);
			if (atomic_read(&test_ctx->rx_notify_count) > 0)
				atomic_dec(&test_ctx->rx_notify_count);

			ret = sdio_read(test_ctx->ch, test_ctx->buf, size);
			if (ret) {
				pr_info(TEST_MODULE_NAME
					": sdio_read err=%d.\n",
					-ret);
				goto exit_err;
			}
			rx_packet_count++;
			test_ctx->rx_bytes += size;
		}

		pr_debug(TEST_MODULE_NAME
			 ":total rx bytes = %d , rx_packet#=%d.\n",
			 test_ctx->rx_bytes, rx_packet_count);
		pr_debug(TEST_MODULE_NAME
			 ":total tx bytes = %d , tx_packet#=%d.\n",
			 test_ctx->tx_bytes, tx_packet_count);

	   /* pr_info(TEST_MODULE_NAME ":packet#=%d.\n", tx_packet_count); */

	} /* while (tx_packet_count < max_packets ) */

	end_jiffy = get_jiffies_64(); /* read the current time */

	delta_jiffies = end_jiffy - start_jiffy;
	time_msec = jiffies_to_msecs(delta_jiffies);

	pr_info(TEST_MODULE_NAME ":total rx bytes = 0x%x , rx_packet#=%d.\n",
		test_ctx->rx_bytes, rx_packet_count);
	pr_info(TEST_MODULE_NAME ":total tx bytes = 0x%x , tx_packet#=%d.\n",
		test_ctx->tx_bytes, tx_packet_count);

	total_bytes = (test_ctx->tx_bytes + test_ctx->rx_bytes);
	pr_err(TEST_MODULE_NAME ":total bytes = %d, time msec = %d.\n",
		   total_bytes , (int) time_msec);

	pr_err(TEST_MODULE_NAME ":Performance = %d Mbit/sec.\n",
	(total_bytes / time_msec) * 8 / 1000) ;

	pr_err(TEST_MODULE_NAME "--A2 PERFORMANCE TEST END --.\n");

	pr_err(TEST_MODULE_NAME ": TEST PASS.\n");
	return;

exit_err:
	pr_err(TEST_MODULE_NAME ": TEST FAIL.\n");
	return;
}
Example #17
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* net_ip_align is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;
	/* if allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = dev_alloc_skb(sz + NET_IP_ALIGN + len);
		if (skb_mux)
			break;
		pr_err("%s: cannot allocate skb of size:%d\n", __func__,
			sz + NET_IP_ALIGN + len);
		if (sz + NET_IP_ALIGN + len <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
Example #18
void gsdio_tx_pull(struct work_struct *w)
{
	struct gsdio_port *port = container_of(w, struct gsdio_port, pull);
	struct list_head *pool = &port->write_pool;

	pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
			port, port->port_num, pool);

	if (!port->port_usb) {
		pr_err("%s: usb disconnected\n", __func__);

		/* take out all the pending data from sdio */
		gsdio_read_pending(port);

		return;
	}

	spin_lock_irq(&port->port_lock);

	while (!list_empty(pool)) {
		int avail;
		struct usb_ep *in = port->port_usb->in;
		struct sdio_channel *ch = port->sport_info->ch;
		struct usb_request *req;
		unsigned len = TX_BUF_SIZE;
		int ret;


		req = list_entry(pool->next, struct usb_request, list);

		if (!port->sdio_open) {
			pr_debug("%s: SDIO channel is not open\n", __func__);
			goto tx_pull_end;
		}

		avail = sdio_read_avail(ch);
		if (!avail) {
			/* REVISIT: for ZLP */
			pr_debug("%s: read_avail:%d port:%p port#%d\n",
					__func__, avail, port, port->port_num);
			goto tx_pull_end;
		}

		if (avail > len)
			avail = len;

		list_del(&req->list);

		spin_unlock_irq(&port->port_lock);
		ret = sdio_read(ch, req->buf, avail);
		spin_lock_irq(&port->port_lock);
		if (ret) {
			pr_err("%s: port:%p port#%d sdio read failed err:%d",
					__func__, port, port->port_num, ret);

			/* check if usb is still active */
			if (!port->port_usb)
				gsdio_free_req(in, req);
			else
				list_add(&req->list, pool);
			goto tx_pull_end;
		}

		req->length = avail;

		spin_unlock_irq(&port->port_lock);
		ret = usb_ep_queue(in, req, GFP_KERNEL);
		spin_lock_irq(&port->port_lock);
		if (ret) {
			pr_err("%s: usb ep out queue failed"
					"port:%p, port#%d err:%d\n",
					__func__, port, port->port_num, ret);

			/* could be usb disconnected */
			if (!port->port_usb)
				gsdio_free_req(in, req);
			else
				list_add(&req->list, pool);
			goto tx_pull_end;
		}

		port->nbytes_tolaptop += avail;
	}
tx_pull_end:
	spin_unlock_irq(&port->port_lock);
}
Example #19
STATIC_PREFIX int fw_load_intl(unsigned por_cfg,unsigned target,unsigned size)
{
    int rc=0;
    unsigned temp_addr;
#if CONFIG_UCL
    temp_addr=target-0x800000;
#else
    temp_addr=target;
#endif

    unsigned * mem;    
    switch(POR_GET_1ST_CFG(por_cfg))
    {
        case POR_1ST_NAND:
        case POR_1ST_NAND_RB:        	
            rc=nf_read(temp_addr,size);            
            break;
        case POR_1ST_SPI :
        case POR_1ST_SPI_RESERVED :
            mem=(unsigned *)(NOR_START_ADDR+READ_SIZE);
            spi_init();
#if CONFIG_UCL==0
            if((rc=check_sum(target,0,0))!=0)
            {
                return rc;
            }
#endif
            serial_puts("Boot From SPI\n");
            memcpy((unsigned*)temp_addr,mem,size);
            break;
        case POR_1ST_SDIO_C:
        	serial_puts("Boot From SDIO C\n");
        	rc=sdio_read(temp_addr,size,POR_2ND_SDIO_C<<3);
        	break;
        case POR_1ST_SDIO_B:
        	rc=sdio_read(temp_addr,size,POR_2ND_SDIO_B<<3);break;
        case POR_1ST_SDIO_A:
            rc=sdio_read(temp_addr,size,POR_2ND_SDIO_A<<3);
            break;
        default:
           return 1;
    }

#if defined(CONFIG_AML_SECU_BOOT_V2)
	if(aml_sec_boot_check((unsigned char *)temp_addr))
	{
		AML_WATCH_DOG_START();
	}
#endif //CONFIG_AML_SECU_BOOT_V2

#if CONFIG_UCL    
#ifndef CONFIG_IMPROVE_UCL_DEC
	unsigned len;    
    if(rc==0){
        serial_puts("ucl decompress...");
        rc=uclDecompress((char*)target,&len,(char*)temp_addr);
        serial_puts(rc?"fail\n":"pass\n");
    }
#endif    
#endif    

#ifndef CONFIG_IMPROVE_UCL_DEC
    if(rc==0)    	
        rc=check_sum((unsigned*)target,magic_info->crc[1],size);
#else
    if(rc==0)    	
        rc=check_sum((unsigned*)temp_addr,magic_info->crc[1],size);
#endif              	
    return rc;
}
Example #20
void __diag_sdio_send_req(void)
{
	int r = 0;
	void *buf = NULL;
	int *in_busy_ptr = NULL;
	struct diag_request *write_ptr_modem = NULL;
	int retry = 0;
#if defined(CONFIG_MACH_VIGOR)
	int type;
#endif

	if (!driver->in_busy_sdio_1) {
		buf = driver->buf_in_sdio_1;
		write_ptr_modem = driver->write_ptr_mdm_1;
		in_busy_ptr = &(driver->in_busy_sdio_1);
	} else if (!driver->in_busy_sdio_2) {
		buf = driver->buf_in_sdio_2;
		write_ptr_modem = driver->write_ptr_mdm_2;
		in_busy_ptr = &(driver->in_busy_sdio_2);
	}

	APPEND_DEBUG('Z');
	if (driver->sdio_ch && buf) {
		r = sdio_read_avail(driver->sdio_ch);

		if (r > MAX_IN_BUF_SIZE) {
			DIAG_ERR("\n diag: SDIO sending"
				 " in packets more than %d bytes\n", r);
		}
		if (r > 0) {
			if (!buf)
				DIAG_INFO("Out of diagmem for SDIO\n");
			else {
drop:
				APPEND_DEBUG('i');
				sdio_read(driver->sdio_ch, buf, r);
				if ((driver->qxdm2sd_drop) && (driver->logging_mode == USB_MODE)) {
					/*Drop the diag payload */
					DIAG_INFO("%s:Drop the diag payload :%d\n", __func__, retry);
					print_hex_dump(KERN_DEBUG, "Drop Packet Data"
						" from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
					driver->in_busy_sdio_1 = 0;
					driver->in_busy_sdio_2 = 0;
					r=sdio_read_avail(driver->sdio_ch);
					if (++retry > 20) {
						driver->qxdm2sd_drop = 0;
						return;
					}
					if (r)
						goto drop;
					else {
						driver->qxdm2sd_drop = 0;
						return;
					}
				}
				APPEND_DEBUG('j');

				if (diag9k_debug_mask) {
					switch (diag9k_debug_mask) {
					case 1:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						break;
					case 2:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K(last 16 bytes) ", 16, 1, DUMP_PREFIX_ADDRESS, buf+r-16, 16, 1);
						break;
					default:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K ", DUMP_PREFIX_ADDRESS, 16, 1, buf, r, 1);

					}
				}
#if defined(CONFIG_MACH_VIGOR)
				type = checkcmd_modem_epst(buf);
				if (type) {
					modem_to_userspace(buf, r, type, 1);
					return;
				}
#endif

				write_ptr_modem->length = r;
				*in_busy_ptr = 1;
				diag_device_write(buf, SDIO_DATA,
						 write_ptr_modem);

			}
		}
	}
}
Example #21
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;
//[[2011.10.06 leecy add qualcomm patch	
	static int workqueue_pinned;

	if (!workqueue_pinned) {
		struct cpumask cpus;
		cpumask_clear(&cpus);
		cpumask_set_cpu(0, &cpus);
		
		if (sched_setaffinity(current->pid, &cpus))
			pr_err("%s: sdio_dmux set CPU affinity failed\n", __func__);

		workqueue_pinned = 1;
	}
//2011.10.06 leecy add qualcomm patch	]]

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* net_ip_align is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
Example #22
/**
 * sender Test
 */
static void sender_test(void)
{
	int ret = 0;
	u32 read_avail = 0;
	u32 write_avail = 0;
	int packet_count = 0;
	int size = 512;
	u16 *buf16 = (u16 *) test_ctx->buf;
	int i;

	for (i = 0 ; i < size / 2 ; i++)
		buf16[i] = (u16) (i & 0xFFFF);

	sdio_set_write_threshold(test_ctx->ch, 4*1024);
	sdio_set_read_threshold(test_ctx->ch, 16*1024); /* N/A with Rx EOT  */
	sdio_set_poll_time(test_ctx->ch, 0); /* N/A with Rx EOT  */

	while (packet_count < 100) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		pr_info(TEST_MODULE_NAME "--SENDER WAIT FOR EVENT--.\n");

		/* wait for data ready event */
		write_avail = sdio_write_avail(test_ctx->ch);
		pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
		if (write_avail < size) {
			wait_event(test_ctx->wait_q,
				   atomic_read(&test_ctx->tx_notify_count));
			atomic_dec(&test_ctx->tx_notify_count);
		}

		write_avail = sdio_write_avail(test_ctx->ch);
		pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
		if (write_avail < size) {
			pr_info(TEST_MODULE_NAME ":not enough write avail.\n");
			continue;
		}

		test_ctx->buf[0] = packet_count;
		test_ctx->buf[(size/4)-1] = packet_count;

		ret = sdio_write(test_ctx->ch, test_ctx->buf, size);
		if (ret) {
			pr_info(TEST_MODULE_NAME ":sender sdio_write err=%d.\n",
				-ret);
			goto exit_err;
		}

		/* wait for read data ready event */
		pr_debug(TEST_MODULE_NAME ":sender wait for rx data.\n");
		read_avail = sdio_read_avail(test_ctx->ch);
		wait_event(test_ctx->wait_q,
			   atomic_read(&test_ctx->rx_notify_count));
		atomic_dec(&test_ctx->rx_notify_count);

		read_avail = sdio_read_avail(test_ctx->ch);

		if (read_avail != size) {
			pr_info(TEST_MODULE_NAME
				":read_avail size %d not as expected.\n",
				read_avail);
			goto exit_err;
		}

		memset(test_ctx->buf, 0x00, size);

		ret = sdio_read(test_ctx->ch, test_ctx->buf, size);
		if (ret) {
			pr_info(TEST_MODULE_NAME ":sender sdio_read err=%d.\n",
				-ret);
			goto exit_err;
		}


		if ((test_ctx->buf[0] != packet_count) ||
		    (test_ctx->buf[(size/4)-1] != packet_count)) {
			pr_info(TEST_MODULE_NAME
				":sender sdio_read WRONG DATA.\n");
			goto exit_err;
		}

		test_ctx->tx_bytes += size;
		test_ctx->rx_bytes += size;
		packet_count++;

		pr_debug(TEST_MODULE_NAME
			 ":sender total rx bytes = 0x%x , packet#=%d.\n",
			 test_ctx->rx_bytes, packet_count);
		pr_debug(TEST_MODULE_NAME
			 ":sender total tx bytes = 0x%x , packet#=%d.\n",
			 test_ctx->tx_bytes, packet_count);

	} /* end of while */

	sdio_close(test_ctx->ch);

	pr_info(TEST_MODULE_NAME ": TEST PASS.\n");
	return;

exit_err:
	sdio_close(test_ctx->ch);

	pr_info(TEST_MODULE_NAME ": TEST FAIL.\n");
	return;
}
Example #23
static int 
sdio_send_cmd(int cmd, int arg, int type, int *resp)
{
	int ret;
	unsigned int val;

	sdio_write(0xffffffff, SDIO_INT_STS);	// Clear ALL

	sdio_write(arg, SDIO_ARG);

	val = SDIO_CMD_INDEX(cmd);

	switch (type) {
	case MMC_RSP_R1:
	case MMC_RSP_R5:
	case MMC_RSP_R6:
	case MMC_RSP_R7:
	case MMC_RSP_R1B:
		val |= SDIO_CMD_CRC_CHK | SDIO_CMD_INDEX_CHK | SDIO_CMD_RESP_48;
		break;
	case MMC_RSP_R2:
		val |= SDIO_CMD_CRC_CHK | SDIO_CMD_RESP_136;
		break;
	case MMC_RSP_R3:
	case MMC_RSP_R4:
		val |= SDIO_CMD_RESP_48;
		break;
	default:
		break;
	}

	switch (cmd) {
#if defined(CONFIG_EMXX_EMMCBOOT)
	case 8:
#endif
	case 17:
		val |= SDIO_CMD_DATA | SDIO_MODE_READ;
		break;
	case 18:
		val |= SDIO_CMD_DATA | SDIO_MODE_READ;
		val |= SDIO_MODE_ACMD12 | SDIO_MODE_MULTI | SDIO_MODE_BLK_COUNT_EN;
		break;
	case 24:
		val |= SDIO_CMD_DATA;
		break;
	case 25:
		val |= SDIO_CMD_DATA;
		val |= SDIO_MODE_ACMD12 | SDIO_MODE_MULTI | SDIO_MODE_BLK_COUNT_EN;
		break;
	default:
		break;
	}

	sdio_write(val, SDIO_MODE_CMD);

	ret = sdio_wait_response();
	if (ret == 0) {
		if ((type == MMC_RSP_R1B) || (cmd == 7)) {
			// wait DATA0 == 1
			while (!(sdio_read(SDIO_STATE) & SDIO_STATE_DAT0)) {
			}
		}
		if (resp != NULL) {
			*resp++ = sdio_read(SDIO_RSP01);
			*resp++ = sdio_read(SDIO_RSP23);
			*resp++ = sdio_read(SDIO_RSP45);
			*resp = sdio_read(SDIO_RSP67);
		}
	}
	return ret;
}
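
Example #23 fills a four-word response buffer when resp is non-NULL, so callers that need the response must pass an array of at least four ints. The sketch below is a hypothetical status query built on that contract; the CMD13/SEND_STATUS opcode and the RCA-in-upper-16-bits argument come from the MMC specification, not from anything shown in this listing.

/* Hypothetical use of sdio_send_cmd(): issue SEND_STATUS (CMD13) and
 * return the R1 card status word. `rca` is the relative card address
 * obtained earlier during card identification (not shown in these examples).
 */
static int
sdio_query_status(unsigned int rca, unsigned int *status)
{
	int resp[4];	/* sdio_send_cmd() writes up to four response words */
	int ret;

	ret = sdio_send_cmd(13, rca << 16, MMC_RSP_R1, resp);
	if (ret == 0 && status != NULL) {
		*status = resp[0];	/* R1: card status from SDIO_RSP01 */
	}
	return ret;
}
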
Example #24
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("[lte] %s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("[lte] %s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("[lte] Error - %s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* net_ip_align is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		DBG("[lte] %s: cannot allocate skb of size:%d + "
				"%d (NET_SKB_PAD)\n",
				__func__, sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_info("[lte] %s: allocation failed. retry later\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			queue_delayed_work(sdio_mux_workqueue,
				&work_sdio_mux_read,
				msecs_to_jiffies(SDIO_OOM_RETRY_DELAY_MS));
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("[lte] %s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("[lte] Error - %s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_delayed_work(sdio_mux_workqueue,
			&work_sdio_mux_read, 0);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("[lte] %s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* HTC */
	dbg_dump_buf("SDIO_RMNET->RD#", skb_mux->data, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);

	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("[lte] Error - %s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("[lte] %s: read done\n", __func__);
	queue_delayed_work(sdio_mux_workqueue, &work_sdio_mux_read, 0);
}
Example #25
STATIC_PREFIX int fw_load_intl(unsigned por_cfg,unsigned target,unsigned size)
{
    int rc=0;
    unsigned temp_addr;

#ifdef CONFIG_MESON_TRUSTZONE
	unsigned secure_addr;
	unsigned secure_size;
	unsigned *sram;
#endif    
#if CONFIG_UCL
    temp_addr=target-0x800000;
#else
    temp_addr=target;
#endif

    unsigned * mem;    
    switch(POR_GET_1ST_CFG(por_cfg))
    {
        case POR_1ST_NAND:
        case POR_1ST_NAND_RB:        	
            rc=nf_read(temp_addr,size);            
            break;
        case POR_1ST_SPI :
        case POR_1ST_SPI_RESERVED :
            mem=(unsigned *)(NOR_START_ADDR+READ_SIZE);
            spi_init();
#if CONFIG_UCL==0
            if((rc=check_sum(target,0,0))!=0)
            {
                return rc;
            }
#endif
            serial_puts("BootFrom SPI\n");
            memcpy((unsigned*)temp_addr,mem,size);
#ifdef CONFIG_SPI_NOR_SECURE_STORAGE
            serial_puts("BootFrom SPI get storage\n");
            spi_secure_storage_get(NOR_START_ADDR,0,0);
#endif
            break;
        case POR_1ST_SDIO_C:
        	serial_puts("BootFrom SDIO C\n");
        	rc=sdio_read(temp_addr,size,POR_2ND_SDIO_C<<3);
        	break;
        case POR_1ST_SDIO_B:
        	rc=sdio_read(temp_addr,size,POR_2ND_SDIO_B<<3);break;
        case POR_1ST_SDIO_A:
            rc=sdio_read(temp_addr,size,POR_2ND_SDIO_A<<3);
            break;
        default:
           return 1;
    }
    
#if defined(CONFIG_M6_SECU_BOOT)
	aml_m6_sec_boot_check((const unsigned char *)temp_addr);
#endif //CONFIG_M6_SECU_BOOT

#ifdef CONFIG_MESON_TRUSTZONE
	sram = (unsigned*)(AHB_SRAM_BASE + READ_SIZE-SECURE_OS_OFFSET_POSITION_IN_SRAM);
	secure_addr = (*sram) + temp_addr - READ_SIZE;
	sram = (unsigned*)(AHB_SRAM_BASE + READ_SIZE-SECURE_OS_SIZE_POSITION_IN_SRAM);
	secure_size = (*sram);
	secure_load(secure_addr, secure_size);	
#endif	  
	
#if CONFIG_UCL    
#ifndef CONFIG_IMPROVE_UCL_DEC
	unsigned len;    
    if(rc==0){
        serial_puts("ucl decompress\n");
        rc=uclDecompress((char*)target,&len,(char*)temp_addr);
        serial_puts(rc?"decompress false\n":"decompress true\n");
    }
#endif    
#endif    

#ifndef CONFIG_IMPROVE_UCL_DEC
    if(rc==0)    	
        rc=check_sum((unsigned*)target,magic_info->crc[1],size);
#else
    if(rc==0)    	
        rc=check_sum((unsigned*)temp_addr,magic_info->crc[1],size);
#endif              	

    return rc;
}