static int mmc_movi_read_req(struct mmc_card *card,
					void *data_buf, u32 arg, u32 blocks)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	/* send request */
	mrq.cmd = &cmd;
	mrq.data = &data;

	if (blocks > 1)
		cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
	else
		cmd.opcode = MMC_READ_SINGLE_BLOCK;
	cmd.arg = arg;

	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = blocks;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, data.blksz * data.blocks);

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error)
		return cmd.error;

	if (data.error)
		return data.error;

	return 0;
}
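A minimal caller sketch (hypothetical, not from the driver): the scatterlist maps the buffer for DMA, so data_buf must be a DMA-capable kernel allocation, and the host should be claimed around the request.

/* Hypothetical usage of mmc_movi_read_req(): read `blocks` 512-byte
 * blocks starting at `lba` into a kmalloc'd (DMA-capable) buffer.
 */
static int movi_read_example(struct mmc_card *card, u32 lba, u32 blocks)
{
	void *buf = kmalloc(512 * blocks, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	mmc_claim_host(card->host);
	err = mmc_movi_read_req(card, buf, lba, blocks);
	mmc_release_host(card->host);

	/* ... consume buf on success ... */
	kfree(buf);
	return err;
}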
Example #2
int sd_send_lock_unlock_cmd(struct mmc_card *card, u8 *data_buf, int data_size, int max_buf_size)
{
	int err;
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	cmd.opcode = MMC_LOCK_UNLOCK;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = data_size;
	data.blocks = 1;
	data.flags = MMC_DATA_WRITE;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	/* lock/unlock can take long; override the computed timeout with 2 s */
	data.timeout_ns = 2 * 1000 * 1000 * 1000;

	mrq.cmd = &cmd;
	mrq.data = &data;

	sg_init_one(&sg, data_buf, max_buf_size);
	mmc_wait_for_req(card->host, &mrq);
	err = cmd.error;
	if (err) {
		pr_err("%s: lock unlock cmd error %d\n", __func__, cmd.error);
		return err;
	}

	err = data.error;
	if (err)
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
	return err;
}
static void swrm_mmc_prepare_mrq(struct mmc_card *card,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned int sg_len,
	unsigned int lba, unsigned int blocks, unsigned int blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ? MMC_WRITE_BLOCK :
			MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = lba;
	if (!mmc_card_blockaddr(card)) {
		mrq->cmd->arg <<= 9;
		printk(KERN_DEBUG "swrm_mmc_prepare_mrq opcode: %d\n",
			mrq->cmd->opcode);
	}
	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1) {
		mrq->stop = NULL;
	} else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, card);
}
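A sketch of how a prepare helper like this is typically driven (illustrative only; it mirrors the mmc_test pattern in the next example):

/* Illustrative caller: wire up the request structs, map one buffer,
 * prepare the request, and issue it synchronously.
 */
static int swrm_example_xfer(struct mmc_card *card, void *buf,
			     unsigned int lba, unsigned int blocks, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;	/* cleared again by the helper for single-block */

	sg_init_one(&sg, buf, blocks * 512);
	swrm_mmc_prepare_mrq(card, &mrq, &sg, 1, lba, blocks, 512, write);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	return data.error;
}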
Example #4
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
static int simple_sd_ioctl_single_rw(struct msdc_ioctl* msdc_ctl)
{
    char l_buf[512];
    struct scatterlist msdc_sg;
    struct mmc_data    msdc_data;
    struct mmc_command msdc_cmd;
    struct mmc_request msdc_mrq;
    struct msdc_host *host_ctl;
   
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];
    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user want access %d partition\n",msdc_ctl->partition);
#endif

    mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
    switch (msdc_ctl->partition){
        case BOOT_PARTITION_1:
            if (0x1 != (l_buf[179] & 0x7)){
                /* change to access boot partition 1 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x1;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        case BOOT_PARTITION_2:
            if (0x2 != (l_buf[179] & 0x7)){
                /* change to access boot partition 2 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x2;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        default:
            /* make sure access partition is user data area */
            if (0 != (l_buf[179] & 0x7)){
                /* set back to access user area */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x0;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
    }
    
    if (msdc_ctl->total_size > 512) {
        msdc_ctl->result = -1;
        mmc_release_host(host_ctl->mmc);
        return msdc_ctl->result;
    }

#if DEBUG_MMC_IOCTL
    printk("start MSDC_SINGLE_READ_WRITE !!\n");
#endif    
    memset(&msdc_data, 0, sizeof(struct mmc_data));
    memset(&msdc_mrq, 0, sizeof(struct mmc_request));
    memset(&msdc_cmd, 0, sizeof(struct mmc_command));

    msdc_mrq.cmd = &msdc_cmd;
    msdc_mrq.data = &msdc_data;

    if(msdc_ctl->trans_type)
        dma_force[host_ctl->id] = FORCE_IN_DMA;
    else
        dma_force[host_ctl->id] = FORCE_IN_PIO;

    if (msdc_ctl->iswrite){
        msdc_data.flags = MMC_DATA_WRITE;
        msdc_cmd.opcode = MMC_WRITE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, 512)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, 512);
        }
    } else {
        msdc_data.flags = MMC_DATA_READ;
        msdc_cmd.opcode = MMC_READ_SINGLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;

        memset(sg_msdc_multi_buffer, 0 , 512);
    }

    msdc_cmd.arg = msdc_ctl->address;

    if (!mmc_card_blockaddr(host_ctl->mmc->card)){
        printk("the device is used byte address!\n");
        msdc_cmd.arg <<= 9;
    }

    msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    msdc_data.stop = NULL;
    msdc_data.blksz = 512;
    msdc_data.sg = &msdc_sg;
    msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
    printk("single block: ueser buf address is 0x%p!\n",msdc_ctl->buffer);
#endif    
    sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
    mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);

    mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

    if (!msdc_ctl->iswrite){
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, 512)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(msdc_ctl->buffer,sg_msdc_multi_buffer,512);
        }
    }

    if (msdc_ctl->partition){
        mmc_send_ext_csd(host_ctl->mmc->card,l_buf);

        if (l_buf[179] & 0x7) {
            /* set back to access user area */
            l_buf[179] &= ~0x7;
            l_buf[179] |= 0x0;
            mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
        }
    }

    mmc_release_host(host_ctl->mmc);

    if (msdc_cmd.error)
        msdc_ctl->result = msdc_cmd.error;
    else if (msdc_data.error)
        msdc_ctl->result = msdc_data.error;
    else
        msdc_ctl->result = 0;

    dma_force[host_ctl->id] = FORCE_NOTHING;
    return msdc_ctl->result;
}
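This ioctl and its multi-block sibling further down repeat the same EXT_CSD byte 179 (PARTITION_CONFIG) manipulation; a hedged sketch of how that could be factored into a helper (the helper name is illustrative, constants taken from the code above):

/* Sketch: point PARTITION_ACCESS (EXT_CSD[179], bits 2:0) at `part`
 * (0 = user area, 1/2 = boot partition 1/2) if not already there.
 */
static int msdc_example_switch_part(struct mmc_card *card, u8 part)
{
	char l_buf[512];

	mmc_send_ext_csd(card, l_buf);
	if ((l_buf[179] & 0x7) == part)
		return 0;	/* already addressing the wanted partition */

	l_buf[179] &= ~0x7;
	l_buf[179] |= part;
	return mmc_switch(card, 0, 179, l_buf[179], 1000);
}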
Example #6
/**
 *  @brief This function uses SG mode to read/write data to/from card memory
 *
 *  @param handle   Pointer to the moal_handle structure
 *  @param pmbuf_list   Pointer to a linked list of mlan_buffer structures
 *  @param port     Port
 *  @param write    Write flag
 *
 *  @return         MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
mlan_status
woal_sdio_rw_mb(moal_handle *handle, pmlan_buffer pmbuf_list, t_u32 port,
		t_u8 write)
{
	struct scatterlist sg_list[SDIO_MP_AGGR_DEF_PKT_LIMIT_MAX];
	int num_sg = pmbuf_list->use_count;
	int i = 0;
	mlan_buffer *pmbuf = NULL;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct sdio_func *func = ((struct sdio_mmc_card *)handle->card)->func;
	t_u32 ioport = (port & MLAN_SDIO_IO_PORT_MASK);
	t_u32 blkcnt = pmbuf_list->data_len / MLAN_SDIO_BLOCK_SIZE;
	int status;

	if (num_sg > SDIO_MP_AGGR_DEF_PKT_LIMIT_MAX) {
		PRINTM(MERROR, "ERROR: num_sg=%d", num_sg);
		return MLAN_STATUS_FAILURE;
	}
	sg_init_table(sg_list, num_sg);
	pmbuf = pmbuf_list->pnext;
	for (i = 0; i < num_sg; i++) {
		if (pmbuf == pmbuf_list)
			break;
		sg_set_buf(&sg_list[i], pmbuf->pbuf + pmbuf->data_offset,
			   pmbuf->data_len);
		pmbuf = pmbuf->pnext;
	}
	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sg_list;
	mmc_dat.sg_len = num_sg;
	mmc_dat.blksz = MLAN_SDIO_BLOCK_SIZE;
	mmc_dat.blocks = blkcnt;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;

	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1 << 31 : 0;
	mmc_cmd.arg |= (func->num & 0x7) << 28;
	mmc_cmd.arg |= 1 << 27;	/* block basis */
	mmc_cmd.arg |= 0;	/* fixed address */
	mmc_cmd.arg |= (ioport & 0x1FFFF) << 9;
	mmc_cmd.arg |= blkcnt & 0x1FF;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	sdio_claim_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
	mmc_set_data_timeout(&mmc_dat,
			     ((struct sdio_mmc_card *)handle->card)->func->
			     card);
	mmc_wait_for_req(((struct sdio_mmc_card *)handle->card)->func->card->
			 host, &mmc_req);
	if (mmc_cmd.error || mmc_dat.error) {
		PRINTM(MERROR, "CMD53 %s cmd_error = %d data_error=%d\n",
		       write ? "write" : "read", mmc_cmd.error, mmc_dat.error);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
        /* issue abort cmd52 command through F0*/
        sdio_f0_writeb(((struct sdio_mmc_card *)handle->card)->func, 0x01, SDIO_CCCR_ABORT, &status);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	    sdio_release_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
		return MLAN_STATUS_FAILURE;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
    sdio_release_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
	return MLAN_STATUS_SUCCESS;
}
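The CMD53 argument assembled bit by bit above follows the SDIO IO_RW_EXTENDED layout; a small illustrative builder (the Atheros example later wraps the same layout in its SDIO_SET_CMD53_ARG macro):

/* Sketch: assemble a CMD53 (IO_RW_EXTENDED) argument.
 *   bit 31      R/W flag            bit 26     OP code (1 = incrementing)
 *   bits 30:28  function number     bits 25:9  register address
 *   bit 27      block mode          bits 8:0   block/byte count
 */
static inline u32 cmd53_arg(int write, unsigned int fn, int blk_mode,
			    int incr_addr, u32 addr, u32 count)
{
	return (write ? 1U << 31 : 0) |
	       ((fn & 0x7) << 28) |
	       (blk_mode ? 1U << 27 : 0) |
	       (incr_addr ? 1U << 26 : 0) |
	       ((addr & 0x1FFFF) << 9) |
	       (count & 0x1FF);
}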
static int mmc_rpmb_send_command(struct mmc_card *card, u8 *buf, __u16 blks,
		__u16 type, u8 req_type)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_command sbc = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *transfer_buf = NULL;

	mrq.sbc = &sbc;
	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = NULL;
	transfer_buf = kzalloc(512 * blks, GFP_KERNEL);
	if (!transfer_buf)
		return -ENOMEM;

	/*
	 * set CMD23
	 */
	sbc.opcode = MMC_SET_BLOCK_COUNT;
	sbc.arg = blks;
	if ((req_type == RPMB_REQ) && type == RPMB_WRITE_DATA)
		sbc.arg |= 1 << 31;
	sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * set CMD25/18
	 */
	sg_init_one(&sg, transfer_buf, 512 * blks);
	if (req_type == RPMB_REQ) {
		cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
		sg_copy_from_buffer(&sg, 1, buf, 512 * blks);
		data.flags |= MMC_DATA_WRITE;
	} else {
		cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
		data.flags |= MMC_DATA_READ;
	}
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	data.blksz = 512;
	data.blocks = blks;
	data.sg = &sg;
	data.sg_len = 1;

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	if (req_type != RPMB_REQ)
		sg_copy_to_buffer(&sg, 1, buf, 512 * blks);

	kfree(transfer_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return 0;
}
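A hypothetical call sequence (the RPMB_RESP constant is an assumption; only RPMB_REQ and RPMB_WRITE_DATA appear above): an RPMB operation writes a 512-byte request frame with CMD23+CMD25 and then reads the response frame back with CMD23+CMD18.

/* Sketch: exchange one RPMB frame. The frame layout and the
 * RPMB_RESP request type are assumptions for illustration.
 */
static int rpmb_example_exchange(struct mmc_card *card, u8 *frame, u16 type)
{
	int err;

	/* write the request frame (CMD23 gains bit 31 for write-data) */
	err = mmc_rpmb_send_command(card, frame, 1, type, RPMB_REQ);
	if (err)
		return err;

	/* read the response frame back for the result check */
	return mmc_rpmb_send_command(card, frame, 1, type, RPMB_RESP);
}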
/*
 * @part: GPP partition part number
 * @addr: GPP write group
 */
int mmc_wp_status(struct mmc_card *card, unsigned int part,
		unsigned int addr, u8 *wp_status)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {0};
	struct scatterlist sg;
	u32 status = 0;
	int err = 0;
	u8 *rbuf = NULL;

	if (!card)
		return -ENODEV;

	if (!card->ext_csd.gpp_sz[part - EXT_CSD_PART_CONFIG_ACC_GP0]) {
		pr_err("%s: doesn't have GPP%d\n", __func__,
				part - 3);
		return -ENODEV;
	}

	rbuf = kzalloc(8, GFP_KERNEL);
	if (rbuf == NULL) {
		pr_err("%s: no memory\n", __func__);
		return -ENOMEM;
	}

	cmd.opcode = MMC_SEND_WRITE_PROT_TYPE;
	cmd.arg = addr * card->ext_csd.wpg_sz;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.sg = &sg;
	data.sg_len = 1;
	data.blksz = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	sg_init_one(data.sg, rbuf, 8);
	mrq.data = &data;
	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	mmc_set_data_timeout(&data, card);

	err = mmc_switch_part(card, part);
	if (err) {
		mmc_release_host(card->host);
		dev_err(mmc_dev(card->host), "%s: swith error %d\n",
				__func__, err);
		goto out;
	}

	mmc_wait_for_req(card->host, &mrq);
	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
				__func__, cmd.error);
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
				__func__, data.error);
	}

	/* Must check status to be sure of no errors */
	do {
		err = mmc_send_status(card, &status);
		if (err) {
			pr_err("%s: get card status err %d, status 0x%x\n",
					__func__, err, status);
			goto out_release;
		}
		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
			break;
		if (mmc_host_is_spi(card->host))
			break;
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(card->host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND) {
			pr_err("%s: error card status 0x%x\n",
					__func__, status);
			goto out_release;
		}
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
					__func__, status);
		if (status & R1_SWITCH_ERROR) {
			pr_err("%s: card switch error, status 0x%x\n",
					__func__, status);
		}
		if (status & R1_OUT_OF_RANGE) {
			pr_err("%s: addr out of range, status 0x%x\n",
					__func__, status);
			goto out_release;
		}
	}

	mmc_switch_part(card, EXT_CSD_PART_CONFIG_ACC_USER);

	mmc_release_host(card->host);

	sg_copy_from_buffer(data.sg, 1, rbuf, 8);

	/*
	 * the first write protect group type is in the last two
	 * bits in the last byte read from the device.
	 */
	*wp_status = rbuf[7] & 0x3;

	kfree(rbuf);

	return 0;
out_release:
	/* error paths above left the host claimed and the partition switched */
	mmc_switch_part(card, EXT_CSD_PART_CONFIG_ACC_USER);
	mmc_release_host(card->host);
out:
	kfree(rbuf);

	return -EPERM;
}
Example #9
    /* called by async task to perform the operation synchronously using direct MMC APIs  */
A_STATUS DoHifReadWriteScatter(HIF_DEVICE *device, BUS_REQUEST *busrequest)
{
    int                     i;
    A_UINT8                 rw;
    A_UINT8                 opcode;
    struct mmc_request      mmcreq;
    struct mmc_command      cmd;
    struct mmc_data         data;
    HIF_SCATTER_REQ_PRIV   *pReqPriv;   
    HIF_SCATTER_REQ        *pReq;       
    A_STATUS                status = A_OK;
    struct                  scatterlist *pSg;
    
    pReqPriv = busrequest->pScatterReq;
    
    A_ASSERT(pReqPriv != NULL);
    
    pReq = pReqPriv->pHifScatterReq;
    
    memset(&mmcreq, 0, sizeof(struct mmc_request));
    memset(&cmd, 0, sizeof(struct mmc_command));
    memset(&data, 0, sizeof(struct mmc_data));
       
    data.blksz = HIF_MBOX_BLOCK_SIZE;
    data.blocks = pReq->TotalLength / HIF_MBOX_BLOCK_SIZE;
                        
    AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d) , (tot:%d,sg:%d)\n",
              (pReq->Request & HIF_WRITE) ? "WRITE":"READ", pReq->Address, data.blksz, data.blocks,
              pReq->TotalLength,pReq->ValidScatterEntries));
         
    if (pReq->Request  & HIF_WRITE) {
        rw = _CMD53_ARG_WRITE;
        data.flags = MMC_DATA_WRITE;
    } else {
        rw = _CMD53_ARG_READ;
        data.flags = MMC_DATA_READ;
    }

    if (pReq->Request & HIF_FIXED_ADDRESS) {
        opcode = _CMD53_ARG_FIXED_ADDRESS;
    } else {
        opcode = _CMD53_ARG_INCR_ADDRESS;
    }
    
        /* fill SG entries */
    pSg = pReqPriv->sgentries;   
    sg_init_table(pSg, pReq->ValidScatterEntries); 
          
        /* assemble SG list */   
    for (i = 0 ; i < pReq->ValidScatterEntries ; i++, pSg++) {
            /* setup each sg entry */
        if ((unsigned long)pReq->ScatterList[i].pBuffer & 0x3) {
                /* note some scatter engines can handle unaligned buffers, print this
                 * as informational only */
            AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
                            ("HIF: (%s) Scatter Buffer is unaligned 0x%lx\n",
                            pReq->Request & HIF_WRITE ? "WRITE":"READ",
                            (unsigned long)pReq->ScatterList[i].pBuffer)); 
        }
        
        AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("  %d:  Addr:0x%lX, Len:%d \n",
            i,(unsigned long)pReq->ScatterList[i].pBuffer,pReq->ScatterList[i].Length));
            
        sg_set_buf(pSg, pReq->ScatterList[i].pBuffer, pReq->ScatterList[i].Length);
    }
        /* set scatter-gather table for request */
    data.sg = pReqPriv->sgentries;
    data.sg_len = pReq->ValidScatterEntries;
        /* set command argument */    
    SDIO_SET_CMD53_ARG(cmd.arg, 
                       rw, 
                       device->func->num, 
                       _CMD53_ARG_BLOCK_BASIS, 
                       opcode,  
                       pReq->Address,
                       data.blocks);  
                       
    cmd.opcode = SD_IO_RW_EXTENDED;
    cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
    
    mmcreq.cmd = &cmd;
    mmcreq.data = &data;
    
    mmc_set_data_timeout(&data, device->func->card);    
        /* synchronous call to process request */
    mmc_wait_for_req(device->func->card->host, &mmcreq);
 
    if (cmd.error) {
        status = A_ERROR;   
        AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: cmd error: %d \n",cmd.error));
    }
               
    if (data.error) {
        status = A_ERROR;
        AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: data error: %d \n",data.error));   
    }

    if (A_FAILED(status)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n",
              (pReq->Request & HIF_WRITE) ? "WRITE":"READ",pReq->Address, data.blksz, data.blocks));        
    }
    
        /* set completion status, fail or success */
    pReq->CompletionStatus = status;
    
    if (pReq->Request & HIF_ASYNCHRONOUS) {
        AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n",(unsigned long)busrequest, status));
            /* complete the request */
        A_ASSERT(pReq->CompletionRoutine != NULL);
        pReq->CompletionRoutine(pReq);
    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER async_task upping busrequest : 0x%lX (%d)\n", (unsigned long)busrequest,status));
            /* signal wait */
        up(&busrequest->sem_req);
    }
                                                               
    return status;   
}
Example #10
static int assd_write_sec_cmd(struct mmc_host *host)
{
	struct mmc_request req;
	struct mmc_command cmd;
	struct mmc_command stp;
	struct mmc_data dat;
	struct scatterlist sg;

	BUG_ON(!host);

	memset(&req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&stp, 0, sizeof(struct mmc_command));
	memset(&dat, 0, sizeof(struct mmc_data));

	req.cmd = &cmd;
	req.data = &dat;

	if (test_bit(ASSD_SEND_STOP, &assd_status))
		req.stop = &stp;

	cmd.opcode = ASSD_WRITE_SEC_CMD;
	cmd.arg = 1;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	dat.blksz = 512;
	dat.blocks = 1;
	dat.flags = MMC_DATA_WRITE;
	dat.sg = &sg;
	dat.sg_len = 1;

	sg_init_one(&sg, assd_block, 512);

	stp.opcode = MMC_STOP_TRANSMISSION;
	stp.arg = 0;
	stp.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_claim_host(host);
	if (host->card == NULL) {

		mmc_release_host(host);
		return -ENODEV;
	}

	mmc_set_data_timeout(&dat, host->card);
	mmc_wait_for_req(host, &req);
	mmc_release_host(host);

	if (cmd.error)
		return cmd.error;

	if (dat.error)
		return dat.error;

	/*
	 * Do not send any STOP_TRANSMISSION command from now on,
	 * if this card does not require a STOP_TRANSMISSION command.
	 */
	if (stp.error == -ETIMEDOUT)
		clear_bit(ASSD_SEND_STOP, &assd_status);

	return 0;
}
Example #11
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1;

	if (mmc_card_claim_host(card))
		goto flush_queue;

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		if (mmc_card_movinand(card)) {
			if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) {
				cmd.opcode = MMC_SET_BLOCK_COUNT;
				cmd.arg = req->nr_sectors;
				cmd.flags = MMC_RSP_R1;
				ret = mmc_wait_for_cmd(card->host, &cmd, 2);
			}
			if (rq_data_dir(req) == READ) {
				if (brq.data.blocks > 1) {
					brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
					brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI);
//					brq.mrq.stop = &brq.stop;
				} else {
					brq.cmd.opcode = MMC_READ_SINGLE_BLOCK;
					brq.data.flags |= MMC_DATA_READ;
					brq.mrq.stop = NULL;
				}
			} else {
				brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
				brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI;
//				brq.mrq.stop = &brq.stop;
			}
		} else {
#endif

		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		}
#endif

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		if (rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
				/* Work-around for broken cards setting READY_FOR_DATA
				 * when not actually ready.
				 */
				if (mmc_card_movinand(card)) {
					if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
						cmd.resp[0] &= ~R1_READY_FOR_DATA;
				}
#endif
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
 	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
 	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	mmc_card_release_host(card);

flush_queue:
	/* reached directly when claiming the host failed, so no release here */
	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}
Example #12
int mmc_gen_cmd(struct mmc_card *card, void *buf, u8 index, u8 arg1, u8 arg2, u8 mode)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	struct mmc_command stop;
	struct scatterlist sg;
	void *data_buf;

	mmc_set_blocklen(card, 512);

	data_buf = kmalloc(512, GFP_KERNEL);
	if (data_buf == NULL)
		return -ENOMEM;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	cmd.opcode = MMC_GEN_CMD;
	cmd.arg = (arg2 << 16) |
		  (arg1 << 8) |
		  (index << 1) |
		  mode;

	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	stop.opcode = MMC_STOP_TRANSMISSION;
	stop.arg = 0;
	stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	sg_init_one(&sg, data_buf, 512);

	mmc_set_data_timeout(&data, card);

	mmc_claim_host(card->host);
	mmc_wait_for_req(card->host, &mrq);
	mmc_release_host(card->host);

	memcpy(buf, data_buf, 512);
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	if (stop.error)
		return stop.error;

	return 0;
}
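For reference, CMD56 (GEN_CMD) packs its argument as (arg2 << 16) | (arg1 << 8) | (index << 1) | mode, and this implementation always reads, so mode should be 1 (card sends a block to the host). A hypothetical invocation:

/* Hypothetical: fetch a vendor-defined 512-byte report via GEN_CMD.
 * The index/arg values are illustrative; mode = 1 means card-to-host.
 */
static int gen_cmd_example(struct mmc_card *card, u8 *report)
{
	return mmc_gen_cmd(card, report, 0x1, 0x0, 0x0, 1);
}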
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl)
{
    char l_buf[512];
    struct scatterlist msdc_sg;
    struct mmc_data  msdc_data;
    struct mmc_command msdc_cmd;
    struct mmc_command msdc_stop;

#ifdef MTK_MSDC_USE_CMD23
	struct mmc_command msdc_sbc;
#endif
    
    struct mmc_request  msdc_mrq;
    struct msdc_host *host_ctl;
   
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];
    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user want access %d partition\n",msdc_ctl->partition);
#endif

    mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
    switch (msdc_ctl->partition){
        case BOOT_PARTITION_1:
            if (0x1 != (l_buf[179] & 0x7)){
                /* change to access boot partition 1 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x1;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        case BOOT_PARTITION_2:
            if (0x2 != (l_buf[179] & 0x7)){
                /* change to access boot partition 2 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x2;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        default:
            /* make sure access partition is user data area */
            if (0 != (l_buf[179] & 0x7)){
                /* set back to access user area */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x0;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
    }

    if (msdc_ctl->total_size > 64 * 1024) {
        msdc_ctl->result = -1;
        mmc_release_host(host_ctl->mmc);
        return msdc_ctl->result;
    }

    memset(&msdc_data, 0, sizeof(struct mmc_data));
    memset(&msdc_mrq, 0, sizeof(struct mmc_request));
    memset(&msdc_cmd, 0, sizeof(struct mmc_command));
    memset(&msdc_stop, 0, sizeof(struct mmc_command));

#ifdef MTK_MSDC_USE_CMD23
    memset(&msdc_sbc, 0, sizeof(struct mmc_command));
#endif

    msdc_mrq.cmd = &msdc_cmd;
    msdc_mrq.data = &msdc_data;

    if(msdc_ctl->trans_type)
        dma_force[host_ctl->id] = FORCE_IN_DMA;
    else
        dma_force[host_ctl->id] = FORCE_IN_PIO;

    if (msdc_ctl->iswrite){
        msdc_data.flags = MMC_DATA_WRITE;
        msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size);
        }
    } else {
        msdc_data.flags = MMC_DATA_READ;
        msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size);
    }

#ifdef MTK_MSDC_USE_CMD23
    if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && 
            !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)){
        msdc_mrq.sbc = &msdc_sbc;
        msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT;
        msdc_mrq.sbc->arg = msdc_data.blocks;
        msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
    }
#endif

    msdc_cmd.arg = msdc_ctl->address;

    if (!mmc_card_blockaddr(host_ctl->mmc->card)){
        printk("this device use byte address!!\n");
        msdc_cmd.arg <<= 9;
    }
    msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    msdc_stop.opcode = MMC_STOP_TRANSMISSION;
    msdc_stop.arg = 0;
    msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

    msdc_data.stop = &msdc_stop;
    msdc_data.blksz = 512;
    msdc_data.sg = &msdc_sg;
    msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
    printk("total size is %d\n",msdc_ctl->total_size);
#endif
    sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
    mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);
    mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

    if (!msdc_ctl->iswrite){
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size);
        }
    }

    if (msdc_ctl->partition){
        mmc_send_ext_csd(host_ctl->mmc->card,l_buf);

        if (l_buf[179] & 0x7) {
            /* set back to access user area */
            l_buf[179] &= ~0x7;
            l_buf[179] |= 0x0;
            mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
        }
    }

    mmc_release_host(host_ctl->mmc);

    if (msdc_cmd.error)
        msdc_ctl->result = msdc_cmd.error;
    else if (msdc_data.error)
        msdc_ctl->result = msdc_data.error;
    else
        msdc_ctl->result = 0;

    dma_force[host_ctl->id] = FORCE_NOTHING;
    return msdc_ctl->result;

}
Example #14
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

#ifdef CONFIG_HUAWEI_EMMC_DSM
	if (cmd.error || data.error) {
		if (!strcmp(mmc_hostname(host), "mmc0")) {
			DSM_EMMC_LOG(card, DSM_EMMC_SEND_CXD_ERR,
				"opcode:%d failed, cmd.error:%d, data.error:%d\n",
				opcode, cmd.error, data.error);
		}
	}
#endif
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
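In mainline this helper sits behind thin wrappers; reading the 512-byte EXT_CSD, for instance, is essentially:

/* Typical wrapper (after mainline mmc_ops.c): EXT_CSD is one
 * 512-byte data block fetched with CMD8.
 */
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
				 ext_csd, 512);
}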
Example #15
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
Example #16
File: bcmsdh.c Project: Lyude/linux
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pkt: skb pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);

			sg_cnt++;
			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (func->num == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, func->card);
		mmc_wait_for_req(func->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	/* restore the sg table and free any local bounce packets */
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}
Example #17
static ssize_t mmc_wr_prot_read(struct file *filp, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
#define PARTITION_NOT_PROTED 0
#define PARTITION_PROTED 1

	struct mmc_card *card = filp->private_data;

	/* used to build the mmc request */
	unsigned int wp_group_size;
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	void *data_buf;
	unsigned char buf[8];
	unsigned int addr = 0;
	unsigned int init_addr = 0;
	char line_buf[128];

	int i, j, k;
	unsigned char ch;
	unsigned char wp_flag;
	int len = 8;
	unsigned int loop_count = 0;
	unsigned int size = 0;
	unsigned int status_prot = PARTITION_NOT_PROTED;

	struct emmc_partition *p_emmc_partition;

	pr_info("[HW]: eMMC protect driver built on %s @ %s\n", __DATE__, __TIME__);

	p_emmc_partition = g_emmc_partition;
	for(i = 0; i < MAX_EMMC_PARTITION_NUM; i++){
		if(p_emmc_partition->flags == 0)
			break;

		if(strcmp(p_emmc_partition->name, "system")  == 0){
			addr = (unsigned int)(p_emmc_partition->start);
			size = (unsigned int)(p_emmc_partition->size_sectors);
			pr_info("[HW]:%s: partitionname = %s \n", __func__, p_emmc_partition->name);
			pr_info("[HW]:%s: partition start from = 0x%08x \n", __func__, addr);
			pr_info("[HW]:%s: partition size = 0x%08x \n", __func__, size);
		}
		p_emmc_partition++;
	}

	init_addr = addr;

	/* addr is unsigned and stays 0 when the "system" partition was not found */
	if (addr == 0)
	{
		pr_err("[HW]:%s:invalid addr = 0x%08x.", __func__, addr);
		if(copy_to_user(ubuf, "fail", strlen("fail "))){
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		return -1;
	}

	wp_group_size = (512 * 1024) * card->ext_csd.raw_hc_erase_gap_size
		* card->ext_csd.raw_hc_erase_grp_size / 512;

	if (addr % wp_group_size != 0) {
		addr = (addr / wp_group_size) * wp_group_size + wp_group_size;
		pr_info("[HW]:%s: start address is not a multiple of wp_group_size, rounding up\n", __func__);
	}

	loop_count = (init_addr + size - addr) / wp_group_size;

	pr_info("[HW]:%s: EXT_CSD_HC_WP_GRP_SIZE = 0x%02x. \n", __func__, card->ext_csd.raw_hc_erase_gap_size);
	pr_info("[HW]:%s: EXT_CSD_HC_ERASE_GRP_SIZE = 0x%02x. \n", __func__, card->ext_csd.raw_hc_erase_grp_size);

	pr_info("[HW]:%s: addr = 0x%08x, wp_group_size=0x%08x, size = 0x%08x \n",__func__, addr, wp_group_size, size);
	pr_info("[HW]:%s: loop_count = 0x%08x \n",__func__, loop_count);

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	addr = addr - wp_group_size * 32;
	for (k = 0; k < loop_count/32 + 2; k++) {
		data_buf = kmalloc(32, GFP_KERNEL);	/* DMA-safe bounce buffer (only `len` bytes used) */
		if (data_buf == NULL)
			return -ENOMEM;

		mrq.cmd = &cmd;
		mrq.data = &data;

		cmd.opcode = 31;	/* CMD31: SEND_WRITE_PROT_TYPE */
		cmd.arg = addr;
		cmd.flags =  MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = len;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.sg = &sg;
		data.sg_len = 1;

		sg_init_one(&sg, data_buf, len);
		mmc_set_data_timeout(&data, card);
		mmc_claim_host(card->host);
		mmc_wait_for_req(card->host, &mrq);
		mmc_release_host(card->host);

		memcpy(buf, data_buf, len);
		kfree(data_buf);

#if 0
		/* show the detailed read status from the response */
		for(i = 0; i < 8; i++){
			pr_info("[HW]:%s: buffer = 0x%02x \n", __func__, buf[i]);
		}
#endif

		for(i = 7; i >= 0; i--)
		{
			ch = buf[i];
			for(j = 0; j < 4; j++)
			{
				wp_flag = ch & 0x3;
				memset(line_buf, 0x00, sizeof(line_buf));
				sprintf(line_buf, "[0x%08x~0x%08x] Write protection group is ", addr, addr + wp_group_size - 1);

				switch(wp_flag)
				{
					case 0:
						strcat(line_buf, "disable");
						break;

					case 1:
						strcat(line_buf, "temporary write protection");
						break;

					case 2:
						strcat(line_buf, "power-on write protection");
						break;

					case 3:
						strcat(line_buf, "permanent write protection");
						break;

					default:
						break;
				}

				pr_info("%s: %s\n", mmc_hostname(card->host), line_buf);

				if( wp_flag == 1){
					if(is_within_group(addr, init_addr, size, wp_group_size) == 0){
						status_prot = PARTITION_PROTED;
						// pr_info("[HW]: %s: addr = 0x%08x, init_addr = 0x%08x, size = 0x%08x, group protected \n", __func__, addr, init_addr, size);
					}
				}
				addr += wp_group_size;
				ch = ch >> 2;
			}
		}
	}

	pr_info("[HW]: %s: end sector = 0x%08x \n", __func__, size + init_addr);

	if (cmd.error)
	{
		pr_err("[HW]:%s:cmd.error=%d.", __func__, cmd.error);
		if(copy_to_user(ubuf, "fail", strlen("fail "))){
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		return cmd.error;
	}

	if (data.error)
	{
		pr_err("[HW]:%s:data.error=%d.", __func__, data.error);
		if(copy_to_user(ubuf, "fail", strlen("fail "))){
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		return data.error;
	}

	switch(status_prot){
		case PARTITION_PROTED:
			if(copy_to_user(ubuf, "protected", strlen("protected "))){
				pr_info("[HW]: %s: copy to user error \n", __func__);
				return -EFAULT;
			}
			pr_info("[HW]: %s: protected \n", __func__);
		break;

		case PARTITION_NOT_PROTED:
			if(copy_to_user(ubuf, "not_protected", strlen("not_protected "))){
				pr_info("[HW]: %s: copy to user error \n", __func__);
				return -EFAULT;
			}
			pr_info("[HW]: %s: not_protected \n", __func__);
		break;

		default:
			break;
	}
	return 0;
}
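The nested loop above walks the 8-byte CMD31 response from buf[7] down because the first (lowest-address) write-protect group occupies the two least significant bits of the last byte; the same decode as a standalone sketch:

/* Sketch: 2-bit protection type of group `n` (0..31) from the 8-byte
 * SEND_WRITE_PROT_TYPE response. 0 = none, 1 = temporary,
 * 2 = power-on, 3 = permanent.
 */
static u8 wp_type_of_group(const u8 *resp, unsigned int n)
{
	u8 byte = resp[7 - n / 4];	/* first group lives in resp[7] */

	return (byte >> (2 * (n % 4))) & 0x3;
}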
Example #18
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                  u32 opcode, void *buf, unsigned len)
{
    struct mmc_request mrq;
    struct mmc_command cmd;
    struct mmc_data data;
    struct scatterlist sg;
    void *data_buf;

    /* dma onto stack is unsafe/nonportable, but callers to this
     * routine normally provide temporary on-stack buffers ...
     */
    data_buf = kmalloc(len, GFP_KERNEL);
    if (data_buf == NULL)
        return -ENOMEM;

    memset(&mrq, 0, sizeof(struct mmc_request));
    memset(&cmd, 0, sizeof(struct mmc_command));
    memset(&data, 0, sizeof(struct mmc_data));

    mrq.cmd = &cmd;
    mrq.data = &data;

    cmd.opcode = opcode;
    cmd.arg = 0;

    /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
     * rely on callers to never use this with "native" calls for reading
     * CSD or CID.  Native versions of those commands use the R2 type,
     * not R1 plus a data block.
     */
    cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    data.blksz = len;
    data.blocks = 1;
    data.flags = MMC_DATA_READ;
    data.sg = &sg;
    data.sg_len = 1;

    sg_init_one(&sg, data_buf, len);

    if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
        /*
         * The spec states that CSR and CID accesses have a timeout
         * of 64 clock cycles.
         */
        data.timeout_ns = 0;
        data.timeout_clks = 64;
    } else
        mmc_set_data_timeout(&data, card);

    mmc_wait_for_req(host, &mrq);

    memcpy(buf, data_buf, len);
    kfree(data_buf);

    if (cmd.error)
        return cmd.error;
    if (data.error)
        return data.error;

    return 0;
}
Example #19
static int mmc_bustest_read(struct mmc_host *host,
				 struct mmc_card *card, int buswidth)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	struct scatterlist sg;
	int bustest_recv_pat[4] = { 0x40, 0x0, 0xA5, 0xAA55 };
	u32 *test_pat;
	int err = 0;

	test_pat = kmalloc(512, GFP_KERNEL);
	if (test_pat == NULL)
		return -ENOMEM;

	memset(test_pat, 0, 512);
	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = MMC_BUSTEST_R;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	if (buswidth == MMC_BUS_WIDTH_8) {
		data.blksz = 8;
		sg_init_one(&sg, test_pat, 8);
	} else if (buswidth == MMC_BUS_WIDTH_4) {
		data.blksz = 4;
		sg_init_one(&sg, test_pat, 4);
	} else {
		data.blksz = 1;
		sg_init_one(&sg, test_pat, 1);
	}

	mmc_set_data_timeout(&data, card);
	mmc_wait_for_req(host, &mrq);

	pr_debug("%s: Test pattern received: 0x%x\n", __func__, test_pat[0]);
	if (cmd.error || data.error) {
		pr_err("%s: cmd.error: %d  data.error: %d\n",
			__func__, cmd.error, data.error);
		err = -1;
		goto cmderr;
	}

	if (test_pat[0] == bustest_recv_pat[buswidth])
		pr_debug("%s: Bus test pass for buswidth:%d\n",
						 __func__, buswidth);
	else
		err = -1;
cmderr:
	kfree(test_pat);
	return err;
}