/** * mmc_wait_for_app_cmd - start an application command and wait for completion * @host: MMC host to start command * @rca: RCA to send MMC_APP_CMD to * @cmd: MMC command to start * @retries: maximum number of retries * * Sends a MMC_APP_CMD, checks the card response, sends the command * in the parameter and waits for it to complete. Return any error * that occurred while the command was executing. Do not attempt to * parse the response. */ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, struct mmc_command *cmd, int retries) { struct mmc_request mrq; int i, err; BUG_ON(!cmd); BUG_ON(retries < 0); err = MMC_ERR_INVALID; /* * We have to resend MMC_APP_CMD for each attempt so * we cannot use the retries field in mmc_command. */ for (i = 0;i <= retries;i++) { memset(&mrq, 0, sizeof(struct mmc_request)); err = mmc_app_cmd(host, card); if (err != MMC_ERR_NONE) continue; memset(&mrq, 0, sizeof(struct mmc_request)); memset(cmd->resp, 0, sizeof(cmd->resp)); cmd->retries = 0; mrq.cmd = cmd; cmd->data = NULL; mmc_wait_for_req(host, &mrq); err = cmd->error; if (cmd->error == MMC_ERR_NONE) break; } return err; }
/*
 * Issue a read of @blocks 512-byte blocks starting at @arg into
 * @data_buf and wait for it to finish.  Returns 0 on success or the
 * first command/data error reported by the host.
 */
static int mmc_movi_read_req(struct mmc_card *card, void *data_buf,
			     u32 arg, u32 blocks)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	/* Pick single- vs multi-block read based on the transfer size. */
	cmd.opcode = (blocks > 1) ? MMC_READ_MULTIPLE_BLOCK :
				    MMC_READ_SINGLE_BLOCK;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = blocks;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, data.blksz * data.blocks);

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_set_data_timeout(&data, card);
	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	return data.error;
}
/*
 * Issue CMD42 (LOCK_UNLOCK) carrying a @data_size byte lock-card data
 * block.  The scatterlist is sized to @max_buf_size (the caller's
 * buffer size) while the data block itself is @data_size bytes.
 * Returns 0 on success or the command/data error.
 */
int sd_send_lock_unlock_cmd(struct mmc_card *card, u8 *data_buf,
			    int data_size, int max_buf_size)
{
	int err;
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	cmd.opcode = MMC_LOCK_UNLOCK;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = data_size;
	data.blocks = 1;
	data.flags = MMC_DATA_WRITE;
	data.sg = &sg;
	data.sg_len = 1;

	mmc_set_data_timeout(&data, card);
	/*
	 * Lock/unlock (and the embedded erase) can take far longer than a
	 * normal write; override the computed timeout with a fixed 2 s.
	 */
	data.timeout_ns = (2*1000*1000*1000);

	mrq.cmd = &cmd;
	mrq.data = &data;

	sg_init_one(&sg, data_buf, max_buf_size);

	mmc_wait_for_req(card->host, &mrq);

	err = cmd.error;
	if (err) {
		/*
		 * BUGFIX: was a bare printk() without a log level; use
		 * dev_err() for consistency with the data-error path below.
		 */
		dev_err(mmc_dev(card->host), "%s: lock unlock cmd error %d\n",
			__func__, cmd.error);
		return err;
	}

	err = data.error;
	if (err) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
	}

	return err;
}
/*
 * Transfer one block of kernel memory to or from the card at @addr and
 * wait until the card reports it is no longer busy.  Returns 0 on
 * success or the first command/data/busy-wait error.
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	/* Wait for the card to finish programming before returning. */
	return mmc_test_wait_busy(test);
}
static int assd_write_sec_cmd(struct mmc_host *host) { struct mmc_request req; struct mmc_command cmd; struct mmc_command stp; struct mmc_data dat; struct scatterlist sg; BUG_ON(!host); memset(&req, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&stp, 0, sizeof(struct mmc_command)); memset(&dat, 0, sizeof(struct mmc_data)); req.cmd = &cmd; req.data = &dat; if (test_bit(ASSD_SEND_STOP, &assd_status)) req.stop = &stp; cmd.opcode = ASSD_WRITE_SEC_CMD; cmd.arg = 1; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; dat.blksz = 512; dat.blocks = 1; dat.flags = MMC_DATA_WRITE; dat.sg = &sg; dat.sg_len = 1; sg_init_one(&sg, assd_block, 512); stp.opcode = MMC_STOP_TRANSMISSION; stp.arg = 0; stp.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; mmc_claim_host(host); if (host->card == NULL) { mmc_release_host(host); return -ENODEV; } mmc_set_data_timeout(&dat, host->card); mmc_wait_for_req(host, &req); mmc_release_host(host); if (cmd.error) return cmd.error; if (dat.error) return dat.error; /* * Do not send any STOP_TRANSMISSION command from now on, * if this card does not require a STOP_TRANSMISSION command. */ if (stp.error == -ETIMEDOUT) clear_bit(ASSD_SEND_STOP, &assd_status); return 0; }
/*
 * Process one block-layer request: build the MMC read/write request,
 * issue it, poll the card status until it is ready again after writes,
 * and complete the block request.  Returns 1 when the request finished
 * successfully, 0 when it was failed and flushed from the queue.
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1;

	/*
	 * NOTE(review): on claim failure this jumps to flush_queue, which
	 * calls mmc_card_release_host() on a host that was never claimed —
	 * confirm claim/release pairing against the core API.
	 */
	if (mmc_card_claim_host(card))
		goto flush_queue;

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		/* Standard-capacity cards are byte addressed. */
		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		/* Clamp the transfer to what the host can do in one request. */
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		if (mmc_card_movinand(card)) {
			/* Predefine the block count rather than sending CMD12. */
			if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) {
				cmd.opcode = MMC_SET_BLOCK_COUNT;
				cmd.arg = req->nr_sectors;
				cmd.flags = MMC_RSP_R1;
				ret = mmc_wait_for_cmd(card->host, &cmd, 2);
			}
			if (rq_data_dir(req) == READ) {
				if (brq.data.blocks > 1) {
					brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
					brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI);
					/* brq.mrq.stop = &brq.stop; */
				} else {
					brq.cmd.opcode = MMC_READ_SINGLE_BLOCK;
					brq.data.flags |= MMC_DATA_READ;
					brq.mrq.stop = NULL;
				}
			} else {
				brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
				brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI;
				/* brq.mrq.stop = &brq.stop; */
			}
		} else {
#endif
		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		}
#endif

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		/* After a write, poll CMD13 until READY_FOR_DATA is set. */
		if (rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
				/* Work-around for broken cards setting READY_FOR_DATA
				 * when not actually ready.
				 */
				if (mmc_card_movinand(card)) {
					if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
						cmd.resp[0] &= ~R1_READY_FOR_DATA;
				}
#endif
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

flush_queue:

	mmc_card_release_host(card);

	/* Fail whatever part of the request remains. */
	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}
	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}
/*
 * Run one half of the bus-test handshake: write (BUS_TEST_W) or read
 * (BUS_TEST_R) a @len-byte test pattern (8 for an 8-bit bus, 4 for a
 * 4-bit bus).  On read, the received bytes are checked against the
 * written pattern (each checked byte must be the bitwise inverse).
 */
static int mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host,
			     u8 opcode, u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
	u8 *pattern;
	u8 *xfer;
	int idx, status;

	xfer = kmalloc(len, GFP_KERNEL);
	if (!xfer)
		return -ENOMEM;

	switch (len) {
	case 8:
		pattern = testdata_8bit;
		break;
	case 4:
		pattern = testdata_4bit;
		break;
	default:
		pr_err("%s: Invalid bus_width %d\n", mmc_hostname(host), len);
		kfree(xfer);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(xfer, pattern, len);

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = (opcode == MMC_BUS_TEST_R) ?
		MMC_DATA_READ : MMC_DATA_WRITE;
	data.sg = &sg;
	data.sg_len = 1;
	/* Fixed short timeout; the bus test needs no card-derived value. */
	data.timeout_ns = 1000000;
	data.timeout_clks = 0;
	sg_init_one(&sg, xfer, len);

	mmc_wait_for_req(host, &mrq);

	status = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/* Compare received bytes against the inverted pattern
		 * (only the first len/4 bytes are checked). */
		for (idx = 0; idx < len / 4; idx++)
			if ((pattern[idx] ^ xfer[idx]) != 0xff) {
				status = -EIO;
				break;
			}
	}
	kfree(xfer);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return status;
}
static int mmc_bustest_read(struct mmc_host *host, struct mmc_card *card, int buswidth) { struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; int bustest_recv_pat[4] = { 0x40, 0x0, 0xA5, 0xAA55 }; u32 *test_pat; int err = 0; test_pat = kmalloc(512, GFP_KERNEL); if (test_pat == NULL) return -ENOMEM; memset(test_pat, 0, 512); memset(&mrq, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&data, 0, sizeof(struct mmc_data)); mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = MMC_BUSTEST_R; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; if (buswidth == MMC_BUS_WIDTH_8) { data.blksz = 8; sg_init_one(&sg, test_pat, 8); } else if (buswidth == MMC_BUS_WIDTH_4) { data.blksz = 4; sg_init_one(&sg, test_pat, 4); } else { data.blksz = 1; sg_init_one(&sg, test_pat, 1); } mmc_set_data_timeout(&data, card); mmc_wait_for_req(host, &mrq); pr_debug("%s: Test pattern received: 0x%x\n", __func__, test_pat[0]); if (cmd.error || data.error) { pr_err("%s: cmd.error: %d data.error: %d\n", __func__, cmd.error, data.error); err = -1; goto cmderr; } if (test_pat[0] == bustest_recv_pat[buswidth]) pr_debug("%s: Bus test pass for buswidth:%d\n", __func__, buswidth); else err = -1; cmderr: kfree(test_pat); return err; }
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) { int err; u32 blocks; struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; unsigned int timeout_us; struct scatterlist sg; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) return (u32)-1; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; memset(&data, 0, sizeof(struct mmc_data)); data.timeout_ns = card->csd.tacc_ns * 100; data.timeout_clks = card->csd.tacc_clks * 100; timeout_us = data.timeout_ns / 1000; timeout_us += data.timeout_clks * 1000 / (card->host->ios.clock / 1000); if (timeout_us > 100000) { data.timeout_ns = 100000000; data.timeout_clks = 0; } data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; memset(&mrq, 0, sizeof(struct mmc_request)); mrq.cmd = &cmd; mrq.data = &data; sg_init_one(&sg, &blocks, 4); mmc_wait_for_req(card->host, &mrq); if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) return (u32)-1; blocks = ntohl(blocks); return blocks; }
/*
 * Run one half of the bus-test handshake (CMD19 write / CMD14 read):
 * transfer a @len-byte test pattern (8 for an 8-bit bus, 4 for a 4-bit
 * bus) and, on read, verify the received data against the pattern.
 * Returns 0 on success, -EINVAL for an unsupported @len, -EIO on a
 * pattern mismatch, or the command/data error.
 */
static int mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host,
			     u8 opcode, u8 len)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		printk(KERN_ERR "%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;

	if (opcode == MMC_BUS_TEST_R) {
		/* Compare received bytes against the inverted pattern
		 * (only the first len/4 bytes are checked). */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return err;
}
/*
 * Issue MMC_GEN_CMD (CMD56) and copy the 512-byte data block the card
 * returns into @buf.  Returns 0 on success, -ENOMEM on allocation
 * failure, or the first command/data/stop error.
 */
int mmc_gen_cmd(struct mmc_card *card, void *buf, u8 index,
		u8 arg1, u8 arg2, u8 mode)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_command stop = {0};
	struct scatterlist sg;
	void *bounce;

	mmc_set_blocklen(card, 512);

	bounce = kmalloc(512, GFP_KERNEL);
	if (bounce == NULL)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	/* CMD56 argument packs arg2/arg1/index plus the mode bit. */
	cmd.opcode = MMC_GEN_CMD;
	cmd.arg = (arg2 << 16) | (arg1 << 8) | (index << 1) | mode;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	stop.opcode = MMC_STOP_TRANSMISSION;
	stop.arg = 0;
	stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	sg_init_one(&sg, bounce, 512);
	mmc_set_data_timeout(&data, card);

	mmc_claim_host(card->host);
	mmc_wait_for_req(card->host, &mrq);
	mmc_release_host(card->host);

	memcpy(buf, bounce, 512);
	kfree(bounce);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return stop.error;
}
/*
 * Issue one RPMB frame sequence: CMD23 (with the high bit set for
 * authenticated data writes) followed by CMD25 or CMD18 on @blks
 * 512-byte frames.  @buf supplies the frames for a request and
 * receives them for a response.  Returns 0 on success.
 */
static int mmc_rpmb_send_command(struct mmc_card *card, u8 *buf,
				 __u16 blks, __u16 type, u8 req_type)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_command sbc = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *bounce;

	bounce = kzalloc(512 * blks, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	mrq.sbc = &sbc;
	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = NULL;

	/* CMD23: block count, plus bit 31 for RPMB data writes. */
	sbc.opcode = MMC_SET_BLOCK_COUNT;
	sbc.arg = blks;
	if ((req_type == RPMB_REQ) && type == RPMB_WRITE_DATA)
		sbc.arg |= 1 << 31;
	sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/* CMD25 for requests, CMD18 for responses. */
	sg_init_one(&sg, bounce, 512 * blks);
	if (req_type == RPMB_REQ) {
		cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
		sg_copy_from_buffer(&sg, 1, buf, 512 * blks);
		data.flags |= MMC_DATA_WRITE;
	} else {
		cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
		data.flags |= MMC_DATA_READ;
	}
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = blks;
	data.sg = &sg;
	data.sg_len = 1;

	mmc_set_data_timeout(&data, card);
	mmc_wait_for_req(card->host, &mrq);

	if (req_type != RPMB_REQ)
		sg_copy_to_buffer(&sg, 1, buf, 512 * blks);

	kfree(bounce);

	if (cmd.error)
		return cmd.error;
	return data.error;
}
static int simple_sd_ioctl_single_rw(struct msdc_ioctl* msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_request msdc_mrq; struct msdc_host *host_ctl; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL printk("user want access %d partition\n",msdc_ctl->partition); #endif mmc_send_ext_csd(host_ctl->mmc->card, l_buf); switch (msdc_ctl->partition){ case BOOT_PARTITION_1: if (0x1 != (l_buf[179] & 0x7)){ /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case BOOT_PARTITION_2: if (0x2 != (l_buf[179] & 0x7)){ /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)){ /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } if(msdc_ctl->total_size > 512){ msdc_ctl->result = -1; return msdc_ctl->result; } #if DEBUG_MMC_IOCTL printk("start MSDC_SINGLE_READ_WRITE !!\n"); #endif memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if(msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if (msdc_ctl->iswrite){ msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, 512)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel 
module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, 512); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_SINGLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0 , 512); } msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)){ printk("the device is used byte address!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_data.stop = NULL; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL printk("single block: ueser buf address is 0x%p!\n",msdc_ctl->buffer); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite){ if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer,sg_msdc_multi_buffer,512)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer,sg_msdc_multi_buffer,512); } } if (msdc_ctl->partition){ mmc_send_ext_csd(host_ctl->mmc->card,l_buf); if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } mmc_release_host(host_ctl->mmc); if (msdc_cmd.error) msdc_ctl->result= msdc_cmd.error; if (msdc_data.error) msdc_ctl->result= msdc_data.error; else msdc_ctl->result= 0; dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
/*
 * Read handler: report to user space whether the "system" partition is
 * write-protected.  Walks the card's write-protect groups using
 * opcode 31 (presumably SEND_WRITE_PROT_TYPE — 8 data bytes, 2 status
 * bits per group, i.e. 32 groups per query; TODO confirm against the
 * eMMC spec), then copies "protected"/"not_protected"/"fail" to @ubuf.
 */
static ssize_t mmc_wr_prot_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
#define PARTITION_NOT_PROTED 0
#define PARTITION_PROTED 1
	struct mmc_card *card = filp->private_data;
	/* used for mmcrequest */
	unsigned int wp_group_size;
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	unsigned char buf[8];
	unsigned int addr = 0;
	unsigned int init_addr = 0;
	char line_buf[128];
	int i, j, k;
	unsigned char ch;
	unsigned char wp_flag;
	int len = 8;
	unsigned int loop_count = 0;
	unsigned int size = 0;
	unsigned int status_prot = PARTITION_NOT_PROTED;
	struct emmc_partition *p_emmc_partition;

	pr_info("[HW]: eMMC protect driver built on %s @ %s\n",
		__DATE__, __TIME__);

	/* Locate the "system" partition in the global partition table. */
	p_emmc_partition = g_emmc_partition;
	for (i = 0; i < MAX_EMMC_PARTITION_NUM; i++) {
		if (p_emmc_partition->flags == 0)
			break;
		if (strcmp(p_emmc_partition->name, "system") == 0) {
			addr = (unsigned int)(p_emmc_partition->start);
			size = (unsigned int)(p_emmc_partition->size_sectors);
			pr_info("[HW]:%s: partitionname = %s \n", __func__, p_emmc_partition->name);
			pr_info("[HW]:%s: partition start from = 0x%08x \n", __func__, addr);
			pr_info("[HW]:%s: partition size = 0x%08x \n", __func__, size);
		}
		p_emmc_partition++;
	}
	init_addr = addr;

	/* NOTE(review): addr is unsigned, so this check can never fire. */
	if (addr < 0) {
		pr_err("[HW]:%s:invalid addr = 0x%08x.", __func__, addr);
		if (copy_to_user(ubuf, "fail", strlen("fail "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;;
		}
		return -1;
	}

	/* Write-protect group size in 512-byte sectors (from EXT_CSD). */
	wp_group_size = (512 * 1024) * card->ext_csd.raw_hc_erase_gap_size *
		card->ext_csd.raw_hc_erase_grp_size / 512;

	if (addr % wp_group_size == 0) {
	} else {
		/* Round up to the next write-protect group boundary. */
		addr = (addr / wp_group_size) * wp_group_size + wp_group_size;
		pr_info("[HW]:%s: setting start area is not muti size of wp_group_size\n", __func__);
	}
	loop_count = (init_addr + size - addr) / wp_group_size;
	pr_info("[HW]:%s: EXT_CSD_HC_WP_GRP_SIZE = 0x%02x. \n", __func__, card->ext_csd.raw_hc_erase_gap_size);
	pr_info("[HW]:%s: EXT_CSD_HC_ERASE_GRP_SIZE = 0x%02x. \n", __func__, card->ext_csd.raw_hc_erase_grp_size);
	pr_info("[HW]:%s: addr = 0x%08x, wp_group_size=0x%08x, size = 0x%08x \n", __func__, addr, wp_group_size, size);
	pr_info("[HW]:%s: loop_count = 0x%08x \n", __func__, loop_count);

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ... */

	/* Step back 32 groups so the first query covers them. */
	addr = addr - wp_group_size * 32;
	for (k = 0; k < loop_count / 32 + 2; k++) {
		data_buf = kmalloc(32, GFP_KERNEL);	/* dma size 32 */
		if (data_buf == NULL)
			return -ENOMEM;

		mrq.cmd = &cmd;
		mrq.data = &data;

		cmd.opcode = 31;
		cmd.arg = addr;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = len;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.sg = &sg;
		data.sg_len = 1;

		sg_init_one(&sg, data_buf, len);

		mmc_set_data_timeout(&data, card);

		mmc_claim_host(card->host);
		mmc_wait_for_req(card->host, &mrq);
		mmc_release_host(card->host);

		memcpy(buf, data_buf, len);
		kfree(data_buf);

		/*
		 * start to show the detailed read status from the response
		 */
#if 0
		for (i = 0; i < 8; i++) {
			pr_info("[HW]:%s: buffer = 0x%02x \n", __func__, buf[i]);
		}
#endif
		/*
		 * end of show the detailed read status from the response
		 */

		/* Decode two protection bits per group, last byte first. */
		for (i = 7; i >= 0; i--) {
			ch = buf[i];
			for (j = 0; j < 4; j++) {
				wp_flag = ch & 0x3;
				memset(line_buf, 0x00, sizeof(line_buf));
				sprintf(line_buf, "[0x%08x~0x%08x] Write protection group is ", addr, addr + wp_group_size - 1);
				switch (wp_flag) {
				case 0:
					strcat(line_buf, "disable");
					break;
				case 1:
					strcat(line_buf, "temporary write protection");
					break;
				case 2:
					strcat(line_buf, "power-on write protection");
					break;
				case 3:
					strcat(line_buf, "permanent write protection");
					break;
				default:
					break;
				}
				pr_info("%s: %s\n", mmc_hostname(card->host), line_buf);
				if (wp_flag == 1) {
					if (is_within_group(addr, init_addr, size, wp_group_size) == 0) {
						status_prot = PARTITION_PROTED;
						/* pr_info("[HW]: %s: addr = 0x%08x, init_addr = 0x%08x, size = 0x%08x, group protected \n", __func__, addr, init_addr, size); */
					}
				}
				addr += wp_group_size;
				ch = ch >> 2;
			}
		}
	}
	pr_info("[HW]: %s: end sector = 0x%08x \n", __func__, size + init_addr);

	if (cmd.error) {
		pr_err("[HW]:%s:cmd.error=%d.", __func__, cmd.error);
		if (copy_to_user(ubuf, "fail", strlen("fail "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;;
		}
		return cmd.error;
	}
	if (data.error) {
		pr_err("[HW]:%s:data.error=%d.", __func__, data.error);
		if (copy_to_user(ubuf, "fail", strlen("fail "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;;
		}
		return data.error;
	}

	/* Report the aggregate result to user space. */
	switch (status_prot) {
	case PARTITION_PROTED:
		if (copy_to_user(ubuf, "protected", strlen("protected "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;;
		}
		pr_info("[HW]: %s: protected \n", __func__);
		break;
	case PARTITION_NOT_PROTED:
		if (copy_to_user(ubuf, "not_protected", strlen("not_protected "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;;
		}
		pr_info("[HW]: %s: not_protected \n", __func__);
		break;
	default:
		break;
	}
	return 0;
}
/*
 * called by async task to perform the operation synchronously using
 * direct MMC APIs.
 *
 * Builds a single CMD53 block-basis transfer whose scatterlist covers
 * all valid entries of the HIF scatter request, waits for it, then
 * either invokes the completion routine (asynchronous requests) or
 * signals the waiting caller.  Returns A_OK or A_ERROR; the same
 * status is stored in pReq->CompletionStatus.
 */
A_STATUS DoHifReadWriteScatter(HIF_DEVICE *device, BUS_REQUEST *busrequest)
{
	int i;
	A_UINT8 rw;
	A_UINT8 opcode;
	struct mmc_request mmcreq;
	struct mmc_command cmd;
	struct mmc_data data;
	HIF_SCATTER_REQ_PRIV *pReqPriv;
	HIF_SCATTER_REQ *pReq;
	A_STATUS status = A_OK;
	struct scatterlist *pSg;

	pReqPriv = busrequest->pScatterReq;
	A_ASSERT(pReqPriv != NULL);
	pReq = pReqPriv->pHifScatterReq;

	memset(&mmcreq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	/* The whole transfer is expressed in HIF_MBOX_BLOCK_SIZE blocks. */
	data.blksz = HIF_MBOX_BLOCK_SIZE;
	data.blocks = pReq->TotalLength / HIF_MBOX_BLOCK_SIZE;

	AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
		("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d) , (tot:%d,sg:%d)\n",
		(pReq->Request & HIF_WRITE) ? "WRITE":"READ", pReq->Address,
		data.blksz, data.blocks,
		pReq->TotalLength, pReq->ValidScatterEntries));

	if (pReq->Request & HIF_WRITE) {
		rw = _CMD53_ARG_WRITE;
		data.flags = MMC_DATA_WRITE;
	} else {
		rw = _CMD53_ARG_READ;
		data.flags = MMC_DATA_READ;
	}

	/* Fixed vs incrementing register address mode for CMD53. */
	if (pReq->Request & HIF_FIXED_ADDRESS) {
		opcode = _CMD53_ARG_FIXED_ADDRESS;
	} else {
		opcode = _CMD53_ARG_INCR_ADDRESS;
	}

	/* fill SG entries */
	pSg = pReqPriv->sgentries;
	sg_init_table(pSg, pReq->ValidScatterEntries);

	/* assemble SG list */
	for (i = 0; i < pReq->ValidScatterEntries; i++, pSg++) {
		/* setup each sg entry */
		if ((unsigned long)pReq->ScatterList[i].pBuffer & 0x3) {
			/* note some scatter engines can handle unaligned buffers, print this
			 * as informational only */
			AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
				("HIF: (%s) Scatter Buffer is unaligned 0x%lx\n",
				pReq->Request & HIF_WRITE ? "WRITE":"READ",
				(unsigned long)pReq->ScatterList[i].pBuffer));
		}
		AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, (" %d: Addr:0x%lX, Len:%d \n",
			i, (unsigned long)pReq->ScatterList[i].pBuffer,
			pReq->ScatterList[i].Length));
		sg_set_buf(pSg, pReq->ScatterList[i].pBuffer,
			   pReq->ScatterList[i].Length);
	}

	/* set scatter-gather table for request */
	data.sg = pReqPriv->sgentries;
	data.sg_len = pReq->ValidScatterEntries;

	/* set command argument */
	SDIO_SET_CMD53_ARG(cmd.arg, rw, device->func->num,
			   _CMD53_ARG_BLOCK_BASIS, opcode, pReq->Address,
			   data.blocks);
	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmcreq.cmd = &cmd;
	mmcreq.data = &data;

	mmc_set_data_timeout(&data, device->func->card);

	/* synchronous call to process request */
	mmc_wait_for_req(device->func->card->host, &mmcreq);

	if (cmd.error) {
		status = A_ERROR;
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
			("HIF-SCATTER: cmd error: %d \n", cmd.error));
	}
	if (data.error) {
		status = A_ERROR;
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
			("HIF-SCATTER: data error: %d \n", data.error));
	}

	if (A_FAILED(status)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
			("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n",
			(pReq->Request & HIF_WRITE) ? "WRITE":"READ",
			pReq->Address, data.blksz, data.blocks));
	}

	/* set completion status, fail or success */
	pReq->CompletionStatus = status;

	if (pReq->Request & HIF_ASYNCHRONOUS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
			("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n",
			(unsigned long)busrequest, status));
		/* complete the request */
		A_ASSERT(pReq->CompletionRoutine != NULL);
		pReq->CompletionRoutine(pReq);
	} else {
		AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
			("HIF-SCATTER async_task upping busrequest : 0x%lX (%d)\n",
			(unsigned long)busrequest, status));
		/* signal wait */
		up(&busrequest->sem_req);
	}

	return status;
}
static int CPRM_CMD_SecureRW(struct mmc_card *card, unsigned int command, unsigned int dir, unsigned long arg, unsigned char *buff, unsigned int length) { int err; int i = 0; struct mmc_request mrq; struct mmc_command cmd; struct mmc_command stop; struct mmc_data data; struct scatterlist sg; if (command == SD_ACMD25_SECURE_WRITE_MULTI_BLOCK || command == SD_ACMD18_SECURE_READ_MULTI_BLOCK) { return -EINVAL; } memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) return (u32)-1; if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) return (u32)-1; printk("CPRM_CMD_SecureRW: 1, command : %d\n", command); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = command; if (command == SD_ACMD43_GET_MKB) cmd.arg = arg; else cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; memset(&data, 0, sizeof(struct mmc_data)); data.timeout_ns = 100000000; data.timeout_clks = 0; #if defined(CONFIG_TARGET_LOCALE_NTT) data.timeout_ns = 100000000; data.timeout_clks = 0; #endif data.blksz = length; data.blocks = 1; data.flags = dir; data.sg = &sg; data.sg_len = 1; stop.opcode = MMC_STOP_TRANSMISSION; stop.arg = 0; stop.flags = MMC_RSP_R1B | MMC_CMD_AC; memset(&mrq, 0, sizeof(struct mmc_request)); mrq.cmd = &cmd; mrq.data = &data; if (data.blocks == 1) mrq.stop = NULL; else mrq.stop = &stop; printk(KERN_DEBUG "CPRM_CMD_SecureRW: 2\n"); sg_init_one(&sg, buff, length); printk(KERN_DEBUG "CPRM_CMD_SecureRW: 3\n"); mmc_wait_for_req(card->host, &mrq); printk(KERN_DEBUG "CPRM_CMD_SecureRW: 4\n"); i = 0; do { printk(KERN_DEBUG "%x", buff[i++]); if (i > 10) break; } while (i < length); printk(KERN_DEBUG "\n"); if (cmd.error) { printk(KERN_DEBUG "%s]cmd.error=%d\n ", __func__, cmd.error); return cmd.error; } if (data.error) { printk(KERN_DEBUG "%s]data.error=%d\n ", __func__, data.error); return data.error; } err = 
mmc_wait_busy(card); printk(KERN_DEBUG "CPRM_CMD_SecureRW: 5\n"); if (err) return err; return 0; }
/*
 * CPRM_CMD_SecureMultiRW - issue a multi-block CPRM secure command.
 * @card:    target card
 * @command: secure application opcode
 * @dir:     MMC_DATA_READ or MMC_DATA_WRITE (stored into data.flags)
 * @arg:     command argument, only used for SD_ACMD43_GET_MKB
 * @buff:    data buffer of @length bytes
 * @length:  transfer length in bytes; rounded up to 512-byte blocks
 *
 * Sends CMD55 followed by the secure command as a multi-block transfer
 * terminated by CMD12, then polls the card out of the busy state via
 * mmc_wait_busy().  Returns 0 on success, a negative mmc error code, or
 * -1 ((u32)-1 returned through an int) when the APP_CMD handshake fails.
 *
 * NOTE(review): the scatterlist covers only @length bytes while the
 * transfer is data.blocks * 512 bytes; for a read whose length is not a
 * multiple of 512 the host may write past the buffer — confirm callers
 * always pass block-aligned lengths.
 * NOTE(review): the sg_copy_from/to_buffer calls copy @buff onto the sg
 * that already points at @buff (self-copy); presumably leftover from a
 * bounce-buffer version — verify before relying on them.
 */
static int CPRM_CMD_SecureMultiRW(struct mmc_card *card,
				  unsigned int command,
				  unsigned int dir,
				  unsigned long arg,
				  unsigned char *buff,
				  unsigned int length)
{
	int err;
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;
	unsigned long flags;
	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&stop, 0, sizeof(struct mmc_command));

	/* CMD55: announce application-specific command */
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	/* card must acknowledge APP_CMD in its R1 status (native mode) */
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 1\n");

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = command;
	if (command == SD_ACMD43_GET_MKB)
		cmd.arg = arg;	/* MKB fetch carries an argument */
	else
		cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));
	/* fixed 100 ms timeout (duplicated under the NTT config with
	 * identical values) */
	data.timeout_ns = 100000000;
	data.timeout_clks = 0;
#if defined(CONFIG_TARGET_LOCALE_NTT)
	data.timeout_ns = 100000000;
	data.timeout_clks = 0;
#endif
	data.blksz = 512;
	/* round the byte length up to whole 512-byte blocks */
	data.blocks = (length + 511) / 512;
	data.flags = dir;
	data.sg = &sg;
	data.sg_len = 1;

	/* CMD12 terminates the open-ended multi-block transfer */
	stop.opcode = MMC_STOP_TRANSMISSION;
	stop.arg = 0;
	stop.flags = MMC_RSP_R1B | MMC_CMD_AC;

	memset(&mrq, 0, sizeof(struct mmc_request));
	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 2\n");

	sg_init_one(&sg, buff, length);

	if (dir == MMC_DATA_WRITE) {
		/* see NOTE(review) above: self-copy of buff into its own sg */
		local_irq_save(flags);
		sg_copy_from_buffer(&sg, data.sg_len, buff, length);
		local_irq_restore(flags);
	}

	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 3\n");
	mmc_wait_for_req(card->host, &mrq);
	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 4\n");

	if (cmd.error) {
		printk(KERN_DEBUG "%s]cmd.error=%d\n", __func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		printk(KERN_DEBUG "%s]data.error=%d\n", __func__, data.error);
		return data.error;
	}

	/* wait for the card to finish programming before touching buff */
	err = mmc_wait_busy(card);
	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 5\n");

	if (dir == MMC_DATA_READ) {
		/* see NOTE(review) above: self-copy of sg back into buff */
		local_irq_save(flags);
		sg_copy_to_buffer(&sg, data.sg_len, buff, length);
		local_irq_restore(flags);
	}
	if (err)
		return err;

	return 0;
}
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error) { struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; struct mmc_ios *ios = &host->ios; const u8 *tuning_block_pattern; int size, err = 0; u8 *data_buf; if (ios->bus_width == MMC_BUS_WIDTH_8) { tuning_block_pattern = tuning_blk_pattern_8bit; size = sizeof(tuning_blk_pattern_8bit); } else if (ios->bus_width == MMC_BUS_WIDTH_4) { tuning_block_pattern = tuning_blk_pattern_4bit; size = sizeof(tuning_blk_pattern_4bit); } else return -EINVAL; data_buf = kzalloc(size, GFP_KERNEL); if (!data_buf) return -ENOMEM; mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = opcode; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = size; data.blocks = 1; data.flags = MMC_DATA_READ; /* * According to the tuning specs, Tuning process * is normally shorter 40 executions of CMD19, * and timeout value should be shorter than 150 ms */ data.timeout_ns = 150 * NSEC_PER_MSEC; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, data_buf, size); mmc_wait_for_req(host, &mrq); if (cmd_error) *cmd_error = cmd.error; if (cmd.error) { err = cmd.error; goto out; } if (data.error) { err = data.error; goto out; } if (memcmp(data_buf, tuning_block_pattern, size)) err = -EIO; out: kfree(data_buf); return err; }
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pkt: skb pointer
 *
 * This function takes the respbonsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func, bool write,
				 u32 addr, struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		/* total transfer size, rounded up to the function block size */
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		/* allocate page-sized bounce skbs covering the whole transfer */
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	/* CMD53 (IO_RW_EXTENDED) argument assembly */
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	/* issue one CMD53 per chunk of at most max_req_sz / max_seg_cnt */
	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			/* clamp segment to host limits */
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			/* advance to the next skb once this one is consumed */
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (func->num == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, func->card);
		mmc_wait_for_req(func->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			/* card is gone; stop issuing further chunks */
			brcmf_sdiod_change_state(sdiodev,
						 BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	/* copy the bounce buffers back into the caller's skbs */
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint,
					       pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
/*
 * NOTE(review): the function body appears truncated at this point in this
 * chunk — the "exit:" cleanup label targeted by the gotos above, freeing of
 * local_list and the final "return ret;" are missing.  Confirm against the
 * complete file before editing further.
 */
/*
 * sdhci_msm_execute_tuning - sweep all DLL phases and pick the best one.
 * @host:   SDHCI host being tuned
 * @opcode: tuning command opcode (CMD19 / CMD21)
 *
 * For each of the 16 delay-line phases, reads one tuning block and
 * records the phases at which the received data matches the reference
 * pattern.  The "most appropriate" of the good phases is then programmed
 * into the delay line.  The whole sequence is retried up to 3 times.
 * Returns 0 on success (or when tuning is not required), -EIO when no
 * phase works, or a negative error from the DLL helpers / -ENOMEM.
 */
static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int tuning_seq_cnt = 3;	/* full-sweep retries before giving up */
	u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
	const u32 *tuning_block_pattern = tuning_block_64;
	int size = sizeof(tuning_block_64); /* Pattern size in bytes */
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= 100 * 1000 * 1000 ||
	    !((ios.timing == MMC_TIMING_MMC_HS200) ||
	      (ios.timing == MMC_TIMING_UHS_SDR104)))
		return 0;

	/* HS200 on an 8-bit bus uses the longer 128-byte pattern */
	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
	    (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
		tuning_block_pattern = tuning_block_128;
		size = sizeof(tuning_block_128);
	}

	data_buf = kmalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		goto out;

	phase = 0;
	do {
		struct mmc_command cmd = { 0 };
		struct mmc_data data = { 0 };
		struct mmc_request mrq = {
			.cmd = &cmd,
			.data = &data
		};
		struct scatterlist sg;

		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto out;

		cmd.opcode = opcode;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = size;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.timeout_ns = NSEC_PER_SEC;	/* 1 second */

		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, data_buf, size);
		/* clear the buffer so a failed read cannot look like a match */
		memset(data_buf, 0, size);
		mmc_wait_for_req(mmc, &mrq);

		if (!cmd.error && !data.error &&
		    !memcmp(data_buf, tuning_block_pattern, size)) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				 mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		/* pick the centre of the widest good-phase window */
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			goto out;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			goto out;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			 mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

out:
	kfree(data_buf);
	return rc;
}
/*
 * mmc_wp_status - read the write-protect type of one write-protect group.
 * @part: GPP partition part number (EXT_CSD_PART_CONFIG_ACC_GPx based)
 * @addr: GPP write group index within the partition
 * @wp_status: out-parameter; receives the 2-bit WP type of the group
 *
 * Switches to the requested GPP partition, issues CMD31
 * (MMC_SEND_WRITE_PROT_TYPE) for the group's address, polls the card
 * out of the programming state and extracts the first group's type from
 * the last byte of the 8-byte response.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM for invalid card/partition or
 * allocation failure, and -EPERM on any command/status error.
 *
 * Fixes vs. the previous version: every error path now releases the
 * claimed host and restores the user partition (the old code leaked the
 * host claim on all "goto out" paths after mmc_claim_host()), and the
 * redundant self-copy of rbuf through its own scatterlist was dropped.
 */
int mmc_wp_status(struct mmc_card *card, unsigned int part,
		  unsigned int addr, u8 *wp_status)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {0};
	struct scatterlist sg;
	u32 status = 0;
	int err = 0;
	u8 *rbuf = NULL;

	if (!card)
		return -ENODEV;
	if (!card->ext_csd.gpp_sz[part - EXT_CSD_PART_CONFIG_ACC_GP0]) {
		pr_err("%s: doesn't have GPP%d\n", __func__, part - 3);
		return -ENODEV;
	}

	/* CMD31 returns 8 bytes of write-protect type bits */
	rbuf = kzalloc(8, GFP_KERNEL);
	if (rbuf == NULL) {
		pr_err("%s: no memory\n", __func__);
		return -ENOMEM;
	}

	cmd.opcode = MMC_SEND_WRITE_PROT_TYPE;
	cmd.arg = addr * card->ext_csd.wpg_sz;	/* group index -> address */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.sg = &sg;
	data.sg_len = 1;
	data.blksz = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	sg_init_one(data.sg, rbuf, 8);

	mrq.data = &data;
	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	mmc_set_data_timeout(&data, card);

	err = mmc_switch_part(card, part);
	if (err) {
		dev_err(mmc_dev(card->host), "%s: switch error %d\n",
			__func__, err);
		goto out_release;
	}

	mmc_wait_for_req(card->host, &mrq);
	if (cmd.error)
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
	if (data.error)
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);

	/* Must check status to be sure of no errors */
	do {
		err = mmc_send_status(card, &status);
		if (err) {
			pr_err("%s: get card status err %d, status 0x%x\n",
			       __func__, err, status);
			goto out_restore;
		}
		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
			break;
		if (mmc_host_is_spi(card->host))
			break;
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(card->host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND) {
			pr_err("%s: error card status 0x%x\n",
			       __func__, status);
			goto out_restore;
		}
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch",
				__func__, status);
		if (status & R1_SWITCH_ERROR)
			pr_err("%s: card switch error, status 0x%x\n",
			       __func__, status);
		if (status & R1_OUT_OF_RANGE) {
			pr_err("%s: addr out of range, status 0x%x\n",
			       __func__, status);
			goto out_restore;
		}
	}

	mmc_switch_part(card, EXT_CSD_PART_CONFIG_ACC_USER);
	mmc_release_host(card->host);

	/*
	 * the first write protect group type is in the last two
	 * bits in the last byte read from the device.
	 */
	*wp_status = rbuf[7] & 0x3;
	kfree(rbuf);
	return 0;

out_restore:
	/* best effort: go back to the user data area */
	mmc_switch_part(card, EXT_CSD_PART_CONFIG_ACC_USER);
out_release:
	mmc_release_host(card->host);
	kfree(rbuf);
	return -EPERM;
}
/*
 * mmc_blk_issue_rq - service one block-layer request on the MMC card.
 * @mq:  the mmc queue the request came from
 * @req: the block-layer request to execute
 *
 * Legacy (2.6-era) request loop: maps the request into a scatterlist,
 * issues a read (single/multi block) or a single-block write, polls
 * CMD13 until the card reports READY_FOR_DATA, then completes the
 * transferred chunk via end_that_request_chunk() and repeats until the
 * whole request is done.  Returns 1 on success, 0 on failure (the whole
 * request is then failed).
 *
 * NOTE(review): on any error — including a failed mmc_card_claim_host()
 * — control jumps to cmd_err, which calls mmc_card_release_host(); when
 * the claim itself failed this releases a host that was never claimed.
 * Confirm against the claim/release semantics of this kernel version.
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret;

	if (mmc_card_claim_host(card))
		goto cmd_err;

	do {
		struct mmc_blk_request brq;
		struct mmc_command cmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		/* sector -> byte address (pre-block-addressing era) */
		brq.cmd.arg = req->sector << 9;
		brq.cmd.flags = MMC_RSP_R1;
		/* 10x the card's nominal access time as the data timeout */
		brq.data.timeout_ns = card->csd.tacc_ns * 10;
		brq.data.timeout_clks = card->csd.tacc_clks * 10;
		brq.data.blksz_bits = md->block_bits;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B;

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = brq.data.blocks > 1 ?
				MMC_READ_MULTIPLE_BLOCK :
				MMC_READ_SINGLE_BLOCK;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			/* writes are issued one block at a time (CMD24) */
			brq.cmd.opcode = MMC_WRITE_BLOCK;
			brq.cmd.flags = MMC_RSP_R1B;
			brq.data.flags |= MMC_DATA_WRITE;
			brq.data.blocks = 1;
		}
		/* CMD12 is only needed for open-ended multi-block reads */
		brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}
		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}
		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		/* poll CMD13 until the card can accept new data */
		do {
			int err;

			cmd.opcode = MMC_SEND_STATUS;
			cmd.arg = card->rca << 16;
			cmd.flags = MMC_RSP_R1;
			err = mmc_wait_for_cmd(card->host, &cmd, 5);
			if (err) {
				printk(KERN_ERR "%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				goto cmd_err;
			}
		} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
		if (cmd.resp[0] & ~0x00000900)
			printk(KERN_ERR "%s: status = %08x\n",
			       req->rq_disk->disk_name, cmd.resp[0]);
		if (mmc_decode_status(cmd.resp))
			goto cmd_err;
#endif

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
	mmc_card_release_host(card);

	/*
	 * This is a little draconian, but until we get proper
	 * error handling sorted out here, its the best we can
	 * do - especially as some hosts have no idea how much
	 * data was transferred before the error occurred.
	 */
	spin_lock_irq(&md->lock);
	do {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	} while (ret);

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req);
	spin_unlock_irq(&md->lock);

	return 0;
}
/**
 * @brief This function use SG mode to read/write data into card memory
 *
 * @param handle A Pointer to the moal_handle structure
 * @param pmbuf_list Pointer to a linked list of mlan_buffer structure
 * @param port Port
 * @param write write flag
 *
 * @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
mlan_status
woal_sdio_rw_mb(moal_handle *handle, pmlan_buffer pmbuf_list, t_u32 port,
		t_u8 write)
{
	struct scatterlist sg_list[SDIO_MP_AGGR_DEF_PKT_LIMIT_MAX];
	int num_sg = pmbuf_list->use_count;
	int i = 0;
	mlan_buffer *pmbuf = NULL;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct sdio_func *func = ((struct sdio_mmc_card *)handle->card)->func;
	t_u32 ioport = (port & MLAN_SDIO_IO_PORT_MASK);
	/* total length must be a whole number of SDIO blocks */
	t_u32 blkcnt = pmbuf_list->data_len / MLAN_SDIO_BLOCK_SIZE;
	int status;

	if (num_sg > SDIO_MP_AGGR_DEF_PKT_LIMIT_MAX) {
		PRINTM(MERROR, "ERROR: num_sg=%d", num_sg);
		return MLAN_STATUS_FAILURE;
	}
	/* one sg entry per aggregated buffer in the list */
	sg_init_table(sg_list, num_sg);
	pmbuf = pmbuf_list->pnext;
	for (i = 0; i < num_sg; i++) {
		if (pmbuf == pmbuf_list)
			break;	/* circular list wrapped early */
		sg_set_buf(&sg_list[i], pmbuf->pbuf + pmbuf->data_offset,
			   pmbuf->data_len);
		pmbuf = pmbuf->pnext;
	}
	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sg_list;
	mmc_dat.sg_len = num_sg;
	mmc_dat.blksz = MLAN_SDIO_BLOCK_SIZE;
	mmc_dat.blocks = blkcnt;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;

	/* CMD53 (IO_RW_EXTENDED) argument assembly */
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1 << 31 : 0;
	mmc_cmd.arg |= (func->num & 0x7) << 28;
	mmc_cmd.arg |= 1 << 27;	/* block basic */
	mmc_cmd.arg |= 0;	/* fix address */
	mmc_cmd.arg |= (ioport & 0x1FFFF) << 9;
	mmc_cmd.arg |= blkcnt & 0x1FF;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	sdio_claim_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
	mmc_set_data_timeout(&mmc_dat,
			     ((struct sdio_mmc_card *)handle->card)->func->
			     card);
	mmc_wait_for_req(((struct sdio_mmc_card *)handle->card)->func->card->
			 host, &mmc_req);

	if (mmc_cmd.error || mmc_dat.error) {
		PRINTM(MERROR, "CMD53 %s cmd_error = %d data_error=%d\n",
		       write ? "write" : "read", mmc_cmd.error,
		       mmc_dat.error);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
		/* issue abort cmd52 command through F0*/
		sdio_f0_writeb(((struct sdio_mmc_card *)handle->card)->func,
			       0x01, SDIO_CCCR_ABORT, &status);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
		sdio_release_host(((struct sdio_mmc_card *)handle->card)->
				  func);
#endif
		return MLAN_STATUS_FAILURE;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	sdio_release_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
	return MLAN_STATUS_SUCCESS;
}
/* * NOTE: void *buf, caller for the buf is required to use DMA-capable * buffer or on-stack buffer (with some overhead in callee). */ static int mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, u32 opcode, void *buf, unsigned len) { struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; void *data_buf; int is_on_stack; is_on_stack = object_is_on_stack(buf); if (is_on_stack) { /* * dma onto stack is unsafe/nonportable, but callers to this * routine normally provide temporary on-stack buffers ... */ data_buf = kmalloc(len, GFP_KERNEL); if (!data_buf) return -ENOMEM; } else data_buf = buf; mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = opcode; cmd.arg = 0; /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we * rely on callers to never use this with "native" calls for reading * CSD or CID. Native versions of those commands use the R2 type, * not R1 plus a data block. */ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = len; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, data_buf, len); if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) { /* * The spec states that CSR and CID accesses have a timeout * of 64 clock cycles. */ data.timeout_ns = 0; data.timeout_clks = 64; } else mmc_set_data_timeout(&data, card); mmc_wait_for_req(host, &mrq); if (is_on_stack) { memcpy(buf, data_buf, len); kfree(data_buf); } #ifdef CONFIG_HUAWEI_EMMC_DSM if(cmd.error || data.error) if(!strcmp(mmc_hostname(host), "mmc0")){ DSM_EMMC_LOG(card, DSM_EMMC_SEND_CXD_ERR, "opcode:%d failed, cmd.error:%d, data.error:%d\n", opcode, cmd.error, data.error); } #endif if (cmd.error) return cmd.error; if (data.error) return data.error; return 0; }
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_command msdc_stop; #ifdef MTK_MSDC_USE_CMD23 struct mmc_command msdc_sbc; #endif struct mmc_request msdc_mrq; struct msdc_host *host_ctl; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL printk("user want access %d partition\n",msdc_ctl->partition); #endif mmc_send_ext_csd(host_ctl->mmc->card, l_buf); switch (msdc_ctl->partition){ case BOOT_PARTITION_1: if (0x1 != (l_buf[179] & 0x7)){ /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case BOOT_PARTITION_2: if (0x2 != (l_buf[179] & 0x7)){ /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)){ /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } if(msdc_ctl->total_size > 64*1024){ msdc_ctl->result = -1; return msdc_ctl->result; } memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); memset(&msdc_stop, 0, sizeof(struct mmc_command)); #ifdef MTK_MSDC_USE_CMD23 memset(&msdc_sbc, 0, sizeof(struct mmc_command)); #endif msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if(msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if (msdc_ctl->iswrite){ msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { 
if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size); } #ifdef MTK_MSDC_USE_CMD23 if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)){ msdc_mrq.sbc = &msdc_sbc; msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT; msdc_mrq.sbc->arg = msdc_data.blocks; msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; } #endif msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)){ printk("this device use byte address!!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_stop.opcode = MMC_STOP_TRANSMISSION; msdc_stop.arg = 0; msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; msdc_data.stop = &msdc_stop; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL printk("total size is %d\n",msdc_ctl->total_size); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite){ if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size); } } if (msdc_ctl->partition){ mmc_send_ext_csd(host_ctl->mmc->card,l_buf); if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; 
l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } mmc_release_host(host_ctl->mmc); if (msdc_cmd.error) msdc_ctl->result = msdc_cmd.error; if (msdc_data.error){ msdc_ctl->result = msdc_data.error; } else { msdc_ctl->result = 0; } dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) { struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; //BUG_ON(!card); //BUG_ON(fn > 7); //BUG_ON(blocks == 1 && blksz > 512); //WARN_ON(blocks == 0); //WARN_ON(blksz == 0); //memset(&mrq, 0, sizeof(struct mmc_request)); //memset(&cmd, 0, sizeof(struct mmc_command)); //memset(&data, 0, sizeof(struct mmc_data)); mrq.cmd = &cmd; mrq.data = &data; mrq.stop = NULL; cmd.opcode = SD_IO_RW_EXTENDED; cmd.arg = write ? 0x80000000 : 0x00000000; cmd.arg |= fn << 28; cmd.arg |= incr_addr ? 0x04000000 : 0x00000000; cmd.arg |= addr << 9; if (blocks == 1 && blksz <= 512) cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */ else cmd.arg |= 0x08000000 | blocks; /* block mode */ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; cmd.retries = 0; cmd.resp[0] = 0; //cmd.resp[1] = 0; //cmd.resp[2] = 0; //cmd.resp[3] = 0; data.blksz = blksz; data.blocks = blocks; data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; data.timeout_ns = 1000000000; data.timeout_clks = 0; data.bytes_xfered = 0; sg_init_one(&sg, buf, blksz * blocks); //mmc_set_data_timeout(&data, card); mmc_wait_for_req(card->host, &mrq); if (cmd.error) return cmd.error; else if (data.error) return data.error; //if (mmc_host_is_spi(card->host)) { /* host driver already reported errors */ //} else { if (cmd.resp[0] & R5_ERROR) return -EIO; else if (cmd.resp[0] & R5_FUNCTION_NUMBER) return -EINVAL; else if (cmd.resp[0] & R5_OUT_OF_RANGE) return -ERANGE; //} return 0; }
static int mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, u32 opcode, void *buf, unsigned len) { struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; void *data_buf; /* dma onto stack is unsafe/nonportable, but callers to this * routine normally provide temporary on-stack buffers ... */ data_buf = kmalloc(len, GFP_KERNEL); if (data_buf == NULL) return -ENOMEM; memset(&mrq, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&data, 0, sizeof(struct mmc_data)); mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = opcode; cmd.arg = 0; /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we * rely on callers to never use this with "native" calls for reading * CSD or CID. Native versions of those commands use the R2 type, * not R1 plus a data block. */ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = len; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, data_buf, len); if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) { /* * The spec states that CSR and CID accesses have a timeout * of 64 clock cycles. */ data.timeout_ns = 0; data.timeout_clks = 64; } else mmc_set_data_timeout(&data, card); mmc_wait_for_req(host, &mrq); memcpy(buf, data_buf, len); kfree(data_buf); if (cmd.error) return cmd.error; if (data.error) return data.error; return 0; }
int mmc_test_bus_width(struct mmc_card *card, int bits) { struct mmc_request mrq; struct mmc_command cmd; struct mmc_data data; struct scatterlist sg; int len; u8 test_data_write[8]; u8 test_data_read[64]; switch (bits) { case 8: test_data_write[0] = 0x55; test_data_write[1] = 0xaa; test_data_write[2] = 0x00; test_data_write[3] = 0x00; test_data_write[4] = 0x00; test_data_write[5] = 0x00; test_data_write[6] = 0x00; test_data_write[7] = 0x00; len = 8; break; case 4: test_data_write[0] = 0x5a; test_data_write[1] = 0x00; test_data_write[2] = 0x00; test_data_write[3] = 0x00; len = 4; break; default: return 0; } memset(&mrq, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&data, 0, sizeof(struct mmc_data)); cmd.opcode = MMC_BUSTEST_W; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; data.flags = MMC_DATA_WRITE; data.blksz = 64; data.blocks = 1; data.sg = &sg; data.sg_len = 1; mrq.cmd = &cmd; mrq.data = &data; sg_init_one(&sg, &test_data_write, 64); /* * The spec states that MMC_BUSTEST_W and BUSTEST_R accesses have a timeout * of 64 clock cycles. */ data.timeout_ns = 0; data.timeout_clks = 64; mmc_wait_for_req(card->host, &mrq); if (cmd.error || data.error ) { printk(KERN_INFO "Failed to send CMD19: %d %d\n", cmd.error, data.error); return 0; } /* Now read back */ memset(&mrq, 0, sizeof(struct mmc_request)); memset(&cmd, 0, sizeof(struct mmc_command)); memset(&data, 0, sizeof(struct mmc_data)); memset (&test_data_read, 0, sizeof(test_data_read)); cmd.opcode = MMC_BUSTEST_R; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; data.flags = MMC_DATA_READ; data.blksz = sizeof(test_data_read); data.blocks = 1; data.sg = &sg; data.sg_len = 1; mrq.cmd = &cmd; mrq.data = &data; sg_init_one(&sg, &test_data_read, sizeof(test_data_read)); /* * The spec states that MMC_BUSTEST_W and BUSTEST_R accesses have a timeout * of 64 clock cycles. 
*/ data.timeout_ns = 0; data.timeout_clks = 64; mmc_wait_for_req(card->host, &mrq); if (cmd.error) { printk(KERN_INFO "Failed to send CMD14: %d %d\n", cmd.error, data.error); return 0; } #if 0 #warning PRINT RESULTS FROM CMD14 printk (KERN_INFO "%s: Got %02X %02X %02X %02X\n", __FUNCTION__, test_data_read[0], test_data_read[1], test_data_read[2], test_data_read[3]); #endif switch (bits) { case 8: return (test_data_read[0] == 0xaa && test_data_read[1] == 0x55); case 4: return (test_data_read[0] == 0xa5); } return 0; }