static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1;

	if (mmc_card_claim_host(card))
		goto flush_queue;

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		/*
		 * Byte-addressed cards take a byte offset; block-addressed
		 * (high-capacity) cards take the sector number directly.
		 */
		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);

		/* Never ask for more blocks than the host can take at once. */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		if (mmc_card_movinand(card)) {
			if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) {
				cmd.opcode = MMC_SET_BLOCK_COUNT;
				cmd.arg = req->nr_sectors;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				ret = mmc_wait_for_cmd(card->host, &cmd, 2);
			}
			if (rq_data_dir(req) == READ) {
				if (brq.data.blocks > 1) {
					brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
					brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI);
//					brq.mrq.stop = &brq.stop;
				} else {
					brq.cmd.opcode = MMC_READ_SINGLE_BLOCK;
					brq.data.flags |= MMC_DATA_READ;
					brq.mrq.stop = NULL;
				}
			} else {
				brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
				brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI;
//				brq.mrq.stop = &brq.stop;
			}
		} else {
#endif
		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			/* Multi-block transfers need an explicit stop command. */
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		}
#endif

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		if (rq_data_dir(req) != READ) {
			/*
			 * After a write, poll SEND_STATUS until the card
			 * reports it is ready for new data again.
			 */
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
				/*
				 * Work-around for broken cards setting
				 * READY_FOR_DATA when not actually ready:
				 * while the card is still in the programming
				 * state (7), ignore the ready bit.
				 */
				if (mmc_card_movinand(card)) {
					if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
						cmd.resp[0] &= ~R1_READY_FOR_DATA;
				}
#endif
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

 flush_queue:
	mmc_card_release_host(card);

	/* Fail whatever is left of the request. */
	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}
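/*
 * For reference, a sketch of roughly what mmc_sd_num_wr_blocks(), used
 * in the cmd_err path above, has to do: issue ACMD22 (SEND_NUM_WR_BLKS),
 * which returns the number of well-written blocks as a 4-byte big-endian
 * data block.  This is an illustrative approximation, not the in-tree
 * helper; error handling is reduced to returning (u32)-1 on any failure.
 */
static u32 mmc_sd_num_wr_blocks_sketch(struct mmc_card *card)
{
	u32 blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	/* ACMD sequence: CMD55 (APP_CMD) with our RCA first... */
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (mmc_wait_for_cmd(card->host, &cmd, 0))
		return (u32)-1;
	if (!(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	/* ...then ACMD22 proper, a single 4-byte read transfer. */
	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));
	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card, 0);

	memset(&mrq, 0, sizeof(struct mmc_request));
	mrq.cmd = &cmd;
	mrq.data = &data;

	sg_init_one(&sg, &blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error || data.error)
		return (u32)-1;

	/* The count comes back big-endian on the wire. */
	return ntohl(blocks);
}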
/*
 * Earlier variant of the same request path, predating
 * mmc_set_data_timeout(), byte-sized data.blksz, and the MoviNAND
 * handling above.  Note the older completion API: end_that_request_last()
 * takes no status argument, and writes always go out one block at a time.
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret;

	if (mmc_card_claim_host(card))
		goto cmd_err;

	do {
		struct mmc_blk_request brq;
		struct mmc_command cmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector << 9;
		brq.cmd.flags = MMC_RSP_R1;
		/* Scale the card's worst-case access time by a factor of 10. */
		brq.data.timeout_ns = card->csd.tacc_ns * 10;
		brq.data.timeout_clks = card->csd.tacc_clks * 10;
		brq.data.blksz_bits = md->block_bits;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B;

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = brq.data.blocks > 1 ?
				MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			/* Writes go out one block at a time. */
			brq.cmd.opcode = MMC_WRITE_BLOCK;
			brq.cmd.flags = MMC_RSP_R1B;
			brq.data.flags |= MMC_DATA_WRITE;
			brq.data.blocks = 1;
		}
		brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		do {
			int err;

			cmd.opcode = MMC_SEND_STATUS;
			cmd.arg = card->rca << 16;
			cmd.flags = MMC_RSP_R1;
			err = mmc_wait_for_cmd(card->host, &cmd, 5);
			if (err) {
				printk(KERN_ERR "%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				goto cmd_err;
			}
		} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
		if (cmd.resp[0] & ~0x00000900)
			printk(KERN_ERR "%s: status = %08x\n",
			       req->rq_disk->disk_name, cmd.resp[0]);
		if (mmc_decode_status(cmd.resp))
			goto cmd_err;
#endif

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
	mmc_card_release_host(card);

	/*
	 * This is a little draconian, but until we get proper
	 * error handling sorted out here, it's the best we can
	 * do - especially as some hosts have no idea how much
	 * data was transferred before the error occurred.
	 */
	spin_lock_irq(&md->lock);
	do {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	} while (ret);

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req);
	spin_unlock_irq(&md->lock);

	return 0;
}
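/*
 * For context, a condensed sketch of the caller side: the per-card
 * queue thread pulls requests off the block queue and hands them to
 * mmc_blk_issue_rq() through mq->issue_fn.  This approximates the
 * mmc_queue_thread() of the same era; the exact locking and wakeup
 * details here are an illustrative assumption, not the verbatim
 * implementation.
 */
static int mmc_queue_thread_sketch(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			/* Queue empty: park until the request fn wakes us. */
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		/* issue_fn points at mmc_blk_issue_rq() for block devices. */
		mq->issue_fn(mq, req);
	} while (1);

	return 0;
}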