Example #1
static void
goldfish_mmc_start_command(struct goldfish_mmc_host *host,
			   struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know the exact response type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	/*
	 * Pack the command register: opcode in the low bits, response
	 * type at bit 8, command type at bit 12.
	 */
	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;	/* open-drain bus mode */

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;	/* response carries a busy phase */

	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;	/* data direction: card-to-host read */

	GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
	GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
}
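
/*
 * Note on the data path below: there is no real DMA here -- the driver
 * uses a single bounce buffer (host->virt_base) whose physical address
 * the probe routine hands to the emulated device via MMC_SET_BUFFER,
 * and goldfish_mmc_prepare_data() copies write data into that buffer.
 */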
static void
goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
			  struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int block_size;
	unsigned sg_len;
	enum dma_data_direction dma_data_dir;

	host->data = data;
	if (data == NULL) {
		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
		host->dma_in_use = 0;
		return;
	}

	block_size = data->blksz;

	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	if (data->flags & MMC_DATA_WRITE)
		dma_data_dir = DMA_TO_DEVICE;
	else
		dma_data_dir = DMA_FROM_DEVICE;
#if defined(CONFIG_ARM) || defined(CONFIG_MIPS)
	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				sg_len, dma_data_dir);
#elif defined(CONFIG_X86)
	/*
	 * Use NULL for dev for ISA-like devices.  Note that newer
	 * kernels generally require a valid struct device here.
	 */
	host->sg_len = dma_map_sg(NULL, data->sg,
				  sg_len, dma_data_dir);
#else
#error NOT SUPPORTED
#endif
	host->dma_done = 0;
	host->dma_in_use = 1;

	if (dma_data_dir == DMA_TO_DEVICE) {
		/*
		 * We don't really have DMA, so we need to copy the data
		 * into our platform driver buffer.  Note that only the
		 * first scatterlist entry is copied here.
		 */
		const u8 *src = sg_virt(data->sg);

		memcpy(host->virt_base, src, data->sg->length);
	}
}
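
/*
 * Probe: map the register window, allocate the shared buffer, register
 * the IRQ handler, and only then enable the device's interrupt sources.
 */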
static int goldfish_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct goldfish_mmc_host *host = NULL;
	struct resource *res;
	int ret = 0;
	int irq;
	dma_addr_t buf_addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
	if (mmc == NULL) {
		ret = -ENOMEM;
		goto err_alloc_host_failed;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
#if defined(CONFIG_ARM)
	host->reg_base = (void __iomem *)IO_ADDRESS(res->start - IO_START);
	host->virt_base = dma_alloc_writecombine(&pdev->dev, BUFFER_SIZE,
						 &buf_addr, GFP_KERNEL);
#elif defined(CONFIG_X86) || defined(CONFIG_MIPS)
	/*
	 * Use NULL for dev for ISA-like devices.
	 */
	host->reg_base = ioremap(res->start, resource_size(res));
	host->virt_base = dma_alloc_coherent(NULL, BUFFER_SIZE,
					     &buf_addr, GFP_KERNEL);
#else
#error NOT SUPPORTED
#endif
	if (host->virt_base == NULL) {
		ret = -ENOMEM;
		goto dma_alloc_failed;
	}
	host->phys_base = buf_addr;

	host->id = pdev->id;
	host->irq = irq;

	mmc->ops = &goldfish_mmc_ops;
	mmc->f_min = 400000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_segs = 32;
	mmc->max_blk_size = 2048;	/* MMC_BLOCK_LENGTH is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* MMC_BLOCK_COUNT is 11 bits (+1) */
	mmc->max_req_size = BUFFER_SIZE;
	mmc->max_seg_size = mmc->max_req_size;

	ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_request_irq_failed;

	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
	if (ret)
		dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");

	GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
	GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
		MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
		MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

	mmc_add_host(mmc);

	return 0;

err_request_irq_failed:
#if defined(CONFIG_ARM)
	dma_free_writecombine(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
#elif defined(CONFIG_X86) || defined(CONFIG_MIPS)
	dma_free_coherent(NULL, BUFFER_SIZE, host->virt_base, host->phys_base);
#else
#error NOT SUPPORTED
#endif
dma_alloc_failed:
#if defined(CONFIG_X86) || defined(CONFIG_MIPS)
	iounmap(host->reg_base);	/* unmap the register window mapped above */
#endif
	mmc_free_host(host->mmc);
err_alloc_host_failed:
	return ret;
}
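
/*
 * Interrupt handler: acknowledge MMC_INT_STATUS bits in a loop, then
 * complete the command and/or data phases they describe.  A status of
 * zero on entry is treated as a spurious interrupt.
 */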
static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
{
	struct goldfish_mmc_host *host = dev_id;
	u16 status;
	int end_command = 0;
	int end_transfer = 0;
	int transfer_error = 0;	/* note: never set in this handler */
	int state_changed = 0;
	int cmd_timeout = 0;

	while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
		GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);

		if (status & MMC_STAT_END_OF_CMD)
			end_command = 1;

		if (status & MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & MMC_STAT_STATE_CHANGE)
			state_changed = 1;

		if (status & MMC_STAT_CMD_TIMEOUT) {
			end_command = 0;
			cmd_timeout = 1;
		}
	}

	if (cmd_timeout) {
		struct mmc_request *mrq = host->mrq;
		mrq->cmd->error = -ETIMEDOUT;
		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
	}

	if (end_command)
		goldfish_mmc_cmd_done(host, host->cmd);

	if (transfer_error)
		goldfish_mmc_xfer_done(host, host->data);
	else if (end_transfer) {
		host->dma_done = 1;
		goldfish_mmc_end_of_data(host, host->data);
	} else if (host->data != NULL &&
		   host->mrq->cmd->opcode == SD_APP_SD_STATUS) {
		/*
		 * WORKAROUND -- after porting this driver from 2.6 to 3.4,
		 * this case pops up during device initialization.  It
		 * happens because the host stack sends ACMD13 (opcode 13,
		 * SD_APP_SD_STATUS), which involves a data read, while the
		 * goldfish emulator only implements CMD13, which has no
		 * data phase.  The workaround passes a garbage result up
		 * to the stack.
		 * TODO: implement a proper response.
		 */
		host->dma_done = 1;
		goldfish_mmc_end_of_data(host, host->data);
	}

	if (state_changed) {
		u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
		pr_info("%s: Card detect now %d\n", __func__,
			!!(state & MMC_STATE_INSERTED));
		mmc_detect_change(host->mmc, 0);
	}

	if (!end_command && !end_transfer &&
	    !transfer_error && !state_changed && !cmd_timeout) {
		status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
		dev_info(mmc_dev(host->mmc), "spurious irq 0x%04x\n", status);
		if (status != 0) {
			GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
			GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
		}
	}

	return IRQ_HANDLED;
}
Example #5
static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
{
	struct goldfish_mmc_host *host = dev_id;
	u16 status;
	int end_command = 0;
	int end_transfer = 0;
	int transfer_error = 0;	/* note: never set in this handler */
	int state_changed = 0;
	int cmd_timeout = 0;

	while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
		GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);

		if (status & MMC_STAT_END_OF_CMD)
			end_command = 1;

		if (status & MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & MMC_STAT_STATE_CHANGE)
			state_changed = 1;

		if (status & MMC_STAT_CMD_TIMEOUT) {
			end_command = 0;
			cmd_timeout = 1;
		}
	}

	if (cmd_timeout) {
		struct mmc_request *mrq = host->mrq;
		mrq->cmd->error = -ETIMEDOUT;
		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
	}

	if (end_command)
		goldfish_mmc_cmd_done(host, host->cmd);

	if (transfer_error)
		goldfish_mmc_xfer_done(host, host->data);
	else if (end_transfer) {
		host->dma_done = 1;
		goldfish_mmc_end_of_data(host, host->data);
	}
	if (state_changed) {
		u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
		pr_info("%s: Card detect now %d\n", __func__,
			!!(state & MMC_STATE_INSERTED));
		mmc_detect_change(host->mmc, 0);
	}

	if (!end_command && !end_transfer &&
	    !transfer_error && !state_changed && !cmd_timeout) {
		status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
		dev_info(mmc_dev(host->mmc), "spurious irq 0x%04x\n", status);
		if (status != 0) {
			GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
			GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
		}
	}

	return IRQ_HANDLED;
}