Example #1
/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_pgs_per_blk)) < nr_pgs_per_blk) {

		/* Lock laddr */
		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}
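
/*
 * Not part of the original excerpt: a minimal sketch of the
 * rrpc_end_sync_bio() callback wired up above through bio->bi_end_io.
 * It assumes bio->bi_private carries the on-stack completion declared
 * with DECLARE_COMPLETION_ONSTACK() in rrpc_move_valid_pages(); the
 * caller then checks bio->bi_error itself after being woken up.
 */
static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	complete(waiting);
}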

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* The block with the highest number of invalid pages will be at the
 * beginning of the list.
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* Linearly find the block with the highest number of invalid pages.
 * Requires rlun->lock to be held.
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* During GC we don't care about the RR order; instead we want
	 * to maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* Prevent the GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
Example #2
static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
				    struct spi_flash_read_message *msg)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	if (bcm_qspi_bspi_ver_three(qspi))
		if (msg->addr_width == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	bcm_qspi_chip_select(qspi, spi->chip_select);
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (!bcm_qspi_bspi_ver_three(qspi)) {
		addr = msg->from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	if (!qspi->xfer_mode.flex_mode)
		addr = msg->from;
	else
		addr = msg->from & 0x00ffffff;

	if (bcm_qspi_bspi_ver_three(qspi))
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = msg->len;
	qspi->bspi_rf_msg_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		len_words = (rdlen + 3) >> 2;
		qspi->bspi_rf_msg = msg;
		qspi->bspi_rf_msg_status = 0;
		qspi->bspi_rf_msg_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		msg->retlen += rdlen;
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
Example #3
/*
 * Low-level master read/write transaction.
 */
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
			     struct i2c_msg *msg, int stop)
{
	struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
	unsigned long timeout;
	u16 w;

	dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
		msg->addr, msg->len, msg->flags, stop);

	if (msg->len == 0)
		return -EINVAL;

	omap->receiver = !!(msg->flags & I2C_M_RD);
	omap_i2c_resize_fifo(omap, msg->len, omap->receiver);

	omap_i2c_write_reg(omap, OMAP_I2C_SA_REG, msg->addr);

	/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
	omap->buf = msg->buf;
	omap->buf_len = msg->len;

	/* make sure writes to omap->buf_len are ordered */
	barrier();

	omap_i2c_write_reg(omap, OMAP_I2C_CNT_REG, omap->buf_len);

	/* Clear the FIFO Buffers */
	w = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG);
	w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
	omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, w);

	reinit_completion(&omap->cmd_complete);
	omap->cmd_err = 0;

	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;

	/* High speed configuration */
	if (omap->speed > 400)
		w |= OMAP_I2C_CON_OPMODE_HS;

	if (msg->flags & I2C_M_STOP)
		stop = 1;
	if (msg->flags & I2C_M_TEN)
		w |= OMAP_I2C_CON_XA;
	if (!(msg->flags & I2C_M_RD))
		w |= OMAP_I2C_CON_TRX;

	if (!omap->b_hw && stop)
		w |= OMAP_I2C_CON_STP;
	/*
	 * NOTE: The STAT_BB bit could become 1 here if another master
	 * occupies the bus. The IP successfully completes the transfer
	 * when the bus becomes free again (BB reset to 0).
	 */
	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);

	/*
	 * Don't write stt and stp together on some hardware.
	 */
	if (omap->b_hw && stop) {
		unsigned long delay = jiffies + OMAP_I2C_TIMEOUT;
		u16 con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
		while (con & OMAP_I2C_CON_STT) {
			con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);

			/* Let the user know if i2c is in a bad state */
			if (time_after(jiffies, delay)) {
				dev_err(omap->dev, "controller timed out waiting for start condition to finish\n");
				return -ETIMEDOUT;
			}
			cpu_relax();
		}

		w |= OMAP_I2C_CON_STP;
		w &= ~OMAP_I2C_CON_STT;
		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
	}

	/*
	 * REVISIT: We should abort the transfer on signals, but the bus goes
	 * into arbitration and we're currently unable to recover from it.
	 */
	timeout = wait_for_completion_timeout(&omap->cmd_complete,
						OMAP_I2C_TIMEOUT);
	if (timeout == 0) {
		dev_err(omap->dev, "controller timed out\n");
		omap_i2c_reset(omap);
		__omap_i2c_init(omap);
		return -ETIMEDOUT;
	}

	if (likely(!omap->cmd_err))
		return 0;

	/* We have an error */
	if (omap->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) {
		omap_i2c_reset(omap);
		__omap_i2c_init(omap);
		return -EIO;
	}

	if (omap->cmd_err & OMAP_I2C_STAT_AL)
		return -EAGAIN;

	if (omap->cmd_err & OMAP_I2C_STAT_NACK) {
		if (msg->flags & I2C_M_IGNORE_NAK)
			return 0;

		w = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
		w |= OMAP_I2C_CON_STP;
		omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
		return -EREMOTEIO;
	}
	return -EIO;
}
Example #4
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (c->flags & ONENAND_IN_OMAP34XX)
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else {
			result = 0;
		}
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
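
/*
 * Not part of the original excerpt: the interrupt handler paired with
 * c->irq_done can be as small as this sketch; all it needs to do is
 * signal the completion that omap2_onenand_wait() blocks on.
 */
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}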
Example #5
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %zu byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	reinit_completion(&c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
Example #6
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(master->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(master->dma_tx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	/* Wait for SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						transfer_timeout);
	if (!timeout) {
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}
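
/*
 * Not part of the original excerpt: a plausible sketch of the DMA
 * callbacks registered above. Each callback only has to signal the
 * completion that spi_imx_dma_transfer() waits on for its direction.
 */
static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = cookie;

	complete(&spi_imx->dma_tx_completion);
}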
Example #7
static int dht11_read_raw(struct iio_dev *iio_dev,
			  const struct iio_chan_spec *chan,
			  int *val, int *val2, long m)
{
	struct dht11 *dht11 = iio_priv(iio_dev);
	int ret, timeres;

	mutex_lock(&dht11->lock);
	if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
		timeres = ktime_get_resolution_ns();
		if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
			dev_err(dht11->dev, "timeresolution %dns too low\n",
				timeres);
			/* In theory a better clock could become available
			 * at some point ... and there is no error code
			 * that really fits better.
			 */
			ret = -EAGAIN;
			goto err;
		}

		reinit_completion(&dht11->completion);

		dht11->num_edges = 0;
		ret = gpio_direction_output(dht11->gpio, 0);
		if (ret)
			goto err;
		msleep(DHT11_START_TRANSMISSION);
		ret = gpio_direction_input(dht11->gpio);
		if (ret)
			goto err;

		ret = request_irq(dht11->irq, dht11_handle_irq,
				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				  iio_dev->name, iio_dev);
		if (ret)
			goto err;

		ret = wait_for_completion_killable_timeout(&dht11->completion,
							   HZ);

		free_irq(dht11->irq, iio_dev);

		if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
			dev_err(&iio_dev->dev,
				"Only %d signal edges detected\n",
					dht11->num_edges);
			ret = -ETIMEDOUT;
		}
		if (ret < 0)
			goto err;

		ret = dht11_decode(dht11,
				   dht11->num_edges == DHT11_EDGES_PER_READ ?
					DHT11_EDGES_PREAMBLE :
					DHT11_EDGES_PREAMBLE - 2,
				timeres);
		if (ret)
			goto err;
	}

	ret = IIO_VAL_INT;
	if (chan->type == IIO_TEMP)
		*val = dht11->temperature;
	else if (chan->type == IIO_HUMIDITYRELATIVE)
		*val = dht11->humidity;
	else
		ret = -EINVAL;
err:
	dht11->num_edges = -1;
	mutex_unlock(&dht11->lock);
	return ret;
}
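
/*
 * Not part of the original excerpt: a simplified sketch of the IRQ
 * handler requested above. The edges[] array is an assumption based on
 * how the driver is used here; the essential point is that the handler
 * records one timestamped GPIO edge per interrupt and calls complete()
 * once enough edges have arrived for dht11_decode().
 */
static irqreturn_t dht11_handle_irq(int irq, void *data)
{
	struct iio_dev *iio_dev = data;
	struct dht11 *dht11 = iio_priv(iio_dev);

	if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
		dht11->edges[dht11->num_edges].ts = ktime_get_real_ns();
		dht11->edges[dht11->num_edges++].value =
						gpio_get_value(dht11->gpio);
		if (dht11->num_edges >= DHT11_EDGES_PER_READ)
			complete(&dht11->completion);
	}

	return IRQ_HANDLED;
}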
Example #8
static int vf610_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask)
{
	struct vf610_adc *info = iio_priv(indio_dev);
	unsigned int hc_cfg;
	long ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
	case IIO_CHAN_INFO_PROCESSED:
		mutex_lock(&indio_dev->mlock);
		reinit_completion(&info->completion);

		hc_cfg = VF610_ADC_ADCHC(chan->channel);
		hc_cfg |= VF610_ADC_AIEN;
		writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
		ret = wait_for_completion_interruptible_timeout
				(&info->completion, VF610_ADC_TIMEOUT);
		if (ret == 0) {
			mutex_unlock(&indio_dev->mlock);
			return -ETIMEDOUT;
		}
		if (ret < 0) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}

		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = info->value;
			break;
		case IIO_TEMP:
			/*
			 * Calculate in degrees Celsius times 1000,
			 * using a sensor slope of 1.84 mV/°C and
			 * V at 25°C of 696 mV.
			 */
			*val = 25000 - ((int)info->value - 864) * 1000000 / 1840;
			break;
		default:
			mutex_unlock(&indio_dev->mlock);
			return -EINVAL;
		}

		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		*val = info->vref_uv / 1000;
		*val2 = info->adc_feature.res_mode;
		return IIO_VAL_FRACTIONAL_LOG2;

	case IIO_CHAN_INFO_SAMP_FREQ:
		*val = info->sample_freq_avail[info->adc_feature.sample_rate];
		*val2 = 0;
		return IIO_VAL_INT;

	default:
		break;
	}

	return -EINVAL;
}
Example #9
static int owl_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
			       int num)
{
	struct owl_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
	struct i2c_msg *msg;
	unsigned long time_left, flags;
	unsigned int i2c_cmd, val;
	unsigned int addr;
	int ret, idx;

	spin_lock_irqsave(&i2c_dev->lock, flags);

	/* Reset I2C controller */
	owl_i2c_reset(i2c_dev);

	/* Set bus frequency */
	owl_i2c_set_freq(i2c_dev);

	/*
	 * The spinlock should be released before resetting the FIFO and
	 * checking for bus busy, since those functions may sleep
	 */
	spin_unlock_irqrestore(&i2c_dev->lock, flags);

	/* Reset FIFO */
	ret = owl_i2c_reset_fifo(i2c_dev);
	if (ret)
		goto unlocked_err_exit;

	/* Check for bus busy */
	ret = owl_i2c_check_bus_busy(adap);
	if (ret)
		goto unlocked_err_exit;

	spin_lock_irqsave(&i2c_dev->lock, flags);

	/* Check for Arbitration lost */
	val = readl(i2c_dev->base + OWL_I2C_REG_STAT);
	if (val & OWL_I2C_STAT_LAB) {
		val &= ~OWL_I2C_STAT_LAB;
		writel(val, i2c_dev->base + OWL_I2C_REG_STAT);
		ret = -EAGAIN;
		goto err_exit;
	}

	reinit_completion(&i2c_dev->msg_complete);

	/* Enable I2C controller interrupt */
	owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
			   OWL_I2C_CTL_IRQE, true);

	/*
	 * Select: FIFO enable, Master mode, Stop enable, Data count enable,
	 * Send start bit
	 */
	i2c_cmd = OWL_I2C_CMD_SECL | OWL_I2C_CMD_MSS | OWL_I2C_CMD_SE |
		  OWL_I2C_CMD_NS | OWL_I2C_CMD_DE | OWL_I2C_CMD_SBE;

	/* Handle repeated start condition */
	if (num > 1) {
		/* Set internal address length and enable repeated start */
		i2c_cmd |= OWL_I2C_CMD_AS(msgs[0].len + 1) |
			   OWL_I2C_CMD_SAS(1) | OWL_I2C_CMD_RBE;

		/* Write slave address */
		addr = i2c_8bit_addr_from_msg(&msgs[0]);
		writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT);

		/* Write internal register address */
		for (idx = 0; idx < msgs[0].len; idx++)
			writel(msgs[0].buf[idx],
			       i2c_dev->base + OWL_I2C_REG_TXDAT);

		msg = &msgs[1];
	} else {
		/* Set address length */
		i2c_cmd |= OWL_I2C_CMD_AS(1);
		msg = &msgs[0];
	}

	i2c_dev->msg = msg;
	i2c_dev->msg_ptr = 0;

	/* Set data count for the message */
	writel(msg->len, i2c_dev->base + OWL_I2C_REG_DATCNT);

	addr = i2c_8bit_addr_from_msg(msg);
	writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT);

	if (!(msg->flags & I2C_M_RD)) {
		/* Write data to FIFO */
		for (idx = 0; idx < msg->len; idx++) {
			/* Check for FIFO full */
			if (readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) &
			    OWL_I2C_FIFOSTAT_TFF)
				break;

			writel(msg->buf[idx],
			       i2c_dev->base + OWL_I2C_REG_TXDAT);
		}

		i2c_dev->msg_ptr = idx;
	}

	/* Ignore the NACK if needed */
	if (msg->flags & I2C_M_IGNORE_NAK)
		owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
				   OWL_I2C_FIFOCTL_NIB, true);
	else
		owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
				   OWL_I2C_FIFOCTL_NIB, false);

	/* Start the transfer */
	writel(i2c_cmd, i2c_dev->base + OWL_I2C_REG_CMD);

	spin_unlock_irqrestore(&i2c_dev->lock, flags);

	time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
						adap->timeout);

	spin_lock_irqsave(&i2c_dev->lock, flags);
	if (time_left == 0) {
		dev_err(&adap->dev, "Transaction timed out\n");
		/* Send stop condition and release the bus */
		owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
				   OWL_I2C_CTL_GBCC_STOP | OWL_I2C_CTL_RB,
				   true);
		ret = -ETIMEDOUT;
		goto err_exit;
	}

	ret = i2c_dev->err < 0 ? i2c_dev->err : num;

err_exit:
	spin_unlock_irqrestore(&i2c_dev->lock, flags);

unlocked_err_exit:
	/* Disable I2C controller */
	owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
			   OWL_I2C_CTL_EN, false);

	return ret;
}
Example #10
static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
			 int last)
{
	struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
	u16 val, tcr_val;
	int ret;
	unsigned long wait_result;
	int xfer_len = 0;

	if (!(pmsg->flags & I2C_M_NOSTART)) {
		ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
		if (ret < 0)
			return ret;
	}

	if (pmsg->len == 0) {
		/*
		 * We still need to run through the while (..) once, so
		 * start at -1 and break out early from the loop
		 */
		xfer_len = -1;
		writew(0, i2c_dev->base + REG_CDR);
	} else {
		writew(pmsg->buf[0] & 0xFF, i2c_dev->base + REG_CDR);
	}

	if (!(pmsg->flags & I2C_M_NOSTART)) {
		val = readw(i2c_dev->base + REG_CR);
		val &= ~CR_TX_END;
		writew(val, i2c_dev->base + REG_CR);

		val = readw(i2c_dev->base + REG_CR);
		val |= CR_CPU_RDY;
		writew(val, i2c_dev->base + REG_CR);
	}

	reinit_completion(&i2c_dev->complete);

	if (i2c_dev->mode == I2C_MODE_STANDARD)
		tcr_val = TCR_STANDARD_MODE;
	else
		tcr_val = TCR_FAST_MODE;

	tcr_val |= (TCR_MASTER_WRITE | (pmsg->addr & TCR_SLAVE_ADDR_MASK));

	writew(tcr_val, i2c_dev->base + REG_TCR);

	if (pmsg->flags & I2C_M_NOSTART) {
		val = readw(i2c_dev->base + REG_CR);
		val |= CR_CPU_RDY;
		writew(val, i2c_dev->base + REG_CR);
	}

	while (xfer_len < pmsg->len) {
		wait_result = wait_for_completion_timeout(&i2c_dev->complete,
							msecs_to_jiffies(500));

		if (wait_result == 0)
			return -ETIMEDOUT;

		ret = wmt_check_status(i2c_dev);
		if (ret)
			return ret;

		xfer_len++;

		val = readw(i2c_dev->base + REG_CSR);
		if ((val & CSR_RCV_ACK_MASK) == CSR_RCV_NOT_ACK) {
			dev_dbg(i2c_dev->dev, "write RCV NACK error\n");
			return -EIO;
		}

		if (pmsg->len == 0) {
			val = CR_TX_END | CR_CPU_RDY | CR_ENABLE;
			writew(val, i2c_dev->base + REG_CR);
			break;
		}

		if (xfer_len == pmsg->len) {
			if (last != 1)
				writew(CR_ENABLE, i2c_dev->base + REG_CR);
		} else {
			writew(pmsg->buf[xfer_len] & 0xFF, i2c_dev->base +
								REG_CDR);
			writew(CR_CPU_RDY | CR_ENABLE, i2c_dev->base + REG_CR);
		}
	}

	return 0;
}
Example #11
static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
			int last)
{
	struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
	u16 val, tcr_val;
	int ret;
	unsigned long wait_result;
	u32 xfer_len = 0;

	if (!(pmsg->flags & I2C_M_NOSTART)) {
		ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
		if (ret < 0)
			return ret;
	}

	val = readw(i2c_dev->base + REG_CR);
	val &= ~CR_TX_END;
	writew(val, i2c_dev->base + REG_CR);

	val = readw(i2c_dev->base + REG_CR);
	val &= ~CR_TX_NEXT_NO_ACK;
	writew(val, i2c_dev->base + REG_CR);

	if (!(pmsg->flags & I2C_M_NOSTART)) {
		val = readw(i2c_dev->base + REG_CR);
		val |= CR_CPU_RDY;
		writew(val, i2c_dev->base + REG_CR);
	}

	if (pmsg->len == 1) {
		val = readw(i2c_dev->base + REG_CR);
		val |= CR_TX_NEXT_NO_ACK;
		writew(val, i2c_dev->base + REG_CR);
	}

	reinit_completion(&i2c_dev->complete);

	if (i2c_dev->mode == I2C_MODE_STANDARD)
		tcr_val = TCR_STANDARD_MODE;
	else
		tcr_val = TCR_FAST_MODE;

	tcr_val |= TCR_MASTER_READ | (pmsg->addr & TCR_SLAVE_ADDR_MASK);

	writew(tcr_val, i2c_dev->base + REG_TCR);

	if (pmsg->flags & I2C_M_NOSTART) {
		val = readw(i2c_dev->base + REG_CR);
		val |= CR_CPU_RDY;
		writew(val, i2c_dev->base + REG_CR);
	}

	while (xfer_len < pmsg->len) {
		wait_result = wait_for_completion_timeout(&i2c_dev->complete,
							msecs_to_jiffies(500));

		if (!wait_result)
			return -ETIMEDOUT;

		ret = wmt_check_status(i2c_dev);
		if (ret)
			return ret;

		pmsg->buf[xfer_len] = readw(i2c_dev->base + REG_CDR) >> 8;
		xfer_len++;

		if (xfer_len == pmsg->len - 1) {
			val = readw(i2c_dev->base + REG_CR);
			val |= (CR_TX_NEXT_NO_ACK | CR_CPU_RDY);
			writew(val, i2c_dev->base + REG_CR);
		} else {
			val = readw(i2c_dev->base + REG_CR);
			val |= CR_CPU_RDY;
			writew(val, i2c_dev->base + REG_CR);
		}
	}

	return 0;
}
Example #12
/**
 * omap3_bridge_startup() - perform low-level initializations
 * @pdev:      pointer to platform device
 *
 * Initializes recovery, PM and DVFS required data, before calling
 * clk and memory init routines.
 */
static int omap3_bridge_startup(struct platform_device *pdev)
{
	struct omap_dsp_platform_data *pdata = pdev->dev.platform_data;
	struct drv_data *drv_datap = NULL;
	u32 phys_membase, phys_memsize;
	int err;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	int i;
#endif

#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
	bridge_rec_queue = create_workqueue("bridge_rec_queue");
	INIT_WORK(&bridge_recovery_work, bridge_recover);
	reinit_completion(&bridge_comp);
#endif

#ifdef CONFIG_PM
	/* Initialize the wait queue */
	bridge_suspend_data.suspended = 0;
	init_waitqueue_head(&bridge_suspend_data.suspend_wq);

#ifdef CONFIG_TIDSPBRIDGE_DVFS
	for (i = 0; i < 6; i++)
		pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate;

	err = cpufreq_register_notifier(&iva_clk_notifier,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (err)
		pr_err("%s: clk_notifier_register failed for iva2_ck\n",
								__func__);
#endif
#endif

	dsp_clk_init();

	drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL);
	if (!drv_datap) {
		err = -ENOMEM;
		goto err1;
	}

	drv_datap->shm_size = shm_size;
	drv_datap->tc_wordswapon = tc_wordswapon;

	if (base_img) {
		drv_datap->base_img = kstrdup(base_img, GFP_KERNEL);
		if (!drv_datap->base_img) {
			err = -ENOMEM;
			goto err2;
		}
	}

	dev_set_drvdata(bridge, drv_datap);

	if (shm_size < 0x10000) {	/* 64 KB */
		err = -EINVAL;
		pr_err("%s: shm size must be at least 64 KB\n", __func__);
		goto err3;
	}
	dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size);

	phys_membase = pdata->phys_mempool_base;
	phys_memsize = pdata->phys_mempool_size;
	if (phys_membase > 0 && phys_memsize > 0)
		mem_ext_phys_pool_init(phys_membase, phys_memsize);

	if (tc_wordswapon)
		dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__);

	driver_context = dsp_init(&err);
	if (err) {
		pr_err("DSP Bridge driver initialization failed\n");
		goto err4;
	}

	return 0;

err4:
	mem_ext_phys_pool_release();
err3:
	kfree(drv_datap->base_img);
err2:
	kfree(drv_datap);
err1:
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	cpufreq_unregister_notifier(&iva_clk_notifier,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif
	dsp_clk_exit();

	return err;
}
Example #13
void bridge_recover_schedule(void)
{
	reinit_completion(&bridge_open_comp);
	recover = true;
	queue_work(bridge_rec_queue, &bridge_recovery_work);
}
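
/*
 * Not part of the original excerpt: a hypothetical sketch of the
 * consumer side of bridge_open_comp. An open() path could block like
 * this until recovery finishes and the completion is signalled, so no
 * new handles are handed out while the bridge is being recovered.
 */
static int bridge_wait_for_recovery(void)
{
	if (!recover)
		return 0;

	if (wait_for_completion_interruptible(&bridge_open_comp))
		return -EBUSY;

	return 0;
}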
Example #14
static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
			       int last_msg)
{
	unsigned long timeleft;
	u32 intr_mask, cmd, val;

	priv->msg_buf = msg->buf;
	priv->msg_buf_remaining = priv->msg_len = msg->len;
	priv->msg_err = 0;
	priv->msg_read = (msg->flags & I2C_M_RD);
	reinit_completion(&priv->msg_complete);

	/* Reset FIFO */
	xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_MFIFOCTRL,
			     XLP9XX_I2C_MFIFOCTRL_RST);

	/* set FIFO threshold if reading */
	if (priv->msg_read)
		xlp9xx_i2c_update_rx_fifo_thres(priv);

	/* set slave addr */
	xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_SLAVEADDR,
			     (msg->addr << XLP9XX_I2C_SLAVEADDR_ADDR_SHIFT) |
			     (priv->msg_read ? XLP9XX_I2C_SLAVEADDR_RW : 0));

	/* Build control word for transfer */
	val = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_CTRL);
	if (!priv->msg_read)
		val &= ~XLP9XX_I2C_CTRL_FIFORD;
	else
		val |= XLP9XX_I2C_CTRL_FIFORD;	/* read */

	if (msg->flags & I2C_M_TEN)
		val |= XLP9XX_I2C_CTRL_ADDMODE;	/* 10-bit address mode */
	else
		val &= ~XLP9XX_I2C_CTRL_ADDMODE;

	/* set data length to be transferred */
	val = (val & ~XLP9XX_I2C_CTRL_MCTLEN_MASK) |
	      (msg->len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT);
	xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, val);

	/* fill fifo during tx */
	if (!priv->msg_read)
		xlp9xx_i2c_fill_tx_fifo(priv);

	/* set interrupt mask */
	intr_mask = (XLP9XX_I2C_INTEN_ARLOST | XLP9XX_I2C_INTEN_BUSERR |
		     XLP9XX_I2C_INTEN_NACKADDR | XLP9XX_I2C_INTEN_DATADONE);

	if (priv->msg_read) {
		intr_mask |= XLP9XX_I2C_INTEN_MFIFOHI;
		if (msg->len == 0)
			intr_mask |= XLP9XX_I2C_INTEN_SADDR;
	} else {
		if (msg->len == 0)
			intr_mask |= XLP9XX_I2C_INTEN_SADDR;
		else
			intr_mask |= XLP9XX_I2C_INTEN_MFIFOEMTY;
	}
	xlp9xx_i2c_unmask_irq(priv, intr_mask);

	/* set cmd reg */
	cmd = XLP9XX_I2C_CMD_START;
	cmd |= (priv->msg_read ? XLP9XX_I2C_CMD_READ : XLP9XX_I2C_CMD_WRITE);
	if (last_msg)
		cmd |= XLP9XX_I2C_CMD_STOP;

	xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CMD, cmd);

	timeleft = msecs_to_jiffies(XLP9XX_I2C_TIMEOUT_MS);
	timeleft = wait_for_completion_timeout(&priv->msg_complete, timeleft);

	if (priv->msg_err) {
		dev_dbg(priv->dev, "transfer error %x!\n", priv->msg_err);
		if (priv->msg_err & XLP9XX_I2C_INTEN_BUSERR)
			xlp9xx_i2c_init(priv);
		return -EIO;
	}

	if (timeleft == 0) {
		dev_dbg(priv->dev, "i2c transfer timed out!\n");
		xlp9xx_i2c_init(priv);
		return -ETIMEDOUT;
	}

	return 0;
}
Example #15
File: htc.c Project: muonn/ath
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err("Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;
		ath10k_err("Service connect timeout: %d\n", status);
		return status;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err("Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;
	ep->tx_credits_per_max_message = ep->max_ep_message_len /
					 htc->target_credit_size;

	if (ep->max_ep_message_len % htc->target_credit_size)
		ep->tx_credits_per_max_message++;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id,
						&ep->ul_is_polled,
						&ep->dl_is_polled);
	if (status)
		return status;

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot htc ep %d ul polled %d dl polled %d\n",
		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
Example #16
static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
			   int scale, int *val)
{
	int ret;
	u32 tmp;

	reinit_completion(&data->completion);

	ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
	if (ret) {
		dev_err(data->dev, "timeout to get the hwspinlock\n");
		return ret;
	}

	ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
				 SC27XX_ADC_EN, SC27XX_ADC_EN);
	if (ret)
		goto unlock_adc;

	/* Configure the channel id and scale */
	tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK;
	tmp |= channel & SC27XX_ADC_CHN_ID_MASK;
	ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CH_CFG,
				 SC27XX_ADC_CHN_ID_MASK | SC27XX_ADC_SCALE_MASK,
				 tmp);
	if (ret)
		goto disable_adc;

	/* Select 12bit conversion mode, and only sample 1 time */
	tmp = SC27XX_ADC_12BIT_MODE;
	tmp |= (0 << SC27XX_ADC_RUN_NUM_SHIFT) & SC27XX_ADC_RUN_NUM_MASK;
	ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
				 SC27XX_ADC_RUN_NUM_MASK | SC27XX_ADC_12BIT_MODE,
				 tmp);
	if (ret)
		goto disable_adc;

	ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
				 SC27XX_ADC_CHN_RUN, SC27XX_ADC_CHN_RUN);
	if (ret)
		goto disable_adc;

	ret = wait_for_completion_timeout(&data->completion,
				msecs_to_jiffies(SC27XX_ADC_RDY_TIMEOUT));
	if (!ret) {
		dev_err(data->dev, "read ADC data timeout\n");
		ret = -ETIMEDOUT;
	} else {
		ret = 0;
	}

disable_adc:
	regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
			   SC27XX_ADC_EN, 0);
unlock_adc:
	hwspin_unlock_raw(data->hwlock);

	if (!ret)
		*val = data->value;

	return ret;
}
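
/*
 * Not part of the original excerpt: an illustrative sketch of the
 * interrupt handler that pairs with the wait above. SC27XX_ADC_DATA
 * and SC27XX_ADC_DATA_MASK are assumed register definitions; the key
 * step is latching the conversion result into data->value before
 * calling complete(), which is why the waiter can read data->value
 * once wait_for_completion_timeout() returns.
 */
static irqreturn_t sc27xx_adc_isr(int irq, void *dev_id)
{
	struct sc27xx_adc_data *data = dev_id;
	u32 tmp;

	if (regmap_read(data->regmap, data->base + SC27XX_ADC_DATA, &tmp))
		return IRQ_HANDLED;

	data->value = tmp & SC27XX_ADC_DATA_MASK;
	complete(&data->completion);

	return IRQ_HANDLED;
}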
Example #17
static int sun6i_spi_transfer_one(struct spi_master *master,
				  struct spi_device *spi,
				  struct spi_transfer *tfr)
{
	struct sun6i_spi *sspi = spi_master_get_devdata(master);
	unsigned int mclk_rate, div, timeout;
	unsigned int start, end, tx_time;
	unsigned int tx_len = 0;
	int ret = 0;
	u32 reg;

	/* We don't support transfer larger than the FIFO */
	if (tfr->len > SUN6I_FIFO_DEPTH)
		return -EINVAL;

	reinit_completion(&sspi->done);
	sspi->tx_buf = tfr->tx_buf;
	sspi->rx_buf = tfr->rx_buf;
	sspi->len = tfr->len;

	/* Clear pending interrupts */
	sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);

	/* Reset FIFO */
	sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
			SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);

	/*
	 * Setup the transfer control register: Chip Select,
	 * polarities, etc.
	 */
	reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);

	if (spi->mode & SPI_CPOL)
		reg |= SUN6I_TFR_CTL_CPOL;
	else
		reg &= ~SUN6I_TFR_CTL_CPOL;

	if (spi->mode & SPI_CPHA)
		reg |= SUN6I_TFR_CTL_CPHA;
	else
		reg &= ~SUN6I_TFR_CTL_CPHA;

	if (spi->mode & SPI_LSB_FIRST)
		reg |= SUN6I_TFR_CTL_FBS;
	else
		reg &= ~SUN6I_TFR_CTL_FBS;

	/*
	 * If it's a TX only transfer, we don't want to fill the RX
	 * FIFO with bogus data
	 */
	if (sspi->rx_buf)
		reg &= ~SUN6I_TFR_CTL_DHB;
	else
		reg |= SUN6I_TFR_CTL_DHB;

	/* We want to control the chip select manually */
	reg |= SUN6I_TFR_CTL_CS_MANUAL;

	sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);

	/* Ensure that we have a parent clock fast enough */
	mclk_rate = clk_get_rate(sspi->mclk);
	if (mclk_rate < (2 * spi->max_speed_hz)) {
		clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
		mclk_rate = clk_get_rate(sspi->mclk);
	}

	/*
	 * Setup clock divider.
	 *
	 * We have two choices there. Either we can use the clock
	 * divide rate 1, which is calculated thanks to this formula:
	 * SPI_CLK = MOD_CLK / (2 ^ cdr)
	 * Or we can use CDR2, which is calculated with the formula:
	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
	 * Whether we use the former or the latter is set through the
	 * DRS bit.
	 *
	 * First try CDR2, and if we can't reach the expected
	 * frequency, fall back to CDR1.
	 */
	div = mclk_rate / (2 * spi->max_speed_hz);
	if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
		if (div > 0)
			div--;

		reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
	} else {
		div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
		reg = SUN6I_CLK_CTL_CDR1(div);
	}

	sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);

	/* Setup the transfer now... */
	if (sspi->tx_buf)
		tx_len = tfr->len;

	/* Setup the counters */
	sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(tfr->len));
	sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
	sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG,
			SUN6I_BURST_CTL_CNT_STC(tx_len));

	/* Fill the TX FIFO */
	sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);

	/* Enable the interrupts */
	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);

	/* Start the transfer */
	reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
	sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);

	tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
	start = jiffies;
	timeout = wait_for_completion_timeout(&sspi->done,
					      msecs_to_jiffies(tx_time));
	end = jiffies;
	if (!timeout) {
		dev_warn(&master->dev,
			 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
			 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
			 jiffies_to_msecs(end - start), tx_time);
		ret = -ETIMEDOUT;
		goto out;
	}

	sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);

out:
	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);

	return ret;
}
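
/*
 * Not part of the original excerpt: a sketch of the interrupt handler
 * that signals sspi->done. It assumes the transfer-complete flag read
 * back from SUN6I_INT_STA_REG is the same SUN6I_INT_CTL_TC bit enabled
 * above, and that writing it back acknowledges the interrupt.
 */
static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
{
	struct sun6i_spi *sspi = dev_id;
	u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);

	if (status & SUN6I_INT_CTL_TC) {
		sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
		complete(&sspi->done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}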
Example #18
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	bool has_unre_flag = dev->pdata->has_unre_flag;

	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
			dev_err(dev->dev, "RXRDY still set!");
			at91_twi_read(dev, AT91_TWI_RHR);
		}

		/* if only one byte is to be read, immediately stop transfer */
		if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma, the last byte has to be read manually in
		 * order to not send the stop command too late and then
		 * to receive extra data. In practice, there are some issues
		 * if you use the dma to read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_read_data_dma(dev);
			/*
			 * It is important to enable TXCOMP irq here because
			 * doing it only when transferring the last two bytes
			 * will mask NACK errors since TXCOMP is set when a
			 * NACK occurs.
			 */
			at91_twi_write(dev, AT91_TWI_IER,
			       AT91_TWI_TXCOMP);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write_data_dma(dev);
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
		}
	}

	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
							dev->adapter.timeout);
	if (ret == 0) {
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	at91_twi_dma_cleanup(dev);
	return ret;
}