Example #1
/* Convenience register accessors */
static inline void tz1090_gpio_write(struct tz1090_gpio_bank *bank,
			      unsigned int reg_offs, u32 data)
{
	iowrite32(data, bank->reg + reg_offs);
}
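A write accessor like this is normally paired with a read accessor. A minimal sketch of the companion, assuming the same struct tz1090_gpio_bank layout (the original driver's version is not shown in this snippet):

/* Hypothetical companion accessor, mirroring tz1090_gpio_write() above. */
static inline u32 tz1090_gpio_read(struct tz1090_gpio_bank *bank,
				   unsigned int reg_offs)
{
	return ioread32(bank->reg + reg_offs);
}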
Example #2
/**
 * davinci_spi_bufs_pio - handle a data transfer in PIO mode
 * @spi: spi device on which the data transfer is to be done
 * @t: spi transfer in which the transfer info is filled
 *
 * This function puts the data to be transferred into the data register
 * of the SPI controller and then waits until the completion is marked
 * by the IRQ handler.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv, tmp;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	while ((ioread32(davinci_spi->base + SPIBUF)
				& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else {	/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
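The snippet above calls set_io_bits() and clear_io_bits() without showing them. A minimal sketch consistent with how they are used here (a read-modify-write on a 32-bit MMIO register); the DaVinci driver's actual definitions may differ:

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	/* Read the register, set the requested bits, write it back. */
	iowrite32(v | bits, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	/* Read the register, clear the requested bits, write it back. */
	iowrite32(v & ~bits, addr);
}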
Example #3
struct tpm_chip* init_tpm_tis(unsigned long baseaddr, int localities, unsigned int irq)
{
   int i;
   unsigned long addr;
   struct tpm_chip* tpm = NULL;
   uint32_t didvid;
   uint32_t intfcaps;
   uint32_t intmask;

   printk("============= Init TPM TIS Driver ==============\n");

   /* Sanity check the localities input */
   if(localities & ~TPM_TIS_EN_LOCLALL) {
      printk("init_tpm_tis() Invalid locality specification! %X\n", localities);
      goto abort_egress;
   }

   printk("IOMEM Machine Base Address: %lX\n", baseaddr);

   /* Create the tpm data structure */
   tpm = malloc(sizeof(struct tpm_chip));
   if(tpm == NULL) {
      goto abort_egress;
   }
   __init_tpm_chip(tpm);

   /* Set the enabled localities - if 0 we leave default as all enabled */
   if(localities != 0) {
      tpm->enabled_localities = localities;
   }
   printk("Enabled Localities: ");
   for(i = 0; i < 5; ++i) {
      if(locality_enabled(tpm, i)) {
	 printk("%d ", i);
      }
   }
   printk("\n");

   /* Set the base machine address */
   tpm->baseaddr = baseaddr;

   /* Set default timeouts */
   tpm->timeout_a = MILLISECS(TIS_SHORT_TIMEOUT);
   tpm->timeout_b = MILLISECS(TIS_LONG_TIMEOUT);
   tpm->timeout_c = MILLISECS(TIS_SHORT_TIMEOUT);
   tpm->timeout_d = MILLISECS(TIS_SHORT_TIMEOUT);

   /* Map the mmio pages */
   addr = tpm->baseaddr;
   for(i = 0; i < 5; ++i) {
      if(locality_enabled(tpm, i)) {
	 /* Map the page in now */
	 if((tpm->pages[i] = ioremap_nocache(addr, PAGE_SIZE)) == NULL) {
	    printk("Unable to map iomem page a address %p\n", addr);
	    goto abort_egress;
	 }

	 /* Set default locality to the first enabled one */
	 if (tpm->locality < 0) {
	    if(tpm_tis_request_locality(tpm, i) < 0) {
	       printk("Unable to request locality %d??\n", i);
	       goto abort_egress;
	    }
	 }
      }
      addr += PAGE_SIZE;
   }


   /* Get the vendor and device ids */
   didvid = ioread32(TPM_DID_VID(tpm, tpm->locality));
   tpm->did = didvid >> 16;
   tpm->vid = didvid & 0xFFFF;


   /* Get the revision id */
   tpm->rid = ioread8(TPM_RID(tpm, tpm->locality));

   printk("1.2 TPM (device-id=0x%X vendor-id = %X rev-id = %X)\n", tpm->did, tpm->vid, tpm->rid);

   intfcaps = ioread32(TPM_INTF_CAPS(tpm, tpm->locality));
   printk("TPM interface capabilities (0x%x):\n", intfcaps);
   if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
      printk("\tBurst Count Static\n");
   if (intfcaps & TPM_INTF_CMD_READY_INT)
      printk("\tCommand Ready Int Support\n");
   if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
      printk("\tInterrupt Edge Falling\n");
   if (intfcaps & TPM_INTF_INT_EDGE_RISING)
      printk("\tInterrupt Edge Rising\n");
   if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
      printk("\tInterrupt Level Low\n");
   if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
      printk("\tInterrupt Level High\n");
   if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
      printk("\tLocality Change Int Support\n");
   if (intfcaps & TPM_INTF_STS_VALID_INT)
      printk("\tSts Valid Int Support\n");
   if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
      printk("\tData Avail Int Support\n");

   /* Interrupt setup */
   intmask = ioread32(TPM_INT_ENABLE(tpm, tpm->locality));

   intmask |= TPM_INTF_CMD_READY_INT
      | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
      | TPM_INTF_STS_VALID_INT;

   iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask);

   /* If interrupts are enabled, handle them */
   if(irq) {
      if(irq != TPM_PROBE_IRQ) {
	 tpm->irq = irq;
      } else {
	 /*FIXME add irq probing feature later */
	 printk("IRQ probing not implemented\n");
      }
   }

   if(tpm->irq) {
      iowrite8(TPM_INT_VECTOR(tpm, tpm->locality), tpm->irq);

      if(bind_pirq(tpm->irq, 1, tpm_tis_irq_handler, tpm) != 0) {
	 printk("Unabled to request irq: %u for use\n", tpm->irq);
	 printk("Will use polling mode\n");
	 tpm->irq = 0;
      } else {
	 /* Clear all existing */
	 iowrite32(TPM_INT_STATUS(tpm, tpm->locality), ioread32(TPM_INT_STATUS(tpm, tpm->locality)));

	 /* Turn on interrupts */
	 iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask | TPM_GLOBAL_INT_ENABLE);
      }
   }

   if(tpm_get_timeouts(tpm)) {
      printk("Could not get TPM timeouts and durations\n");
      goto abort_egress;
   }
   tpm_continue_selftest(tpm);


   return tpm;
abort_egress:
   if(tpm != NULL) {
      shutdown_tpm_tis(tpm);
   }
   return NULL;
}
Example #4
static void ftmac100_enable_all_int(struct ftmac100 *priv)
{
	iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
}
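The matching disable helper would write an all-zeroes mask to the same register. A sketch of what that would look like (the real ftmac100 driver defines an equivalent):

static void ftmac100_disable_all_int(struct ftmac100 *priv)
{
	/* Mask every interrupt source by clearing the IMR register. */
	iowrite32(0, priv->base + FTMAC100_OFFSET_IMR);
}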
Example #5
static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val)
{
	iowrite32(val, sp->base + reg);
}
Example #6
static void ccp5_destroy(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int i;

	/* Unregister the DMA engine */
	ccp_dmaengine_unregister(ccp);

	/* Unregister the RNG */
	ccp_unregister_rng(ccp);

	/* Remove this device from the list of available units first */
	ccp_del_device(ccp);

	/* We're in the process of tearing down the entire driver;
	 * when all the devices are gone clean up debugfs
	 */
	if (ccp_present())
		ccp5_debugfs_destroy();

	/* Disable and clear interrupts */
	ccp5_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		/* Turn off the run bit */
		iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

		/* Clear the interrupt status */
		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
				  cmd_q->qbase_dma);
	}

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}
Example #7
static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
{
	iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
}
Example #8
void vnic_intr_clean(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->int_credits);
}
Example #9
/**
 * i2c_pnx_master_xmit - transmit data to slave
 * @adap:		pointer to I2C adapter structure
 *
 * Sends one byte of data to the slave
 */
static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
{
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	u32 val;

	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));

	if (alg_data->mif.len > 0) {
		/* We still have something to talk about... */
		val = *alg_data->mif.buf++;

		if (alg_data->mif.len == 1) {
			val |= stop_bit;
			if (!alg_data->last)
				val |= start_bit;
		}

		alg_data->mif.len--;
		iowrite32(val, I2C_REG_TX(alg_data));

		dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __FUNCTION__,
			val, alg_data->mif.len + 1);

		if (alg_data->mif.len == 0) {
			if (alg_data->last) {
				/* Wait until the STOP is seen. */
				if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
					dev_err(&adap->dev, "The bus is still "
						"active after timeout\n");
			}
			/* Disable master interrupts */
			iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
				~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
				  I2C_REG_CTL(alg_data));

			del_timer_sync(&alg_data->mif.timer);

			dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n",
				__FUNCTION__);

			complete(&alg_data->mif.complete);
		}
	} else if (alg_data->mif.len == 0) {
		/* zero-sized transfer */
		i2c_pnx_stop(adap);

		/* Disable master interrupts. */
		iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
			~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
			  I2C_REG_CTL(alg_data));

		/* Stop timer. */
		del_timer_sync(&alg_data->mif.timer);
		dev_dbg(&adap->dev, "%s(): Waking up xfer routine after "
			"zero-xfer.\n", __FUNCTION__);

		complete(&alg_data->mif.complete);
	}

	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));

	return 0;
}
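The same read-modify-write sequence for disabling master interrupts appears twice above and again in the receive path and interrupt handler below. A hypothetical helper that would factor it out (not part of the original driver):

static void i2c_pnx_disable_irqs(struct i2c_pnx_algo_data *alg_data, u32 mask)
{
	u32 ctl = ioread32(I2C_REG_CTL(alg_data));

	/* Clear the requested interrupt-enable bits in the control register. */
	iowrite32(ctl & ~mask, I2C_REG_CTL(alg_data));
}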
Example #10
static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
				u32 val)
{
	iowrite32(val, gpio->base + offs);
}
Example #11
void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
	unsigned int coalescing_timer)
{
	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
}
Example #12
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
				 u32 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite32(value, core->bus->mmio + offset);
}
Example #13
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min,  fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	return vmw_fifo_send_fence(dev_priv, &dummy);
}
Example #14
/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or null on error (possible hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}
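A typical caller reserves space, fills it, and then commits. A usage sketch (the caller function is hypothetical; vmw_fifo_commit() is the reserve call's counterpart in the same driver):

static int vmw_send_cmd_example(struct vmw_private *dev_priv, uint32_t bytes)
{
	/* Reserve FIFO space, write the command words, then commit. */
	uint32_t *cmd = vmw_fifo_reserve(dev_priv, bytes);

	if (unlikely(cmd == NULL))
		return -ENOMEM;
	/* ... write bytes/4 words of command data through cmd ... */
	vmw_fifo_commit(dev_priv, bytes);
	return 0;
}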
Example #15
static void ccp5_config(struct ccp_device *ccp)
{
	/* Public side */
	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}
Example #16
/**
 * i2c_pnx_master_rcv - receive data from slave
 * @adap:		pointer to I2C adapter structure
 *
 * Reads one byte of data from the slave
 */
static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
{
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	unsigned int val = 0;
	u32 ctl = 0;

	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));

	/* Check whether there is already data,
	 * or whether we haven't 'asked' for it yet.
	 */
	if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
		dev_dbg(&adap->dev, "%s(): Write dummy data to fill "
			"Rx-fifo...\n", __FUNCTION__);

		if (alg_data->mif.len == 1) {
			/* Last byte, do not acknowledge next rcv. */
			val |= stop_bit;
			if (!alg_data->last)
				val |= start_bit;

			/*
			 * Enable interrupt RFDAIE (data in Rx fifo),
			 * and disable DRMIE (need data for Tx)
			 */
			ctl = ioread32(I2C_REG_CTL(alg_data));
			ctl |= mcntrl_rffie | mcntrl_daie;
			ctl &= ~mcntrl_drmie;
			iowrite32(ctl, I2C_REG_CTL(alg_data));
		}

		/*
		 * Now we'll 'ask' for data:
		 * For each byte we want to receive, we must
		 * write a (dummy) byte to the Tx-FIFO.
		 */
		iowrite32(val, I2C_REG_TX(alg_data));

		return 0;
	}

	/* Handle data. */
	if (alg_data->mif.len > 0) {
		val = ioread32(I2C_REG_RX(alg_data));
		*alg_data->mif.buf++ = (u8) (val & 0xff);
		dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __FUNCTION__, val,
			alg_data->mif.len);

		alg_data->mif.len--;
		if (alg_data->mif.len == 0) {
			if (alg_data->last)
				/* Wait until the STOP is seen. */
				if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
					dev_err(&adap->dev, "The bus is still "
						"active after timeout\n");

			/* Disable master interrupts */
			ctl = ioread32(I2C_REG_CTL(alg_data));
			ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
				 mcntrl_drmie | mcntrl_daie);
			iowrite32(ctl, I2C_REG_CTL(alg_data));

			/* Kill timer. */
			del_timer_sync(&alg_data->mif.timer);
			complete(&alg_data->mif.complete);
		}
	}

	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));

	return 0;
}
Example #17
static int ccp5_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, i;
	u64 status;
	u32 status_lo, status_hi;
	int ret;

	/* Find available queues */
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {

		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;
		mutex_init(&cmd_q->q_mutex);

		/* Page alignment satisfies our needs for N <= 128 */
		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
		cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
						   &cmd_q->qbase_dma,
						   GFP_KERNEL);
		if (!cmd_q->qbase) {
			dev_err(dev, "unable to allocate command queue\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q->qidx = 0;
		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_control = ccp->io_regs +
				     CMD5_Q_STATUS_INCR * (i + 1);
		cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
		cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
		cmd_q->reg_int_enable = cmd_q->reg_control +
					CMD5_Q_INT_ENABLE_BASE;
		cmd_q->reg_interrupt_status = cmd_q->reg_control +
					      CMD5_Q_INTERRUPT_STATUS_BASE;
		cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
		cmd_q->reg_int_status = cmd_q->reg_control +
					CMD5_Q_INT_STATUS_BASE;
		cmd_q->reg_dma_status = cmd_q->reg_control +
					CMD5_Q_DMA_STATUS_BASE;
		cmd_q->reg_dma_read_status = cmd_q->reg_control +
					     CMD5_Q_DMA_READ_STATUS_BASE;
		cmd_q->reg_dma_write_status = cmd_q->reg_control +
					      CMD5_Q_DMA_WRITE_STATUS_BASE;

		init_waitqueue_head(&cmd_q->int_queue);

		dev_dbg(dev, "queue #%u available\n", i);
	}

	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}

	/* Turn off the queues and disable interrupts until ready */
	ccp5_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		cmd_q->qcontrol = 0; /* Start with nothing */
		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);

		/* Clear the interrupt status */
		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
	}

	dev_dbg(dev, "Requesting an IRQ...\n");
	/* Request an irq */
	ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}
	/* Initialize the ISR tasklet */
	if (ccp->use_tasklet)
		tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
			     (unsigned long)ccp);

	dev_dbg(dev, "Loading LSB map...\n");
	/* Copy the private LSB mask to the public registers */
	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
	status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
	iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
	iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
	status = ((u64)status_hi<<30) | (u64)status_lo;

	dev_dbg(dev, "Configuring virtual queues...\n");
	/* Configure size of each virtual queue accessible to host */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		u32 dma_addr_lo;
		u32 dma_addr_hi;

		cmd_q = &ccp->cmd_q[i];

		cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

		cmd_q->qdma_tail = cmd_q->qbase_dma;
		dma_addr_lo = low_address(cmd_q->qdma_tail);
		iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
		iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

		dma_addr_hi = high_address(cmd_q->qdma_tail);
		cmd_q->qcontrol |= (dma_addr_hi << 16);
		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

		/* Find the LSB regions accessible to the queue */
		ccp_find_lsb_regions(cmd_q, status);
		cmd_q->lsb = -1; /* Unassigned value */
	}

	dev_dbg(dev, "Assigning LSBs...\n");
	ret = ccp_assign_lsbs(ccp);
	if (ret) {
		dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
		goto e_irq;
	}

	/* Optimization: pre-allocate LSB slots for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
		ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
	}

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	dev_dbg(dev, "Enabling interrupts...\n");
	ccp5_enable_queue_interrupts(ccp);

	dev_dbg(dev, "Registering device...\n");
	/* Put this on the unit list to make it available */
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	/* Set up debugfs entries */
	ccp5_debugfs_setup(ccp);

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
	sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}
Example #18
static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
{
	u32 stat, ctl;
	struct i2c_adapter *adap = dev_id;
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;

	dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n",
		__FUNCTION__,
		ioread32(I2C_REG_STS(alg_data)),
		ioread32(I2C_REG_CTL(alg_data)),
		alg_data->mif.mode);
	stat = ioread32(I2C_REG_STS(alg_data));

	/* let's see what kind of event this is */
	if (stat & mstatus_afi) {
		/* We lost arbitration in the midst of a transfer */
		alg_data->mif.ret = -EIO;

		/* Disable master interrupts. */
		ctl = ioread32(I2C_REG_CTL(alg_data));
		ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
			 mcntrl_drmie);
		iowrite32(ctl, I2C_REG_CTL(alg_data));

		/* Stop timer, to prevent timeout. */
		del_timer_sync(&alg_data->mif.timer);
		complete(&alg_data->mif.complete);
	} else if (stat & mstatus_nai) {
		/* Slave did not acknowledge, generate a STOP */
		dev_dbg(&adap->dev, "%s(): "
			"Slave did not acknowledge, generating a STOP.\n",
			__FUNCTION__);
		i2c_pnx_stop(adap);

		/* Disable master interrupts. */
		ctl = ioread32(I2C_REG_CTL(alg_data));
		ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
			 mcntrl_drmie);
		iowrite32(ctl, I2C_REG_CTL(alg_data));

		/* Our return value. */
		alg_data->mif.ret = -EIO;

		/* Stop timer, to prevent timeout. */
		del_timer_sync(&alg_data->mif.timer);
		complete(&alg_data->mif.complete);
	} else {
		/*
		 * Two options:
		 * - Master Tx needs data.
		 * - There is data in the Rx-fifo
		 * The latter is only the case if we have requested data
		 * via a dummy write (see 'i2c_pnx_master_rcv').
		 * As a sanity check, we therefore verify that the
		 * corresponding interrupt has been enabled.
		 */
		if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) {
			if (alg_data->mif.mode == I2C_SMBUS_WRITE) {
				i2c_pnx_master_xmit(adap);
			} else if (alg_data->mif.mode == I2C_SMBUS_READ) {
				i2c_pnx_master_rcv(adap);
			}
		}
	}

	/* Clear TDI and AFI bits */
	stat = ioread32(I2C_REG_STS(alg_data));
	iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data));

	dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n",
		 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)),
		 ioread32(I2C_REG_CTL(alg_data)));

	return IRQ_HANDLED;
}
Example #19
static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
{
	iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
}
Example #20
/**
 * i2c_pnx_xfer - generic transfer entry point
 * @adap:		pointer to I2C adapter structure
 * @msgs:		array of messages
 * @num:		number of messages
 *
 * Initiates the transfer
 */
static int
i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_msg *pmsg;
	int rc = 0, completed = 0, i;
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	u32 stat = ioread32(I2C_REG_STS(alg_data));

	dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n",
		__FUNCTION__, num, ioread32(I2C_REG_STS(alg_data)));

	bus_reset_if_active(adap);

	/* Process transactions in a loop. */
	for (i = 0; rc >= 0 && i < num; i++) {
		u8 addr;

		pmsg = &msgs[i];
		addr = pmsg->addr;

		if (pmsg->flags & I2C_M_TEN) {
			dev_err(&adap->dev,
				"%s: 10 bits addr not supported!\n",
				adap->name);
			rc = -EINVAL;
			break;
		}

		alg_data->mif.buf = pmsg->buf;
		alg_data->mif.len = pmsg->len;
		alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ?
			I2C_SMBUS_READ : I2C_SMBUS_WRITE;
		alg_data->mif.ret = 0;
		alg_data->last = (i == num - 1);

		dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __FUNCTION__,
			alg_data->mif.mode,
			alg_data->mif.len);

		i2c_pnx_arm_timer(adap);

		/* initialize the completion var */
		init_completion(&alg_data->mif.complete);

		/* Enable master interrupt */
		iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie |
				mcntrl_naie | mcntrl_drmie,
			  I2C_REG_CTL(alg_data));

		/* Put start-code and slave-address on the bus. */
		rc = i2c_pnx_start(addr, adap);
		if (rc < 0)
			break;

		/* Wait for completion */
		wait_for_completion(&alg_data->mif.complete);

		if (!(rc = alg_data->mif.ret))
			completed++;
		dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n",
			__FUNCTION__, rc);

		/* Clear TDI and AFI bits in case they are set. */
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
			dev_dbg(&adap->dev,
				"%s: TDI still set... clearing now.\n",
			       adap->name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) {
			dev_dbg(&adap->dev,
				"%s: AFI still set... clearing now.\n",
			       adap->name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
	}

	bus_reset_if_active(adap);

	/* Cleanup to be sure... */
	alg_data->mif.buf = NULL;
	alg_data->mif.len = 0;

	dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n",
		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));

	if (completed != num)
		return ((rc < 0) ? rc : -EREMOTEIO);

	return num;
}
Example #21
static void ftmac100_stop_hw(struct ftmac100 *priv)
{
	iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
}
Example #22
static int __devinit i2c_pnx_probe(struct platform_device *pdev)
{
	unsigned long tmp;
	int ret = 0;
	struct i2c_pnx_algo_data *alg_data;
	int freq_mhz;
	struct i2c_pnx_data *i2c_pnx = pdev->dev.platform_data;

	if (!i2c_pnx || !i2c_pnx->adapter) {
		dev_err(&pdev->dev, "%s: no platform data supplied\n",
		       __FUNCTION__);
		ret = -EINVAL;
		goto out;
	}

	platform_set_drvdata(pdev, i2c_pnx);

	if (i2c_pnx->calculate_input_freq)
		freq_mhz = i2c_pnx->calculate_input_freq(pdev);
	else {
		freq_mhz = PNX_DEFAULT_FREQ;
		dev_info(&pdev->dev, "Setting bus frequency to default value: "
		       "%d MHz", freq_mhz);
	}

	i2c_pnx->adapter->algo = &pnx_algorithm;

	alg_data = i2c_pnx->adapter->algo_data;
	init_timer(&alg_data->mif.timer);
	alg_data->mif.timer.function = i2c_pnx_timeout;
	alg_data->mif.timer.data = (unsigned long)i2c_pnx->adapter;

	/* Register I/O resource */
	if (!request_region(alg_data->base, I2C_PNX_REGION_SIZE, pdev->name)) {
		dev_err(&pdev->dev,
		       "I/O region 0x%08x for I2C already in use.\n",
		       alg_data->base);
		ret = -ENODEV;
		goto out_drvdata;
	}

	if (!(alg_data->ioaddr =
			(u32)ioremap(alg_data->base, I2C_PNX_REGION_SIZE))) {
		dev_err(&pdev->dev, "Couldn't ioremap I2C I/O region\n");
		ret = -ENOMEM;
		goto out_release;
	}

	i2c_pnx->set_clock_run(pdev);

	/*
	 * Clock Divisor High: this value is the number of system clocks
	 * the serial clock (SCL) will be high.
	 * For example, if the system clock period is 50 ns and the maximum
	 * desired serial period is 10000 ns (100 kHz), then CLKHI would be
	 * set to 0.5*(f_sys/f_i2c)-2=0.5*(20e6/100e3)-2=98. The actual value
	 * programmed into CLKHI will vary from this slightly due to
	 * variations in the output pad's rise and fall times as well as
	 * the deglitching filter length.
	 */

	tmp = ((freq_mhz * 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;
	iowrite32(tmp, I2C_REG_CKH(alg_data));
	iowrite32(tmp, I2C_REG_CKL(alg_data));

	iowrite32(mcntrl_reset, I2C_REG_CTL(alg_data));
	if (wait_reset(I2C_PNX_TIMEOUT, alg_data)) {
		ret = -ENODEV;
		goto out_unmap;
	}
	init_completion(&alg_data->mif.complete);

	ret = request_irq(alg_data->irq, i2c_pnx_interrupt,
			0, pdev->name, i2c_pnx->adapter);
	if (ret)
		goto out_clock;

	/* Register this adapter with the I2C subsystem */
	i2c_pnx->adapter->dev.parent = &pdev->dev;
	ret = i2c_add_adapter(i2c_pnx->adapter);
	if (ret < 0) {
		dev_err(&pdev->dev, "I2C: Failed to add bus\n");
		goto out_irq;
	}

	dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
	       i2c_pnx->adapter->name, alg_data->base, alg_data->irq);

	return 0;

out_irq:
	free_irq(alg_data->irq, i2c_pnx->adapter);
out_clock:
	i2c_pnx->set_clock_stop(pdev);
out_unmap:
	iounmap((void *)alg_data->ioaddr);
out_release:
	release_region(alg_data->base, I2C_PNX_REGION_SIZE);
out_drvdata:
	platform_set_drvdata(pdev, NULL);
out:
	return ret;
}
Example #23
static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
	iowrite32(data, dev->iobase + offset);
}
Example #24
static inline void
socle_vop_write(u32 val, u32 reg)
{
	iowrite32(val, SOCLE_VOP_BASE+reg);
}
Example #25
/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r, *mem;
	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
	resource_size_t	dma_tx_chan = SPI_NO_RESOURCE;
	resource_size_t	dma_eventq = SPI_NO_RESOURCE;
	int i = 0, ret = 0;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		ret = -ENODEV;
		goto err;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(&pdev->dev, master);

	davinci_spi = spi_master_get_devdata(master);
	if (davinci_spi == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	davinci_spi->pbase = r->start;
	davinci_spi->region_size = resource_size(r);
	davinci_spi->pdata = pdata;

	mem = request_mem_region(r->start, davinci_spi->region_size,
					pdev->name);
	if (mem == NULL) {
		ret = -EBUSY;
		goto free_master;
	}

	davinci_spi->base = (struct davinci_spi_reg __iomem *)
			ioremap(r->start, davinci_spi->region_size);
	if (davinci_spi->base == NULL) {
		ret = -ENOMEM;
		goto release_region;
	}

	davinci_spi->irq = platform_get_irq(pdev, 0);
	if (davinci_spi->irq <= 0) {
		ret = -EINVAL;
		goto unmap_io;
	}

	ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
			  dev_name(&pdev->dev), davinci_spi);
	if (ret)
		goto unmap_io;

	/* Allocate tmp_buf for tx_buf */
	davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (davinci_spi->tmp_buf == NULL) {
		ret = -ENOMEM;
		goto irq_free;
	}

	davinci_spi->bitbang.master = spi_master_get(master);
	if (davinci_spi->bitbang.master == NULL) {
		ret = -ENODEV;
		goto free_tmp_buf;
	}

	davinci_spi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(davinci_spi->clk)) {
		ret = -ENODEV;
		goto put_master;
	}
	clk_enable(davinci_spi->clk);


	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->setup = davinci_spi_setup;
	master->cleanup = davinci_spi_cleanup;

	davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
	davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;

	davinci_spi->version = pdata->version;
	use_dma = pdata->use_dma;

	davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
	if (davinci_spi->version == SPI_VERSION_2)
		davinci_spi->bitbang.flags |= SPI_READY;

	if (use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r)
			dma_rx_chan = r->start;
		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (r)
			dma_tx_chan = r->start;
		r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
		if (r)
			dma_eventq = r->start;
	}

	if (!use_dma ||
	    dma_rx_chan == SPI_NO_RESOURCE ||
	    dma_tx_chan == SPI_NO_RESOURCE ||
	    dma_eventq	== SPI_NO_RESOURCE) {
		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
		use_dma = 0;
	} else {
		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
		davinci_spi->dma_channels = kzalloc(master->num_chipselect
				* sizeof(struct davinci_spi_dma), GFP_KERNEL);
		if (davinci_spi->dma_channels == NULL) {
			ret = -ENOMEM;
			goto free_clk;
		}

		for (i = 0; i < master->num_chipselect; i++) {
			davinci_spi->dma_channels[i].dma_rx_channel = -1;
			davinci_spi->dma_channels[i].dma_rx_sync_dev =
				dma_rx_chan;
			davinci_spi->dma_channels[i].dma_tx_channel = -1;
			davinci_spi->dma_channels[i].dma_tx_sync_dev =
				dma_tx_chan;
			davinci_spi->dma_channels[i].eventq = dma_eventq;
		}
		dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
				"Using RX channel = %d , TX channel = %d and "
				"event queue = %d", dma_rx_chan, dma_tx_chan,
				dma_eventq);
	}

	davinci_spi->get_rx = davinci_spi_rx_buf_u8;
	davinci_spi->get_tx = davinci_spi_tx_buf_u8;

	init_completion(&davinci_spi->done);

	/* Reset In/OUT SPI module */
	iowrite32(0, davinci_spi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, davinci_spi->base + SPIGCR0);

	/* Clock internal */
	if (davinci_spi->pdata->clk_internal)
		set_io_bits(davinci_spi->base + SPIGCR1,
				SPIGCR1_CLKMOD_MASK);
	else
		clear_io_bits(davinci_spi->base + SPIGCR1,
				SPIGCR1_CLKMOD_MASK);

	/* master mode default */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);

	if (davinci_spi->pdata->intr_level)
		iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);

	ret = spi_bitbang_start(&davinci_spi->bitbang);
	if (ret)
		goto free_clk;

	dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base);

	if (!pdata->poll_mode)
		dev_info(&pdev->dev, "Operating in interrupt mode"
			" using IRQ %d\n", davinci_spi->irq);

	return ret;

free_clk:
	clk_disable(davinci_spi->clk);
	clk_put(davinci_spi->clk);
put_master:
	spi_master_put(master);
free_tmp_buf:
	kfree(davinci_spi->tmp_buf);
irq_free:
	free_irq(davinci_spi->irq, davinci_spi);
unmap_io:
	iounmap(davinci_spi->base);
release_region:
	release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
free_master:
	kfree(master);
err:
	return ret;
}
Example #26
static void fotg210_disable_dma(struct fotg210_ep *ep)
{
	iowrite32(DMATFNR_DISDMA, ep->fotg210->reg + FOTG210_DMATFNR);
}
Example #27
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count, temp_count;
	u8 conv = 1;
	u8 tmp;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct davinci_spi_platform_data *pdata;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	sdev = davinci_spi->bitbang.master->dev.parent;

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	word_len = conv * 8;

	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -EINVAL;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Put delay val if required */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;	/* the number of elements */
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;

	/* CS default = 0xFF */
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	while ((ioread32(davinci_spi->base + SPIBUF)
				& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();


	if (t->tx_buf) {
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count;
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX tmp buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count + 1;
	}

	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
					data_type, temp_count, 1, 0, ASYNC);
	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

	if (t->rx_buf) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						 count, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
				data_type, count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
				data_type, 0);
	}

	if ((t->tx_buf) || (t->rx_buf))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf)
		edma_start(davinci_spi_dma->dma_rx_channel);

	if ((t->rx_buf) || (t->tx_buf))
		davinci_spi_set_dma_req(spi, 1);

	if (t->tx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

	if (t->rx_buf)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
Example #28
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
Example #29
static void oc_setreg_32(struct ocores_i2c *i2c, int reg, u8 value)
{
	iowrite32(value, i2c->base + (reg << i2c->reg_shift));
}
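The reg_shift scaling implies a matching read accessor. A sketch mirroring the write path above (the actual ocores driver defines an equivalent getter):

static inline u8 oc_getreg_32(struct ocores_i2c *i2c, int reg)
{
	/* Same address computation as oc_setreg_32(), but reading. */
	return ioread32(i2c->base + (reg << i2c->reg_shift));
}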
Example #30
static int wdog_stop(void)
{
	iowrite32(PM_PASSWORD | PM_RSTC_RESET, __io_address(PM_RSTC));
	printk(KERN_INFO "watchdog stopped\n");
	return 0;
}
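Every write to the PM block must carry PM_PASSWORD in the top bits, as the stop path above shows. A sketch of a corresponding start path; PM_WDOG, PM_WDOG_TIME_SET, PM_RSTC_WRCFG_CLR and PM_RSTC_WRCFG_FULL_RESET are assumed names from the same platform headers, not shown in this snippet:

static void wdog_start(unsigned long timeout)
{
	u32 cur;

	/* Program the timeout; the password bits must accompany the write.
	 * Register/bit names here are assumptions, see the note above. */
	iowrite32(PM_PASSWORD | (timeout & PM_WDOG_TIME_SET),
		  __io_address(PM_WDOG));

	/* Arm a full reset for when the counter expires. */
	cur = ioread32(__io_address(PM_RSTC));
	iowrite32(PM_PASSWORD | (cur & PM_RSTC_WRCFG_CLR) |
		  PM_RSTC_WRCFG_FULL_RESET, __io_address(PM_RSTC));
}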