Example #1
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume that everything is 4 byte aligned, so include
	 * a basic sanity check
	 */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Find an available memDMA channel */
	while (1) {
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.
	 */
	__builtin_bfin_ssync();

	/* Destination */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	/* Since we are atomic now, don't use the workaround ssync */
	__builtin_bfin_ssync();
}
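
As a usage sketch (the linker symbols and the wrapper name below are placeholders, not taken from the code above), an early-boot caller would pass 4-byte-aligned pointers and a length that is a multiple of 4, then wait for the MDMA channels with early_dma_memcpy_done() from Example #8:

/* Hypothetical early-boot relocation built on the helpers above.
 * _l1_lma, _l1_vma and _l1_end are placeholder linker symbols.
 */
extern char _l1_lma[], _l1_vma[], _l1_end[];

static void __init relocate_l1_sketch(void)
{
	size_t len = _l1_end - _l1_vma;		/* must be a multiple of 4 */

	early_dma_memcpy(_l1_vma, _l1_lma, len);
	early_dma_memcpy_done();		/* wait for both MDMA pairs */
}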
Example #2
u8 bfspi_read_8_bits(u16 chip_select)
{
  u16 flag_enable, flag;
  u8 ret;

  if (chip_select < 8) {
    flag = bfin_read_SPI_FLG();
    flag_enable = flag & ~(1 << (chip_select + 8));
    //PRINTK("read: flag: 0x%04x flag_enable: 0x%04x \n", flag, flag_enable);

    /* drop SPISEL */
    bfin_write_SPI_FLG(flag_enable);
  }
  else {
    /* chip selects above 7 are driven directly as GPIOs */
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
    bfin_write_FIO_FLAG_C((1<<chip_select));
#endif
#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
    bfin_write_PORTFIO_CLEAR((1<<chip_select));
#endif
    __builtin_bfin_ssync();
  }

  /* The read of RDBR kicks off the transfer; we detect the end by
     polling RXS.  We then read the shadow register so the read itself
     does not start another transfer.

     While reading we transmit a dummy value, 0xff.  For the MMC card,
     a 0 bit indicates the start of a command sequence, so an all-1s
     sequence keeps the MMC card in its current state.
  */
  bfin_write_SPI_TDBR(0xff);
  bfin_read_SPI_RDBR(); __builtin_bfin_ssync();
  do { } while (!(bfin_read_SPI_STAT() & RXS)); //hardcode RXS mask
  ret = bfin_read_SPI_SHADOW(); __builtin_bfin_ssync();

  //ret = read_RDBR(); __builtin_bfin_ssync();
  PRINTK("\nkern>> read: 0x%04X\n", ret);	
  /* raise SPISEL */
  if (chip_select < 8) {
    bfin_write_SPI_FLG(flag); 
  }
  else {
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
    bfin_write_FIO_FLAG_S((1<<chip_select)); 
#endif
#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
        bfin_write_PORTFIO_SET((1<<chip_select));
#endif
    __builtin_bfin_ssync();
  }

  return ret;
}
Example #3
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	
	while (1) {
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	__builtin_bfin_ssync();

	
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	
	__builtin_bfin_ssync();
}
Example #4
void __init init_leds(void)
{
	unsigned int tmp = 0;

#if defined(CONFIG_BFIN_ALIVE_LED)
	/* config pins as output. */
	tmp = *(volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_DPORT;
	__builtin_bfin_ssync();
	*(volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_DPORT =
	    tmp | CONFIG_BFIN_ALIVE_LED_PIN;
	__builtin_bfin_ssync();

	/* First set the LED off */
	tmp = *(volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_PORT;
	__builtin_bfin_ssync();
	*(volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_PORT = tmp | CONFIG_BFIN_ALIVE_LED_PIN;	/* light off */
	__builtin_bfin_ssync();
#endif

#if defined(CONFIG_BFIN_IDLE_LED)
	/* config pins as output. */
	tmp = *(volatile unsigned short *)CONFIG_BFIN_IDLE_LED_DPORT;
	__builtin_bfin_ssync();
	*(volatile unsigned short *)CONFIG_BFIN_IDLE_LED_DPORT =
	    tmp | CONFIG_BFIN_IDLE_LED_PIN;
	__builtin_bfin_ssync();

	/* First set the LED off */
	tmp = *(volatile unsigned short *)CONFIG_BFIN_IDLE_LED_PORT;
	__builtin_bfin_ssync();
	*(volatile unsigned short *)CONFIG_BFIN_IDLE_LED_PORT = tmp | CONFIG_BFIN_IDLE_LED_PIN;	/* light off */
	__builtin_bfin_ssync();
#endif

}
Example #5
/**
 *	__dma_memcpy - program the MDMA registers
 *
 * Actually program MDMA0 and wait for the transfer to finish.  Disable IRQs
 * while programming registers so that everything is fully configured.  Wait
 * for DMA to finish with IRQs enabled.  If interrupted, the initial DMA_DONE
 * check will make sure we don't clobber any existing transfer.
 */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.  Do it under irq lock and
	 * without the anomaly version (because we are atomic already).
	 */
	__builtin_bfin_ssync();

	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		/* For larger bit sizes, we've already divided down cnt so it
		 * is no longer a multiple of 64k.  So we have to break down
		 * the limit here so it is a multiple of the incoming size.
		 * There is no limitation here in terms of total size other
		 * than the hardware though as the bits lost in the shift are
		 * made up by MODIFY (== we can hit the whole address space).
		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
		 */
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}
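
Only the 2D setup appears above; for orientation, here is a hedged sketch of a caller that picks the transfer width from the alignment of both buffers and the length before handing off to __dma_memcpy(). The wrapper name is illustrative, and WDSIZE_8/WDSIZE_16/WDSIZE_32 are the usual Blackfin DMA config constants:

/* Illustrative caller: choose the widest word size that the source,
 * destination and length all allow, then hand off to __dma_memcpy().
 * The function name is a sketch, not the driver's own entry point.
 */
static void *dma_memcpy_sketch(void *pdst, const void *psrc, size_t size)
{
	u32 dst = (u32)pdst;
	u32 src = (u32)psrc;

	if (size == 0)
		return NULL;

	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0)
		__dma_memcpy(dst, 4, src, 4, size >> 2, WDSIZE_32);
	else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0)
		__dma_memcpy(dst, 2, src, 2, size >> 1, WDSIZE_16);
	else
		__dma_memcpy(dst, 1, src, 1, size, WDSIZE_8);

	return pdst;
}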
Example #6
void bfspi_write_8_bits(u16 chip_select, u8 bits)
{
  u16 flag_enable, flag;

  if (chip_select < 8) {
    flag = bfin_read_SPI_FLG();
    flag_enable = flag & ~(1 << (chip_select + 8));
    //PRINTK("kern>> chip_select: %d write: flag: 0x%04x flag_enable: 0x%04x \n", chip_select, flag, flag_enable);

    /* drop SPISEL */
    bfin_write_SPI_FLG(flag_enable); 
  }
  else {
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
  	bfin_write_FIO_FLAG_C((1<<chip_select)); 
#endif
#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
	bfin_write_PORTFIO_CLEAR((1<<chip_select));
#endif
  	__builtin_bfin_ssync();
  }

  /* read kicks off transfer, detect end by polling RXS */
  
  bfin_write_SPI_TDBR(bits);
  bfin_read_SPI_RDBR(); __builtin_bfin_ssync();
  do {} while (!(bfin_read_SPI_STAT() & RXS)); //hardcode RXS mask
  //(void) bfin_read_SPI_SHADOW(); //discard data && clear rxs
  //__builtin_bfin_ssync();
  
  /* raise SPISEL */
  if (chip_select < 8) {
    bfin_write_SPI_FLG(flag); 
  }
  else {
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
    bfin_write_FIO_FLAG_S((1<<chip_select)); 
#endif
#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
        bfin_write_PORTFIO_SET((1<<chip_select));
#endif
    __builtin_bfin_ssync();
  }
}
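
Taken together with Example #2, a hedged sketch of how the two byte-wide helpers combine into a register read, for a device that tolerates a chip-select pulse around each byte (both helpers drop and raise SPISEL themselves). The 0x80 read flag and the function name are assumptions for illustration only:

/* Illustrative only: send an address byte, then clock one byte back.
 * The 0x80 "read" bit is an assumed device convention, not something
 * defined by the driver above.
 */
static u8 spi_read_reg_sketch(u16 chip_select, u8 reg)
{
	bfspi_write_8_bits(chip_select, reg | 0x80);	/* command/address byte */
	return bfspi_read_8_bits(chip_select);		/* response byte */
}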
Example #7
inline static void do_leds(void)
{
	static unsigned int count = 50;
	static int flag = 0;
	unsigned short tmp = 0;

	if (--count == 0) {
		count = 50;
		flag = ~flag;
	}
	tmp = *(volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_PORT;
	__builtin_bfin_ssync();

	if (flag)
		tmp &= ~CONFIG_BFIN_ALIVE_LED_PIN;	/* light on */
	else
		tmp |= CONFIG_BFIN_ALIVE_LED_PIN;	/* light off */

	*(volatile unsigned short *)CONFIG_BFIN_ALIVE_LED_PORT = tmp;
	__builtin_bfin_ssync();

}
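
do_leds() expects to be called periodically; with count reloaded at 50 it toggles the alive LED every 50 invocations. A hedged sketch of driving it from a timer tick handler (the handler name and wiring are illustrative):

/* Hypothetical periodic hook: call do_leds() once per timer tick so
 * the alive LED toggles every 50 ticks.  Not taken from the examples.
 */
static irqreturn_t timer_tick_sketch(int irq, void *dev_id)
{
#if defined(CONFIG_BFIN_ALIVE_LED)
	do_leds();
#endif
	/* ... regular tick bookkeeping ... */
	return IRQ_HANDLED;
}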
Example #8
void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
		continue;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}
Example #9
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	__builtin_bfin_ssync();

	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}
Example #10
void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
		continue;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
	/*
	 * Now that DMA is done, we would normally flush cache, but
	 * i/d cache isn't running this early, so we don't bother,
	 * and just clear out the DMA channel for next time
	 */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}
Example #11
static int coreb_ioctl(struct inode *inode, struct file *file, 
			unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	int coreb_index = 0;

	spin_lock_irq(&coreb_lock);

	switch (cmd)
	{
	case CMD_COREB_INDEX:
		if (copy_from_user(&coreb_index, (int*)arg, sizeof(int))) {
			retval = -EFAULT;
			break;
		}
		switch (coreb_index) {
		case 0:
			coreb_base = 0xff600000;
			coreb_size = 0x4000;
			break;
		case 1:
			coreb_base = 0xff610000;
			coreb_size = 0x4000;
			break;
		case 2:
			coreb_base = 0xff500000;
			coreb_size = 0x8000;
			break;
		case 3:
			coreb_base = 0xff400000;
			coreb_size = 0x8000;
			break;
		default:
			retval = -EINVAL;
			break;
		}
		down(&file->f_dentry->d_inode->i_sem);
		file->f_pos = 0;
		up(&file->f_dentry->d_inode->i_sem);
		break;
	case CMD_COREB_START:
		if (coreb_status & COREB_IS_RUNNING) {
			retval = -EBUSY;
			break;
		}
		printk(KERN_INFO "Starting Core B\n");
		coreb_status |= COREB_IS_RUNNING;
		*pSICA_SYSCR &= ~0x0020;
		__builtin_bfin_ssync();
		break;
#if defined(CONFIG_BF561_COREB_RESET)
	case CMD_COREB_STOP:
		printk(KERN_INFO "Stopping Core B\n");
		*pSICA_SYSCR |= 0x0020;
		*pSICB_SYSCR |= 0x0080;
		coreb_status &= ~COREB_IS_RUNNING;
		break;
	case CMD_COREB_RESET:
		printk(KERN_INFO "Resetting Core B\n");
		*pSICB_SYSCR |= 0x0080;
		break;
#endif
	}

	spin_unlock_irq(&coreb_lock);

	return retval;
}
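
From user space, the hedged sketch below selects Core B memory region 0 and starts the core. The device node path and the availability of the CMD_COREB_* request codes in a user-visible header are assumptions; loading the Core B image via write() (which is why the index command rewinds f_pos) would normally happen between the two ioctls:

/* Illustrative user-space sequence; /dev/coreb and the header providing
 * CMD_COREB_INDEX / CMD_COREB_START are assumptions.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int start_coreb_sketch(void)
{
	int index = 0;
	int fd = open("/dev/coreb", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, CMD_COREB_INDEX, &index) < 0)	/* region 0: 0xff600000 */
		goto fail;
	/* ...write() the Core B image here... */
	if (ioctl(fd, CMD_COREB_START, 0) < 0)
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}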
Example #12
static void do_sync(void)
{
	__builtin_bfin_ssync();
}
Example #13
void bfspi_reset(int reset_bit) {
	PRINTK("toggle reset\n");
  
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
       	PRINTK("set reset to PF%d\n",reset_bit);
  	bfin_write_FIO_DIR(bfin_read_FIO_DIR() | (1<<reset_bit)); 
  	__builtin_bfin_ssync();

  	bfin_write_FIO_FLAG_C((1<<reset_bit)); 
  	__builtin_bfin_ssync();
  	udelay(100);

  	bfin_write_FIO_FLAG_S((1<<reset_bit));
  	__builtin_bfin_ssync();
#endif
  	
#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
	if (reset_bit == 1) {
       		PRINTK("set reset to PF10\n");
                bfin_write_PORTF_FER(bfin_read_PORTF_FER() & 0xFBFF);
		__builtin_bfin_ssync();
		bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | 0x0400);
		__builtin_bfin_ssync();
		bfin_write_PORTFIO_CLEAR(1<<10);
		__builtin_bfin_ssync();
		udelay(100);
		bfin_write_PORTFIO_SET(1<<10);
		__builtin_bfin_ssync();
        } else if (reset_bit == 2)  {
                PRINTK("Error: cannot set reset to PJ11\n");
        } else if (reset_bit == 3) {
                PRINTK("Error: cannot set reset to PJ10\n");
        } else if (reset_bit == 4) {
                PRINTK("set reset to PF6\n");
                bfin_write_PORTF_FER(bfin_read_PORTF_FER() & 0xFFBF);
                __builtin_bfin_ssync();
		bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | 0x0040);
		__builtin_bfin_ssync();
		bfin_write_PORTFIO_CLEAR(1<<6);
		__builtin_bfin_ssync();
		udelay(100);
		bfin_write_PORTFIO_SET(1<<6);
		__builtin_bfin_ssync();
        } else if (reset_bit == 5) {
                PRINTK("set reset to PF5\n");
                bfin_write_PORTF_FER(bfin_read_PORTF_FER() & 0xFFDF);
                __builtin_bfin_ssync();
		bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | 0x0020);
		__builtin_bfin_ssync();
		bfin_write_PORTFIO_CLEAR(1<<5);
		__builtin_bfin_ssync();
		udelay(100);
		bfin_write_PORTFIO_SET(1<<5);
		__builtin_bfin_ssync();
        } else if (reset_bit == 6) {
                PRINTK("set reset to PF4\n");
                bfin_write_PORTF_FER(bfin_read_PORTF_FER() & 0xFFEF);
                __builtin_bfin_ssync();
		bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | 0x0010);
		__builtin_bfin_ssync();
		bfin_write_PORTFIO_CLEAR(1<<4);
		__builtin_bfin_ssync();
		udelay(100);
		bfin_write_PORTFIO_SET(1<<4);
		__builtin_bfin_ssync();
        } else if (reset_bit == 7) {
                PRINTK("Error: cannot set reset to PJ5\n");
        } else if (reset_bit == 8) {
                PRINTK("Using PF8 for reset...\n");
                bfin_write_PORTF_FER(bfin_read_PORTF_FER() & 0xFEFF);
                __builtin_bfin_ssync();
		bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | 0x0100);
		__builtin_bfin_ssync();
		bfin_write_PORTFIO_CLEAR(1<<8);
		__builtin_bfin_ssync();
		udelay(100);
		bfin_write_PORTFIO_SET(1<<8);
        } else if (reset_bit == 9) {
                PRINTK("Using PF9 for reset...\n");
                bfin_write_PORTF_FER(bfin_read_PORTF_FER() & 0xFDFF);
                __builtin_bfin_ssync();
		bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | 0x0200);
		__builtin_bfin_ssync();
		bfin_write_PORTFIO_CLEAR(1<<9);
		__builtin_bfin_ssync();
		udelay(100);
		bfin_write_PORTFIO_SET(1<<9);
        }

#endif	
  /*
     Per p24 of the 3050 data sheet, allow 1 ms for PLL lock.  With
     less than 1 ms (1000 us) I found register 2 would read 0 rather
     than 3, indicating a bad reset.
  */
  udelay(1000); 
}
Example #14
void bfspi_hardware_init(int baud, u16 new_chip_select_mask) 
{
	u16 ctl_reg, flag;
	int cs, bit;

  	if (baud < 4) {
    		printk("\nkern>> baud = %d may mean SPI clock too fast for Si Labs 3050, "
	   		"consider baud == 4 or greater\n", baud);
  	}

	PRINTK("\nkern>> bfspi_spi_init\n");
	PRINTK("kern>>   new_chip_select_mask = 0x%04x\n", new_chip_select_mask);
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
	PRINTK("kern>>   FIOD_DIR = 0x%04x\n", bfin_read_FIO_DIR());
#endif

#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
	PRINTK("  FIOD_DIR = 0x%04x\n", bfin_read_PORTFIO_DIR());
#endif
	/* grab SPISEL/GPIO pins for SPI, keep level of SPISEL pins H */
	chip_select_mask |= new_chip_select_mask;

	flag = 0xff00 | (chip_select_mask & 0xff);

	/* set up chip selects greater than PF7 */

  	if (chip_select_mask & 0xff00) {
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
	  bfin_write_FIO_DIR(bfin_read_FIO_DIR() | (chip_select_mask & 0xff00)); 
#endif
#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
	bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | (chip_select_mask & 0xff00));
#endif
   	  __builtin_bfin_ssync();
	}
#if (defined(CONFIG_BF533) || defined(CONFIG_BF532))
	PRINTK("kern>>   After FIOD_DIR = 0x%04x\n", bfin_read_FIO_DIR());
#endif

#if (defined(CONFIG_BF536) || defined(CONFIG_BF537))
	PRINTK("  After FIOD_DIR = 0x%04x\n",bfin_read_PORTFIO_DIR());

	/* we need to work thru each bit in mask and set the MUX regs */

	for(bit=0; bit<8; bit++) {
	  if (chip_select_mask & (1<<bit)) {
	    PRINTK("SPI CS bit: %d enabled\n", bit);
	    cs = bit;
	    if (cs == 1) {
	      PRINTK("set for chip select 1\n");
	      bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00);
	      __builtin_bfin_ssync();

	    } else if (cs == 2 || cs == 3) {
	      PRINTK("set for chip select 2\n");
	      bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI);
	      __builtin_bfin_ssync();
	      bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
	      __builtin_bfin_ssync();

	    } else if (cs == 4) {
	      bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI);
	      __builtin_bfin_ssync();
	      bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840);
	      __builtin_bfin_ssync();

	    } else if (cs == 5) {
	      bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI);
	      __builtin_bfin_ssync();
	      bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820);
	      __builtin_bfin_ssync();

	    } else if (cs == 6) {
	      bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI);
	      __builtin_bfin_ssync();
	      bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810);
	      __builtin_bfin_ssync();

	    } else if (cs == 7) {
	      bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI);
	      __builtin_bfin_ssync();
	      bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
	      __builtin_bfin_ssync();
	    }
	  }
	}
#endif

  	/* note TIMOD = 00 - reading SPI_RDBR kicks off transfer */
  	//Undefined flags; let's patch it for now.  BFSI is kind of obsolete.
	//Will be replaced in the future.
	ctl_reg = 0xD004;   //0101 1100 0000  0100  SPE | MSTR | CPOL | CPHA | SZ;
	ctl_reg |= (spimode << 10);
  	bfin_write_SPI_FLG(flag);
  	bfin_write_SPI_BAUD(baud);
  	bfin_write_SPI_CTL(ctl_reg);
}
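
A hedged sketch of the bring-up order implied by the routines above: program the SPI controller and chip-select pins first, then pulse the reset line, which already includes the post-reset PLL settling delay noted in Example #13. The baud value, chip-select mask and reset bit are placeholders:

/* Illustrative bring-up; chip select 2 and reset bit 4 are placeholders. */
static void bfspi_bringup_sketch(void)
{
	bfspi_hardware_init(4, 1 << 2);	/* baud >= 4 per the warning above */
	bfspi_reset(4);			/* drives the reset GPIO and waits 1 ms */
}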
Example #15
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume that everything is 4 byte aligned, so include
	 * a basic sanity check
	 */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Find an available memDMA channel */
	while (1) {
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		if (!DMA_MMR_READ(&src_ch->cfg))
			break;
		else if (DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE) {
			DMA_MMR_WRITE(&src_ch->cfg, 0);
			break;
		}
	}

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.
	 */
	__builtin_bfin_ssync();

	/* Destination */
	bfin_write32(&dst_ch->start_addr, dst);
	DMA_MMR_WRITE(&dst_ch->x_count, size >> 2);
	DMA_MMR_WRITE(&dst_ch->x_modify, 1 << 2);
	DMA_MMR_WRITE(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source */
	bfin_write32(&src_ch->start_addr, src);
	DMA_MMR_WRITE(&src_ch->x_count, size >> 2);
	DMA_MMR_WRITE(&src_ch->x_modify, 1 << 2);
	DMA_MMR_WRITE(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable */
	DMA_MMR_WRITE(&src_ch->cfg, DMAEN | WDSIZE_32);
	DMA_MMR_WRITE(&dst_ch->cfg, WNR | DI_EN_X | DMAEN | WDSIZE_32);

	/* Since we are atomic now, don't use the workaround ssync */
	__builtin_bfin_ssync();

#ifdef CONFIG_BF60x
	/* Work around a possible MDMA anomaly. Running 2 MDMA channels to
	 * transfer DDR data to L1 SRAM may corrupt data.
	 * Should be reverted after this issue is root caused.
	 */
	while (!(DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE))
		continue;
#endif
}