Example #1
void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = consistent_alloc(gfp, size, dma_handle);
	if (ret)
		memset(ret, 0, size);

	return ret;
}
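A matching release path is implied by this wrapper. As a minimal sketch, assuming the consistent_free() variant that takes (vaddr, size, handle) — the form used in Example #10's error path below — the inverse would be:

void dma_free_coherent(struct device *hwdev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	/* Sketch only: hand the buffer back to the allocator that
	 * consistent_alloc() drew it from.
	 */
	consistent_free(vaddr, size, dma_handle);
}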
Example #2
/* We used to do this earlier, but have to postpone as long as possible
 * to ensure the kernel VM is now running.
 */
static void
alloc_host_memory(void)
{
	dma_addr_t	physaddr;

	/* Set the host page for allocation.
	*/
	host_buffer = (uint)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &physaddr);
	host_end = host_buffer + PAGE_SIZE;
}
Example #3
void *
dma_alloc_coherent(void *p, int size, dma_addr_t *phys, int flags)
{
    void *ret;

//    printk("dma_alloc_coherent: p %p, size %d, phys %p, flags 0x%x\n", p, size, phys, flags);
    ret = consistent_alloc(GFP_DMA | GFP_KERNEL | flags, size, phys);
//    printk("dma_alloc_coherent: ret %p\n", ret);
    return ret;
}
Example #4
static void *frv_dma_alloc(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = consistent_alloc(gfp, size, dma_handle);
	if (ret)
		memset(ret, 0, size);

	return ret;
}
Example #5
static int omap_mmc_slot_init( void )
{
	int retval;
	unsigned long flags;

	/* Set up timers */

	g_omap_mmc_data.sd_detect_timer.function = omap_mmc_fix_sd_detect;
	g_omap_mmc_data.sd_detect_timer.data     = (unsigned long) &g_omap_mmc_data;
	init_timer(&g_omap_mmc_data.sd_detect_timer);

	/* Basic service interrupt */

	local_irq_save(flags);

	retval = request_irq( INT_MMC, omap_mmc_int,
			      SA_INTERRUPT, "omap_mmc_int", &g_omap_mmc_data );
	if ( retval ) {
		printk(KERN_CRIT "MMC/SD: unable to grab MMC IRQ\n");
		local_irq_restore(flags);
		return retval;
	}

	disable_irq( INT_MMC );

	/* Card Detect interrupt */

	retval = request_irq( INT_FPGA_CD, omap_mmc_sd_detect_int, 
			      SA_INTERRUPT, "omap_mmc_fpga_cd", &g_omap_mmc_data );

	if ( retval ) {
		printk(KERN_CRIT "MMC/SD: unable to grab FPGA_CD_IRQ\n");
		free_irq(INT_MMC, &g_omap_mmc_data);
		local_irq_restore(flags);
		return retval;
	}

	disable_irq( INT_FPGA_CD );

	/* Allocate DMA buffers (max BLOCK LENGTH = 2048 (11 bit)).
	 * We run with interrupts disabled here, and GFP_KERNEL and
	 * GFP_ATOMIC are mutually exclusive, so use the atomic variant.
	 */
	g_omap_mmc_data.buf_dma_virt = consistent_alloc(GFP_ATOMIC | GFP_DMA,
				       2048, &(g_omap_mmc_data.buf_dma_phys));
	if (g_omap_mmc_data.buf_dma_virt == NULL) {
		free_irq(INT_FPGA_CD, &g_omap_mmc_data);
		free_irq(INT_MMC, &g_omap_mmc_data);
		local_irq_restore(flags);
		return -ENOMEM;
	}

#ifdef CONFIG_PM
        pm_register(PM_UNKNOWN_DEV, PM_SYS_UNKNOWN, omap_mmc_pm_callback);
#endif
	omap_mmc_slot_up();

	enable_irq( INT_FPGA_CD );	/* Enable IRQ to detect card */

	local_irq_restore(flags);

	return retval;
}
Example #6
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag  &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
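The free side of this allocator mirrors the #ifdef split: non-coherent ports hand the buffer back to consistent_free(), coherent ones return the pages to the page allocator. A hedged sketch — the one-argument consistent_free() signature is an assumption, since it varies between ports:

static void dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	/* Assumed signature; some ports also take the size. */
	consistent_free(vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}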
Example #7
/*
 *   Create a scatter/gather list handle.  This is simply a structure which
 *   describes a scatter/gather list.
 *
 *   A handle is returned in "handle" which the driver should save in order to 
 *   be able to access this list later.  A chunk of memory will be allocated 
 *   to be used by the API for internal management purposes, including managing 
 *   the sg list and allocating memory for the sgl descriptors.  One page should 
 *   be more than enough for that purpose.  Perhaps it's a bit wasteful to use 
 *   a whole page for a single sg list, but most likely there will be only one 
 *   sg list per channel.
 *
 *   Interrupt notes:
 *   Each sgl descriptor has a copy of the DMA control word which the DMA engine
 *   loads in the control register.  The control word has a "global" interrupt 
 *   enable bit for that channel. Interrupts are further qualified by a few bits
 *   in the sgl descriptor count register.  In order to setup an sgl, we have to
 *   know ahead of time whether or not interrupts will be enabled at the completion
 *   of the transfers.  Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST
 *   be called before calling alloc_dma_handle().  If the interrupt mode will never
 *   change after powerup, then enable_dma_interrupt()/disable_dma_interrupt() 
 *   do not have to be called -- interrupts will be enabled or disabled based
 *   on how the channel was configured after powerup by the hw_init_dma_channel()
 *   function.  Each sgl descriptor will be setup to interrupt if an error occurs;
 *   however, only the last descriptor will be setup to interrupt. Thus, an 
 *   interrupt will occur (if interrupts are enabled) only after the complete
 *   sgl transfer is done.
 */
int
ppc4xx_alloc_dma_handle(sgl_handle_t * phandle, unsigned int mode, unsigned int dmanr)
{
	sgl_list_info_t *psgl;
	dma_addr_t dma_addr;
	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
	uint32_t sg_command;
	void *ret;

	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
		printk("ppc4xx_alloc_dma_handle: invalid channel 0x%x\n", dmanr);
		return DMA_STATUS_BAD_CHANNEL;
	}

	if (!phandle) {
		printk("ppc4xx_alloc_dma_handle: null handle pointer\n");
		return DMA_STATUS_NULL_POINTER;
	}

	/* Get a page of memory and make sure it is zeroed. */
	ret = consistent_alloc(GFP_KERNEL, DMA_PPC4xx_SIZE, &dma_addr);
	if (ret == NULL) {
		*phandle = (sgl_handle_t) NULL;
		return DMA_STATUS_OUT_OF_MEMORY;
	}

	memset(ret, 0, DMA_PPC4xx_SIZE);
	psgl = (sgl_list_info_t *) ret;

	psgl->dma_addr = dma_addr;
	psgl->dmanr = dmanr;

	/*
	 * Modify and save the control word. These words will be
	 * written to each sgl descriptor.  The DMA engine then
	 * loads this control word into the control register
	 * every time it reads a new descriptor.
	 */
	psgl->control = p_dma_ch->control;
	/* Clear all mode bits */
	psgl->control &= ~(DMA_TM_MASK | DMA_TD);
	/* Save control word and mode */
	psgl->control |= (mode | DMA_CE_ENABLE);

	/* In MM mode, we must set ETD/TCE */
	if (mode == DMA_MODE_MM)
		psgl->control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;

	if (p_dma_ch->int_enable) {
		/* Enable channel interrupt */
		psgl->control |= DMA_CIE_ENABLE;
	} else {
		psgl->control &= ~DMA_CIE_ENABLE;
	}

	sg_command = mfdcr(DCRN_ASGC);
	switch (dmanr) {
	case 0:
		sg_command |= SSG0_MASK_ENABLE;
		break;
	case 1:
		sg_command |= SSG1_MASK_ENABLE;
		break;
	case 2:
		sg_command |= SSG2_MASK_ENABLE;
		break;
	case 3:
		sg_command |= SSG3_MASK_ENABLE;
		break;
	default:
		printk("ppc4xx_alloc_dma_handle: bad channel: %d\n", dmanr);
		ppc4xx_free_dma_handle((sgl_handle_t) psgl);
		*phandle = (sgl_handle_t) NULL;
		return DMA_STATUS_BAD_CHANNEL;
	}

	/* Enable SGL control access */
	mtdcr(DCRN_ASGC, sg_command);
	psgl->sgl_control = SG_ERI_ENABLE | SG_LINK;

	if (p_dma_ch->int_enable) {
		if (p_dma_ch->tce_enable)
			psgl->sgl_control |= SG_TCI_ENABLE;
		else
			psgl->sgl_control |= SG_ETI_ENABLE;
	}

	*phandle = (sgl_handle_t) psgl;
	return DMA_STATUS_GOOD;
}
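The interrupt notes above impose an ordering on callers: the interrupt mode is baked into the control word of every sgl descriptor, so it must be chosen before the handle is allocated. A hedged usage sketch — the enable_dma_interrupt() name comes from the comment, and the ppc4xx_ prefix is assumed here to match the other calls:

static int example_sgl_setup(unsigned int dmanr)
{
	sgl_handle_t handle;
	int status;

	/* Fix the interrupt mode first; the alloc copies the control
	 * word into every descriptor (assumed name, per the notes above).
	 */
	ppc4xx_enable_dma_interrupt(dmanr);

	status = ppc4xx_alloc_dma_handle(&handle, DMA_MODE_MM, dmanr);
	if (status != DMA_STATUS_GOOD)
		return status;

	/* ... append sgl entries and start the transfer here ... */

	ppc4xx_free_dma_handle(handle);
	return DMA_STATUS_GOOD;
}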
Example #8
/* Initialize the CPM Ethernet on SCC.  If EPPC-Bug loaded us, or performed
 * some other network I/O, a whole bunch of this has already been set up.
 * It is no big deal if we do it again, we just have to disable the
 * transmit and receive to make sure we don't catch the CPM with some
 * inconsistent control information.
 */
int __init scc_enet_init(void)
{
	struct rtnet_device *rtdev = NULL;
	struct scc_enet_private *cep;
	int i, j, k;
	unsigned char	*eap, *ba;
	dma_addr_t	mem_addr;
	bd_t		*bd;
	volatile	cbd_t		*bdp;
	volatile	cpm8xx_t	*cp;
	volatile	scc_t		*sccp;
	volatile	scc_enet_t	*ep;
	volatile	immap_t		*immap;

	cp = cpmp;	/* Get pointer to Communication Processor */

	immap = (immap_t *)(mfspr(IMMR) & 0xFFFF0000);	/* and to internal registers */

	bd = (bd_t *)__res;

	/* Configure the SCC parameters (this was formerly done
	 * by macro definitions).
	 */
	switch (rtnet_scc) {
	case 3:
		CPM_CR_ENET = CPM_CR_CH_SCC3;
		PROFF_ENET  = PROFF_SCC3;
		SCC_ENET    = 2;		/* Index, not number! */
		CPMVEC_ENET = CPMVEC_SCC3;
		break;
	case 2:
		CPM_CR_ENET = CPM_CR_CH_SCC2;
		PROFF_ENET  = PROFF_SCC2;
		SCC_ENET    = 1;		/* Index, not number! */
		CPMVEC_ENET = CPMVEC_SCC2;
		break;
	case 1:
		CPM_CR_ENET = CPM_CR_CH_SCC1;
		PROFF_ENET  = PROFF_SCC1;
		SCC_ENET    = 0;		/* Index, not number! */
		CPMVEC_ENET = CPMVEC_SCC1;
		break;
	default:
		printk(KERN_ERR "enet: SCC%d doesn't exit (check rtnet_scc)\n", rtnet_scc);
		return -1;
	}

	/* Allocate some private information and create an Ethernet device instance.
	*/
	rtdev = rtdev_root = rt_alloc_etherdev(sizeof(struct scc_enet_private));
	if (rtdev == NULL) {
		printk(KERN_ERR "enet: Could not allocate ethernet device.\n");
		return -1;
	}
	rtdev_alloc_name(rtdev, "rteth%d");
	rt_rtdev_connect(rtdev, &RTDEV_manager);
	RTNET_SET_MODULE_OWNER(rtdev);
	rtdev->vers = RTDEV_VERS_2_0;

	cep = (struct scc_enet_private *)rtdev->priv;
	rtdm_lock_init(&cep->lock);

	/* Get pointer to SCC area in parameter RAM.
	*/
	ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);

	/* And another to the SCC register area.
	*/
	sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]);
	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */

	/* Disable receive and transmit in case EPPC-Bug started it.
	*/
	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Cookbook style from the MPC860 manual.....
	 * Not all of this is necessary if EPPC-Bug has initialized
	 * the network.
	 * So far we are lucky, all board configurations use the same
	 * pins, or at least the same I/O Port for these functions.....
	 * It can't last though......
	 */

#if (defined(PA_ENET_RXD) && defined(PA_ENET_TXD))
	/* Configure port A pins for Txd and Rxd.
	*/
	immap->im_ioport.iop_papar |=  (PA_ENET_RXD | PA_ENET_TXD);
	immap->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD);
	immap->im_ioport.iop_paodr &=                ~PA_ENET_TXD;
#elif (defined(PB_ENET_RXD) && defined(PB_ENET_TXD))
	/* Configure port B pins for Txd and Rxd.
	*/
	immap->im_cpm.cp_pbpar |=  (PB_ENET_RXD | PB_ENET_TXD);
	immap->im_cpm.cp_pbdir &= ~(PB_ENET_RXD | PB_ENET_TXD);
	immap->im_cpm.cp_pbodr &=		 ~PB_ENET_TXD;
#else
#error Exactly ONE pair of PA_ENET_[RT]XD, PB_ENET_[RT]XD must be defined
#endif

#if defined(PC_ENET_LBK)
	/* Configure port C pins to disable External Loopback
	 */
	immap->im_ioport.iop_pcpar &= ~PC_ENET_LBK;
	immap->im_ioport.iop_pcdir |=  PC_ENET_LBK;
	immap->im_ioport.iop_pcso  &= ~PC_ENET_LBK;
	immap->im_ioport.iop_pcdat &= ~PC_ENET_LBK;	/* Disable Loopback */
#endif	/* PC_ENET_LBK */

	/* Configure port C pins to enable CLSN and RENA.
	*/
	immap->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA);
	immap->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA);
	immap->im_ioport.iop_pcso  |=  (PC_ENET_CLSN | PC_ENET_RENA);

	/* Configure port A for TCLK and RCLK.
	*/
	immap->im_ioport.iop_papar |=  (PA_ENET_TCLK | PA_ENET_RCLK);
	immap->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK);

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	cp->cp_sicr &= ~SICR_ENET_MASK;
	cp->cp_sicr |=  SICR_ENET_CLKRT;

	/* Manual says set SDDR, but I can't find anything with that
	 * name.  I think it is a misprint, and should be SDCR.  This
	 * has already been set by the communication processor initialization.
	 */

	/* Allocate space for the buffer descriptors in the DP ram.
	 * These are relative offsets in the DP ram address space.
	 * Initialize base addresses for the buffer descriptors.
	 */
	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
	ep->sen_genscc.scc_rbase = i;
	cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[i];

	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
	ep->sen_genscc.scc_tbase = i;
	cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[i];

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	/* Issue init Rx BD command for SCC.
	 * Manual says to perform an Init Rx parameters here.  We have
	 * to perform both Rx and Tx because the SCC may have been
	 * already running.
	 * In addition, we have to do it later because we don't yet have
	 * all of the BD control/status set properly.
	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_RX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);
	 */

	/* Initialize function code registers for big-endian.
	*/
	ep->sen_genscc.scc_rfcr = SCC_EB;
	ep->sen_genscc.scc_tfcr = SCC_EB;

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

	/* Set CRC preset and mask.
	*/
	ep->sen_cpres = 0xffffffff;
	ep->sen_cmask = 0xdebb20e3;

	ep->sen_crcec = 0;	/* CRC Error counter */
	ep->sen_alec = 0;	/* alignment error counter */
	ep->sen_disfc = 0;	/* discard frame counter */

	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
	ep->sen_retlim = 15;	/* Retry limit threshold */

	ep->sen_maxflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
	ep->sen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */

	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */

	/* Clear hash tables.
	*/
	ep->sen_gaddr1 = 0;
	ep->sen_gaddr2 = 0;
	ep->sen_gaddr3 = 0;
	ep->sen_gaddr4 = 0;
	ep->sen_iaddr1 = 0;
	ep->sen_iaddr2 = 0;
	ep->sen_iaddr3 = 0;
	ep->sen_iaddr4 = 0;

	/* Set Ethernet station address.
	 */
	eap = (unsigned char *)&(ep->sen_paddrh);
#ifdef CONFIG_FEC_ENET
	/* We need a second MAC address if FEC is used by Linux */
	for (i=5; i>=0; i--)
		*eap++ = rtdev->dev_addr[i] = (bd->bi_enetaddr[i] | 
					     (i==3 ? 0x80 : 0));
#else
	for (i=5; i>=0; i--)
		*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
#endif

	ep->sen_pper = 0;	/* 'cause the book says so */
	ep->sen_taddrl = 0;	/* temp address (LSB) */
	ep->sen_taddrm = 0;
	ep->sen_taddrh = 0;	/* temp address (MSB) */

	/* Now allocate the host memory pages and initialize the
	 * buffer descriptors.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	k = 0;
	for (i=0; i<CPM_ENET_RX_PAGES; i++) {

		/* Allocate a page.  Bail out if the system is out of
		 * DMA-capable memory; earlier allocations are not
		 * unwound here.
		 */
		ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &mem_addr);
		if (ba == NULL) {
			rtdev_free(rtdev);
			return -ENOMEM;
		}

		/* Initialize the BD for every fragment in the page.
		*/
		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_bufaddr = mem_addr;
			cep->rx_vaddr[k++] = ba;
			mem_addr += CPM_ENET_RX_FRSIZE;
			ba += CPM_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Let's re-initialize the channel now.  We have to do it later
	 * than the manual describes because we have just now finished
	 * the BD initialization.
	 */
	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	cep->skb_cur = cep->skb_dirty = 0;

	sccp->scc_scce = 0xffff;	/* Clear any pending events */

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer we have also set the
	 * interrupt flag.
	 */
	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Install our interrupt handler.
	*/
	rtdev->irq = CPM_IRQ_OFFSET + CPMVEC_ENET;
	rt_stack_connect(rtdev, &STACK_manager);
	if ((i = rtdm_irq_request(&cep->irq_handle, rtdev->irq,
				  scc_enet_interrupt, 0, "rt_mpc8xx_enet", rtdev))) {
		printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq);
		rtdev_free(rtdev);
		return i;
	}
	

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	sccp->scc_gsmrh = 0;
	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	*/
	sccp->scc_dsr = 0xd555;

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	sccp->scc_pmsr = (SCC_PMSR_ENCRC | SCC_PMSR_NIB22);

	/* It is now OK to enable the Ethernet transmitter.
	 * Unfortunately, there are board implementation differences here.
	 */
#if   (!defined (PB_ENET_TENA) &&  defined (PC_ENET_TENA))
	immap->im_ioport.iop_pcpar |=  PC_ENET_TENA;
	immap->im_ioport.iop_pcdir &= ~PC_ENET_TENA;
#elif ( defined (PB_ENET_TENA) && !defined (PC_ENET_TENA))
	cp->cp_pbpar |= PB_ENET_TENA;
	cp->cp_pbdir |= PB_ENET_TENA;
#else
#error Configuration Error: define exactly ONE of PB_ENET_TENA, PC_ENET_TENA
#endif

#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC)
	/* And while we are here, set the configuration to enable ethernet.
	*/
	*((volatile uint *)RPX_CSR_ADDR) &= ~BCSR0_ETHLPBK;
	*((volatile uint *)RPX_CSR_ADDR) |=
			(BCSR0_ETHEN | BCSR0_COLTESTDIS | BCSR0_FULLDPLXDIS);
#endif

#ifdef CONFIG_BSEIP
	/* BSE uses port B and C for PHY control.
	*/
	cp->cp_pbpar &= ~(PB_BSE_POWERUP | PB_BSE_FDXDIS);
	cp->cp_pbdir |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);
	cp->cp_pbdat |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);

	immap->im_ioport.iop_pcpar &= ~PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcdir |= PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcso &= ~PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcdat &= ~PC_BSE_LOOPBACK;
#endif

#ifdef CONFIG_FADS
	cp->cp_pbpar |= PB_ENET_TENA;
	cp->cp_pbdir |= PB_ENET_TENA;

	/* Enable the EEST PHY.
	*/
	*((volatile uint *)BCSR1) &= ~BCSR1_ETHEN;
#endif

	rtdev->base_addr = (unsigned long)ep;

	/* The CPM Ethernet specific entries in the device structure. */
	rtdev->open = scc_enet_open;
	rtdev->hard_start_xmit = scc_enet_start_xmit;
	rtdev->stop = scc_enet_close;
	rtdev->hard_header = &rt_eth_header;
	rtdev->get_stats = scc_enet_get_stats;

	if (!rx_pool_size)
		rx_pool_size = RX_RING_SIZE * 2;
	if (rtskb_pool_init(&cep->skb_pool, rx_pool_size) < rx_pool_size) {
		rtdm_irq_disable(&cep->irq_handle);
		rtdm_irq_free(&cep->irq_handle);
		rtskb_pool_release(&cep->skb_pool);
		rtdev_free(rtdev);
		return -ENOMEM;
	}

	if ((i = rt_register_rtnetdev(rtdev))) {
		printk(KERN_ERR "Couldn't register rtdev\n");
		rtdm_irq_disable(&cep->irq_handle);
		rtdm_irq_free(&cep->irq_handle);
		rtskb_pool_release(&cep->skb_pool);
		rtdev_free(rtdev);
		return i;
	}

	/* And last, enable the transmit and receive processing.
	*/
	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	printk("%s: CPM ENET Version 0.2 on SCC%d, irq %d, addr %02x:%02x:%02x:%02x:%02x:%02x\n", 
	       rtdev->name, SCC_ENET+1, rtdev->irq,
	       rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2],
	       rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]);
	
	return 0;
}
Example #9
static int audio_setup_buf(audio_stream_t * s)
{
	int frag;
	int dmasize = 0;
	char *dmabuf = NULL;
	dma_addr_t dmaphys = 0;

	if (s->buffers)
		return -EBUSY;

	s->buffers = kmalloc(sizeof(audio_buf_t) * s->nbfrags, GFP_KERNEL);
	if (!s->buffers)
		goto err;
	memset(s->buffers, 0, sizeof(audio_buf_t) * s->nbfrags);

	for (frag = 0; frag < s->nbfrags; frag++) {
		audio_buf_t *b = &s->buffers[frag];

		/*
		 * Let's allocate non-cached memory for DMA buffers.
		 * We try to allocate all memory at once.
		 * If this fails (a common reason is memory fragmentation),
		 * then we allocate more smaller buffers.
		 */
		if (!dmasize) {
			dmasize = (s->nbfrags - frag) * s->fragsize;
			do {
			  	dmabuf = consistent_alloc(GFP_KERNEL|GFP_DMA,
							  dmasize,
							  &dmaphys);
				if (!dmabuf)
					dmasize -= s->fragsize;
			} while (!dmabuf && dmasize);
			if (!dmabuf)
				goto err;
			b->master = dmasize;
			memzero(dmabuf, dmasize);
		}

		b->data = dmabuf;
		b->dma_addr = dmaphys;
		DPRINTK("buf %d: start %p dma %#08x master %d fragsize %d\n", 
			frag, b->data, b->dma_addr, b->master, s->fragsize);

		dmabuf += s->fragsize;
		dmaphys += s->fragsize;
		dmasize -= s->fragsize;
	}

	s->usr_head = s->dma_head = s->dma_tail = 0;
	s->bytecount = 0;
	s->fragcount = 0;
	sema_init(&s->sem, s->nbfrags);

	return 0;

err:
	printk(KERN_ERR AUDIO_NAME ": unable to allocate audio memory\n");
	audio_discard_buf(s);
	return -ENOMEM;
}
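The error path relies on audio_discard_buf(), which is not shown here. Because only the first fragment of each consistent_alloc() chunk records the chunk size in b->master, the free side has to walk the fragment array and release exactly those entries. A sketch under that assumption:

static void audio_discard_buf(audio_stream_t *s)
{
	/* Sketch only: fragments with a non-zero ->master own a
	 * consistent_alloc() chunk of that size; free those, then
	 * drop the bookkeeping array.
	 */
	if (s->buffers) {
		int frag;

		for (frag = 0; frag < s->nbfrags; frag++) {
			audio_buf_t *b = &s->buffers[frag];

			if (b->master)
				consistent_free(b->data, b->master,
						b->dma_addr);
		}
		kfree(s->buffers);
		s->buffers = NULL;
	}
}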
Example #10
static int pxa250_irda_start(struct net_device *dev)
{
	struct pxa250_irda *si = dev->priv;
	int err;
	unsigned long flags;
	

	MOD_INC_USE_COUNT;

	__ECHO_IN;
	si->speed = 9600;

	local_irq_save(flags);
	
	err = request_irq(si->fir_irq, pxa250_irda_fir_irq, 0,  dev->name, dev);
	if (err)
		goto err_fir_irq;

	err = request_irq(dev->irq, pxa250_irda_irq, 0, dev->name, dev);
	if (err)
		goto err_irq;

	/*
	 * The interrupt must remain disabled for now.
	 */
	
	disable_irq(dev->irq);
  	disable_irq(si->fir_irq);

	local_irq_restore(flags);


	/* Allocate DMA channel for receiver (not used) */
	err = pxa_request_dma("IrDA receive", DMA_PRIO_LOW, pxa250_irda_rxdma_irq, dev);
	if (err < 0 )
	   goto err_rx_dma;
	si->rxdma_ch=err;

	DRCMRRXICDR = DRCMR_MAPVLD | si->rxdma_ch;
	

	/* Allocate DMA channel for transmit */
	err = pxa_request_dma("IrDA transmit", DMA_PRIO_LOW, pxa250_irda_txdma_irq , dev);
	if (err < 0 )
	   goto err_tx_dma;

	si->txdma_ch=err;

	/*
	 * Make sure that ICP will be able 
	 * to assert the transmit dma request bit
	 * through the peripherals request bus (PREQ)
	 */
	
	DRCMRTXICDR = DRCMR_MAPVLD | si->txdma_ch;

	DBG("rx(not used) channel=%d tx channel=%d\n",si->rxdma_ch,si->txdma_ch);
	
	/* allocate consistent buffers for dma access
	 * buffers have to be aligned and situated in dma capable memory region;
	 */
	si->rxbuf_dma_virt = consistent_alloc(GFP_KERNEL | GFP_DMA, HPSIR_MAX_RXLEN, &si->rxbuf_dma);
	if (!si->rxbuf_dma_virt)
		goto err_rxbuf_dma;

	si->txbuf_dma_virt = consistent_alloc(GFP_KERNEL | GFP_DMA, HPSIR_MAX_TXLEN, &si->txbuf_dma);
	if (!si->txbuf_dma_virt)
		goto err_txbuf_dma;

	/* Allocate skb for receiver */
	err=pxa250_irda_rx_alloc(si);
	if (err)
	   goto err_rx_alloc;
	
	/*
	 * Setup the serial port for the specified config.
	 */
	err = pxa250_irda_startup(dev);
	if (err)
		goto err_startup;

	pxa250_irda_set_speed(dev,si->speed = 9600);


	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa250");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	si->open = 1;
	enable_irq(dev->irq);
	netif_start_queue(dev);
	return 0;

err_irlap:
	si->open = 0;
	pxa250_sir_irda_shutdown(si);
err_startup:
	dev_kfree_skb(si->rxskb);
err_rx_alloc:	
	consistent_free (si->txbuf_dma_virt,HPSIR_MAX_TXLEN,si->txbuf_dma);
err_txbuf_dma:
	consistent_free (si->rxbuf_dma_virt,HPSIR_MAX_RXLEN,si->rxbuf_dma);
err_rxbuf_dma:
	pxa_free_dma(si->txdma_ch);
err_tx_dma:
	pxa_free_dma(si->rxdma_ch);
err_rx_dma:
	free_irq(dev->irq, dev);
err_irq:
	free_irq(si->fir_irq, dev);
err_fir_irq:	
	MOD_DEC_USE_COUNT;
	return err;
}
Example #11
/*
 * This function allocates the DMA descriptor array and buffer data space
 * according to the current number of fragments and fragment size.
 */
static int audio_setup_buf(audio_stream_t * s)
{
	pxa_dma_desc *dma_desc;
	dma_addr_t dma_desc_phys;
	int nb_desc, frag, i, buf_size = 0;
	char *dma_buf = NULL;
	dma_addr_t dma_buf_phys = 0;

	if (s->buffers)
		return -EBUSY;

	/* Our buffer structure array */
	s->buffers = kmalloc(sizeof(audio_buf_t) * s->nbfrags, GFP_KERNEL);
	if (!s->buffers)
		goto err;
	memzero(s->buffers, sizeof(audio_buf_t) * s->nbfrags);

	/* 
	 * Our DMA descriptor array:
	 * for each fragment we have one checkpoint descriptor plus one 
	 * descriptor per MAX_DMA_SIZE-byte data block.
	 */
	nb_desc = (1 + (s->fragsize + MAX_DMA_SIZE - 1)/MAX_DMA_SIZE) * s->nbfrags;
	dma_desc = consistent_alloc(GFP_KERNEL,
				    nb_desc * DMA_DESC_SIZE,
				    &dma_desc_phys);
	if (!dma_desc)
		goto err;
	s->descs_per_frag = nb_desc / s->nbfrags;
	s->buffers->dma_desc = dma_desc;
	s->dma_desc_phys = dma_desc_phys;
	for (i = 0; i < nb_desc - 1; i++)
		dma_desc[i].ddadr = dma_desc_phys + (i + 1) * DMA_DESC_SIZE;
	dma_desc[i].ddadr = dma_desc_phys;

	/* Our actual DMA buffers */
	for (frag = 0; frag < s->nbfrags; frag++) {
		audio_buf_t *b = &s->buffers[frag];

		/*
		 * Let's allocate non-cached memory for DMA buffers.
		 * We try to allocate all memory at once.
		 * If this fails (a common reason is memory fragmentation),
		 * then we'll try allocating smaller buffers.
		 */
		if (!buf_size) {
			buf_size = (s->nbfrags - frag) * s->fragsize;
			do {
				dma_buf = consistent_alloc(GFP_KERNEL,
							   buf_size, 
							   &dma_buf_phys);
				if (!dma_buf)
					buf_size -= s->fragsize;
			} while (!dma_buf && buf_size);
			if (!dma_buf)
				goto err;
			b->master = buf_size;
			memzero(dma_buf, buf_size);
		}

		/* 
		 * Set up our checkpoint descriptor.  Since the count 
		 * is always zero, we'll abuse the dsadr and dtadr fields
		 * just in case this one is picked up by the hardware
		 * while processing SOUND_DSP_GETPTR.
		 */
		dma_desc->dsadr = dma_buf_phys;
		dma_desc->dtadr = dma_buf_phys;
		dma_desc->dcmd = DCMD_ENDIRQEN;
		if (s->output && !s->mapped)
			dma_desc->ddadr |= DDADR_STOP;
		b->dma_desc = dma_desc++;

		/* set up the actual data descriptors */
		for (i = 0; (i * MAX_DMA_SIZE) < s->fragsize; i++) {
			dma_desc[i].dsadr = (s->output) ?
				(dma_buf_phys + i*MAX_DMA_SIZE) : s->dev_addr;
			dma_desc[i].dtadr = (s->output) ?
				s->dev_addr : (dma_buf_phys + i*MAX_DMA_SIZE);
			dma_desc[i].dcmd = s->dcmd |
				((s->fragsize < MAX_DMA_SIZE) ?
					s->fragsize : MAX_DMA_SIZE);
		}
		dma_desc += i;

		/* handle buffer pointers */
		b->data = dma_buf;
		dma_buf += s->fragsize;
		dma_buf_phys += s->fragsize;
		buf_size -= s->fragsize;
	}

	s->usr_frag = s->dma_frag = 0;
	s->bytecount = 0;
	s->fragcount = 0;
	sema_init(&s->sem, (s->output) ? s->nbfrags : 0);
	return 0;

err:
	printk("pxa-audio: unable to allocate audio memory\n ");
	audio_clear_buf(s);
	return -ENOMEM;
}
Example #12
static int __init
probe(int index)
{
	u32 *phys_reg_addr;
	struct xilinxfb_info *i;
	struct page *page, *end_page;

	switch (index) {
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_0_INSTANCE)
	case 0:
		phys_reg_addr = 
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_0_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_1_INSTANCE)
	case 1:
		phys_reg_addr = 
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_1_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_2_INSTANCE)
	case 2:
		phys_reg_addr = 
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_2_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_3_INSTANCE)
#error Edit this file to add more devices.
#endif				/* 3 */
#endif				/* 2 */
#endif				/* 1 */
#endif				/* 0 */
	default:
		return -ENODEV;
	}

	/* Convert DCR register address to OPB address */
	phys_reg_addr = (unsigned *)(((unsigned)phys_reg_addr*4)+0xd0000000);

	/* Allocate the info and zero it out. */
	i = (struct xilinxfb_info *) kmalloc(sizeof (struct xilinxfb_info),
					     GFP_KERNEL);
	if (!i) {
		printk(KERN_ERR "Could not allocate Xilinx "
		       "frame buffer #%d information.\n", index);
		return -ENOMEM;
	}
	memset(i, 0, sizeof (struct xilinxfb_info));

	/* Make it the head of info_list. */
	spin_lock(&info_lock);
	i->next = info_list;
	info_list = i;
	spin_unlock(&info_lock);

	/*
	 * At this point, things are ok for us to call remove_head_info() to
	 * clean up if we run into any problems; i is on info_list and
	 * all the pointers are zeroed because of the memset above.
	 */

	i->fb_virt_start = (unsigned long) consistent_alloc(GFP_KERNEL|GFP_DMA,
							    FB_SIZE,
							    &i->fb_phys);
	if (!i->fb_virt_start) {
		printk(KERN_ERR "Could not allocate frame buffer memory "
		       "for Xilinx device #%d.\n", index);
		remove_head_info();
		return -ENOMEM;
	}

	/*
	 * The 2.4 PPC version of consistent_alloc does not set the
	 * pages reserved.  The pages need to be reserved so that mmap
	 * will work.  This means that we need the following code.  When
	 * consistent_alloc gets fixed, this will no longer be needed.
	 * Note that in 2.4, consistent_alloc doesn't free up the extra
	 * pages either.  This is already fixed in 2.5.
	 */
	page = virt_to_page(__va(i->fb_phys));
	end_page = page + ((FB_SIZE+PAGE_SIZE-1)/PAGE_SIZE);
	while (page < end_page)
		mem_map_reserve(page++);

	/* Clear the frame buffer. */
	memset((void *) i->fb_virt_start, 0, FB_SIZE);

	/* Map the control registers in. */
	i->regs = (u32 *) ioremap((unsigned long) phys_reg_addr, NUM_REGS);

	/* Tell the hardware where the frame buffer is. */
	out_be32(i->regs + REG_FB_ADDR, i->fb_phys);

	/* Turn on the display. */
	out_be32(i->regs + REG_CTRL, REG_CTRL_DEFAULT);

	current_par.var.xres = XRES;
	current_par.var.xres_virtual = XRES_VIRTUAL;
	current_par.var.yres = YRES;
	current_par.var.yres_virtual = YRES_VIRTUAL;
	current_par.var.bits_per_pixel = BITS_PER_PIXEL;

	i->gen.parsize = sizeof (struct xilinxfb_par);
	i->gen.fbhw = &xilinx_switch;

	strcpy(i->gen.info.modename, "Xilinx LCD");
	i->gen.info.changevar = NULL;
	i->gen.info.node = -1;

	i->gen.info.fbops = &xilinxfb_ops;
	i->gen.info.disp = &i->disp;
	i->gen.info.switch_con = &fbgen_switch;
	i->gen.info.updatevar = &fbgen_update_var;
	i->gen.info.blank = &fbgen_blank;
	i->gen.info.flags = FBINFO_FLAG_DEFAULT;

	/* This should give a reasonable default video mode */
	fbgen_get_var(&i->disp.var, -1, &i->gen.info);
	fbgen_do_set_var(&i->disp.var, 1, &i->gen);
	fbgen_set_disp(-1, &i->gen);
	fbgen_install_cmap(0, &i->gen);
	if (register_framebuffer(&i->gen.info) < 0) {
		printk(KERN_ERR "Could not register frame buffer "
		       "for Xilinx device #%d.\n", index);
		remove_head_info();
		return -EINVAL;
	}
	printk(KERN_INFO "fb%d: %s frame buffer at 0x%08X mapped to 0x%08lX\n",
	       GET_FB_IDX(i->gen.info.node), i->gen.info.modename,
	       i->fb_phys, i->fb_virt_start);

	return 0;
}
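The reserve loop above implies a mirrored teardown: the pages must be unreserved before the buffer is handed back, or the 2.4 allocator will complain about freeing reserved pages. A hedged sketch of that cleanup, assuming the (vaddr, size, handle) form of consistent_free():

static void xilinxfb_release_fb(struct xilinxfb_info *i)
{
	struct page *page = virt_to_page(__va(i->fb_phys));
	struct page *end_page = page + ((FB_SIZE + PAGE_SIZE - 1) / PAGE_SIZE);

	/* Undo the mem_map_reserve() loop from probe(). */
	while (page < end_page)
		mem_map_unreserve(page++);

	consistent_free((void *) i->fb_virt_start, FB_SIZE, i->fb_phys);
	iounmap(i->regs);
}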