Code Example #1
int
enable_ch_irq (_dma_channel_info *pCh)
{
	int chan_no = (int) (pCh - dma_chan);	/* index of this channel in the global channel table */
	unsigned long flag;			/* local_irq_save() expects an unsigned long */

	local_irq_save (flag);
	*DANUBE_DMA_CS = chan_no;		/* select the channel */
	*DANUBE_DMA_CIE = 0x4a;			/* per-channel interrupt enable mask (value from the original source) */
	*DANUBE_DMA_IRNEN |= 1 << chan_no;	/* unmask the channel in the global interrupt enable register */
	local_irq_restore (flag);
	enable_danube_irq (pCh->irq);
	return IFX_SUCCESS;
}
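enable_ch_irq() selects the channel via DANUBE_DMA_CS, programs the per-channel interrupt enables, and unmasks the channel both in DANUBE_DMA_IRNEN and at the interrupt controller. For context, a minimal sketch of the complementary disable path follows; it assumes the same register usage, takes disable_danube_irq() from the commented-out call in Code Example #3, and the CIE clear value of 0 is an assumption rather than the driver's actual code.

/* Hedged sketch only: a disable counterpart to enable_ch_irq(), under the
 * assumptions stated above. */
static int
disable_ch_irq_sketch (_dma_channel_info *pCh)
{
	int chan_no = (int) (pCh - dma_chan);
	unsigned long flag;

	local_irq_save (flag);
	*DANUBE_DMA_CS = chan_no;		/* select the channel */
	*DANUBE_DMA_CIE = 0x00;			/* assumed: clear the per-channel interrupt enables */
	*DANUBE_DMA_IRNEN &= ~(1 << chan_no);	/* mask the channel in the global enable register */
	local_irq_restore (flag);
	disable_danube_irq (pCh->irq);
	return IFX_SUCCESS;
}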
Code Example #2
void
rx_chan_intr_handler (int chan_no)
{
	_dma_device_info *pDev =
		(_dma_device_info *) dma_chan[chan_no].dma_dev;
	_dma_channel_info *pCh = &dma_chan[chan_no];
	struct rx_desc *rx_desc_p;
	int tmp;
	unsigned long flag;	/* local_irq_save() expects an unsigned long */

	/*handle command complete interrupt */
	rx_desc_p = (struct rx_desc *) pCh->desc_base + pCh->curr_desc;
#if !defined(ENABLE_DANUBE_ETHERNET_D2) || !ENABLE_DANUBE_ETHERNET_D2
	if (rx_desc_p->status.field.OWN == CPU_OWN
	    && rx_desc_p->status.field.C
	    && rx_desc_p->status.field.data_length < 1536)
#else
	if (rx_desc_p->status.field.OWN == CPU_OWN
	    && rx_desc_p->status.field.C)
#endif
	{
		/* everything is correct; inform the upper layer */
		pDev->current_rx_chan = pCh->rel_chan_no;
		if (pDev->intr_handler)
			pDev->intr_handler (pDev, RCV_INT);
		pCh->weight--;
	}
	else {
		/* the descriptor is not yet complete or failed the length
		 * check above: acknowledge the channel interrupt status and
		 * re-enable the IRQ line */
		local_irq_save (flag);
		tmp = *DANUBE_DMA_CS;		/* save the current channel selection */
		*DANUBE_DMA_CS = chan_no;
		*DANUBE_DMA_CIS |= 0x7e;	/* acknowledge (clear) the channel interrupt status bits */
		*DANUBE_DMA_CS = tmp;		/* restore the previous selection */
		enable_danube_irq (dma_chan[chan_no].irq);
		if (rx_desc_p->status.field.OWN != CPU_OWN)
			g_danube_dma_int_status &= ~(1 << chan_no);	/* nothing pending on this channel */
		local_irq_restore (flag);
//		enable_danube_irq (dma_chan[chan_no].irq);
	}
}
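rx_chan_intr_handler() hands a completed receive descriptor to the client through pDev->intr_handler(pDev, RCV_INT). A minimal sketch of a client-side callback with that shape follows; the int return type, the function name, and the handling shown are assumptions for illustration inferred from the call site above, not the driver's actual interface definition.

/* Hedged sketch only: a client callback of the shape implied by the call
 * pDev->intr_handler(pDev, RCV_INT) above. */
static int
example_intr_handler (_dma_device_info *pDev, int status)
{
	if (status == RCV_INT) {
		/* pDev->current_rx_chan identifies the relative RX channel
		 * that completed; a real client would fetch the packet from
		 * that channel here and pass it up the stack. */
	}
	return 0;
}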
Code Example #3
int
dma_device_register (_dma_device_info * dev)
{
	int result = IFX_SUCCESS;
	int i, j;
	int chan_no = 0;
	u8 *buffer;
	int byte_offset;
	unsigned long flag;	/* local_irq_save() expects an unsigned long */
	_dma_device_info *pDev;
	_dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;
#if 0
	if (strcmp (dev->device_name, "MCTRL0") == 0 || strcmp (dev->device_name, "MCTRL1") == 0) {	/*select the port */
		*DANUBE_DMA_PS = 4;
		/*set port parameters */
		*DANUBE_DMA_PCTRL |= 1 << 16;	/*flush memcopy */
	}
#endif
	for (i = 0; i < dev->max_tx_chan_num; i++) {
		pCh = dev->tx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON) {
			chan_no = (int) (pCh - dma_chan);
			for (j = 0; j < pCh->desc_len; j++) {
				tx_desc_p =
					(struct tx_desc *) pCh->desc_base + j;
				memset (tx_desc_p, 0,
					sizeof (struct tx_desc));
			}
			local_irq_save (flag);
			*DANUBE_DMA_CS = chan_no;
#if defined(ENABLE_DANUBE_ETHERNET_D2) && ENABLE_DANUBE_ETHERNET_D2
			/*check if the descriptor base is changed */
			if (*DANUBE_DMA_CDBA !=
			    (u32) CPHYSADDR (pCh->desc_base))
				*DANUBE_DMA_CDBA =
					(u32) CPHYSADDR (pCh->desc_base);
#endif
			/*check if the descriptor length is changed */
			if (*DANUBE_DMA_CDLEN != pCh->desc_len)
				*DANUBE_DMA_CDLEN = pCh->desc_len;

			*DANUBE_DMA_CCTRL &= ~1;	/* switch the channel off */
			*DANUBE_DMA_CCTRL |= 2;		/* request a channel reset... */
			while (*DANUBE_DMA_CCTRL & 2)
				;			/* ...and wait for the reset bit to clear */
			//disable_danube_irq(pCh->irq);
			//*DANUBE_DMA_CIE=0x0a;
			*DANUBE_DMA_IRNEN |= 1 << chan_no;	/* unmask the channel in the global interrupt enable register */
			*DANUBE_DMA_CCTRL = 0x30100;	/* reset and configure the channel; it is enabled later */
			local_irq_restore (flag);
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++) {
		pCh = dev->rx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON) {
			chan_no = (int) (pCh - dma_chan);

			for (j = 0; j < pCh->desc_len; j++) {
				rx_desc_p =
					(struct rx_desc *) pCh->desc_base + j;
				pDev = (_dma_device_info *) (pCh->dma_dev);
				buffer = pDev->buffer_alloc (pCh->packet_size,
							     &byte_offset,
							     (void *) &(pCh->opt[j]));
				if (!buffer)
					break;
#ifndef CONFIG_MIPS_UNCACHED
				/* tc.chen: invalidate cache */
				dma_cache_inv ((unsigned long) buffer,
					       pCh->packet_size);
#endif

				rx_desc_p->Data_Pointer =
					(u32) CPHYSADDR ((u32) buffer);
				rx_desc_p->status.word = 0;
				rx_desc_p->status.field.byte_offset =
					byte_offset;
				rx_desc_p->status.field.OWN = DMA_OWN;
				rx_desc_p->status.field.data_length =
					pCh->packet_size;
			}

			local_irq_save (flag);
			*DANUBE_DMA_CS = chan_no;
#if defined(ENABLE_DANUBE_ETHERNET_D2) && ENABLE_DANUBE_ETHERNET_D2
			/*check if the descriptor base is changed */
			if (*DANUBE_DMA_CDBA !=
			    (u32) CPHYSADDR (pCh->desc_base))
				*DANUBE_DMA_CDBA =
					(u32) CPHYSADDR (pCh->desc_base);
#endif
			/*check if the descriptor length is changed */
			if (*DANUBE_DMA_CDLEN != pCh->desc_len)
				*DANUBE_DMA_CDLEN = pCh->desc_len;
			*DANUBE_DMA_CCTRL &= ~1;	/* switch the channel off */
			*DANUBE_DMA_CCTRL |= 2;		/* request a channel reset... */
			while (*DANUBE_DMA_CCTRL & 2)
				;			/* ...and wait for the reset bit to clear */
			*DANUBE_DMA_CIE = 0x0A;	/* FIXME: should all the interrupts be enabled here? */
			*DANUBE_DMA_IRNEN |= 1 << chan_no;	/* unmask the channel in the global interrupt enable register */
			*DANUBE_DMA_CCTRL = 0x30000;	/* channel configuration word; the channel is not enabled here */
			local_irq_restore (flag);
			enable_danube_irq (dma_chan[chan_no].irq);
		}
	}
	return result;
}
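dma_device_register() expects the client to have filled in the channel table, the buffer_alloc callback, and the interrupt handler before it is called. The sketch below shows the kind of setup that satisfies the fields the function reads; my_buffer_alloc(), example_setup(), the GFP_ATOMIC allocation policy, the zero byte offset, and the callback prototypes (inferred from the call sites in the listing) are assumptions for illustration, and example_intr_handler() refers to the sketch after Code Example #2.

/* Hedged sketch only: client-side setup before dma_device_register(). */
static u8 *
my_buffer_alloc (int len, int *byte_offset, void *opt)
{
	*byte_offset = 0;			/* assumed: no extra offset needed */
	return kmalloc (len, GFP_ATOMIC);	/* assumed allocation policy */
}

static void
example_setup (_dma_device_info *dev)
{
	int i;

	dev->buffer_alloc = my_buffer_alloc;
	dev->intr_handler = example_intr_handler;
	for (i = 0; i < dev->max_tx_chan_num; i++)
		dev->tx_chan[i]->control = DANUBE_DMA_CH_ON;
	for (i = 0; i < dev->max_rx_chan_num; i++)
		dev->rx_chan[i]->control = DANUBE_DMA_CH_ON;
	dma_device_register (dev);
}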
Code Example #4
static void end_danube_irq(unsigned int irq)
{
	/* called at the end of interrupt handling: re-enable the line
	 * unless it has been disabled or is still being serviced */
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		enable_danube_irq(irq);
	}
}
Code Example #5
static unsigned int startup_danube_irq(unsigned int irq)
{
	enable_danube_irq(irq);	/* unmask the line when the IRQ is first set up */
	return 0; /* never anything pending */
}
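startup_danube_irq() and end_danube_irq() follow the 2.6-era hw_interrupt_type model, in which the generic IRQ layer calls per-controller startup/enable/disable/end hooks. Assuming that model, the sketch below shows how these hooks might be wired together; the descriptor name and the shutdown/ack choices are assumptions, and disable_danube_irq() is taken from the commented-out call in Code Example #3.

/* Hedged sketch only: wiring the hooks above into a 2.6-era
 * hw_interrupt_type descriptor, under the assumptions stated above. */
static struct hw_interrupt_type danube_irq_type = {
	.typename = "DANUBE",
	.startup  = startup_danube_irq,
	.shutdown = disable_danube_irq,	/* assumed: reuse the disable hook */
	.enable   = enable_danube_irq,
	.disable  = disable_danube_irq,
	.ack      = disable_danube_irq,	/* assumed: mask the line on ack */
	.end      = end_danube_irq,
};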