int main()
{


	XScuTimer_Config *TMRConfigPtr;     //timer config

    //Disable cache on OCM
	Xil_SetTlbAttributes(0xFFFF0000,0x14de2);           // S=b1 TEX=b100 AP=b11, Domain=b1111, C=b0, B=b0
	print("CPU1: starting\n\r");

	//GPIO initialization
	XGpio_Initialize(&Gpio, GPIO_DEVICE_ID);
	XGpio_SetDataDirection(&Gpio, CHANNEL, 0x00);
	XGpio_DiscreteWrite(&Gpio,CHANNEL, 0xff);

    //timer initialization
    TMRConfigPtr = XScuTimer_LookupConfig(TIMER_DEVICE_ID);
    XScuTimer_CfgInitialize(&Timer, TMRConfigPtr,TMRConfigPtr->BaseAddr);
    XScuTimer_SelfTest(&Timer);

	//load the timer
    XScuTimer_LoadTimer(&Timer, TIMER_LOAD_VALUE);

    SetupInterruptSystem(&Intc, &Timer,TIMER_IRPT_INTR);

    XScuTimer_Start(&Timer);
    print("CPU1: configured\n\r");
    while(1){
    	/* Idle: the periodic work happens in the timer interrupt handler
    	 * registered via SetupInterruptSystem() above. */
    }

    return 0;
}
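
For reference, the attribute literal 0x14de2 used here and in several of the examples below can be rebuilt from the ARMv7 short-descriptor section fields named in the comment above. A minimal sketch, with illustrative macro names that are not taken from any Xilinx header:

/* Sketch: composing the 0x14de2 section attribute (S=1, TEX=0b100, AP=0b11,
 * Domain=0b1111, C=0, B=0) from ARMv7 short-descriptor bit positions.
 * Macro names are illustrative only. */
#define SEC_TYPE    (0x2u << 0)    /* bits[1:0] = 0b10: section entry                */
#define SEC_B       (0x0u << 2)    /* B = 0: non-bufferable                          */
#define SEC_C       (0x0u << 3)    /* C = 0: non-cacheable                           */
#define SEC_DOMAIN  (0xFu << 5)    /* Domain = 0b1111                                */
#define SEC_AP      (0x3u << 10)   /* AP[1:0] = 0b11: full access                    */
#define SEC_TEX     (0x4u << 12)   /* TEX = 0b100: normal memory, outer/inner non-cacheable */
#define SEC_S       (0x1u << 16)   /* S = 1: shareable                               */

#define OCM_UNCACHED_ATTR (SEC_TYPE | SEC_B | SEC_C | SEC_DOMAIN | \
                           SEC_AP | SEC_TEX | SEC_S)   /* == 0x14de2 */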
Example #2
int main( void )
{
	Xil_SetTlbAttributes(0xFFFF0000,0x14de2);           // S=b1 TEX=b100 AP=b11, Domain=b1111, C=b0, B=b0


    print("CPU1: init_platform\n\r");

	prvInitializeExceptions();
	init_axi_gpio();
	init_fifo_queues();

	startProductionControl();

	startAdversaryTask();

	// Start the tasks and timer running.
	vTaskStartScheduler();

	// If all is well, the scheduler will now be running and this point should
	// never be reached; the loop below is a fallback that polls the shared
	// COMM_VAL flag set by the other CPU.
	while (1) {
		while (COMM_VAL == 0) {
		}
		// Interrupt-driven variant, kept for reference:
		// if (irq_count > 0) {
		// 	print("CPU1: Hello World With Interrupt CPU 1\n\r");
		// 	irq_count = 0;
		// 	sleep(2);	// delay so the output can be seen
		// } else {
		print("CPU1: Hello World CPU 1\n\r");
		// }
		COMM_VAL = 0;
	}
	return 0;
}
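
COMM_VAL is not declared in this listing; in dual-core AMP demos it is typically a flag placed in the OCM section whose caching was disabled above. A plausible definition, assumed rather than taken from this code:

/* Assumed definition of the shared flag polled above: a word in the uncached
 * OCM page at 0xFFFF0000, visible to both CPUs without cache maintenance.
 * Adjust the address to your own memory map. */
#define COMM_VAL  (*(volatile unsigned long *)(0xFFFF0000))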
int main()
{
	int delay;
	unsigned int inchar;
	unsigned int newlr;

    //Disable cache on OCM
    Xil_SetTlbAttributes(0xFFFF0000,0x14de2);           // S=b1 TEX=b100 AP=b11, Domain=b1111, C=b0, B=b0
    SetupIntrSystem(&IntcInstancePtr);
    print("CPU0: writing startaddress for cpu1\n\r");
    Xil_Out32(CPU1STARTADR, 0x00200000);
    dmb(); // data memory barrier: ensure the start-address write completes before SEV

    print("CPU0: sending the SEV to wake up CPU1\n\r");
    sev();

    while(1){

    	inchar = getchar();
    	newlr = getchar();	// consume the trailing newline/CR
    	//for( delay = 0; delay < OP_DELAY; delay++)	//optional settling delay
    	//{}
    	xil_printf("value %d\r\n", inchar);
    	Xil_Out8(LED_PAT, inchar);

    }

    return 0;
}
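
The supporting definitions for this CPU0 listing are not shown. The sketch below gives values commonly used in the Zynq-7000 AMP flow; CPU1STARTADR is the last OCM word polled by CPU1's boot ROM, while LED_PAT is a purely hypothetical placeholder for whatever register drives the LEDs:

/* Assumed support definitions for the CPU0 listing above (not part of it).
 * On Zynq-7000 the boot ROM parks CPU1 in WFE and reads its start address
 * from the last word of OCM, so CPU0 writes it there and then issues SEV. */
#define CPU1STARTADR  0xFFFFFFF0u          /* last OCM word checked by CPU1's boot ROM */
#define LED_PAT       0x41200000u          /* hypothetical LED register; board specific */
#ifndef dmb
#define dmb()  __asm__ __volatile__ ("dmb" ::: "memory")   /* data memory barrier      */
#endif
#ifndef sev
#define sev()  __asm__ __volatile__ ("sev")                /* send event to wake CPU1  */
#endif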
Example #4
/**
 * \brief LED and audio initialization
 */
void system_init()
{
	Xil_Out32(OLED_BASE_ADDR,0xff);
	OLED_Init();							   //OLED initialization
	IicConfig(XPAR_XIICPS_0_DEVICE_ID);
	AudioPllConfig(); 						   //enable core clock for ADAU1761
	AudioConfigure();
	DMA_Congfig(DMA_DEV_ID);
	//Disable cache on OCM
	Xil_SetTlbAttributes(0x00000000,0x14de2);

}
Example #5
void *metal_machine_io_mem_map(void *va, metal_phys_addr_t pa,
			       size_t size, unsigned int flags)
{
	unsigned long section_offset;
	unsigned long ttb_addr;
#if defined (__aarch64__)
	unsigned long ttb_size = (pa < 4*GB) ? 2*MB : 1*GB;
#else
	unsigned long ttb_size = 1*MB;
#endif

	if (!flags)
		return va;

	/* Ensure alignment on a section boundary */
	pa &= ~(ttb_size-1UL);

	/* Loop through entire region of memory (one MMU section at a time).
	   Each section requires a TTB entry. */
	for (section_offset = 0; section_offset < size; ) {
		/* Calculate translation table entry for this memory section */
		ttb_addr = (pa + section_offset);

		/* Write translation table entry value to entry address */
		Xil_SetTlbAttributes(ttb_addr, flags);

#if defined (__aarch64__)
		/* Recalculate the section size if we started below 4 GB and have crossed above it in 64-bit mode */
		if ( ttb_addr >= 4*GB ) {
			ttb_size = 1*GB;
		}
#endif
		section_offset += ttb_size;
	}

	return va;
}
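
A minimal usage sketch for the mapping routine above, with an assumed peripheral base address; since the function forwards flags straight to Xil_SetTlbAttributes, any section attribute value can be passed:

/* Usage sketch (assumed address and attribute): map one section covering a
 * peripheral window as uncached. On Cortex-A9 the section size is 1 MB. */
static void map_peripheral_uncached(void)
{
	const metal_phys_addr_t pa = 0x43C00000;   /* hypothetical AXI peripheral base */

	/* flags is passed straight through to Xil_SetTlbAttributes() above */
	void *va = metal_machine_io_mem_map((void *)pa, pa, 1 * MB, 0x14de2);
	(void)va;   /* identity mapping: va == (void *)pa on this platform */
}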
Example #6
XStatus init_dma(struct xemac_s *xemac)
{
	XEmacPs_Bd bdtemplate;
	XEmacPs_BdRing *rxringptr, *txringptr;
	XEmacPs_Bd *rxbd;
	struct pbuf *p;
	XStatus status;
	s32_t i;
	u32_t bdindex;
	volatile u32_t tempaddress;

	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];

	/*
	 * The BDs need to be allocated in uncached memory. Hence the 1 MB
	 * address range allocated for Bd_Space is made uncached
	 * by setting appropriate attributes in the translation table.
	 * The Bd_Space is aligned to 1MB and has a size of 1 MB. This ensures
	 * a reserved uncached area used only for BDs.
	 */
	if (bd_space_attr_set == 0) {
		Xil_SetTlbAttributes((s32_t)bd_space, 0xc02); // 0xc02: section, AP=0b11, TEX=C=B=0 (uncached)
		bd_space_attr_set = 1;
	}

	rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
	txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
	LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
	LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));

	/* Allocate 64k for Rx and Tx bds each to take care of extreme cases */
	tempaddress = (u32_t)&(bd_space[bd_space_index]);
	xemacpsif->rx_bdspace = (void *)tempaddress;
	bd_space_index += 0x10000;
	tempaddress = (u32_t)&(bd_space[bd_space_index]);
	xemacpsif->tx_bdspace = (void *)tempaddress;
	bd_space_index += 0x10000;

	LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n", xemacpsif->rx_bdspace));
	LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n", xemacpsif->tx_bdspace));

	if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
		xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
				__FILE__, __LINE__);
		return ERR_IF;
	}

	/*
	 * Setup RxBD space.
	 *
	 * Setup a BD template for the Rx channel. This template will be copied to
	 * every RxBD. We will not have to explicitly set these again.
	 */
	XEmacPs_BdClear(&bdtemplate);

	/*
	 * Create the RxBD ring
	 */

	status = XEmacPs_BdRingCreate(rxringptr, (u32) xemacpsif->rx_bdspace,
				(u32) xemacpsif->rx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_RX_DESC);

	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
		return ERR_IF;
	}

	status = XEmacPs_BdRingClone(rxringptr, &bdtemplate, XEMACPS_RECV);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
		return ERR_IF;
	}

	XEmacPs_BdClear(&bdtemplate);
	XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);
	/*
	 * Create the TxBD ring
	 */
	status = XEmacPs_BdRingCreate(txringptr, (u32) xemacpsif->tx_bdspace,
				(u32) xemacpsif->tx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_TX_DESC);

	if (status != XST_SUCCESS) {
		return ERR_IF;
	}

	/* We reuse the bd template, as the same one will work for both rx and tx. */
	status = XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
	if (status != XST_SUCCESS) {
		return ERR_IF;
	}

	/*
	 * Allocate RX descriptors, 1 RxBD at a time.
	 */
	for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			printf("unable to alloc pbuf in init_dma\r\n");
			return ERR_IF;
		}
		status = XEmacPs_BdRingAlloc(rxringptr, 1, &rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
			pbuf_free(p);
			return ERR_IF;
		}
		/* Enqueue to HW */
		status = XEmacPs_BdRingToHw(rxringptr, 1, rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
			pbuf_free(p);
			XEmacPs_BdRingUnAlloc(rxringptr, 1, rxbd);
			return ERR_IF;
		}

		Xil_DCacheInvalidateRange((u32_t)p->payload, (u32_t)XEMACPS_MAX_FRAME_SIZE);
		XEmacPs_BdSetAddressRx(rxbd, (u32_t)p->payload);

		bdindex = XEMACPS_BD_TO_INDEX(rxringptr, rxbd);
		rx_pbufs_storage[bdindex] = (s32_t)p;
	}

	/*
	 * Connect the device driver handler that will be called when an
	 * interrupt for the device occurs, the handler defined above performs
	 * the specific interrupt processing for the device.
	 */
	XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
				(Xil_ExceptionHandler)XEmacPs_IntrHandler,
						(void *)&xemacpsif->emacps);
	/*
	 * Enable the interrupt for emacps.
	 */
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
	emac_intr_num = (u32) xtopologyp->scugic_emac_intr;
	return 0;
}
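
The bd_space storage referenced above is declared elsewhere in the port. A sketch consistent with the comment about a 1 MB, 1 MB-aligned region (names and sizes assumed):

/* Assumed backing storage for the BD rings above: a 1 MB, 1 MB-aligned region
 * so that Xil_SetTlbAttributes() retargets exactly one MMU section. */
static u8_t bd_space[0x100000] __attribute__ ((aligned (0x100000)));
static u32_t bd_space_index = 0;
static u32_t bd_space_attr_set = 0;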
/**
* This example performs a scatter-gather transfer in polled mode.
*
* @param	DeviceId is the Device Id of the XAxiCdma instance
*
* @return
* 		- XST_SUCCESS if example finishes successfully
* 		- XST_FAILURE if error occurs
*
* @note		None
*
******************************************************************************/
int XAxiCdma_SgPollExample(u16 DeviceId)
{
	XAxiCdma_Config *CfgPtr;
	int Status;
	u8 *SrcPtr;
	u8 *DstPtr;

	SrcPtr = (u8 *)TransmitBufferPtr;
	DstPtr = (u8 *)ReceiveBufferPtr;

#ifdef __aarch64__
	Xil_SetTlbAttributes(BD_SPACE_BASE, MARK_UNCACHEABLE);
#endif

	/* Initialize the XAxiCdma device.
	 */
	CfgPtr = XAxiCdma_LookupConfig(DeviceId);
	if (!CfgPtr) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Cannot find config structure for device %d\r\n",
			XPAR_AXICDMA_0_DEVICE_ID);

		return XST_FAILURE;
	}

	Status = XAxiCdma_CfgInitialize(&AxiCdmaInstance, CfgPtr,
		CfgPtr->BaseAddress);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Initialization failed with %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Setup the BD ring
	 */
	Status = SetupTransfer(&AxiCdmaInstance);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Setup BD ring failed with %d\r\n", Status);

		return XST_FAILURE;
	}

	Done = 0;
	Error = 0;

	/* Start the DMA transfer
	 */
	Status = DoTransfer(&AxiCdmaInstance);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Do transfer failed with %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Wait until the DMA transfer is done or error occurs
	 */
	while ((CheckCompletion(&AxiCdmaInstance) < NUMBER_OF_BDS_TO_TRANSFER)
		&& !Error) {
		/* Wait */
	}

	if(Error) {
		int TimeOut = RESET_LOOP_COUNT;

		xdbg_printf(XDBG_DEBUG_ERROR, "Transfer has error %x\r\n",
				Error);

		/* Need to reset the hardware to restore to the correct state
		 */
		XAxiCdma_Reset(&AxiCdmaInstance);

		while (TimeOut) {
			if (XAxiCdma_ResetIsDone(&AxiCdmaInstance)) {
				break;
			}
			TimeOut -= 1;
		}

		/* Reset has failed, print a message to notify the user
		 */
		if (!TimeOut) {
			xdbg_printf(XDBG_DEBUG_ERROR,
			    "Reset hardware failed with %d\r\n", Status);

		}

		return XST_FAILURE;
	}

	/* Transfer completed successfully, check data
	 */
	Status = CheckData(SrcPtr, DstPtr,
		MAX_PKT_LEN * NUMBER_OF_BDS_TO_TRANSFER);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR, "Check data failed for sg "
		    "transfer\r\n");
		return XST_FAILURE;
	}

	/* Test finished successfully, return success
	 */
	return XST_SUCCESS;
}
int XAxiCdma_SgIntrExample(XScuGic *IntcInstancePtr, XAxiCdma *InstancePtr,
				u16 DeviceId, u32 IntrId)
{
	XAxiCdma_Config *CfgPtr;
	int Status;
	u8 *SrcPtr;
	u8 *DstPtr;

	SrcPtr = (u8 *)TransmitBufferPtr;
	DstPtr = (u8 *)ReceiveBufferPtr;

#ifdef __aarch64__
	Xil_SetTlbAttributes(BD_SPACE_BASE, MARK_UNCACHEABLE);
#endif

	/* Initialize the XAxiCdma device.
	 */
	CfgPtr = XAxiCdma_LookupConfig(DeviceId);
	if (!CfgPtr) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Cannot find config structure for device %d\r\n",
			XPAR_AXICDMA_0_DEVICE_ID);

		return XST_FAILURE;
	}

	Status = XAxiCdma_CfgInitialize(InstancePtr, CfgPtr,
						CfgPtr->BaseAddress);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Initialization failed with %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Setup the BD ring
	 */
	Status = SetupTransfer(InstancePtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Setup BD ring failed with %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Setup the interrupt system
	 */
	Status = SetupIntrSystem(IntcInstancePtr, InstancePtr, IntrId);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Setup Intr system failed with %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Enable completion and error interrupts
	 */
	XAxiCdma_IntrEnable(InstancePtr, XAXICDMA_XR_IRQ_ALL_MASK);

	/* Start the DMA transfer
	 */
	Status = DoTransfer(InstancePtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Do transfer failed with %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Wait until the DMA transfer is done
	 */

	while ((Done < NUMBER_OF_BDS_TO_TRANSFER) && !Error) {
		/* Wait */
	}

	if(Error) {
		xdbg_printf(XDBG_DEBUG_ERROR, "Transfer has error %x\r\n",
		    (unsigned int)XAxiCdma_GetError(InstancePtr));

		DisableIntrSystem(IntcInstancePtr, IntrId);

		return XST_FAILURE;
	}

	/* Transfer completes successfully, check data
	 */
	Status = CheckData(SrcPtr, DstPtr,
		MAX_PKT_LEN * NUMBER_OF_BDS_TO_TRANSFER);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR, "Check data failed for sg "
		    "transfer\r\n");

		DisableIntrSystem(IntcInstancePtr, IntrId);
		return XST_FAILURE;
	}

	/* Test finished successfully, clean up and return
	 */
	DisableIntrSystem(IntcInstancePtr, IntrId);

	return XST_SUCCESS;
}
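
A hypothetical top-level caller for the interrupt-driven example above; the instance names and the interrupt-ID macro are assumptions and should be taken from the generated xparameters.h:

/* Hypothetical caller (instance names and CDMA_INTR_ID are assumed; take the
 * real device and interrupt IDs from xparameters.h for your design). */
static XScuGic IntcController;
static XAxiCdma CdmaInstance;

int run_sg_intr_example(void)
{
	int Status = XAxiCdma_SgIntrExample(&IntcController, &CdmaInstance,
					    XPAR_AXICDMA_0_DEVICE_ID,
					    CDMA_INTR_ID /* fabric interrupt ID from xparameters.h */);

	xil_printf("AXI CDMA SG interrupt example: %s\r\n",
		   (Status == XST_SUCCESS) ? "PASSED" : "FAILED");
	return Status;
}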
Example #9
XStatus init_dma(struct xemac_s *xemac)
{
	XEmacPs_Bd BdTemplate;
	XEmacPs_BdRing *RxRingPtr, *TxRingPtr;
	XEmacPs_Bd *rxbd;
	struct pbuf *p;
	XStatus Status;
	int i;
	unsigned int BdIndex;
	char *endAdd = &_end;
	/*
	 * Align the BD start address to a 1 MB boundary.
	 */
	char *endAdd_aligned = (char *)(((int)endAdd + 0x100000) & (~0xFFFFF));
	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];

	/*
	 * The BDs need to be allocated in uncached memory. Hence the 1 MB
	 * address range that starts at endAdd_aligned is made uncached
	 * by setting appropriate attributes in the translation table.
	 */
	Xil_SetTlbAttributes((int)endAdd_aligned, 0xc02); // 0xc02: section, AP=0b11, TEX=C=B=0 (uncached)

	RxRingPtr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
	TxRingPtr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
	LWIP_DEBUGF(NETIF_DEBUG, ("RxRingPtr: 0x%08x\r\n", RxRingPtr));
	LWIP_DEBUGF(NETIF_DEBUG, ("TxRingPtr: 0x%08x\r\n", TxRingPtr));

	xemacpsif->rx_bdspace = (void *)endAdd_aligned;
	/*
	 * We allocate 65536 bytes for Rx BDs, which can accommodate a
	 * maximum of 8192 BDs, far more than any application
	 * will ever need.
	 */
	xemacpsif->tx_bdspace = (void *)(endAdd_aligned + 0x10000);

	LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n",
												xemacpsif->rx_bdspace));
	LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n",
												xemacpsif->tx_bdspace));

	if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
		xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
				__FILE__, __LINE__);
		return XST_FAILURE;
	}

	/*
	 * Setup RxBD space.
	 *
	 * Setup a BD template for the Rx channel. This template will be copied to
	 * every RxBD. We will not have to explicitly set these again.
	 */
	XEmacPs_BdClear(&BdTemplate);

	/*
	 * Create the RxBD ring
	 */

	Status = XEmacPs_BdRingCreate(RxRingPtr, (u32) xemacpsif->rx_bdspace,
				(u32) xemacpsif->rx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_RX_DESC);

	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
		return XST_FAILURE;
	}

	Status = XEmacPs_BdRingClone(RxRingPtr, &BdTemplate, XEMACPS_RECV);
	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
		return XST_FAILURE;
	}

	XEmacPs_BdClear(&BdTemplate);
	XEmacPs_BdSetStatus(&BdTemplate, XEMACPS_TXBUF_USED_MASK);
	/*
	 * Create the TxBD ring
	 */
	Status = XEmacPs_BdRingCreate(TxRingPtr, (u32) xemacpsif->tx_bdspace,
				(u32) xemacpsif->tx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_TX_DESC);

	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* We reuse the bd template, as the same one will work for both rx and tx. */
	Status = XEmacPs_BdRingClone(TxRingPtr, &BdTemplate, XEMACPS_SEND);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/*
	 * Allocate RX descriptors, 1 RxBD at a time.
	 */
	for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
		Status = XEmacPs_BdRingAlloc(RxRingPtr, 1, &rxbd);
		if (Status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
			return XST_FAILURE;
		}

		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in init_dma\r\n"));
			return XST_FAILURE;
		}

		XEmacPs_BdSetAddressRx(rxbd, (u32)p->payload);

		BdIndex = XEMACPS_BD_TO_INDEX(RxRingPtr, rxbd);
		rx_pbufs_storage[BdIndex] = (int)p;

		/* Enqueue to HW */
		Status = XEmacPs_BdRingToHw(RxRingPtr, 1, rxbd);
		if (Status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
			return XST_FAILURE;
		}
	}

	/*
	 * Connect the device driver handler that will be called when an
	 * interrupt for the device occurs, the handler defined above performs
	 * the specific interrupt processing for the device.
	 */
	XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
				(Xil_ExceptionHandler)XEmacPs_IntrHandler,
						(void *)&xemacpsif->emacps);
	/*
	 * Enable the interrupt for emacps.
	 */
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
	EmacIntrNum = (u32) xtopologyp->scugic_emac_intr;
	return 0;
}
Example #10
/**
*
* This function demonstrates the usage of the EmacPs driver by sending
* and receiving frames in interrupt-driven DMA mode.
*
*
* @param	IntcInstancePtr is a pointer to the instance of the Intc driver.
* @param	EmacPsInstancePtr is a pointer to the instance of the EmacPs
*		driver.
* @param	EmacPsDeviceId is the Device ID of the EmacPs device, typically
*		XPAR_<EMACPS_instance>_DEVICE_ID value from xparameters.h.
* @param	EmacPsIntrId is the Interrupt ID and is typically
*		XPAR_<EMACPS_instance>_INTR value from xparameters.h.
*
* @return	XST_SUCCESS to indicate success, otherwise XST_FAILURE.
*
* @note		None.
*
*****************************************************************************/
int EmacPsDmaIntrExample(XScuGic * IntcInstancePtr,
			  XEmacPs * EmacPsInstancePtr,
			  u16 EmacPsDeviceId,
			  u16 EmacPsIntrId)
{
	int Status;
	XEmacPs_Config *Config;
	XEmacPs_Bd BdTemplate;
#ifndef PEEP
	u32 SlcrTxClkCntrl;
#endif

	/*************************************/
	/* Setup device for first-time usage */
	/*************************************/

	/* SLCR unlock */
	*(volatile unsigned int *)(SLCR_UNLOCK_ADDR) = SLCR_UNLOCK_KEY_VALUE;
#ifdef PEEP
	*(volatile unsigned int *)(SLCR_GEM0_CLK_CTRL_ADDR) =
						SLCR_GEM_1G_CLK_CTRL_VALUE;
	*(volatile unsigned int *)(SLCR_GEM1_CLK_CTRL_ADDR) =
						SLCR_GEM_1G_CLK_CTRL_VALUE;
#else
	if (EmacPsIntrId == XPS_GEM0_INT_ID) {
#ifdef XPAR_PS7_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0
		/* GEM0 1G clock configuration*/
		SlcrTxClkCntrl =
		*(volatile unsigned int *)(SLCR_GEM0_CLK_CTRL_ADDR);
		SlcrTxClkCntrl &= EMACPS_SLCR_DIV_MASK;
		SlcrTxClkCntrl |= (XPAR_PS7_ETHERNET_0_ENET_SLCR_1000MBPS_DIV1 << 20);
		SlcrTxClkCntrl |= (XPAR_PS7_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0 << 8);
		*(volatile unsigned int *)(SLCR_GEM0_CLK_CTRL_ADDR) =
								SlcrTxClkCntrl;
#endif
	} else if (EmacPsIntrId == XPS_GEM1_INT_ID) {
#ifdef XPAR_PS7_ETHERNET_1_ENET_SLCR_1000MBPS_DIV1
		/* GEM1 1G clock configuration*/
		SlcrTxClkCntrl =
		*(volatile unsigned int *)(SLCR_GEM1_CLK_CTRL_ADDR);
		SlcrTxClkCntrl &= EMACPS_SLCR_DIV_MASK;
		SlcrTxClkCntrl |= (XPAR_PS7_ETHERNET_1_ENET_SLCR_1000MBPS_DIV1 << 20);
		SlcrTxClkCntrl |= (XPAR_PS7_ETHERNET_1_ENET_SLCR_1000MBPS_DIV0 << 8);
		*(volatile unsigned int *)(SLCR_GEM1_CLK_CTRL_ADDR) =
								SlcrTxClkCntrl;
#endif
	}
#endif
	/* SLCR lock */
	*(volatile unsigned int *)(SLCR_LOCK_ADDR) = SLCR_LOCK_KEY_VALUE;
	sleep(1);

	/*
	 *  Initialize instance. Should be configured for DMA
	 *  This example calls _CfgInitialize instead of _Initialize due to
	 *  retiring _Initialize. So in _CfgInitialize we use
	 *  XPAR_(IP)_BASEADDRESS to make sure it is not virtual address.
	 */
	Config = XEmacPs_LookupConfig(EmacPsDeviceId);

	Status = XEmacPs_CfgInitialize(EmacPsInstancePtr, Config,
					Config->BaseAddress);

	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error in initialize");
		return XST_FAILURE;
	}

	/*
	 * Set the MAC address
	 */
	Status = XEmacPs_SetMacAddress(EmacPsInstancePtr, EmacPsMAC, 1);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error setting MAC address");
		return XST_FAILURE;
	}

	/*
	 * Setup callbacks
	 */
	Status = XEmacPs_SetHandler(EmacPsInstancePtr,
				     XEMACPS_HANDLER_DMASEND,
				     (void *) XEmacPsSendHandler,
				     EmacPsInstancePtr);
	Status |=
		XEmacPs_SetHandler(EmacPsInstancePtr,
				    XEMACPS_HANDLER_DMARECV,
				    (void *) XEmacPsRecvHandler,
				    EmacPsInstancePtr);
	Status |=
		XEmacPs_SetHandler(EmacPsInstancePtr, XEMACPS_HANDLER_ERROR,
				    (void *) XEmacPsErrorHandler,
				    EmacPsInstancePtr);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error assigning handlers");
		return XST_FAILURE;
	}

	/*
	 * The BDs need to be allocated in uncached memory. Hence the 1 MB
	 * address range that starts at address 0xFF00000 is made uncached.
	 */
	Xil_SetTlbAttributes(0x0FF00000, 0xc02);
	/*
	 * Setup RxBD space.
	 *
	 * We have already defined a properly aligned area of memory to store
	 * RxBDs at the beginning of this source code file so just pass its
	 * address into the function. No MMU is being used so the physical
	 * and virtual addresses are the same.
	 *
	 * Setup a BD template for the Rx channel. This template will be
	 * copied to every RxBD. We will not have to explicitly set these
	 * again.
	 */
	XEmacPs_BdClear(&BdTemplate);

	/*
	 * Create the RxBD ring
	 */
	Status = XEmacPs_BdRingCreate(&(XEmacPs_GetRxRing
				       (EmacPsInstancePtr)),
				       RX_BD_LIST_START_ADDRESS,
				       RX_BD_LIST_START_ADDRESS,
				       XEMACPS_BD_ALIGNMENT,
				       RXBD_CNT);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap
			("Error setting up RxBD space, BdRingCreate");
		return XST_FAILURE;
	}

	Status = XEmacPs_BdRingClone(&(XEmacPs_GetRxRing(EmacPsInstancePtr)),
				      &BdTemplate, XEMACPS_RECV);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap
			("Error setting up RxBD space, BdRingClone");
		return XST_FAILURE;
	}

	/*
	 * Setup TxBD space.
	 *
	 * Like RxBD space, we have already defined a properly aligned area
	 * of memory to use.
	 *
	 * Also like the RxBD space, we create a template. Notice we don't
	 * set the "last" attribute. The example will be overriding this
	 * attribute so it does no good to set it up here.
	 */
	XEmacPs_BdClear(&BdTemplate);
	XEmacPs_BdSetStatus(&BdTemplate, XEMACPS_TXBUF_USED_MASK);

	/*
	 * Create the TxBD ring
	 */
	Status = XEmacPs_BdRingCreate(&(XEmacPs_GetTxRing
				       (EmacPsInstancePtr)),
				       TX_BD_LIST_START_ADDRESS,
				       TX_BD_LIST_START_ADDRESS,
				       XEMACPS_BD_ALIGNMENT,
				       TXBD_CNT);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap
			("Error setting up TxBD space, BdRingCreate");
		return XST_FAILURE;
	}
	Status = XEmacPs_BdRingClone(&(XEmacPs_GetTxRing(EmacPsInstancePtr)),
				      &BdTemplate, XEMACPS_SEND);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap
			("Error setting up TxBD space, BdRingClone");
		return XST_FAILURE;
	}

	/*
	 * Set emacps to phy loopback
	 */
#ifndef PEEP /* For Zynq board */
	XEmacPs_SetMdioDivisor(EmacPsInstancePtr, MDC_DIV_224);
	sleep(1);
#endif
	EmacPsUtilEnterLoopback(EmacPsInstancePtr, EMACPS_LOOPBACK_SPEED_1G);
	XEmacPs_SetOperatingSpeed(EmacPsInstancePtr, EMACPS_LOOPBACK_SPEED_1G);

	/*
	 * Setup the interrupt controller and enable interrupts
	 */
	Status = EmacPsSetupIntrSystem(IntcInstancePtr,
					EmacPsInstancePtr, EmacPsIntrId);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error setting up interrupt system");
		return XST_FAILURE;
	}
	/*
	 * Run the EmacPs DMA Single Frame Interrupt example
	 */
	Status = EmacPsDmaSingleFrameIntrExample(EmacPsInstancePtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/*
	 * Disable the interrupts for the EmacPs device
	 */
	EmacPsDisableIntrSystem(IntcInstancePtr, EmacPsIntrId);

	/*
	 * Stop the device
	 */
	XEmacPs_Stop(EmacPsInstancePtr);

	return XST_SUCCESS;
}
Example #11
void set_tlb_attr(u32 addr, u32 attr)
{
        Xil_SetTlbAttributes(addr, attr);
}
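
A short usage sketch for the wrapper, with an assumed shared-memory address; the attribute value is the same normal, non-cacheable section encoding used in the earlier examples:

/* Usage sketch (address assumed): mark one 1 MB DDR section non-cacheable
 * before sharing it with another master, using the wrapper above. */
void make_shared_section_uncached(void)
{
        set_tlb_attr(0x3E000000, 0x14de2);   /* S=1 TEX=0b100 AP=0b11 C=0 B=0 */
}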