static void MACNET_rx_free
   (
         /* [IN] the PCB to enqueue */
      PCB_PTR  pcb_ptr
   )
{ 
   ENET_CONTEXT_STRUCT_PTR     enet_ptr = (ENET_CONTEXT_STRUCT_PTR)pcb_ptr->PRIVATE;
   MACNET_CONTEXT_STRUCT_PTR   macnet_context_ptr = (MACNET_CONTEXT_STRUCT_PTR) enet_ptr->MAC_CONTEXT_PTR;
   #if ENETCFG_SUPPORT_FRAGMENTED_PCBS
   uint_32                     i;
   #endif

   /*
   ** This function can be called from any context, and it needs mutual
   ** exclusion with itself.
   */
   MACNET_int_disable();

   #if ENETCFG_SUPPORT_FRAGMENTED_PCBS
      for (i=0;pcb_ptr->FRAG[i].FRAGMENT;i++) {
         ENET_Enqueue_Buffer((pointer*)&macnet_context_ptr->RX_BUFFERS, pcb_ptr->FRAG[i].FRAGMENT );
         pcb_ptr->FRAG[i].FRAGMENT = NULL;
      }
   #else   
      ENET_Enqueue_Buffer((pointer*)&macnet_context_ptr->RX_BUFFERS, pcb_ptr->FRAG[0].FRAGMENT );
      pcb_ptr->FRAG[0].FRAGMENT = NULL;
   #endif
   
   QADD(macnet_context_ptr->RxPCBHead, macnet_context_ptr->RxPCBTail, pcb_ptr);

   MACNET_add_buffers_to_rx_ring(macnet_context_ptr);
   MACNET_int_enable();
}
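
/*
** The QADD() macro used above is not shown in this section; it appears to
** append a PCB to a simple head/tail singly linked queue.  The sketch below
** is a hypothetical equivalent, assuming the PCBs are chained through their
** PRIVATE field while they sit in the driver's queue; the real MQX macro may
** link them differently.
*/
#define EXAMPLE_QADD(head, tail, pcb)      \
   do {                                    \
      if ((head) == NULL) {                \
         (head) = (pcb);                   \
      } else {                             \
         (tail)->PRIVATE = (pcb);          \
      }                                    \
      (tail) = (pcb);                      \
      (pcb)->PRIVATE = NULL;               \
   } while (0)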
void MACNET_add_buffers_to_rx_ring(MACNET_CONTEXT_STRUCT_PTR   macnet_context_ptr)
{ 
   ENET_MemMapPtr            macnet_ptr = macnet_context_ptr->MACNET_ADDRESS;
   VENET_BD_STRUCT_PTR       bd_ptr;
   uchar_ptr buf;

   if (macnet_ptr == NULL) return;

   /*
   ** This function can be called from any context, and it needs mutual
   ** exclusion with itself.
   */
   MACNET_int_disable();
   
   while (macnet_context_ptr->ActiveRxBDs < macnet_context_ptr->NumRxBDs) {

      bd_ptr = &macnet_context_ptr->MACNET_RX_RING_PTR[macnet_context_ptr->LastRxBD];

      _DCACHE_INVALIDATE_MBYTES((pointer)bd_ptr, sizeof(ENET_BD_STRUCT));
      
      /* Initialize the descriptor and give it to the MACNET */
      buf = (uchar_ptr)HOST_TO_BE_LONG((uint_32)ENET_Dequeue_Buffer((pointer *) &macnet_context_ptr->RX_BUFFERS));
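      /* Note: the buffer address is stored byte-swapped for the descriptor; the NULL
      ** check below still works because a byte-swapped NULL is still zero. */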
      if (buf == NULL) {
         MACNET_int_enable();
         return;
      }
      
      bd_ptr->BUFFER = buf;
      bd_ptr->LENGTH = 0;

      /*
      ** The processor doesn't clear control bits when errors don't occur;
      ** it just sets bits when errors do occur.  So we clear all the
      ** control bits before enqueueing the descriptor.
      */

      bd_ptr->CONTROL &= HOST_TO_BE_SHORT_CONST(ENET_BD_ETHER_RX_WRAP);

      /*
      ** Advance to the next receive descriptor and count it as active.
      */
      BD_INC(macnet_context_ptr->LastRxBD, macnet_context_ptr->NumRxBDs);
      macnet_context_ptr->ActiveRxBDs++;

      bd_ptr->CONTROL |= HOST_TO_BE_SHORT_CONST(ENET_BD_ETHER_RX_EMPTY);

      /* If enhanced BDs are in use, allow interrupt generation */
      bd_ptr->CONTROL_EXT0 |= HOST_TO_BE_SHORT_CONST(ENET_BD_EXT0_ETHER_RX_GENERATE_INTR);

      _DCACHE_FLUSH_MBYTES((pointer)bd_ptr, sizeof(ENET_BD_STRUCT));  

      /*
      ** Signals the MACNET that empty buffers are available.
      */
      macnet_ptr->RDAR = ENET_RDAR_RDAR_MASK;
   }
   MACNET_int_enable();
} 
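
/*
** BD_INC() is assumed to be a wrap-around increment of a ring index.  A
** minimal hypothetical equivalent (EXAMPLE_BD_INC is not the real MQX macro,
** which may be defined differently):
*/
#define EXAMPLE_BD_INC(index, limit)   ((index) = ((index) + 1) % (limit))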
static void MACNET_rx_free_specific
   (
      MACNET_CONTEXT_STRUCT_PTR   macnet_context_ptr,
         /* [IN] the PCB to enqueue */
      PCB_PTR     pcb_ptr,
      pointer *   list
   )
{ 
   /*
   ** This function can be called from any context, and it needs mutual
   ** exclusion with itself.
   */
   MACNET_int_disable();
   ENET_Enqueue_Buffer(list, pcb_ptr->FRAG[0].FRAGMENT );
   pcb_ptr->FRAG[0].FRAGMENT = NULL;
   QADD(macnet_context_ptr->RxPCBHead, macnet_context_ptr->RxPCBTail, pcb_ptr);
   MACNET_int_enable();
}
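
/*
** ENET_Enqueue_Buffer()/ENET_Dequeue_Buffer() appear to maintain simple LIFO
** free lists of buffers.  A minimal sketch under that assumption is shown
** below (EXAMPLE_enqueue/EXAMPLE_dequeue are hypothetical names; the real MQX
** helpers may store the link word differently):
*/
static void EXAMPLE_enqueue(pointer *q, pointer buffer)
{
   /* Link the buffer to the current head and make it the new head */
   *(pointer *)buffer = *q;
   *q = buffer;
}

static pointer EXAMPLE_dequeue(pointer *q)
{
   pointer buffer = *q;
   if (buffer != NULL) {
      /* Unlink the head buffer */
      *q = *(pointer *)buffer;
   }
   return buffer;
}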
uint32_t MACNET_initialize
   (
      ENET_CONTEXT_STRUCT_PTR enet_ptr
   )
{ 
   MACNET_CONTEXT_STRUCT_PTR  macnet_context_ptr=NULL;
   ENET_MemMapPtr             macnet_ptr;
   VENET_BD_STRUCT_PTR        bd_ptr;
   unsigned char                  *buf_ptr;
   MACNET_RX_PCB_PTR          pcb_ptr;
   uint32_t                   i, rxsize, txsize, ssize, lsize, pcbsize,rx_pcb_size, large_packet_size, rcr;
   uint32_t                   timeout, error = ENET_OK;
   bool                    bOK;
#if ENETCFG_SUPPORT_PTP
   MACNET_PTP_PRIVATE_PTR     macnet_ptp_ptr = NULL;
#endif /* ENETCFG_SUPPORT_PTP */

   // Initialize the MACNET I/O Pins
   MACNET_io_init(enet_ptr->PARAM_PTR->ENET_IF->MAC_NUMBER);
   
   macnet_ptr = MACNET_get_base_address(enet_ptr->PARAM_PTR->ENET_IF->MAC_NUMBER);
   if (macnet_ptr == NULL) {
      return ENETERR_INVALID_DEVICE;
   }
   
   
   // Currently limit the number of TX BDs to 32, since the free function uses a 32-bit bitmask.
   if (enet_ptr->PARAM_PTR->NUM_TX_ENTRIES > 32) {
      return ENETERR_INVALID_INIT_PARAM;
   }
   
   /*
   ** This function can be called from any context, and it needs mutual
   ** exclusion with itself.
   */
   MACNET_int_disable();

   macnet_context_ptr = _mem_alloc_system_zero(sizeof(MACNET_CONTEXT_STRUCT));
   IF_ERROR_EXIT((NULL==macnet_context_ptr), ENETERR_ALLOC_MAC_CONTEXT);
   _mem_set_type((void *)macnet_context_ptr, MEM_TYPE_IO_ENET_MAC_CONTEXT_STRUCT);
   
   enet_ptr->MAC_CONTEXT_PTR = (void *) macnet_context_ptr;  

   macnet_context_ptr->MACNET_ADDRESS = macnet_ptr;
   macnet_context_ptr->PHY_PTR = MACNET_get_base_address(enet_ptr->PARAM_PTR->ENET_IF->PHY_NUMBER);

   /* Reset the chip */
   macnet_ptr->ECR = ENET_ECR_RESET_MASK;

   /* Wait until the reset completes */
   timeout = 0;
   while ((macnet_ptr->ECR & ENET_ECR_RESET_MASK) && (timeout<MACNET_RESET_TIMEOUT)){
      _time_delay(1);
      timeout++;
   } 
   IF_ERROR_EXIT((macnet_ptr->ECR & ENET_ECR_RESET_MASK), ENETERR_INIT_FAILED);
   
   /* Disable all MACNET interrupts */
   macnet_ptr->EIMR = 0;

   /* Clear any pending interrupts */
   macnet_ptr->EIR = ENET_EIR_ALL_PENDING;

   macnet_context_ptr->NumRxBDs = enet_ptr->PARAM_PTR->NUM_RX_ENTRIES;
   macnet_context_ptr->NumTxBDs = enet_ptr->PARAM_PTR->NUM_TX_ENTRIES;
     
   // Compute aligned buffer sizes 
   if (enet_ptr->PARAM_PTR->TX_BUFFER_SIZE) {
      macnet_context_ptr->AlignedTxBufferSize = MACNET_TX_ALIGN(enet_ptr->PARAM_PTR->TX_BUFFER_SIZE);
   } else {
      macnet_context_ptr->AlignedTxBufferSize = MACNET_TX_ALIGN(enet_ptr->MaxTxFrameSize);
   }

   if (enet_ptr->PARAM_PTR->RX_BUFFER_SIZE) {
      macnet_context_ptr->AlignedRxBufferSize = MACNET_RX_ALIGN(enet_ptr->PARAM_PTR->RX_BUFFER_SIZE);
   } else {
      macnet_context_ptr->AlignedRxBufferSize = MACNET_RX_ALIGN(enet_ptr->MaxRxFrameSize);
   }
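   // MACNET_TX_ALIGN()/MACNET_RX_ALIGN() are assumed to round a size or address up to the
   // controller's required alignment; a hypothetical equivalent (EXAMPLE_ALIGN is not a real
   // MQX macro) would be:
   //    #define EXAMPLE_ALIGN(x, align)   (((x) + (align) - 1) & ~((align) - 1))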

   // Allocate the Transmit and Receive buffer descriptors
   // TODO: rework to use an aligned-allocation function
#if BSPCFG_HAS_SRAM_POOL && BSPCFG_ENET_SRAM_BUF
   bd_ptr = (VENET_BD_STRUCT_PTR)_mem_alloc_system_zero_from(_BSP_sram_pool, (sizeof(ENET_BD_STRUCT)*(macnet_context_ptr->NumRxBDs+macnet_context_ptr->NumTxBDs))+MACNET_BD_ALIGNMENT);
#else
   bd_ptr = (VENET_BD_STRUCT_PTR)_mem_alloc_system_zero_uncached((sizeof(ENET_BD_STRUCT)*(macnet_context_ptr->NumRxBDs+macnet_context_ptr->NumTxBDs))+MACNET_BD_ALIGNMENT);
#endif

   IF_ERROR_EXIT((NULL == bd_ptr), ENETERR_ALLOC_BD);
   _mem_set_type((void *)bd_ptr, MEM_TYPE_IO_BD_STRUCT);

   macnet_context_ptr->UNALIGNED_RING_PTR = (void *) bd_ptr;
   macnet_context_ptr->MACNET_RX_RING_PTR = (VENET_BD_STRUCT_PTR) MACNET_BD_ALIGN((uint32_t)bd_ptr);
   macnet_context_ptr->MACNET_TX_RING_PTR = &macnet_context_ptr->MACNET_RX_RING_PTR[macnet_context_ptr->NumRxBDs];

   /* Set wrap bit in last BD */
   macnet_context_ptr->MACNET_RX_RING_PTR[macnet_context_ptr->NumRxBDs - 1].CONTROL = HOST_TO_BE_SHORT_CONST(ENET_BD_ETHER_RX_WRAP);
   macnet_context_ptr->MACNET_TX_RING_PTR[macnet_context_ptr->NumTxBDs - 1].CONTROL = HOST_TO_BE_SHORT_CONST(ENET_BD_ETHER_TX_WRAP);

   macnet_context_ptr->AvailableTxBDs = macnet_context_ptr->NumTxBDs;

   // Allocate array to hold Transmit PCB pointers while they are queued for transmission
   macnet_context_ptr->TxPCBS_PTR = (PCB_PTR *) _mem_alloc_system_zero(sizeof(PCB_PTR)*macnet_context_ptr->NumTxBDs);
   IF_ERROR_EXIT((NULL==macnet_context_ptr->TxPCBS_PTR), ENETERR_ALLOC_PCB);
   _mem_set_type((void *)macnet_context_ptr->TxPCBS_PTR, MEM_TYPE_IO_PCB_PTR);

   // Allocate the Receive PCBs
   rx_pcb_size = sizeof(MACNET_RX_PCB);
   pcbsize = enet_ptr->PARAM_PTR->NUM_RX_PCBS * rx_pcb_size;
   macnet_context_ptr->RX_PCB_BASE = (MACNET_RX_PCB_PTR) _mem_alloc_system_zero(pcbsize);
   IF_ERROR_EXIT((NULL==macnet_context_ptr->RX_PCB_BASE), ENETERR_ALLOC_PCB);
   _mem_set_type((void *)macnet_context_ptr->RX_PCB_BASE, MEM_TYPE_IO_PCB_STRUCT);
       
   // Allocate the Transmit and Receive buffers
   txsize = (enet_ptr->PARAM_PTR->NUM_TX_BUFFERS * macnet_context_ptr->AlignedTxBufferSize) + MACNET_TX_BUFFER_ALIGNMENT;
   rxsize = (enet_ptr->PARAM_PTR->NUM_RX_BUFFERS  * macnet_context_ptr->AlignedRxBufferSize) +MACNET_RX_BUFFER_ALIGNMENT;
   ssize  = (enet_ptr->PARAM_PTR->NUM_SMALL_BUFFERS * MACNET_SMALL_PACKET_SIZE);
   large_packet_size = enet_ptr->PARAM_PTR->OPTIONS&ENET_OPTION_VLAN ? ENET_FRAMESIZE_VLAN : ENET_FRAMESIZE;
   lsize  = (enet_ptr->PARAM_PTR->NUM_LARGE_BUFFERS * large_packet_size);

#if BSPCFG_HAS_SRAM_POOL && BSPCFG_ENET_SRAM_BUF
   buf_ptr = _mem_alloc_system_from(_BSP_sram_pool, rxsize + txsize + ssize + lsize);
#else
   buf_ptr = _mem_alloc_system_uncached(rxsize + txsize + ssize + lsize);  // temporary fix for cache problems... previously _mem_alloc_system
#endif
   
   IF_ERROR_EXIT((NULL==buf_ptr), ENETERR_ALLOC_BUFFERS);
   _mem_set_type(buf_ptr, MEM_TYPE_IO_ENET_BUFFERS);

   macnet_context_ptr->UNALIGNED_BUFFERS = buf_ptr;

   // Align to TX buffer boundary
   buf_ptr = (unsigned char *)MACNET_TX_ALIGN((uint32_t)buf_ptr);
   
   // Queue buffers on the TX buffer queue.
   macnet_context_ptr->TX_BUFFERS = NULL;
   for (i=0;i<enet_ptr->PARAM_PTR->NUM_TX_BUFFERS;i++) {
      ENET_Enqueue_Buffer((void **)&macnet_context_ptr->TX_BUFFERS, buf_ptr);
      buf_ptr += macnet_context_ptr->AlignedTxBufferSize;
   }
   
   // Align to RX buffer boundary
   buf_ptr = (unsigned char *)MACNET_RX_ALIGN((uint32_t)buf_ptr);
  
   // Queue buffers on the RX buffer queue.
   macnet_context_ptr->RX_BUFFERS = NULL;
   for (i=0;i<enet_ptr->PARAM_PTR->NUM_RX_BUFFERS;i++) {
      ENET_Enqueue_Buffer((void **)&macnet_context_ptr->RX_BUFFERS, buf_ptr);
      buf_ptr += macnet_context_ptr->AlignedRxBufferSize;
   }

   // Queue small packets on small buffer Q.
   for (i=0;i<enet_ptr->PARAM_PTR->NUM_SMALL_BUFFERS;i++) {
      ENET_Enqueue_Buffer((void **)&macnet_context_ptr->SMALL_BUFFERS, buf_ptr);
      buf_ptr += MACNET_SMALL_PACKET_SIZE;
   }

   // Queue large packets on large buffer Q.
   for (i=0;i<enet_ptr->PARAM_PTR->NUM_LARGE_BUFFERS;i++) {
      ENET_Enqueue_Buffer((void **)&macnet_context_ptr->LARGE_BUFFERS, buf_ptr);
      buf_ptr += large_packet_size;
   }

   // Enqueue the RX PCBs onto the receive PCB queue 
   pcb_ptr = macnet_context_ptr->RX_PCB_BASE;
   for (i = 0; i < enet_ptr->PARAM_PTR->NUM_RX_PCBS; i++) {
      QADD(macnet_context_ptr->RxPCBHead, macnet_context_ptr->RxPCBTail, (PCB_PTR) pcb_ptr);
      pcb_ptr++;
   } 

   // Fill up the receive ring 
   MACNET_add_buffers_to_rx_ring(macnet_context_ptr);

   /* Program this station's Ethernet physical address */
   macnet_ptr->PALR = (uint32_t)((enet_ptr->ADDRESS[0] << 24)|(enet_ptr->ADDRESS[1] << 16)|(enet_ptr->ADDRESS[2] << 8)|(enet_ptr->ADDRESS[3] << 0));
   macnet_ptr->PAUR = (uint32_t)((enet_ptr->ADDRESS[4] << 24)|(enet_ptr->ADDRESS[5] << 16));
   // Clear the individual hash table registers
   macnet_ptr->IAUR = 0;
   macnet_ptr->IALR = 0;

   // Clear the group hash table registers                                
   macnet_ptr->GAUR = 0;
   macnet_ptr->GALR = 0;  

   /* Program receive buffer size */
   macnet_ptr->MRBR = macnet_context_ptr->AlignedRxBufferSize;     

   // Configure start of Rx and Tx BD rings
   macnet_ptr->RDSR = (uint32_t)(macnet_context_ptr->MACNET_RX_RING_PTR);
   macnet_ptr->TDSR = (uint32_t)(macnet_context_ptr->MACNET_TX_RING_PTR);

   // Set receive frame size.
   // NOTE: Oddly, the Receive Control Register (RCR) affects the transmit side too.  The RCR is used to decide
   //       whether the transmitter is babbling, so if the RX buffer size is smaller than the TX buffer size we
   //       can get babbling-transmitter errors when RCR is set to the maximum receive frame length.  We really
   //       have no choice but to set RCR to either ENET_FRAMESIZE or ENET_FRAMESIZE_VLAN.
   //

   rcr = ENET_RCR_MII_MODE_MASK | (ENET_RCR_MAX_FL_MASK & (large_packet_size << ENET_RCR_MAX_FL_SHIFT));
   if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_RMII) {
      rcr |= ENET_RCR_RMII_MODE_MASK;
   } else if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_7WIRE) {
      rcr &= ~ENET_RCR_MII_MODE_MASK;
   }
   if (enet_ptr->PARAM_PTR->MODE & ENET_10M)
   {
        rcr |= ENET_RCR_RMII_10T_MASK;
   }
   macnet_ptr->RCR =  rcr;
   

   if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_MAC_LOOPBACK) {
      macnet_ptr->RCR |= ENET_RCR_LOOP_MASK;
   }
   //  Set Full/Half duplex based on mode.
   if (enet_ptr->PARAM_PTR->MODE & ENET_HALF_DUPLEX) {
      macnet_ptr->TCR = 0; // half duplex
   } else {
      macnet_ptr->TCR = 4; // full duplex
   }
   
   // Program the MII management clock (MDC) divider in the MSCR register
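   // Worked example (illustrative values only): with a hypothetical 60 MHz bus clock and a
   // 2.5 MHz PHY_MII_SPEED target, 60000000/2500000 = 24; the "+ 1" and "& ~1" together round
   // the quotient up to an even value, so 24 stays 24 (an odd quotient such as 23 would also become 24).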
   i = (MACNET_device[enet_ptr->PARAM_PTR->ENET_IF->MAC_NUMBER].BUS_CLOCK / enet_ptr->PARAM_PTR->ENET_IF->PHY_MII_SPEED + 1) & ~1;
   if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_NO_PREAMBLE) {
      i |= ENET_MSCR_DIS_PRE_MASK;
   }
   macnet_context_ptr->PHY_PTR->MSCR = i;

   // Zero counters
   macnet_ptr->MIBC |= ENET_MIBC_MIB_CLEAR_MASK;

   // Install the ISRs
   bOK = MACNET_install_isrs( enet_ptr, &MACNET_device[enet_ptr->PARAM_PTR->ENET_IF->MAC_NUMBER] );
   IF_ERROR_EXIT(!bOK, ENETERR_INSTALL_ISR);
          
   // Unmask transmit/receive interrupts.
   // NOTE: We would need to enable both RXF and RXB but only TXB, because RXB is not generated along with RXF,
   //       whereas TXB is generated along with TXF.
   //       However, on the 52259, enabling RXB results in an HBERR interrupt; RXB is not required, so leave it disabled.
   macnet_ptr->EIMR = ENET_EIR_TXB_MASK | ENET_EIR_RXF_MASK; 

   // Enable MACNET 
   macnet_ptr->ECR = (ENET_ECR_ETHEREN_MASK | ENET_ECR_EN1588_MASK);// | 0x100;

   // Discover PHY address if PHY_DISCOVER option is set
   if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_PHY_DISCOVER) {
      bOK = (*enet_ptr->PARAM_PTR->ENET_IF->PHY_IF->DISCOVER)(enet_ptr);
      IF_ERROR_EXIT(!bOK, ENETERR_INIT_FAILED);
   } else {
      // Set Phy address from initialization parameter
      enet_ptr->PHY_ADDRESS = enet_ptr->PARAM_PTR->ENET_IF->PHY_ADDRESS;
   }

    /******* Support of TCP/IP offload engine.*******/
    /* Transmit Accelerator Function Configuration. */
    macnet_ptr->TACC = 0;
#if BSPCFG_ENET_HW_TX_IP_CHECKSUM
    if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_HW_TX_IP_CHECKSUM)
    {
        macnet_ptr->TACC |= ENET_TACC_IPCHK_MASK;   /* Insert IP header checksum.*/
    }
#endif
#if BSPCFG_ENET_HW_TX_PROTOCOL_CHECKSUM
    if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_HW_TX_PROTOCOL_CHECKSUM)
    {
        macnet_ptr->TACC |= ENET_TACC_PROCHK_MASK;  /* Insert Protocol checksum.*/
    }
#endif
    /* Receive Accelerator Function Configuration. */
    macnet_ptr->RACC = 0;
#if BSPCFG_ENET_HW_RX_IP_CHECKSUM
    if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_HW_RX_IP_CHECKSUM)
    {
        macnet_ptr->RACC |= ENET_RACC_IPDIS_MASK;   /* Discard of frames with wrong IPv4 header checksum.*/
    }
#endif
#if BSPCFG_ENET_HW_RX_PROTOCOL_CHECKSUM
    if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_HW_RX_PROTOCOL_CHECKSUM)
    {
        macnet_ptr->RACC |= ENET_RACC_PRODIS_MASK;  /* Discard of frames with wrong protocol checksum.*/
    }  
#endif 
#if BSPCFG_ENET_HW_RX_MAC_ERR
    if (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_HW_RX_MAC_ERR)
    {
        macnet_ptr->RACC |= ENET_RACC_LINEDIS_MASK; /* Discard of frames with MAC layer errors.*/
    } 
#endif                
    if (
#if BSPCFG_ENET_HW_TX_IP_CHECKSUM || BSPCFG_ENET_HW_TX_PROTOCOL_CHECKSUM
        (enet_ptr->PARAM_PTR->OPTIONS & (ENET_OPTION_HW_TX_IP_CHECKSUM|ENET_OPTION_HW_TX_PROTOCOL_CHECKSUM))||
#endif
        (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_STORE_AND_FORW) )
    {
        /* Transmit FIFO Watermark Register.
         * The TFWR[STRFWD] bit must be set to use the checksum feature. 
         * In this case, the MAC starts to transmit data only when a complete 
         * frame is stored in the transmit FIFO.*/
        macnet_ptr->TFWR = ENET_TFWR_STRFWD_MASK;
    }
    if (
#if BSPCFG_ENET_HW_RX_IP_CHECKSUM || BSPCFG_ENET_HW_RX_PROTOCOL_CHECKSUM
        (enet_ptr->PARAM_PTR->OPTIONS & (ENET_OPTION_HW_RX_IP_CHECKSUM|ENET_OPTION_HW_RX_PROTOCOL_CHECKSUM))||
#endif
        (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_STORE_AND_FORW) )
    {
        /* Discarding is only available when the RX FIFO operates in store and forward
         * mode (RSFL cleared).*/
        macnet_ptr->RSFL = 0;
    }

    // Perform Phy initialization
    bOK = (*enet_ptr->PARAM_PTR->ENET_IF->PHY_IF->INIT)(enet_ptr);
    IF_ERROR_EXIT(!bOK, ENETERR_INIT_FAILED);

   // Signal the MACNET that empty receive buffers are available.
   // NOTE: this must be done AFTER the MACNET has been enabled.
   macnet_ptr->RDAR = ENET_RDAR_RDAR_MASK;
   


#if ENETCFG_SUPPORT_PTP
   macnet_ptp_ptr = _mem_alloc_system_zero(sizeof(MACNET_PTP_PRIVATE));
   IF_ERROR_EXIT((NULL==macnet_ptp_ptr), ENETERR_ALLOC_MAC_CONTEXT);


   macnet_ptp_ptr->MACNET_PTR = macnet_context_ptr->MACNET_ADDRESS;
   macnet_ptp_ptr->PTIMER_PRESENT = 1;

   bOK = _lwevent_create(&(macnet_ptp_ptr->LWEVENT_PTP), 0);
   IF_ERROR_EXIT(bOK, ENETERR_1588_LWEVENT);
   
   macnet_ptp_ptr->TXSTAMP = (MACNET_PTP_TIME){0,0};
   macnet_ptp_ptr->L2PCKS_PTR = NULL;
   
   macnet_context_ptr->PTP_PRIV = macnet_ptp_ptr;
   MACNET_ptp_init(enet_ptr);
   
   if (macnet_ptp_ptr->PTIMER_PRESENT) {
      /* Set Timer count */
      MACNET_ptp_start(macnet_ptp_ptr, (enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_PTP_MASTER_CLK));
      if(enet_ptr->PARAM_PTR->OPTIONS & ENET_OPTION_PTP_MASTER_CLK)
          MACNET_ptp_set_master_base_address(macnet_ptr);
      macnet_ptp_ptr->MACNET_PTR->ECR |= ENET_ECR_EN1588_MASK;

   }
   /* Enable timer-relative interrupts */
   macnet_ptp_ptr->MACNET_PTR->EIMR |= (ENET_EIR_TS_TIMER_MASK | ENET_EIR_TS_AVAIL_MASK);
#endif /* ENETCFG_SUPPORT_PTP */


   // Control transfers here on any error, with 'error' set to the error code.
EXIT:
   if (ENET_OK!=error) {
      #if BSPCFG_ENET_RESTORE
         MACNET_uninstall_all_isrs(enet_ptr);
      #endif
      MACNET_free_context(macnet_context_ptr);
   }
   MACNET_int_enable();
   return error;
} 
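
/*
** IF_ERROR_EXIT() is used throughout MACNET_initialize() and, per the comment
** at the EXIT label, transfers control there with 'error' set.  A minimal
** sketch of that pattern, assuming it is a simple macro (EXAMPLE_IF_ERROR_EXIT
** is a hypothetical name; the real definition may differ):
*/
#define EXAMPLE_IF_ERROR_EXIT(condition, code) \
   do {                                        \
      if (condition) {                         \
         error = (code);                       \
         goto EXIT;                            \
      }                                        \
   } while (0)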
uint_32 MACNET_send
   (
      ENET_CONTEXT_STRUCT_PTR  enet_ptr,
         /* [IN] the Ethernet state structure */
      PCB_PTR              packet,
         /* [IN] the packet to send */
      uint_32              size,
         /* [IN] total size of the packet */
      uint_32              frags,
         /* [IN] total fragments in the packet */
      uint_32              flags
         /* [IN] optional flags, zero = default */
   )
{ 
   MACNET_CONTEXT_STRUCT_PTR   macnet_context_ptr = (MACNET_CONTEXT_STRUCT_PTR) enet_ptr->MAC_CONTEXT_PTR;
   ENET_MemMapPtr              macnet_ptr= macnet_context_ptr->MACNET_ADDRESS;
   PCB_FRAGMENT_PTR            frag_ptr;
   VENET_BD_STRUCT_PTR         tx_bd_ptr;
   uint_32                     len,totlen,frag;
   uchar_ptr                   txmem;
   boolean                     aligned;
   uint_32                     err = ENET_OK;
#if ENETCFG_SUPPORT_PTP
   MACNET_PTP_DATA             tmp_tx_time;
   boolean                     wait_for_ts=FALSE;
#endif

   if (macnet_ptr == NULL) 
      return ENETERR_INVALID_DEVICE;
   
   MACNET_int_disable();
   
   /*
   ** Make sure at least one transmit buffer descriptor is available
   ** before enqueueing the packet.
   */
   if (macnet_context_ptr->AvailableTxBDs < 1) {
      ENET_INC_STATS(COMMON.ST_TX_MISSED);
   
      err = ENETERR_SEND_FULL;      
      goto END;
   } 

   #if BSPCFG_ENABLE_ENET_HISTOGRAM
   {
      uint_32 index = size>> ENET_HISTOGRAM_SHIFT;
      
      if (index < ENET_HISTOGRAM_ENTRIES) {
         ENET_INC_STATS(TX_HISTOGRAM[index]);
      }
   }
   #endif
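   // The loop below is statistics-only: it records whether every fragment already meets the TX alignment requirement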
   
   aligned = TRUE;
   for (frag_ptr = packet->FRAG; frag_ptr->LENGTH; frag_ptr++) {
      if (MACNET_TX_ALIGN((uint_32)frag_ptr->FRAGMENT)!= (uint_32)frag_ptr->FRAGMENT)
         aligned = FALSE;
   } 
   if (aligned) {
      ENET_INC_STATS(TX_ALL_ALIGNED);
   }
   
   /*
   ** Enqueue the packet on the transmit ring.  This driver uses a single
   ** descriptor per packet; fragmented packets are coalesced below.
   */
   tx_bd_ptr = &macnet_context_ptr->MACNET_TX_RING_PTR[macnet_context_ptr->NextTxBD];

   frag_ptr = packet->FRAG;
   frag = (uint_32) frag_ptr->FRAGMENT;
   if (frags > 1 || (MACNET_TX_ALIGN(frag)!= frag)) {
      // Packet is fragmented and/or it is misaligned, needs to be copied
      txmem = NULL;
      // See if the packet fits in a small buffer
      if (size <= MACNET_SMALL_PACKET_SIZE) {
         // It does; try to dequeue one
         txmem = ENET_Dequeue_Buffer((pointer *) &macnet_context_ptr->SMALL_BUFFERS);
      }
      // If a small buffer was obtained, mark it; otherwise fall back to a regular TX buffer
      if (txmem) {
         // Mark this BD so the buffer is returned to the small-buffer queue after transmission
         macnet_context_ptr->FREE_TX_SMALL |= (1<<macnet_context_ptr->NextTxBD);
         ENET_INC_STATS(ST_TX_COPY_SMALL);

      } else { 
         if (size <=  macnet_context_ptr->AlignedTxBufferSize) {

            txmem = ENET_Dequeue_Buffer((pointer *) &macnet_context_ptr->TX_BUFFERS);
         }
         if (txmem) {
            // Mark this BD so the buffer is returned to the TX-buffer queue after transmission
            macnet_context_ptr->FREE_TX |= (1<<macnet_context_ptr->NextTxBD);
         } else {
            ENET_INC_STATS(COMMON.ST_TX_MISSED);
            
            err = ENETERR_NO_TX_BUFFER;      
            goto END;
         }
         
      }
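      // Gather all fragments into the single driver-owned buffer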
      totlen = 0;
      
      for (len = frag_ptr->LENGTH; len != 0; len = frag_ptr->LENGTH) {
         _mem_copy(frag_ptr->FRAGMENT, txmem + totlen, len);
         totlen += len;
         frag_ptr++;
      } 

   } else {
      // Packet is not fragmented and it is not misaligned
      totlen = frag_ptr->LENGTH;
      txmem  = frag_ptr->FRAGMENT;
      ENET_INC_STATS(TX_ALIGNED);
   } 

   // Flush the buffer from cache
   _DCACHE_FLUSH_MBYTES(txmem, totlen);  

   // Invalidate the bd from cache
   _DCACHE_INVALIDATE_MBYTES((pointer)tx_bd_ptr, sizeof(ENET_BD_STRUCT));  

   // set up the tx bd
   tx_bd_ptr->CONTROL &= HOST_TO_BE_SHORT_CONST(ENET_BD_ETHER_TX_WRAP);
   tx_bd_ptr->BUFFER = (uchar_ptr)HOST_TO_BE_LONG((uint_32)txmem);
   tx_bd_ptr->LENGTH = HOST_TO_BE_SHORT(totlen);
   tx_bd_ptr->CONTROL |= HOST_TO_BE_SHORT_CONST(ENET_BD_ETHER_TX_LAST | ENET_BD_ETHER_TX_SEND_CRC | ENET_BD_ETHER_TX_READY);
   
   tx_bd_ptr->CONTROL_EXT0 |= HOST_TO_BE_SHORT_CONST(ENET_BD_EXT0_ETHER_TX_GENERATE_INTR);
     
   
#if ENETCFG_SUPPORT_PTP
   if (macnet_context_ptr->PTP_PRIV->PTIMER_PRESENT) 
   {
       /* PTP over Ethernet frames: Check the PTPv2 over Ethernet type (identifier) */
       if((totlen > 44) && 
          (*(uint_16 *)(txmem + MACNET_PTP_ETHER_PKT_TYPE_OFFS) == HOST_TO_BE_SHORT_CONST(MACNET_PACKET_TYPE_IEEE_802_3)))
       {
           /* Allow interrupt and timestamp generation */
           tx_bd_ptr->CONTROL_EXT0 |= HOST_TO_BE_SHORT_CONST(ENET_BD_EXT0_ETHER_TX_TIMESTAMP);
           tmp_tx_time.KEY = HOST_TO_BE_SHORT(*((uint_16 *)(txmem + MACNET_PTP_ETHER_SEQ_ID_OFFS)));
           for(len=0;len<MACNET_PTP_CLOCKID_SIZE;len++)
               tmp_tx_time.CLOCKID[len] = *((uint_8 *)(txmem + MACNET_PTP_ETHER_CLOCKID + len));
           wait_for_ts = TRUE;
       }
       /* PTP over UDP: Check if port is 319 for PTP Event, and check for UDP */
       else if((totlen > 44) && 
          (*(uchar_ptr)(txmem + MACNET_PTP_UDP_PKT_TYPE_OFFS) == MACNET_PACKET_TYPE_UDP) && 
          (*(uint_16 *)(txmem + MACNET_PTP_UDP_PORT_OFFS) == HOST_TO_BE_SHORT_CONST(MACNET_PTP_EVNT_PORT)))
       {
           tx_bd_ptr->CONTROL_EXT0 |= HOST_TO_BE_SHORT_CONST(ENET_BD_EXT0_ETHER_TX_TIMESTAMP);
           tmp_tx_time.KEY = HOST_TO_BE_SHORT(*((uint_16 *)(txmem + MACNET_PTP_UDP_SEQ_ID_OFFS)));
           for(len=0;len<MACNET_PTP_CLOCKID_SIZE;len++)
               tmp_tx_time.CLOCKID[len] = *((uint_8 *)(txmem + MACNET_PTP_UDP_CLOCKID + len));
           wait_for_ts = TRUE;
       }

   }
#endif /* ENETCFG_SUPPORT_PTP */

   // Flush the tx bd from cache
   _DCACHE_FLUSH_MBYTES((pointer)tx_bd_ptr, sizeof(ENET_BD_STRUCT));  

   macnet_context_ptr->TxPCBS_PTR[macnet_context_ptr->NextTxBD] = packet;
   
   macnet_context_ptr->AvailableTxBDs--;
   BD_INC(macnet_context_ptr->NextTxBD,macnet_context_ptr->NumTxBDs);

   macnet_ptr->TDAR = ENET_TDAR_TDAR_MASK;
   
END:
   MACNET_int_enable();
      
#if ENETCFG_SUPPORT_PTP
   if(wait_for_ts)
   {
      /* Wait for the interrupt */
      _lwevent_wait_ticks(&macnet_context_ptr->PTP_PRIV->LWEVENT_PTP, MACNET_PTP_LWEVENT_TX_TS_INTR, FALSE, (uint_32)NULL);

      /* Clear the event */
      _lwevent_clear(&macnet_context_ptr->PTP_PRIV->LWEVENT_PTP, MACNET_PTP_LWEVENT_TX_TS_INTR);

      /* Clear the TS flag from the BD */
      tx_bd_ptr->CONTROL_EXT0 &= HOST_TO_BE_SHORT_CONST(~ENET_BD_EXT0_ETHER_TX_TIMESTAMP);
      
      tmp_tx_time.TS_TIME.NSEC = macnet_context_ptr->PTP_PRIV->TXSTAMP.NSEC;
      tmp_tx_time.TS_TIME.SEC  = macnet_context_ptr->PTP_PRIV->TXSTAMP.SEC;
      MACNET_ptp_insert(&(macnet_context_ptr->PTP_PRIV->TX_TIME), &tmp_tx_time);
      wait_for_ts = FALSE;
   }
#endif /* ENETCFG_SUPPORT_PTP */
   return err;
}
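
/*
** For reference: a hypothetical sketch of how the transmit-completion path
** might consume the FREE_TX / FREE_TX_SMALL bitmasks set in MACNET_send() to
** return copied buffers to their queues.  The driver's real TX-complete
** handler is not shown in this section, so EXAMPLE_release_tx_buffer() is an
** assumption about that logic, not the actual implementation.
*/
static void EXAMPLE_release_tx_buffer
   (
      MACNET_CONTEXT_STRUCT_PTR   macnet_context_ptr,
         /* [IN] the driver context */
      uint_32                     bd_index,
         /* [IN] index of the completed TX BD */
      uchar_ptr                   buf
         /* [IN] buffer that was programmed into that BD */
   )
{
   if (macnet_context_ptr->FREE_TX_SMALL & (1 << bd_index)) {
      /* The data was copied into a driver-owned small buffer; return it */
      ENET_Enqueue_Buffer((pointer *)&macnet_context_ptr->SMALL_BUFFERS, buf);
      macnet_context_ptr->FREE_TX_SMALL &= ~(1 << bd_index);
   } else if (macnet_context_ptr->FREE_TX & (1 << bd_index)) {
      /* The data was copied into a driver-owned aligned TX buffer; return it */
      ENET_Enqueue_Buffer((pointer *)&macnet_context_ptr->TX_BUFFERS, buf);
      macnet_context_ptr->FREE_TX &= ~(1 << bd_index);
   }
   /* Otherwise the BD pointed directly at the caller's PCB data and no
   ** driver-owned buffer needs to be returned */
}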