/*
 * Deferred (task-queue) housekeeping for the MAC, scheduled from the ISR.
 * Runs in process context: reclaims completed TX frame descriptors, then
 * refills any RX buffer-list slots whose skb allocation failed in the ISR,
 * restarts the TX queue if it had been stopped, and re-enables the receiver
 * if the ISR had halted it for lack of buffers.
 */
void ether00_mem_update(void* dev_id)
{
	struct net_device* dev=dev_id;
	struct net_priv* priv=dev->priv;
	struct sk_buff* skb;
	struct tx_fda_ent *fda_ptr=priv->tx_fdalist_vp;
	struct rx_blist_ent* blist_ent_ptr;
	unsigned long flags;

	/* Clear the task-queue sync flag and our "already queued" marker so the
	 * ISR may schedule this task again. */
	priv->tq_memupdate.sync=0;
	//priv->tq_memupdate.list=
	priv->memupdate_scheduled=0;

	/* Transmit interrupt */
	/* Walk the whole TX frame-descriptor ring; reclaim every descriptor the
	 * controller has handed back (COWNSFD clear) with a completion status. */
	while(fda_ptr<(priv->tx_fdalist_vp+TX_NUM_FDESC)){
		if(!(FDCTL_COWNSFD_MSK&fda_ptr->fd.FDCtl) && (ETHER_TX_STAT_COMP_MSK&fda_ptr->fd.FDStat)){
			priv->stats.tx_packets++;
			priv->stats.tx_bytes+=fda_ptr->bd.BuffLength;
			/* FDSystem holds the skb pointer stashed at transmit time
			 * (32-bit pointer-in-register convention). */
			skb=(struct sk_buff*)fda_ptr->fd.FDSystem;
			//printk("%d:txcln:fda=%#x skb=%#x\n",jiffies,fda_ptr,skb);
			dev_kfree_skb(skb);
			/* Zero the descriptor so it is seen as free next time round. */
			fda_ptr->fd.FDSystem=0;
			fda_ptr->fd.FDStat=0;
			fda_ptr->fd.FDCtl=0;
		}
		fda_ptr++;
	}

	/* Fill in any missing buffers from the received queue */
	/* Hold rx_lock so the ISR cannot touch the buffer list concurrently. */
	spin_lock_irqsave(&priv->rx_lock,flags);
	blist_ent_ptr=priv->rx_blist_vp;
	while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
		/* fd.FDSystem of 0 indicates we failed to allocate the buffer in the ISR */
		if(!blist_ent_ptr->fd.FDSystem){
			struct sk_buff *skb;
			skb=dev_alloc_skb(PKT_BUF_SZ);
			blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
			if(skb){
				setup_blist_entry(skb,blist_ent_ptr);
			}
			else {
				/* Still out of memory: give up for now; the entry stays
				 * marked empty (FDSystem==0) for a later retry. */
				break;
			}
		}
		blist_ent_ptr++;
	}
	spin_unlock_irqrestore(&priv->rx_lock,flags);

	/* TX descriptors were reclaimed above, so it is safe to restart the
	 * queue if hard_start_xmit had stopped it. */
	if(priv->queue_stopped){
		//printk("%d:cln:start q\n",jiffies);
		netif_start_queue(dev);
	}
	/* If the ISR halted the receiver for lack of buffers, turn it back on
	 * now that the buffer list has been (at least partly) replenished. */
	if(priv->rx_disabled){
		//printk("%d:enable_irq\n",jiffies);
		priv->rx_disabled=0;
		writel(ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
	}
}
void ether00_mem_update(void* dev_id) { struct net_device* dev=dev_id; struct net_priv* priv=dev->priv; struct rx_blist_ent* blist_ent_ptr; unsigned long flags; int enable_rx = 0; priv->tq_memupdate.sync=0; priv->memupdate_scheduled=0; /* Fill in any missing buffers from the received queue */ blist_ent_ptr=priv->rx_blist_vp; while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){ spin_lock_irqsave(&priv->dma_lock,flags); /* fd.FDSystem of 0 indicates we failed to allocate the buffer in the ISR */ if(!blist_ent_ptr->fd.FDSystem){ struct sk_buff *skb; skb=dev_alloc_skb(PKT_BUF_SZ); blist_ent_ptr->fd.FDSystem=(unsigned int)skb; if(skb){ setup_blist_entry(skb,blist_ent_ptr); enable_rx = 1; } else { /* * reschedule the clean up, since we * didn't patch up all the buffers */ if(!priv->memupdate_scheduled){ schedule_task(&priv->tq_memupdate); priv->memupdate_scheduled=1; } spin_unlock_irqrestore(&priv->dma_lock,flags); break; } } spin_unlock_irqrestore(&priv->dma_lock,flags); blist_ent_ptr++; } if(enable_rx){ if (!priv->rx_disabled){ priv->rx_disabled = 0; writel(ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr)); } } }
static void ether00_int( int irq_num, void* dev_id, struct pt_regs* regs) { struct net_device* dev=dev_id; struct net_priv* priv=dev->priv; unsigned int interruptValue; interruptValue=readl(ETHER_INT_SRC(dev->base_addr)); //printk("INT_SRC=%x\n",interruptValue); if(!(readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_IRQ_MSK)) { return; /* Interrupt wasn't caused by us!! */ } if(readl(ETHER_INT_SRC(dev->base_addr))& (ETHER_INT_SRC_INTMACRX_MSK | ETHER_INT_SRC_FDAEX_MSK | ETHER_INT_SRC_BLEX_MSK)) { struct rx_blist_ent* blist_ent_ptr; struct rx_fda_ent* fda_ent_ptr; struct sk_buff* skb; fda_ent_ptr=priv->rx_fda_ptr; spin_lock(&priv->rx_lock); while(fda_ent_ptr<(priv->rx_fda_ptr+RX_NUM_FDESC)){ int result; if(!(fda_ent_ptr->fd.FDCtl&FDCTL_COWNSFD_MSK)) { /* This frame is ready for processing */ /*find the corresponding buffer in the bufferlist */ blist_ent_ptr=priv->rx_blist_vp+fda_ent_ptr->bd.BDStat; skb=(struct sk_buff*)blist_ent_ptr->fd.FDSystem; /* Pass this skb up the stack */ skb->dev=dev; skb_put(skb,fda_ent_ptr->fd.FDLength); skb->protocol=eth_type_trans(skb,dev); skb->ip_summed=CHECKSUM_UNNECESSARY; result=netif_rx(skb); /* Update statistics */ priv->stats.rx_packets++; priv->stats.rx_bytes+=fda_ent_ptr->fd.FDLength; /* Free the FDA entry */ fda_ent_ptr->bd.BDStat=0xff; fda_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK; /* Allocate a new skb and point the bd entry to it */ blist_ent_ptr->fd.FDSystem=0; skb=dev_alloc_skb(PKT_BUF_SZ); //printk("allocskb=%#x\n",skb); if(skb){ setup_blist_entry(skb,blist_ent_ptr); } else if(!priv->memupdate_scheduled){ int tmp; /* There are no buffers at the moment, so schedule */ /* the background task to sort this out */ schedule_task(&priv->tq_memupdate); priv->memupdate_scheduled=1; printk(KERN_DEBUG "%s:No buffers",dev->name); /* If this interrupt was due to a lack of buffers then * we'd better stop the receiver too */ if(interruptValueÐER_INT_SRC_BLEX_MSK){ priv->rx_disabled=1; tmp=readl(ETHER_INT_SRC(dev->base_addr)); 
writel(tmp&~ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr)); printk(KERN_DEBUG "%s:Halting rx",dev->name); } } } fda_ent_ptr++; } spin_unlock(&priv->rx_lock); /* Clear the interrupts */ writel(ETHER_INT_SRC_INTMACRX_MSK | ETHER_INT_SRC_FDAEX_MSK | ETHER_INT_SRC_BLEX_MSK,ETHER_INT_SRC(dev->base_addr)); } if(readl(ETHER_INT_SRC(dev->base_addr))ÐER_INT_SRC_INTMACTX_MSK){ if(!priv->memupdate_scheduled){ schedule_task(&priv->tq_memupdate); priv->memupdate_scheduled=1; } /* Clear the interrupt */ writel(ETHER_INT_SRC_INTMACTX_MSK,ETHER_INT_SRC(dev->base_addr)); } if (readl(ETHER_INT_SRC(dev->base_addr)) & (ETHER_INT_SRC_SWINT_MSK| ETHER_INT_SRC_INTEARNOT_MSK| ETHER_INT_SRC_INTLINK_MSK| ETHER_INT_SRC_INTEXBD_MSK| ETHER_INT_SRC_INTTXCTLCMP_MSK)) { /* * Not using any of these so they shouldn't happen * * In the cased of INTEXBD - if you allocate more * than 28 decsriptors you may need to think about this */ printk("Not using this interrupt\n"); } if (readl(ETHER_INT_SRC(dev->base_addr)) & (ETHER_INT_SRC_INTSBUS_MSK | ETHER_INT_SRC_INTNRABT_MSK |ETHER_INT_SRC_DMPARERR_MSK)) { /* * Hardware errors, we can either ignore them and hope they go away *or reset the device, I'll try the first for now to see if they happen */ printk("Hardware error\n"); } }
static int ether00_mem_init(struct net_device* dev) { struct net_priv* priv=dev->priv; struct tx_fda_ent *tx_fd_ptr,*tx_end_ptr; struct rx_blist_ent* blist_ent_ptr; int i; /* * Grab a block of on chip SRAM to contain the control stuctures for * the ethernet MAC. This uncached becuase it needs to be accesses by both * bus masters (cpu + mac). However, it shouldn't matter too much in terms * of speed as its on chip memory */ priv->dma_data=ioremap_nocache(EXC_SPSRAM_BLOCK0_BASE,EXC_SPSRAM_BLOCK0_SIZE ); if (!priv->dma_data) return -ENOMEM; priv->rx_fda_ptr=(struct rx_fda_ent*)priv->dma_data; /* * Now share it out amongst the Frame descriptors and the buffer list */ priv->rx_blist_vp=(struct rx_blist_ent*)((unsigned int)priv->dma_data+RX_NUM_FDESC*sizeof(struct rx_fda_ent)); /* *Initalise the FDA list */ /* set ownership to the controller */ memset(priv->rx_fda_ptr,0x80,RX_NUM_FDESC*sizeof(struct rx_fda_ent)); /* *Initialise the buffer list */ blist_ent_ptr=priv->rx_blist_vp; i=0; while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){ struct sk_buff *skb; blist_ent_ptr->fd.FDLength=1; skb=dev_alloc_skb(PKT_BUF_SZ); if(skb){ setup_blist_entry(skb,blist_ent_ptr); blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(blist_ent_ptr+1); blist_ent_ptr->bd.BDStat=i++; blist_ent_ptr++; } else { printk("Failed to initalise buffer list\n"); } } blist_ent_ptr--; blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->rx_blist_vp); priv->tx_fdalist_vp=(struct tx_fda_ent*)(priv->rx_blist_vp+RX_NUM_BUFF); /* Initialise the buffers to be a circular list. 
The mac will then go poll * the list until it finds a frame ready to transmit */ tx_end_ptr=priv->tx_fdalist_vp+TX_NUM_FDESC; for(tx_fd_ptr=priv->tx_fdalist_vp;tx_fd_ptr<tx_end_ptr;tx_fd_ptr++){ tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa((tx_fd_ptr+1)); tx_fd_ptr->fd.FDCtl=1; tx_fd_ptr->fd.FDStat=0; tx_fd_ptr->fd.FDLength=1; } /* Change the last FDNext pointer to make a circular list */ tx_fd_ptr--; tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->tx_fdalist_vp); /* Point the device at the chain of Rx and Tx Buffers */ writel((unsigned int)__dma_pa(priv->rx_fda_ptr),ETHER_FDA_BAS(dev->base_addr)); writel((RX_NUM_FDESC-1)*sizeof(struct rx_fda_ent),ETHER_FDA_LIM(dev->base_addr)); writel((unsigned int)__dma_pa(priv->rx_blist_vp),ETHER_BLFRMPTR(dev->base_addr)); writel((unsigned int)__dma_pa(priv->tx_fdalist_vp),ETHER_TXFRMPTR(dev->base_addr)); return 0; }