/* Tear down the SCC Ethernet device: release the IRQ, free the CPM
 * dual-port RAM used for the buffer descriptor rings, and unregister
 * the device from RTnet.
 */
static void __exit scc_enet_cleanup(void)
{
	struct rtnet_device *rtdev = rtdev_root;
	struct scc_enet_private *cep;
	volatile cpm8xx_t *cp = cpmp;
	volatile scc_enet_t *ep;

	if (rtdev) {
		/* Only dereference rtdev after the NULL check above. */
		cep = (struct scc_enet_private *)rtdev->priv;

		rt_disable_irq(rtdev->irq);
		rt_free_global_irq(rtdev->irq);

		ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);
		m8xx_cpm_dpfree(ep->sen_genscc.scc_rbase);
		m8xx_cpm_dpfree(ep->sen_genscc.scc_tbase);

		rt_stack_disconnect(rtdev);
		rt_unregister_rtnetdev(rtdev);
		rt_rtdev_disconnect(rtdev);

		printk("%s: unloaded\n", rtdev->name);
		rtskb_pool_release(&cep->skb_pool);
		rtdev_free(rtdev);
		rtdev_root = NULL;
	}
}
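/*
 * A minimal sketch of how this teardown would typically be wired up,
 * assuming the usual module_init()/module_exit() pairing; the matching
 * scc_enet_init() entry point is assumed to be defined elsewhere in
 * this driver and may be registered differently in the actual source.
 */
module_exit(scc_enet_cleanup);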
static int scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
	volatile cbd_t *bdp;

	RT_DEBUG("%s: ...\n", __FUNCTION__);

	/* Fill in a Tx ring entry. */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Oops. All transmit buffers are full. Bail out.
		 * This should not happen, since cep->tx_busy should be set.
		 */
		printk("%s: tx queue full!\n", rtdev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags. */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell the CPM to pad it. */
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer. */
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save the skb pointer. */
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur + 1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory data. */
	flush_dcache_range((unsigned long)(skb->data),
			   (unsigned long)(skb->data + skb->len));

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	// *** RTnet ***
	rt_sem_wait(&rtdev->xmit_sem);
	rt_disable_irq(rtdev->irq);
	rt_spin_lock(&cep->lock);

	/* Send it on its way. Tell the CPM it's ready, to interrupt when done,
	 * that it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR |
			BD_ENET_TX_LAST | BD_ENET_TX_TC);

#if 0
	dev->trans_start = jiffies;
#endif

	/* If this was the last BD in the ring, start at the beginning again. */
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	/* If the next BD is still owned by the CPM, the ring is full;
	 * stop the queue until the Tx-done interrupt frees an entry.
	 */
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		rtnetif_stop_queue(rtdev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	rt_spin_unlock(&cep->lock);
	rt_enable_irq(rtdev->irq);
	rt_sem_signal(&rtdev->xmit_sem);

	return 0;
}
int xnintr_disable(xnintr_t *intr)
{
	rt_disable_irq(intr->irq);
	return 0;
}
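/*
 * For symmetry, a minimal sketch of the matching enable path. This is an
 * assumption based on the rt_enable_irq() wrapper already used in
 * scc_enet_start_xmit() above, not necessarily the canonical nucleus
 * implementation.
 */
int xnintr_enable(xnintr_t *intr)
{
	/* Unmask the interrupt line this descriptor wraps. */
	rt_enable_irq(intr->irq);
	return 0;
}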