/*
 * Release the per-segment VDMA mappings that were set up for a
 * scatter-gather SCSI command.
 */
static void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	int sz = sp->use_sg - 1;
	struct mmu_sglist *sg = (struct mmu_sglist *)sp->buffer;

	while (sz >= 0) {
		vdma_free(sg[sz].dvma_addr);
		sz--;
	}
}
static void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
	int sz = sp->use_sg - 1;
	struct scatterlist *sg = (struct scatterlist *)sp->buffer;

	while (sz >= 0) {
		vdma_free(sg[sz].dma_address);
		sz--;
	}
}
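/*
 * For context, a minimal sketch of the mapping side that the release
 * routine above assumes: each scatter-gather segment is handed to
 * vdma_alloc(), and the returned Jazz VDMA logical address is stored in
 * the scatterlist so it can later be passed to vdma_free().  The function
 * name and the old-style scatterlist fields (address, length) are
 * assumptions for illustration, not code taken from the driver.
 */
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
	int sz = sp->use_sg - 1;
	struct scatterlist *sg = (struct scatterlist *)sp->buffer;

	while (sz >= 0) {
		/* map the segment's physical address into VDMA space */
		sg[sz].dma_address = vdma_alloc(PHYSADDR(sg[sz].address),
						sg[sz].length);
		sz--;
	}
}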
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static void sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	unsigned int base_addr = dev->base_addr;
	struct sonic_local *lp;
	int status;

	if (dev == NULL) {
		printk("sonic_interrupt: irq %d for unknown device.\n", irq);
		return;
	}

	dev->interrupt = 1;

	lp = (struct sonic_local *)dev->priv;

	status = SONIC_READ(SONIC_ISR);
	SONIC_WRITE(SONIC_ISR, 0x7fff);	/* clear all bits */

	if (sonic_debug > 2)
		printk("sonic_interrupt: ISR=%x\n", status);

	if (status & SONIC_INT_PKTRX) {
		sonic_rx(dev);	/* got packet(s) */
	}

	if (status & SONIC_INT_TXDN) {
		int dirty_tx = lp->dirty_tx;

		while (dirty_tx < lp->cur_tx) {
			int entry = dirty_tx & SONIC_TDS_MASK;
			int status = lp->tda[entry].tx_status;

			if (sonic_debug > 3)
				printk("sonic_interrupt: status %d, cur_tx %d, dirty_tx %d\n",
				       status, lp->cur_tx, lp->dirty_tx);

			if (status == 0) {
				/* It still hasn't been Txed, kick the sonic again */
				SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
				break;
			}

			/* put back EOL and free descriptor */
			lp->tda[entry].tx_frag_count = 0;
			lp->tda[entry].tx_status = 0;

			if (status & 0x0001)
				lp->stats.tx_packets++;
			else {
				lp->stats.tx_errors++;
				if (status & 0x0642)
					lp->stats.tx_aborted_errors++;
				if (status & 0x0180)
					lp->stats.tx_carrier_errors++;
				if (status & 0x0020)
					lp->stats.tx_window_errors++;
				if (status & 0x0004)
					lp->stats.tx_fifo_errors++;
			}

			/* We must free the original skb */
			if (lp->tx_skb[entry]) {
				dev_kfree_skb(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
			}
			/* and the VDMA address */
			vdma_free(lp->tx_laddr[entry]);
			dirty_tx++;
		}

		if (lp->tx_full && dev->tbusy
		    && dirty_tx + SONIC_NUM_TDS > lp->cur_tx + 2) {
			/* The ring is no longer full, clear tbusy. */
			lp->tx_full = 0;
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}

		lp->dirty_tx = dirty_tx;
	}

	/*
	 * check error conditions
	 */
	if (status & SONIC_INT_RFO) {
		printk("%s: receive fifo underrun\n", dev->name);
		lp->stats.rx_fifo_errors++;
	}
	if (status & SONIC_INT_RDE) {
		printk("%s: receive descriptors exhausted\n", dev->name);
		lp->stats.rx_dropped++;
	}
	if (status & SONIC_INT_RBE) {
		printk("%s: receive buffer exhausted\n", dev->name);
		lp->stats.rx_dropped++;
	}
	if (status & SONIC_INT_RBAE) {
		printk("%s: receive buffer area exhausted\n", dev->name);
		lp->stats.rx_dropped++;
	}

	/* counter overruns; all counters are 16bit wide */
	if (status & SONIC_INT_FAE)
		lp->stats.rx_frame_errors += 65536;
	if (status & SONIC_INT_CRC)
		lp->stats.rx_crc_errors += 65536;
	if (status & SONIC_INT_MP)
		lp->stats.rx_missed_errors += 65536;

	/* transmit error */
	if (status & SONIC_INT_TXER)
		lp->stats.tx_errors++;

	/*
	 * clear interrupt bits and return
	 */
	SONIC_WRITE(SONIC_ISR, status);
	dev->interrupt = 0;
	return;
}
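/*
 * For context, a minimal sketch of the transmit-side counterpart that the
 * TXDN handling above relies on: when a packet is queued, its data is
 * mapped through vdma_alloc() and the returned logical address is stored
 * in lp->tx_laddr[] so sonic_interrupt() can vdma_free() it once the
 * descriptor completes.  The function name sonic_queue_tx_skb and the
 * omitted descriptor programming are illustrative assumptions, not the
 * driver's actual transmit routine.
 */
static int sonic_queue_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct sonic_local *lp = (struct sonic_local *)dev->priv;
	int entry = lp->cur_tx & SONIC_TDS_MASK;
	unsigned long laddr;

	/* map the packet data into the Jazz VDMA window; error checking of
	   the returned logical address is omitted in this sketch */
	laddr = vdma_alloc(PHYSADDR(skb->data), skb->len);

	lp->tx_laddr[entry] = laddr;	/* released via vdma_free() on TXDN */
	lp->tx_skb[entry] = skb;	/* freed via dev_kfree_skb() on TXDN */
	lp->cur_tx++;

	/* descriptor setup omitted; finally kick the SONIC to transmit */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
	return 0;
}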
/* Release the single VDMA mapping recorded for a non-scatter-gather command. */
static void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	vdma_free(sp->SCp.have_data_in);
}
/* Tear down the floppy DMA buffer: drop its VDMA translation, then free the pages. */
static void jazz_fd_dma_mem_free(unsigned long addr, unsigned long size)
{
	vdma_free(vdma_phys2log(PHYSADDR(addr)));
	free_pages(addr, get_order(size));
}
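/*
 * The matching allocation side, sketched from the free routine above:
 * pages are taken from the DMA zone and a VDMA translation is installed
 * so the floppy controller can reach the buffer.  The name
 * jazz_fd_dma_mem_alloc and the lack of error checking on vdma_alloc()
 * are assumptions for illustration.
 */
static unsigned long jazz_fd_dma_mem_alloc(unsigned long size)
{
	unsigned long mem;

	mem = __get_dma_pages(GFP_KERNEL, get_order(size));
	if (!mem)
		return 0;

	/* create the VDMA translation released by jazz_fd_dma_mem_free() */
	vdma_alloc(PHYSADDR(mem), size);

	return mem;
}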