/*
 * Synchronize an ISA DMA map.
 *
 * If the transfer is being bounced, copy data between the caller's
 * original buffer and the bounce buffer as required by the requested
 * sync operation(s).  PREREAD and POSTWRITE need no action here.
 *
 * NOTE(fix): the previous implementation used switch (op), which only
 * matched exact single-flag values; combined operations such as
 * (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) fell through and did no
 * bounce copy at all.  Test each flag individually instead, matching
 * the style used by isadma_bounce_dmamap_sync().
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	/* Sanity-check the sync window against the mapped size. */
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			bcopy(cookie->id_origbuf + offset,
			    cookie->id_bouncebuf + offset, len);
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			bcopy(cookie->id_bouncebuf + offset,
			    cookie->id_origbuf + offset, len);
	}

	/*
	 * The MI _bus_dmamap_sync() is a no-op on this platform, so it
	 * is deliberately not called here (dead "#if 0" call removed).
	 */
}
/*
 * Function for MIPS3 DMA map synchronization.
 *
 * Flush the DMA TLB before the device accesses memory (PRE operations),
 * then hand off to the machine-independent sync code.
 *
 * NOTE(fix): the previous code did `return _bus_dmamap_sync(...);`,
 * returning an expression from a void function — a C constraint
 * violation (C99 6.8.6.4).  Call it as a plain statement instead.
 */
void
jazz_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	/* Flush DMA TLB */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0)
		jazz_dmatlb_flush();

	_bus_dmamap_sync(t, map, offset, len, ops);
}
/*
 * Synchronize an Integrator DMA map.
 *
 * If the map is not using a bounce buffer, defer to the MI sync code;
 * otherwise dispatch on the buffer type to perform the bounce copies.
 * (The buffer-type switch body continues past this view of the file.)
 */
static void
integrator_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;

	DEBUG(printf("I_bus_dmamap_sync (tag %x, map %x, offset %x, size %u,"
	    " ops %d\n", (unsigned)t, (unsigned)map, (unsigned)offset
	    , (unsigned)len, ops));

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("integrator_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* Validate the requested window against the mapped size. */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("integrator_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("integrator_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing then use the standard code.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		_bus_dmamap_sync(t, map, offset, len, ops);
		return;
	}

	/* Trace which sync operations were requested (debug builds only). */
	DEBUG(printf("dmamap_sync(");
	    if (ops & BUS_DMASYNC_PREREAD) printf("preread ");
	    if (ops & BUS_DMASYNC_PREWRITE) printf("prewrite ");
	    if (ops & BUS_DMASYNC_POSTREAD) printf("postread ");
	    if (ops & BUS_DMASYNC_POSTWRITE) printf("postwrite ");)

	switch (cookie->id_buftype) {
/*
 * Synchronize an ISA DMA map.
 *
 * For non-bouncing maps this simply defers to the MI sync code.  For
 * bouncing maps it copies data between the caller's original buffer
 * and the bounce buffer according to the sync operation, then performs
 * the MIPS cache maintenance needed before the device touches memory.
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* Validate the requested window against the mapped size. */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just do the normal sync operation
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		_bus_dmamap_sync(t, map, offset, len, ops);
		return;
	}

	/*
	 * Flush data cache for PREREAD.  This has the side-effect
	 * of invalidating the cache.  Done at PREREAD since it
	 * causes the cache line(s) to be written back to memory.
	 *
	 * Copy the original buffer to the bounce buffer and flush
	 * the data cache for PREWRITE, so that the contents
	 * of the data buffer in memory reflect reality.
	 *
	 * Copy the bounce buffer to the original buffer in POSTREAD.
	 */
	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
			wbflush();
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 * The bounce buffer is linear, so m_copydata()
			 * can gather the whole chain in one call.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 * Walk the chain, scattering data from the linear
			 * bounce buffer into each mbuf; `len` and `offset`
			 * are consumed as we go.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		/* UIO bouncing is not implemented. */
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		/* RAW bouncing is not implemented. */
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("isadma_bounce_dmamap_sync");
	}

	/* Drain the write buffer. */
	wbflush();

	/* XXXJRT */
	if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE))
		mips_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf + offset,
		    len);
}
/*
 * Synchronize a MCA DMA map.
 *
 * After the standard MI sync, program the MCA DMA controller for the
 * upcoming transfer on PRE operations.  The register write sequence
 * below is order-sensitive and must not be rearranged.
 */
static void
_mca_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct x86_isa_dma_cookie *cookie;
	bus_addr_t phys;
	bus_size_t cnt;
	int dmach, mode;

	_bus_dmamap_sync(t, map, offset, len, ops);

	/*
	 * Don't do anything if not using the DMA controller.
	 */
	if ((map->_dm_flags & _MCABUS_DMA_USEDMACTRL) == 0)
		return;

	/*
	 * Don't do anything if not PRE* operation, allow only
	 * one of PREREAD and PREWRITE.
	 */
	if (ops != BUS_DMASYNC_PREREAD && ops != BUS_DMASYNC_PREWRITE)
		return;

	cookie = (struct x86_isa_dma_cookie *)map->_dm_cookie;
	/* DMA channel number is stashed in the upper nibble of id_flags. */
	dmach = (cookie->id_flags & 0xf0) >> 4;

	/* Program the first (only) DMA segment. */
	phys = map->dm_segs[0].ds_addr;
	cnt = map->dm_segs[0].ds_len;

	mode = DMACMD_MODE_XFER;
	mode |= (ops == BUS_DMASYNC_PREREAD)
	    ? DMACMD_MODE_READ : DMACMD_MODE_WRITE;
	if (map->_dm_flags & MCABUS_DMA_IOPORT)
		mode |= DMACMD_MODE_IOPORT;

	/* Use 16bit DMA if requested */
	if (map->_dm_flags & MCABUS_DMA_16BIT) {
#ifdef DIAGNOSTIC
		if ((cnt % 2) != 0) {
			panic("_mca_bus_dmamap_sync: 16bit DMA and cnt %lu odd",
			    cnt);
		}
#endif
		mode |= DMACMD_MODE_16BIT;
		/* Controller counts 16-bit words in this mode. */
		cnt /= 2;
	}

	/*
	 * Initialize the MCA DMA controller appropriately.  The exact
	 * sequence to setup the controller is taken from Minix.
	 */

	/* Disable access to DMA channel. */
	bus_space_write_1(dmaiot, dmacmdh, 0, DMACMD_MASK | dmach);

	/* Set the transfer mode. */
	bus_space_write_1(dmaiot, dmacmdh, 0, DMACMD_SET_MODE | dmach);
	bus_space_write_1(dmaiot, dmaexech, 0, mode);

	/* Set the address byte pointer. */
	bus_space_write_1(dmaiot, dmacmdh, 0, DMACMD_SET_ADDR | dmach);
	/* address bits 0..7 */
	bus_space_write_1(dmaiot, dmaexech, 0, (phys >> 0) & 0xff);
	/* address bits 8..15 */
	bus_space_write_1(dmaiot, dmaexech, 0, (phys >> 8) & 0xff);
	/* address bits 16..23 */
	bus_space_write_1(dmaiot, dmaexech, 0, (phys >> 16) & 0xff);

	/* Set the count byte pointer */
	bus_space_write_1(dmaiot, dmacmdh, 0, DMACMD_SET_CNT | dmach);
	/* count bits 0..7 (controller takes count - 1) */
	bus_space_write_1(dmaiot, dmaexech, 0, ((cnt - 1) >> 0) & 0xff);
	/* count bits 8..15 */
	bus_space_write_1(dmaiot, dmaexech, 0, ((cnt - 1) >> 8) & 0xff);

	/* Enable access to DMA channel. */
	bus_space_write_1(dmaiot, dmacmdh, 0, DMACMD_RESET_MASK | dmach);
}