/*
 * Load a cam control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
	struct ccb_hdr *ccb_h;
	void *data_ptr;
	int error;
	uint32_t dxfer_len;
	uint16_t sglist_cnt;

	error = 0;
	ccb_h = &ccb->ccb_h;
	switch (ccb_h->func_code) {
	case XPT_SCSI_IO: {
		struct ccb_scsiio *csio;

		csio = &ccb->csio;
		data_ptr = csio->data_ptr;
		dxfer_len = csio->dxfer_len;
		sglist_cnt = csio->sglist_cnt;
		break;
	}
	case XPT_CONT_TARGET_IO: {
		struct ccb_scsiio *ctio;

		ctio = &ccb->ctio;
		data_ptr = ctio->data_ptr;
		dxfer_len = ctio->dxfer_len;
		sglist_cnt = ctio->sglist_cnt;
		break;
	}
	case XPT_ATA_IO: {
		struct ccb_ataio *ataio;

		ataio = &ccb->ataio;
		data_ptr = ataio->data_ptr;
		dxfer_len = ataio->dxfer_len;
		sglist_cnt = 0;
		break;
	}
	default:
		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
		    ccb_h->func_code);
	}

	switch ((ccb_h->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, data_ptr,
		    dxfer_len, kernel_pmap, flags, NULL, nsegs);
		break;
	case CAM_DATA_PADDR:
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
		    nsegs);
		break;
	case CAM_DATA_SG:
		error = _bus_dmamap_load_vlist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
		    nsegs, flags);
		break;
	case CAM_DATA_SG_PADDR:
		error = _bus_dmamap_load_plist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
		break;
	case CAM_DATA_BIO:
		error = _bus_dmamap_load_bio(dmat, map,
		    (struct bio *)data_ptr, nsegs, flags);
		break;
	default:
		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
		    ccb_h->flags);
	}
	return (error);
}
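
/*
 * Illustrative sketch only (not compiled): a minimal example of how a CAM
 * SIM driver might hand a CCB to busdma through the public
 * bus_dmamap_load_ccb() wrapper, which ultimately dispatches into
 * _bus_dmamap_load_ccb() above.  The names foo_softc, foo_req, foo_dma_cb,
 * foo_fail_io, foo_start_io and foo_action_scsi_io are hypothetical and
 * exist only for this sketch.
 */
#if 0
static void
foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_req *req = arg;

	if (error != 0) {
		/* Fail the CCB; segs is not valid on error. */
		foo_fail_io(req, error);
		return;
	}
	/* Program the controller's S/G list from segs[0..nseg-1]. */
	foo_start_io(req, segs, nseg);
}

static void
foo_action_scsi_io(struct foo_softc *sc, struct foo_req *req, union ccb *ccb)
{
	int error;

	error = bus_dmamap_load_ccb(sc->data_dmat, req->dmamap, ccb,
	    foo_dma_cb, req, /*flags*/ 0);
	if (error == EINPROGRESS) {
		/*
		 * Mapping resources were not immediately available; the
		 * callback runs later, once the deferred load completes.
		 */
		return;
	}
	if (error != 0)
		foo_fail_io(req, error);
}
#endif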
int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_opaque, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_opaque, kernel_pmap, &nsegs, flags);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_opaque, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_CCB:
		error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
		    flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is
	 * disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
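
/*
 * Illustrative sketch only (not compiled): building a memory descriptor for
 * a plain kernel virtual buffer and loading it with bus_dmamap_load_mem()
 * above.  The memdesc fields used here (md_type, u.md_vaddr, md_opaque) are
 * exactly the ones the function consumes; the names bar_softc, bar_done,
 * bar_issue and bar_load are hypothetical.
 */
#if 0
static void
bar_done(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bar_softc *sc = arg;

	if (error != 0)
		return;
	/* Hand segs[0..nseg-1] to the hardware. */
	bar_issue(sc, segs, nseg);
}

static int
bar_load(struct bar_softc *sc, void *buf, size_t len)
{
	struct memdesc mem;

	mem.md_type = MEMDESC_VADDR;
	mem.u.md_vaddr = buf;
	mem.md_opaque = len;

	/*
	 * With BUS_DMA_NOWAIT no deferral is armed, so EINPROGRESS cannot
	 * come back; ENOMEM is returned directly if resources are short.
	 */
	return (bus_dmamap_load_mem(sc->dmat, sc->map, &mem, bar_done, sc,
	    BUS_DMA_NOWAIT));
}
#endif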