/**
 * \brief allocates and maps a memory region to be used for DMA purposes
 *
 * \param bytes minimum size of the memory region in bytes
 * \param flags VREGION flags controlling how the region gets mapped
 * \param mem   returns the mapping information
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t dma_mem_alloc(size_t bytes,
                       vregion_flags_t flags,
                       struct dma_mem *mem)
{
    errval_t err;

    if (mem == NULL) {
        return DMA_ERR_ARG_INVALID;
    }

    /* allocate the backing frame; may be larger than the requested size */
    err = frame_alloc(&mem->frame, bytes, &mem->bytes);
    if (err_is_fail(err)) {
        return err;
    }

    /* obtain the physical base address of the frame */
    struct frame_identity id;
    err = invoke_frame_identify(mem->frame, &id);
    if (err_is_fail(err)) {
        dma_mem_free(mem);
        return err;
    }

    mem->paddr = id.base;

    /* map the frame into the local virtual address space */
    void *addr;
    err = vspace_map_one_frame_attr(&addr, mem->bytes, mem->frame, flags,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        dma_mem_free(mem);
        return err;
    }

    mem->vaddr = (lvaddr_t)addr;

    return SYS_ERR_OK;
}
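/*
 * Usage sketch (not part of the original source): how a caller might obtain
 * and release a DMA-able buffer with dma_mem_alloc(). It assumes the
 * Barrelfish definitions used above (struct dma_mem, dma_mem_free,
 * VREGION_FLAGS_READ_WRITE); the function name example_dma_buffer is
 * illustrative only.
 */
static errval_t example_dma_buffer(void)
{
    struct dma_mem mem;

    /* request at least 4 KiB, mapped read-write */
    errval_t err = dma_mem_alloc(4096, VREGION_FLAGS_READ_WRITE, &mem);
    if (err_is_fail(err)) {
        return err;
    }

    /*
     * the buffer is now CPU-visible at mem.vaddr and device-visible at
     * mem.paddr; mem.bytes holds the actual (possibly rounded-up) size
     */

    dma_mem_free(&mem);
    return SYS_ERR_OK;
}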
static errval_t device_init_ioat_v3(struct ioat_dma_device *dev)
{
    errval_t err;

    IOATDEV_DEBUG("initialize Crystal Beach 3 DMA device\n", dev->common.id);

    ioat_dma_dmacapability_t cap = ioat_dma_dmacapability_rd(&dev->device);

    if (ioat_dma_cbver_minor_extract(dev->version) == 2) {
        IOATDEV_DEBUG("disabling XOR and PQ opcodes for Crystal Beach 3.2\n",
                      dev->common.id);
        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
    } else if (ioat_dma_cbver_minor_extract(dev->version) == 3) {
        IOATDEV_DEBUG("devices of Crystal Beach Version 3.3 are not supported.\n",
                      dev->common.id);
        return DMA_ERR_DEVICE_UNSUPPORTED;
    }

    /* if DCA is enabled, we cannot support the RAID functions */
    if (ioat_dma_dca_is_enabled()) {
        IOATDEV_DEBUG("disabling XOR and PQ while DCA is enabled\n",
                      dev->common.id);
        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
    }

    if (ioat_dma_dmacapability_xor_extract(cap)) {
        IOATDEV_DEBUG("device supports XOR RAID.\n", dev->common.id);

        dev->flags |= IOAT_DMA_DEV_F_RAID;

        /*
         * this may need some additional functions to prepare
         * the specific transfers...
         *
         * max_xor = 8;
         * prepare_xor, prepare_xor_val
         */
    }

    if (ioat_dma_dmacapability_pq_extract(cap)) {
        IOATDEV_DEBUG("device supports PQ RAID.\n", dev->common.id);

        dev->flags |= IOAT_DMA_DEV_F_RAID;

        /*
         * this may need some additional functions to prepare the
         * DMA descriptors
         *
         * max_xor = 8;
         * max_pq = 8;
         * prepare_pq, prepare_pq_val
         *
         * also set the prepare_xor pointers...
         */
    }

    /* set the interrupt type to disabled */
    dev->common.irq_type = DMA_IRQ_DISABLED;
    dev->common.type = DMA_DEV_TYPE_IOAT;

    /* allocate memory for completion status writeback */
    err = dma_mem_alloc(IOAT_DMA_COMPLSTATUS_SIZE, IOAT_DMA_COMPLSTATUS_FLAGS,
                        &dev->complstatus);
    if (err_is_fail(err)) {
        return err;
    }

    dev->common.channels.count = ioat_dma_chancnt_num_rdf(&dev->device);
    dev->common.channels.c = calloc(dev->common.channels.count,
                                    sizeof(*dev->common.channels.c));
    if (dev->common.channels.c == NULL) {
        dma_mem_free(&dev->complstatus);
        return LIB_ERR_MALLOC_FAIL;
    }

    /* channel enumeration */
    IOATDEV_DEBUG("channel enumeration. discovered %u channels\n",
                  dev->common.id, dev->common.channels.count);

    uint32_t max_xfer_size = (1 << ioat_dma_xfercap_max_rdf(&dev->device));

    for (uint8_t i = 0; i < dev->common.channels.count; ++i) {
        struct dma_channel **chan = &dev->common.channels.c[i];
        err = ioat_dma_channel_init(dev, i, max_xfer_size,
                                    (struct ioat_dma_channel **) chan);
        /* abort device initialization if a channel fails to come up */
        if (err_is_fail(err)) {
            free(dev->common.channels.c);
            dma_mem_free(&dev->complstatus);
            return err;
        }
    }

    if (dev->flags & IOAT_DMA_DEV_F_DCA) {
        /*
         * TODO: DCA initialization
         * device->dca = ioat3_dca_init(pdev, device->reg_base);
         */
    }

    return SYS_ERR_OK;
}
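/*
 * Dispatch sketch (an assumption, not from the original source): a
 * hypothetical caller that reads the CBVER register and selects the
 * version-specific initializer. ioat_dma_cbver_rd and
 * ioat_dma_cbver_major_extract are named after the Mackerel-style
 * rd/extract accessors used above, but are illustrative here.
 */
static errval_t device_init_dispatch(struct ioat_dma_device *dev)
{
    /* read and cache the Crystal Beach version register */
    dev->version = ioat_dma_cbver_rd(&dev->device);

    switch (ioat_dma_cbver_major_extract(dev->version)) {
    case 3:
        /* Crystal Beach 3.x: the initializer defined above */
        return device_init_ioat_v3(dev);
    default:
        return DMA_ERR_DEVICE_UNSUPPORTED;
    }
}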