/**
 * md_mic_dma_free_chan - Frees up a DMA channel
 * @dma_dev: The DMA device instance that owns the channel
 * @chan: The DMA channel handle
 */
void md_mic_dma_free_chan(struct mic_dma_device *dma_dev,
			  struct md_mic_dma_chan *chan)
{
	CHECK_CHAN(chan);
	/* Mark the channel available again and disable it in hardware */
	atomic_set(&(chan->in_use), CHAN_AVAILABLE);
	md_mic_dma_enable_chan(dma_dev, chan->ch_num, false);
}
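/*
 * Illustrative sketch, not part of the driver: a channel obtained via
 * md_mic_dma_request_chan() is paired with md_mic_dma_free_chan(), which
 * marks it CHAN_AVAILABLE and disables it in hardware. The function name
 * and the -ENODEV error path are assumptions made for this example.
 */
static int example_grab_and_release_chan(struct mic_dma_device *dma_dev)
{
	struct md_mic_dma_chan *chan;

	chan = md_mic_dma_request_chan(dma_dev, MIC_DMA_CHAN_MIC_OWNED);
	if (!chan)
		return -ENODEV;

	/* ... program the descriptor ring and use the channel ... */

	md_mic_dma_free_chan(dma_dev, chan);
	return 0;
}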
/**
 * md_mic_dma_chan_set_desc_ring - Configures the DMA channel desc ring
 * @dma_dev: The DMA device instance that owns the channel
 * @chan: The DMA channel handle
 * @desc_ring_phys_addr: Physical address of the desc ring base. Needs to be
 *                       physically contiguous and wired down memory.
 * @num_desc: Number of descriptors; must be a multiple of the cache line
 *            size. Descriptor size should be determined using
 *            sizeof(union md_mic_dma_desc). The maximum number of
 *            descriptors is defined by MIC_MAX_NUM_DESC_PER_RING.
 */
void md_mic_dma_chan_set_desc_ring(struct mic_dma_device *dma_dev,
				   struct md_mic_dma_chan *chan,
				   phys_addr_t desc_ring_phys_addr,
				   uint32_t num_desc)
{
	uint32_t chan_num;
	uint32_t drar_lo = 0;
	uint32_t drar_hi = 0;

	CHECK_CHAN(chan);
	chan_num = chan->ch_num;

	/*
	 * TODO: Maybe the 2nd condition should be different considering the
	 * size of union md_mic_dma_desc?
	 */
	KASSERT((((num_desc) <= MIC_MAX_NUM_DESC_PER_RING) &&
		 (ALIGN((num_desc - (L1_CACHE_BYTES - 1)), L1_CACHE_BYTES) == num_desc)),
		"num_desc > max or not multiple of cache line num 0x%x", num_desc);

	/* Quiesce the channel while the ring registers are reprogrammed */
	md_mic_dma_enable_chan(dma_dev, chan_num, false);

	drar_hi = size_to_drar_hi_size(num_desc);

	/* Host-owned rings live in system memory and go through the SMPT */
	if (MIC_DMA_CHAN_HOST_OWNED == chan->owner) {
		drar_hi |= SBOX_DRARHI_SYS_MASK;
		drar_hi |= addr_to_drar_hi_smpt_bits(desc_ring_phys_addr);
	}
	drar_lo = (uint32_t)desc_ring_phys_addr;
	drar_hi |= physaddr_to_drarhi_ba(desc_ring_phys_addr);

	md_mic_dma_write_mmio(dma_dev, chan_num, REG_DRAR_LO, drar_lo);
	md_mic_dma_write_mmio(dma_dev, chan_num, REG_DRAR_HI, drar_hi);

	chan->num_desc_in_ring = num_desc;
	pr_debug("md_mic_dma_chan_set_desc_ring addr=0x%llx num=%d drar_hi.bits.pageno 0x%x\n",
		 desc_ring_phys_addr, num_desc,
		 (uint32_t)(desc_ring_phys_addr >> MIC_SYSTEM_PAGE_SHIFT));
	chan->cached_tail = md_mic_dma_chan_read_tail(dma_dev, chan);
	md_mic_dma_enable_chan(dma_dev, chan_num, true);
}
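/*
 * Illustrative sketch, not part of the driver: programming a descriptor
 * ring for a card-owned channel, whose ring lives in local physically
 * contiguous memory (host-owned rings additionally go through the SMPT,
 * which md_mic_dma_chan_set_desc_ring handles itself). The function name
 * and the MY_NUM_DESC constant are assumptions made for this example; a
 * real caller must pick a descriptor count that is a multiple of
 * L1_CACHE_BYTES and at most MIC_MAX_NUM_DESC_PER_RING.
 */
#define MY_NUM_DESC	(4 * L1_CACHE_BYTES)	/* multiple of the cache line size */

static int example_setup_ring(struct mic_dma_device *dma_dev,
			      struct md_mic_dma_chan *chan)
{
	size_t ring_bytes = MY_NUM_DESC * sizeof(union md_mic_dma_desc);
	void *ring;

	/* kzalloc memory is physically contiguous and not pageable */
	ring = kzalloc(ring_bytes, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	md_mic_dma_chan_set_desc_ring(dma_dev, chan,
				      virt_to_phys(ring), MY_NUM_DESC);
	return 0;
}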
int mic_dma_lib_init(uint8_t *mmio_va_base, struct mic_dma_ctx_t *dma_ctx)
{
	int i;
#ifdef _MIC_SCIF_
	int ret_value;
#endif
	struct dma_channel *ch;
	enum md_mic_dma_chan_owner owner, currentOwner;

	//pr_debug(PR_PREFIX "Initialized the dma mmio va=%p\n", mmio_va_base);
	/* A NULL MMIO base means the DMA lib is running on the card side */
	currentOwner = mmio_va_base == 0 ?
		MIC_DMA_CHAN_MIC_OWNED : MIC_DMA_CHAN_HOST_OWNED;
	/* TODO: multi-card support */
	md_mic_dma_init(&dma_ctx->dma_dev, mmio_va_base);

	for (i = 0; i < MAX_NUM_DMA_CHAN; i++) {
		ch = &dma_ctx->dma_channels[i];

		/* Initialize pointer to parent */
		ch->dma_ctx = dma_ctx;
		/* Channels 0..__LAST_HOST_CHAN_NUM belong to the host */
		owner = i > __LAST_HOST_CHAN_NUM ?
			MIC_DMA_CHAN_MIC_OWNED : MIC_DMA_CHAN_HOST_OWNED;

		/* This has to be done from card side */
		ch->chan = md_mic_dma_request_chan(&dma_ctx->dma_dev, owner);
		KASSERT((ch->chan != NULL), "dummy\n");
		ch->ch_num = ch->chan->ch_num;

#ifdef _MIC_SCIF_
		/*
		 * Host driver would have executed by now and thus setup the
		 * desc. ring
		 */
		if (ch->chan->owner == MIC_DMA_CHAN_HOST_OWNED)
			md_mic_dma_enable_chan(&dma_ctx->dma_dev, i, true);
#endif

		/* Mark as used by default */
		atomic_set(&(ch->flags), CHAN_INUSE);
		if (currentOwner == owner) {
			alloc_dma_desc_ring_mem(ch, dma_ctx);

#ifdef _MIC_SCIF_
			/* DMA now shares the IRQ handler with other system interrupts */
			ret_value = request_irq(i, dma_interrupt_handler,
						IRQF_DISABLED, "dma channel", ch);
			ret_value = ret_value; /* silence unused-variable warnings */
			//pr_debug(PR_PREFIX "Interrupt handler ret value for chan %d = %d\n", i, ret_value);
#endif
			md_mic_dma_chan_setup(dma_ctx, ch);
			mi_mic_dma_chan_setup(ch, dma_ctx);

			init_waitqueue_head(&ch->intr_wq);
			init_waitqueue_head(&ch->access_wq);

			/* Only mark owned channels as available */
			atomic_set(&(ch->flags), CHAN_AVAILABLE);

			md_mic_dma_print_debug(&dma_ctx->dma_dev, ch->chan);
		} else {
			ch->desc_ring = NULL;
		}
	}
	/* Initialize last_allocated_dma_channel */
	dma_ctx->last_allocated_dma_channel_num = -1;
	//pr_debug(PR_PREFIX "Initialized the dma channels\n");
	mic_dma_proc_init(dma_ctx);
	return 0;
}
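/*
 * Illustrative sketch, not part of the driver: mic_dma_lib_init() is
 * called once per device. On the host side the caller passes the
 * ioremapped MMIO base; on the card side it passes NULL, which makes
 * currentOwner resolve to MIC_DMA_CHAN_MIC_OWNED above. The surrounding
 * probe function is an assumption made for this example.
 */
static int example_probe(uint8_t *mmio_va, struct mic_dma_ctx_t *dma_ctx)
{
	/* mmio_va is NULL when this code runs on the MIC card itself */
	return mic_dma_lib_init(mmio_va, dma_ctx);
}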