/**
 * __dma_memcpy - program the MDMA registers
 *
 * Actually program MDMA0 and wait for the transfer to finish.  Disable IRQs
 * while programming registers so that everything is fully configured.  Wait
 * for DMA to finish with IRQs enabled.  If interrupted, the initial DMA_DONE
 * check will make sure we don't clobber any existing transfer.
 *
 * NOTE(review): this chunk ends inside the function body; the remaining
 * register programming (X count/modify, addresses, unlock) is outside the
 * visible portion of the file.
 */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	/* One lock serializes all users of the MDMA0 channel. */
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.  Do it under irq lock and
	 * without the anomaly version (because we are atomic already).
	 */
	__builtin_bfin_ssync();

	/* If the channel is still configured from a previous transfer,
	 * busy-wait for that transfer's DMA_DONE before reprogramming.
	 */
	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		/* For larger bit sizes, we've already divided down cnt so it
		 * is no longer a multiple of 64k.  So we have to break down
		 * the limit here so it is a multiple of the incoming size.
		 * There is no limitation here in terms of total size other
		 * than the hardware though as the bits lost in the shift are
		 * made up by MODIFY (== we can hit the whole address space).
		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
		 */
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
		/* Program the 2D (Y) dimension on both source and dest. */
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}
/**
 * early_dma_memcpy_done - wait for early MDMA transfers and reset channels
 *
 * Spin until neither MDMA stream 0 nor stream 1 has an in-flight transfer,
 * then acknowledge the completion/error status bits and clear out both
 * channel configurations so they are ready for the next user.
 */
void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	/* Busy-wait until both channels are idle.  A channel counts as busy
	 * only when its CONFIG is non-zero and DMA_DONE is not yet set; the
	 * guard-style checks below keep the original short-circuit order of
	 * the register reads.
	 */
	for (;;) {
		if (bfin_read_MDMA_S0_CONFIG() &&
		    !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;
		if (bfin_read_MDMA_S1_CONFIG() &&
		    !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE))
			continue;
		break;
	}

	/* Acknowledge (write-1-to-clear) done/error status on both dests. */
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);

	/* Tear down both source and dest configs for the next transfer. */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}
/**
 * __dma_memcpy - program the MDMA registers
 *
 * Program the MDMA0 channel registers for a memory-to-memory transfer,
 * serialized against all other users via a local spinlock with IRQs off.
 *
 * NOTE(review): this chunk ends inside the function body; the remaining
 * register programming (X count/modify, addresses, unlock) is outside the
 * visible portion of the file.
 */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	/* One lock serializes all users of the MDMA0 channel. */
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	/* Sync so register writes below are not lost/corrupted by a prior
	 * config reset on this channel (we are atomic already, so the plain
	 * ssync builtin is used).
	 */
	__builtin_bfin_ssync();

	/* If the channel is still configured from a previous transfer,
	 * busy-wait for that transfer's DMA_DONE before reprogramming.
	 */
	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		/* 2D transfer: split cnt into an inner X run of
		 * (1 << (16 - shift)) elements and ycnt outer rows, where
		 * shift is derived from the element size encoded in |dmod|.
		 * The bits lost in the shift are made up by the MODIFY
		 * registers, so the whole address space remains reachable.
		 */
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
		/* Program the 2D (Y) dimension on both source and dest. */
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}
/**
 * early_dma_memcpy_done - wait for early MDMA transfers and reset channels
 *
 * Poll both MDMA streams until any outstanding transfer has raised
 * DMA_DONE, then clear the status bits and zero the channel configs.
 */
void __init early_dma_memcpy_done(void)
{
	int busy;

	early_shadow_stamp();

	/* A stream is busy when its CONFIG is non-zero but DMA_DONE has not
	 * been raised yet.  The else-if mirrors the original short-circuit
	 * order: stream 1 is only inspected once stream 0 looks idle.
	 */
	do {
		busy = 0;
		if (bfin_read_MDMA_S0_CONFIG() &&
		    !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			busy = 1;
		else if (bfin_read_MDMA_S1_CONFIG() &&
			 !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE))
			busy = 1;
	} while (busy);

	/* Acknowledge done/error status on both destination channels. */
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	/*
	 * Now that DMA is done, we would normally flush cache, but
	 * i/d cache isn't running this early, so we don't bother,
	 * and just clear out the DMA channel for next time
	 */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}