/*
 * early_dma_memcpy - start a memory-to-memory DMA copy during early boot
 * @pdst: destination address, must be 4-byte aligned
 * @psrc: source address, must be 4-byte aligned
 * @size: number of bytes to copy, must be a multiple of 4
 *
 * Claims whichever MDMA channel pair (0 or 1) is free, programs a 32-bit
 * wide transfer, and returns without waiting for completion; use
 * early_dma_memcpy_done() to wait for outstanding transfers.
 */
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume that everything is 4 byte aligned, so include
	 * a basic sanity check
	 */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = NULL;
	/* Find an available memDMA channel */
	while (1) {
		/* Alternate between the two channel pairs; the first
		 * iteration (src_ch == NULL) tries pair 0.
		 */
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		/* Take the channel if it is idle, or reclaim it if a
		 * previous transfer on it has finished (DMA_DONE set).
		 */
		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.
	 */
	__builtin_bfin_ssync();

	/* Destination */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	/* Since we are atomic now, don't use the workaround ssync */
	__builtin_bfin_ssync();
}
/*
 * early_dma_memcpy - start a memory-to-memory DMA copy during early boot
 * @pdst: destination address, must be 4-byte aligned
 * @psrc: source address, must be 4-byte aligned
 * @size: number of bytes to copy, must be a multiple of 4
 *
 * Claims whichever MDMA channel pair (0 or 1) is free, programs a 32-bit
 * wide transfer, and returns without waiting for completion; use
 * early_dma_memcpy_done() to wait for outstanding transfers.
 */
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume everything is 4 byte aligned; basic sanity checks */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Find an available memDMA channel, alternating between the two
	 * channel pairs (the first iteration tries pair 0).
	 */
	while (1) {
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		/* Take the channel if idle, or reclaim it if a previous
		 * transfer on it has finished (DMA_DONE set).
		 */
		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Sync so writes below are not lost/corrupted after a possible
	 * config reset on this channel.
	 */
	__builtin_bfin_ssync();

	/* Destination */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	/* We are atomic here, so a plain ssync suffices */
	__builtin_bfin_ssync();
}
/*
 * early_dma_memcpy_done - busy-wait until all early MDMA transfers finish
 *
 * Spins while either MDMA channel pair is enabled (source cfg non-zero)
 * but not yet reporting DMA_DONE, then writes DMA_DONE | DMA_ERR to the
 * destination status registers and zeroes all channel configs so the
 * channels are free for the next early_dma_memcpy().
 */
void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	while ((bfin_read_MDMA_S0_CONFIG() &&
		!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
	       (bfin_read_MDMA_S1_CONFIG() &&
		!(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
		continue;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);

	/* Clear out the channels so they are ready for re-use */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}
/*
 * early_dma_memcpy_done - busy-wait until all early MDMA transfers finish
 *
 * Spins while either MDMA channel pair is enabled (source cfg non-zero)
 * but not yet reporting DMA_DONE, then writes DMA_DONE | DMA_ERR to the
 * destination status registers and zeroes all channel configs.
 */
void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	while ((bfin_read_MDMA_S0_CONFIG() &&
		!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
	       (bfin_read_MDMA_S1_CONFIG() &&
		!(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
		continue;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);

	/*
	 * Now that DMA is done, we would normally flush cache, but
	 * i/d cache isn't running this early, so we don't bother,
	 * and just clear out the DMA channel for next time
	 */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}
/*
 * blackfin_dma_early_init - reset the MDMA source channel configs
 *
 * Zeroes both MDMA source channel configuration registers so that
 * early_dma_memcpy() finds the channels idle during early boot.
 */
void __init blackfin_dma_early_init(void)
{
	early_shadow_stamp();
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
}
/*
 * early_dma_memcpy - start a memory-to-memory DMA copy during early boot
 * @pdst: destination address, must be 4-byte aligned
 * @psrc: source address, must be 4-byte aligned
 * @size: number of bytes to copy, must be a multiple of 4
 *
 * Claims whichever MDMA channel pair (0 or 1) is free, programs a 32-bit
 * wide transfer, and (except on BF60x, see below) returns without waiting
 * for completion; use early_dma_memcpy_done() to wait for outstanding
 * transfers.
 */
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume that everything is 4 byte aligned, so include
	 * a basic sanity check
	 */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = NULL;
	/* Find an available memDMA channel */
	while (1) {
		/* Alternate between the two channel pairs; the first
		 * iteration (src_ch == NULL) tries pair 0.
		 */
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		/* Take the channel if it is idle, or reclaim it if a
		 * previous transfer on it has finished (DMA_DONE set).
		 */
		if (!DMA_MMR_READ(&src_ch->cfg))
			break;
		else if (DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE) {
			DMA_MMR_WRITE(&src_ch->cfg, 0);
			break;
		}
	}

	/* Force a sync in case a previous config reset on this channel
	 * occurred.  This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.
	 */
	__builtin_bfin_ssync();

	/* Destination */
	bfin_write32(&dst_ch->start_addr, dst);
	DMA_MMR_WRITE(&dst_ch->x_count, size >> 2);
	DMA_MMR_WRITE(&dst_ch->x_modify, 1 << 2);
	DMA_MMR_WRITE(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source */
	bfin_write32(&src_ch->start_addr, src);
	DMA_MMR_WRITE(&src_ch->x_count, size >> 2);
	DMA_MMR_WRITE(&src_ch->x_modify, 1 << 2);
	DMA_MMR_WRITE(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable */
	DMA_MMR_WRITE(&src_ch->cfg, DMAEN | WDSIZE_32);
	DMA_MMR_WRITE(&dst_ch->cfg, WNR | DI_EN_X | DMAEN | WDSIZE_32);

	/* Since we are atomic now, don't use the workaround ssync */
	__builtin_bfin_ssync();

#ifdef CONFIG_BF60x
	/* Work around a possible MDMA anomaly.  Running 2 MDMA channels to
	 * transfer DDR data to L1 SRAM may corrupt data.
	 * Should be reverted after this issue is root caused.
	 */
	while (!(DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE))
		continue;
#endif
}