/*=======================================================================*/
/*
 * dma_irq_handler - top-half IRQ handler for an IOP310 DMA channel.
 *
 * @irq:    interrupt number being serviced (used for logging only).
 * @dev_id: the iop310_dma_t channel descriptor registered for this IRQ.
 * @regs:   saved CPU registers at interrupt time (unused).
 *
 * Reads the FIQ1 interrupt status register and returns immediately if no
 * DMA interrupt bit is pending.  Otherwise it services completions in a
 * loop bounded by the channel's irq_thresh: each pass acknowledges the
 * done condition in the channel status register (CSR), calls
 * dma_process(), and re-samples both status registers.  Finally the
 * channel's task is queued on the immediate task queue so the remaining
 * work runs in the IMMEDIATE_BH bottom half, outside interrupt context.
 */
static void dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	iop310_dma_t *dma = (iop310_dma_t *) dev_id;
	u32 irq_status = 0;
	u32 status = 0;
	u32 thresh;

	irq_status = *(IOP310_FIQ1ISR);

	/* Not ours: no DMA bit set in the FIQ1 interrupt status register */
	if(!(irq_status & DMA_INT_MASK))
	{
		return;
	}

	DPRINTK("IRQ: irq=%d status=%#x\n", irq, irq_status);

	status = *(dma->reg_addr.CSR);
	/* irq_thresh caps how many completions we service in one IRQ so the
	 * handler cannot spin forever while interrupts stay asserted */
	thresh = atomic_read(&dma->irq_thresh);

	DPRINTK("CSR: %#x\n", status);
	DPRINTK("Thresh: %d\n", thresh);

	/* while we continue to get DMA INTs */
	while((irq_status & DMA_INT_MASK) && thresh--)
	{
		/* clear CSR */
		/* NOTE(review): the |= is a read-modify-write that rewrites
		 * every currently-set CSR bit, not just DMA_CSR_DONE_MASK;
		 * presumably the done bits are write-one-to-clear — confirm
		 * against the IOP310 register spec */
		*(dma->reg_addr.CSR) |= DMA_CSR_DONE_MASK;

		dma_process(dma);

		/* NOTE(review): 'status' is re-read here but never consulted
		 * after the loop; only irq_status decides another iteration */
		status = *(dma->reg_addr.CSR);
		irq_status = *(IOP310_FIQ1ISR);
	}

	/* schedule bottom half */
	dma->dma_task.data = (void *)dma;
	/* task goes to the immediate task queue */
	queue_task(&dma->dma_task, &tq_immediate);
	/* mark IMMEDIATE BH for execute */
	mark_bh(IMMEDIATE_BH);
}
/*
 * dma_io_write - emulate a byte write to an Intel 8237 DMA controller
 * I/O port.
 *
 * @port:  the I/O port the guest wrote (channel address/count/page
 *         register, or one of the controller-wide registers).
 * @value: the byte written.
 *
 * The 16-bit address and count registers are accessed one byte at a
 * time: the controller's flip-flop (dma[x].ff) selects the low/high
 * byte and toggles after every access.  Writes update both the reload
 * (base_*) and running (cur_*) copy of the register.  The per-channel
 * case labels are generated by the HANDLE_X()/HANDLE_*_WRITE macro
 * pairs, one expansion per controller/channel combination.
 */
static void dma_io_write(ioport_t port, Bit8u value)
{
  switch (port) {

  /* 16-bit address register: byte picked by the flip-flop, which is
   * toggled after the write */
#define HANDLE_ADDR_WRITE(d_n, c_n) \
  case DMA##d_n##_ADDR_##c_n: \
    dma[d(d_n)].chans[d(c_n)].base_addr.byte[dma[d(d_n)].ff] = value; \
    dma[d(d_n)].chans[d(c_n)].cur_addr.byte[dma[d(d_n)].ff] = value; \
    q_printf("DMA%i: addr write: %#x to Channel %d byte %d\n", \
             d_n, value, d(c_n), dma[d(d_n)].ff); \
    dma[d(d_n)].ff ^= 1; \
    break
  HANDLE_X(ADDR_WRITE);

  /* 16-bit transfer-count register: same flip-flop scheme */
#define HANDLE_CNT_WRITE(d_n, c_n) \
  case DMA##d_n##_CNT_##c_n: \
    dma[d(d_n)].chans[d(c_n)].base_count.byte[dma[d(d_n)].ff] = value; \
    dma[d(d_n)].chans[d(c_n)].cur_count.byte[dma[d(d_n)].ff] = value; \
    q_printf("DMA%i: count write: %#x to Channel %d byte %d\n", \
             d_n, value, d(c_n), dma[d(d_n)].ff); \
    dma[d(d_n)].ff ^= 1; \
    break
  HANDLE_X(CNT_WRITE);

  /* per-channel page register, a plain 8-bit write (no flip-flop);
   * presumably the high address byte, per standard 8237 PC usage —
   * confirm against how dma_process() combines it */
#define HANDLE_PAGE_WRITE(d_n, c_n) \
  case DMA##d_n##_PAGE_##c_n: \
    dma[d(d_n)].chans[d(c_n)].page = value; \
    q_printf("DMA%i: page write: %#x to Channel %d\n", \
             d_n, value, d(c_n)); \
    break
  HANDLE_X(PAGE_WRITE);

  /* single-channel mask register: bits 0-1 select the channel, bit 2
   * sets (1) or clears (0) its mask; unmasking also clears the
   * channel's status bit */
  case DMA1_MASK_REG:
    if (value & 4) {
      q_printf("DMA1: mask channel %i\n", value & 3);
      dma[DMA1].mask |= 1 << (value & 3);
    }
    else {
      q_printf("DMA1: unmask channel %i\n", value & 3);
      dma[DMA1].mask &= ~(1 << (value & 3));
      dma[DMA1].status &= ~(1 << (value & 3));
    }
    break;

  case DMA2_MASK_REG:
    if (value & 4) {
      q_printf("DMA2: mask channel %i\n", value & 3);
      dma[DMA2].mask |= 1 << (value & 3);
    }
    else {
      q_printf("DMA2: unmask channel %i\n", value & 3);
      dma[DMA2].mask &= ~(1 << (value & 3));
      dma[DMA2].status &= ~(1 << (value & 3));
    }
    break;

  /* mode register: bits 0-1 select the channel, the remaining bits are
   * stored as that channel's mode */
  case DMA1_MODE_REG:
    dma[DMA1].chans[value & 3].mode = value >> 2;
    q_printf("DMA1: Write mode 0x%x to Channel %u\n", value >> 2, value & 3);
    break;

  case DMA2_MODE_REG:
    dma[DMA2].chans[value & 3].mode = value >> 2;
    q_printf("DMA2: Write mode 0x%x to Channel %u\n", value >> 2, value & 3);
    break;

  /* command register: value is stored verbatim, not interpreted here */
  case DMA1_CMD_REG:
    dma[DMA1].command = value;
    q_printf("DMA1: Write 0x%x to Command reg\n", value);
    break;

  case DMA2_CMD_REG:
    dma[DMA2].command = value;
    q_printf("DMA2: Write 0x%x to Command reg\n", value);
    break;

  /* clear flip-flop: next address/count byte access is the low byte */
  case DMA1_CLEAR_FF_REG:
    q_printf("DMA1: Clearing Output Flip-Flop\n");
    dma[DMA1].ff = 0;
    break;

  case DMA2_CLEAR_FF_REG:
    q_printf("DMA2: Clearing Output Flip-Flop\n");
    dma[DMA2].ff = 0;
    break;

  /* master clear: delegate the controller reset to dma_soft_reset() */
  case DMA1_RESET_REG:
    q_printf("DMA1: Reset\n");
    dma_soft_reset(DMA1);
    break;

  case DMA2_RESET_REG:
    q_printf("DMA2: Reset\n");
    dma_soft_reset(DMA2);
    break;

  /* software request register: bits 0-1 select the channel, bit 2 sets
   * or clears its request bit */
  case DMA1_REQ_REG:
    if (value & 4) {
      q_printf("DMA1: Setting request state %#x\n", value);
      dma[DMA1].request |= 1 << (value & 3);
    }
    else {
      q_printf("DMA1: Clearing request state %#x\n", value);
      dma[DMA1].request &= ~(1 << (value & 3));
    }
    break;

  case DMA2_REQ_REG:
    if (value & 4) {
      q_printf("DMA2: Setting request state %#x\n", value);
      dma[DMA2].request |= 1 << (value & 3);
    }
    else {
      q_printf("DMA2: Clearing request state %#x\n", value);
      dma[DMA2].request &= ~(1 << (value & 3));
    }
    break;

  /* clear-mask: unmask all four channels; status &= 0xf0 keeps only the
   * high nibble (presumably the request bits, clearing the TC bits —
   * confirm against the status-register layout) */
  case DMA1_CLR_MASK_REG:
    q_printf("DMA1: Clearing masks\n");
    dma[DMA1].mask = 0;
    dma[DMA1].status &= 0xf0;
    break;

  case DMA2_CLR_MASK_REG:
    q_printf("DMA2: Clearing masks\n");
    dma[DMA2].mask = 0;
    dma[DMA2].status &= 0xf0;
    break;

  /* write-all-mask-bits: the whole mask byte at once */
  case DMA1_MASK_ALL_REG:
    q_printf("DMA1: Setting masks %#x\n", value);
    dma[DMA1].mask = value;
    dma[DMA1].status &= 0xf0;
    break;

  case DMA2_MASK_ALL_REG:
    q_printf("DMA2: Setting masks %#x\n", value);
    dma[DMA2].mask = value;
    dma[DMA2].status &= 0xf0;
    break;

  default:
    q_printf("DMA: Unhandled Write on 0x%04x\n", (Bit16u) port);
  }

  dma_process();		// Not needed in fact
}
/*
 * dma_io_read - emulate a byte read from an Intel 8237 DMA controller
 * I/O port.
 *
 * @port: the I/O port the guest read.
 *
 * Returns the register byte, or 0xff for unhandled ports.  Like the
 * write side, 16-bit current-address and current-count registers are
 * read a byte at a time through the flip-flop (dma[x].ff), which
 * toggles after each access; the per-channel case labels come from the
 * HANDLE_X()/HANDLE_*_READ macro pairs.
 */
static Bit8u dma_io_read(ioport_t port)
{
  Bit8u r = 0xff;	/* default for unhandled ports */

  switch (port) {

  /* current address: byte selected by the flip-flop, toggled after */
#define HANDLE_CUR_ADDR_READ(d_n, c_n) \
  case DMA##d_n##_ADDR_##c_n: \
    r = dma[d(d_n)].chans[d(c_n)].cur_addr.byte[dma[d(d_n)].ff]; \
    q_printf("DMA%i: cur_addr read: %#x from Channel %d byte %d\n", \
             d_n, r, d(c_n), dma[d(d_n)].ff); \
    dma[d(d_n)].ff ^= 1; \
    break
  HANDLE_X(CUR_ADDR_READ);

  /* current transfer count: same flip-flop scheme */
#define HANDLE_CUR_CNT_READ(d_n, c_n) \
  case DMA##d_n##_CNT_##c_n: \
    r = dma[d(d_n)].chans[d(c_n)].cur_count.byte[dma[d(d_n)].ff]; \
    q_printf("DMA%i: cur_cnt read: %#x from Channel %d byte %d\n", \
             d_n, r, d(c_n), dma[d(d_n)].ff); \
    dma[d(d_n)].ff ^= 1; \
    break
  HANDLE_X(CUR_CNT_READ);

  /* per-channel page register, plain 8-bit read (no flip-flop) */
#define HANDLE_PAGE_READ(d_n, c_n) \
  case DMA##d_n##_PAGE_##c_n: \
    r = dma[d(d_n)].chans[d(c_n)].page; \
    q_printf("DMA%i: page read: %#x from Channel %d\n", \
             d_n, r, d(c_n)); \
    break
  HANDLE_X(PAGE_READ);

  /* status register: reading it clears the low nibble (read-to-clear
   * behavior; the high nibble is preserved) */
  case DMA1_STAT_REG:
    r = dma[DMA1].status;
    q_printf("DMA1: Read %u from Status reg\n", r);
    dma[DMA1].status &= 0xf0;	/* clear status bits */
    break;

  case DMA2_STAT_REG:
    r = dma[DMA2].status;
    q_printf("DMA2: Read %u from Status reg\n", r);
    dma[DMA2].status &= 0xf0;	/* clear status bits */
    break;

  /* temporary register: stored value is returned but nothing ever
   * computes it — explicitly flagged as unimplemented */
  case DMA1_TEMP_REG:
    r = dma[DMA1].tmp_reg;
    q_printf("DMA1: Read %u from temporary register unimplemented\n", r);
    break;

  case DMA2_TEMP_REG:
    r = dma[DMA2].tmp_reg;
    q_printf("DMA2: Read %u from temporary register unimplemented\n", r);
    break;

  default:
    q_printf("DMA: Unhandled Read on 0x%04x\n", (Bit16u) port);
  }

  dma_process();		// Not needed in fact

  return r;
}