/* Adjust `*int_clk` (the clock at which a pending interrupt is considered
   to have been asserted) to compensate for DMA-stolen cycles that occurred
   during the current opcode.  Walks the per-opcode DMA records backwards to
   find how many CPU cycles were still left when the interrupt request
   arrived, interpolating when the request fell between two CPU cycles.
   NOTE(review): relies on unsigned wraparound — `last_start_clk` starts at
   CLOCK_MAX as a sentinel, and the interpolation comparison below is only
   meaningful modulo the CLOCK width; do not "fix" the arithmetic. */
void interrupt_fixup_int_clk(interrupt_cpu_status_t *cs, CLOCK cpu_clk, CLOCK *int_clk)
{
    unsigned int num_cycles_left = 0, last_num_cycles_left = 0, num_dma;
    /* A taken branch with no page crossing delays the interrupt by one
       extra cycle (see OPINFO_DELAYS_INTERRUPT). */
    unsigned int cycles_left_to_trigger_irq =
        (OPINFO_DELAYS_INTERRUPT(*cs->last_opcode_info_ptr) ? 2 : 1);
    CLOCK last_start_clk = CLOCK_MAX;   /* sentinel: no later DMA block seen yet */

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        unsigned int i;
        log_debug("INTREQ %ld NUMWR %i", (long)cpu_clk, maincpu_num_write_cycles());
        for (i = 0; i < cs->num_dma_per_opcode; i++)
            log_debug("%iCYLEFT %i STCLK %i", i, cs->num_cycles_left[i], cs->dma_start_clk[i]);
    }
#endif

    /* Scan DMA blocks newest-first until we find the one that had already
       started when the interrupt request arrived. */
    num_dma = cs->num_dma_per_opcode;
    while (num_dma != 0) {
        num_dma--;
        num_cycles_left = cs->num_cycles_left[num_dma];
        if ((cs->dma_start_clk[num_dma] - 1) <= cpu_clk)
            break;
        last_num_cycles_left = num_cycles_left;
        last_start_clk = cs->dma_start_clk[num_dma];
    }

    /* if the INTREQ happens between two CPU cycles, we have to interpolate */
    if (num_cycles_left - last_num_cycles_left > last_start_clk - cpu_clk - 1)
        num_cycles_left = last_num_cycles_left + last_start_clk - cpu_clk - 1;

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        log_debug("TAKENLEFT %i LASTSTOLENCYCLECLK %i", num_cycles_left,cs->last_stolen_cycles_clk);
    }
#endif

    *int_clk = cs->last_stolen_cycles_clk;

    if (cs->num_dma_per_opcode > 0 && cs->dma_start_clk[0] > cpu_clk) {
        /* interrupt was triggered before end of last opcode */
        *int_clk -= (cs->dma_start_clk[0] - cpu_clk);
    }

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        log_debug("INTCLK dma shifted %i (cs->dma_start_clk[0]=%i", *int_clk, cs->dma_start_clk[0]);
    }
#endif

    /* If enough cycles were still pending, pull the effective interrupt
       clock back so the trigger test in the IRQ/NMI delay checks fires at
       the emulated-hardware-accurate moment. */
    if (num_cycles_left >= cycles_left_to_trigger_irq)
        *int_clk -= (cycles_left_to_trigger_irq + 1);

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        log_debug("INTCLK fixed %i", *int_clk);
    }
#endif
}
/* Return nonzero if a pending IRQ should be dispatched now.  This takes
   account for the internal delays of the 6510, but does not actually check
   the status of the IRQ line. */
inline static int interrupt_check_irq_delay(interrupt_cpu_status_t *cs, CLOCK cpu_clk)
{
    /* Earliest clock at which the IRQ may be taken: assertion time plus the
       CPU-internal delay, plus one extra cycle when the previous opcode was
       a taken branch with no page-boundary crossing (which delays both IRQ
       and NMI recognition by one cycle). */
    CLOCK trigger_clk = cs->irq_clk + INTERRUPT_DELAY;

    if (OPINFO_DELAYS_INTERRUPT(*cs->last_opcode_info_ptr)) {
        trigger_clk++;
    }

    if (cpu_clk < trigger_clk) {
        /* Too early — the interrupt sequence cannot start yet. */
        return 0;
    }

    if (OPINFO_ENABLES_IRQ(*cs->last_opcode_info_ptr)) {
        /* The previous opcode cleared the I flag (1 -> 0); the 6510 executes
           one more opcode before entering the IRQ routine, so just remember
           that an IRQ is pending. */
        cs->global_pending_int |= IK_IRQPEND;
        return 0;
    }

    return 1;
}
/* Adjust `*int_clk` to compensate for DMA-stolen cycles during the current
   opcode (simpler variant: no shift for DMA that started before the
   interrupt request).  Walks the per-opcode DMA records newest-first to
   find how many CPU cycles were still left when the request arrived.
   NOTE(review): intentionally depends on unsigned wraparound —
   `last_start_clk` starts at CLOCK_MAX as a sentinel and the interpolation
   comparison is modular; do not "fix" the arithmetic. */
void interrupt_fixup_int_clk(interrupt_cpu_status_t *cs, CLOCK cpu_clk, CLOCK *int_clk)
{
    unsigned int num_cycles_left = 0, last_num_cycles_left = 0, num_dma;
    /* A taken branch with no page crossing adds one cycle of interrupt delay. */
    unsigned int cycles_left_to_trigger_irq =
        (OPINFO_DELAYS_INTERRUPT(*cs->last_opcode_info_ptr) ? 2 : 1);
    CLOCK last_start_clk = CLOCK_MAX;   /* sentinel: no later DMA block seen yet */

    /* { unsigned int i; log_debug("INTREQ %ld NUMWR %i", (long)cpu_clk, maincpu_num_write_cycles());
       for (i = 0; i < cs->num_dma_per_opcode; i++)
           log_debug("%iCYLEFT %i STCLK %i", i, cs->num_cycles_left[i], cs->dma_start_clk[i]); } */

    /* Scan DMA blocks newest-first until we reach one that had already
       started when the interrupt request arrived. */
    num_dma = cs->num_dma_per_opcode;
    while (num_dma != 0) {
        num_dma--;
        num_cycles_left = cs->num_cycles_left[num_dma];
        if ((cs->dma_start_clk[num_dma] - 1) <= cpu_clk)
            break;
        last_num_cycles_left = num_cycles_left;
        last_start_clk = cs->dma_start_clk[num_dma];
    }

    /* if the INTREQ happens between two CPU cycles, we have to interpolate */
    if (num_cycles_left - last_num_cycles_left > last_start_clk - cpu_clk - 1)
        num_cycles_left = last_num_cycles_left + last_start_clk - cpu_clk - 1;

    /*log_debug("TAKENLEFT %i", num_cycles_left);*/

    *int_clk = cs->last_stolen_cycles_clk;

    /* Pull the effective interrupt clock back when enough cycles were still
       pending for the interrupt to be recognized before the opcode ended. */
    if (num_cycles_left >= cycles_left_to_trigger_irq)
        *int_clk -= (cycles_left_to_trigger_irq + 1);

    /*log_debug("INTCLK %i", *int_clk);*/
}
/* Return nonzero if a pending NMI should be dispatched now.  This takes
   account for the internal delays of the 6510, but does not actually check
   the status of the NMI line. */
inline static int interrupt_check_nmi_delay(interrupt_cpu_status_t *cs, CLOCK cpu_clk)
{
    CLOCK trigger_clk;

    /* BRK (0x00) delays the NMI by one opcode. */
    /* TODO DO_INTERRUPT sets last opcode to 0: can NMI occur right after IRQ? */
    if (OPINFO_NUMBER(*cs->last_opcode_info_ptr) == 0x00) {
        return 0;
    }

    /* Earliest clock at which the NMI may be taken: assertion time plus the
       CPU-internal delay, plus one extra cycle if the previous opcode was a
       taken branch with no page-boundary crossing. */
    trigger_clk = cs->nmi_clk + INTERRUPT_DELAY;
    if (OPINFO_DELAYS_INTERRUPT(*cs->last_opcode_info_ptr)) {
        trigger_clk++;
    }

    return cpu_clk >= trigger_clk;
}
/* Asynchronously steal `num' cycles from the CPU, starting from cycle
   `start_clk'.  Records the DMA block in the per-opcode bookkeeping used by
   interrupt_fixup_int_clk(), advances maincpu_clk, and shifts the pending
   IRQ/NMI clocks so interrupt timing stays hardware-accurate across the
   stolen cycles.  NOTE(review): statement order matters throughout — the
   irq_sub/nmi_sub tests read maincpu_clk and num_dma_per_opcode *after* the
   new block is recorded but *before* maincpu_clk is advanced. */
void dma_maincpu_steal_cycles(CLOCK start_clk, int num, CLOCK sub)
{
    CLOCK irq_sub = 0;
    CLOCK nmi_sub = 0;
    unsigned int cycles_left_to_trigger_irq;
    interrupt_cpu_status_t *cs = maincpu_int_status;
    CLOCK dma_start;

    /* Nothing to steal. */
    if (num == 0)
        return;

    dma_start = start_clk + sub;

    /* Merge with the previous steal when it ended exactly where this one
       starts; otherwise start a fresh run of stolen cycles. */
    if (start_clk == cs->last_stolen_cycles_clk)
        cs->num_last_stolen_cycles += num;
    else
        cs->num_last_stolen_cycles = num;

    /*log_debug("START %i NUM %i SUB %i MAIN %i DMAST %i", start_clk, num, sub, maincpu_clk, dma_start);*/

    /* Record this DMA block for the current opcode: how many CPU cycles of
       the opcode were still left when the DMA began, and when it began. */
    cs->num_cycles_left[cs->num_dma_per_opcode] = maincpu_clk - dma_start;
    cs->dma_start_clk[cs->num_dma_per_opcode] = dma_start;
    (cs->num_dma_per_opcode)++;

#ifdef DEBUG
    if (debug.maincpu_traceflg)
        debug_dma("VICII", start_clk, num);
#endif

    /* A taken branch with no page crossing adds one cycle of interrupt delay. */
    cycles_left_to_trigger_irq = (OPINFO_DELAYS_INTERRUPT(*cs->last_opcode_info_ptr) ? 2 : 1);

    /* If the DMA begins exactly at the interrupt-trigger cycle of the
       current opcode (and it is the first DMA of the opcode), the pending
       IRQ/NMI clock must be nudged back by one after the shift below. */
    if (cs->irq_clk >= start_clk
        && dma_start == (maincpu_clk - cycles_left_to_trigger_irq)
        && cs->num_dma_per_opcode == 1) {
        /*log_debug("DECR");*/
        irq_sub = 1;
    }

    if (cs->nmi_clk >= start_clk
        && dma_start == (maincpu_clk - cycles_left_to_trigger_irq)
        && cs->num_dma_per_opcode == 1) {
        /*log_debug("DECR");*/
        nmi_sub = 1;
    }

    /* Advance the main clock past the stolen cycles. */
    maincpu_clk += num;
    cs->last_stolen_cycles_clk = dma_start + num;

    /*log_debug("IRQCLK %i LASTSTOLEN %i", cs->irq_clk, cs->last_stolen_cycles_clk);*/

    /* An interrupt asserted inside the stolen window is deferred to the end
       of it; one asserted before the window is simply shifted by `num'. */
    if (cs->irq_clk > dma_start)
        cs->irq_clk = cs->last_stolen_cycles_clk;
    else
        cs->irq_clk += num;

    if (cs->nmi_clk > dma_start)
        cs->nmi_clk = cs->last_stolen_cycles_clk;
    else
        cs->nmi_clk += num;

    cs->irq_clk -= irq_sub;
    cs->nmi_clk -= nmi_sub;

    /*log_debug("NEWIRQCLK %i", cs->irq_clk);*/
}