void interrupt_fixup_int_clk(interrupt_cpu_status_t *cs, CLOCK cpu_clk,
                             CLOCK *int_clk)
{
    unsigned int num_cycles_left = 0, last_num_cycles_left = 0, num_dma;
    unsigned int cycles_left_to_trigger_irq =
        (OPINFO_DELAYS_INTERRUPT(*cs->last_opcode_info_ptr) ? 2 : 1);
    CLOCK last_start_clk = CLOCK_MAX;

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        unsigned int i;
        log_debug("INTREQ %ld NUMWR %i", (long)cpu_clk,
                  maincpu_num_write_cycles());
        for (i = 0; i < cs->num_dma_per_opcode; i++) {
            log_debug("%uCYLEFT %u STCLK %ld", i, cs->num_cycles_left[i],
                      (long)cs->dma_start_clk[i]);
        }
    }
#endif

    /* Walk the DMA blocks of the last opcode backwards until we find the
       one during (or before) which the INTREQ happened. */
    num_dma = cs->num_dma_per_opcode;
    while (num_dma != 0) {
        num_dma--;
        num_cycles_left = cs->num_cycles_left[num_dma];
        if ((cs->dma_start_clk[num_dma] - 1) <= cpu_clk) {
            break;
        }
        last_num_cycles_left = num_cycles_left;
        last_start_clk = cs->dma_start_clk[num_dma];
    }

    /* if the INTREQ happens between two CPU cycles, we have to
       interpolate */
    if (num_cycles_left - last_num_cycles_left > last_start_clk - cpu_clk - 1) {
        num_cycles_left = last_num_cycles_left + last_start_clk - cpu_clk - 1;
    }

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        log_debug("TAKENLEFT %u LASTSTOLENCYCLECLK %ld", num_cycles_left,
                  (long)cs->last_stolen_cycles_clk);
    }
#endif

    *int_clk = cs->last_stolen_cycles_clk;

    if (cs->num_dma_per_opcode > 0 && cs->dma_start_clk[0] > cpu_clk) {
        /* interrupt was triggered before end of last opcode */
        *int_clk -= (cs->dma_start_clk[0] - cpu_clk);
    }

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        log_debug("INTCLK dma shifted %ld (cs->dma_start_clk[0]=%ld)",
                  (long)*int_clk, (long)cs->dma_start_clk[0]);
    }
#endif

    if (num_cycles_left >= cycles_left_to_trigger_irq) {
        *int_clk -= (cycles_left_to_trigger_irq + 1);
    }

#ifdef DEBUGIRQDMA
    if (debug.maincpu_traceflg) {
        log_debug("INTCLK fixed %ld", (long)*int_clk);
    }
#endif
}
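
/* A minimal, self-contained sketch (not emulator code) of the backwards scan
   and interpolation performed above.  The array names mirror the fields of
   interrupt_cpu_status_t purely for illustration, and the values are made
   up: two DMA blocks stole cycles during the opcode, and the INTREQ arrives
   while the CPU is stalled between them. */
#if 0 /* illustration only; compile standalone to try it */
#include <stdio.h>

int main(void)
{
    unsigned long dma_start_clk[2] = { 100, 140 }; /* clk at which each DMA block started */
    unsigned int num_cycles_left[2] = { 5, 2 };    /* opcode cycles still pending at each block */
    unsigned int num_dma = 2;
    unsigned long cpu_clk = 138;                   /* clk of the INTREQ (invented) */

    unsigned int cycles = 0, last_cycles = 0;
    unsigned long last_start_clk = (unsigned long)-1;

    /* Walk the DMA blocks backwards until one started at or before the
       INTREQ, as the while loop above does. */
    while (num_dma != 0) {
        num_dma--;
        cycles = num_cycles_left[num_dma];
        if (dma_start_clk[num_dma] - 1 <= cpu_clk) {
            break;
        }
        last_cycles = cycles;
        last_start_clk = dma_start_clk[num_dma];
    }
    /* The INTREQ fell between two blocks: interpolate the pending count. */
    if (cycles - last_cycles > last_start_clk - cpu_clk - 1) {
        cycles = last_cycles + (unsigned int)(last_start_clk - cpu_clk - 1);
    }
    printf("opcode cycles still pending at INTREQ: %u\n", cycles); /* 3 */
    return 0;
}
#endif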
/* Make sure pending VIC-II alarms are handled before a CPU store, taking
   the opcode's outstanding write cycles into account. */
static void pre_store(void)
{
    vicii_handle_pending_alarms_external(maincpu_num_write_cycles());
}
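
/* A hedged sketch of where pre_store() is meant to be called: from a memory
   store handler, before the write is committed, so that any pending video
   alarm runs with the correct number of outstanding write cycles.
   store_ram() and mem_ram are invented for this illustration; they are not
   the emulator's real names. */
#if 0 /* illustration only */
#include <stdint.h>

static uint8_t mem_ram[0x10000];

static void store_ram(uint16_t addr, uint8_t value)
{
    pre_store();            /* let pending video alarms run first */
    mem_ram[addr] = value;  /* then commit the CPU write */
}
#endif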
/* Handle matrix fetch events.  FIXME: could be made slightly faster.  */
void ted_fetch_alarm_handler(CLOCK offset, void *data)
{
    CLOCK last_opcode_first_write_clk, last_opcode_last_write_clk;

    /* This kludgy thing is used to emulate the behavior of the 6510 when BA
       goes low.  When BA goes low, every read access stops the processor
       until BA is high again; write accesses happen as usual instead.  */
    if (offset > 0) {
        switch (OPINFO_NUMBER(last_opcode_info)) {
            case 0:
                /* In BRK, IRQ and NMI the 3rd, 4th and 5th cycles are write
                   accesses, while the 1st, 2nd, 6th and 7th are read
                   accesses.  */
                last_opcode_first_write_clk = maincpu_clk - 10;
                last_opcode_last_write_clk = maincpu_clk - 6;
                break;

            case 0x20:
                /* In JSR, the 4th and 5th cycles are write accesses, while
                   the 1st, 2nd, 3rd and 6th are read accesses.  */
                last_opcode_first_write_clk = maincpu_clk - 6;
                last_opcode_last_write_clk = maincpu_clk - 4;
                break;

            default:
                /* In all the other opcodes, all the write accesses are the
                   last ones.  */
                if (maincpu_num_write_cycles() != 0) {
                    last_opcode_last_write_clk = maincpu_clk - 2;
                    last_opcode_first_write_clk =
                        maincpu_clk - maincpu_num_write_cycles() * 2;
                } else {
                    last_opcode_first_write_clk = (CLOCK)0;
                    last_opcode_last_write_clk = last_opcode_first_write_clk;
                }
                break;
        }
    } else {
        /* offset <= 0, i.e. offset == 0 */
        /* If we are called with no offset, we don't have to care about
           write accesses.  */
        last_opcode_first_write_clk = last_opcode_last_write_clk = 0;
    }

    {
        CLOCK sub;
        CLOCK write_offset;

        if (ted.fetch_clk < (last_opcode_first_write_clk - 1)
            || ted.fetch_clk > last_opcode_last_write_clk) {
            sub = 0;
        } else {
            sub = last_opcode_last_write_clk - ted.fetch_clk + 1;
        }

        handle_fetch_matrix(offset, sub, &write_offset);

        last_opcode_first_write_clk += write_offset;
        last_opcode_last_write_clk += write_offset;
    }

    if ((offset > 11) && (ted.fastmode)) {
        dma_maincpu_steal_cycles(ted.fetch_clk,
                                 -(((signed)offset - 11) / 2), 0);
        ted_delay_oldclk(-(((signed)offset - 11) / 2));
    }
}
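
/* A worked example (all values invented) of the "sub" computation above: if
   the pending matrix fetch falls inside the last opcode's write window, it
   must be postponed until just past the final write access, since writes
   proceed even while BA is low. */
#if 0 /* illustration only; compile standalone to try it */
#include <stdio.h>

int main(void)
{
    unsigned long first_write = 1000;  /* last_opcode_first_write_clk */
    unsigned long last_write  = 1006;  /* last_opcode_last_write_clk */
    unsigned long fetch_clk   = 1004;  /* ted.fetch_clk */
    unsigned long sub;

    if (fetch_clk < first_write - 1 || fetch_clk > last_write) {
        sub = 0;                           /* fetch clears the write window */
    } else {
        sub = last_write - fetch_clk + 1;  /* here: 1006 - 1004 + 1 = 3 */
    }
    printf("fetch postponed by %lu clk\n", sub);
    return 0;
}
#endif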