diff genesis.c @ 2350:f8b5142c06aa
Allow 68K to return mid-instruction. Adjust how 68K interrupt ack works so int2 busy flag timing is more correct. Fix some other SCD timing issues
author    Michael Pavone <pavone@retrodev.com>
date      Mon, 16 Oct 2023 23:30:04 -0700
parents   83f5529086c5
children  a773b8f09292
--- a/genesis.c	Fri Oct 13 22:44:36 2023 -0700
+++ b/genesis.c	Mon Oct 16 23:30:04 2023 -0700
@@ -471,6 +471,49 @@
 #define REFRESH_INTERVAL 128
 #define REFRESH_DELAY 2
 
+void gen_update_refresh(m68k_context *context)
+{
+	uint32_t interval = MCLKS_PER_68K * REFRESH_INTERVAL;
+	genesis_context *gen = context->system;
+	gen->refresh_counter += context->current_cycle - gen->last_sync_cycle;
+	gen->last_sync_cycle = context->current_cycle;
+	context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / interval);
+	gen->refresh_counter = gen->refresh_counter % interval;
+}
+
+void gen_update_refresh_free_access(m68k_context *context)
+{
+	genesis_context *gen = context->system;
+	uint32_t before = context->current_cycle - 4*MCLKS_PER_68K;
+	if (before < gen->last_sync_cycle) {
+		return;
+	}
+	//Add refresh delays for any accesses that happened beofre the current one
+	gen->refresh_counter += before - gen->last_sync_cycle;
+	uint32_t interval = MCLKS_PER_68K * REFRESH_INTERVAL;
+	uint32_t delay = REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / interval);
+	if (delay) {
+		//To avoid the extra cycles being absorbed in the refresh free update below, we need to update again
+		gen->refresh_counter = gen->refresh_counter % interval;
+		gen->refresh_counter += delay;
+		delay += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / interval);
+		context->current_cycle += delay;
+	}
+	gen->last_sync_cycle = context->current_cycle;
+	//advance refresh counter for the current access, but don't generate delays
+	gen->refresh_counter += 4*MCLKS_PER_68K;
+	gen->refresh_counter = gen->refresh_counter % interval;
+}
+
+void gen_update_refresh_no_wait(m68k_context *context)
+{
+	uint32_t interval = MCLKS_PER_68K * REFRESH_INTERVAL;
+	genesis_context *gen = context->system;
+	gen->refresh_counter += context->current_cycle - gen->last_sync_cycle;
+	gen->last_sync_cycle = context->current_cycle;
+	gen->refresh_counter = gen->refresh_counter % interval;
+}
+
 #include <limits.h>
 #define ADJUST_BUFFER (8*MCLKS_LINE*313)
 #define MAX_NO_ADJUST (UINT_MAX-ADJUST_BUFFER)
@@ -480,12 +523,11 @@
 	genesis_context * gen = context->system;
 	vdp_context * v_context = gen->vdp;
 	z80_context * z_context = gen->z80;
-	//lame estimation of refresh cycle delay
-	gen->refresh_counter += context->current_cycle - gen->last_sync_cycle;
-	if (!gen->bus_busy) {
-		context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
+	if (gen->bus_busy) {
+		gen_update_refresh_no_wait(context);
+	} else {
+		gen_update_refresh(context);
 	}
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
 
 	uint32_t mclks = context->current_cycle;
 	sync_z80(gen, mclks);
@@ -561,11 +603,6 @@
 			gen->frame_end = vdp_cycles_to_frame_end(v_context);
 			context->sync_cycle = gen->frame_end;
 			//printf("Set sync cycle to: %d @ %d, vcounter: %d, hslot: %d\n", context->sync_cycle, context->current_cycle, v_context->vcounter, v_context->hslot);
-			if (context->int_ack) {
-				//printf("acknowledging %d @ %d:%d, vcounter: %d, hslot: %d\n", context->int_ack, context->current_cycle, v_context->cycles, v_context->vcounter, v_context->hslot);
-				vdp_int_ack(v_context);
-				context->int_ack = 0;
-			}
 			if (!address && (gen->header.enter_debugger || gen->header.save_state)) {
 				context->sync_cycle = context->current_cycle + 1;
 			}
@@ -627,7 +664,26 @@
 			context->sync_cycle = context->current_cycle + 1;
 		}
 	}
-	gen->last_sync_cycle = context->current_cycle;
+	return context;
+}
+
+static m68k_context *int_ack(m68k_context *context)
+{
+	genesis_context * gen = context->system;
+	vdp_context * v_context = gen->vdp;
+	//printf("acknowledging %d @ %d:%d, vcounter: %d, hslot: %d\n", context->int_ack, context->current_cycle, v_context->cycles, v_context->vcounter, v_context->hslot);
+	vdp_run_context(v_context, context->current_cycle);
+	vdp_int_ack(v_context);
+
+	//the Genesis responds to these exclusively with !VPA which means its a slow
+	//6800 operation. documentation says these can take between 10 and 19 cycles.
+	//actual results measurements seem to suggest it's actually between 9 and 18
+	//Base 68K core has added 4 cycles for a normal int ack cycle already
+	//We add 5 + the current cycle count (in 68K cycles) mod 10 to simulate the
+	//additional variable delay from the use of the 6800 cycle
+	uint32_t cycle_count = context->current_cycle / context->options->gen.clock_divider;
+	context->current_cycle += 5 + (cycle_count % 10);
+
 	return context;
 }
 
@@ -644,10 +700,7 @@
 
 	//printf("vdp_port write: %X, value: %X, cycle: %d\n", vdp_port, value, context->current_cycle);
 	//do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access
-	gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle;
-	context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
-	gen->last_sync_cycle = context->current_cycle;
+	gen_update_refresh_free_access(context);
 
 	sync_components(context, 0);
 	vdp_context *v_context = gen->vdp;
@@ -726,16 +779,14 @@
 		vdp_test_port_write(gen->vdp, value);
 	}
 
-	gen->last_sync_cycle -= 4 * MCLKS_PER_68K;
 	//refresh may have happened while we were waiting on the VDP,
 	//so advance refresh_counter but don't add any delays
 	if (vdp_port >= 4 && vdp_port < 8 && v_context->cycles != before_cycle) {
 		gen->refresh_counter = 0;
+		gen->last_sync_cycle = context->current_cycle;
 	} else {
-		gen->refresh_counter += (context->current_cycle - gen->last_sync_cycle);
-		gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
+		gen_update_refresh_no_wait(context);
 	}
-	gen->last_sync_cycle = context->current_cycle;
 
 	return context;
 }
@@ -785,10 +836,7 @@
 	uint16_t value;
 
 	//do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access
-	gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle;
-	context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
-	gen->last_sync_cycle = context->current_cycle;
+	gen_update_refresh_free_access(context);
 
 	sync_components(context, 0);
 	vdp_context * v_context = gen->vdp;
@@ -816,12 +864,9 @@
 		gen->bus_busy = 0;
 	}
 
-	gen->last_sync_cycle -= 4 * MCLKS_PER_68K;
 	//refresh may have happened while we were waiting on the VDP,
 	//so advance refresh_counter but don't add any delays
-	gen->refresh_counter += (context->current_cycle - gen->last_sync_cycle);
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
-	gen->last_sync_cycle = context->current_cycle;
+	gen_update_refresh_no_wait(context);
 
 	return value;
 }
@@ -882,10 +927,7 @@
 	genesis_context * gen = context->system;
 
 	//do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access
-	gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle;
-	context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
-	gen->last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K;
+	gen_update_refresh_free_access(context);
 
 	if (location < 0x10000) {
 		//Access to Z80 memory incurs a one 68K cycle wait state
@@ -1014,8 +1056,7 @@
 	}
 
 	//no refresh delays during IO access
-	gen->refresh_counter += context->current_cycle - gen->last_sync_cycle;
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
+	gen_update_refresh_no_wait(context);
 
 	return context;
 }
@@ -1041,10 +1082,7 @@
 	genesis_context *gen = context->system;
 
 	//do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access
-	gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle;
-	context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
-	gen->last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K;
+	gen_update_refresh_free_access(context);
 
 	if (location < 0x10000) {
 		//Access to Z80 memory incurs a one 68K cycle wait state
@@ -1143,9 +1181,7 @@
 	}
 
 	//no refresh delays during IO access
-	gen->refresh_counter += context->current_cycle - gen->last_sync_cycle;
-	gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
-	gen->last_sync_cycle = context->current_cycle;
+	gen_update_refresh_no_wait(context);
 
 	return value;
 }
@@ -2327,7 +2363,7 @@
 	info.map = gen->header.info.map = NULL;
 
 	m68k_options *opts = malloc(sizeof(m68k_options));
-	init_m68k_opts(opts, map, map_chunks, MCLKS_PER_68K, sync_components);
+	init_m68k_opts(opts, map, map_chunks, MCLKS_PER_68K, sync_components, int_ack);
 	if (!strcmp(tern_find_ptr_default(model, "tas", "broken"), "broken")) {
 		opts->gen.flags |= M68K_OPT_BROKEN_READ_MODIFY;
 	}
@@ -2400,7 +2436,7 @@
 	uint32_t num_chunks = cd_chunks + base_chunks;
 
 	m68k_options *opts = malloc(sizeof(m68k_options));
-	init_m68k_opts(opts, map, num_chunks, MCLKS_PER_68K, sync_components);
+	init_m68k_opts(opts, map, num_chunks, MCLKS_PER_68K, sync_components, int_ack);
 	//TODO: make this configurable
 	opts->gen.flags |= M68K_OPT_BROKEN_READ_MODIFY;
 	gen->m68k = init_68k_context(opts, NULL);
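For readers following the refresh changes above, here is a minimal standalone sketch of the accounting that gen_update_refresh centralizes. It uses the constants from the diff (REFRESH_INTERVAL of 128 and REFRESH_DELAY of 2, both in 68K cycles) and assumes a divider of 7 master clocks per 68K cycle; it illustrates the counter arithmetic only and is not the emulator's actual interface.

#include <stdint.h>
#include <stdio.h>

#define MCLKS_PER_68K    7    /* assumed master clocks per 68K cycle */
#define REFRESH_INTERVAL 128  /* 68K cycles between refresh penalties (from the diff) */
#define REFRESH_DELAY    2    /* 68K cycles stolen by each refresh (from the diff) */

/* Minimal model of gen_update_refresh: accumulate the master clocks elapsed
 * since the last sync, charge REFRESH_DELAY 68K cycles for every full
 * REFRESH_INTERVAL that has passed, and keep the remainder for next time. */
typedef struct {
	uint32_t refresh_counter;
	uint32_t last_sync_cycle;
} refresh_model;

static uint32_t update_refresh(refresh_model *m, uint32_t current_cycle)
{
	uint32_t interval = MCLKS_PER_68K * REFRESH_INTERVAL;
	m->refresh_counter += current_cycle - m->last_sync_cycle;
	m->last_sync_cycle = current_cycle;
	uint32_t delay = REFRESH_DELAY * MCLKS_PER_68K * (m->refresh_counter / interval);
	m->refresh_counter %= interval;
	return current_cycle + delay;  /* new value for the CPU's current_cycle */
}

int main(void)
{
	refresh_model m = {0, 0};
	/* hypothetical sync points just to show when a refresh penalty lands */
	uint32_t cycles[] = {400, 820, 1000};
	for (int i = 0; i < 3; i++) {
		printf("sync @ %u -> adjusted cycle %u\n", cycles[i], update_refresh(&m, cycles[i]));
	}
	return 0;
}

With the values above, no penalty accrues until the accumulated master clocks cross 7 * 128 = 896, at which point 2 * 7 = 14 extra master clocks are charged and the remainder carries forward, which is the behavior the diff factors out of sync_components and the bus access handlers.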
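A similar sketch of the variable interrupt-acknowledge delay added by the new int_ack handler: the 6800-style !VPA acknowledge costs the 4 cycles the base 68K core already charges plus 5 + (current 68K cycle count mod 10), giving a total between 9 and 18 cycles as the comment in the diff describes. CLOCK_DIVIDER standing in for context->options->gen.clock_divider at a value of 7 is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define CLOCK_DIVIDER 7  /* assumed stand-in for context->options->gen.clock_divider */

/* Extra cycles charged on top of the base core's 4-cycle int ack, mirroring
 * the arithmetic in the new int_ack handler: 5 + (68K cycle count mod 10). */
static uint32_t int_ack_extra_delay(uint32_t current_cycle)
{
	uint32_t cycle_count = current_cycle / CLOCK_DIVIDER;
	return 5 + (cycle_count % 10);
}

int main(void)
{
	/* show the spread of the total ack cost over ten consecutive 68K cycles */
	for (uint32_t cyc = 0; cyc < 10; cyc++) {
		printf("68K cycle mod 10 == %u -> total ack cost %u cycles\n",
		       cyc, 4 + int_ack_extra_delay(cyc * CLOCK_DIVIDER));
	}
	return 0;
}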