comparison genesis.c @ 2350:f8b5142c06aa

Allow 68K to return mid-instruction. Adjust how 68K interrupt ack works so int2 busy flag timing is more correct. Fix some other SCD timing issues
author Michael Pavone <pavone@retrodev.com>
date Mon, 16 Oct 2023 23:30:04 -0700
parents 83f5529086c5
children a773b8f09292
comparison
equal deleted inserted replaced
2349:f0fc6c09517d 2350:f8b5142c06aa
469 } 469 }
470 470
471 #define REFRESH_INTERVAL 128 471 #define REFRESH_INTERVAL 128
472 #define REFRESH_DELAY 2 472 #define REFRESH_DELAY 2
473 473
//Charge DRAM refresh delay for bus time elapsed since the last sync:
//one REFRESH_DELAY (in 68K cycles) per completed REFRESH_INTERVAL.
474 void gen_update_refresh(m68k_context *context)
475 {
476 uint32_t interval = MCLKS_PER_68K * REFRESH_INTERVAL;
477 genesis_context *gen = context->system;
//accumulate master-clock cycles not yet accounted for in refresh bookkeeping
478 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle;
479 gen->last_sync_cycle = context->current_cycle;
//each full interval stalls the 68K for REFRESH_DELAY of its cycles
480 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / interval);
481 gen->refresh_counter = gen->refresh_counter % interval;
482 }
483
//Refresh accounting for an access that is itself refresh-free (VDP/IO area):
//delays are charged only for the time *before* the current 4-cycle access;
//the access itself advances the counter without generating a delay.
484 void gen_update_refresh_free_access(m68k_context *context)
485 {
486 genesis_context *gen = context->system;
//NOTE(review): unsigned subtraction — assumes current_cycle >= 4*MCLKS_PER_68K;
//an early wrap would skip the guard below. TODO confirm callers guarantee this.
487 uint32_t before = context->current_cycle - 4*MCLKS_PER_68K;
488 if (before < gen->last_sync_cycle) {
489 return;
490 }
491 //Add refresh delays for any accesses that happened before the current one
492 gen->refresh_counter += before - gen->last_sync_cycle;
493 uint32_t interval = MCLKS_PER_68K * REFRESH_INTERVAL;
494 uint32_t delay = REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / interval);
495 if (delay) {
496 //To avoid the extra cycles being absorbed in the refresh free update below, we need to update again
497 gen->refresh_counter = gen->refresh_counter % interval;
//the delay itself consumes bus time, so it feeds back into the counter
498 gen->refresh_counter += delay;
499 delay += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / interval);
500 context->current_cycle += delay;
501 }
502 gen->last_sync_cycle = context->current_cycle;
503 //advance refresh counter for the current access, but don't generate delays
504 gen->refresh_counter += 4*MCLKS_PER_68K;
505 gen->refresh_counter = gen->refresh_counter % interval;
506 }
507
//Advance refresh bookkeeping WITHOUT charging any delay — used when the 68K
//should not pay a refresh penalty (e.g. while gen->bus_busy is set, or after
//an access already absorbed the waiting time).
508 void gen_update_refresh_no_wait(m68k_context *context)
509 {
510 uint32_t interval = MCLKS_PER_68K * REFRESH_INTERVAL;
511 genesis_context *gen = context->system;
//consume elapsed cycles into the counter, keeping only the sub-interval remainder
512 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle;
513 gen->last_sync_cycle = context->current_cycle;
514 gen->refresh_counter = gen->refresh_counter % interval;
515 }
516
474 #include <limits.h> 517 #include <limits.h>
475 #define ADJUST_BUFFER (8*MCLKS_LINE*313) 518 #define ADJUST_BUFFER (8*MCLKS_LINE*313)
476 #define MAX_NO_ADJUST (UINT_MAX-ADJUST_BUFFER) 519 #define MAX_NO_ADJUST (UINT_MAX-ADJUST_BUFFER)
477 520
478 static m68k_context *sync_components(m68k_context * context, uint32_t address) 521 static m68k_context *sync_components(m68k_context * context, uint32_t address)
479 { 522 {
480 genesis_context * gen = context->system; 523 genesis_context * gen = context->system;
481 vdp_context * v_context = gen->vdp; 524 vdp_context * v_context = gen->vdp;
482 z80_context * z_context = gen->z80; 525 z80_context * z_context = gen->z80;
483 //lame estimation of refresh cycle delay 526 if (gen->bus_busy) {
484 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle; 527 gen_update_refresh_no_wait(context);
485 if (!gen->bus_busy) { 528 } else {
486 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); 529 gen_update_refresh(context);
487 } 530 }
488 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
489 531
490 uint32_t mclks = context->current_cycle; 532 uint32_t mclks = context->current_cycle;
491 sync_z80(gen, mclks); 533 sync_z80(gen, mclks);
492 sync_sound(gen, mclks); 534 sync_sound(gen, mclks);
493 vdp_run_context(v_context, mclks); 535 vdp_run_context(v_context, mclks);
559 gen->last_flush_cycle = mclks; 601 gen->last_flush_cycle = mclks;
560 } 602 }
561 gen->frame_end = vdp_cycles_to_frame_end(v_context); 603 gen->frame_end = vdp_cycles_to_frame_end(v_context);
562 context->sync_cycle = gen->frame_end; 604 context->sync_cycle = gen->frame_end;
563 //printf("Set sync cycle to: %d @ %d, vcounter: %d, hslot: %d\n", context->sync_cycle, context->current_cycle, v_context->vcounter, v_context->hslot); 605 //printf("Set sync cycle to: %d @ %d, vcounter: %d, hslot: %d\n", context->sync_cycle, context->current_cycle, v_context->vcounter, v_context->hslot);
564 if (context->int_ack) {
565 //printf("acknowledging %d @ %d:%d, vcounter: %d, hslot: %d\n", context->int_ack, context->current_cycle, v_context->cycles, v_context->vcounter, v_context->hslot);
566 vdp_int_ack(v_context);
567 context->int_ack = 0;
568 }
569 if (!address && (gen->header.enter_debugger || gen->header.save_state)) { 606 if (!address && (gen->header.enter_debugger || gen->header.save_state)) {
570 context->sync_cycle = context->current_cycle + 1; 607 context->sync_cycle = context->current_cycle + 1;
571 } 608 }
572 adjust_int_cycle(context, v_context); 609 adjust_int_cycle(context, v_context);
573 if (gen->reset_cycle < context->target_cycle) { 610 if (gen->reset_cycle < context->target_cycle) {
625 free(save_path); 662 free(save_path);
626 } else if(gen->header.save_state) { 663 } else if(gen->header.save_state) {
627 context->sync_cycle = context->current_cycle + 1; 664 context->sync_cycle = context->current_cycle + 1;
628 } 665 }
629 } 666 }
630 gen->last_sync_cycle = context->current_cycle; 667 return context;
668 }
669
//Interrupt-acknowledge callback for the 68K core: runs the VDP up to the
//current cycle so the interrupt state being acknowledged is accurate, clears
//the pending interrupt in the VDP, then models the variable-length ack delay.
670 static m68k_context *int_ack(m68k_context *context)
671 {
672 genesis_context * gen = context->system;
673 vdp_context * v_context = gen->vdp;
674 //printf("acknowledging %d @ %d:%d, vcounter: %d, hslot: %d\n", context->int_ack, context->current_cycle, v_context->cycles, v_context->vcounter, v_context->hslot);
675 vdp_run_context(v_context, context->current_cycle);
676 vdp_int_ack(v_context);
677
678 //the Genesis responds to these exclusively with !VPA which means it's a slow
679 //6800 operation. documentation says these can take between 10 and 19 cycles.
680 //actual results measurements seem to suggest it's actually between 9 and 18
681 //Base 68K core has added 4 cycles for a normal int ack cycle already
682 //We add 5 + the current cycle count (in 68K cycles) mod 10 to simulate the
683 //additional variable delay from the use of the 6800 cycle
684 uint32_t cycle_count = context->current_cycle / context->options->gen.clock_divider;
685 context->current_cycle += 5 + (cycle_count % 10);
686
631 return context; 687 return context;
632 } 688 }
633 689
634 static m68k_context * vdp_port_write(uint32_t vdp_port, m68k_context * context, uint16_t value) 690 static m68k_context * vdp_port_write(uint32_t vdp_port, m68k_context * context, uint16_t value)
635 { 691 {
642 } 698 }
643 vdp_port &= 0x1F; 699 vdp_port &= 0x1F;
644 //printf("vdp_port write: %X, value: %X, cycle: %d\n", vdp_port, value, context->current_cycle); 700 //printf("vdp_port write: %X, value: %X, cycle: %d\n", vdp_port, value, context->current_cycle);
645 701
646 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access 702 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access
647 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; 703 gen_update_refresh_free_access(context);
648 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
649 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
650 gen->last_sync_cycle = context->current_cycle;
651 704
652 sync_components(context, 0); 705 sync_components(context, 0);
653 vdp_context *v_context = gen->vdp; 706 vdp_context *v_context = gen->vdp;
654 uint32_t before_cycle = v_context->cycles; 707 uint32_t before_cycle = v_context->cycles;
655 if (vdp_port < 0x10) { 708 if (vdp_port < 0x10) {
724 psg_write(gen->psg, value); 777 psg_write(gen->psg, value);
725 } else { 778 } else {
726 vdp_test_port_write(gen->vdp, value); 779 vdp_test_port_write(gen->vdp, value);
727 } 780 }
728 781
729 gen->last_sync_cycle -= 4 * MCLKS_PER_68K;
730 //refresh may have happened while we were waiting on the VDP, 782 //refresh may have happened while we were waiting on the VDP,
731 //so advance refresh_counter but don't add any delays 783 //so advance refresh_counter but don't add any delays
732 if (vdp_port >= 4 && vdp_port < 8 && v_context->cycles != before_cycle) { 784 if (vdp_port >= 4 && vdp_port < 8 && v_context->cycles != before_cycle) {
733 gen->refresh_counter = 0; 785 gen->refresh_counter = 0;
734 } else { 786 gen->last_sync_cycle = context->current_cycle;
735 gen->refresh_counter += (context->current_cycle - gen->last_sync_cycle); 787 } else {
736 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); 788 gen_update_refresh_no_wait(context);
737 } 789 }
738 gen->last_sync_cycle = context->current_cycle;
739 return context; 790 return context;
740 } 791 }
741 792
742 static m68k_context * vdp_port_write_b(uint32_t vdp_port, m68k_context * context, uint8_t value) 793 static m68k_context * vdp_port_write_b(uint32_t vdp_port, m68k_context * context, uint8_t value)
743 { 794 {
783 } 834 }
784 vdp_port &= 0x1F; 835 vdp_port &= 0x1F;
785 uint16_t value; 836 uint16_t value;
786 837
787 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access 838 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access
788 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; 839 gen_update_refresh_free_access(context);
789 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
790 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
791 gen->last_sync_cycle = context->current_cycle;
792 840
793 sync_components(context, 0); 841 sync_components(context, 0);
794 vdp_context * v_context = gen->vdp; 842 vdp_context * v_context = gen->vdp;
795 uint32_t before_cycle = context->current_cycle; 843 uint32_t before_cycle = context->current_cycle;
796 if (vdp_port < 0x10) { 844 if (vdp_port < 0x10) {
814 gen->bus_busy = 1; 862 gen->bus_busy = 1;
815 sync_z80(gen, context->current_cycle); 863 sync_z80(gen, context->current_cycle);
816 gen->bus_busy = 0; 864 gen->bus_busy = 0;
817 } 865 }
818 866
819 gen->last_sync_cycle -= 4 * MCLKS_PER_68K;
820 //refresh may have happened while we were waiting on the VDP, 867 //refresh may have happened while we were waiting on the VDP,
821 //so advance refresh_counter but don't add any delays 868 //so advance refresh_counter but don't add any delays
822 gen->refresh_counter += (context->current_cycle - gen->last_sync_cycle); 869 gen_update_refresh_no_wait(context);
823 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
824 gen->last_sync_cycle = context->current_cycle;
825 return value; 870 return value;
826 } 871 }
827 872
828 static uint8_t vdp_port_read_b(uint32_t vdp_port, m68k_context * context) 873 static uint8_t vdp_port_read_b(uint32_t vdp_port, m68k_context * context)
829 { 874 {
880 static m68k_context * io_write(uint32_t location, m68k_context * context, uint8_t value) 925 static m68k_context * io_write(uint32_t location, m68k_context * context, uint8_t value)
881 { 926 {
882 genesis_context * gen = context->system; 927 genesis_context * gen = context->system;
883 928
884 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access 929 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access
885 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; 930 gen_update_refresh_free_access(context);
886 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
887 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
888 gen->last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K;
889 931
890 if (location < 0x10000) { 932 if (location < 0x10000) {
891 //Access to Z80 memory incurs a one 68K cycle wait state 933 //Access to Z80 memory incurs a one 68K cycle wait state
892 context->current_cycle += MCLKS_PER_68K; 934 context->current_cycle += MCLKS_PER_68K;
893 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) { 935 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) {
1012 } 1054 }
1013 } 1055 }
1014 } 1056 }
1015 1057
1016 //no refresh delays during IO access 1058 //no refresh delays during IO access
1017 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle; 1059 gen_update_refresh_no_wait(context);
1018 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
1019 return context; 1060 return context;
1020 } 1061 }
1021 1062
1022 static m68k_context * io_write_w(uint32_t location, m68k_context * context, uint16_t value) 1063 static m68k_context * io_write_w(uint32_t location, m68k_context * context, uint16_t value)
1023 { 1064 {
1039 { 1080 {
1040 uint8_t value; 1081 uint8_t value;
1041 genesis_context *gen = context->system; 1082 genesis_context *gen = context->system;
1042 1083
1043 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access 1084 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access
1044 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; 1085 gen_update_refresh_free_access(context);
1045 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL));
1046 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
1047 gen->last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K;
1048 1086
1049 if (location < 0x10000) { 1087 if (location < 0x10000) {
1050 //Access to Z80 memory incurs a one 68K cycle wait state 1088 //Access to Z80 memory incurs a one 68K cycle wait state
1051 context->current_cycle += MCLKS_PER_68K; 1089 context->current_cycle += MCLKS_PER_68K;
1052 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) { 1090 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) {
1141 } 1179 }
1142 } 1180 }
1143 } 1181 }
1144 1182
1145 //no refresh delays during IO access 1183 //no refresh delays during IO access
1146 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle; 1184 gen_update_refresh_no_wait(context);
1147 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL);
1148 gen->last_sync_cycle = context->current_cycle;
1149 return value; 1185 return value;
1150 } 1186 }
1151 1187
1152 static uint16_t io_read_w(uint32_t location, m68k_context * context) 1188 static uint16_t io_read_w(uint32_t location, m68k_context * context)
1153 { 1189 {
2325 cd->m68k->options->address_log = (ym_opts & OPT_ADDRESS_LOG) ? fopen("address_sub.log", "w") : NULL; 2361 cd->m68k->options->address_log = (ym_opts & OPT_ADDRESS_LOG) ? fopen("address_sub.log", "w") : NULL;
2326 } 2362 }
2327 info.map = gen->header.info.map = NULL; 2363 info.map = gen->header.info.map = NULL;
2328 2364
2329 m68k_options *opts = malloc(sizeof(m68k_options)); 2365 m68k_options *opts = malloc(sizeof(m68k_options));
2330 init_m68k_opts(opts, map, map_chunks, MCLKS_PER_68K, sync_components); 2366 init_m68k_opts(opts, map, map_chunks, MCLKS_PER_68K, sync_components, int_ack);
2331 if (!strcmp(tern_find_ptr_default(model, "tas", "broken"), "broken")) { 2367 if (!strcmp(tern_find_ptr_default(model, "tas", "broken"), "broken")) {
2332 opts->gen.flags |= M68K_OPT_BROKEN_READ_MODIFY; 2368 opts->gen.flags |= M68K_OPT_BROKEN_READ_MODIFY;
2333 } 2369 }
2334 gen->m68k = init_68k_context(opts, NULL); 2370 gen->m68k = init_68k_context(opts, NULL);
2335 gen->m68k->system = gen; 2371 gen->m68k->system = gen;
2398 memcpy(map + cd_chunks, base_map, sizeof(memmap_chunk) * base_chunks); 2434 memcpy(map + cd_chunks, base_map, sizeof(memmap_chunk) * base_chunks);
2399 map[cd_chunks].buffer = gen->work_ram; 2435 map[cd_chunks].buffer = gen->work_ram;
2400 uint32_t num_chunks = cd_chunks + base_chunks; 2436 uint32_t num_chunks = cd_chunks + base_chunks;
2401 2437
2402 m68k_options *opts = malloc(sizeof(m68k_options)); 2438 m68k_options *opts = malloc(sizeof(m68k_options));
2403 init_m68k_opts(opts, map, num_chunks, MCLKS_PER_68K, sync_components); 2439 init_m68k_opts(opts, map, num_chunks, MCLKS_PER_68K, sync_components, int_ack);
2404 //TODO: make this configurable 2440 //TODO: make this configurable
2405 opts->gen.flags |= M68K_OPT_BROKEN_READ_MODIFY; 2441 opts->gen.flags |= M68K_OPT_BROKEN_READ_MODIFY;
2406 gen->m68k = init_68k_context(opts, NULL); 2442 gen->m68k = init_68k_context(opts, NULL);
2407 gen->m68k->system = gen; 2443 gen->m68k->system = gen;
2408 opts->address_log = (system_opts & OPT_ADDRESS_LOG) ? fopen("address.log", "w") : NULL; 2444 opts->address_log = (system_opts & OPT_ADDRESS_LOG) ? fopen("address.log", "w") : NULL;