comparison z80_to_x86.c @ 667:30ccf56842d6

All cycle counters are now based off the master clock. This seems to have messed up Z80 interrupt timing (the music in Sonic 2 plays too slowly, for instance), but things are generally working. A sketch of the idea follows the changeset header below.
author Michael Pavone <pavone@retrodev.com>
date Sat, 03 Jan 2015 16:08:23 -0800
parents b68039895627
children 5439ae7946ca
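
The core of the change: per-instruction cycle charges now accrue in master-clock ticks, scaled by the new clock_divider, rather than in raw Z80 clocks. Below is a minimal non-JIT sketch of the idea; the helper name and the current_cycle field are illustrative assumptions, and only clock_divider corresponds to the field this changeset adds to the generated-code options.

    #include <stdint.h>

    typedef struct {
        uint32_t current_cycle;  /* now counted in master-clock ticks */
        uint32_t clock_divider;  /* master clocks per Z80 clock, e.g. 15 on the Genesis */
    } clock_sketch;

    /* Charging time for an instruction: every Z80 cycle costs clock_divider master-clock ticks. */
    static void charge_cycles(clock_sketch *clk, uint32_t z80_cycles)
    {
        clk->current_cycle += z80_cycles * clk->clock_divider;
    }

The JIT does the equivalent arithmetic when emitting code, which is why the EI handler in the diff now adds 4*opts->gen.clock_divider to int_enable_cycle instead of a bare 4.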
--- z80_to_x86.c	666:b68039895627
+++ z80_to_x86.c	667:30ccf56842d6
@@ -877,24 +877,15 @@
 		} else {
 			cycles(&opts->gen, 4 * inst->immed);
 		}
 		break;
 	case Z80_HALT: {
+		code_ptr loop_top = code->cur;
+		//this isn't terribly efficient, but it's good enough for now
 		cycles(&opts->gen, 4);
-		mov_ir(code, address, opts->gen.scratch1, SZ_W);
-		uint8_t * call_inst = code->cur;
-		mov_rr(code, opts->gen.limit, opts->gen.scratch2, SZ_D);
-		sub_rr(code, opts->gen.cycles, opts->gen.scratch2, SZ_D);
-		and_ir(code, 0xFFFFFFFC, opts->gen.scratch2, SZ_D);
-		add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D);
-		cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D);
-		code_ptr skip_last = code->cur+1;
-		jcc(code, CC_NB, code->cur+2);
-		cycles(&opts->gen, 4);
-		*skip_last = code->cur - (skip_last+1);
-		call(code, opts->gen.handle_cycle_limit_int);
-		jmp(code, call_inst);
+		check_cycles_int(&opts->gen, address);
+		jmp(code, loop_top);
 		break;
 	}
 	case Z80_DI:
 		cycles(&opts->gen, 4);
 		mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
@@ -906,11 +897,11 @@
 		cycles(&opts->gen, 4);
 		mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
 		mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 		mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
 		//interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles
-		add_irdisp(code, 4, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
+		add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
 		call(code, opts->do_sync);
 		break;
 	case Z80_IM:
 		cycles(&opts->gen, 4);
 		mov_irdisp(code, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B);
@@ -1930,20 +1921,21 @@
 			dprintf("defferred address: %X\n", address);
 		}
 	} while (opts->gen.deferred);
 }
 
-void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks)
+void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, uint32_t clock_divider)
 {
 	memset(options, 0, sizeof(*options));
 
 	options->gen.memmap = chunks;
 	options->gen.memmap_chunks = num_chunks;
 	options->gen.address_size = SZ_W;
 	options->gen.address_mask = 0xFFFF;
 	options->gen.max_address = 0x10000;
 	options->gen.bus_cycles = 3;
+	options->gen.clock_divider = clock_divider;
 	options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers);
 	options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags);
 	options->gen.ram_flags_shift = 7;
 
 	options->flags = 0;
@@ -1966,22 +1958,22 @@
 	options->regs[Z80_HL] = RAX;
 	options->regs[Z80_SP] = R9;
 	options->regs[Z80_AF] = -1;
 	options->regs[Z80_IX] = RDX;
 	options->regs[Z80_IY] = R8;
 
 	options->gen.scratch1 = R13;
 	options->gen.scratch2 = R14;
 #else
 	memset(options->regs, -1, sizeof(options->regs));
 	options->regs[Z80_A] = RAX;
 	options->regx[Z80_SP] = RBX;
 
 	options->gen.scratch1 = RCX;
 	options->gen.scratch2 = RDX;
 #endif
 
 	options->gen.context_reg = RSI;
 	options->gen.cycles = RBP;
 	options->gen.limit = RDI;
 
 	options->gen.native_code_map = malloc(sizeof(native_map_slot));
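
Callers of init_z80_opts now have to supply the master-clock-to-Z80-clock ratio. A hypothetical call site for a Genesis-style setup follows; the memory map declarations are placeholders rather than code from this changeset, and 15 is the Genesis ratio (the ~53.693 MHz master clock divided by 15 gives the Z80's ~3.58 MHz).

    /* Placeholders: a chunk table describing the Z80's 64KB address space and its length. */
    extern memmap_chunk const z80_map[];
    extern const uint32_t num_z80_chunks;

    z80_options opts;
    /* 15 = master clocks per Z80 clock on the Genesis */
    init_z80_opts(&opts, z80_map, num_z80_chunks, 15);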