comparison: m68k_core_x86.c @ 2054:8ee7ecbf3f21 (branch: segacd)

Implement enough of Sega CD gate array and Sub CPU to pass Sik's Mode 1 test ROM
author Michael Pavone <pavone@retrodev.com>
date Tue, 18 Jan 2022 00:03:50 -0800
parents 0d87116630c7
children 973a39d93d7b
comparison of 2053:3414a4423de1 with 2054:8ee7ecbf3f21 (lines prefixed with - were removed, lines prefixed with + were added; unchanged context lines carry the new revision's line numbers)
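The change that threads through every hunk below is that init_m68k_opts now receives the component-synchronization callback as a sync_fun parameter, stores it in opts->sync_components, and emits calls through that field instead of referencing a fixed sync_components symbol at link time. The sync_fun typedef itself is defined outside this file; the declaration sketch below is an assumption inferred from how the callback is invoked in the generated code (two arguments, with the updated context pointer returned in RAX), not the actual header.

#include <stdint.h>

typedef struct m68k_context m68k_context;   /* real definitions live elsewhere in the tree */
typedef struct m68k_options m68k_options;
typedef struct memmap_chunk memmap_chunk;

/* assumed shape of the callback: takes the CPU context and a second argument
   passed in scratch1, returns the (possibly different) context pointer */
typedef m68k_context *(*sync_fun)(m68k_context *context, uint32_t address);

/* signature as introduced by this changeset */
void init_m68k_opts(m68k_options *opts, memmap_chunk *memmap, uint32_t num_chunks,
                    uint32_t clock_divider, sync_fun sync_components);
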
420  if (dst && inst->src.addr_mode == MODE_AREG_PREDEC) {
421  push_r(code, opts->gen.scratch1);
422  }
423  dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (op->params.regs.pri == 7 ? 2 :1));
424  if (!dst || (
425  inst->op != M68K_MOVE && inst->op != M68K_MOVEM
426  && inst->op != M68K_SUBX && inst->op != M68K_ADDX
427  && inst->op != M68K_ABCD && inst->op != M68K_SBCD
428  )) {
429  cycles(&opts->gen, PREDEC_PENALTY);
430  }
431  subi_areg(opts, dec_amount, op->params.regs.pri);

815  }
816
817  void translate_m68k_bcc(m68k_options * opts, m68kinst * inst)
818  {
819  code_info *code = &opts->gen.code;
820
821  int32_t disp = inst->src.params.immed;
822  uint32_t after = inst->address + 2;
823  if (inst->extra.cond == COND_TRUE) {
824  cycles(&opts->gen, 10);
825  jump_m68k_abs(opts, after + disp);
826  } else {
827  uint8_t cond = m68k_eval_cond(opts, inst->extra.cond);
828  code_ptr do_branch = code->cur + 1;
829  jcc(code, cond, do_branch);
830
831  cycles(&opts->gen, inst->variant == VAR_BYTE ? 8 : 12);
832  code_ptr done = code->cur + 1;
833  jmp(code, done);
834
835  *do_branch = code->cur - (do_branch + 1);
836  cycles(&opts->gen, 10);
837  code_ptr dest_addr = get_native_address(opts, after + disp);
838  if (!dest_addr) {
839  opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 1);
840  //dummy address to be replaced later, make sure it generates a 4-byte displacement
841  dest_addr = code->cur + 256;
842  }
843  jmp(code, dest_addr);
844
845  *done = code->cur - (done + 1);
846  }
847  }
848
849  void translate_m68k_scc(m68k_options * opts, m68kinst * inst)

1308
1309  void translate_m68k_arith(m68k_options *opts, m68kinst * inst, uint32_t flag_mask, host_ea *src_op, host_ea *dst_op)
1310  {
1311  code_info *code = &opts->gen.code;
1312  uint8_t size = inst->dst.addr_mode == MODE_AREG ? OPSIZE_LONG : inst->extra.size;
1313
1314  uint32_t numcycles;
1315  if ((inst->op == M68K_ADDX || inst->op == M68K_SUBX) && inst->src.addr_mode != MODE_REG) {
1316  numcycles = 4;
1317  } else if (size == OPSIZE_LONG) {
1318  if (inst->op == M68K_CMP) {
1320  } else if (inst->op == M68K_AND && inst->variant == VAR_IMMEDIATE && inst->dst.addr_mode == MODE_REG) {
1321  numcycles = 6;
1322  } else if (inst->dst.addr_mode == MODE_REG) {
1323  numcycles = inst->src.addr_mode <= MODE_AREG || inst->src.addr_mode == MODE_IMMEDIATE ? 8 : 6;
1324  } else if (inst->dst.addr_mode == MODE_AREG) {
1325  numcycles = numcycles = inst->src.addr_mode <= MODE_AREG || inst->src.addr_mode == MODE_IMMEDIATE
1326  || inst->extra.size == OPSIZE_WORD ? 8 : 6;
1327  } else {
1328  numcycles = 4;
1329  }
1330  } else {
1331  numcycles = 4;
1332  }
1333  cycles(&opts->gen, numcycles);
1334
1335  if (inst->op == M68K_ADDX || inst->op == M68K_SUBX) {
1336  flag_to_carry(opts, FLAG_X);
1337  }
1338
1339  if (src_op->mode == MODE_REG_DIRECT) {
1340  if (dst_op->mode == MODE_REG_DIRECT) {
1341  op_rr(code, inst, src_op->base, dst_op->base, size);
1342  } else {
1343  op_rrdisp(code, inst, src_op->base, dst_op->base, dst_op->disp, size);

1498  }
1499  if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG && inst->dst.addr_mode != MODE_AREG_PREDEC) {
1500  //destination is in memory so we need to preserve scratch2 for the write at the end
1501  push_r(code, opts->gen.scratch2);
1502  }
1503
1504  //reg to reg takes 6 cycles, mem to mem is 4 cycles + all the operand fetch/writing (including 2 cycle predec penalty for first operand)
1505  cycles(&opts->gen, inst->dst.addr_mode != MODE_REG ? BUS : BUS + 2);
1506  uint8_t other_reg;
1507  //WARNING: This may need adjustment if register assignments change
1508  if (opts->gen.scratch2 > RBX) {

1760  for (int i = 0; i < 16; i++)
1761  {
1762  force = dividend >> 31;
1763  quotient = quotient << 1 | bit;
1764  dividend = dividend << 1;
1765
1766  if (force || dividend >= divisor_shift) {
1767  dividend -= divisor_shift;
1768  cycles += force ? 4 : 6;
1769  bit = 1;
1770  } else {

1782  {
1783  uint32_t orig_divisor = divisor_shift, orig_dividend = dividend;
1784  if (divisor_shift & 0x80000000) {
1785  divisor_shift = 0 - divisor_shift;
1786  }
1787
1788  uint32_t cycles = 12;
1789  if (dividend & 0x80000000) {
1790  //dvs10
1791  dividend = 0 - dividend;
1792  cycles += 2;

1803  uint16_t bit = 0;
1804  for (int i = 0; i < 15; i++)
1805  {
1806  quotient = quotient << 1 | bit;
1807  dividend = dividend << 1;
1808
1809  if (dividend >= divisor_shift) {
1810  dividend -= divisor_shift;
1811  cycles += 6;
1812  bit = 1;
1813  } else {

1822  quotient = quotient << 1 | 1;
1823  } else {
1824  quotient = quotient << 1;
1825  }
1826  cycles += 4;
1827
1828  context->flags[FLAG_V] = 0;
1829  if (orig_divisor & 0x80000000) {
1830  cycles += 16; //was 10
1831  if (orig_dividend & 0x80000000) {
1832  if (quotient & 0x8000) {

1892  shl_ir(code, 16, opts->gen.scratch1, SZ_D);
1893  }
1894  cmp_ir(code, 0, opts->gen.scratch1, SZ_D);
1895  code_ptr not_zero = code->cur+1;
1896  jcc(code, CC_NZ, not_zero);
1897
1898  //TODO: Check that opts->trap includes the cycles consumed by the first trap0 microinstruction
1899  cycles(&opts->gen, 4);
1900  uint32_t isize = 2;
1901  switch(inst->src.addr_mode)
1902  {

1915  //zero seems to clear all flags
1916  update_flags(opts, N0|Z0|V0);
1917  mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D);
1918  mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
1919  jmp(code, opts->trap);
1920
1921  *not_zero = code->cur - (not_zero + 1);
1922  code_ptr end = NULL;
1923  if (inst->op == M68K_DIVU) {
1924  //initial overflow check needs to be done in the C code for divs
1925  //but can be done before dumping state to mem in divu as an optimization
1926  cmp_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
1927  code_ptr not_overflow = code->cur+1;
1928  jcc(code, CC_C, not_overflow);
1929
1930  //overflow seems to always set the N and clear Z
1931  update_flags(opts, N1|Z0|V1);
1932  cycles(&opts->gen, 10);
1933  end = code->cur+1;
1934  jmp(code, end);
1935
1936  *not_overflow = code->cur - (not_overflow + 1);
1937  }
1938  call(code, opts->gen.save_context);
1939  push_r(code, opts->gen.context_reg);
1940  //TODO: inline the functionality of divu/divs so we don't need to dump context to memory
1941  call_args(code, (code_ptr)(inst->op == M68K_DIVU ? divu : divs), 3, opts->gen.scratch2, opts->gen.context_reg, opts->gen.scratch1);
1942  pop_r(code, opts->gen.context_reg);
1943  mov_rr(code, RAX, opts->gen.scratch1, SZ_D);
1944
1945  call(code, opts->gen.load_context);
1946
1947  if (inst->op == M68K_DIVU) {
1948  cmp_ir(code, 0, opts->gen.scratch1, SZ_W);
1949  update_flags(opts, V0|Z|N);
1950  }
1951
1952  if (dst_op->mode == MODE_REG_DIRECT) {
1953  mov_rr(code, opts->gen.scratch1, dst_op->base, SZ_D);
1954  } else {
1955  mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_D);
1956  }

2046  add_rrdisp(code, RAX, opts->gen.context_reg, offsetof(m68k_context, current_cycle), SZ_D);
2047  //restore context and scratch1
2048  call(code, opts->gen.load_context);
2049  pop_r(code, opts->gen.scratch1);
2050  }
2051
2052  uint8_t dst_reg;
2053  if (dst_op->mode == MODE_REG_DIRECT) {
2054  dst_reg = dst_op->base;
2055  if (inst->op == M68K_MULS) {
2056  movsx_rr(code, dst_reg, dst_reg, SZ_W, SZ_D);

2215  code_info *code = &opts->gen.code;
2216  //check supervisor bit in SR and trap if not in supervisor mode
2217  bt_irdisp(code, BIT_SUPERVISOR, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
2218  code_ptr in_sup_mode = code->cur + 1;
2219  jcc(code, CC_C, code->cur + 2);
2220
2221  ldi_native(opts, VECTOR_PRIV_VIOLATION, opts->gen.scratch2);
2222  ldi_native(opts, inst->address, opts->gen.scratch1);
2223  jmp(code, opts->trap);
2224
2225  *in_sup_mode = code->cur - (in_sup_mode + 1);
2226  }
2227
2228  void translate_m68k_andi_ori_ccr_sr(m68k_options *opts, m68kinst *inst)
2229  {

2542  void m68k_breakpoint_patch(m68k_context *context, uint32_t address, m68k_debug_handler bp_handler, code_ptr native_addr)
2543  {
2544  m68k_options * opts = context->options;
2545  code_info native;
2546  native.cur = native_addr ? native_addr : get_native_address(context->options, address);
2547
2548  if (!native.cur) {
2549  return;
2550  }
2551
2552  if (*native.cur != opts->prologue_start) {
2553  //instruction has already been patched, probably for retranslation
2554  return;
2555  }
2556  native.last = native.cur + 128;
2557  native.stack_off = 0;
2558  code_ptr start_native = native.cur;
2559  mov_ir(&native, address, opts->gen.scratch1, SZ_D);
2560
2561
2562  call(&native, opts->bp_stub);
2563  }
2564
-2565  void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks, uint32_t clock_divider)
+2565  void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks, uint32_t clock_divider, sync_fun sync_components)
2566  {
2567  memset(opts, 0, sizeof(*opts));
2568  opts->gen.memmap = memmap;
2569  opts->gen.memmap_chunks = num_chunks;
2570  opts->gen.address_size = SZ_D;

2610  opts->gen.context_reg = RSI;
2611  opts->gen.cycles = RAX;
2612  opts->gen.limit = RBP;
2613  opts->gen.scratch1 = RCX;
2614  opts->gen.align_error_mask = 1;
+2615  opts->sync_components = sync_components;
2616
2617
2618  opts->gen.native_code_map = malloc(sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
2619  memset(opts->gen.native_code_map, 0, sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
2620  opts->gen.deferred = NULL;

2694  opts->native_addr_and_sync = code->cur;
2695  call(code, opts->gen.save_context);
2696  push_r(code, opts->gen.scratch1);
2697
2698  xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D);
-2698  call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
+2699  call_args_abi(code, (code_ptr)opts->sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
2700  pop_r(code, RSI); //restore saved address from opts->gen.scratch1
2701  push_r(code, RAX); //save context pointer for later
2702  call_args(code, (code_ptr)get_native_address_trans, 2, RAX, RSI);
2703  mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg
2704  pop_r(code, opts->gen.context_reg);

2712  opts->do_sync = code->cur;
2713  push_r(code, opts->gen.scratch1);
2714  push_r(code, opts->gen.scratch2);
2715  call(code, opts->gen.save_context);
2716  xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D);
-2716  call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
+2717  call_args_abi(code, (code_ptr)opts->sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
2718  mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
2719  call(code, opts->gen.load_context);
2720  pop_r(code, opts->gen.scratch2);
2721  pop_r(code, opts->gen.scratch1);
2722  *skip_sync = code->cur - (skip_sync+1);
2723  retn(code);
2724
2725  opts->gen.handle_code_write = (code_ptr)m68k_handle_code_write;
2726
2727  check_alloc_code(code, 256);
2728  opts->gen.handle_align_error_write = code->cur;
2729  code->cur += 256;
2730  check_alloc_code(code, 256);
2731  opts->gen.handle_align_error_read = code->cur;
2732  code->cur += 256;
2733
2734  opts->read_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_16, NULL);
2735  opts->read_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_8, NULL);
2736  opts->write_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_16, NULL);
2737  opts->write_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_8, NULL);
2738

2829  setcc_rind(code, CC_C, opts->gen.context_reg);
2830  }
2831  }
2832  }
2833  retn(code);
2834
2835  code_info tmp_code = *code;
2836  code->cur = opts->gen.handle_align_error_write;
2837  code->last = code->cur + 256;
2838  //unwind the stack one function call
2839  add_ir(code, 16, RSP, SZ_PTR);

2892  mov_ir(code, 4 * VECTOR_ADDRESS_ERROR, opts->gen.scratch1, SZ_D);
2893  call(code, opts->read_32);
2894  call(code, opts->native_addr_and_sync);
2895  cycles(&opts->gen, 18);
2896  jmp_r(code, opts->gen.scratch1);
2897
2898  code->cur = opts->gen.handle_align_error_read;
2899  code->last = code->cur + 256;
2900  //unwind the stack one function call
2901  add_ir(code, 16, RSP, SZ_PTR);
2902  //save address that triggered error so we can write it to the 68K stack at the appropriate place

2954  mov_ir(code, 4 * VECTOR_ADDRESS_ERROR, opts->gen.scratch1, SZ_D);
2955  call(code, opts->read_32);
2956  call(code, opts->native_addr_and_sync);
2957  cycles(&opts->gen, 18);
2958  jmp_r(code, opts->gen.scratch1);
2959
2960  *code = tmp_code;
2961
2962  opts->gen.handle_cycle_limit_int = code->cur;
2963  //calculate stack adjust size
2964  add_ir(code, 16-sizeof(void*), RSP, SZ_PTR);

2973  jcc(code, CC_NC, no_trace);
2974  mov_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, trace_pending), SZ_B);
2975  *no_trace = code->cur - (no_trace + 1);
2976  //handle interrupts
2977  cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D);
2978  code_ptr do_int = code->cur + 2;
2979  jcc(code, CC_NC, do_int+512);//force 32-bit displacement
2980  //handle component synchronization
2981  cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.cycles, SZ_D);
2982  skip_sync = code->cur + 1;
2983  jcc(code, CC_C, code->cur + 2);
2984  call(code, opts->gen.save_context);
-2984  call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
+2985  call_args_abi(code, (code_ptr)opts->sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
2986  mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
-2986  jmp(code, opts->gen.load_context);
+2987  call(code, opts->gen.load_context);
2988  *skip_sync = code->cur - (skip_sync+1);
2989  cmp_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, should_return), SZ_B);
2990  code_ptr do_ret = code->cur + 1;
2991  jcc(code, CC_NZ, do_ret);
2992  retn(code);

3030  cycles(&opts->gen, 10);
3031  //discard function return address
3032  pop_r(code, opts->gen.scratch2);
3033  add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
3034  jmp_r(code, opts->gen.scratch1);
3035
3036  code->stack_off = tmp_stack_off;
3037
3038  *((uint32_t *)do_int) = code->cur - (do_int+4);
3039  //implement 1 instruction latency
3040  cmp_irdisp(code, INT_PENDING_NONE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B);
3041  do_int = code->cur + 1;
3042  jcc(code, CC_NZ, do_int);

3047  *do_int = code->cur - (do_int + 1);
3048  //Check if int_pending has an actual interrupt priority in it
3049  cmp_irdisp(code, INT_PENDING_SR_CHANGE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B);
3050  code_ptr already_int_num = code->cur + 1;
3051  jcc(code, CC_NZ, already_int_num);
3052
3053  mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch2, SZ_B);
3054  mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B);
3055
3056  *already_int_num = code->cur - (already_int_num + 1);
3057  //save PC as stored in scratch1 for later
3058  push_r(code, opts->gen.scratch1);
3059  //set target cycle to sync cycle
3060  mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.limit, SZ_D);

3132  //discard function return address
3133  pop_r(code, opts->gen.scratch2);
3134  add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
3135  jmp_r(code, opts->gen.scratch1);
3136  code->stack_off = tmp_stack_off;
3137
3138  opts->handle_int_latch = code->cur;
3139  cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D);
3140  code_ptr do_latch = code->cur + 1;
3141  jcc(code, CC_NC, do_latch);
3142  retn(code);
3143  *do_latch = code->cur - (do_latch + 1);
3144  cmp_irdisp(code, INT_PENDING_NONE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B);
3145  do_latch = code->cur + 1;

3176  shl_ir(code, 2, opts->gen.scratch1, SZ_D);
3177  call(code, opts->read_32);
3178  call(code, opts->native_addr_and_sync);
3179  cycles(&opts->gen, 18);
3180  jmp_r(code, opts->gen.scratch1);
3181
3182  opts->retrans_stub = code->cur;
3183  call(code, opts->gen.save_context);
3184  push_r(code, opts->gen.context_reg);
3185  call_args(code,(code_ptr)m68k_retranslate_inst, 2, opts->gen.scratch1, opts->gen.context_reg);
3186  pop_r(code, opts->gen.context_reg);
3187  mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR);
3188  call(code, opts->gen.load_context);
3189  jmp_r(code, opts->gen.scratch1);
3190
3191
3192  check_code_prologue(code);
3193  opts->bp_stub = code->cur;
3194
3195  tmp_stack_off = code->stack_off;
3196  //Calculate length of prologue

3223  //jump back to body of translated instruction
3224  pop_r(code, opts->gen.scratch1);
3225  add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
3226  jmp_r(code, opts->gen.scratch1);
3227  code->stack_off = tmp_stack_off;
3228
3229  retranslate_calc(&opts->gen);
3230  }
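
Routing synchronization through opts->sync_components rather than a single global function is what lets two 68K cores coexist, so the Sega CD Sub CPU can be driven by its own sync routine while the main CPU keeps the existing one. A minimal usage sketch follows; the callback names, divider values, and the setup function are hypothetical placeholders for illustration, not identifiers from the actual tree.

#include <stdint.h>

typedef struct m68k_context m68k_context;
typedef struct m68k_options m68k_options;
typedef struct memmap_chunk memmap_chunk;
typedef m68k_context *(*sync_fun)(m68k_context *context, uint32_t address);

void init_m68k_opts(m68k_options *opts, memmap_chunk *memmap, uint32_t num_chunks,
                    uint32_t clock_divider, sync_fun sync_components);

/* hypothetical per-CPU sync callbacks; the real implementations live in the
   Genesis and Sega CD machine code, not in m68k_core_x86.c */
m68k_context *genesis_sync_components(m68k_context *context, uint32_t address);
m68k_context *scd_sub_cpu_sync(m68k_context *context, uint32_t address);

/* hypothetical setup: each core gets its own options block and sync routine */
void init_both_cpus(m68k_options *main_opts, memmap_chunk *main_map, uint32_t main_chunks,
                    m68k_options *sub_opts, memmap_chunk *sub_map, uint32_t sub_chunks)
{
	/* main 68K keeps its existing synchronization routine */
	init_m68k_opts(main_opts, main_map, main_chunks, 7, genesis_sync_components);
	/* Sub CPU supplies its own; the clock divider here is illustrative only */
	init_m68k_opts(sub_opts, sub_map, sub_chunks, 4, scd_sub_cpu_sync);
}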