comparison z80_to_x86.c @ 1047:6b07af1515b5

Change cycle tracking code for Z80 core to only use a single register. Store low 7 bits of R in a reg and increment it appropriately.
author Michael Pavone <pavone@retrodev.com>
date Wed, 27 Jul 2016 22:46:22 -0700
parents a27fdf43f1a7
children 05ecef6c73b6
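
Background for the diff below (not part of the changeset itself; a rough C sketch inferred from the emitted code, using illustrative names): previously opts->gen.cycles held an absolute cycle count and opts->gen.limit held the target. With this change the limit register is gone and the single cycles register appears to hold target_cycle - current_cycle, a countdown that goes negative once the target is reached; the recurring "neg cycles" + "add cycles, target_cycle" pair converts that countdown into an absolute cycle count and back. The R refresh register gets a similar split: its low 7 bits live in opts->regs[Z80_R] and are incremented by 1 (or 2 for prefixed opcodes) per translated instruction, while bit 7 stays parked in context->regs[Z80_R].

/* Illustrative sketch only; these helpers and names do not exist in z80_to_x86.c. */
#include <stdint.h>

typedef struct {
	uint32_t current_cycle; /* absolute cycle count */
	uint32_t target_cycle;  /* next cycle at which the core must stop */
	uint8_t  r_bit7;        /* bit 7 of R, as stored in context->regs[Z80_R] */
} z80_sketch;

/* Value kept in the single host register opts->gen.cycles. */
static uint32_t cycle_countdown(const z80_sketch *z)
{
	return z->target_cycle - z->current_cycle; /* goes negative (as signed) past the target */
}

/* What the emitted "neg cycles; add cycles, [target_cycle]" pair recovers. */
static uint32_t cycle_absolute(const z80_sketch *z, uint32_t cycles_reg)
{
	return z->target_cycle - cycles_reg; /* == current_cycle */
}

/* Effective 8-bit R value: live low 7 bits plus the stored bit 7. */
static uint8_t r_value(const z80_sketch *z, uint8_t r_reg)
{
	return (r_reg & 0x7F) | (z->r_bit7 & 0x80);
}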
comparison
--- z80_to_x86.c	(1046:a27fdf43f1a7)
+++ z80_to_x86.c	(1047:6b07af1515b5)
@@ -333,11 +333,11 @@
 check_cycles_int(&opts->gen, address);
 if (context->breakpoint_flags[address / 8] & (1 << (address % 8))) {
 zbreakpoint_patch(context, address, start);
 }
 num_cycles = 4 * inst->opcode_bytes;
-//TODO: increment R register once for every opcode byte
+add_ir(code, inst->opcode_bytes > 1 ? 2 : 1, opts->regs[Z80_R], SZ_B);
 #ifdef Z80_LOG_ADDRESS
 log_address(&opts->gen, address, "Z80: %X @ %d\n");
 #endif
 }
 switch(inst->op)
@@ -372,11 +372,20 @@
 translate_z80_reg(inst, &src_op, opts);
 } else {
 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
 translate_z80_reg(inst, &dst_op, opts);
 }
-if (src_op.mode == MODE_REG_DIRECT) {
+if (inst->reg == Z80_R) {
+mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B);
+mov_rr(code, opts->regs[Z80_A], opts->regs[Z80_R], SZ_B);
+and_ir(code, 0x80, opts->gen.scratch1, SZ_B);
+mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zr_off(Z80_R), SZ_B);
+} else if (inst->ea_reg == Z80_R && inst->addr_mode == Z80_REG) {
+mov_rr(code, opts->regs[Z80_R], opts->regs[Z80_A], SZ_B);
+and_ir(code, 0x7F, opts->regs[Z80_A], SZ_B);
+or_rdispr(code, opts->gen.context_reg, zr_off(Z80_R), opts->regs[Z80_A], SZ_B);
+} else if (src_op.mode == MODE_REG_DIRECT) {
 if(dst_op.mode == MODE_REG_DISPLACE8) {
 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
 } else {
 mov_rr(code, src_op.base, dst_op.base, size);
 }
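
The new branches above special-case LD R,A and LD A,R. In C terms, the data movement they emit amounts to the following (a sketch under the register-split assumption noted earlier; r_reg stands for opts->regs[Z80_R], r_bit7 for context->regs[Z80_R], and any flag handling is outside the lines shown here):

#include <stdint.h>

/* Illustrative only: C equivalent of the emitted LD R,A / LD A,R sequences. */
static void ld_r_a(uint8_t a, uint8_t *r_reg, uint8_t *r_bit7)
{
	*r_reg  = a;        /* low 7 bits stay live in the host register */
	*r_bit7 = a & 0x80; /* bit 7 is parked in the context */
}

static uint8_t ld_a_r(uint8_t r_reg, uint8_t r_bit7)
{
	return (r_reg & 0x7F) | r_bit7; /* recombine live bits with stored bit 7 */
}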
@@ -1283,16 +1292,23 @@
 }
 case Z80_DI:
 cycles(&opts->gen, num_cycles);
 mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
-mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D);
+add_rdispr(code, opts->gen.context_reg, offsetof(z80_context, target_cycle), opts->gen.cycles, SZ_D);
+mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.scratch1, SZ_D);
 mov_irdisp(code, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D);
+mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
+sub_rr(code, opts->gen.scratch1, opts->gen.cycles, SZ_D);
 break;
 case Z80_EI:
 cycles(&opts->gen, num_cycles);
+neg_r(code, opts->gen.cycles, SZ_D);
+add_rdispr(code, opts->gen.context_reg, offsetof(z80_context, target_cycle), opts->gen.cycles, SZ_D);
 mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
+neg_r(code, opts->gen.cycles, SZ_D);
+add_rdispr(code, opts->gen.context_reg, offsetof(z80_context, target_cycle), opts->gen.cycles, SZ_D);
 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
 //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles
 add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
 call(code, opts->do_sync);
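
In the Z80_EI case above, the first neg/add pair converts the countdown into an absolute cycle count so it can be stored as int_enable_cycle, and the second pair restores the countdown; the later add_irdisp then pushes the enable point one instruction (4 clocks) into the future. Roughly (a sketch with illustrative names, assuming the countdown convention sketched earlier):

#include <stdint.h>

/* Illustrative only: the value the EI sequence leaves in int_enable_cycle. */
static uint32_t ei_int_enable_cycle(uint32_t cycles_reg, uint32_t target_cycle,
	uint32_t clock_divider)
{
	uint32_t current = target_cycle - cycles_reg; /* neg + add target_cycle */
	/* interrupt enable has a one-instruction latency; the shortest
	 * instruction lasts 4 cycles (scaled by the clock divider) */
	return current + 4 * clock_divider;
}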
@@ -2713,11 +2729,11 @@
 options->regs[Z80_IXH] = DH;
 options->regs[Z80_IXL] = RDX;
 options->regs[Z80_IYH] = -1;
 options->regs[Z80_IYL] = R8;
 options->regs[Z80_I] = -1;
-options->regs[Z80_R] = -1;
+options->regs[Z80_R] = RDI;
 options->regs[Z80_A] = R10;
 options->regs[Z80_BC] = RBX;
 options->regs[Z80_DE] = RCX;
 options->regs[Z80_HL] = RAX;
 options->regs[Z80_SP] = R9;
@@ -2728,19 +2744,20 @@
 options->gen.scratch1 = R13;
 options->gen.scratch2 = R14;
 #else
 memset(options->regs, -1, sizeof(options->regs));
 options->regs[Z80_A] = RAX;
+options->regs[Z80_R] = AH;
 options->regs[Z80_SP] = RBX;
 
 options->gen.scratch1 = RCX;
 options->gen.scratch2 = RDX;
 #endif
 
 options->gen.context_reg = RSI;
 options->gen.cycles = RBP;
-options->gen.limit = RDI;
+options->gen.limit = -1;
 
 options->gen.native_code_map = malloc(sizeof(native_map_slot));
 memset(options->gen.native_code_map, 0, sizeof(native_map_slot));
 options->gen.deferred = NULL;
 options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000 + sizeof(uint8_t *));
@@ -2763,22 +2780,29 @@
 reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0);
 size = SZ_W;
 } else {
 reg = i;
 size = SZ_B;
 }
-if (options->regs[reg] >= 0) {
-mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size);
+if (reg == Z80_R) {
+and_ir(code, 0x7F, options->regs[Z80_R], SZ_B);
+or_rrdisp(code, options->regs[Z80_R], options->gen.context_reg, zr_off(Z80_R), SZ_B);
+} else if (options->regs[reg] >= 0) {
+mov_rrdisp(code, options->regs[reg], options->gen.context_reg, zr_off(reg), size);
+if (reg == Z80_R) {
+and_irdisp(code, 0x80, options->gen.context_reg, zr_off(reg), SZ_B);
+}
 }
 if (size == SZ_W) {
 i++;
 }
 }
 if (options->regs[Z80_SP] >= 0) {
 mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W);
 }
-mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
+neg_r(code, options->gen.cycles, SZ_D);
+add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
 mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D);
 retn(code);
 
 options->load_context_scratch = code->cur;
 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W);
@@ -2803,12 +2827,12 @@
 }
 }
 if (options->regs[Z80_SP] >= 0) {
 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W);
 }
-mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D);
-mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D);
+mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
+sub_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D);
 retn(code);
 
 options->native_addr = code->cur;
 call(code, options->gen.save_context);
 push_r(code, options->gen.context_reg);
@@ -2826,13 +2850,17 @@
 sub_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
 call_noalign(code, options->gen.handle_cycle_limit);
 uint32_t call_adjust_size = code->cur - options->gen.handle_cycle_limit;
 code->cur = options->gen.handle_cycle_limit;
 
+neg_r(code, options->gen.cycles, SZ_D);
+add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
 code_ptr no_sync = code->cur+1;
 jcc(code, CC_B, no_sync);
+neg_r(code, options->gen.cycles, SZ_D);
+add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, pc), SZ_W);
 call(code, options->save_context_scratch);
 tmp_stack_off = code->stack_off;
 pop_r(code, RAX); //return address in read/write func
 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
@@ -2840,12 +2868,16 @@
 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
 sub_ir(code, call_adjust_size, RAX, SZ_PTR); //adjust return address to point to the call + stack adjust that got us here
 mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
 mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
 restore_callee_save_regs(code);
+//return to caller of z80_run
+retn(code);
+
 *no_sync = code->cur - (no_sync + 1);
-//return to caller of z80_run
+neg_r(code, options->gen.cycles, SZ_D);
+add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
 retn(code);
 code->stack_off = tmp_stack_off;
 
 options->gen.handle_code_write = (code_ptr)z80_handle_code_write;
 
@@ -2859,10 +2891,12 @@
 code->cur = skip_int;
 
 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
 code_ptr skip_sync = code->cur + 1;
 jcc(code, CC_B, skip_sync);
+neg_r(code, options->gen.cycles, SZ_D);
+add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
 //save PC
 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, pc), SZ_D);
 options->do_sync = code->cur;
 call(code, options->gen.save_context);
 tmp_stack_off = code->stack_off;
@@ -2874,19 +2908,27 @@
 mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
 
 //restore callee saved registers
 restore_callee_save_regs(code);
 //return to caller of z80_run
+retn(code);
 *skip_sync = code->cur - (skip_sync+1);
+neg_r(code, options->gen.cycles, SZ_D);
+add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
 retn(code);
 code->stack_off = tmp_stack_off;
 
 options->gen.handle_cycle_limit_int = code->cur;
+neg_r(code, options->gen.cycles, SZ_D);
+add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D);
 jcc(code, CC_B, skip_int);
 //set limit to the cycle limit
-mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D);
+mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.scratch2, SZ_D);
+mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
+neg_r(code, options->gen.cycles, SZ_D);
+add_rr(code, options->gen.scratch2, options->gen.cycles, SZ_D);
 //disable interrupts
 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
 cycles(&options->gen, 7);
 //save return address (in scratch1) to Z80 stack
@@ -3201,13 +3243,13 @@
 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
 //Restore context
 call(code, opts->gen.load_context);
 pop_r(code, opts->gen.scratch1);
 //do prologue stuff
-cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D);
+or_rr(code, opts->gen.cycles, opts->gen.cycles, SZ_D);
 uint8_t * jmp_off = code->cur+1;
-jcc(code, CC_NC, code->cur + 7);
+jcc(code, CC_NS, code->cur + 7);
 pop_r(code, opts->gen.scratch1);
 add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
 push_r(code, opts->gen.scratch1);
 jmp(code, opts->gen.handle_cycle_limit_int);
 *jmp_off = code->cur - (jmp_off+1);
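
The patched prologue check above no longer compares cycles against a separate limit register; with the countdown representation, the limit has been reached exactly when the register has gone negative, which is what the or/jns pair tests. A minimal sketch of that condition, assuming the same convention:

#include <stdint.h>

/* Illustrative only: condition tested by "or cycles, cycles" / "jns". */
static int cycle_limit_reached(int32_t cycles_countdown)
{
	return cycles_countdown < 0; /* current_cycle has passed target_cycle */
}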