Mercurial > repos > blastem
comparison m68k_core_x86.c @ 2268:5b308c7b098c
Avoid code mem allocation bomb when a div instruction gets rewritten
author | Michael Pavone <pavone@retrodev.com> |
---|---|
date | Sun, 25 Dec 2022 18:16:44 -0800 |
parents | 8e8db9141209 |
children | 6677afe78a6f |
comparison
equal
deleted
inserted
replaced
2267:94cca8b8429e | 2268:5b308c7b098c |
---|---|
351 } | 351 } |
352 | 352 |
353 void m68k_check_cycles_int_latch(m68k_options *opts) | 353 void m68k_check_cycles_int_latch(m68k_options *opts) |
354 { | 354 { |
355 code_info *code = &opts->gen.code; | 355 code_info *code = &opts->gen.code; |
356 check_alloc_code(code, 3*MAX_INST_LEN); | |
357 uint8_t cc; | 356 uint8_t cc; |
358 if (opts->gen.limit < 0) { | 357 if (opts->gen.limit < 0) { |
359 cmp_ir(code, 1, opts->gen.cycles, SZ_D); | 358 cmp_ir(code, 1, opts->gen.cycles, SZ_D); |
360 cc = CC_NS; | 359 cc = CC_NS; |
361 } else { | 360 } else { |
362 cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D); | 361 cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D); |
363 cc = CC_A; | 362 cc = CC_A; |
364 } | 363 } |
364 ALLOC_CODE_RETRY_POINT | |
365 code_ptr jmp_off = code->cur+1; | 365 code_ptr jmp_off = code->cur+1; |
366 jcc(code, cc, jmp_off+1); | 366 jcc(code, cc, jmp_off+1); |
367 call(code, opts->handle_int_latch); | 367 call(code, opts->handle_int_latch); |
368 *jmp_off = code->cur - (jmp_off+1); | 368 CHECK_BRANCH_DEST(jmp_off) |
369 } | 369 } |
370 | 370 |
371 uint8_t translate_m68k_op(m68kinst * inst, host_ea * ea, m68k_options * opts, uint8_t dst) | 371 uint8_t translate_m68k_op(m68kinst * inst, host_ea * ea, m68k_options * opts, uint8_t dst) |
372 { | 372 { |
373 code_info *code = &opts->gen.code; | 373 code_info *code = &opts->gen.code; |
864 } else { | 864 } else { |
865 mov_irdisp(code, cond == COND_TRUE ? 0xFF : 0, dst_op.base, dst_op.disp, SZ_B); | 865 mov_irdisp(code, cond == COND_TRUE ? 0xFF : 0, dst_op.base, dst_op.disp, SZ_B); |
866 } | 866 } |
867 } else { | 867 } else { |
868 uint8_t cc = m68k_eval_cond(opts, cond); | 868 uint8_t cc = m68k_eval_cond(opts, cond); |
869 check_alloc_code(code, 6*MAX_INST_LEN); | 869 ALLOC_CODE_RETRY_POINT |
870 code_ptr true_off = code->cur + 1; | 870 code_ptr true_off = code->cur + 1; |
871 jcc(code, cc, code->cur+2); | 871 jcc(code, cc, code->cur+2); |
872 cycles(&opts->gen, BUS); | 872 cycles(&opts->gen, BUS); |
873 if (dst_op.mode == MODE_REG_DIRECT) { | 873 if (dst_op.mode == MODE_REG_DIRECT) { |
874 mov_ir(code, 0, dst_op.base, SZ_B); | 874 mov_ir(code, 0, dst_op.base, SZ_B); |
875 } else { | 875 } else { |
876 mov_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); | 876 mov_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); |
877 } | 877 } |
878 code_ptr end_off = code->cur+1; | 878 code_ptr end_off = code->cur+1; |
879 jmp(code, code->cur+2); | 879 jmp(code, code->cur+2); |
880 *true_off = code->cur - (true_off+1); | 880 CHECK_BRANCH_DEST(true_off); |
881 cycles(&opts->gen, inst->dst.addr_mode == MODE_REG ? 6 : 4); | 881 cycles(&opts->gen, inst->dst.addr_mode == MODE_REG ? 6 : 4); |
882 if (dst_op.mode == MODE_REG_DIRECT) { | 882 if (dst_op.mode == MODE_REG_DIRECT) { |
883 mov_ir(code, 0xFF, dst_op.base, SZ_B); | 883 mov_ir(code, 0xFF, dst_op.base, SZ_B); |
884 } else { | 884 } else { |
885 mov_irdisp(code, 0xFF, dst_op.base, dst_op.disp, SZ_B); | 885 mov_irdisp(code, 0xFF, dst_op.base, dst_op.disp, SZ_B); |
886 } | 886 } |
887 *end_off = code->cur - (end_off+1); | 887 CHECK_BRANCH_DEST(end_off); |
888 } | 888 } |
889 m68k_save_result(inst, opts); | 889 m68k_save_result(inst, opts); |
890 } | 890 } |
891 | 891 |
892 void translate_m68k_dbcc(m68k_options * opts, m68kinst * inst) | 892 void translate_m68k_dbcc(m68k_options * opts, m68kinst * inst) |
895 //best case duration | 895 //best case duration |
896 cycles(&opts->gen, 10); | 896 cycles(&opts->gen, 10); |
897 code_ptr skip_loc = NULL; | 897 code_ptr skip_loc = NULL; |
898 //TODO: Check if COND_TRUE technically valid here even though | 898 //TODO: Check if COND_TRUE technically valid here even though |
899 //it's basically a slow NOP | 899 //it's basically a slow NOP |
900 ALLOC_CODE_RETRY_VAR; | |
900 if (inst->extra.cond != COND_FALSE) { | 901 if (inst->extra.cond != COND_FALSE) { |
901 uint8_t cond = m68k_eval_cond(opts, inst->extra.cond); | 902 uint8_t cond = m68k_eval_cond(opts, inst->extra.cond); |
902 check_alloc_code(code, 6*MAX_INST_LEN); | 903 ALLOC_CODE_RETRY_POINT_NO_VAR |
903 skip_loc = code->cur + 1; | 904 skip_loc = code->cur + 1; |
904 jcc(code, cond, code->cur + 2); | 905 jcc(code, cond, code->cur + 2); |
905 } | 906 } |
906 if (opts->dregs[inst->dst.params.regs.pri] >= 0) { | 907 if (opts->dregs[inst->dst.params.regs.pri] >= 0) { |
907 sub_ir(code, 1, opts->dregs[inst->dst.params.regs.pri], SZ_W); | 908 sub_ir(code, 1, opts->dregs[inst->dst.params.regs.pri], SZ_W); |
915 uint32_t after = inst->address + 2; | 916 uint32_t after = inst->address + 2; |
916 jump_m68k_abs(opts, after + inst->src.params.immed); | 917 jump_m68k_abs(opts, after + inst->src.params.immed); |
917 *loop_end_loc = code->cur - (loop_end_loc+1); | 918 *loop_end_loc = code->cur - (loop_end_loc+1); |
918 if (skip_loc) { | 919 if (skip_loc) { |
919 cycles(&opts->gen, 2); | 920 cycles(&opts->gen, 2); |
920 *skip_loc = code->cur - (skip_loc+1); | 921 CHECK_BRANCH_DEST(skip_loc); |
921 cycles(&opts->gen, 2); | 922 cycles(&opts->gen, 2); |
922 } else { | 923 } else { |
923 cycles(&opts->gen, 4); | 924 cycles(&opts->gen, 4); |
924 } | 925 } |
925 } | 926 } |
1029 { | 1030 { |
1030 code_info *code = &opts->gen.code; | 1031 code_info *code = &opts->gen.code; |
1031 code_ptr end_off = NULL; | 1032 code_ptr end_off = NULL; |
1032 code_ptr nz_off = NULL; | 1033 code_ptr nz_off = NULL; |
1033 code_ptr z_off = NULL; | 1034 code_ptr z_off = NULL; |
1035 ALLOC_CODE_RETRY_VAR; | |
1034 if (inst->src.addr_mode == MODE_UNUSED) { | 1036 if (inst->src.addr_mode == MODE_UNUSED) { |
1035 cycles(&opts->gen, BUS); | 1037 cycles(&opts->gen, BUS); |
1036 //Memory shift | 1038 //Memory shift |
1037 shift_ir(code, 1, dst_op->base, SZ_W); | 1039 shift_ir(code, 1, dst_op->base, SZ_W); |
1038 } else { | 1040 } else { |
1041 ALLOC_CODE_RETRY_POINT_NO_VAR | |
1039 if (src_op->mode == MODE_IMMED) { | 1042 if (src_op->mode == MODE_IMMED) { |
1040 cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + 2 * src_op->disp); | 1043 cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + 2 * src_op->disp); |
1041 if (src_op->disp != 1 && inst->op == M68K_ASL) { | 1044 if (src_op->disp != 1 && inst->op == M68K_ASL) { |
1042 set_flag(opts, 0, FLAG_V); | 1045 set_flag(opts, 0, FLAG_V); |
1043 for (int i = 0; i < src_op->disp; i++) { | 1046 for (int i = 0; i < src_op->disp; i++) { |
1044 if (dst_op->mode == MODE_REG_DIRECT) { | 1047 if (dst_op->mode == MODE_REG_DIRECT) { |
1045 shift_ir(code, 1, dst_op->base, inst->extra.size); | 1048 shift_ir(code, 1, dst_op->base, inst->extra.size); |
1046 } else { | 1049 } else { |
1047 shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size); | 1050 shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size); |
1048 } | 1051 } |
1049 check_alloc_code(code, 2*MAX_INST_LEN); | |
1050 code_ptr after_flag_set = code->cur + 1; | 1052 code_ptr after_flag_set = code->cur + 1; |
1051 jcc(code, CC_NO, code->cur + 2); | 1053 jcc(code, CC_NO, code->cur + 2); |
1052 set_flag(opts, 1, FLAG_V); | 1054 set_flag(opts, 1, FLAG_V); |
1053 *after_flag_set = code->cur - (after_flag_set+1); | 1055 CHECK_BRANCH_DEST(after_flag_set); |
1054 } | 1056 } |
1055 } else { | 1057 } else { |
1056 if (dst_op->mode == MODE_REG_DIRECT) { | 1058 if (dst_op->mode == MODE_REG_DIRECT) { |
1057 shift_ir(code, src_op->disp, dst_op->base, inst->extra.size); | 1059 shift_ir(code, src_op->disp, dst_op->base, inst->extra.size); |
1058 } else { | 1060 } else { |
1069 mov_rdispr(code, src_op->base, src_op->disp, RCX, SZ_B); | 1071 mov_rdispr(code, src_op->base, src_op->disp, RCX, SZ_B); |
1070 } | 1072 } |
1071 | 1073 |
1072 } | 1074 } |
1073 and_ir(code, 63, RCX, SZ_D); | 1075 and_ir(code, 63, RCX, SZ_D); |
1074 check_alloc_code(code, 7*MAX_INST_LEN); | |
1075 nz_off = code->cur + 1; | 1076 nz_off = code->cur + 1; |
1076 jcc(code, CC_NZ, code->cur + 2); | 1077 jcc(code, CC_NZ, code->cur + 2); |
1077 //Flag behavior for shift count of 0 is different for x86 than 68K | 1078 //Flag behavior for shift count of 0 is different for x86 than 68K |
1078 if (dst_op->mode == MODE_REG_DIRECT) { | 1079 if (dst_op->mode == MODE_REG_DIRECT) { |
1079 cmp_ir(code, 0, dst_op->base, inst->extra.size); | 1080 cmp_ir(code, 0, dst_op->base, inst->extra.size); |
1087 if (inst->op == M68K_ASL) { | 1088 if (inst->op == M68K_ASL) { |
1088 set_flag(opts, 0, FLAG_V); | 1089 set_flag(opts, 0, FLAG_V); |
1089 } | 1090 } |
1090 z_off = code->cur + 1; | 1091 z_off = code->cur + 1; |
1091 jmp(code, code->cur + 2); | 1092 jmp(code, code->cur + 2); |
1092 *nz_off = code->cur - (nz_off + 1); | 1093 CHECK_BRANCH_DEST(nz_off); |
1093 //add 2 cycles for every bit shifted | 1094 //add 2 cycles for every bit shifted |
1094 mov_ir(code, 2 * opts->gen.clock_divider, opts->gen.scratch2, SZ_D); | 1095 mov_ir(code, 2 * opts->gen.clock_divider, opts->gen.scratch2, SZ_D); |
1095 imul_rr(code, RCX, opts->gen.scratch2, SZ_D); | 1096 imul_rr(code, RCX, opts->gen.scratch2, SZ_D); |
1096 add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D); | 1097 add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D); |
1097 if (inst->op == M68K_ASL) { | 1098 if (inst->op == M68K_ASL) { |
1098 //ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB | 1099 //ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB |
1099 //Easiest way to deal with this is to shift one bit at a time | 1100 //Easiest way to deal with this is to shift one bit at a time |
1100 set_flag(opts, 0, FLAG_V); | 1101 set_flag(opts, 0, FLAG_V); |
1101 check_alloc_code(code, 5*MAX_INST_LEN); | |
1102 code_ptr loop_start = code->cur; | 1102 code_ptr loop_start = code->cur; |
1103 if (dst_op->mode == MODE_REG_DIRECT) { | 1103 if (dst_op->mode == MODE_REG_DIRECT) { |
1104 shift_ir(code, 1, dst_op->base, inst->extra.size); | 1104 shift_ir(code, 1, dst_op->base, inst->extra.size); |
1105 } else { | 1105 } else { |
1106 shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size); | 1106 shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size); |
1107 } | 1107 } |
1108 code_ptr after_flag_set = code->cur + 1; | 1108 code_ptr after_flag_set = code->cur + 1; |
1109 jcc(code, CC_NO, code->cur + 2); | 1109 jcc(code, CC_NO, code->cur + 2); |
1110 set_flag(opts, 1, FLAG_V); | 1110 set_flag(opts, 1, FLAG_V); |
1111 *after_flag_set = code->cur - (after_flag_set+1); | 1111 CHECK_BRANCH_DEST(after_flag_set); |
1112 loop(code, loop_start); | 1112 loop(code, loop_start); |
1113 } else { | 1113 } else { |
1114 //x86 shifts modulo 32 for operand sizes less than 64-bits | 1114 //x86 shifts modulo 32 for operand sizes less than 64-bits |
1115 //but M68K shifts modulo 64, so we need to check for large shifts here | 1115 //but M68K shifts modulo 64, so we need to check for large shifts here |
1116 cmp_ir(code, 32, RCX, SZ_B); | 1116 cmp_ir(code, 32, RCX, SZ_B); |
1117 check_alloc_code(code, 14*MAX_INST_LEN); | |
1118 code_ptr norm_shift_off = code->cur + 1; | 1117 code_ptr norm_shift_off = code->cur + 1; |
1119 jcc(code, CC_L, code->cur + 2); | 1118 jcc(code, CC_L, code->cur + 2); |
1120 if (special) { | 1119 if (special) { |
1121 code_ptr after_flag_set = NULL; | 1120 code_ptr after_flag_set = NULL; |
1122 if (inst->extra.size == OPSIZE_LONG) { | 1121 if (inst->extra.size == OPSIZE_LONG) { |
1130 special_disp(code, 1, dst_op->base, dst_op->disp, SZ_D); | 1129 special_disp(code, 1, dst_op->base, dst_op->disp, SZ_D); |
1131 } | 1130 } |
1132 set_flag_cond(opts, CC_C, FLAG_C); | 1131 set_flag_cond(opts, CC_C, FLAG_C); |
1133 after_flag_set = code->cur + 1; | 1132 after_flag_set = code->cur + 1; |
1134 jmp(code, code->cur + 2); | 1133 jmp(code, code->cur + 2); |
1135 *neq_32_off = code->cur - (neq_32_off+1); | 1134 CHECK_BRANCH_DEST(neq_32_off); |
1136 } | 1135 } |
1137 set_flag(opts, 0, FLAG_C); | 1136 set_flag(opts, 0, FLAG_C); |
1138 if (after_flag_set) { | 1137 if (after_flag_set) { |
1139 *after_flag_set = code->cur - (after_flag_set+1); | 1138 CHECK_BRANCH_DEST(after_flag_set); |
1140 } | 1139 } |
1141 set_flag(opts, 1, FLAG_Z); | 1140 set_flag(opts, 1, FLAG_Z); |
1142 set_flag(opts, 0, FLAG_N); | 1141 set_flag(opts, 0, FLAG_N); |
1143 if (dst_op->mode == MODE_REG_DIRECT) { | 1142 if (dst_op->mode == MODE_REG_DIRECT) { |
1144 xor_rr(code, dst_op->base, dst_op->base, inst->extra.size); | 1143 xor_rr(code, dst_op->base, dst_op->base, inst->extra.size); |
1155 } | 1154 } |
1156 | 1155 |
1157 } | 1156 } |
1158 end_off = code->cur + 1; | 1157 end_off = code->cur + 1; |
1159 jmp(code, code->cur + 2); | 1158 jmp(code, code->cur + 2); |
1160 *norm_shift_off = code->cur - (norm_shift_off+1); | 1159 CHECK_BRANCH_DEST(norm_shift_off); |
1161 if (dst_op->mode == MODE_REG_DIRECT) { | 1160 if (dst_op->mode == MODE_REG_DIRECT) { |
1162 shift_clr(code, dst_op->base, inst->extra.size); | 1161 shift_clr(code, dst_op->base, inst->extra.size); |
1163 } else { | 1162 } else { |
1164 shift_clrdisp(code, dst_op->base, dst_op->disp, inst->extra.size); | 1163 shift_clrdisp(code, dst_op->base, dst_op->disp, inst->extra.size); |
1165 } | 1164 } |
1166 } | 1165 } |
1167 } | 1166 } |
1168 | 1167 |
1169 } | 1168 } |
1170 if (!special && end_off) { | 1169 if (!special && end_off) { |
1171 *end_off = code->cur - (end_off + 1); | 1170 CHECK_BRANCH_DEST(end_off); |
1172 } | 1171 } |
1173 update_flags(opts, C|Z|N); | 1172 update_flags(opts, C|Z|N); |
1174 if (special && end_off) { | 1173 if (special && end_off) { |
1175 *end_off = code->cur - (end_off + 1); | 1174 CHECK_BRANCH_DEST(end_off); |
1176 } | 1175 } |
1177 //set X flag to same as C flag | 1176 //set X flag to same as C flag |
1178 if (opts->flag_regs[FLAG_C] >= 0) { | 1177 if (opts->flag_regs[FLAG_C] >= 0) { |
1179 flag_to_flag(opts, FLAG_C, FLAG_X); | 1178 flag_to_flag(opts, FLAG_C, FLAG_X); |
1180 } else { | 1179 } else { |
1181 set_flag_cond(opts, CC_C, FLAG_X); | 1180 set_flag_cond(opts, CC_C, FLAG_X); |
1182 } | 1181 } |
1183 if (z_off) { | 1182 if (z_off) { |
1184 *z_off = code->cur - (z_off + 1); | 1183 CHECK_BRANCH_DEST(z_off); |
1185 } | 1184 } |
1186 if (inst->op != M68K_ASL) { | 1185 if (inst->op != M68K_ASL) { |
1187 set_flag(opts, 0, FLAG_V); | 1186 set_flag(opts, 0, FLAG_V); |
1188 } | 1187 } |
1189 if (inst->src.addr_mode == MODE_UNUSED) { | 1188 if (inst->src.addr_mode == MODE_UNUSED) { |
1350 } | 1349 } |
1351 } | 1350 } |
1352 if (inst->dst.addr_mode != MODE_AREG || inst->op == M68K_CMP) { | 1351 if (inst->dst.addr_mode != MODE_AREG || inst->op == M68K_CMP) { |
1353 update_flags(opts, flag_mask); | 1352 update_flags(opts, flag_mask); |
1354 if (inst->op == M68K_ADDX || inst->op == M68K_SUBX) { | 1353 if (inst->op == M68K_ADDX || inst->op == M68K_SUBX) { |
1355 check_alloc_code(code, 2*MAX_INST_LEN); | 1354 ALLOC_CODE_RETRY_POINT |
1356 code_ptr after_flag_set = code->cur + 1; | 1355 code_ptr after_flag_set = code->cur + 1; |
1357 jcc(code, CC_Z, code->cur + 2); | 1356 jcc(code, CC_Z, code->cur + 2); |
1358 set_flag(opts, 0, FLAG_Z); | 1357 set_flag(opts, 0, FLAG_Z); |
1359 *after_flag_set = code->cur - (after_flag_set+1); | 1358 CHECK_BRANCH_DEST(after_flag_set); |
1360 } | 1359 } |
1361 } | 1360 } |
1362 if (inst->op != M68K_CMP) { | 1361 if (inst->op != M68K_CMP) { |
1363 m68k_save_result(inst, opts); | 1362 m68k_save_result(inst, opts); |
1364 } | 1363 } |
1724 isize = 6; | 1723 isize = 6; |
1725 break; | 1724 break; |
1726 default: | 1725 default: |
1727 isize = 2; | 1726 isize = 2; |
1728 } | 1727 } |
1729 //make sure we won't start a new chunk in the middle of these branches | 1728 ALLOC_CODE_RETRY_POINT |
1730 check_alloc_code(code, MAX_INST_LEN * 11); | |
1731 code_ptr passed = code->cur + 1; | 1729 code_ptr passed = code->cur + 1; |
1732 jcc(code, CC_GE, code->cur + 2); | 1730 jcc(code, CC_GE, code->cur + 2); |
1733 set_flag(opts, 1, FLAG_N); | 1731 set_flag(opts, 1, FLAG_N); |
1734 mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D); | 1732 mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D); |
1735 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D); | 1733 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D); |
1736 jmp(code, opts->trap); | 1734 jmp(code, opts->trap); |
1737 *passed = code->cur - (passed+1); | 1735 CHECK_BRANCH_DEST(passed); |
1738 if (dst_op->mode == MODE_REG_DIRECT) { | 1736 if (dst_op->mode == MODE_REG_DIRECT) { |
1739 if (src_op->mode == MODE_REG_DIRECT) { | 1737 if (src_op->mode == MODE_REG_DIRECT) { |
1740 cmp_rr(code, src_op->base, dst_op->base, inst->extra.size); | 1738 cmp_rr(code, src_op->base, dst_op->base, inst->extra.size); |
1741 } else if(src_op->mode == MODE_REG_DISPLACE8) { | 1739 } else if(src_op->mode == MODE_REG_DISPLACE8) { |
1742 cmp_rdispr(code, src_op->base, src_op->disp, dst_op->base, inst->extra.size); | 1740 cmp_rdispr(code, src_op->base, src_op->disp, dst_op->base, inst->extra.size); |
1754 jcc(code, CC_LE, code->cur + 2); | 1752 jcc(code, CC_LE, code->cur + 2); |
1755 set_flag(opts, 0, FLAG_N); | 1753 set_flag(opts, 0, FLAG_N); |
1756 mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D); | 1754 mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D); |
1757 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D); | 1755 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D); |
1758 jmp(code, opts->trap); | 1756 jmp(code, opts->trap); |
1759 *passed = code->cur - (passed+1); | 1757 CHECK_BRANCH_DEST(passed); |
1760 cycles(&opts->gen, 6); | 1758 cycles(&opts->gen, 6); |
1761 } | 1759 } |
1762 | 1760 |
1763 static uint32_t divu(uint32_t dividend, m68k_context *context, uint32_t divisor_shift) | 1761 static uint32_t divu(uint32_t dividend, m68k_context *context, uint32_t divisor_shift) |
1764 { | 1762 { |
1881 } | 1879 } |
1882 | 1880 |
1883 void translate_m68k_div(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op) | 1881 void translate_m68k_div(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op) |
1884 { | 1882 { |
1885 code_info *code = &opts->gen.code; | 1883 code_info *code = &opts->gen.code; |
1886 check_alloc_code(code, MAX_NATIVE_SIZE); | |
1887 set_flag(opts, 0, FLAG_C); | 1884 set_flag(opts, 0, FLAG_C); |
1888 if (dst_op->mode == MODE_REG_DIRECT) { | 1885 if (dst_op->mode == MODE_REG_DIRECT) { |
1889 mov_rr(code, dst_op->base, opts->gen.scratch2, SZ_D); | 1886 mov_rr(code, dst_op->base, opts->gen.scratch2, SZ_D); |
1890 } else { | 1887 } else { |
1891 mov_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch2, SZ_D); | 1888 mov_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch2, SZ_D); |
1899 movzx_rr(code, src_op->base, opts->gen.scratch1, SZ_W, SZ_D); | 1896 movzx_rr(code, src_op->base, opts->gen.scratch1, SZ_W, SZ_D); |
1900 } | 1897 } |
1901 shl_ir(code, 16, opts->gen.scratch1, SZ_D); | 1898 shl_ir(code, 16, opts->gen.scratch1, SZ_D); |
1902 } | 1899 } |
1903 cmp_ir(code, 0, opts->gen.scratch1, SZ_D); | 1900 cmp_ir(code, 0, opts->gen.scratch1, SZ_D); |
1901 ALLOC_CODE_RETRY_POINT | |
1904 code_ptr not_zero = code->cur+1; | 1902 code_ptr not_zero = code->cur+1; |
1905 jcc(code, CC_NZ, not_zero); | 1903 jcc(code, CC_NZ, not_zero); |
1906 | 1904 |
1907 //TODO: Check that opts->trap includes the cycles consumed by the first trap0 microinstruction | 1905 //TODO: Check that opts->trap includes the cycles consumed by the first trap0 microinstruction |
1908 cycles(&opts->gen, 4); | 1906 cycles(&opts->gen, 4); |
1924 //zero seems to clear all flags | 1922 //zero seems to clear all flags |
1925 update_flags(opts, N0|Z0|V0); | 1923 update_flags(opts, N0|Z0|V0); |
1926 mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D); | 1924 mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D); |
1927 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D); | 1925 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D); |
1928 jmp(code, opts->trap); | 1926 jmp(code, opts->trap); |
1929 | 1927 CHECK_BRANCH_DEST(not_zero); |
1930 *not_zero = code->cur - (not_zero + 1); | |
1931 code_ptr end = NULL; | 1928 code_ptr end = NULL; |
1932 if (inst->op == M68K_DIVU) { | 1929 if (inst->op == M68K_DIVU) { |
1933 //initial overflow check needs to be done in the C code for divs | 1930 //initial overflow check needs to be done in the C code for divs |
1934 //but can be done before dumping state to mem in divu as an optimization | 1931 //but can be done before dumping state to mem in divu as an optimization |
1935 cmp_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D); | 1932 cmp_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D); |
1940 update_flags(opts, N1|Z0|V1); | 1937 update_flags(opts, N1|Z0|V1); |
1941 cycles(&opts->gen, 10); | 1938 cycles(&opts->gen, 10); |
1942 end = code->cur+1; | 1939 end = code->cur+1; |
1943 jmp(code, end); | 1940 jmp(code, end); |
1944 | 1941 |
1945 *not_overflow = code->cur - (not_overflow + 1); | 1942 CHECK_BRANCH_DEST(not_overflow); |
1946 } | 1943 } |
1947 call(code, opts->gen.save_context); | 1944 call(code, opts->gen.save_context); |
1948 push_r(code, opts->gen.context_reg); | 1945 push_r(code, opts->gen.context_reg); |
1949 //TODO: inline the functionality of divu/divs so we don't need to dump context to memory | 1946 //TODO: inline the functionality of divu/divs so we don't need to dump context to memory |
1950 call_args(code, (code_ptr)(inst->op == M68K_DIVU ? divu : divs), 3, opts->gen.scratch2, opts->gen.context_reg, opts->gen.scratch1); | 1947 call_args(code, (code_ptr)(inst->op == M68K_DIVU ? divu : divs), 3, opts->gen.scratch2, opts->gen.context_reg, opts->gen.scratch1); |
1962 mov_rr(code, opts->gen.scratch1, dst_op->base, SZ_D); | 1959 mov_rr(code, opts->gen.scratch1, dst_op->base, SZ_D); |
1963 } else { | 1960 } else { |
1964 mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_D); | 1961 mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_D); |
1965 } | 1962 } |
1966 if (end) { | 1963 if (end) { |
1967 *end = code->cur - (end + 1); | 1964 CHECK_BRANCH_DEST(end); |
1968 } | 1965 } |
1969 } | 1966 } |
1970 | 1967 |
1971 void translate_m68k_exg(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op) | 1968 void translate_m68k_exg(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op) |
1972 { | 1969 { |