comparison: m68k_core_x86.c @ 744:fc68992cf18d (vs. parent 743:cf78cb045fa4)

Merge windows branch with latest changes

author   Michael Pavone <pavone@retrodev.com>
date     Thu, 28 May 2015 21:19:55 -0700
parents  fbda8e865dae
children cf09b189a0ca
@@ -11 +11 @@
 #include "backend.h"
 #include <stdio.h>
 #include <stddef.h>
 #include <stdlib.h>
 #include <string.h>
-
-#define CYCLES RAX
-#define LIMIT RBP
-#define CONTEXT RSI
-#define SCRATCH1 RCX
-
-#ifdef X86_64
-#define SCRATCH2 RDI
-#else
-#define SCRATCH2 RBX
-#endif
 
 enum {
 	FLAG_X,
 	FLAG_N,
 	FLAG_Z,
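Note: the macros deleted above pinned the cycle counter, sync limit, context pointer, and scratch registers to fixed host registers at compile time. The rest of this changeset replaces every use of them with fields on the shared options struct (opts->gen.cycles, opts->gen.limit, opts->gen.scratch1/scratch2, plus the new clock_divider), so the register assignment travels with the context instead of being baked into the translator. A rough sketch of the fields this diff relies on; the real definitions live in backend.h, and this is only inferred from usage below:

	#include <stdint.h>

	typedef struct {
		uint8_t  cycles;        //host register holding the current cycle count (was CYCLES/RAX)
		uint8_t  limit;         //host register holding the target cycle (was LIMIT/RBP)
		uint8_t  context_reg;   //host register holding the context pointer (was CONTEXT/RSI)
		uint8_t  scratch1;      //first scratch register (was SCRATCH1/RCX)
		uint8_t  scratch2;      //second scratch register (was SCRATCH2/RDI or RBX)
		uint32_t clock_divider; //new in this changeset: master-clock ticks per CPU cycle
	} cpu_options_sketch;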
@@ -231 +220 @@
 		movsx_rdispr(&opts->gen.code, opts->gen.context_reg, dreg_offset(reg), native_reg, SZ_W, SZ_D);
 	}
 }
 
 void native_to_areg(m68k_options *opts, uint8_t native_reg, uint8_t reg)
 {
 	if (opts->aregs[reg] >= 0) {
 		mov_rr(&opts->gen.code, native_reg, opts->aregs[reg], SZ_D);
 	} else {
 		mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, areg_offset(reg), SZ_D);
 	}
@@ -265 +254 @@
 }
 
 void addi_native(m68k_options *opts, int32_t value, uint8_t reg)
 {
 	add_ir(&opts->gen.code, value, reg, SZ_D);
 }
 
 void subi_native(m68k_options *opts, int32_t value, uint8_t reg)
 {
 	sub_ir(&opts->gen.code, value, reg, SZ_D);
 }
@@ -364 +353 @@
 {
 	code_info *code = &opts->gen.code;
 	m68k_op_info *op = dst ? &inst->dst : &inst->src;
 	int8_t reg = native_reg(op, opts);
 	uint8_t sec_reg;
-	int32_t dec_amount,inc_amount;
+	int32_t dec_amount, inc_amount;
 	if (reg >= 0) {
 		ea->mode = MODE_REG_DIRECT;
 		if (!dst && inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
 			movsx_rr(code, reg, opts->gen.scratch1, SZ_W, SZ_D);
 			ea->base = opts->gen.scratch1;
@@ -530 +519 @@
 		}
 		ea->base = opts->gen.scratch1;
 	}
 }
 
-void m68k_save_result(m68kinst * inst, m68k_options * opts)
+void check_user_mode_swap_ssp_usp(m68k_options *opts)
 {
-	code_info *code = &opts->gen.code;
-	if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG && inst->dst.addr_mode != MODE_UNUSED) {
-		if (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->src.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) {
-			areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
-		}
-		switch (inst->extra.size)
-		{
-		case OPSIZE_BYTE:
-			call(code, opts->write_8);
-			break;
-		case OPSIZE_WORD:
-			call(code, opts->write_16);
-			break;
-		case OPSIZE_LONG:
-			call(code, opts->write_32_lowfirst);
-			break;
-		}
-	}
+	code_info * code = &opts->gen.code;
+	//Check if we've switched to user mode and swap stack pointers if needed
+	bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+	code_ptr end_off = code->cur + 1;
+	jcc(code, CC_C, code->cur + 2);
+	swap_ssp_usp(opts);
+	*end_off = code->cur - (end_off + 1);
 }
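Note: the new check_user_mode_swap_ssp_usp helper (factored out of the interrupt and trap paths further down) also shows the translator's standard forward-branch idiom: jcc is emitted with a throwaway 2-byte target so it assembles as a rel8 jump, end_off records where the displacement byte landed, and that byte is patched once the fall-through code has been emitted. The same idea in isolation, over a plain byte buffer rather than the emulator's code_info (hypothetical helpers for illustration):

	#include <stdint.h>

	typedef uint8_t *code_ptr;

	//emit jz with a zero displacement; the caller patches it later
	static code_ptr emit_jz_placeholder(code_ptr cur, code_ptr *disp_out)
	{
		*cur++ = 0x74;    //jz rel8
		*disp_out = cur;  //remember where the displacement byte lives
		*cur++ = 0;       //dummy displacement
		return cur;
	}

	//rel8 is measured from the end of the jump instruction, hence the +1
	static void patch_rel8(code_ptr disp_byte, code_ptr target)
	{
		*disp_byte = (uint8_t)(target - (disp_byte + 1));
	}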
@@ -556 +534 @@
 
 void translate_m68k_move(m68k_options * opts, m68kinst * inst)
 {
 	code_info *code = &opts->gen.code;
@@ -602 +580 @@
 			mov_irdisp(code, src.disp, opts->gen.context_reg, reg_offset(&(inst->dst)), size);
 		}
 		break;
 	case MODE_AREG_PREDEC:
 		dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
-		subi_areg(opts, dec_amount, inst->dst.params.regs.pri);
 	case MODE_AREG_INDIRECT:
 	case MODE_AREG_POSTINC:
-		areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
 		if (src.mode == MODE_REG_DIRECT) {
 			if (src.base != opts->gen.scratch1) {
 				mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
 			}
 		} else if (src.mode == MODE_REG_DISPLACE8) {
 			mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
 		} else {
 			mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
 		}
+		if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
+			subi_areg(opts, dec_amount, inst->dst.params.regs.pri);
+		}
+		areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
 		break;
 	case MODE_AREG_DISPLACE:
 		cycles(&opts->gen, BUS);
 		calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);
 		if (src.mode == MODE_REG_DIRECT) {
@@ -819 +799 @@
 	int32_t disp = inst->src.params.immed;
 	uint32_t after = inst->address + 2;
 	if (inst->extra.cond == COND_TRUE) {
 		jump_m68k_abs(opts, after + disp);
 	} else {
-		code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + disp);
+		code_ptr dest_addr = get_native_address(opts, after + disp);
 		uint8_t cond = m68k_eval_cond(opts, inst->extra.cond);
 		if (!dest_addr) {
 			opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 2);
 			//dummy address to be replaced later, make sure it generates a 4-byte displacement
 			dest_addr = code->cur + 256;
@@ -1019 +999 @@
 	if (inst->src.addr_mode == MODE_UNUSED) {
 		cycles(&opts->gen, BUS);
 		//Memory shift
 		shift_ir(code, 1, dst_op->base, SZ_W);
 	} else {
-		cycles(&opts->gen, inst->extra.size == OPSIZE_LONG ? 8 : 6);
 		if (src_op->mode == MODE_IMMED) {
+			cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + 2 * src_op->disp);
 			if (src_op->disp != 1 && inst->op == M68K_ASL) {
 				set_flag(opts, 0, FLAG_V);
 				for (int i = 0; i < src_op->disp; i++) {
 					if (dst_op->mode == MODE_REG_DIRECT) {
 						shift_ir(code, 1, dst_op->base, inst->extra.size);
@@ -1044 +1024 @@
 					shift_irdisp(code, src_op->disp, dst_op->base, dst_op->disp, inst->extra.size);
 				}
 				set_flag_cond(opts, CC_O, FLAG_V);
 			}
 		} else {
+			cycles(&opts->gen, inst->extra.size == OPSIZE_LONG ? 8 : 6);
 			if (src_op->base != RCX) {
 				if (src_op->mode == MODE_REG_DIRECT) {
 					mov_rr(code, src_op->base, RCX, SZ_B);
 				} else {
 					mov_rdispr(code, src_op->base, src_op->disp, RCX, SZ_B);
@@ -1073 +1054 @@
 			}
 			z_off = code->cur + 1;
 			jmp(code, code->cur + 2);
 			*nz_off = code->cur - (nz_off + 1);
 			//add 2 cycles for every bit shifted
-			add_rr(code, RCX, CYCLES, SZ_D);
-			add_rr(code, RCX, CYCLES, SZ_D);
+			mov_ir(code, 2 * opts->gen.clock_divider, opts->gen.scratch2, SZ_D);
+			imul_rr(code, RCX, opts->gen.scratch2, SZ_D);
+			add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D);
 			if (inst->op == M68K_ASL) {
 				//ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB
 				//Easiest way to deal with this is to shift one bit at a time
 				set_flag(opts, 0, FLAG_V);
 				check_alloc_code(code, 5*MAX_INST_LEN);
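Note: the shift-by-register timing changes shape here because the counter no longer ticks once per 68K cycle. The old code simply added the shift count in RCX to the cycle register twice (2 cycles per bit); with the new clock_divider field the counter runs in master-clock ticks, so the emitted imul computes 2 * clock_divider * count instead, and immediate shifts fold the same term into the up-front cycles() call. In plain C the emitted arithmetic is (illustrative helper, not from the source):

	#include <stdint.h>

	//two 68K cycles per bit shifted, scaled to master-clock ticks
	static uint32_t shift_cycle_ticks(uint32_t bits_shifted, uint32_t clock_divider)
	{
		return 2 * clock_divider * bits_shifted;
	}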
@@ -1239 +1221 @@
 	}
 }
 
 void op_rrdisp(code_info *code, m68kinst *inst, uint8_t src, uint8_t dst, int32_t disp, uint8_t size)
 {
-	switch (inst->op)
+	switch(inst->op)
 	{
 	case M68K_ADD: add_rrdisp(code, src, dst, disp, size); break;
 	case M68K_ADDX: adc_rrdisp(code, src, dst, disp, size); break;
 	case M68K_AND: and_rrdisp(code, src, dst, disp, size); break;
 	case M68K_BTST: bt_rrdisp(code, src, dst, disp, size); break;
@@ -1379 +1361 @@
 	code_info *code = &opts->gen.code;
 	if (inst->src.params.immed == 0x7100) {
 		retn(code);
 		return;
 	}
-	mov_ir(code, inst->address, opts->gen.scratch1, SZ_D);
-	call(code, (code_ptr)m68k_invalid);
+	mov_ir(code, (int64_t)stderr, RDI, SZ_PTR);
+	mov_ir(code, (int64_t)"Invalid instruction at %X\n", RSI, SZ_PTR);
+	mov_ir(code, inst->address, RDX, SZ_D);
+	call_args_abi(code, (code_ptr)fprintf, 3, RDI, RSI, RDX);
+	mov_ir(code, 1, RDI, SZ_D);
+	call_args(code, (code_ptr)exit, 1, RDI);
 }
 
 void translate_m68k_abcd_sbcd(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
 {
 	code_info *code = &opts->gen.code;
@@ -1400 +1386 @@
 			mov_rr(code, dst_op->base, opts->gen.scratch1, SZ_B);
 		} else {
 			mov_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch1, SZ_B);
 		}
 	}
+	uint8_t other_reg;
+	//WARNING: This may need adjustment if register assignments change
+	if (opts->gen.scratch2 > RBX) {
+		other_reg = RAX;
+		xchg_rr(code, opts->gen.scratch2, RAX, SZ_D);
+	} else {
+		other_reg = opts->gen.scratch2;
+	}
+	mov_rr(code, opts->gen.scratch1, opts->gen.scratch1 + (AH-RAX), SZ_B);
+	mov_rr(code, other_reg, other_reg + (AH-RAX), SZ_B);
+	and_ir(code, 0xF0, opts->gen.scratch1, SZ_B);
+	and_ir(code, 0xF0, other_reg, SZ_B);
+	and_ir(code, 0xF, opts->gen.scratch1 + (AH-RAX), SZ_B);
+	and_ir(code, 0xF, other_reg + (AH-RAX), SZ_B);
+	//do op on low nibble
 	flag_to_carry(opts, FLAG_X);
-	jcc(code, CC_NC, code->cur + 5);
 	if (inst->op == M68K_ABCD) {
-		add_ir(code, 1, opts->gen.scratch1, SZ_B);
+		adc_rr(code, other_reg + (AH-RAX), opts->gen.scratch1 + (AH-RAX), SZ_B);
 	} else {
-		sub_ir(code, 1, opts->gen.scratch1, SZ_B);
+		sbb_rr(code, other_reg + (AH-RAX), opts->gen.scratch1 + (AH-RAX), SZ_B);
 	}
-	call(code, (code_ptr) (inst->op == M68K_ABCD ? bcd_add : bcd_sub));
-	reg_to_flag(opts, CH, FLAG_C);
-	reg_to_flag(opts, CH, FLAG_X);
+	cmp_ir(code, 0xA, opts->gen.scratch1 + (AH-RAX), SZ_B);
+	code_ptr no_adjust = code->cur+1;
+	//add correction factor if necessary
+	jcc(code, CC_B, no_adjust);
+	if (inst->op == M68K_ABCD) {
+		add_ir(code, 6, opts->gen.scratch1 + (AH-RAX), SZ_B);
+	} else {
+		sub_ir(code, 6, opts->gen.scratch1 + (AH-RAX), SZ_B);
+	}
+	*no_adjust = code->cur - (no_adjust+1);
+	//add low nibble result to one of the high nibble operands
+	add_rr(code, opts->gen.scratch1 + (AH-RAX), opts->gen.scratch1, SZ_B);
+	if (inst->op == M68K_ABCD) {
+		add_rr(code, other_reg, opts->gen.scratch1, SZ_B);
+	} else {
+		sub_rr(code, other_reg, opts->gen.scratch1, SZ_B);
+	}
+	if (opts->gen.scratch2 > RBX) {
+		mov_rr(code, opts->gen.scratch2, RAX, SZ_D);
+	}
+	set_flag(opts, 0, FLAG_C);
+	set_flag(opts, 0, FLAG_V);
+	code_ptr def_adjust = code->cur+1;
+	jcc(code, CC_C, def_adjust);
+	cmp_ir(code, 0xA0, opts->gen.scratch1, SZ_B);
+	no_adjust = code->cur+1;
+	jcc(code, CC_B, no_adjust);
+	*def_adjust = code->cur - (def_adjust + 1);
+	set_flag(opts, 1, FLAG_C);
+	if (inst->op == M68K_ABCD) {
+		add_ir(code, 0x60, opts->gen.scratch1, SZ_B);
+	} else {
+		sub_ir(code, 0x60, opts->gen.scratch1, SZ_B);
+	}
+	//V flag is set based on the result of the addition of the
+	//result and the correction factor
+	set_flag_cond(opts, CC_O, FLAG_V);
+	*no_adjust = code->cur - (no_adjust+1);
+	flag_to_flag(opts, FLAG_C, FLAG_X);
+
 	cmp_ir(code, 0, opts->gen.scratch1, SZ_B);
-	jcc(code, CC_Z, code->cur + 4);
+	set_flag_cond(opts, CC_S, FLAG_N);
+	code_ptr no_setz = code->cur+1;
+	jcc(code, CC_Z, no_setz);
 	set_flag(opts, 0, FLAG_Z);
+	*no_setz = code->cur - (no_setz + 1);
 	if (dst_op->base != opts->gen.scratch1) {
 		if (dst_op->mode == MODE_REG_DIRECT) {
 			mov_rr(code, opts->gen.scratch1, dst_op->base, SZ_B);
 		} else {
 			mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_B);
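Note: ABCD/SBCD previously adjusted the operand by the X flag and called out to C helpers (bcd_add/bcd_sub), recovering the flags from CH. The new code inlines the whole algorithm: both operands are split into nibbles via the AH-form registers, the low nibbles are combined with adc/sbb so the X flag participates, a +6/-6 decimal adjust is applied when the low nibble leaves BCD range, and the recombined byte gets a 0x60 adjust plus carry when the high nibble does. A C model of the ABCD case, written from the logic above rather than taken from the emulator:

	#include <stdint.h>

	//packed-BCD add of two bytes plus the extend flag; *carry_out mirrors C/X
	static uint8_t abcd_model(uint8_t a, uint8_t b, uint8_t x_in, uint8_t *carry_out)
	{
		uint16_t low = (a & 0xF) + (b & 0xF) + x_in;
		if (low >= 0xA) {
			low += 6;       //decimal-adjust the low nibble
		}
		uint16_t sum = (a & 0xF0) + (b & 0xF0) + low;
		*carry_out = 0;
		if (sum >= 0xA0) {
			sum += 0x60;    //decimal-adjust the high nibble
			*carry_out = 1; //becomes both C and X on the 68K
		}
		return (uint8_t)sum;
	}

For example, 0x99 plus 0x99 with X set yields 0x99 with the carry set, i.e. 99 + 99 + 1 = 199 in decimal.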
@@ -1442 +1482 @@
 
 void translate_m68k_bit(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
 {
 	code_info *code = &opts->gen.code;
 	cycles(&opts->gen, inst->extra.size == OPSIZE_BYTE ? 4 : (
 		inst->op == M68K_BTST ? 6 : (inst->op == M68K_BCLR ? 10 : 8))
 	);
 	if (src_op->mode == MODE_IMMED) {
 		if (inst->extra.size == OPSIZE_BYTE) {
 			src_op->disp &= 0x7;
 		}
@@ -1470 +1510 @@
 				mov_rr(code, src_op->base, opts->gen.scratch1, SZ_B);
 			} else {
 				mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_B);
 			}
 			src_op->base = opts->gen.scratch1;
 		}
 	}
 	uint8_t size = inst->extra.size;
 	if (dst_op->mode == MODE_REG_DISPLACE8) {
 		if (src_op->base != opts->gen.scratch1 && src_op->base != opts->gen.scratch2) {
 			if (src_op->mode == MODE_REG_DIRECT) {
 				mov_rr(code, src_op->base, opts->gen.scratch1, SZ_D);
 			} else {
@@ -1512 +1552 @@
 		m68k_save_result(inst, opts);
 	}
 }
 
 void translate_m68k_chk(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
 {
 	code_info *code = &opts->gen.code;
 	cycles(&opts->gen, 6);
 	if (dst_op->mode == MODE_REG_DIRECT) {
 		cmp_ir(code, 0, dst_op->base, inst->extra.size);
 	} else {
@@ -1600 +1640 @@
 			movsx_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch2, SZ_W, SZ_D);
 		} else {
 			movzx_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch2, SZ_W, SZ_D);
 		}
 	}
+	uint32_t isize = 2;
+	switch(inst->src.addr_mode)
+	{
+	case MODE_AREG_DISPLACE:
+	case MODE_AREG_INDEX_DISP8:
+	case MODE_ABSOLUTE_SHORT:
+	case MODE_PC_INDEX_DISP8:
+	case MODE_IMMEDIATE:
+		isize = 4;
+		break;
+	case MODE_ABSOLUTE:
+		isize = 6;
+		break;
+	}
 	cmp_ir(code, 0, opts->gen.scratch2, SZ_D);
 	check_alloc_code(code, 6*MAX_INST_LEN);
 	code_ptr not_zero = code->cur + 1;
 	jcc(code, CC_NZ, code->cur + 2);
 	pop_r(code, RAX);
 	pop_r(code, RDX);
 	mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D);
-	mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
+	mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
 	jmp(code, opts->trap);
 	*not_zero = code->cur - (not_zero+1);
 	if (inst->op == M68K_DIVS) {
 		cdq(code);
 	} else {
@@ -1644 +1698 @@
 		shl_irdisp(code, 16, dst_op->base, dst_op->disp, SZ_D);
 		mov_rrdisp(code, RAX, dst_op->base, dst_op->disp, SZ_W);
 	}
 	cmp_ir(code, 0, RAX, SZ_W);
 	pop_r(code, RAX);
-	pop_r(code, RDX);
-	update_flags(opts, V0|Z|N);
+	if (dst_op->base == RDX) {
+		update_flags(opts, V0|Z|N);
+		add_ir(code, sizeof(void *), RSP, SZ_D);
+	} else {
+		pop_r(code, RDX);
+		update_flags(opts, V0|Z|N);
+	}
 	code_ptr end_off = code->cur + 1;
 	jmp(code, code->cur + 2);
 	*norm_off = code->cur - (norm_off + 1);
 	if (inst->op == M68K_DIVS) {
 		*skip_sec_check = code->cur - (skip_sec_check+1);
@@ -1806 +1865 @@
 		mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_B);
 	}
 	and_ir(code, 63, opts->gen.scratch1, SZ_D);
 	code_ptr zero_off = code->cur + 1;
 	jcc(code, CC_Z, code->cur + 2);
-	add_rr(code, opts->gen.scratch1, CYCLES, SZ_D);
-	add_rr(code, opts->gen.scratch1, CYCLES, SZ_D);
+	//add 2 cycles for every bit shifted
+	mov_ir(code, 2 * opts->gen.clock_divider, opts->gen.scratch2, SZ_D);
+	imul_rr(code, RCX, opts->gen.scratch2, SZ_D);
+	add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D);
 	cmp_ir(code, 32, opts->gen.scratch1, SZ_B);
 	code_ptr norm_off = code->cur + 1;
 	jcc(code, CC_L, code->cur + 2);
 	if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) {
 		flag_to_carry(opts, FLAG_X);
@@ -1863 +1924 @@
 
 void translate_m68k_illegal(m68k_options *opts, m68kinst *inst)
 {
 	code_info *code = &opts->gen.code;
 	call(code, opts->gen.save_context);
-#ifdef X86_64
-	mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
-#else
-	push_r(code, opts->gen.context_reg);
-#endif
-	call(code, (code_ptr)print_regs_exit);
+	call_args(code, (code_ptr)print_regs_exit, 1, opts->gen.context_reg);
 }
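Note: this hunk is the simplest instance of the pattern applied throughout the changeset: every open-coded, #ifdef-guarded argument-marshalling sequence collapses into the new call_args/call_args_abi helpers from the shared backend. Before and after, both taken from this hunk:

	/* old: per-call-site ABI handling */
	#ifdef X86_64
		mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
	#else
		push_r(code, opts->gen.context_reg);
	#endif
		call(code, (code_ptr)print_regs_exit);

	/* new: the helper picks argument registers (or the stack on 32-bit)
	 * and performs any cleanup itself */
		call_args(code, (code_ptr)print_regs_exit, 1, opts->gen.context_reg);

A sibling helper, call_args_abi, is used for some of the calls below (fprintf, sync_components, bp_handler); both live in the shared backend code introduced alongside this change.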
@@ -1875 +1931 @@
 
 #define BIT_SUPERVISOR 5
 
 void translate_m68k_andi_ori_ccr_sr(m68k_options *opts, m68kinst *inst)
@@ -1894 +1950 @@
 	if (inst->op == M68K_ANDI_SR) {
 		and_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
 	} else {
 		or_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
 	}
-	if ((base_flag == X0) ^ (((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR)) > 0)) {
+	if (inst->op == M68K_ANDI_SR && !(inst->src.params.immed & (1 << (BIT_SUPERVISOR + 8)))) {
 		//leave supervisor mode
 		swap_ssp_usp(opts);
 	}
 	if ((inst->op == M68K_ANDI_SR && (inst->src.params.immed & 0x700) != 0x700)
 		|| (inst->op == M68K_ORI_SR && inst->src.params.immed & 0x700)) {
@@ -1966 +2022 @@
 				mov_rr(code, src_op->base, opts->gen.scratch1, SZ_W);
 			} else {
 				mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_W);
 			}
 		}
-		call(code, inst->op == M68K_MOVE_SR ? opts->set_sr : opts->set_ccr);
+		if (inst->op == M68K_MOVE_SR) {
+			call(code, opts->set_sr);
+			call(code, opts->do_sync);
+		} else {
+			call(code, opts->set_ccr);
+		}
 		cycles(&opts->gen, 12);
 	}
 }
 
 void translate_m68k_stop(m68k_options *opts, m68kinst *inst)
@@ -2014 +2075 @@
 		mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_W);
 	}
 	m68k_save_result(inst, opts);
 }
 
-void translate_m68k_reset(m68k_options *opts, m68kinst *inst)
-{
-	code_info *code = &opts->gen.code;
-	call(code, opts->gen.save_context);
-#ifdef X86_64
-	mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
-#else
-	push_r(code, opts->gen.context_reg);
-#endif
-	call(code, (code_ptr)print_regs_exit);
-}
-
-void translate_m68k_rte(m68k_options *opts, m68kinst *inst)
-{
-	code_info *code = &opts->gen.code;
-	//TODO: Trap if not in system mode
-	//Read saved SR
-	areg_to_native(opts, 7, opts->gen.scratch1);
-	call(code, opts->read_16);
-	addi_areg(opts, 2, 7);
-	call(code, opts->set_sr);
-	//Read saved PC
-	areg_to_native(opts, 7, opts->gen.scratch1);
-	call(code, opts->read_32);
-	addi_areg(opts, 4, 7);
-	//Check if we've switched to user mode and swap stack pointers if needed
-	bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
-	code_ptr end_off = code->cur + 1;
-	jcc(code, CC_C, code->cur + 2);
-	swap_ssp_usp(opts);
-	*end_off = code->cur - (end_off+1);
-	//Get native address, sync components, recalculate integer points and jump to returned address
-	call(code, opts->native_addr_and_sync);
-	jmp_r(code, opts->gen.scratch1);
-}
-
 void translate_out_of_bounds(code_info *code)
 {
 	xor_rr(code, RDI, RDI, SZ_D);
-#ifdef X86_32
-	push_r(code, RDI);
-#endif
-	call(code, (code_ptr)exit);
-}
-
-void check_code_prologue(code_info *code)
-{
-	check_alloc_code(code, MAX_INST_LEN*4);
-};
+	call_args(code, (code_ptr)exit, 1, RDI);
+}
 
 void nop_fill_or_jmp_next(code_info *code, code_ptr old_end, code_ptr next_inst)
 {
 	if (next_inst == old_end && next_inst - code->cur < 2) {
 		while (code->cur < old_end) {
@@ -2081 +2098 @@
 {
 	uint32_t inst_start = get_instruction_start(context->native_code_map, address | 0xFF0000);
 	if (inst_start) {
 		m68k_options * options = context->options;
 		code_info *code = &options->gen.code;
-		code_ptr dst = get_native_address(context->native_code_map, inst_start);
+		code_ptr dst = get_native_address(context->options, inst_start);
 		code_info orig;
 		orig.cur = dst;
 		orig.last = dst + 128;
 		mov_ir(&orig, inst_start, options->gen.scratch2, SZ_D);
 
 		if (!options->retrans_stub) {
 			options->retrans_stub = code->cur;
 			call(code, options->gen.save_context);
 			push_r(code, options->gen.context_reg);
-#ifdef X86_32
-			push_r(code, options->gen.context_reg);
-			push_r(code, options->gen.scratch2);
-#endif
-			call(code, (code_ptr)m68k_retranslate_inst);
-#ifdef X86_32
-			add_ir(code, 8, RSP, SZ_D);
-#endif
+			call_args(code,(code_ptr)m68k_retranslate_inst, 2, options->gen.scratch2, options->gen.context_reg);
 			pop_r(code, options->gen.context_reg);
 			mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
 			call(code, options->gen.load_context);
 			jmp_r(code, options->gen.scratch1);
 		}
@@ -2120 +2130 @@
 	native.last = native.cur + 128;
 	code_ptr start_native = native.cur;
 	mov_ir(&native, address, opts->gen.scratch1, SZ_D);
 	if (!bp_stub) {
 		code_info *code = &opts->gen.code;
-		check_alloc_code(code, 5);
+		check_code_prologue(code);
 		bp_stub = code->cur;
 		call(&native, bp_stub);
 
 		//Calculate length of prologue
 		check_cycles_int(&opts->gen, address);
@@ -2132 +2142 @@
 		code->cur = bp_stub;
 
 		//Save context and call breakpoint handler
 		call(code, opts->gen.save_context);
 		push_r(code, opts->gen.scratch1);
-#ifdef X86_64
-		mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
-		mov_rr(code, opts->gen.scratch1, RSI, SZ_D);
-#else
-		push_r(code, opts->gen.scratch1);
-		push_r(code, opts->gen.context_reg);
-#endif
-		call(code, bp_handler);
-#ifdef X86_32
-		add_ir(code, 8, RSP, SZ_D);
-#endif
+		call_args_abi(code, bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1);
 		mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
 		//Restore context
 		call(code, opts->gen.load_context);
 		pop_r(code, opts->gen.scratch1);
 		//do prologue stuff
@@ -2162 +2162 @@
 	} else {
 		call(&native, bp_stub);
 	}
 }
 
-void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks)
+void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks, uint32_t clock_divider)
 {
 	memset(opts, 0, sizeof(*opts));
+	opts->gen.memmap = memmap;
+	opts->gen.memmap_chunks = num_chunks;
 	opts->gen.address_size = SZ_D;
 	opts->gen.address_mask = 0xFFFFFF;
+	opts->gen.byte_swap = 1;
 	opts->gen.max_address = 0x1000000;
 	opts->gen.bus_cycles = BUS;
+	opts->gen.clock_divider = clock_divider;
 	opts->gen.mem_ptr_off = offsetof(m68k_context, mem_pointers);
 	opts->gen.ram_flags_off = offsetof(m68k_context, ram_code_flags);
+	opts->gen.ram_flags_shift = 11;
 	for (int i = 0; i < 8; i++)
 	{
 		opts->dregs[i] = opts->aregs[i] = -1;
 	}
 #ifdef X86_64
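Note: init_m68k_opts now takes the clock divider as a parameter instead of assuming one cycle-counter tick per 68K cycle, and it records the memory map, byte order, and RAM-flag shift in the shared options struct. A hypothetical call site under the new signature; the divider of 7 matches the Genesis/Mega Drive, whose 68K runs at the master clock divided by 7:

	/* illustrative wrapper, not a function from this changeset */
	static void setup_main_cpu(m68k_options *opts, memmap_chunk *memmap, uint32_t num_chunks)
	{
		init_m68k_opts(opts, memmap, num_chunks, 7);
	}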
@@ -2211 +2216 @@
 
 
 	opts->gen.native_code_map = malloc(sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
 	memset(opts->gen.native_code_map, 0, sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
 	opts->gen.deferred = NULL;
-	opts->gen.ram_inst_sizes = malloc(sizeof(uint8_t *) * 64);
-	memset(opts->gen.ram_inst_sizes, 0, sizeof(uint8_t *) * 64);
+
+	uint32_t inst_size_size = sizeof(uint8_t *) * ram_size(&opts->gen) / 1024;
+	opts->gen.ram_inst_sizes = malloc(inst_size_size);
+	memset(opts->gen.ram_inst_sizes, 0, inst_size_size);
 
 	code_info *code = &opts->gen.code;
 	init_code_info(code);
 
 	opts->gen.save_context = code->cur;
@@ -2236 +2243 @@
 	mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(m68k_context, current_cycle), SZ_D);
 	retn(code);
 
 	opts->gen.load_context = code->cur;
 	for (int i = 0; i < 5; i++)
+	{
 		if (opts->flag_regs[i] >= 0) {
 			mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + i, opts->flag_regs[i], SZ_B);
 		}
+	}
 	for (int i = 0; i < 8; i++)
 	{
 		if (opts->dregs[i] >= 0) {
 			mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * i, opts->dregs[i], SZ_D);
 		}
 		if (opts->aregs[i] >= 0) {
 			mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, opts->aregs[i], SZ_D);
 		}
 	}
-	mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, current_cycle), CYCLES, SZ_D);
-	mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, target_cycle), LIMIT, SZ_D);
+	mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, current_cycle), opts->gen.cycles, SZ_D);
+	mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, target_cycle), opts->gen.limit, SZ_D);
 	retn(code);
 
 	opts->start_context = (start_fun)code->cur;
+	save_callee_save_regs(code);
 #ifdef X86_64
 	if (opts->gen.scratch2 != RDI) {
 		mov_rr(code, RDI, opts->gen.scratch2, SZ_PTR);
 	}
-	//save callee save registers
-	push_r(code, RBP);
-	push_r(code, R12);
-	push_r(code, R13);
-	push_r(code, R14);
-	push_r(code, R15);
 #else
-	//save callee save registers
-	push_r(code, RBP);
-	push_r(code, RBX);
-	push_r(code, RSI);
-	push_r(code, RDI);
-
 	mov_rdispr(code, RSP, 20, opts->gen.scratch2, SZ_D);
 	mov_rdispr(code, RSP, 24, opts->gen.context_reg, SZ_D);
 #endif
 	call(code, opts->gen.load_context);
 	call_r(code, opts->gen.scratch2);
 	call(code, opts->gen.save_context);
-#ifdef X86_64
-	//restore callee save registers
-	pop_r(code, R15);
-	pop_r(code, R14);
-	pop_r(code, R13);
-	pop_r(code, R12);
-	pop_r(code, RBP);
-#else
-	pop_r(code, RDI);
-	pop_r(code, RSI);
-	pop_r(code, RBX);
-	pop_r(code, RBP);
-#endif
+	restore_callee_save_regs(code);
 	retn(code);
 
 	opts->native_addr = code->cur;
 	call(code, opts->gen.save_context);
 	push_r(code, opts->gen.context_reg);
-#ifdef X86_64
-	mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); //move context to 1st arg reg
-	mov_rr(code, opts->gen.scratch1, RSI, SZ_D); //move address to 2nd arg reg
-#else
-	push_r(code, opts->gen.scratch1);
-	push_r(code, opts->gen.context_reg);
-#endif
-	call(code, (code_ptr)get_native_address_trans);
-#ifdef X86_32
-	add_ir(code, 8, RSP, SZ_D);
-#endif
+	call_args(code, (code_ptr)get_native_address_trans, 2, opts->gen.context_reg, opts->gen.scratch1);
 	mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg
 	pop_r(code, opts->gen.context_reg);
 	call(code, opts->gen.load_context);
 	retn(code);
 
 	opts->native_addr_and_sync = code->cur;
 	call(code, opts->gen.save_context);
 	push_r(code, opts->gen.scratch1);
-#ifdef X86_64
-	mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
-	xor_rr(code, RSI, RSI, SZ_D);
-	test_ir(code, 8, RSP, SZ_PTR); //check stack alignment
-	code_ptr do_adjust_rsp = code->cur + 1;
-	jcc(code, CC_NZ, code->cur + 2);
-	call(code, (code_ptr)sync_components);
-	code_ptr no_adjust_rsp = code->cur + 1;
-	jmp(code, code->cur + 2);
-	*do_adjust_rsp = code->cur - (do_adjust_rsp+1);
-	sub_ir(code, 8, RSP, SZ_PTR);
-	call(code, (code_ptr)sync_components);
-	add_ir(code, 8, RSP, SZ_PTR);
-	*no_adjust_rsp = code->cur - (no_adjust_rsp+1);
-	pop_r(code, RSI);
-	push_r(code, RAX);
-	mov_rr(code, RAX, RDI, SZ_PTR);
-	call(code, (code_ptr)get_native_address_trans);
-#else
-	//TODO: Add support for pushing a constant in gen_x86
-	xor_rr(code, RAX, RAX, SZ_D);
-	push_r(code, RAX);
-	push_r(code, opts->gen.context_reg);
-	call(code, (code_ptr)sync_components);
-	add_ir(code, 8, RSP, SZ_D);
+
+	xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D);
+	call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
 	pop_r(code, RSI); //restore saved address from opts->gen.scratch1
 	push_r(code, RAX); //save context pointer for later
-	push_r(code, RSI); //2nd arg -- address
-	push_r(code, RAX); //1st arg -- context pointer
-	call(code, (code_ptr)get_native_address_trans);
-	add_ir(code, 8, RSP, SZ_D);
-#endif
-
+	call_args(code, (code_ptr)get_native_address_trans, 2, RAX, RSI);
 	mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg
 	pop_r(code, opts->gen.context_reg);
 	call(code, opts->gen.load_context);
 	retn(code);
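Note: the long #ifdef X86_64 block deleted above open-coded the SysV AMD64 rule that the stack be 16-byte aligned at a call: it tested bit 3 of RSP and emitted two call paths, one with an 8-byte adjustment and one without. That responsibility moves into call_args_abi, which is why the identical test_ir/sub_ir/add_ir dance also disappears from do_sync and handle_cycle_limit_int below. The deleted check, modeled in C (hypothetical, for illustration only):

	#include <stdint.h>

	//returns the adjustment the generated code applied around a call:
	//8 bytes when RSP is not 16-byte aligned, otherwise none
	static uintptr_t call_alignment_padding(uintptr_t rsp)
	{
		return (rsp & 8) ? 8 : 0; //sub_ir before the call, add_ir after
	}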
@@ -2355 +2304 @@
 
 	opts->gen.handle_cycle_limit = code->cur;
-	cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D);
+	cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.cycles, SZ_D);
 	code_ptr skip_sync = code->cur + 1;
 	jcc(code, CC_C, code->cur + 2);
 	opts->do_sync = code->cur;
 	push_r(code, opts->gen.scratch1);
 	push_r(code, opts->gen.scratch2);
 	call(code, opts->gen.save_context);
-#ifdef X86_64
-	mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
-	xor_rr(code, RSI, RSI, SZ_D);
-	test_ir(code, 8, RSP, SZ_D);
-	code_ptr adjust_rsp = code->cur + 1;
-	jcc(code, CC_NZ, code->cur + 2);
-	call(code, (code_ptr)sync_components);
-	code_ptr no_adjust = code->cur + 1;
-	jmp(code, code->cur + 2);
-	*adjust_rsp = code->cur - (adjust_rsp + 1);
-	sub_ir(code, 8, RSP, SZ_PTR);
-	call(code, (code_ptr)sync_components);
-	add_ir(code, 8, RSP, SZ_PTR);
-	*no_adjust = code->cur - (no_adjust+1);
-#else
-	//TODO: Add support for pushing a constant in gen_x86
-	xor_rr(code, RAX, RAX, SZ_D);
-	push_r(code, RAX);
-	push_r(code, opts->gen.context_reg);
-	call(code, (code_ptr)sync_components);
-	add_ir(code, 8, RSP, SZ_D);
-#endif
+	xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D);
+	call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
 	mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
 	call(code, opts->gen.load_context);
 	pop_r(code, opts->gen.scratch2);
 	pop_r(code, opts->gen.scratch1);
 	*skip_sync = code->cur - (skip_sync+1);
 	retn(code);
 
-	opts->read_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_16);
-	opts->read_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_8);
-	opts->write_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_16);
-	opts->write_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_8);
+	opts->gen.handle_code_write = (code_ptr)m68k_handle_code_write;
+
+	opts->read_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_16, NULL);
+	opts->read_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_8, NULL);
+	opts->write_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_16, NULL);
+	opts->write_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_8, NULL);
 
 	opts->read_32 = code->cur;
 	push_r(code, opts->gen.scratch1);
 	call(code, opts->read_16);
 	mov_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_W);
@@ -2488 +2419 @@
 		}
 	}
 	retn(code);
 
 	opts->gen.handle_cycle_limit_int = code->cur;
-	cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), CYCLES, SZ_D);
+	cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D);
 	code_ptr do_int = code->cur + 1;
 	jcc(code, CC_NC, code->cur + 2);
-	cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D);
+	cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.cycles, SZ_D);
 	skip_sync = code->cur + 1;
 	jcc(code, CC_C, code->cur + 2);
 	call(code, opts->gen.save_context);
-#ifdef X86_64
-	mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
-	mov_rr(code, opts->gen.scratch1, RSI, SZ_D);
-	test_ir(code, 8, RSP, SZ_D);
-	adjust_rsp = code->cur + 1;
-	jcc(code, CC_NZ, code->cur + 2);
-	call(code, (code_ptr)sync_components);
-	no_adjust = code->cur + 1;
-	jmp(code, code->cur + 2);
-	*adjust_rsp = code->cur - (adjust_rsp + 1);
-	sub_ir(code, 8, RSP, SZ_PTR);
-	call(code, (code_ptr)sync_components);
-	add_ir(code, 8, RSP, SZ_PTR);
-	*no_adjust = code->cur - (no_adjust+1);
-#else
-	push_r(code, opts->gen.scratch1);
-	push_r(code, opts->gen.context_reg);
-	call(code, (code_ptr)sync_components);
-	add_ir(code, 8, RSP, SZ_D);
-#endif
+	call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
 	mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
 	jmp(code, opts->gen.load_context);
 	*skip_sync = code->cur - (skip_sync+1);
 	retn(code);
 	*do_int = code->cur - (do_int+1);
 	//set target cycle to sync cycle
-	mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), LIMIT, SZ_D);
+	mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.limit, SZ_D);
 	//swap USP and SSP if not already in supervisor mode
-	bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
-	code_ptr already_supervisor = code->cur + 1;
-	jcc(code, CC_C, code->cur + 2);
-	swap_ssp_usp(opts);
-	*already_supervisor = code->cur - (already_supervisor+1);
+	check_user_mode_swap_ssp_usp(opts);
 	//save PC
 	subi_areg(opts, 4, 7);
 	areg_to_native(opts, 7, opts->gen.scratch2);
 	call(code, opts->write_32_lowfirst);
 	//save status register
@@ -2557 +2465 @@
 	jmp_r(code, opts->gen.scratch1);
 
 	opts->trap = code->cur;
 	push_r(code, opts->gen.scratch2);
 	//swap USP and SSP if not already in supervisor mode
-	bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
-	already_supervisor = code->cur + 1;
-	jcc(code, CC_C, code->cur + 2);
-	swap_ssp_usp(opts);
-	*already_supervisor = code->cur - (already_supervisor+1);
+	check_user_mode_swap_ssp_usp(opts);
 	//save PC
 	subi_areg(opts, 4, 7);
 	areg_to_native(opts, 7, opts->gen.scratch2);
 	call(code, opts->write_32_lowfirst);
 	//save status register
@@ -2580 +2484 @@
 	shl_ir(code, 2, opts->gen.scratch1, SZ_D);
 	call(code, opts->read_32);
 	call(code, opts->native_addr_and_sync);
 	cycles(&opts->gen, 18);
 	jmp_r(code, opts->gen.scratch1);
+
+	opts->odd_address = code->cur;
+	mov_ir(code, (int64_t)stderr, RDI, SZ_PTR);
+	mov_ir(code, (int64_t)"Attempt to execute code at odd address\n", RSI, SZ_PTR);
+	call_args_abi(code, (code_ptr)fprintf, 2, RDI, RSI, RDX);
+	xor_rr(code, RDI, RDI, SZ_D);
+	call_args(code, (code_ptr)exit, 1, RDI);
 }