comparison m68k_to_x86.c @ 82:6331ddec228f

Initial stab at interrupt support. Make native code offsets bigger so I don't have to worry about overflowing the offset. Implement neg and not (untested).
author Mike Pavone <pavone@retrodev.com>
date Wed, 26 Dec 2012 11:09:04 -0800
parents 6d231dbe75ab
children 3d3966c254b2
comparison of parent 81:6d231dbe75ab with 82:6331ddec228f
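
The commit message's point about making native code offsets bigger corresponds to the switch from uint16_t to int32_t entries in map_native_address in the diff below. As a rough sketch of how such a widened table might be read back (this is not code from the changeset; the struct layout, chunk size, and function name are illustrative stand-ins), a lookup could look like this:

    #include <stdint.h>
    #include <stddef.h>

    #define NATIVE_CHUNK_SIZE 1024   /* illustrative value only; the real constant lives in the translator */

    typedef struct {                 /* illustrative stand-in for the translator's native_map_slot */
        uint8_t * base;              /* first native address generated for this chunk */
        int32_t * offsets;           /* assumed widened from uint16_t by this changeset */
    } native_map_slot;

    uint8_t * example_native_lookup(native_map_slot * native_code_map, uint32_t address)
    {
        address &= 0xFFFFFF;
        uint32_t chunk = address / NATIVE_CHUNK_SIZE;
        if (!native_code_map[chunk].base) {
            return NULL;             /* nothing in this chunk has been translated yet */
        }
        int32_t offset = native_code_map[chunk].offsets[address % NATIVE_CHUNK_SIZE];
        if (offset == -1) {
            return NULL;             /* the 0xFF fill pattern reads back as -1: untranslated address */
        }
        return native_code_map[chunk].base + offset;
    }

With 16-bit entries, a chunk whose translated output grows past 64KB of native code would wrap the offset; a 32-bit entry removes that concern, which matches the stated motivation.
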
@@ -24,26 +24,41 @@
 uint8_t base;
 uint8_t index;
 uint8_t cycles;
 } x86_ea;
 
-void handle_cycle_limit();
+void handle_cycle_limit_int();
 void m68k_read_word_scratch1();
 void m68k_read_long_scratch1();
 void m68k_read_byte_scratch1();
 void m68k_write_word();
 void m68k_write_long_lowfirst();
 void m68k_write_long_highfirst();
 void m68k_write_byte();
 void m68k_save_context();
 void m68k_modified_ret_addr();
 void m68k_native_addr();
+void m68k_native_addr_and_sync();
+void set_sr();
+void set_ccr();
+void get_sr();
 void m68k_start_context(uint8_t * addr, m68k_context * context);
 
 uint8_t * cycles(uint8_t * dst, uint32_t num)
 {
 dst = add_ir(dst, num, CYCLES, SZ_D);
+}
+
+uint8_t * check_cycles_int(uint8_t * dst, uint32_t address)
+{
+dst = cmp_rr(dst, LIMIT, CYCLES, SZ_D);
+uint8_t * jmp_off = dst+1;
+dst = jcc(dst, CC_NC, dst + 7);
+dst = mov_ir(dst, address, SCRATCH1, SZ_D);
+dst = call(dst, (uint8_t *)handle_cycle_limit_int);
+*jmp_off = dst - (jmp_off+1);
+return dst;
 }
 
 int8_t native_reg(m68k_op_info * op, x86_68k_options * opts)
 {
 if (op->addr_mode == MODE_REG) {
@@ -511,18 +526,16 @@
 }
 }
 
 void map_native_address(native_map_slot * native_code_map, uint32_t address, uint8_t * native_addr)
 {
-//FIXME: This probably isn't going to work with real code in a lot of cases, no guarantee that
-//all the code in 1KB block is going to be translated at the same time
 address &= 0xFFFFFF;
 uint32_t chunk = address / NATIVE_CHUNK_SIZE;
 if (!native_code_map[chunk].base) {
 native_code_map[chunk].base = native_addr;
-native_code_map[chunk].offsets = malloc(sizeof(uint16_t) * NATIVE_CHUNK_SIZE);
-memset(native_code_map[chunk].offsets, 0xFF, sizeof(uint16_t) * NATIVE_CHUNK_SIZE);
+native_code_map[chunk].offsets = malloc(sizeof(int32_t) * NATIVE_CHUNK_SIZE);
+memset(native_code_map[chunk].offsets, 0xFF, sizeof(int32_t) * NATIVE_CHUNK_SIZE);
 }
 uint32_t offset = address % NATIVE_CHUNK_SIZE;
 native_code_map[chunk].offsets[offset] = native_addr-native_code_map[chunk].base;
 }
 
@@ -1405,10 +1418,11 @@
 
 uint8_t * translate_m68k(uint8_t * dst, m68kinst * inst, x86_68k_options * opts)
 {
 uint8_t * end_off;
 map_native_address(opts->native_code_map, inst->address, dst);
+dst = check_cycles_int(dst, inst->address);
 if (inst->op == M68K_MOVE) {
 return translate_m68k_move(dst, inst, opts);
 } else if(inst->op == M68K_LEA) {
 return translate_m68k_lea(dst, inst, opts);
 } else if(inst->op == M68K_BSR) {
@@ -1651,13 +1665,11 @@
 case M68K_ILLEGAL:
 dst = call(dst, (uint8_t *)m68k_save_context);
 dst = mov_rr(dst, CONTEXT, RDI, SZ_Q);
 dst = call(dst, (uint8_t *)print_regs_exit);
 break;
-/*case M68K_JSR:
-case M68K_LEA:
-case M68K_MOVE_FROM_SR:
+/*case M68K_MOVE_FROM_SR:
 break;*/
 case M68K_MOVE_CCR:
 case M68K_MOVE_SR:
 //TODO: Privilege check for MOVE to SR
 if (src_op.mode == MODE_IMMED) {
@@ -1675,38 +1687,20 @@
 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
 }
 }
 dst = cycles(dst, 12);
 } else {
-if (src_op.mode == MODE_REG_DIRECT) {
-dst = mov_rr(dst, src_op.base, FLAG_C, SZ_B);
-} else {
-dst = mov_rdisp8r(dst, src_op.base, src_op.disp, FLAG_C, SZ_B);
-}
-dst = mov_rr(dst, FLAG_C, FLAG_V, SZ_B);
-dst = and_ir(dst, 1, FLAG_C, SZ_B);
-dst = shr_ir(dst, 1, FLAG_V, SZ_B);
-dst = mov_rr(dst, FLAG_V, FLAG_Z, SZ_B);
-dst = and_ir(dst, 1, FLAG_V, SZ_B);
-dst = shr_ir(dst, 1, FLAG_Z, SZ_B);
-dst = mov_rr(dst, FLAG_Z, FLAG_N, SZ_B);
-dst = and_ir(dst, 1, FLAG_Z, SZ_B);
-dst = shr_ir(dst, 1, FLAG_N, SZ_B);
-dst = mov_rr(dst, 1, SCRATCH2, SZ_B);
-dst = shr_ir(dst, 1, SCRATCH2, SZ_B);
-dst = and_ir(dst, 1, SCRATCH2, SZ_B);
-dst = mov_rrind(dst, SCRATCH2, CONTEXT, SZ_B);
+if (src_op.base != SCRATCH1) {
+if (src_op.mode == MODE_REG_DIRECT) {
+dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_W);
+} else {
+dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_W);
+}
+}
+dst = call(dst, (uint8_t *)(inst->op == M68K_MOVE_SR ? set_sr : set_ccr));
 dst = cycles(dst, 12);
-if (inst->op == M68K_MOVE_SR) {
-if (src_op.mode == MODE_REG_DIRECT) {
-dst = mov_rr(dst, src_op.base, SCRATCH2, SZ_W);
-} else {
-dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH2, SZ_W);
-}
-}
-dst = shr_ir(dst, 8, SCRATCH2, SZ_B);
-dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, offsetof(m68k_context, status), SZ_B);
+
 }
 break;
 case M68K_MOVE_USP:
 dst = cycles(dst, BUS);
 //TODO: Trap if not in supervisor mode
@@ -1728,19 +1722,40 @@
 }
 break;
 /*case M68K_MOVEP:
 case M68K_MULS:
 case M68K_MULU:
-case M68K_NBCD:
+case M68K_NBCD:*/
 case M68K_NEG:
-case M68K_NEGX:
+if (dst_op.mode == MODE_REG_DIRECT) {
+dst = neg_r(dst, dst_op.base, inst->extra.size);
+} else {
+dst = not_rdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
+}
+dst = mov_ir(dst, 0, FLAG_C, SZ_B);
+dst = setcc_r(dst, CC_Z, FLAG_Z);
+dst = setcc_r(dst, CC_S, FLAG_N);
+dst = mov_ir(dst, 0, FLAG_V, SZ_B);
+dst = m68k_save_result(inst, dst, opts);
+break;
+/*case M68K_NEGX:
 break;*/
 case M68K_NOP:
 dst = cycles(dst, BUS);
 break;
-//case M68K_NOT:
-// break;
+case M68K_NOT:
+if (dst_op.mode == MODE_REG_DIRECT) {
+dst = not_r(dst, dst_op.base, inst->extra.size);
+} else {
+dst = not_rdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
+}
+dst = mov_ir(dst, 0, FLAG_C, SZ_B);
+dst = setcc_r(dst, CC_Z, FLAG_Z);
+dst = setcc_r(dst, CC_S, FLAG_N);
+dst = mov_ir(dst, 0, FLAG_V, SZ_B);
+dst = m68k_save_result(inst, dst, opts);
+break;
 case M68K_OR:
 dst = cycles(dst, BUS);
 if (src_op.mode == MODE_REG_DIRECT) {
 if (dst_op.mode == MODE_REG_DIRECT) {
 dst = or_rr(dst, src_op.base, dst_op.base, inst->extra.size);
@@ -1767,13 +1782,32 @@
 case M68K_PEA:
 case M68K_RESET:
 case M68K_ROL:
 case M68K_ROR:
 case M68K_ROXL:
-case M68K_ROXR:
+case M68K_ROXR:*/
 case M68K_RTE:
-case M68K_RTR:
+dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
+dst = call(dst, (uint8_t *)m68k_read_long_scratch1);
+dst = push_r(dst, SCRATCH1);
+dst = add_ir(dst, 4, opts->aregs[7], SZ_D);
+dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
+dst = call(dst, (uint8_t *)m68k_read_word_scratch1);
+dst = add_ir(dst, 2, opts->aregs[7], SZ_D);
+dst = call(dst, (uint8_t *)set_sr);
+dst = pop_r(dst, SCRATCH1);
+dst = bt_irdisp8(dst, 5, CONTEXT, offsetof(m68k_context, status), SZ_B);
+end_off = dst+1;
+dst = jcc(dst, CC_NC, dst+2);
+dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
+dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
+dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
+*end_off = dst - (end_off+1);
+dst = call(dst, (uint8_t *)m68k_native_addr_and_sync);
+dst = jmp_r(dst, SCRATCH1);
+break;
+/*case M68K_RTR:
 case M68K_SBCD:
 case M68K_SCC:
 case M68K_STOP:
 break;*/
 case M68K_SUB:
@@ -1855,10 +1889,13 @@
 
 uint8_t * translate_m68k_stream(uint8_t * dst, uint8_t * dst_end, uint32_t address, m68k_context * context)
 {
 m68kinst instbuf;
 x86_68k_options * opts = context->options;
+if(get_native_address(opts->native_code_map, address)) {
+return dst;
+}
 char disbuf[1024];
 uint16_t *encoded = context->mem_pointers[0] + address/2, *next;
 do {
 do {
 if (dst_end-dst < 128) {
@@ -1916,7 +1953,8 @@
 void init_68k_context(m68k_context * context, native_map_slot * native_code_map, void * opts)
 {
 memset(context, 0, sizeof(m68k_context));
 context->native_code_map = native_code_map;
 context->options = opts;
+context->int_cycle = 0xFFFFFFFF;
 }
 