changeset 49:d2e43d64e999

Add untested support for the and, eor, or, swap, tst, and nop instructions. Add a call to m68k_save_result for add and sub so that they properly save results to memory destinations.
author Mike Pavone <pavone@retrodev.com>
date Wed, 12 Dec 2012 23:21:11 -0800
parents 0bdda50c7364
children 4836d1f3841a
files gen_x86.c gen_x86.h m68k_to_x86.c
diffstat 3 files changed, 261 insertions(+), 6 deletions(-)
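
As a rough illustration of how the immediate shift/rotate emitters introduced below are meant to be called (this sketch is not part of the changeset; it assumes the RAX register constant and an SZ_D long-word size constant from gen_x86.h, and a caller-provided output buffer), the caller passes a write pointer and receives the pointer just past the emitted bytes:

#include <stdint.h>
#include <stdio.h>
#include "gen_x86.h"

int main(void)
{
	uint8_t buf[16];
	//emit rol eax, 16 -- the same emitter call the SWAP translation uses for a register operand
	uint8_t *end = rol_ir(buf, 16, RAX, SZ_D);
	//with the opcode and ModRM definitions below, this should come out as C1 C0 10
	for (uint8_t *p = buf; p < end; p++) {
		printf("%02X ", *p);
	}
	putchar('\n');
	return 0;
}
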
--- a/gen_x86.c	Wed Dec 12 21:25:31 2012 -0800
+++ b/gen_x86.c	Wed Dec 12 23:21:11 2012 -0800
@@ -28,8 +28,11 @@
 #define OP_POPF 0x9D
 #define OP_MOV_I8R 0xB0
 #define OP_MOV_IR 0xB8
+#define OP_SHIFTROT_IR 0xC0
 #define OP_RETN 0xC3
 #define OP_MOV_IEA 0xC6
+#define OP_SHIFTROT_1 0xD0
+#define OP_SHIFTROT_CL 0xD2
 #define OP_CALL 0xE8
 #define OP_JMP 0xE9
 #define OP_JMP_BYTE 0xEB
@@ -47,6 +50,15 @@
 #define OP_EX_XORI 0x6
 #define OP_EX_CMPI 0x7
 
+#define OP_EX_ROL 0x0
+#define OP_EX_ROR 0x1
+#define OP_EX_RCL 0x2
+#define OP_EX_RCR 0x3
+#define OP_EX_SHL 0x4
+#define OP_EX_SHR 0x5
+#define OP_EX_SAL 0x6 //identical to SHL
+#define OP_EX_SAR 0x7
+
 #define BIT_IMMED_RAX 0x4
 #define BIT_DIR 0x2
 #define BIT_SIZE 0x1
@@ -294,6 +306,133 @@
 }
 
 
+uint8_t * x86_shiftrot_ir(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst, uint8_t size)
+{
+	if (size == SZ_W) {
+		*(out++) = PRE_SIZE;
+	}
+	if (size == SZ_Q || dst >= R8 || (size == SZ_B && dst >= RSP && dst <= RDI)) {
+		*out = PRE_REX;
+		if (size == SZ_Q) {
+			*out |= REX_QUAD;
+		}
+		if (dst >= R8) {
+			*out |= REX_RM_FIELD;
+			dst -= (R8 - X86_R8);
+		}
+		out++;
+	}
+	if (dst >= AH && dst <= BH) {
+		dst -= (AH-X86_AH);
+	}
+
+	*(out++) = (val == 1 ? OP_SHIFTROT_1: OP_SHIFTROT_IR) | (size == SZ_B ? 0 : BIT_SIZE);
+	*(out++) = MODE_REG_DIRECT | dst | (op_ex << 3);
+	if (val != 1) {
+		*(out++) = val;
+	}
+	return out;
+}
+
+uint8_t * x86_shiftrot_irdisp8(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst, int8_t disp, uint8_t size)
+{
+	if (size == SZ_W) {
+		*(out++) = PRE_SIZE;
+	}
+	if (size == SZ_Q || dst >= R8 || (size == SZ_B && dst >= RSP && dst <= RDI)) {
+		*out = PRE_REX;
+		if (size == SZ_Q) {
+			*out |= REX_QUAD;
+		}
+		if (dst >= R8) {
+			*out |= REX_RM_FIELD;
+			dst -= (R8 - X86_R8);
+		}
+		out++;
+	}
+	if (dst >= AH && dst <= BH) {
+		dst -= (AH-X86_AH);
+	}
+
+	*(out++) = (val == 1 ? OP_SHIFTROT_1: OP_SHIFTROT_IR) | (size == SZ_B ? 0 : BIT_SIZE);
+	*(out++) = MODE_REG_DISPLACE8 | dst | (op_ex << 3);
+	*(out++) = disp;
+	if (val != 1) {
+		*(out++) = val;
+	}
+	return out;
+}
+
+uint8_t * rol_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+{
+	return x86_shiftrot_ir(out, OP_EX_ROL, val, dst, size);
+}
+
+uint8_t * ror_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+{
+	return x86_shiftrot_ir(out, OP_EX_ROR, val, dst, size);
+}
+
+uint8_t * rcl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+{
+	return x86_shiftrot_ir(out, OP_EX_RCL, val, dst, size);
+}
+
+uint8_t * rcr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+{
+	return x86_shiftrot_ir(out, OP_EX_RCR, val, dst, size);
+}
+
+uint8_t * shl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+{
+	return x86_shiftrot_ir(out, OP_EX_SHL, val, dst, size);
+}
+
+uint8_t * shr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+{
+	return x86_shiftrot_ir(out, OP_EX_SHR, val, dst, size);
+}
+
+uint8_t * sar_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+{
+	return x86_shiftrot_ir(out, OP_EX_SAR, val, dst, size);
+}
+
+uint8_t * rol_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_shiftrot_irdisp8(out, OP_EX_ROL, val, dst_base, disp, size);
+}
+
+uint8_t * ror_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_shiftrot_irdisp8(out, OP_EX_ROR, val, dst_base, disp, size);
+}
+
+uint8_t * rcl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_shiftrot_irdisp8(out, OP_EX_RCL, val, dst_base, disp, size);
+}
+
+uint8_t * rcr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_shiftrot_irdisp8(out, OP_EX_RCR, val, dst_base, disp, size);
+}
+
+uint8_t * shl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_shiftrot_irdisp8(out, OP_EX_SHL, val, dst_base, disp, size);
+}
+
+uint8_t * shr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_shiftrot_irdisp8(out, OP_EX_SHR, val, dst_base, disp, size);
+}
+
+uint8_t * sar_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_shiftrot_irdisp8(out, OP_EX_SAR, val, dst_base, disp, size);
+}
+
 uint8_t * add_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
 {
 	return x86_rr_sizedir(out, OP_ADD, src, dst, size);
--- a/gen_x86.h	Wed Dec 12 21:25:31 2012 -0800
+++ b/gen_x86.h	Wed Dec 12 23:21:11 2012 -0800
@@ -65,6 +65,20 @@
 } x86_modes;
 
 
+uint8_t * rol_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
+uint8_t * ror_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
+uint8_t * rcl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
+uint8_t * rcr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
+uint8_t * shl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
+uint8_t * shr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
+uint8_t * sar_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
+uint8_t * rol_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * ror_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * rcl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * rcr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * shl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * shr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * sar_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * add_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * or_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * xor_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
--- a/m68k_to_x86.c	Wed Dec 12 21:25:31 2012 -0800
+++ b/m68k_to_x86.c	Wed Dec 12 23:21:11 2012 -0800
@@ -689,21 +689,47 @@
 		dst = setcc_r(dst, CC_O, FLAG_V);
 		dst = mov_rrind(dst, FLAG_C, CONTEXT, SZ_B);
 		dst = check_cycles(dst);
+		dst = m68k_save_result(inst, dst, opts);
 		break;
 	case M68K_ADDX:
+		break;
 	case M68K_AND:
+		dst = cycles(dst, BUS);
+		if (src_op.mode == MODE_REG_DIRECT) {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = and_rr(dst, src_op.base, dst_op.base, inst->extra.size);
+			} else {
+				dst = and_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		} else if (src_op.mode == MODE_REG_DISPLACE8) {
+			dst = and_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
+		} else {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = and_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
+			} else {
+				dst = and_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		}
+		dst = mov_ir(dst, 0, FLAG_C, SZ_B);
+		dst = setcc_r(dst, CC_Z, FLAG_Z);
+		dst = setcc_r(dst, CC_S, FLAG_N);
+		dst = mov_ir(dst, 0, FLAG_V, SZ_B);
+		dst = check_cycles(dst);
+		dst = m68k_save_result(inst, dst, opts);
+		break;
 	case M68K_ANDI_CCR:
 	case M68K_ANDI_SR:
 	case M68K_ASL:
+	case M68K_LSL:
 	case M68K_ASR:
-	case M68K_BCC:
+	case M68K_LSR:
 	case M68K_BCHG:
 	case M68K_BCLR:
 	case M68K_BSET:
-	case M68K_BSR:
 	case M68K_BTST:
 	case M68K_CHK:
 	case M68K_CLR:
+		break;
 	case M68K_CMP:
 		dst = cycles(dst, BUS);
 		if (src_op.mode == MODE_REG_DIRECT) {
@@ -727,10 +753,33 @@
 		dst = setcc_r(dst, CC_O, FLAG_V);
 		dst = check_cycles(dst);
 		break;
-	case M68K_DBCC:
 	case M68K_DIVS:
 	case M68K_DIVU:
+		break;
 	case M68K_EOR:
+		dst = cycles(dst, BUS);
+		if (src_op.mode == MODE_REG_DIRECT) {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = xor_rr(dst, src_op.base, dst_op.base, inst->extra.size);
+			} else {
+				dst = xor_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		} else if (src_op.mode == MODE_REG_DISPLACE8) {
+			dst = xor_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
+		} else {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = xor_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
+			} else {
+				dst = xor_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		}
+		dst = mov_ir(dst, 0, FLAG_C, SZ_B);
+		dst = setcc_r(dst, CC_Z, FLAG_Z);
+		dst = setcc_r(dst, CC_S, FLAG_N);
+		dst = mov_ir(dst, 0, FLAG_V, SZ_B);
+		dst = check_cycles(dst);
+		dst = m68k_save_result(inst, dst, opts);
+		break;
 	case M68K_EORI_CCR:
 	case M68K_EORI_SR:
 	case M68K_EXG:
@@ -758,6 +807,7 @@
 	    dst = check_cycles(dst);
 	    break;
 	case M68K_EXT:
+		break;
 	case M68K_ILLEGAL:
 		dst = call(dst, (uint8_t *)m68k_save_context);
 		dst = mov_rr(dst, CONTEXT, RDI, SZ_Q);
@@ -767,8 +817,6 @@
 	case M68K_JSR:
 	case M68K_LEA:
 	case M68K_LINK:
-	case M68K_LSL:
-	case M68K_LSR:
 	case M68K_MOVE_CCR:
 	case M68K_MOVE_FROM_SR:
 	case M68K_MOVE_SR:
@@ -780,9 +828,37 @@
 	case M68K_NBCD:
 	case M68K_NEG:
 	case M68K_NEGX:
+		break;
 	case M68K_NOP:
+		dst = cycles(dst, BUS);
+		dst = check_cycles(dst);
+		break;
 	case M68K_NOT:
+		break;
 	case M68K_OR:
+		dst = cycles(dst, BUS);
+		if (src_op.mode == MODE_REG_DIRECT) {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = or_rr(dst, src_op.base, dst_op.base, inst->extra.size);
+			} else {
+				dst = or_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		} else if (src_op.mode == MODE_REG_DISPLACE8) {
+			dst = or_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
+		} else {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = or_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
+			} else {
+				dst = or_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		}
+		dst = mov_ir(dst, 0, FLAG_C, SZ_B);
+		dst = setcc_r(dst, CC_Z, FLAG_Z);
+		dst = setcc_r(dst, CC_S, FLAG_N);
+		dst = mov_ir(dst, 0, FLAG_V, SZ_B);
+		dst = check_cycles(dst);
+		dst = m68k_save_result(inst, dst, opts);
+		break;
 	case M68K_ORI_CCR:
 	case M68K_ORI_SR:
 	case M68K_PEA:
@@ -793,10 +869,10 @@
 	case M68K_ROXR:
 	case M68K_RTE:
 	case M68K_RTR:
-	case M68K_RTS:
 	case M68K_SBCD:
 	case M68K_SCC:
 	case M68K_STOP:
+		break;
 	case M68K_SUB:
 		dst = cycles(dst, BUS);
 		if (src_op.mode == MODE_REG_DIRECT) {
@@ -820,13 +896,39 @@
 		dst = setcc_r(dst, CC_O, FLAG_V);
 		dst = mov_rrind(dst, FLAG_C, CONTEXT, SZ_B);
 		dst = check_cycles(dst);
+		dst = m68k_save_result(inst, dst, opts);
 		break;
 	case M68K_SUBX:
+		break;
 	case M68K_SWAP:
+		dst = cycles(dst, BUS);
+		if (src_op.mode == MODE_REG_DIRECT) {
+			dst = rol_ir(dst, 16, src_op.base, inst->extra.size);
+		} else {
+			dst = rol_irdisp8(dst, 16, src_op.base, src_op.disp, inst->extra.size);
+		}
+		dst = mov_ir(dst, 0, FLAG_C, SZ_B);
+		dst = setcc_r(dst, CC_Z, FLAG_Z);
+		dst = setcc_r(dst, CC_S, FLAG_N);
+		dst = mov_ir(dst, 0, FLAG_V, SZ_B);
+		dst = check_cycles(dst);
+		break;
 	case M68K_TAS:
 	case M68K_TRAP:
 	case M68K_TRAPV:
 	case M68K_TST:
+		dst = cycles(dst, BUS);
+		if (src_op.mode == MODE_REG_DIRECT) {
+			dst = cmp_ir(dst, 0, src_op.base, inst->extra.size);
+		} else { //M68000 doesn't support an immediate operand for tst, so this must be MODE_REG_DISPLACE8
+			dst = cmp_irdisp8(dst, 0, src_op.base, src_op.disp, inst->extra.size);
+		}
+		dst = setcc_r(dst, CC_C, FLAG_C);
+		dst = setcc_r(dst, CC_Z, FLAG_Z);
+		dst = setcc_r(dst, CC_S, FLAG_N);
+		dst = setcc_r(dst, CC_O, FLAG_V);
+		dst = check_cycles(dst);
+		break;
 	case M68K_UNLK:
 	case M68K_INVALID:
 		break;