# HG changeset patch
# User Mike Pavone
# Date 1367181917 25200
# Node ID 2586d49ddd46ad2b20422330f43f3ea8c5e13704
# Parent  ed540dd4cf2ba4b5db24b70ddac052ebacc51e1d
Implement EX, EXX and RST in Z80 core

diff -r ed540dd4cf2b -r 2586d49ddd46 gen_x86.c
--- a/gen_x86.c	Sun Apr 28 13:45:00 2013 -0700
+++ b/gen_x86.c	Sun Apr 28 13:45:17 2013 -0700
@@ -24,7 +24,9 @@
 #define PRE_SIZE 0x66
 #define OP_JCC 0x70
 #define OP_IMMED_ARITH 0x80
+#define OP_XCHG 0x86
 #define OP_MOV 0x88
+#define OP_XCHG_AX 0x90
 #define OP_CDQ 0x99
 #define OP_PUSHF 0x9C
 #define OP_POPF 0x9D
@@ -120,7 +122,6 @@
 uint8_t * x86_rr_sizedir(uint8_t * out, uint16_t opcode, uint8_t src, uint8_t dst, uint8_t size)
 {
-	//TODO: Deal with the fact that AH, BH, CH and DH can only be in the R/M param when there's a REX prefix
 	uint8_t tmp;
 	if (size == SZ_W) {
 		*(out++) = PRE_SIZE;
 	}
@@ -1184,6 +1185,49 @@
 	return out;
 }
 
+uint8_t * xchg_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+{
+	//TODO: Use OP_XCHG_AX when one of the registers is AX, EAX or RAX
+	uint8_t tmp;
+	if (size == SZ_W) {
+		*(out++) = PRE_SIZE;
+	}
+	if (size == SZ_B && dst >= RSP && dst <= RDI) {
+		tmp = dst;
+		dst = src;
+		src = tmp;
+	}
+	if (size == SZ_Q || src >= R8 || dst >= R8 || (size == SZ_B && src >= RSP && src <= RDI)) {
+		*out = PRE_REX;
+		if (size == SZ_Q) {
+			*out |= REX_QUAD;
+		}
+		if (src >= R8) {
+			*out |= REX_REG_FIELD;
+			src -= (R8 - X86_R8);
+		}
+		if (dst >= R8) {
+			*out |= REX_RM_FIELD;
+			dst -= (R8 - X86_R8);
+		}
+		out++;
+	}
+	uint8_t opcode = OP_XCHG;
+	if (size == SZ_B) {
+		if (src >= AH && src <= BH) {
+			src -= (AH-X86_AH);
+		}
+		if (dst >= AH && dst <= BH) {
+			dst -= (AH-X86_AH);
+		}
+	} else {
+		opcode |= BIT_SIZE;
+	}
+	*(out++) = opcode;
+	*(out++) = MODE_REG_DIRECT | dst | (src << 3);
+	return out;
+}
+
 uint8_t * pushf(uint8_t * out)
 {
 	*(out++) = OP_PUSHF;
diff -r ed540dd4cf2b -r 2586d49ddd46 gen_x86.h
--- a/gen_x86.h	Sun Apr 28 13:45:00 2013 -0700
+++ b/gen_x86.h	Sun Apr 28 13:45:17 2013 -0700
@@ -160,6 +160,7 @@
 uint8_t * movsx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, uint8_t src_size, uint8_t size);
 uint8_t * movzx_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size);
 uint8_t * movzx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, uint8_t src_size, uint8_t size);
+uint8_t * xchg_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * pushf(uint8_t * out);
 uint8_t * popf(uint8_t * out);
 uint8_t * push_r(uint8_t * out, uint8_t reg);
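Note on the new xchg_rr emitter: for a register-register XCHG it emits an optional 0x66 operand-size prefix (SZ_W), an optional REX prefix, then OP_XCHG (0x86, with BIT_SIZE set for anything wider than a byte) and a register-direct ModRM byte carrying dst in the r/m field and src in the reg field. The OP_XCHG_AX constant added above refers to the shorter single-byte 0x90+reg form that exists when one operand is the accumulator; the TODO notes it is not used yet. The standalone sketch below models the byte sequence produced for a 16-bit exchange; the numeric register values (RCX = 1, RBX = 3) and MODE_REG_DIRECT = 0xC0 follow the standard hardware encoding and are assumptions for illustration, not values read out of gen_x86.h.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical standalone model of the bytes xchg_rr() emits for a
   16-bit exchange of CX and BX (no REX prefix needed). */
int main(void)
{
	uint8_t buf[3];
	uint8_t src = 3, dst = 1;          /* assumed encodings: RBX = 3, RCX = 1 */
	buf[0] = 0x66;                     /* PRE_SIZE: 16-bit operand size */
	buf[1] = 0x86 | 1;                 /* OP_XCHG | BIT_SIZE = 0x87 */
	buf[2] = 0xC0 | dst | (src << 3);  /* mod=11, dst in r/m, src in reg */
	printf("%02X %02X %02X\n", buf[0], buf[1], buf[2]); /* prints: 66 87 D9 */
	return 0;
}
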
diff -r ed540dd4cf2b -r 2586d49ddd46 z80_to_x86.c
--- a/z80_to_x86.c	Sun Apr 28 13:45:00 2013 -0700
+++ b/z80_to_x86.c	Sun Apr 28 13:45:17 2013 -0700
@@ -35,6 +35,30 @@
 	return SZ_B;
 }
 
+uint8_t z80_high_reg(uint8_t reg)
+{
+	switch(reg)
+	{
+	case Z80_C:
+	case Z80_BC:
+		return Z80_B;
+	case Z80_E:
+	case Z80_DE:
+		return Z80_D;
+	case Z80_L:
+	case Z80_HL:
+		return Z80_H;
+	case Z80_IXL:
+	case Z80_IX:
+		return Z80_IXH;
+	case Z80_IYL:
+	case Z80_IY:
+		return Z80_IYH;
+	default:
+		return Z80_UNUSED;
+	}
+}
+
 uint8_t * zcycles(uint8_t * dst, uint32_t num_cycles)
 {
 	return add_ir(dst, num_cycles, ZCYCLES, SZ_D);
@@ -185,6 +209,16 @@
 	return offsetof(z80_context, flags) + flag;
 }
 
+uint8_t zaf_off(uint8_t flag)
+{
+	return offsetof(z80_context, alt_flags) + flag;
+}
+
+uint8_t zar_off(uint8_t reg)
+{
+	return offsetof(z80_context, alt_regs) + reg;
+}
+
 void z80_print_regs_exit(z80_context * context)
 {
 	printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n",
@@ -194,6 +228,13 @@
 		(context->regs[Z80_IXH] << 8) | context->regs[Z80_IXL],
 		(context->regs[Z80_IYH] << 8) | context->regs[Z80_IYL],
 		context->sp);
+	puts("--Alternate Regs--");
+	printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\n",
+		context->alt_regs[Z80_A], context->alt_regs[Z80_B], context->alt_regs[Z80_C],
+		context->alt_regs[Z80_D], context->alt_regs[Z80_E],
+		(context->alt_regs[Z80_H] << 8) | context->alt_regs[Z80_L],
+		(context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL],
+		(context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]);
 	exit(0);
 }
 
@@ -301,9 +342,71 @@
 		//no call to save_z80_reg needed since there's no chance we'll use only
 		//the upper half of a register pair
 		break;
-	/*case Z80_EX:
+	case Z80_EX:
+		if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) {
+			cycles = 4;
+		} else {
+			cycles = 8;
+		}
+		dst = zcycles(dst, cycles);
+		if (inst->addr_mode == Z80_REG) {
+			if(inst->reg == Z80_AF) {
+				dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B);
+				dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_A), opts->regs[Z80_A], SZ_B);
+				dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_A), SZ_B);
+
+				//Flags are currently word aligned, so we can move
+				//them efficiently a word at a time
+				for (int f = ZF_C; f < ZF_NUM; f+=2) {
+					dst = mov_rdisp8r(dst, CONTEXT, zf_off(f), SCRATCH1, SZ_W);
+					dst = mov_rdisp8r(dst, CONTEXT, zaf_off(f), SCRATCH2, SZ_W);
+					dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zaf_off(f), SZ_W);
+					dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zf_off(f), SZ_W);
+				}
+			} else {
+				dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W);
+			}
+		} else {
+			dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
+			dst = call(dst, (uint8_t *)z80_read_byte);
+			dst = mov_rr(dst, opts->regs[inst->reg], SCRATCH2, SZ_B);
+			dst = mov_rr(dst, SCRATCH1, opts->regs[inst->reg], SZ_B);
+			dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
+			dst = call(dst, (uint8_t *)z80_write_byte);
+			dst = zcycles(dst, 1);
+			uint8_t high_reg = z80_high_reg(inst->reg);
+			uint8_t use_reg;
+			//even though some of the upper halves can be used directly
+			//the limitations on mixing *H regs with the REX prefix
+			//prevent us from taking advantage of it
+			use_reg = opts->regs[inst->reg];
+			dst = ror_ir(dst, 8, use_reg, SZ_W);
+			dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
+			dst = add_ir(dst, 1, SCRATCH1, SZ_W);
+			dst = call(dst, (uint8_t *)z80_read_byte);
+			dst = mov_rr(dst, use_reg, SCRATCH2, SZ_B);
+			dst = mov_rr(dst, SCRATCH1, use_reg, SZ_B);
+			dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
+			dst = add_ir(dst, 1, SCRATCH1, SZ_W);
+			dst = call(dst, (uint8_t *)z80_write_byte);
+			//restore reg to normal rotation
+			dst = ror_ir(dst, 8, use_reg, SZ_W);
+			dst = zcycles(dst, 2);
+		}
+		break;
 	case Z80_EXX:
-	case Z80_LDI:
+		dst = zcycles(dst, 4);
+		dst = mov_rr(dst, opts->regs[Z80_BC], SCRATCH1, SZ_W);
+		dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W);
+		dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W);
+		dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W);
+		dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_C), SZ_W);
+		dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zar_off(Z80_L), SZ_W);
+		dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH1, SZ_W);
+		dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W);
+		dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_E), SZ_W);
+		break;
+	/*case Z80_LDI:
 	case Z80_LDIR:
 	case Z80_LDD:
 	case Z80_LDDR:
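The EXX translation above swaps BC, DE and HL with their shadow copies a whole pair at a time: each SZ_W move at zar_off(Z80_C), zar_off(Z80_E) or zar_off(Z80_L) picks up both halves of a pair, which works only because each pair's low byte sits immediately before its high byte in the regs/alt_regs arrays. Here is a plain-C model of the semantics the generated code implements; the enum ordering is assumed for illustration and is not copied from the real headers.

#include <stdint.h>
#include <string.h>

/* Minimal sketch of EXX: swap BC, DE and HL with their shadow copies.
   Assumes each pair's low byte directly precedes its high byte, which
   is what lets one 16-bit move (or memcpy here) cover a whole pair. */
enum { Z80_C, Z80_B, Z80_E, Z80_D, Z80_L, Z80_H, REG_COUNT }; /* assumed order */

static void swap_pair(uint8_t *a, uint8_t *b)
{
	uint8_t tmp[2];
	memcpy(tmp, a, 2);  /* low and high byte of one pair */
	memcpy(a, b, 2);
	memcpy(b, tmp, 2);
}

void z80_exx(uint8_t regs[REG_COUNT], uint8_t alt_regs[REG_COUNT])
{
	swap_pair(&regs[Z80_C], &alt_regs[Z80_C]); /* BC <-> BC' */
	swap_pair(&regs[Z80_E], &alt_regs[Z80_E]); /* DE <-> DE' */
	swap_pair(&regs[Z80_L], &alt_regs[Z80_L]); /* HL <-> HL' */
	/* note: EXX does not touch AF; that is EX AF,AF' */
}
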
@@ -770,9 +873,24 @@
 		break;
 	/*case Z80_RETCC:
 	case Z80_RETI:
-	case Z80_RETN:
-	case Z80_RST:
-	case Z80_IN:
+	case Z80_RETN:*/
+	case Z80_RST: {
+		//RST is basically CALL to an address in page 0
+		dst = zcycles(dst, 5);//T States: 5
+		dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
+		dst = mov_ir(dst, address + 1, SCRATCH2, SZ_W);
+		dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
+		dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3
+		uint8_t * call_dst = z80_get_native_address(context, inst->immed);
+		if (!call_dst) {
+			opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1);
+			//fake address to force large displacement
+			call_dst = dst + 256;
+		}
+		dst = jmp(dst, call_dst);
+		break;
+	}
+	/*case Z80_IN:
 	case Z80_INI:
 	case Z80_INIR:
 	case Z80_IND:
diff -r ed540dd4cf2b -r 2586d49ddd46 z80_to_x86.h
--- a/z80_to_x86.h	Sun Apr 28 13:45:00 2013 -0700
+++ b/z80_to_x86.h	Sun Apr 28 13:45:17 2013 -0700
@@ -29,9 +29,11 @@
 	uint8_t flags[ZF_NUM];
 	uint16_t bank_reg;
 	uint8_t regs[Z80_A+1];
+	uint8_t im;
 	uint8_t alt_regs[Z80_A+1];
 	uint32_t target_cycle;
 	uint32_t current_cycle;
+	uint8_t alt_flags[ZF_NUM];
 	uint8_t * mem_pointers[ZNUM_MEM_AREAS];
 	native_map_slot * static_code_map;
 	native_map_slot * banked_code_map;
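For reference, the Z80 behavior the RST translation mirrors: push the address of the following instruction (RST opcodes are a single byte, hence address + 1 when building the return address above) and jump to one of eight fixed page-0 vectors encoded in bits 3-5 of the opcode. A hypothetical interpreter-style sketch of the same semantics:

#include <stdint.h>

/* Reference model of RST. Matches the stores the JIT emits: SP is
   pre-decremented by 2 and z80_write_word_highfirst puts the high byte
   at SP+1, the low byte at SP (little-endian stack layout). */
void z80_rst(uint16_t *pc, uint16_t *sp, uint8_t mem[0x10000], uint8_t opcode)
{
	uint16_t ret = *pc + 1;              /* RST is a single-byte instruction */
	*sp -= 2;
	mem[(uint16_t)(*sp + 1)] = ret >> 8; /* high byte at the higher address */
	mem[*sp] = ret & 0xFF;               /* low byte at SP */
	*pc = opcode & 0x38;                 /* vectors 0x00, 0x08, ..., 0x38 */
}

As with CALL, the native jump target comes from z80_get_native_address; when the destination has not been translated yet, the branch is recorded with defer_address and patched later, and the fake far-away target forces the large-displacement jmp encoding so the patch site has room for a 32-bit offset.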