pavone@467: /* pavone@467: Copyright 2013 Michael Pavone pavone@505: This file is part of BlastEm. pavone@467: BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text. pavone@467: */ pavone@235: #include "z80inst.h" pavone@213: #include "z80_to_x86.h" pavone@213: #include "gen_x86.h" pavone@235: #include "mem.h" pavone@792: #include "util.h" pavone@235: #include pavone@235: #include pavone@235: #include pavone@235: #include pavone@213: pavone@213: #define MODE_UNUSED (MODE_IMMED-1) pavone@854: #define MAX_MCYCLE_LENGTH 6 pavone@213: pavone@315: //#define DO_DEBUG_PRINT pavone@275: pavone@268: #ifdef DO_DEBUG_PRINT pavone@268: #define dprintf printf pavone@268: #else pavone@268: #define dprintf pavone@268: #endif pavone@268: pavone@652: uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst); pavone@715: void z80_handle_deferred(z80_context * context); pavone@213: pavone@235: uint8_t z80_size(z80inst * inst) pavone@213: { pavone@213: uint8_t reg = (inst->reg & 0x1F); pavone@235: if (reg != Z80_UNUSED && reg != Z80_USE_IMMED) { pavone@213: return reg < Z80_BC ? 
SZ_B : SZ_W; pavone@213: } pavone@213: //TODO: Handle any necessary special cases pavone@213: return SZ_B; pavone@213: } pavone@213: pavone@731: uint8_t zf_off(uint8_t flag) pavone@731: { pavone@731: return offsetof(z80_context, flags) + flag; pavone@731: } pavone@731: pavone@731: uint8_t zaf_off(uint8_t flag) pavone@731: { pavone@731: return offsetof(z80_context, alt_flags) + flag; pavone@731: } pavone@731: pavone@731: uint8_t zr_off(uint8_t reg) pavone@731: { pavone@731: if (reg > Z80_A) { pavone@731: reg = z80_low_reg(reg); pavone@731: } pavone@731: return offsetof(z80_context, regs) + reg; pavone@731: } pavone@731: pavone@731: uint8_t zar_off(uint8_t reg) pavone@731: { pavone@731: if (reg > Z80_A) { pavone@731: reg = z80_low_reg(reg); pavone@731: } pavone@731: return offsetof(z80_context, alt_regs) + reg; pavone@731: } pavone@731: pavone@731: void zreg_to_native(z80_options *opts, uint8_t reg, uint8_t native_reg) pavone@731: { pavone@731: if (opts->regs[reg] >= 0) { pavone@731: mov_rr(&opts->gen.code, opts->regs[reg], native_reg, reg > Z80_A ? SZ_W : SZ_B); pavone@731: } else { pavone@731: mov_rdispr(&opts->gen.code, opts->gen.context_reg, zr_off(reg), native_reg, reg > Z80_A ? SZ_W : SZ_B); pavone@731: } pavone@731: } pavone@731: pavone@731: void native_to_zreg(z80_options *opts, uint8_t native_reg, uint8_t reg) pavone@731: { pavone@731: if (opts->regs[reg] >= 0) { pavone@731: mov_rr(&opts->gen.code, native_reg, opts->regs[reg], reg > Z80_A ? SZ_W : SZ_B); pavone@731: } else { pavone@731: mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, zr_off(reg), reg > Z80_A ? 
SZ_W : SZ_B); pavone@731: } pavone@731: } pavone@731: pavone@591: void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts) pavone@250: { pavone@590: code_info *code = &opts->gen.code; pavone@213: if (inst->reg == Z80_USE_IMMED) { pavone@213: ea->mode = MODE_IMMED; pavone@213: ea->disp = inst->immed; pavone@213: } else if ((inst->reg & 0x1F) == Z80_UNUSED) { pavone@213: ea->mode = MODE_UNUSED; pavone@213: } else { pavone@235: ea->mode = MODE_REG_DIRECT; pavone@731: if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { pavone@312: if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { pavone@590: mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); pavone@590: ror_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@590: ea->base = opts->gen.scratch1; pavone@312: } else { pavone@312: ea->base = opts->regs[Z80_IYL]; pavone@590: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@312: } pavone@262: } else if(opts->regs[inst->reg] >= 0) { pavone@262: ea->base = opts->regs[inst->reg]; pavone@268: if (ea->base >= AH && ea->base <= BH) { pavone@268: if ((inst->addr_mode & 0x1F) == Z80_REG) { pavone@268: uint8_t other_reg = opts->regs[inst->ea_reg]; pavone@269: if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { pavone@268: //we can't mix an *H reg with a register that requires the REX prefix pavone@268: ea->base = opts->regs[z80_low_reg(inst->reg)]; pavone@590: ror_ir(code, 8, ea->base, SZ_W); pavone@268: } pavone@268: } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { pavone@268: //temp regs require REX prefix too pavone@267: ea->base = opts->regs[z80_low_reg(inst->reg)]; pavone@590: ror_ir(code, 8, ea->base, SZ_W); pavone@267: } pavone@267: } pavone@213: } else { pavone@262: ea->mode = MODE_REG_DISPLACE8; pavone@590: ea->base = opts->gen.context_reg; pavone@731: ea->disp = zr_off(inst->reg); pavone@213: } pavone@213: } pavone@213: } pavone@213: pavone@590: void z80_save_reg(z80inst * 
inst, z80_options * opts) pavone@213: { pavone@590: code_info *code = &opts->gen.code; pavone@716: if (inst->reg == Z80_USE_IMMED || inst->reg == Z80_UNUSED) { pavone@716: return; pavone@716: } pavone@731: if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { pavone@312: if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { pavone@590: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@590: mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); pavone@590: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@312: } else { pavone@590: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@312: } pavone@268: } else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) { pavone@268: if ((inst->addr_mode & 0x1F) == Z80_REG) { pavone@268: uint8_t other_reg = opts->regs[inst->ea_reg]; pavone@269: if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { pavone@268: //we can't mix an *H reg with a register that requires the REX prefix pavone@590: ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); pavone@268: } pavone@268: } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { pavone@268: //temp regs require REX prefix too pavone@590: ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); pavone@267: } pavone@213: } pavone@213: } pavone@213: pavone@591: void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t read, uint8_t modify) pavone@213: { pavone@590: code_info *code = &opts->gen.code; pavone@730: uint8_t size, areg; pavone@730: int8_t reg; pavone@235: ea->mode = MODE_REG_DIRECT; pavone@590: areg = read ? 
opts->gen.scratch1 : opts->gen.scratch2; pavone@213: switch(inst->addr_mode & 0x1F) pavone@213: { pavone@213: case Z80_REG: pavone@731: if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { pavone@312: if (inst->reg == Z80_IYL) { pavone@590: mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); pavone@590: ror_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@590: ea->base = opts->gen.scratch1; pavone@312: } else { pavone@312: ea->base = opts->regs[Z80_IYL]; pavone@590: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@312: } pavone@651: } else if(opts->regs[inst->ea_reg] >= 0) { pavone@213: ea->base = opts->regs[inst->ea_reg]; pavone@267: if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { pavone@267: uint8_t other_reg = opts->regs[inst->reg]; pavone@666: #ifdef X86_64 pavone@269: if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { pavone@267: //we can't mix an *H reg with a register that requires the REX prefix pavone@267: ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; pavone@590: ror_ir(code, 8, ea->base, SZ_W); pavone@267: } pavone@666: #endif pavone@267: } pavone@651: } else { pavone@651: ea->mode = MODE_REG_DISPLACE8; pavone@659: ea->base = opts->gen.context_reg; pavone@731: ea->disp = zr_off(inst->ea_reg); pavone@213: } pavone@213: break; pavone@213: case Z80_REG_INDIRECT: pavone@731: zreg_to_native(opts, inst->ea_reg, areg); pavone@213: size = z80_size(inst); pavone@213: if (read) { pavone@213: if (modify) { pavone@590: //push_r(code, opts->gen.scratch1); pavone@591: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); pavone@213: } pavone@213: if (size == SZ_B) { pavone@590: call(code, opts->read_8); pavone@213: } else { pavone@591: call(code, opts->read_16); pavone@213: } pavone@213: if (modify) { pavone@590: //pop_r(code, opts->gen.scratch2); pavone@591: mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), 
opts->gen.scratch2, SZ_W); pavone@213: } pavone@213: } pavone@590: ea->base = opts->gen.scratch1; pavone@213: break; pavone@213: case Z80_IMMED: pavone@213: ea->mode = MODE_IMMED; pavone@213: ea->disp = inst->immed; pavone@213: break; pavone@213: case Z80_IMMED_INDIRECT: pavone@591: mov_ir(code, inst->immed, areg, SZ_W); pavone@213: size = z80_size(inst); pavone@213: if (read) { pavone@277: /*if (modify) { pavone@591: push_r(code, opts->gen.scratch1); pavone@277: }*/ pavone@213: if (size == SZ_B) { pavone@593: call(code, opts->read_8); pavone@213: } else { pavone@593: call(code, opts->read_16); pavone@213: } pavone@213: if (modify) { pavone@591: //pop_r(code, opts->gen.scratch2); pavone@591: mov_ir(code, inst->immed, opts->gen.scratch2, SZ_W); pavone@213: } pavone@213: } pavone@590: ea->base = opts->gen.scratch1; pavone@213: break; pavone@235: case Z80_IX_DISPLACE: pavone@235: case Z80_IY_DISPLACE: pavone@731: zreg_to_native(opts, (inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY, areg); pavone@591: add_ir(code, inst->ea_reg & 0x80 ? 
inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); pavone@213: size = z80_size(inst); pavone@213: if (read) { pavone@213: if (modify) { pavone@591: //push_r(code, opts->gen.scratch1); pavone@591: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); pavone@213: } pavone@213: if (size == SZ_B) { pavone@593: call(code, opts->read_8); pavone@213: } else { pavone@593: call(code, opts->read_16); pavone@213: } pavone@213: if (modify) { pavone@591: //pop_r(code, opts->gen.scratch2); pavone@591: mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); pavone@213: } pavone@213: } pavone@590: ea->base = opts->gen.scratch1; pavone@213: break; pavone@213: case Z80_UNUSED: pavone@235: ea->mode = MODE_UNUSED; pavone@213: break; pavone@213: default: pavone@792: fatal_error("Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F); pavone@213: } pavone@213: } pavone@213: pavone@591: void z80_save_ea(code_info *code, z80inst * inst, z80_options * opts) pavone@213: { pavone@267: if ((inst->addr_mode & 0x1F) == Z80_REG) { pavone@731: if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { pavone@312: if (inst->reg == Z80_IYL) { pavone@591: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@591: mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); pavone@591: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@312: } else { pavone@591: ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); pavone@312: } pavone@267: } else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { pavone@267: uint8_t other_reg = opts->regs[inst->reg]; pavone@666: #ifdef X86_64 pavone@269: if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { pavone@267: //we can't mix an *H reg with a register that requires the REX prefix pavone@591: ror_ir(code, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W); pavone@267: } pavone@666: #endif 
pavone@267: } pavone@213: } pavone@213: } pavone@213: pavone@593: void z80_save_result(z80_options *opts, z80inst * inst) pavone@213: { pavone@253: switch(inst->addr_mode & 0x1f) pavone@253: { pavone@253: case Z80_REG_INDIRECT: pavone@253: case Z80_IMMED_INDIRECT: pavone@253: case Z80_IX_DISPLACE: pavone@253: case Z80_IY_DISPLACE: pavone@253: if (z80_size(inst) == SZ_B) { pavone@593: call(&opts->gen.code, opts->write_8); pavone@253: } else { pavone@593: call(&opts->gen.code, opts->write_16_lowfirst); pavone@253: } pavone@213: } pavone@213: } pavone@213: pavone@213: enum { pavone@213: DONT_READ=0, pavone@213: READ pavone@213: }; pavone@213: pavone@213: enum { pavone@213: DONT_MODIFY=0, pavone@213: MODIFY pavone@213: }; pavone@213: pavone@235: void z80_print_regs_exit(z80_context * context) pavone@235: { pavone@505: printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n", pavone@235: context->regs[Z80_A], context->regs[Z80_B], context->regs[Z80_C], pavone@505: context->regs[Z80_D], context->regs[Z80_E], pavone@505: (context->regs[Z80_H] << 8) | context->regs[Z80_L], pavone@505: (context->regs[Z80_IXH] << 8) | context->regs[Z80_IXL], pavone@505: (context->regs[Z80_IYH] << 8) | context->regs[Z80_IYL], pavone@243: context->sp, context->im, context->iff1, context->iff2); pavone@241: puts("--Alternate Regs--"); pavone@505: printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\n", pavone@241: context->alt_regs[Z80_A], context->alt_regs[Z80_B], context->alt_regs[Z80_C], pavone@505: context->alt_regs[Z80_D], context->alt_regs[Z80_E], pavone@505: (context->alt_regs[Z80_H] << 8) | context->alt_regs[Z80_L], pavone@505: (context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL], pavone@241: (context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]); pavone@235: exit(0); pavone@235: } pavone@235: pavone@652: void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t interp) 
pavone@213: { pavone@591: uint32_t num_cycles; pavone@591: host_ea src_op, dst_op; pavone@235: uint8_t size; pavone@590: z80_options *opts = context->options; pavone@591: uint8_t * start = opts->gen.code.cur; pavone@591: code_info *code = &opts->gen.code; pavone@627: if (!interp) { pavone@652: check_cycles_int(&opts->gen, address); pavone@819: if (context->breakpoint_flags[address / 8] & (1 << (address % 8))) { pavone@627: zbreakpoint_patch(context, address, start); pavone@627: } pavone@735: #ifdef Z80_LOG_ADDRESS pavone@735: log_address(&opts->gen, address, "Z80: %X @ %d\n"); pavone@735: #endif pavone@626: } pavone@213: switch(inst->op) pavone@213: { pavone@213: case Z80_LD: pavone@235: size = z80_size(inst); pavone@235: switch (inst->addr_mode & 0x1F) pavone@235: { pavone@235: case Z80_REG: pavone@235: case Z80_REG_INDIRECT: pavone@591: num_cycles = size == SZ_B ? 4 : 6; pavone@841: if (inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY || (inst->ea_reg >= Z80_IXL && inst->ea_reg <= Z80_IYH)) { pavone@591: num_cycles += 4; pavone@235: } pavone@841: if (inst->reg == Z80_I || inst->ea_reg == Z80_I || inst->reg == Z80_R || inst->ea_reg == Z80_R) { pavone@591: num_cycles += 5; pavone@506: } pavone@235: break; pavone@235: case Z80_IMMED: pavone@591: num_cycles = size == SZ_B ? 
7 : 10; pavone@235: break; pavone@235: case Z80_IMMED_INDIRECT: pavone@591: num_cycles = 10; pavone@235: break; pavone@235: case Z80_IX_DISPLACE: pavone@235: case Z80_IY_DISPLACE: pavone@591: num_cycles = 16; pavone@235: break; pavone@235: } pavone@235: if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) { pavone@591: num_cycles += 4; pavone@235: } pavone@591: cycles(&opts->gen, num_cycles); pavone@213: if (inst->addr_mode & Z80_DIR) { pavone@591: translate_z80_ea(inst, &dst_op, opts, DONT_READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); pavone@235: } else { pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@213: } pavone@235: if (src_op.mode == MODE_REG_DIRECT) { pavone@262: if(dst_op.mode == MODE_REG_DISPLACE8) { pavone@591: mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size); pavone@262: } else { pavone@591: mov_rr(code, src_op.base, dst_op.base, size); pavone@262: } pavone@262: } else if(src_op.mode == MODE_IMMED) { pavone@730: if(dst_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, size); pavone@730: } else { pavone@730: mov_ir(code, src_op.disp, dst_op.base, size); pavone@730: } pavone@213: } else { pavone@730: if(dst_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, size); pavone@730: mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, size); pavone@730: } else { pavone@730: mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size); pavone@730: } pavone@213: } pavone@842: if ((inst->ea_reg == Z80_I || inst->ea_reg == Z80_R) && inst->addr_mode == Z80_REG) { pavone@842: //ld a, i and ld a, r sets some flags pavone@652: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@652: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@652: setcc_rdisp(code, CC_S, opts->gen.context_reg, 
zf_off(ZF_S)); pavone@821: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);; pavone@652: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);; pavone@659: mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch1, SZ_B); pavone@652: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); pavone@651: } pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@235: if (inst->addr_mode & Z80_DIR) { pavone@593: z80_save_result(opts, inst); pavone@213: } pavone@213: break; pavone@213: case Z80_PUSH: pavone@591: cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); pavone@591: sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@235: if (inst->reg == Z80_AF) { pavone@729: zreg_to_native(opts, Z80_A, opts->gen.scratch1); pavone@591: shl_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@591: mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B); pavone@591: shl_ir(code, 1, opts->gen.scratch1, SZ_B); pavone@591: or_rdispr(code, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B); pavone@591: shl_ir(code, 2, opts->gen.scratch1, SZ_B); pavone@591: or_rdispr(code, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B); pavone@591: shl_ir(code, 2, opts->gen.scratch1, SZ_B); pavone@591: or_rdispr(code, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B); pavone@591: shl_ir(code, 1, opts->gen.scratch1, SZ_B); pavone@591: or_rdispr(code, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B); pavone@591: shl_ir(code, 1, opts->gen.scratch1, SZ_B); pavone@591: or_rdispr(code, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B); pavone@235: } else { pavone@731: zreg_to_native(opts, inst->reg, opts->gen.scratch1); pavone@235: } pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); pavone@593: call(code, opts->write_16_highfirst); pavone@235: //no call to save_z80_reg 
needed since there's no chance we'll use the only pavone@235: //the upper half of a register pair pavone@213: break; pavone@213: case Z80_POP: pavone@591: cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->read_16); pavone@591: add_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@235: if (inst->reg == Z80_AF) { pavone@505: pavone@591: bt_ir(code, 0, opts->gen.scratch1, SZ_W); pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: bt_ir(code, 1, opts->gen.scratch1, SZ_W); pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_N)); pavone@591: bt_ir(code, 2, opts->gen.scratch1, SZ_W); pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: bt_ir(code, 4, opts->gen.scratch1, SZ_W); pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_H)); pavone@591: bt_ir(code, 6, opts->gen.scratch1, SZ_W); pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: bt_ir(code, 7, opts->gen.scratch1, SZ_W); pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S)); pavone@591: shr_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@729: native_to_zreg(opts, opts->gen.scratch1, Z80_A); pavone@235: } else { pavone@731: native_to_zreg(opts, opts->gen.scratch1, inst->reg); pavone@235: } pavone@235: //no call to save_z80_reg needed since there's no chance we'll use the only pavone@235: //the upper half of a register pair pavone@213: break; pavone@241: case Z80_EX: pavone@241: if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) { pavone@591: num_cycles = 4; pavone@241: } else { pavone@591: num_cycles = 8; pavone@241: } pavone@591: cycles(&opts->gen, num_cycles); pavone@241: if (inst->addr_mode == Z80_REG) { pavone@241: if(inst->reg == Z80_AF) { pavone@729: zreg_to_native(opts, Z80_A, opts->gen.scratch1); pavone@729: mov_rdispr(code, 
opts->gen.context_reg, zar_off(Z80_A), opts->gen.scratch2, SZ_B); pavone@591: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B); pavone@729: native_to_zreg(opts, opts->gen.scratch2, Z80_A); pavone@505: pavone@241: //Flags are currently word aligned, so we can move pavone@241: //them efficiently a word at a time pavone@241: for (int f = ZF_C; f < ZF_NUM; f+=2) { pavone@591: mov_rdispr(code, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W); pavone@591: mov_rdispr(code, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W); pavone@591: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W); pavone@591: mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W); pavone@241: } pavone@241: } else { pavone@729: if (opts->regs[Z80_DE] >= 0 && opts->regs[Z80_HL] >= 0) { pavone@729: xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); pavone@729: } else { pavone@729: zreg_to_native(opts, Z80_DE, opts->gen.scratch1); pavone@729: zreg_to_native(opts, Z80_HL, opts->gen.scratch2); pavone@729: native_to_zreg(opts, opts->gen.scratch1, Z80_HL); pavone@729: native_to_zreg(opts, opts->gen.scratch2, Z80_DE); pavone@729: } pavone@241: } pavone@241: } else { pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->read_8); pavone@729: if (opts->regs[inst->reg] >= 0) { pavone@729: xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); pavone@729: } else { pavone@729: zreg_to_native(opts, inst->reg, opts->gen.scratch2); pavone@729: xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B); pavone@729: native_to_zreg(opts, opts->gen.scratch2, inst->reg); pavone@729: } pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); pavone@593: call(code, opts->write_8); pavone@591: cycles(&opts->gen, 1); pavone@241: uint8_t high_reg = z80_high_reg(inst->reg); pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); 
pavone@591: add_ir(code, 1, opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->read_8); pavone@729: if (opts->regs[inst->reg] >= 0) { pavone@729: //even though some of the upper halves can be used directly pavone@729: //the limitations on mixing *H regs with the REX prefix pavone@729: //prevent us from taking advantage of it pavone@729: uint8_t use_reg = opts->regs[inst->reg]; pavone@729: ror_ir(code, 8, use_reg, SZ_W); pavone@729: xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B); pavone@729: //restore reg to normal rotation pavone@729: ror_ir(code, 8, use_reg, SZ_W); pavone@729: } else { pavone@729: zreg_to_native(opts, high_reg, opts->gen.scratch2); pavone@729: xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B); pavone@729: native_to_zreg(opts, opts->gen.scratch2, high_reg); pavone@729: } pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); pavone@591: add_ir(code, 1, opts->gen.scratch2, SZ_W); pavone@593: call(code, opts->write_8); pavone@591: cycles(&opts->gen, 2); pavone@241: } pavone@241: break; pavone@213: case Z80_EXX: pavone@591: cycles(&opts->gen, 4); pavone@729: zreg_to_native(opts, Z80_BC, opts->gen.scratch1); pavone@729: mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_BC), opts->gen.scratch2, SZ_W); pavone@729: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_BC), SZ_W); pavone@729: native_to_zreg(opts, opts->gen.scratch2, Z80_BC); pavone@840: pavone@729: zreg_to_native(opts, Z80_HL, opts->gen.scratch1); pavone@729: mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_HL), opts->gen.scratch2, SZ_W); pavone@729: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_HL), SZ_W); pavone@729: native_to_zreg(opts, opts->gen.scratch2, Z80_HL); pavone@840: pavone@729: zreg_to_native(opts, Z80_DE, opts->gen.scratch1); pavone@729: mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_DE), opts->gen.scratch2, SZ_W); pavone@729: mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, 
zar_off(Z80_DE), SZ_W); pavone@729: native_to_zreg(opts, opts->gen.scratch2, Z80_DE); pavone@241: break; pavone@272: case Z80_LDI: { pavone@591: cycles(&opts->gen, 8); pavone@729: zreg_to_native(opts, Z80_HL, opts->gen.scratch1); pavone@593: call(code, opts->read_8); pavone@729: zreg_to_native(opts, Z80_DE, opts->gen.scratch2); pavone@593: call(code, opts->write_8); pavone@591: cycles(&opts->gen, 2); pavone@729: if (opts->regs[Z80_DE] >= 0) { pavone@729: add_ir(code, 1, opts->regs[Z80_DE], SZ_W); pavone@729: } else { pavone@729: add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); pavone@729: } pavone@729: if (opts->regs[Z80_HL] >= 0) { pavone@729: add_ir(code, 1, opts->regs[Z80_HL], SZ_W); pavone@729: } else { pavone@729: add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); pavone@729: } pavone@729: if (opts->regs[Z80_BC] >= 0) { pavone@729: sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); pavone@729: } pavone@821: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); pavone@272: break; pavone@272: } pavone@261: case Z80_LDIR: { pavone@591: cycles(&opts->gen, 8); pavone@729: zreg_to_native(opts, Z80_HL, opts->gen.scratch1); pavone@593: call(code, opts->read_8); pavone@729: zreg_to_native(opts, Z80_DE, opts->gen.scratch2); pavone@593: call(code, opts->write_8); pavone@729: if (opts->regs[Z80_DE] >= 0) { pavone@729: add_ir(code, 1, opts->regs[Z80_DE], SZ_W); pavone@729: } else { pavone@729: add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); pavone@729: } pavone@729: if (opts->regs[Z80_HL] >= 0) { pavone@729: add_ir(code, 1, opts->regs[Z80_HL], SZ_W); pavone@729: } else { pavone@729: add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); pavone@729: } pavone@729: if 
(opts->regs[Z80_BC] >= 0) { pavone@729: sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); pavone@729: } pavone@591: uint8_t * cont = code->cur+1; pavone@591: jcc(code, CC_Z, code->cur+2); pavone@591: cycles(&opts->gen, 7); pavone@261: //TODO: Figure out what the flag state should be here pavone@261: //TODO: Figure out whether an interrupt can interrupt this pavone@591: jmp(code, start); pavone@591: *cont = code->cur - (cont + 1); pavone@591: cycles(&opts->gen, 2); pavone@821: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); pavone@261: break; pavone@261: } pavone@273: case Z80_LDD: { pavone@591: cycles(&opts->gen, 8); pavone@729: zreg_to_native(opts, Z80_HL, opts->gen.scratch1); pavone@593: call(code, opts->read_8); pavone@729: zreg_to_native(opts, Z80_DE, opts->gen.scratch2); pavone@593: call(code, opts->write_8); pavone@591: cycles(&opts->gen, 2); pavone@729: if (opts->regs[Z80_DE] >= 0) { pavone@729: sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); pavone@729: } pavone@729: if (opts->regs[Z80_HL] >= 0) { pavone@828: sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); pavone@729: } pavone@729: if (opts->regs[Z80_BC] >= 0) { pavone@729: sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); pavone@729: } pavone@821: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); pavone@273: 
break; pavone@273: } pavone@273: case Z80_LDDR: { pavone@591: cycles(&opts->gen, 8); pavone@729: zreg_to_native(opts, Z80_HL, opts->gen.scratch1); pavone@593: call(code, opts->read_8); pavone@729: zreg_to_native(opts, Z80_DE, opts->gen.scratch2); pavone@593: call(code, opts->write_8); pavone@729: if (opts->regs[Z80_DE] >= 0) { pavone@729: sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); pavone@729: } pavone@729: if (opts->regs[Z80_HL] >= 0) { pavone@828: sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); pavone@729: } pavone@729: if (opts->regs[Z80_BC] >= 0) { pavone@729: sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); pavone@729: } else { pavone@729: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); pavone@729: } pavone@591: uint8_t * cont = code->cur+1; pavone@591: jcc(code, CC_Z, code->cur+2); pavone@591: cycles(&opts->gen, 7); pavone@273: //TODO: Figure out what the flag state should be here pavone@591: jmp(code, start); pavone@591: *cont = code->cur - (cont + 1); pavone@591: cycles(&opts->gen, 2); pavone@821: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); pavone@273: break; pavone@273: } pavone@273: /*case Z80_CPI: pavone@213: case Z80_CPIR: pavone@213: case Z80_CPD: pavone@213: case Z80_CPDR: pavone@213: break;*/ pavone@213: case Z80_ADD: pavone@591: num_cycles = 4; pavone@235: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@213: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@213: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 4; pavone@213: } pavone@591: cycles(&opts->gen, num_cycles); 
pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: add_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: add_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } else { pavone@730: add_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } pavone@213: } else { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: add_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: add_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else { pavone@730: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); pavone@730: add_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } pavone@213: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@213: //TODO: Implement half-carry flag pavone@213: if (z80_size(inst) == SZ_B) { pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@213: } pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@213: break; pavone@248: case Z80_ADC: pavone@591: num_cycles = 4; pavone@248: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@248: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@248: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 4; pavone@248: } pavone@591: 
cycles(&opts->gen, num_cycles); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@591: bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: adc_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: adc_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } else { pavone@730: adc_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } pavone@248: } else { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: adc_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: adc_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else { pavone@730: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); pavone@730: adc_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } pavone@248: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@248: //TODO: Implement half-carry flag pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@248: break; pavone@213: case Z80_SUB: pavone@591: num_cycles = 4; pavone@235: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@213: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@213: } pavone@591: cycles(&opts->gen, num_cycles); 
pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: sub_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: sub_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } else { pavone@730: sub_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } pavone@213: } else { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: sub_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: sub_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else { pavone@730: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); pavone@730: sub_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } pavone@213: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); pavone@213: //TODO: Implement half-carry flag pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@213: break; pavone@248: case Z80_SBC: pavone@591: num_cycles = 4; pavone@248: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@248: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@248: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 4; pavone@248: } pavone@591: cycles(&opts->gen, num_cycles); pavone@591: 
translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@591: bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: sbb_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: sbb_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } else { pavone@730: sbb_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@730: } pavone@248: } else { pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@730: sbb_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: sbb_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } else { pavone@730: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); pavone@730: sbb_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } pavone@248: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@248: //TODO: Implement half-carry flag pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@248: break; pavone@213: case Z80_AND: pavone@591: num_cycles = 4; pavone@236: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@236: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@236: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 4; pavone@236: } pavone@591: 
cycles(&opts->gen, num_cycles); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@236: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: and_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: and_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@236: } else { pavone@730: and_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@236: } pavone@236: //TODO: Cleanup flags pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@236: //TODO: Implement half-carry flag pavone@236: if (z80_size(inst) == SZ_B) { pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@236: } pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@236: break; pavone@213: case Z80_OR: pavone@591: num_cycles = 4; pavone@236: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@236: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@236: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 4; pavone@236: } pavone@591: cycles(&opts->gen, num_cycles); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@236: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: or_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: or_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@236: } else { pavone@730: or_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@236: } 
pavone@236: //TODO: Cleanup flags pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@236: //TODO: Implement half-carry flag pavone@236: if (z80_size(inst) == SZ_B) { pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@236: } pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@236: break; pavone@213: case Z80_XOR: pavone@591: num_cycles = 4; pavone@236: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@236: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@236: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 4; pavone@236: } pavone@591: cycles(&opts->gen, num_cycles); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@236: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: xor_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: xor_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@236: } else { pavone@730: xor_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@236: } pavone@236: //TODO: Cleanup flags pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@236: //TODO: Implement half-carry flag pavone@236: if (z80_size(inst) == SZ_B) { pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@236: } 
pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@236: break; pavone@242: case Z80_CP: pavone@591: num_cycles = 4; pavone@242: if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 12; pavone@242: } else if(inst->addr_mode == Z80_IMMED) { pavone@591: num_cycles += 3; pavone@242: } pavone@591: cycles(&opts->gen, num_cycles); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@242: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: cmp_rr(code, src_op.base, dst_op.base, z80_size(inst)); pavone@730: } else if (src_op.mode == MODE_IMMED) { pavone@730: cmp_ir(code, src_op.disp, dst_op.base, z80_size(inst)); pavone@242: } else { pavone@730: cmp_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); pavone@242: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); pavone@242: //TODO: Implement half-carry flag pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@242: break; pavone@213: case Z80_INC: pavone@591: num_cycles = 4; pavone@213: if (inst->reg == Z80_IX || inst->reg == Z80_IY) { pavone@591: num_cycles += 6; pavone@213: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 2; pavone@213: } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 4; pavone@213: } pavone@591: cycles(&opts->gen, num_cycles); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@213: if (dst_op.mode 
== MODE_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@213: } pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: add_ir(code, 1, dst_op.base, z80_size(inst)); pavone@730: } else { pavone@730: add_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } pavone@213: if (z80_size(inst) == SZ_B) { pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@213: //TODO: Implement half-carry flag pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@213: } pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@593: z80_save_result(opts, inst); pavone@213: break; pavone@236: case Z80_DEC: pavone@591: num_cycles = 4; pavone@236: if (inst->reg == Z80_IX || inst->reg == Z80_IY) { pavone@591: num_cycles += 6; pavone@236: } else if(z80_size(inst) == SZ_W) { pavone@591: num_cycles += 2; pavone@236: } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { pavone@591: num_cycles += 4; pavone@236: } pavone@591: cycles(&opts->gen, num_cycles); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@236: if (dst_op.mode == MODE_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@236: } pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: sub_ir(code, 1, dst_op.base, z80_size(inst)); pavone@730: } else { pavone@730: sub_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst)); pavone@730: } pavone@840: pavone@236: if (z80_size(inst) == SZ_B) { pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@236: //TODO: Implement half-carry flag pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, 
zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@236: } pavone@591: z80_save_reg(inst, opts); pavone@591: z80_save_ea(code, inst, opts); pavone@593: z80_save_result(opts, inst); pavone@213: break; pavone@274: //case Z80_DAA: pavone@213: case Z80_CPL: pavone@591: cycles(&opts->gen, 4); pavone@591: not_r(code, opts->regs[Z80_A], SZ_B); pavone@274: //TODO: Implement half-carry flag pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@274: break; pavone@274: case Z80_NEG: pavone@591: cycles(&opts->gen, 8); pavone@591: neg_r(code, opts->regs[Z80_A], SZ_B); pavone@274: //TODO: Implement half-carry flag pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@274: break; pavone@213: case Z80_CCF: pavone@591: cycles(&opts->gen, 4); pavone@591: xor_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@257: //TODO: Implement half-carry flag pavone@257: break; pavone@257: case Z80_SCF: pavone@591: cycles(&opts->gen, 4); pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@257: //TODO: Implement half-carry flag pavone@257: break; pavone@213: case Z80_NOP: pavone@213: if (inst->immed == 42) { pavone@593: call(code, opts->gen.save_context); pavone@657: call_args(code, (code_ptr)z80_print_regs_exit, 1, opts->gen.context_reg); pavone@213: } else { pavone@591: cycles(&opts->gen, 4 * inst->immed); pavone@213: } pavone@213: 
break; pavone@593: case Z80_HALT: { pavone@667: code_ptr loop_top = code->cur; pavone@667: //this isn't terribly efficient, but it's good enough for now pavone@591: cycles(&opts->gen, 4); pavone@667: check_cycles_int(&opts->gen, address); pavone@667: jmp(code, loop_top); pavone@285: break; pavone@593: } pavone@213: case Z80_DI: pavone@591: cycles(&opts->gen, 4); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); pavone@591: mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D); pavone@591: mov_irdisp(code, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D); pavone@243: break; pavone@213: case Z80_EI: pavone@591: cycles(&opts->gen, 4); pavone@591: mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); pavone@591: mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); pavone@335: //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles pavone@667: add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); pavone@593: call(code, opts->do_sync); pavone@243: break; pavone@213: case Z80_IM: pavone@840: cycles(&opts->gen, 8); pavone@591: mov_irdisp(code, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B); pavone@243: break; pavone@247: case Z80_RLC: pavone@591: num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8); pavone@591: cycles(&opts->gen, num_cycles); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register pavone@591: cycles(&opts->gen, 1); pavone@247: } else { pavone@302: src_op.mode = MODE_UNUSED; pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@247: } pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: rol_ir(code, 1, dst_op.base, SZ_B); pavone@730: } else { pavone@730: rol_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@730: } pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: mov_rr(code, dst_op.base, src_op.base, SZ_B); pavone@730: } else if(src_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); pavone@299: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@247: //TODO: Implement half-carry flag pavone@651: if (inst->immed) { pavone@651: //rlca does not set these flags pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@730: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@730: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@730: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@651: } pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@593: z80_save_result(opts, inst); pavone@299: if (src_op.mode != MODE_UNUSED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@247: } else { pavone@591: z80_save_reg(inst, opts); pavone@247: } pavone@247: break; pavone@213: case Z80_RL: pavone@591: num_cycles = inst->immed == 0 ? 
4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); pavone@591: cycles(&opts->gen, num_cycles); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register pavone@591: cycles(&opts->gen, 1); pavone@247: } else { pavone@302: src_op.mode = MODE_UNUSED; pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@247: } pavone@591: bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: rcl_ir(code, 1, dst_op.base, SZ_B); pavone@730: } else { pavone@730: rcl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@730: } pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: mov_rr(code, dst_op.base, src_op.base, SZ_B); pavone@730: } else if(src_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); pavone@299: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@247: //TODO: Implement half-carry flag pavone@651: if (inst->immed) { pavone@651: //rla does not set these flags pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@730: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@730: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@730: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@651: } pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@593: z80_save_result(opts, inst); pavone@299: if (src_op.mode != MODE_UNUSED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@247: } else { pavone@591: z80_save_reg(inst, opts); 
pavone@247: } pavone@247: break; pavone@213: case Z80_RRC: pavone@591: num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); pavone@591: cycles(&opts->gen, num_cycles); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register pavone@591: cycles(&opts->gen, 1); pavone@247: } else { pavone@302: src_op.mode = MODE_UNUSED; pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@247: } pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: ror_ir(code, 1, dst_op.base, SZ_B); pavone@730: } else { pavone@730: ror_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@730: } pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: mov_rr(code, dst_op.base, src_op.base, SZ_B); pavone@730: } else if(src_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); pavone@299: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@247: //TODO: Implement half-carry flag pavone@651: if (inst->immed) { pavone@651: //rrca does not set these flags pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@730: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@730: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@730: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@651: } pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@593: z80_save_result(opts, inst); pavone@299: if (src_op.mode != MODE_UNUSED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@247: } else { pavone@591: 
z80_save_reg(inst, opts); pavone@247: } pavone@247: break; pavone@213: case Z80_RR: pavone@591: num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); pavone@591: cycles(&opts->gen, num_cycles); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register pavone@591: cycles(&opts->gen, 1); pavone@247: } else { pavone@302: src_op.mode = MODE_UNUSED; pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@247: } pavone@591: bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: rcr_ir(code, 1, dst_op.base, SZ_B); pavone@730: } else { pavone@730: rcr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@730: } pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: mov_rr(code, dst_op.base, src_op.base, SZ_B); pavone@730: } else if(src_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); pavone@299: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@247: //TODO: Implement half-carry flag pavone@651: if (inst->immed) { pavone@651: //rra does not set these flags pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@730: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@730: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@730: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@651: } pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@593: z80_save_result(opts, inst); pavone@299: if (src_op.mode != 
MODE_UNUSED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@247: } else { pavone@591: z80_save_reg(inst, opts); pavone@247: } pavone@247: break; pavone@275: case Z80_SLA: pavone@213: case Z80_SLL: pavone@591: num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; pavone@591: cycles(&opts->gen, num_cycles); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register pavone@591: cycles(&opts->gen, 1); pavone@275: } else { pavone@302: src_op.mode = MODE_UNUSED; pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@275: } pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: shl_ir(code, 1, dst_op.base, SZ_B); pavone@730: } else { pavone@730: shl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@730: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@310: if (inst->op == Z80_SLL) { pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: or_ir(code, 1, dst_op.base, SZ_B); pavone@731: } else { pavone@731: or_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@310: } pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: mov_rr(code, dst_op.base, src_op.base, SZ_B); pavone@730: } else if(src_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); pavone@299: } pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@275: //TODO: Implement half-carry flag pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: 
setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@593: z80_save_result(opts, inst); pavone@299: if (src_op.mode != MODE_UNUSED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@275: } else { pavone@591: z80_save_reg(inst, opts); pavone@275: } pavone@275: break; pavone@275: case Z80_SRA: pavone@591: num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; pavone@591: cycles(&opts->gen, num_cycles); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register pavone@591: cycles(&opts->gen, 1); pavone@275: } else { pavone@302: src_op.mode = MODE_UNUSED; pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@275: } pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: sar_ir(code, 1, dst_op.base, SZ_B); pavone@730: } else { pavone@730: sar_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@730: } pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: mov_rr(code, dst_op.base, src_op.base, SZ_B); pavone@730: } else if(src_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); pavone@299: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@275: //TODO: Implement half-carry flag pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@299: if 
(inst->addr_mode != Z80_UNUSED) { pavone@593: z80_save_result(opts, inst); pavone@299: if (src_op.mode != MODE_UNUSED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@275: } else { pavone@591: z80_save_reg(inst, opts); pavone@275: } pavone@275: break; pavone@213: case Z80_SRL: pavone@591: num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; pavone@591: cycles(&opts->gen, num_cycles); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@591: translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); pavone@591: translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register pavone@591: cycles(&opts->gen, 1); pavone@275: } else { pavone@302: src_op.mode = MODE_UNUSED; pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@275: } pavone@730: if (dst_op.mode == MODE_REG_DIRECT) { pavone@730: shr_ir(code, 1, dst_op.base, SZ_B); pavone@730: } else { pavone@730: shr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); pavone@730: } pavone@730: if (src_op.mode == MODE_REG_DIRECT) { pavone@591: mov_rr(code, dst_op.base, src_op.base, SZ_B); pavone@730: } else if(src_op.mode == MODE_REG_DISPLACE8) { pavone@730: mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); pavone@299: } pavone@591: setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@275: //TODO: Implement half-carry flag pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, dst_op.base, SZ_B); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@299: if (inst->addr_mode != Z80_UNUSED) { pavone@593: z80_save_result(opts, inst); pavone@299: if 
(src_op.mode != MODE_UNUSED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@275: } else { pavone@591: z80_save_reg(inst, opts); pavone@275: } pavone@310: break; pavone@286: case Z80_RLD: pavone@591: cycles(&opts->gen, 8); pavone@734: zreg_to_native(opts, Z80_HL, opts->gen.scratch1); pavone@593: call(code, opts->read_8); pavone@286: //Before: (HL) = 0x12, A = 0x34 pavone@286: //After: (HL) = 0x24, A = 0x31 pavone@734: zreg_to_native(opts, Z80_A, opts->gen.scratch2); pavone@591: shl_ir(code, 4, opts->gen.scratch1, SZ_W); pavone@591: and_ir(code, 0xF, opts->gen.scratch2, SZ_W); pavone@591: and_ir(code, 0xFFF, opts->gen.scratch1, SZ_W); pavone@591: and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B); pavone@591: or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W); pavone@590: //opts->gen.scratch1 = 0x0124 pavone@591: ror_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@591: cycles(&opts->gen, 4); pavone@591: or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); pavone@287: //set flags pavone@287: //TODO: Implement half-carry flag pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@505: pavone@734: zreg_to_native(opts, Z80_HL, opts->gen.scratch2); pavone@591: ror_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->write_8); pavone@286: break; pavone@287: case Z80_RRD: pavone@591: cycles(&opts->gen, 8); pavone@734: zreg_to_native(opts, Z80_HL, opts->gen.scratch1); pavone@593: call(code, opts->read_8); pavone@287: //Before: (HL) = 0x12, A = 0x34 pavone@287: //After: (HL) = 0x41, A = 0x32 pavone@734: zreg_to_native(opts, Z80_A, opts->gen.scratch2); pavone@591: ror_ir(code, 4, opts->gen.scratch1, SZ_W); pavone@591: shl_ir(code, 4, opts->gen.scratch2, SZ_W); pavone@591: and_ir(code, 0xF00F, 
opts->gen.scratch1, SZ_W); pavone@591: and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B); pavone@967: and_ir(code, 0xF0, opts->gen.scratch2, SZ_W); pavone@590: //opts->gen.scratch1 = 0x2001 pavone@590: //opts->gen.scratch2 = 0x0040 pavone@591: or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W); pavone@590: //opts->gen.scratch1 = 0x2041 pavone@591: ror_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@591: cycles(&opts->gen, 4); pavone@591: shr_ir(code, 4, opts->gen.scratch1, SZ_B); pavone@591: or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); pavone@287: //set flags pavone@287: //TODO: Implement half-carry flag pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@591: setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@505: pavone@734: zreg_to_native(opts, Z80_HL, opts->gen.scratch2); pavone@591: ror_ir(code, 8, opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->write_8); pavone@287: break; pavone@308: case Z80_BIT: { pavone@591: num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; pavone@591: cycles(&opts->gen, num_cycles); pavone@308: uint8_t bit; pavone@308: if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { pavone@308: src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; pavone@737: src_op.mode = MODE_REG_DIRECT; pavone@308: size = SZ_W; pavone@308: bit = inst->immed + 8; pavone@308: } else { pavone@308: size = SZ_B; pavone@308: bit = inst->immed; pavone@591: translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); pavone@308: } pavone@239: if (inst->addr_mode != Z80_REG) { pavone@239: //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4 pavone@591: cycles(&opts->gen, 1); pavone@239: } pavone@731: if (src_op.mode == MODE_REG_DIRECT) { pavone@731: bt_ir(code, bit, src_op.base, size); pavone@731: } else { pavone@731: bt_irdisp(code, bit, src_op.base, src_op.disp, size); pavone@731: } pavone@591: setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_Z)); pavone@591: setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_PV)); pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); pavone@307: if (inst->immed == 7) { pavone@731: if (src_op.mode == MODE_REG_DIRECT) { pavone@731: cmp_ir(code, 0, src_op.base, size); pavone@731: } else { pavone@731: cmp_irdisp(code, 0, src_op.base, src_op.disp, size); pavone@731: } pavone@591: setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); pavone@307: } else { pavone@591: mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); pavone@307: } pavone@239: break; pavone@308: } pavone@308: case Z80_SET: { pavone@591: num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; pavone@591: cycles(&opts->gen, num_cycles); pavone@308: uint8_t bit; pavone@308: if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { pavone@308: src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; pavone@737: src_op.mode = MODE_REG_DIRECT; pavone@308: size = SZ_W; pavone@308: bit = inst->immed + 8; pavone@308: } else { pavone@308: size = SZ_B; pavone@308: bit = inst->immed; pavone@591: translate_z80_ea(inst, &src_op, opts, READ, MODIFY); pavone@308: } pavone@299: if (inst->reg != Z80_USE_IMMED) { pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@299: } pavone@247: if (inst->addr_mode != Z80_REG) { pavone@247: //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 pavone@591: cycles(&opts->gen, 1); pavone@247: } pavone@731: if (src_op.mode == MODE_REG_DIRECT) { pavone@731: bts_ir(code, bit, src_op.base, size); pavone@731: } else { pavone@731: bts_irdisp(code, bit, src_op.base, src_op.disp, size); pavone@731: } pavone@299: if (inst->reg != Z80_USE_IMMED) { pavone@308: if (size == SZ_W) { pavone@666: #ifdef X86_64 pavone@308: if (dst_op.base >= R8) { pavone@591: ror_ir(code, 8, src_op.base, SZ_W); pavone@591: mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); pavone@591: ror_ir(code, 8, src_op.base, SZ_W); pavone@308: } else { pavone@666: #endif pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: zreg_to_native(opts, inst->ea_reg, dst_op.base); pavone@731: } else { pavone@731: zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1); pavone@731: mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@666: #ifdef X86_64 pavone@505: } pavone@666: #endif pavone@308: } else { pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: if (src_op.mode == MODE_REG_DIRECT) { pavone@731: mov_rr(code, src_op.base, dst_op.base, SZ_B); pavone@731: } else { pavone@731: mov_rdispr(code, src_op.base, 
src_op.disp, dst_op.base, SZ_B); pavone@731: } pavone@731: } else if (src_op.mode == MODE_REG_DIRECT) { pavone@731: mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B); pavone@731: } else { pavone@731: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B); pavone@731: mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@308: } pavone@308: } pavone@308: if ((inst->addr_mode & 0x1F) != Z80_REG) { pavone@593: z80_save_result(opts, inst); pavone@308: if (inst->reg != Z80_USE_IMMED) { pavone@591: z80_save_reg(inst, opts); pavone@308: } pavone@308: } pavone@308: break; pavone@308: } pavone@308: case Z80_RES: { pavone@591: num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; pavone@591: cycles(&opts->gen, num_cycles); pavone@308: uint8_t bit; pavone@308: if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { pavone@308: src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; pavone@737: src_op.mode = MODE_REG_DIRECT; pavone@308: size = SZ_W; pavone@308: bit = inst->immed + 8; pavone@308: } else { pavone@308: size = SZ_B; pavone@308: bit = inst->immed; pavone@591: translate_z80_ea(inst, &src_op, opts, READ, MODIFY); pavone@308: } pavone@308: if (inst->reg != Z80_USE_IMMED) { pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@308: } pavone@308: if (inst->addr_mode != Z80_REG) { pavone@308: //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 pavone@591: cycles(&opts->gen, 1); pavone@308: } pavone@731: if (src_op.mode == MODE_REG_DIRECT) { pavone@731: btr_ir(code, bit, src_op.base, size); pavone@731: } else { pavone@731: btr_irdisp(code, bit, src_op.base, src_op.disp, size); pavone@731: } pavone@308: if (inst->reg != Z80_USE_IMMED) { pavone@308: if (size == SZ_W) { pavone@666: #ifdef X86_64 pavone@308: if (dst_op.base >= R8) { pavone@591: ror_ir(code, 8, src_op.base, SZ_W); 
pavone@591: mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); pavone@591: ror_ir(code, 8, src_op.base, SZ_W); pavone@308: } else { pavone@666: #endif pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: zreg_to_native(opts, inst->ea_reg, dst_op.base); pavone@731: } else { pavone@731: zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1); pavone@731: mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@666: #ifdef X86_64 pavone@505: } pavone@666: #endif pavone@308: } else { pavone@731: if (dst_op.mode == MODE_REG_DIRECT) { pavone@731: if (src_op.mode == MODE_REG_DIRECT) { pavone@731: mov_rr(code, src_op.base, dst_op.base, SZ_B); pavone@731: } else { pavone@731: mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_B); pavone@731: } pavone@731: } else if (src_op.mode == MODE_REG_DIRECT) { pavone@731: mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B); pavone@731: } else { pavone@731: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B); pavone@731: mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); pavone@731: } pavone@308: } pavone@299: } pavone@247: if (inst->addr_mode != Z80_REG) { pavone@593: z80_save_result(opts, inst); pavone@299: if (inst->reg != Z80_USE_IMMED) { pavone@591: z80_save_reg(inst, opts); pavone@299: } pavone@247: } pavone@247: break; pavone@308: } pavone@236: case Z80_JP: { pavone@591: num_cycles = 4; pavone@506: if (inst->addr_mode != Z80_REG_INDIRECT) { pavone@591: num_cycles += 6; pavone@236: } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { pavone@591: num_cycles += 4; pavone@236: } pavone@591: cycles(&opts->gen, num_cycles); pavone@653: if (inst->addr_mode != Z80_REG_INDIRECT) { pavone@591: code_ptr call_dst = z80_get_native_address(context, inst->immed); pavone@236: if (!call_dst) { pavone@591: opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); pavone@236: //fake address to 
force large displacement pavone@601: call_dst = code->cur + 256; pavone@236: } pavone@591: jmp(code, call_dst); pavone@236: } else { pavone@239: if (inst->addr_mode == Z80_REG_INDIRECT) { pavone@729: zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1); pavone@236: } else { pavone@591: mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); pavone@236: } pavone@593: call(code, opts->native_addr); pavone@591: jmp_r(code, opts->gen.scratch1); pavone@236: } pavone@236: break; pavone@236: } pavone@236: case Z80_JPCC: { pavone@591: cycles(&opts->gen, 7);//T States: 4,3 pavone@236: uint8_t cond = CC_Z; pavone@236: switch (inst->reg) pavone@236: { pavone@236: case Z80_CC_NZ: pavone@236: cond = CC_NZ; pavone@236: case Z80_CC_Z: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); pavone@236: break; pavone@236: case Z80_CC_NC: pavone@236: cond = CC_NZ; pavone@236: case Z80_CC_C: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@236: break; pavone@238: case Z80_CC_PO: pavone@238: cond = CC_NZ; pavone@238: case Z80_CC_PE: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); pavone@238: break; pavone@238: case Z80_CC_P: pavone@367: cond = CC_NZ; pavone@238: case Z80_CC_M: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); pavone@238: break; pavone@236: } pavone@591: uint8_t *no_jump_off = code->cur+1; pavone@591: jcc(code, cond, code->cur+2); pavone@591: cycles(&opts->gen, 5);//T States: 5 pavone@236: uint16_t dest_addr = inst->immed; pavone@653: code_ptr call_dst = z80_get_native_address(context, dest_addr); pavone@236: if (!call_dst) { pavone@653: opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); pavone@236: //fake address to force large displacement pavone@653: call_dst = code->cur + 256; pavone@236: } pavone@653: jmp(code, call_dst); pavone@591: *no_jump_off = code->cur - (no_jump_off+1); pavone@236: break; pavone@236: } pavone@236: case 
Z80_JR: { pavone@591: cycles(&opts->gen, 12);//T States: 4,3,5 pavone@236: uint16_t dest_addr = address + inst->immed + 2; pavone@653: code_ptr call_dst = z80_get_native_address(context, dest_addr); pavone@236: if (!call_dst) { pavone@653: opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); pavone@236: //fake address to force large displacement pavone@653: call_dst = code->cur + 256; pavone@236: } pavone@653: jmp(code, call_dst); pavone@236: break; pavone@236: } pavone@235: case Z80_JRCC: { pavone@591: cycles(&opts->gen, 7);//T States: 4,3 pavone@235: uint8_t cond = CC_Z; pavone@235: switch (inst->reg) pavone@235: { pavone@235: case Z80_CC_NZ: pavone@235: cond = CC_NZ; pavone@235: case Z80_CC_Z: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); pavone@235: break; pavone@235: case Z80_CC_NC: pavone@235: cond = CC_NZ; pavone@235: case Z80_CC_C: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@235: break; pavone@235: } pavone@591: uint8_t *no_jump_off = code->cur+1; pavone@591: jcc(code, cond, code->cur+2); pavone@591: cycles(&opts->gen, 5);//T States: 5 pavone@235: uint16_t dest_addr = address + inst->immed + 2; pavone@653: code_ptr call_dst = z80_get_native_address(context, dest_addr); pavone@235: if (!call_dst) { pavone@653: opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); pavone@235: //fake address to force large displacement pavone@653: call_dst = code->cur + 256; pavone@235: } pavone@653: jmp(code, call_dst); pavone@591: *no_jump_off = code->cur - (no_jump_off+1); pavone@235: break; pavone@235: } pavone@653: case Z80_DJNZ: { pavone@591: cycles(&opts->gen, 8);//T States: 5,3 pavone@730: if (opts->regs[Z80_B] >= 0) { pavone@730: sub_ir(code, 1, opts->regs[Z80_B], SZ_B); pavone@730: } else { pavone@730: sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_B), SZ_B); pavone@730: } pavone@591: uint8_t *no_jump_off = code->cur+1; pavone@591: 
jcc(code, CC_Z, code->cur+2); pavone@591: cycles(&opts->gen, 5);//T States: 5 pavone@239: uint16_t dest_addr = address + inst->immed + 2; pavone@653: code_ptr call_dst = z80_get_native_address(context, dest_addr); pavone@239: if (!call_dst) { pavone@653: opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); pavone@239: //fake address to force large displacement pavone@653: call_dst = code->cur + 256; pavone@239: } pavone@653: jmp(code, call_dst); pavone@591: *no_jump_off = code->cur - (no_jump_off+1); pavone@239: break; pavone@239: } pavone@235: case Z80_CALL: { pavone@591: cycles(&opts->gen, 11);//T States: 4,3,4 pavone@591: sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@591: mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); pavone@593: call(code, opts->write_16_highfirst);//T States: 3, 3 pavone@653: code_ptr call_dst = z80_get_native_address(context, inst->immed); pavone@235: if (!call_dst) { pavone@653: opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); pavone@235: //fake address to force large displacement pavone@653: call_dst = code->cur + 256; pavone@235: } pavone@653: jmp(code, call_dst); pavone@235: break; pavone@235: } pavone@653: case Z80_CALLCC: { pavone@591: cycles(&opts->gen, 10);//T States: 4,3,3 (false case) pavone@238: uint8_t cond = CC_Z; pavone@238: switch (inst->reg) pavone@238: { pavone@238: case Z80_CC_NZ: pavone@238: cond = CC_NZ; pavone@238: case Z80_CC_Z: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); pavone@238: break; pavone@238: case Z80_CC_NC: pavone@238: cond = CC_NZ; pavone@238: case Z80_CC_C: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@238: break; pavone@238: case Z80_CC_PO: pavone@238: cond = CC_NZ; pavone@238: case Z80_CC_PE: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); pavone@238: break; 
pavone@238: case Z80_CC_P: pavone@367: cond = CC_NZ; pavone@238: case Z80_CC_M: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); pavone@238: break; pavone@238: } pavone@591: uint8_t *no_call_off = code->cur+1; pavone@591: jcc(code, cond, code->cur+2); pavone@591: cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case pavone@591: sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@591: mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); pavone@593: call(code, opts->write_16_highfirst);//T States: 3, 3 pavone@653: code_ptr call_dst = z80_get_native_address(context, inst->immed); pavone@238: if (!call_dst) { pavone@653: opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); pavone@238: //fake address to force large displacement pavone@653: call_dst = code->cur + 256; pavone@238: } pavone@653: jmp(code, call_dst); pavone@591: *no_call_off = code->cur - (no_call_off+1); pavone@238: break; pavone@682: } pavone@213: case Z80_RET: pavone@591: cycles(&opts->gen, 4);//T States: 4 pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->read_16);//T STates: 3, 3 pavone@591: add_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@593: call(code, opts->native_addr); pavone@591: jmp_r(code, opts->gen.scratch1); pavone@235: break; pavone@246: case Z80_RETCC: { pavone@591: cycles(&opts->gen, 5);//T States: 5 pavone@246: uint8_t cond = CC_Z; pavone@246: switch (inst->reg) pavone@246: { pavone@246: case Z80_CC_NZ: pavone@246: cond = CC_NZ; pavone@246: case Z80_CC_Z: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); pavone@246: break; pavone@246: case Z80_CC_NC: pavone@246: cond = CC_NZ; pavone@246: case Z80_CC_C: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); pavone@246: break; pavone@246: case Z80_CC_PO: pavone@246: cond = CC_NZ; 
pavone@246: case Z80_CC_PE: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); pavone@246: break; pavone@246: case Z80_CC_P: pavone@367: cond = CC_NZ; pavone@246: case Z80_CC_M: pavone@591: cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); pavone@246: break; pavone@246: } pavone@591: uint8_t *no_call_off = code->cur+1; pavone@591: jcc(code, cond, code->cur+2); pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->read_16);//T STates: 3, 3 pavone@591: add_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@593: call(code, opts->native_addr); pavone@591: jmp_r(code, opts->gen.scratch1); pavone@591: *no_call_off = code->cur - (no_call_off+1); pavone@246: break; pavone@246: } pavone@283: case Z80_RETI: pavone@283: //For some systems, this may need a callback for signalling interrupt routine completion pavone@591: cycles(&opts->gen, 8);//T States: 4, 4 pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); pavone@593: call(code, opts->read_16);//T STates: 3, 3 pavone@591: add_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@593: call(code, opts->native_addr); pavone@591: jmp_r(code, opts->gen.scratch1); pavone@283: break; pavone@283: case Z80_RETN: pavone@591: cycles(&opts->gen, 8);//T States: 4, 4 pavone@591: mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B); pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); pavone@591: mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); pavone@593: call(code, opts->read_16);//T STates: 3, 3 pavone@591: add_ir(code, 2, opts->regs[Z80_SP], SZ_W); pavone@593: call(code, opts->native_addr); pavone@591: jmp_r(code, opts->gen.scratch1); pavone@283: break; pavone@241: case Z80_RST: { pavone@241: //RST is basically CALL to an address in page 0 pavone@591: cycles(&opts->gen, 5);//T States: 5 pavone@591: sub_ir(code, 2, opts->regs[Z80_SP], 
SZ_W); pavone@591: mov_ir(code, address + 1, opts->gen.scratch1, SZ_W); pavone@591: mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); pavone@593: call(code, opts->write_16_highfirst);//T States: 3, 3 pavone@591: code_ptr call_dst = z80_get_native_address(context, inst->immed); pavone@241: if (!call_dst) { pavone@591: opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); pavone@241: //fake address to force large displacement pavone@601: call_dst = code->cur + 256; pavone@241: } pavone@591: jmp(code, call_dst); pavone@241: break; pavone@241: } pavone@284: case Z80_IN: pavone@591: cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 pavone@284: if (inst->addr_mode == Z80_IMMED_INDIRECT) { pavone@591: mov_ir(code, inst->immed, opts->gen.scratch1, SZ_B); pavone@284: } else { pavone@591: mov_rr(code, opts->regs[Z80_C], opts->gen.scratch1, SZ_B); pavone@284: } pavone@593: call(code, opts->read_io); pavone@591: translate_z80_reg(inst, &dst_op, opts); pavone@735: if (dst_op.mode == MODE_REG_DIRECT) { pavone@735: mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B); pavone@735: } else { pavone@735: mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); pavone@735: } pavone@591: z80_save_reg(inst, opts); pavone@284: break; pavone@284: /*case Z80_INI: pavone@213: case Z80_INIR: pavone@213: case Z80_IND: pavone@284: case Z80_INDR:*/ pavone@213: case Z80_OUT: pavone@591: cycles(&opts->gen, inst->reg == Z80_A ? 
7 : 8);//T States: 4 3/4 pavone@284: if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) { pavone@591: mov_ir(code, inst->immed, opts->gen.scratch2, SZ_B); pavone@284: } else { pavone@735: zreg_to_native(opts, Z80_C, opts->gen.scratch2); pavone@591: mov_rr(code, opts->regs[Z80_C], opts->gen.scratch2, SZ_B); pavone@284: } pavone@591: translate_z80_reg(inst, &src_op, opts); pavone@735: if (src_op.mode == MODE_REG_DIRECT) { pavone@735: mov_rr(code, src_op.base, opts->gen.scratch1, SZ_B); pavone@735: } else if (src_op.mode == MODE_IMMED) { pavone@735: mov_ir(code, src_op.disp, opts->gen.scratch1, SZ_B); pavone@735: } else { pavone@735: mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B); pavone@735: } pavone@593: call(code, opts->write_io); pavone@591: z80_save_reg(inst, opts); pavone@284: break; pavone@284: /*case Z80_OUTI: pavone@213: case Z80_OTIR: pavone@213: case Z80_OUTD: pavone@213: case Z80_OTDR:*/ pavone@235: default: { pavone@235: char disbuf[80]; pavone@314: z80_disasm(inst, disbuf, address); pavone@259: FILE * f = fopen("zram.bin", "wb"); pavone@259: fwrite(context->mem_pointers[0], 1, 8 * 1024, f); pavone@259: fclose(f); pavone@792: fatal_error("unimplemented Z80 instruction: %s at %X\nZ80 RAM has been saved to zram.bin for debugging", disbuf, address); pavone@213: } pavone@235: } pavone@235: } pavone@235: pavone@627: uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context) pavone@627: { pavone@627: if (!context->interp_code[opcode]) { pavone@755: if (opcode == 0xCB || (opcode >= 0xDD && (opcode & 0xF) == 0xD)) { pavone@792: fatal_error("Encountered prefix byte %X at address %X. 
Z80 interpeter doesn't support those yet.", opcode, context->pc); pavone@627: } pavone@627: uint8_t codebuf[8]; pavone@627: memset(codebuf, 0, sizeof(codebuf)); pavone@627: codebuf[0] = opcode; pavone@627: z80inst inst; pavone@627: uint8_t * after = z80_decode(codebuf, &inst); pavone@627: if (after - codebuf > 1) { pavone@792: fatal_error("Encountered multi-byte Z80 instruction at %X. Z80 interpeter doesn't support those yet.", context->pc); pavone@627: } pavone@652: pavone@652: z80_options * opts = context->options; pavone@652: code_info *code = &opts->gen.code; pavone@652: check_alloc_code(code, ZMAX_NATIVE_SIZE); pavone@652: context->interp_code[opcode] = code->cur; pavone@652: translate_z80inst(&inst, context, 0, 1); pavone@652: mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, pc), opts->gen.scratch1, SZ_W); pavone@652: add_ir(code, after - codebuf, opts->gen.scratch1, SZ_W); pavone@652: call(code, opts->native_addr); pavone@652: jmp_r(code, opts->gen.scratch1); pavone@715: z80_handle_deferred(context); pavone@627: } pavone@627: return context->interp_code[opcode]; pavone@627: } pavone@627: pavone@652: code_info z80_make_interp_stub(z80_context * context, uint16_t address) pavone@627: { pavone@652: z80_options *opts = context->options; pavone@652: code_info * code = &opts->gen.code; pavone@652: check_alloc_code(code, 32); pavone@652: code_info stub = {code->cur, NULL}; pavone@627: //TODO: make this play well with the breakpoint code pavone@652: mov_ir(code, address, opts->gen.scratch1, SZ_W); pavone@652: call(code, opts->read_8); pavone@627: //normal opcode fetch is already factored into instruction timing pavone@627: //back out the base 3 cycles from a read here pavone@627: //not quite perfect, but it will have to do for now pavone@652: cycles(&opts->gen, -3); pavone@652: check_cycles_int(&opts->gen, address); pavone@652: call(code, opts->gen.save_context); pavone@652: mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), 
SZ_W); pavone@652: push_r(code, opts->gen.context_reg); pavone@712: call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.context_reg); pavone@664: mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); pavone@652: pop_r(code, opts->gen.context_reg); pavone@652: call(code, opts->gen.load_context); pavone@652: jmp_r(code, opts->gen.scratch1); pavone@652: stub.last = code->cur; pavone@652: return stub; pavone@627: } pavone@627: pavone@627: pavone@235: uint8_t * z80_get_native_address(z80_context * context, uint32_t address) pavone@235: { pavone@235: native_map_slot *map; pavone@235: if (address < 0x4000) { pavone@235: address &= 0x1FFF; pavone@235: map = context->static_code_map; pavone@235: } else { pavone@627: address -= 0x4000; pavone@627: map = context->banked_code_map; pavone@235: } pavone@268: if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) { pavone@313: //dprintf("z80_get_native_address: %X NULL\n", address); pavone@235: return NULL; pavone@235: } pavone@313: //dprintf("z80_get_native_address: %X %p\n", address, map->base + map->offsets[address]); pavone@235: return map->base + map->offsets[address]; pavone@235: } pavone@235: pavone@590: uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address) pavone@235: { pavone@627: //TODO: Fix for addresses >= 0x4000 pavone@252: if (address >= 0x4000) { pavone@252: return 0; pavone@252: } pavone@591: return opts->gen.ram_inst_sizes[0][address & 0x1FFF]; pavone@252: } pavone@252: pavone@252: void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size) pavone@252: { pavone@252: uint32_t orig_address = address; pavone@235: native_map_slot *map; pavone@590: z80_options * opts = context->options; pavone@235: if (address < 0x4000) { pavone@235: address &= 0x1FFF; pavone@235: map = context->static_code_map; pavone@591: opts->gen.ram_inst_sizes[0][address] = 
native_size; pavone@252: context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); pavone@252: context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7); pavone@627: } else { pavone@627: //HERE pavone@627: address -= 0x4000; pavone@627: map = context->banked_code_map; pavone@235: if (!map->offsets) { pavone@627: map->offsets = malloc(sizeof(int32_t) * 0xC000); pavone@627: memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); pavone@235: } pavone@235: } pavone@235: if (!map->base) { pavone@235: map->base = native_address; pavone@235: } pavone@235: map->offsets[address] = native_address - map->base; pavone@253: for(--size, orig_address++; size; --size, orig_address++) { pavone@252: address = orig_address; pavone@252: if (address < 0x4000) { pavone@252: address &= 0x1FFF; pavone@252: map = context->static_code_map; pavone@252: } else { pavone@627: address -= 0x4000; pavone@627: map = context->banked_code_map; pavone@252: } pavone@252: if (!map->offsets) { pavone@627: map->offsets = malloc(sizeof(int32_t) * 0xC000); pavone@627: memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); pavone@252: } pavone@252: map->offsets[address] = EXTENSION_WORD; pavone@252: } pavone@252: } pavone@252: pavone@252: #define INVALID_INSTRUCTION_START 0xFEEDFEED pavone@252: pavone@252: uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address) pavone@252: { pavone@627: //TODO: Fixme for address >= 0x4000 pavone@252: if (!static_code_map->base || address >= 0x4000) { pavone@252: return INVALID_INSTRUCTION_START; pavone@252: } pavone@252: address &= 0x1FFF; pavone@252: if (static_code_map->offsets[address] == INVALID_OFFSET) { pavone@252: return INVALID_INSTRUCTION_START; pavone@252: } pavone@252: while (static_code_map->offsets[address] == EXTENSION_WORD) { pavone@252: --address; pavone@252: address &= 0x1FFF; pavone@252: } pavone@252: return address; pavone@252: } pavone@252: pavone@252: 
z80_context * z80_handle_code_write(uint32_t address, z80_context * context)
{
	uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address);
	if (inst_start != INVALID_INSTRUCTION_START) {
		//overwrite the start of the modified instruction with a call to the
		//retranslation stub so it gets freshly translated before it next runs
		code_ptr dst = z80_get_native_address(context, inst_start);
		code_info code = {dst, dst+32, 0};
		z80_options * opts = context->options;
		dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code.cur, inst_start, address);
		mov_ir(&code, inst_start, opts->gen.scratch1, SZ_D);
		call(&code, opts->retrans_stub);
	}
	return context;
}

//Like z80_get_native_address, but translates the code at address first if needed
uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address)
{
	uint8_t * addr = z80_get_native_address(context, address);
	if (!addr) {
		translate_z80_stream(context, address);
		addr = z80_get_native_address(context, address);
		if (!addr) {
			//CONSISTENCY FIX: callers jump through the returned pointer, so failure
			//here is unrecoverable; report it with fatal_error like the rest of the
			//file rather than printf-and-continue into a NULL jump
			fatal_error("Failed to translate %X to native code\n", address);
		}
	}
	return addr;
}

//Resolves any jump targets whose translation was deferred, translating them if needed
void z80_handle_deferred(z80_context * context)
{
	z80_options * opts = context->options;
	process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address);
	if (opts->gen.deferred) {
		translate_z80_stream(context, opts->gen.deferred->address);
	}
}

//Retranslates a single instruction after self-modifying code overwrote it.
//Returns the address of the new translation (patched to be reachable from orig_start).
extern void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) asm("z80_retranslate_inst");
void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start)
{
	char disbuf[80];
	z80_options * opts = context->options;
	uint8_t orig_size = z80_get_native_inst_size(opts, address);
	code_info *code = &opts->gen.code;
	uint8_t *after, *inst = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
	z80inst instbuf;
	dprintf("Retranslating code at Z80 address %X, native address %p\n", address, orig_start);
	after = z80_decode(inst, &instbuf);
#ifdef DO_DEBUG_PRINT
	z80_disasm(&instbuf, disbuf, address);
	if (instbuf.op == Z80_NOP) {
		printf("%X\t%s(%d)\n", address, disbuf, instbuf.immed);
	} else {
		printf("%X\t%s\n", address, disbuf);
	}
#endif
	if (orig_size != ZMAX_NATIVE_SIZE) {
		//original translation was smaller than the maximum; emit a fresh max-size
		//copy elsewhere and patch the old location to jump to it
		//(removed long-dead commented-out in-place retranslation experiment)
		check_alloc_code(code, ZMAX_NATIVE_SIZE);
		code_ptr start = code->cur;
		translate_z80inst(&instbuf, context, address, 0);
		z80_map_native_address(context, address, start, after-inst, ZMAX_NATIVE_SIZE);
		code_info tmp_code = {orig_start, orig_start + 16};
		jmp(&tmp_code, start);
		tmp_code = *code;
		code->cur = start + ZMAX_NATIVE_SIZE;
if (!z80_is_terminal(&instbuf)) { pavone@597: jmp(&tmp_code, z80_get_native_address_trans(context, address + after-inst)); pavone@264: } pavone@266: z80_handle_deferred(context); pavone@591: return start; pavone@252: } else { pavone@591: code_info tmp_code = *code; pavone@591: code->cur = orig_start; pavone@591: code->last = orig_start + ZMAX_NATIVE_SIZE; pavone@652: translate_z80inst(&instbuf, context, address, 0); pavone@601: code_info tmp2 = *code; pavone@601: *code = tmp_code; pavone@283: if (!z80_is_terminal(&instbuf)) { pavone@652: pavone@601: jmp(&tmp2, z80_get_native_address_trans(context, address + after-inst)); pavone@252: } pavone@266: z80_handle_deferred(context); pavone@252: return orig_start; pavone@252: } pavone@235: } pavone@235: pavone@235: void translate_z80_stream(z80_context * context, uint32_t address) pavone@235: { pavone@235: char disbuf[80]; pavone@235: if (z80_get_native_address(context, address)) { pavone@235: return; pavone@235: } pavone@590: z80_options * opts = context->options; pavone@505: uint32_t start_address = address; pavone@627: pavone@653: do pavone@235: { pavone@235: z80inst inst; pavone@268: dprintf("translating Z80 code at address %X\n", address); pavone@235: do { pavone@235: uint8_t * existing = z80_get_native_address(context, address); pavone@235: if (existing) { pavone@591: jmp(&opts->gen.code, existing); pavone@235: break; pavone@235: } pavone@653: uint8_t * encoded, *next; pavone@653: encoded = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen); pavone@653: if (!encoded) { pavone@653: code_info stub = z80_make_interp_stub(context, address); pavone@653: z80_map_native_address(context, address, stub.cur, 1, stub.last - stub.cur); pavone@235: break; pavone@235: } pavone@601: //make sure prologue is in a contiguous chunk of code pavone@601: check_code_prologue(&opts->gen.code); pavone@235: next = z80_decode(encoded, &inst); pavone@268: #ifdef DO_DEBUG_PRINT pavone@314: z80_disasm(&inst, disbuf, address); 
pavone@235: if (inst.op == Z80_NOP) { pavone@235: printf("%X\t%s(%d)\n", address, disbuf, inst.immed); pavone@235: } else { pavone@235: printf("%X\t%s\n", address, disbuf); pavone@235: } pavone@268: #endif pavone@591: code_ptr start = opts->gen.code.cur; pavone@652: translate_z80inst(&inst, context, address, 0); pavone@591: z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start); pavone@235: address += next-encoded; pavone@255: address &= 0xFFFF; pavone@283: } while (!z80_is_terminal(&inst)); pavone@591: process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address); pavone@591: if (opts->gen.deferred) { pavone@591: address = opts->gen.deferred->address; pavone@268: dprintf("defferred address: %X\n", address); pavone@792: } pavone@653: } while (opts->gen.deferred); pavone@213: } pavone@213: pavone@819: void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, memmap_chunk const * io_chunks, uint32_t num_io_chunks, uint32_t clock_divider) pavone@213: { pavone@590: memset(options, 0, sizeof(*options)); pavone@590: pavone@653: options->gen.memmap = chunks; pavone@653: options->gen.memmap_chunks = num_chunks; pavone@590: options->gen.address_size = SZ_W; pavone@590: options->gen.address_mask = 0xFFFF; pavone@590: options->gen.max_address = 0x10000; pavone@590: options->gen.bus_cycles = 3; pavone@667: options->gen.clock_divider = clock_divider; pavone@590: options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers); pavone@590: options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags); pavone@620: options->gen.ram_flags_shift = 7; pavone@590: pavone@235: options->flags = 0; pavone@666: #ifdef X86_64 pavone@235: options->regs[Z80_B] = BH; pavone@235: options->regs[Z80_C] = RBX; pavone@235: options->regs[Z80_D] = CH; pavone@235: options->regs[Z80_E] = RCX; pavone@235: options->regs[Z80_H] = AH; pavone@235: options->regs[Z80_L] = RAX; pavone@235: options->regs[Z80_IXH] 
= DH; pavone@235: options->regs[Z80_IXL] = RDX; pavone@235: options->regs[Z80_IYH] = -1; pavone@239: options->regs[Z80_IYL] = R8; pavone@235: options->regs[Z80_I] = -1; pavone@235: options->regs[Z80_R] = -1; pavone@235: options->regs[Z80_A] = R10; pavone@235: options->regs[Z80_BC] = RBX; pavone@235: options->regs[Z80_DE] = RCX; pavone@235: options->regs[Z80_HL] = RAX; pavone@235: options->regs[Z80_SP] = R9; pavone@235: options->regs[Z80_AF] = -1; pavone@235: options->regs[Z80_IX] = RDX; pavone@235: options->regs[Z80_IY] = R8; pavone@667: pavone@666: options->gen.scratch1 = R13; pavone@666: options->gen.scratch2 = R14; pavone@666: #else pavone@666: memset(options->regs, -1, sizeof(options->regs)); pavone@666: options->regs[Z80_A] = RAX; pavone@729: options->regs[Z80_SP] = RBX; pavone@667: pavone@666: options->gen.scratch1 = RCX; pavone@666: options->gen.scratch2 = RDX; pavone@666: #endif pavone@667: pavone@590: options->gen.context_reg = RSI; pavone@590: options->gen.cycles = RBP; pavone@590: options->gen.limit = RDI; pavone@590: pavone@590: options->gen.native_code_map = malloc(sizeof(native_map_slot)); pavone@590: memset(options->gen.native_code_map, 0, sizeof(native_map_slot)); pavone@590: options->gen.deferred = NULL; pavone@591: options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000 + sizeof(uint8_t *)); pavone@591: options->gen.ram_inst_sizes[0] = (uint8_t *)(options->gen.ram_inst_sizes + 1); pavone@591: memset(options->gen.ram_inst_sizes[0], 0, sizeof(uint8_t) * 0x2000); pavone@590: pavone@590: code_info *code = &options->gen.code; pavone@590: init_code_info(code); pavone@590: pavone@590: options->save_context_scratch = code->cur; pavone@590: mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); pavone@590: mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_W); pavone@590: pavone@590: options->gen.save_context = code->cur; pavone@590: for (int i = 0; 
i <= Z80_A; i++) pavone@590: { pavone@590: int reg; pavone@590: uint8_t size; pavone@590: if (i < Z80_I) { pavone@594: reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0); pavone@590: size = SZ_W; pavone@590: } else { pavone@590: reg = i; pavone@590: size = SZ_B; pavone@652: } pavone@590: if (options->regs[reg] >= 0) { pavone@590: mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size); pavone@590: } pavone@594: if (size == SZ_W) { pavone@594: i++; pavone@594: } pavone@590: } pavone@590: if (options->regs[Z80_SP] >= 0) { pavone@590: mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W); pavone@590: } pavone@590: mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D); pavone@590: mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D); pavone@593: retn(code); pavone@590: pavone@590: options->load_context_scratch = code->cur; pavone@590: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W); pavone@590: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch2, SZ_W); pavone@590: options->gen.load_context = code->cur; pavone@590: for (int i = 0; i <= Z80_A; i++) pavone@590: { pavone@590: int reg; pavone@590: uint8_t size; pavone@590: if (i < Z80_I) { pavone@594: reg = i /2 + Z80_BC + (i > Z80_H ? 
2 : 0); pavone@590: size = SZ_W; pavone@590: } else { pavone@590: reg = i; pavone@590: size = SZ_B; pavone@590: } pavone@590: if (options->regs[reg] >= 0) { pavone@590: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + i, options->regs[reg], size); pavone@590: } pavone@594: if (size == SZ_W) { pavone@594: i++; pavone@594: } pavone@590: } pavone@590: if (options->regs[Z80_SP] >= 0) { pavone@590: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W); pavone@590: } pavone@590: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D); pavone@590: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D); pavone@593: retn(code); pavone@593: pavone@593: options->native_addr = code->cur; pavone@593: call(code, options->gen.save_context); pavone@593: push_r(code, options->gen.context_reg); pavone@657: movzx_rr(code, options->gen.scratch1, options->gen.scratch1, SZ_W, SZ_D); pavone@657: call_args(code, (code_ptr)z80_get_native_address_trans, 2, options->gen.context_reg, options->gen.scratch1); pavone@593: mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); pavone@593: pop_r(code, options->gen.context_reg); pavone@593: call(code, options->gen.load_context); pavone@593: retn(code); pavone@590: pavone@895: uint32_t tmp_stack_off; pavone@895: pavone@590: options->gen.handle_cycle_limit = code->cur; pavone@900: //calculate call/stack adjust size pavone@900: sub_ir(code, 16-sizeof(void *), RSP, SZ_PTR); pavone@900: call_noalign(code, options->gen.handle_cycle_limit); pavone@900: uint32_t call_adjust_size = code->cur - options->gen.handle_cycle_limit; pavone@900: code->cur = options->gen.handle_cycle_limit; pavone@900: pavone@590: cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); pavone@590: code_ptr no_sync = code->cur+1; pavone@590: jcc(code, CC_B, no_sync); 
pavone@590: mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, pc), SZ_W); pavone@590: call(code, options->save_context_scratch); pavone@895: tmp_stack_off = code->stack_off; pavone@590: pop_r(code, RAX); //return address in read/write func pavone@895: add_ir(code, 16-sizeof(void *), RSP, SZ_PTR); pavone@590: pop_r(code, RBX); //return address in translated code pavone@895: add_ir(code, 16-sizeof(void *), RSP, SZ_PTR); pavone@900: sub_ir(code, call_adjust_size, RAX, SZ_PTR); //adjust return address to point to the call + stack adjust that got us here pavone@590: mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); pavone@590: mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR); pavone@665: restore_callee_save_regs(code); pavone@598: *no_sync = code->cur - (no_sync + 1); pavone@590: //return to caller of z80_run pavone@590: retn(code); pavone@895: code->stack_off = tmp_stack_off; pavone@652: pavone@594: options->gen.handle_code_write = (code_ptr)z80_handle_code_write; pavone@590: pavone@594: options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc); pavone@591: options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); pavone@590: pavone@895: code_ptr skip_int = code->cur; pavone@900: //calculate adjust size pavone@900: add_ir(code, 16-sizeof(void *), RSP, SZ_PTR); pavone@900: uint32_t adjust_size = code->cur - skip_int; pavone@900: code->cur = skip_int; pavone@900: pavone@895: cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); pavone@895: code_ptr skip_sync = code->cur + 1; pavone@895: jcc(code, CC_B, skip_sync); pavone@895: //save PC pavone@895: mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, pc), SZ_D); pavone@895: options->do_sync = code->cur; pavone@895: call(code, options->gen.save_context); pavone@895: tmp_stack_off = 
code->stack_off; pavone@895: //pop return address off the stack and save for resume later pavone@898: //pop_rind(code, options->gen.context_reg); pavone@898: pop_r(code, RAX); pavone@900: add_ir(code, adjust_size, RAX, SZ_PTR); pavone@895: add_ir(code, 16-sizeof(void *), RSP, SZ_PTR); pavone@898: mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR); pavone@898: pavone@895: //restore callee saved registers pavone@895: restore_callee_save_regs(code); pavone@895: //return to caller of z80_run pavone@895: *skip_sync = code->cur - (skip_sync+1); pavone@895: retn(code); pavone@895: code->stack_off = tmp_stack_off; pavone@895: pavone@590: options->gen.handle_cycle_limit_int = code->cur; pavone@590: cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D); pavone@590: jcc(code, CC_B, skip_int); pavone@590: //set limit to the cycle limit pavone@590: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D); pavone@590: //disable interrupts pavone@591: mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B); pavone@591: mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B); pavone@590: cycles(&options->gen, 7); pavone@590: //save return address (in scratch1) to Z80 stack pavone@590: sub_ir(code, 2, options->regs[Z80_SP], SZ_W); pavone@590: mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W); pavone@590: //we need to do check_cycles and cycles outside of the write_8 call pavone@590: //so that the stack has the correct depth if we need to return to C pavone@590: //for a synchronization pavone@590: check_cycles(&options->gen); pavone@590: cycles(&options->gen, 3); pavone@590: //save word to write before call to write_8_noinc pavone@590: push_r(code, options->gen.scratch1); pavone@590: call(code, options->write_8_noinc); pavone@590: //restore word to write pavone@590: pop_r(code, options->gen.scratch1); pavone@590: 
//write high byte to SP+1 pavone@590: mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W); pavone@590: add_ir(code, 1, options->gen.scratch2, SZ_W); pavone@590: shr_ir(code, 8, options->gen.scratch1, SZ_W); pavone@590: check_cycles(&options->gen); pavone@590: cycles(&options->gen, 3); pavone@590: call(code, options->write_8_noinc); pavone@590: //dispose of return address as we'll be jumping somewhere else pavone@895: add_ir(code, 16, RSP, SZ_PTR); pavone@590: //TODO: Support interrupt mode 0 and 2 pavone@590: mov_ir(code, 0x38, options->gen.scratch1, SZ_W); pavone@593: call(code, options->native_addr); pavone@663: mov_rrind(code, options->gen.scratch1, options->gen.context_reg, SZ_PTR); pavone@895: tmp_stack_off = code->stack_off; pavone@665: restore_callee_save_regs(code); pavone@663: //return to caller of z80_run to sync pavone@663: retn(code); pavone@895: code->stack_off = tmp_stack_off; pavone@593: pavone@819: //HACK pavone@819: options->gen.address_size = SZ_D; pavone@819: options->gen.address_mask = 0xFF; pavone@819: options->read_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, READ_8, NULL); pavone@819: options->write_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, WRITE_8, NULL); pavone@819: options->gen.address_size = SZ_W; pavone@819: options->gen.address_mask = 0xFFFF; pavone@652: pavone@594: options->read_16 = code->cur; pavone@594: cycles(&options->gen, 3); pavone@594: check_cycles(&options->gen); pavone@594: //TODO: figure out how to handle the extra wait state for word reads to bank area pavone@657: //may also need special handling to avoid too much stack depth when access is blocked pavone@594: push_r(code, options->gen.scratch1); pavone@594: call(code, options->read_8_noinc); pavone@594: mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B); pavone@735: #ifndef X86_64 pavone@735: //scratch 2 is a caller save register in 32-bit builds and may be clobbered by something called from the read8 fun pavone@735: 
mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_B); pavone@735: #endif pavone@594: pop_r(code, options->gen.scratch1); pavone@594: add_ir(code, 1, options->gen.scratch1, SZ_W); pavone@594: cycles(&options->gen, 3); pavone@594: check_cycles(&options->gen); pavone@594: call(code, options->read_8_noinc); pavone@594: shl_ir(code, 8, options->gen.scratch1, SZ_W); pavone@735: #ifdef X86_64 pavone@594: mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B); pavone@735: #else pavone@735: mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch1, SZ_B); pavone@735: #endif pavone@594: retn(code); pavone@652: pavone@594: options->write_16_highfirst = code->cur; pavone@594: cycles(&options->gen, 3); pavone@594: check_cycles(&options->gen); pavone@594: push_r(code, options->gen.scratch2); pavone@594: push_r(code, options->gen.scratch1); pavone@594: add_ir(code, 1, options->gen.scratch2, SZ_W); pavone@594: shr_ir(code, 8, options->gen.scratch1, SZ_W); pavone@594: call(code, options->write_8_noinc); pavone@594: pop_r(code, options->gen.scratch1); pavone@594: pop_r(code, options->gen.scratch2); pavone@594: cycles(&options->gen, 3); pavone@594: check_cycles(&options->gen); pavone@594: //TODO: Check if we can get away with TCO here pavone@594: call(code, options->write_8_noinc); pavone@594: retn(code); pavone@652: pavone@594: options->write_16_lowfirst = code->cur; pavone@594: cycles(&options->gen, 3); pavone@594: check_cycles(&options->gen); pavone@594: push_r(code, options->gen.scratch2); pavone@594: push_r(code, options->gen.scratch1); pavone@594: call(code, options->write_8_noinc); pavone@594: pop_r(code, options->gen.scratch1); pavone@594: pop_r(code, options->gen.scratch2); pavone@594: add_ir(code, 1, options->gen.scratch2, SZ_W); pavone@594: shr_ir(code, 8, options->gen.scratch1, SZ_W); pavone@594: cycles(&options->gen, 3); pavone@594: check_cycles(&options->gen); 
pavone@594: //TODO: Check if we can get away with TCO here pavone@594: call(code, options->write_8_noinc); pavone@594: retn(code); pavone@593: pavone@593: options->retrans_stub = code->cur; pavone@895: tmp_stack_off = code->stack_off; pavone@899: //calculate size of patch pavone@899: mov_ir(code, 0x7FFF, options->gen.scratch1, SZ_D); pavone@899: code->stack_off += sizeof(void *); pavone@899: if (code->stack_off & 0xF) { pavone@899: sub_ir(code, 16 - (code->stack_off & 0xF), RSP, SZ_PTR); pavone@899: } pavone@899: call_noalign(code, options->retrans_stub); pavone@899: uint32_t patch_size = code->cur - options->retrans_stub; pavone@899: code->cur = options->retrans_stub; pavone@899: code->stack_off = tmp_stack_off; pavone@899: pavone@593: //pop return address pavone@593: pop_r(code, options->gen.scratch2); pavone@895: add_ir(code, 16-sizeof(void*), RSP, SZ_PTR); pavone@895: code->stack_off = tmp_stack_off; pavone@593: call(code, options->gen.save_context); pavone@593: //adjust pointer before move and call instructions that got us here pavone@899: sub_ir(code, patch_size, options->gen.scratch2, SZ_PTR); pavone@593: push_r(code, options->gen.context_reg); pavone@657: call_args(code, (code_ptr)z80_retranslate_inst, 3, options->gen.scratch1, options->gen.context_reg, options->gen.scratch2); pavone@593: pop_r(code, options->gen.context_reg); pavone@593: mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); pavone@593: call(code, options->gen.load_context); pavone@593: jmp_r(code, options->gen.scratch1); pavone@593: pavone@593: options->run = (z80_run_fun)code->cur; pavone@895: tmp_stack_off = code->stack_off; pavone@665: save_callee_save_regs(code); pavone@729: #ifdef X86_64 pavone@593: mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); pavone@729: #else pavone@729: mov_rdispr(code, RSP, 5 * sizeof(int32_t), options->gen.context_reg, SZ_PTR); pavone@729: #endif pavone@594: call(code, options->load_context_scratch); pavone@593: cmp_irdisp(code, 0, options->gen.context_reg, 
offsetof(z80_context, extra_pc), SZ_PTR); pavone@593: code_ptr no_extra = code->cur+1; pavone@593: jcc(code, CC_Z, no_extra); pavone@898: sub_ir(code, 16-sizeof(void *), RSP, SZ_PTR); pavone@593: push_rdisp(code, options->gen.context_reg, offsetof(z80_context, extra_pc)); pavone@593: mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); pavone@593: *no_extra = code->cur - (no_extra + 1); pavone@593: jmp_rind(code, options->gen.context_reg); pavone@895: code->stack_off = tmp_stack_off; pavone@213: } pavone@235: pavone@590: void init_z80_context(z80_context * context, z80_options * options) pavone@235: { pavone@235: memset(context, 0, sizeof(*context)); pavone@360: context->static_code_map = malloc(sizeof(*context->static_code_map)); pavone@259: context->static_code_map->base = NULL; pavone@235: context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); pavone@235: memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000); pavone@627: context->banked_code_map = malloc(sizeof(native_map_slot)); pavone@627: memset(context->banked_code_map, 0, sizeof(native_map_slot)); pavone@235: context->options = options; pavone@668: context->int_cycle = CYCLE_NEVER; pavone@668: context->int_pulse_start = CYCLE_NEVER; pavone@668: context->int_pulse_end = CYCLE_NEVER; pavone@668: } pavone@668: pavone@668: void z80_run(z80_context * context, uint32_t target_cycle) pavone@668: { pavone@668: if (context->reset || context->busack) { pavone@668: context->current_cycle = target_cycle; pavone@668: } else { pavone@668: if (context->current_cycle < target_cycle) { pavone@668: //busreq is sampled at the end of an m-cycle pavone@668: //we can approximate that by running for a single m-cycle after a bus request pavone@668: context->sync_cycle = context->busreq ? 
context->current_cycle + 3*context->options->gen.clock_divider : target_cycle; pavone@668: if (!context->native_pc) { pavone@668: context->native_pc = z80_get_native_address_trans(context, context->pc); pavone@668: } pavone@668: while (context->current_cycle < context->sync_cycle) pavone@668: { pavone@668: if (context->int_pulse_end < context->current_cycle || context->int_pulse_end == CYCLE_NEVER) { pavone@668: z80_next_int_pulse(context); pavone@668: } pavone@668: if (context->iff1) { pavone@668: context->int_cycle = context->int_pulse_start < context->int_enable_cycle ? context->int_enable_cycle : context->int_pulse_start; pavone@668: } else { pavone@668: context->int_cycle = CYCLE_NEVER; pavone@668: } pavone@668: context->target_cycle = context->sync_cycle < context->int_cycle ? context->sync_cycle : context->int_cycle; pavone@670: dprintf("Running Z80 from cycle %d to cycle %d. Int cycle: %d (%d - %d)\n", context->current_cycle, context->sync_cycle, context->int_cycle, context->int_pulse_start, context->int_pulse_end); pavone@668: context->options->run(context); pavone@668: dprintf("Z80 ran to cycle %d\n", context->current_cycle); pavone@668: } pavone@668: if (context->busreq) { pavone@668: context->busack = 1; pavone@668: context->current_cycle = target_cycle; pavone@668: } pavone@668: } pavone@668: } pavone@668: } pavone@668: pavone@884: void z80_options_free(z80_options *opts) pavone@884: { pavone@884: free(opts->gen.native_code_map); pavone@884: free(opts->gen.ram_inst_sizes); pavone@884: free(opts); pavone@884: } pavone@884: pavone@668: void z80_assert_reset(z80_context * context, uint32_t cycle) pavone@668: { pavone@668: z80_run(context, cycle); pavone@668: context->reset = 1; pavone@235: } pavone@235: pavone@668: void z80_clear_reset(z80_context * context, uint32_t cycle) pavone@668: { pavone@668: z80_run(context, cycle); pavone@668: if (context->reset) { pavone@668: //TODO: Handle case where reset is not asserted long enough pavone@701: context->im = 
0; pavone@701: context->iff1 = context->iff2 = 0; pavone@668: context->native_pc = NULL; pavone@701: context->extra_pc = NULL; pavone@668: context->pc = 0; pavone@668: context->reset = 0; pavone@676: if (context->busreq) { pavone@676: //TODO: Figure out appropriate delay pavone@676: context->busack = 1; pavone@676: } pavone@668: } pavone@668: } pavone@668: pavone@668: void z80_assert_busreq(z80_context * context, uint32_t cycle) pavone@668: { pavone@668: z80_run(context, cycle); pavone@668: context->busreq = 1; pavone@854: //this is an imperfect aproximation since most M-cycles take less tstates than the max pavone@854: //and a short 3-tstate m-cycle can take an unbounded number due to wait states pavone@854: if (context->current_cycle - cycle > MAX_MCYCLE_LENGTH * context->options->gen.clock_divider) { pavone@854: context->busack = 1; pavone@854: } pavone@701: } pavone@668: pavone@668: void z80_clear_busreq(z80_context * context, uint32_t cycle) pavone@668: { pavone@668: z80_run(context, cycle); pavone@668: context->busreq = 0; pavone@668: context->busack = 0; pavone@844: //there appears to be at least a 1 Z80 cycle delay between busreq pavone@844: //being released and resumption of execution pavone@844: context->current_cycle += context->options->gen.clock_divider; pavone@235: } pavone@235: pavone@668: uint8_t z80_get_busack(z80_context * context, uint32_t cycle) pavone@235: { pavone@668: z80_run(context, cycle); pavone@668: return context->busack; pavone@668: } pavone@668: pavone@668: void z80_adjust_cycles(z80_context * context, uint32_t deduction) pavone@668: { pavone@668: if (context->current_cycle < deduction) { pavone@668: fprintf(stderr, "WARNING: Deduction of %u cycles when Z80 cycle counter is only %u\n", deduction, context->current_cycle); pavone@668: context->current_cycle = 0; pavone@668: } else { pavone@668: context->current_cycle -= deduction; pavone@668: } pavone@668: if (context->int_enable_cycle != CYCLE_NEVER) { pavone@668: if 
(context->int_enable_cycle < deduction) { pavone@668: context->int_enable_cycle = 0; pavone@668: } else { pavone@668: context->int_enable_cycle -= deduction; pavone@668: } pavone@668: } pavone@668: if (context->int_pulse_start != CYCLE_NEVER) { pavone@668: if (context->int_pulse_end < deduction) { pavone@668: context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER; pavone@668: } else { pavone@668: context->int_pulse_end -= deduction; pavone@668: if (context->int_pulse_start < deduction) { pavone@668: context->int_pulse_start = 0; pavone@668: } else { pavone@668: context->int_pulse_start -= deduction; pavone@668: } pavone@668: } pavone@668: } pavone@235: } pavone@235: pavone@652: uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst) pavone@235: { pavone@966: code_info code = {dst, dst+32}; pavone@659: mov_ir(&code, address, context->options->gen.scratch1, SZ_W); pavone@652: call(&code, context->bp_stub); pavone@652: return code.cur-dst; pavone@626: } pavone@626: pavone@626: void zcreate_stub(z80_context * context) pavone@626: { pavone@652: z80_options * opts = context->options; pavone@652: code_info *code = &opts->gen.code; pavone@894: uint32_t start_stack_off = code->stack_off; pavone@652: check_code_prologue(code); pavone@652: context->bp_stub = code->cur; pavone@626: pavone@682: //Calculate length of prologue pavone@652: check_cycles_int(&opts->gen, 0); pavone@652: int check_int_size = code->cur-context->bp_stub; pavone@652: code->cur = context->bp_stub; pavone@626: pavone@626: //Calculate length of patch pavone@652: int patch_size = zbreakpoint_patch(context, 0, code->cur); pavone@626: pavone@682: //Save context and call breakpoint handler pavone@652: call(code, opts->gen.save_context); pavone@652: push_r(code, opts->gen.scratch1); pavone@657: call_args_abi(code, context->bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1); pavone@664: mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); pavone@682: //Restore context 
pavone@652: call(code, opts->gen.load_context); pavone@652: pop_r(code, opts->gen.scratch1); pavone@682: //do prologue stuff pavone@652: cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D); pavone@652: uint8_t * jmp_off = code->cur+1; pavone@652: jcc(code, CC_NC, code->cur + 7); pavone@652: pop_r(code, opts->gen.scratch1); pavone@664: add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR); pavone@652: push_r(code, opts->gen.scratch1); pavone@652: jmp(code, opts->gen.handle_cycle_limit_int); pavone@652: *jmp_off = code->cur - (jmp_off+1); pavone@682: //jump back to body of translated instruction pavone@652: pop_r(code, opts->gen.scratch1); pavone@664: add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR); pavone@652: jmp_r(code, opts->gen.scratch1); pavone@894: code->stack_off = start_stack_off; pavone@235: } pavone@235: pavone@366: void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) pavone@366: { pavone@626: context->bp_handler = bp_handler; pavone@819: uint8_t bit = 1 << (address % 8); pavone@819: if (!(bit & context->breakpoint_flags[address / 8])) { pavone@819: context->breakpoint_flags[address / 8] |= bit; pavone@626: if (!context->bp_stub) { pavone@626: zcreate_stub(context); pavone@366: } pavone@626: uint8_t * native = z80_get_native_address(context, address); pavone@626: if (native) { pavone@626: zbreakpoint_patch(context, address, native); pavone@626: } pavone@366: } pavone@366: } pavone@235: pavone@366: void zremove_breakpoint(z80_context * context, uint16_t address) pavone@366: { pavone@819: context->breakpoint_flags[address / 8] &= ~(1 << (address % 8)); pavone@366: uint8_t * native = z80_get_native_address(context, address); pavone@626: if (native) { pavone@652: z80_options * opts = context->options; pavone@652: code_info tmp_code = opts->gen.code; pavone@652: opts->gen.code.cur = native; pavone@652: opts->gen.code.last = native + 16; pavone@652: check_cycles_int(&opts->gen, address); 
pavone@652: opts->gen.code = tmp_code; pavone@792: } pavone@366: } pavone@366: