annotate z80_to_x86.c @ 989:d70000fdff0b

Implemented IR and undefined bits of info word for address error exception frames
author Michael Pavone <pavone@retrodev.com>
date Wed, 27 Apr 2016 21:39:17 -0700
parents 1eb616b8cbe9
children fbfb821e92a8
rev   line source
pavone@467 1 /*
pavone@467 2 Copyright 2013 Michael Pavone
pavone@505 3 This file is part of BlastEm.
pavone@467 4 BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
pavone@467 5 */
pavone@235 6 #include "z80inst.h"
pavone@213 7 #include "z80_to_x86.h"
pavone@213 8 #include "gen_x86.h"
pavone@235 9 #include "mem.h"
pavone@792 10 #include "util.h"
pavone@235 11 #include <stdio.h>
pavone@235 12 #include <stdlib.h>
pavone@235 13 #include <stddef.h>
pavone@235 14 #include <string.h>
pavone@213 15
pavone@213 16 #define MODE_UNUSED (MODE_IMMED-1)
pavone@854 17 #define MAX_MCYCLE_LENGTH 6
pavone@213 18
pavone@315 19 //#define DO_DEBUG_PRINT
pavone@275 20
pavone@268 21 #ifdef DO_DEBUG_PRINT
pavone@268 22 #define dprintf printf
pavone@268 23 #else
pavone@268 24 #define dprintf
pavone@268 25 #endif
pavone@268 26
pavone@652 27 uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst);
pavone@715 28 void z80_handle_deferred(z80_context * context);
pavone@213 29
pavone@235 30 uint8_t z80_size(z80inst * inst)
pavone@213 31 {
pavone@213 32 uint8_t reg = (inst->reg & 0x1F);
pavone@235 33 if (reg != Z80_UNUSED && reg != Z80_USE_IMMED) {
pavone@213 34 return reg < Z80_BC ? SZ_B : SZ_W;
pavone@213 35 }
pavone@213 36 //TODO: Handle any necessary special cases
pavone@213 37 return SZ_B;
pavone@213 38 }
pavone@213 39
pavone@731 40 uint8_t zf_off(uint8_t flag)
pavone@731 41 {
pavone@731 42 return offsetof(z80_context, flags) + flag;
pavone@731 43 }
pavone@731 44
pavone@731 45 uint8_t zaf_off(uint8_t flag)
pavone@731 46 {
pavone@731 47 return offsetof(z80_context, alt_flags) + flag;
pavone@731 48 }
pavone@731 49
pavone@731 50 uint8_t zr_off(uint8_t reg)
pavone@731 51 {
pavone@731 52 if (reg > Z80_A) {
pavone@731 53 reg = z80_low_reg(reg);
pavone@731 54 }
pavone@731 55 return offsetof(z80_context, regs) + reg;
pavone@731 56 }
pavone@731 57
pavone@731 58 uint8_t zar_off(uint8_t reg)
pavone@731 59 {
pavone@731 60 if (reg > Z80_A) {
pavone@731 61 reg = z80_low_reg(reg);
pavone@731 62 }
pavone@731 63 return offsetof(z80_context, alt_regs) + reg;
pavone@731 64 }
pavone@731 65
pavone@731 66 void zreg_to_native(z80_options *opts, uint8_t reg, uint8_t native_reg)
pavone@731 67 {
pavone@731 68 if (opts->regs[reg] >= 0) {
pavone@731 69 mov_rr(&opts->gen.code, opts->regs[reg], native_reg, reg > Z80_A ? SZ_W : SZ_B);
pavone@731 70 } else {
pavone@731 71 mov_rdispr(&opts->gen.code, opts->gen.context_reg, zr_off(reg), native_reg, reg > Z80_A ? SZ_W : SZ_B);
pavone@731 72 }
pavone@731 73 }
pavone@731 74
pavone@731 75 void native_to_zreg(z80_options *opts, uint8_t native_reg, uint8_t reg)
pavone@731 76 {
pavone@731 77 if (opts->regs[reg] >= 0) {
pavone@731 78 mov_rr(&opts->gen.code, native_reg, opts->regs[reg], reg > Z80_A ? SZ_W : SZ_B);
pavone@731 79 } else {
pavone@731 80 mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, zr_off(reg), reg > Z80_A ? SZ_W : SZ_B);
pavone@731 81 }
pavone@731 82 }
pavone@731 83
//Resolves the instruction's reg operand into a host effective address (ea)
//May emit rotation code to make the upper half of a register pair
//byte-addressable; such rotations are undone later by z80_save_reg
void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts)
{
	code_info *code = &opts->gen.code;
	if (inst->reg == Z80_USE_IMMED) {
		//operand is the instruction's immediate value
		ea->mode = MODE_IMMED;
		ea->disp = inst->immed;
	} else if ((inst->reg & 0x1F) == Z80_UNUSED) {
		ea->mode = MODE_UNUSED;
	} else {
		ea->mode = MODE_REG_DIRECT;
		if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
			//IYH is not directly addressable when IY lives in a native reg;
			//expose it by rotating IY so its high byte is in the low position
			if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) {
				//both halves of IY are involved, so work on a copy in scratch1
				mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W);
				ror_ir(code, 8, opts->gen.scratch1, SZ_W);
				ea->base = opts->gen.scratch1;
			} else {
				//rotate IY in place; z80_save_reg rotates it back
				ea->base = opts->regs[Z80_IYL];
				ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
			}
		} else if(opts->regs[inst->reg] >= 0) {
			ea->base = opts->regs[inst->reg];
			if (ea->base >= AH && ea->base <= BH) {
				//NOTE(review): sibling translate_z80_ea wraps the equivalent
				//check in #ifdef X86_64 — confirm whether that guard is needed here
				if ((inst->addr_mode & 0x1F) == Z80_REG) {
					uint8_t other_reg = opts->regs[inst->ea_reg];
					if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
						//we can't mix an *H reg with a register that requires the REX prefix
						ea->base = opts->regs[z80_low_reg(inst->reg)];
						ror_ir(code, 8, ea->base, SZ_W);
					}
				} else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) {
					//temp regs require REX prefix too
					ea->base = opts->regs[z80_low_reg(inst->reg)];
					ror_ir(code, 8, ea->base, SZ_W);
				}
			}
		} else {
			//register is not mapped natively; address it in the context struct
			ea->mode = MODE_REG_DISPLACE8;
			ea->base = opts->gen.context_reg;
			ea->disp = zr_off(inst->reg);
		}
	}
}
pavone@213 126
//Undoes any register rotation performed by translate_z80_reg and, for the
//IYH-via-scratch case, writes the scratch copy back to the real register
//Must be called after the translated operation completes
void z80_save_reg(z80inst * inst, z80_options * opts)
{
	code_info *code = &opts->gen.code;
	if (inst->reg == Z80_USE_IMMED || inst->reg == Z80_UNUSED) {
		//no register was touched, nothing to restore
		return;
	}
	if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
		if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) {
			//result was computed into scratch1; rotate IY, merge the byte
			//back into its (now low) high half, then restore rotation
			ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
			mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B);
			ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
		} else {
			//IY was rotated in place; rotate it back to normal orientation
			ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
		}
	} else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) {
		//NOTE(review): sibling z80_save_ea wraps the equivalent REX check in
		//#ifdef X86_64 — confirm whether that guard is needed here
		if ((inst->addr_mode & 0x1F) == Z80_REG) {
			uint8_t other_reg = opts->regs[inst->ea_reg];
			if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
				//we can't mix an *H reg with a register that requires the REX prefix
				ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
			}
		} else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) {
			//temp regs require REX prefix too
			ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
		}
	}
}
pavone@213 154
pavone@591 155 void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t read, uint8_t modify)
pavone@213 156 {
pavone@590 157 code_info *code = &opts->gen.code;
pavone@730 158 uint8_t size, areg;
pavone@730 159 int8_t reg;
pavone@235 160 ea->mode = MODE_REG_DIRECT;
pavone@590 161 areg = read ? opts->gen.scratch1 : opts->gen.scratch2;
pavone@213 162 switch(inst->addr_mode & 0x1F)
pavone@213 163 {
pavone@213 164 case Z80_REG:
pavone@731 165 if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
pavone@312 166 if (inst->reg == Z80_IYL) {
pavone@590 167 mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W);
pavone@590 168 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
pavone@590 169 ea->base = opts->gen.scratch1;
pavone@312 170 } else {
pavone@312 171 ea->base = opts->regs[Z80_IYL];
pavone@590 172 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
pavone@312 173 }
pavone@651 174 } else if(opts->regs[inst->ea_reg] >= 0) {
pavone@213 175 ea->base = opts->regs[inst->ea_reg];
pavone@267 176 if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) {
pavone@267 177 uint8_t other_reg = opts->regs[inst->reg];
pavone@666 178 #ifdef X86_64
pavone@269 179 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
pavone@267 180 //we can't mix an *H reg with a register that requires the REX prefix
pavone@267 181 ea->base = opts->regs[z80_low_reg(inst->ea_reg)];
pavone@590 182 ror_ir(code, 8, ea->base, SZ_W);
pavone@267 183 }
pavone@666 184 #endif
pavone@267 185 }
pavone@651 186 } else {
pavone@651 187 ea->mode = MODE_REG_DISPLACE8;
pavone@659 188 ea->base = opts->gen.context_reg;
pavone@731 189 ea->disp = zr_off(inst->ea_reg);
pavone@213 190 }
pavone@213 191 break;
pavone@213 192 case Z80_REG_INDIRECT:
pavone@731 193 zreg_to_native(opts, inst->ea_reg, areg);
pavone@213 194 size = z80_size(inst);
pavone@213 195 if (read) {
pavone@213 196 if (modify) {
pavone@590 197 //push_r(code, opts->gen.scratch1);
pavone@591 198 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
pavone@213 199 }
pavone@213 200 if (size == SZ_B) {
pavone@590 201 call(code, opts->read_8);
pavone@213 202 } else {
pavone@591 203 call(code, opts->read_16);
pavone@213 204 }
pavone@213 205 if (modify) {
pavone@590 206 //pop_r(code, opts->gen.scratch2);
pavone@591 207 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W);
pavone@213 208 }
pavone@213 209 }
pavone@590 210 ea->base = opts->gen.scratch1;
pavone@213 211 break;
pavone@213 212 case Z80_IMMED:
pavone@213 213 ea->mode = MODE_IMMED;
pavone@213 214 ea->disp = inst->immed;
pavone@213 215 break;
pavone@213 216 case Z80_IMMED_INDIRECT:
pavone@591 217 mov_ir(code, inst->immed, areg, SZ_W);
pavone@213 218 size = z80_size(inst);
pavone@213 219 if (read) {
pavone@277 220 /*if (modify) {
pavone@591 221 push_r(code, opts->gen.scratch1);
pavone@277 222 }*/
pavone@213 223 if (size == SZ_B) {
pavone@593 224 call(code, opts->read_8);
pavone@213 225 } else {
pavone@593 226 call(code, opts->read_16);
pavone@213 227 }
pavone@213 228 if (modify) {
pavone@591 229 //pop_r(code, opts->gen.scratch2);
pavone@591 230 mov_ir(code, inst->immed, opts->gen.scratch2, SZ_W);
pavone@213 231 }
pavone@213 232 }
pavone@590 233 ea->base = opts->gen.scratch1;
pavone@213 234 break;
pavone@235 235 case Z80_IX_DISPLACE:
pavone@235 236 case Z80_IY_DISPLACE:
pavone@731 237 zreg_to_native(opts, (inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY, areg);
pavone@591 238 add_ir(code, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W);
pavone@213 239 size = z80_size(inst);
pavone@213 240 if (read) {
pavone@213 241 if (modify) {
pavone@591 242 //push_r(code, opts->gen.scratch1);
pavone@591 243 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
pavone@213 244 }
pavone@213 245 if (size == SZ_B) {
pavone@593 246 call(code, opts->read_8);
pavone@213 247 } else {
pavone@593 248 call(code, opts->read_16);
pavone@213 249 }
pavone@213 250 if (modify) {
pavone@591 251 //pop_r(code, opts->gen.scratch2);
pavone@591 252 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W);
pavone@213 253 }
pavone@213 254 }
pavone@590 255 ea->base = opts->gen.scratch1;
pavone@213 256 break;
pavone@213 257 case Z80_UNUSED:
pavone@235 258 ea->mode = MODE_UNUSED;
pavone@213 259 break;
pavone@213 260 default:
pavone@792 261 fatal_error("Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F);
pavone@213 262 }
pavone@213 263 }
pavone@213 264
//Undoes any register rotation performed by translate_z80_ea for the ea
//operand and, for the IYL-in-scratch case, writes the scratch copy back
//Must be called after the translated operation completes
void z80_save_ea(code_info *code, z80inst * inst, z80_options * opts)
{
	if ((inst->addr_mode & 0x1F) == Z80_REG) {
		if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
			if (inst->reg == Z80_IYL) {
				//result was computed into scratch1; rotate IY, merge the byte
				//back into its (now low) high half, then restore rotation
				ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
				mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B);
				ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
			} else {
				//IY was rotated in place; rotate it back to normal orientation
				ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
			}
		} else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
			uint8_t other_reg = opts->regs[inst->reg];
#ifdef X86_64
			if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
				//we can't mix an *H reg with a register that requires the REX prefix
				ror_ir(code, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W);
			}
#endif
		}
	}
}
pavone@213 287
pavone@593 288 void z80_save_result(z80_options *opts, z80inst * inst)
pavone@213 289 {
pavone@253 290 switch(inst->addr_mode & 0x1f)
pavone@253 291 {
pavone@253 292 case Z80_REG_INDIRECT:
pavone@253 293 case Z80_IMMED_INDIRECT:
pavone@253 294 case Z80_IX_DISPLACE:
pavone@253 295 case Z80_IY_DISPLACE:
pavone@253 296 if (z80_size(inst) == SZ_B) {
pavone@593 297 call(&opts->gen.code, opts->write_8);
pavone@253 298 } else {
pavone@593 299 call(&opts->gen.code, opts->write_16_lowfirst);
pavone@253 300 }
pavone@213 301 }
pavone@213 302 }
pavone@213 303
//Symbolic values for translate_z80_ea's read parameter
enum {
	DONT_READ=0,
	READ
};

//Symbolic values for translate_z80_ea's modify parameter
enum {
	DONT_MODIFY=0,
	MODIFY
};
pavone@213 313
//Debug helper: dumps both banks of Z80 registers and interrupt state to
//stdout, then terminates the process
//Note: despite the "HL:" style labels, H/L, IXH/IXL and IYH/IYL are stored
//as separate byte registers and combined here for display
void z80_print_regs_exit(z80_context * context)
{
	printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n",
		context->regs[Z80_A], context->regs[Z80_B], context->regs[Z80_C],
		context->regs[Z80_D], context->regs[Z80_E],
		(context->regs[Z80_H] << 8) | context->regs[Z80_L],
		(context->regs[Z80_IXH] << 8) | context->regs[Z80_IXL],
		(context->regs[Z80_IYH] << 8) | context->regs[Z80_IYL],
		context->sp, context->im, context->iff1, context->iff2);
	puts("--Alternate Regs--");
	printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\n",
		context->alt_regs[Z80_A], context->alt_regs[Z80_B], context->alt_regs[Z80_C],
		context->alt_regs[Z80_D], context->alt_regs[Z80_E],
		(context->alt_regs[Z80_H] << 8) | context->alt_regs[Z80_L],
		(context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL],
		(context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]);
	exit(0);
}
pavone@235 332
pavone@652 333 void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t interp)
pavone@213 334 {
pavone@591 335 uint32_t num_cycles;
pavone@591 336 host_ea src_op, dst_op;
pavone@235 337 uint8_t size;
pavone@590 338 z80_options *opts = context->options;
pavone@591 339 uint8_t * start = opts->gen.code.cur;
pavone@591 340 code_info *code = &opts->gen.code;
pavone@627 341 if (!interp) {
pavone@652 342 check_cycles_int(&opts->gen, address);
pavone@819 343 if (context->breakpoint_flags[address / 8] & (1 << (address % 8))) {
pavone@627 344 zbreakpoint_patch(context, address, start);
pavone@627 345 }
pavone@735 346 #ifdef Z80_LOG_ADDRESS
pavone@735 347 log_address(&opts->gen, address, "Z80: %X @ %d\n");
pavone@735 348 #endif
pavone@626 349 }
pavone@213 350 switch(inst->op)
pavone@213 351 {
pavone@213 352 case Z80_LD:
pavone@235 353 size = z80_size(inst);
pavone@235 354 switch (inst->addr_mode & 0x1F)
pavone@235 355 {
pavone@235 356 case Z80_REG:
pavone@235 357 case Z80_REG_INDIRECT:
pavone@591 358 num_cycles = size == SZ_B ? 4 : 6;
pavone@841 359 if (inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY || (inst->ea_reg >= Z80_IXL && inst->ea_reg <= Z80_IYH)) {
pavone@591 360 num_cycles += 4;
pavone@235 361 }
pavone@841 362 if (inst->reg == Z80_I || inst->ea_reg == Z80_I || inst->reg == Z80_R || inst->ea_reg == Z80_R) {
pavone@591 363 num_cycles += 5;
pavone@506 364 }
pavone@235 365 break;
pavone@235 366 case Z80_IMMED:
pavone@591 367 num_cycles = size == SZ_B ? 7 : 10;
pavone@235 368 break;
pavone@235 369 case Z80_IMMED_INDIRECT:
pavone@591 370 num_cycles = 10;
pavone@235 371 break;
pavone@235 372 case Z80_IX_DISPLACE:
pavone@235 373 case Z80_IY_DISPLACE:
pavone@591 374 num_cycles = 16;
pavone@235 375 break;
pavone@235 376 }
pavone@235 377 if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) {
pavone@591 378 num_cycles += 4;
pavone@235 379 }
pavone@591 380 cycles(&opts->gen, num_cycles);
pavone@213 381 if (inst->addr_mode & Z80_DIR) {
pavone@591 382 translate_z80_ea(inst, &dst_op, opts, DONT_READ, MODIFY);
pavone@591 383 translate_z80_reg(inst, &src_op, opts);
pavone@235 384 } else {
pavone@591 385 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
pavone@591 386 translate_z80_reg(inst, &dst_op, opts);
pavone@235 387 }
pavone@235 388 if (src_op.mode == MODE_REG_DIRECT) {
pavone@262 389 if(dst_op.mode == MODE_REG_DISPLACE8) {
pavone@591 390 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
pavone@262 391 } else {
pavone@591 392 mov_rr(code, src_op.base, dst_op.base, size);
pavone@262 393 }
pavone@262 394 } else if(src_op.mode == MODE_IMMED) {
pavone@730 395 if(dst_op.mode == MODE_REG_DISPLACE8) {
pavone@730 396 mov_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, size);
pavone@730 397 } else {
pavone@730 398 mov_ir(code, src_op.disp, dst_op.base, size);
pavone@730 399 }
pavone@213 400 } else {
pavone@730 401 if(dst_op.mode == MODE_REG_DISPLACE8) {
pavone@730 402 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, size);
pavone@730 403 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, size);
pavone@730 404 } else {
pavone@730 405 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size);
pavone@730 406 }
pavone@213 407 }
pavone@842 408 if ((inst->ea_reg == Z80_I || inst->ea_reg == Z80_R) && inst->addr_mode == Z80_REG) {
pavone@842 409 //ld a, i and ld a, r sets some flags
pavone@652 410 cmp_ir(code, 0, dst_op.base, SZ_B);
pavone@652 411 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
pavone@652 412 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
pavone@821 413 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);;
pavone@652 414 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);;
pavone@659 415 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch1, SZ_B);
pavone@652 416 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
pavone@651 417 }
pavone@591 418 z80_save_reg(inst, opts);
pavone@591 419 z80_save_ea(code, inst, opts);
pavone@235 420 if (inst->addr_mode & Z80_DIR) {
pavone@593 421 z80_save_result(opts, inst);
pavone@213 422 }
pavone@213 423 break;
pavone@213 424 case Z80_PUSH:
pavone@591 425 cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5);
pavone@591 426 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@235 427 if (inst->reg == Z80_AF) {
pavone@729 428 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
pavone@591 429 shl_ir(code, 8, opts->gen.scratch1, SZ_W);
pavone@591 430 mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B);
pavone@591 431 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
pavone@591 432 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B);
pavone@591 433 shl_ir(code, 2, opts->gen.scratch1, SZ_B);
pavone@591 434 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B);
pavone@591 435 shl_ir(code, 2, opts->gen.scratch1, SZ_B);
pavone@591 436 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B);
pavone@591 437 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
pavone@591 438 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B);
pavone@591 439 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
pavone@591 440 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B);
pavone@235 441 } else {
pavone@731 442 zreg_to_native(opts, inst->reg, opts->gen.scratch1);
pavone@235 443 }
pavone@591 444 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
pavone@593 445 call(code, opts->write_16_highfirst);
pavone@235 446 //no call to save_z80_reg needed since there's no chance we'll use the only
pavone@235 447 //the upper half of a register pair
pavone@213 448 break;
pavone@213 449 case Z80_POP:
pavone@591 450 cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4);
pavone@591 451 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
pavone@593 452 call(code, opts->read_16);
pavone@591 453 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@235 454 if (inst->reg == Z80_AF) {
pavone@505 455
pavone@591 456 bt_ir(code, 0, opts->gen.scratch1, SZ_W);
pavone@591 457 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
pavone@591 458 bt_ir(code, 1, opts->gen.scratch1, SZ_W);
pavone@591 459 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_N));
pavone@591 460 bt_ir(code, 2, opts->gen.scratch1, SZ_W);
pavone@591 461 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_PV));
pavone@591 462 bt_ir(code, 4, opts->gen.scratch1, SZ_W);
pavone@591 463 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_H));
pavone@591 464 bt_ir(code, 6, opts->gen.scratch1, SZ_W);
pavone@591 465 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_Z));
pavone@591 466 bt_ir(code, 7, opts->gen.scratch1, SZ_W);
pavone@591 467 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S));
pavone@591 468 shr_ir(code, 8, opts->gen.scratch1, SZ_W);
pavone@729 469 native_to_zreg(opts, opts->gen.scratch1, Z80_A);
pavone@235 470 } else {
pavone@731 471 native_to_zreg(opts, opts->gen.scratch1, inst->reg);
pavone@235 472 }
pavone@235 473 //no call to save_z80_reg needed since there's no chance we'll use the only
pavone@235 474 //the upper half of a register pair
pavone@213 475 break;
pavone@241 476 case Z80_EX:
pavone@241 477 if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) {
pavone@591 478 num_cycles = 4;
pavone@241 479 } else {
pavone@591 480 num_cycles = 8;
pavone@241 481 }
pavone@591 482 cycles(&opts->gen, num_cycles);
pavone@241 483 if (inst->addr_mode == Z80_REG) {
pavone@241 484 if(inst->reg == Z80_AF) {
pavone@729 485 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
pavone@729 486 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->gen.scratch2, SZ_B);
pavone@591 487 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B);
pavone@729 488 native_to_zreg(opts, opts->gen.scratch2, Z80_A);
pavone@505 489
pavone@241 490 //Flags are currently word aligned, so we can move
pavone@241 491 //them efficiently a word at a time
pavone@241 492 for (int f = ZF_C; f < ZF_NUM; f+=2) {
pavone@591 493 mov_rdispr(code, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W);
pavone@591 494 mov_rdispr(code, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W);
pavone@591 495 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W);
pavone@591 496 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W);
pavone@241 497 }
pavone@241 498 } else {
pavone@729 499 if (opts->regs[Z80_DE] >= 0 && opts->regs[Z80_HL] >= 0) {
pavone@729 500 xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W);
pavone@729 501 } else {
pavone@729 502 zreg_to_native(opts, Z80_DE, opts->gen.scratch1);
pavone@729 503 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
pavone@729 504 native_to_zreg(opts, opts->gen.scratch1, Z80_HL);
pavone@729 505 native_to_zreg(opts, opts->gen.scratch2, Z80_DE);
pavone@729 506 }
pavone@241 507 }
pavone@241 508 } else {
pavone@591 509 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
pavone@593 510 call(code, opts->read_8);
pavone@729 511 if (opts->regs[inst->reg] >= 0) {
pavone@729 512 xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B);
pavone@729 513 } else {
pavone@729 514 zreg_to_native(opts, inst->reg, opts->gen.scratch2);
pavone@729 515 xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B);
pavone@729 516 native_to_zreg(opts, opts->gen.scratch2, inst->reg);
pavone@729 517 }
pavone@591 518 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
pavone@593 519 call(code, opts->write_8);
pavone@591 520 cycles(&opts->gen, 1);
pavone@241 521 uint8_t high_reg = z80_high_reg(inst->reg);
pavone@591 522 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
pavone@591 523 add_ir(code, 1, opts->gen.scratch1, SZ_W);
pavone@593 524 call(code, opts->read_8);
pavone@729 525 if (opts->regs[inst->reg] >= 0) {
pavone@729 526 //even though some of the upper halves can be used directly
pavone@729 527 //the limitations on mixing *H regs with the REX prefix
pavone@729 528 //prevent us from taking advantage of it
pavone@729 529 uint8_t use_reg = opts->regs[inst->reg];
pavone@729 530 ror_ir(code, 8, use_reg, SZ_W);
pavone@729 531 xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B);
pavone@729 532 //restore reg to normal rotation
pavone@729 533 ror_ir(code, 8, use_reg, SZ_W);
pavone@729 534 } else {
pavone@729 535 zreg_to_native(opts, high_reg, opts->gen.scratch2);
pavone@729 536 xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B);
pavone@729 537 native_to_zreg(opts, opts->gen.scratch2, high_reg);
pavone@729 538 }
pavone@591 539 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
pavone@591 540 add_ir(code, 1, opts->gen.scratch2, SZ_W);
pavone@593 541 call(code, opts->write_8);
pavone@591 542 cycles(&opts->gen, 2);
pavone@241 543 }
pavone@241 544 break;
pavone@213 545 case Z80_EXX:
pavone@591 546 cycles(&opts->gen, 4);
pavone@729 547 zreg_to_native(opts, Z80_BC, opts->gen.scratch1);
pavone@729 548 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_BC), opts->gen.scratch2, SZ_W);
pavone@729 549 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_BC), SZ_W);
pavone@729 550 native_to_zreg(opts, opts->gen.scratch2, Z80_BC);
pavone@840 551
pavone@729 552 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
pavone@729 553 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_HL), opts->gen.scratch2, SZ_W);
pavone@729 554 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_HL), SZ_W);
pavone@729 555 native_to_zreg(opts, opts->gen.scratch2, Z80_HL);
pavone@840 556
pavone@729 557 zreg_to_native(opts, Z80_DE, opts->gen.scratch1);
pavone@729 558 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_DE), opts->gen.scratch2, SZ_W);
pavone@729 559 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_DE), SZ_W);
pavone@729 560 native_to_zreg(opts, opts->gen.scratch2, Z80_DE);
pavone@241 561 break;
pavone@272 562 case Z80_LDI: {
pavone@591 563 cycles(&opts->gen, 8);
pavone@729 564 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
pavone@593 565 call(code, opts->read_8);
pavone@729 566 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
pavone@593 567 call(code, opts->write_8);
pavone@591 568 cycles(&opts->gen, 2);
pavone@729 569 if (opts->regs[Z80_DE] >= 0) {
pavone@729 570 add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
pavone@729 571 } else {
pavone@729 572 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
pavone@729 573 }
pavone@729 574 if (opts->regs[Z80_HL] >= 0) {
pavone@729 575 add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
pavone@729 576 } else {
pavone@729 577 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
pavone@729 578 }
pavone@729 579 if (opts->regs[Z80_BC] >= 0) {
pavone@729 580 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
pavone@729 581 } else {
pavone@729 582 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
pavone@729 583 }
pavone@821 584 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
pavone@591 585 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@591 586 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
pavone@272 587 break;
pavone@272 588 }
pavone@261 589 case Z80_LDIR: {
pavone@591 590 cycles(&opts->gen, 8);
pavone@729 591 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
pavone@593 592 call(code, opts->read_8);
pavone@729 593 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
pavone@593 594 call(code, opts->write_8);
pavone@729 595 if (opts->regs[Z80_DE] >= 0) {
pavone@729 596 add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
pavone@729 597 } else {
pavone@729 598 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
pavone@729 599 }
pavone@729 600 if (opts->regs[Z80_HL] >= 0) {
pavone@729 601 add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
pavone@729 602 } else {
pavone@729 603 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
pavone@729 604 }
pavone@729 605 if (opts->regs[Z80_BC] >= 0) {
pavone@729 606 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
pavone@729 607 } else {
pavone@729 608 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
pavone@729 609 }
pavone@591 610 uint8_t * cont = code->cur+1;
pavone@591 611 jcc(code, CC_Z, code->cur+2);
pavone@591 612 cycles(&opts->gen, 7);
pavone@261 613 //TODO: Figure out what the flag state should be here
pavone@261 614 //TODO: Figure out whether an interrupt can interrupt this
pavone@591 615 jmp(code, start);
pavone@591 616 *cont = code->cur - (cont + 1);
pavone@591 617 cycles(&opts->gen, 2);
pavone@821 618 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
pavone@591 619 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@591 620 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
pavone@261 621 break;
pavone@261 622 }
pavone@273 623 case Z80_LDD: {
pavone@591 624 cycles(&opts->gen, 8);
pavone@729 625 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
pavone@593 626 call(code, opts->read_8);
pavone@729 627 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
pavone@593 628 call(code, opts->write_8);
pavone@591 629 cycles(&opts->gen, 2);
pavone@729 630 if (opts->regs[Z80_DE] >= 0) {
pavone@729 631 sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
pavone@729 632 } else {
pavone@729 633 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
pavone@729 634 }
pavone@729 635 if (opts->regs[Z80_HL] >= 0) {
pavone@828 636 sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
pavone@729 637 } else {
pavone@729 638 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
pavone@729 639 }
pavone@729 640 if (opts->regs[Z80_BC] >= 0) {
pavone@729 641 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
pavone@729 642 } else {
pavone@729 643 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
pavone@729 644 }
pavone@821 645 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
pavone@591 646 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@591 647 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
pavone@273 648 break;
pavone@273 649 }
	case Z80_LDDR: {
		//LDDR: block copy with decrement - move (HL) to (DE), decrement DE, HL and BC,
		//repeat until BC hits zero. Translated as one unrolled iteration that jumps back
		//to the start of the instruction while BC is non-zero.
		cycles(&opts->gen, 8);
		//read source byte at HL
		zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
		call(code, opts->read_8);
		//write it to the destination at DE
		zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
		call(code, opts->write_8);
		//decrement DE, HL and BC, using the context struct copy when the Z80 register
		//is not mapped to a native register on this host
		if (opts->regs[Z80_DE] >= 0) {
			sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
		} else {
			sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
		}
		if (opts->regs[Z80_HL] >= 0) {
			sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
		} else {
			sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
		}
		if (opts->regs[Z80_BC] >= 0) {
			sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
		} else {
			sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
		}
		//emit a short jcc with a placeholder 8-bit displacement; cont points at the
		//displacement byte so it can be patched once the fall-through target is known
		uint8_t * cont = code->cur+1;
		jcc(code, CC_Z, code->cur+2);
		//BC != 0: pay the repeat penalty and loop back to the instruction start
		cycles(&opts->gen, 7);
		//TODO: Figure out what the flag state should be here
		jmp(code, start);
		//patch the forward branch to land here (BC reached zero)
		*cont = code->cur - (cont + 1);
		cycles(&opts->gen, 2);
		//loop terminated with BC == 0, so H, N and PV all end up clear
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
		break;
	}
pavone@273 683 /*case Z80_CPI:
pavone@213 684 case Z80_CPIR:
pavone@213 685 case Z80_CPD:
pavone@213 686 case Z80_CPDR:
pavone@213 687 break;*/
	case Z80_ADD:
		//ADD A,src / ADD HL,rr / ADD IX/IY,rr
		//base timing is 4 cycles, plus extras for indexed, immediate and 16-bit forms
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		//emit the host add with the operand forms dictated by where dst/src live
		if (dst_op.mode == MODE_REG_DIRECT) {
			if (src_op.mode == MODE_REG_DIRECT) {
				add_rr(code, src_op.base, dst_op.base, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				add_ir(code, src_op.disp, dst_op.base, z80_size(inst));
			} else {
				add_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
			}
		} else {
			if (src_op.mode == MODE_REG_DIRECT) {
				add_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				add_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
			} else {
				//memory-to-memory add is not encodable on x86; bounce through scratch1
				mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
				add_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
			}
		}
		//capture host flags immediately after the add: C always, N cleared
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		//S, Z and PV (overflow) are only updated for 8-bit adds here
		if (z80_size(inst) == SZ_B) {
			setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_ADC:
		//ADC A,src / ADC HL,rr - add with carry
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		//bt of the emulated carry byte loads it into the host carry flag for adc below
		bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
		if (dst_op.mode == MODE_REG_DIRECT) {
			if (src_op.mode == MODE_REG_DIRECT) {
				adc_rr(code, src_op.base, dst_op.base, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				adc_ir(code, src_op.disp, dst_op.base, z80_size(inst));
			} else {
				adc_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
			}
		} else {
			if (src_op.mode == MODE_REG_DIRECT) {
				adc_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				adc_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
			} else {
				//memory-to-memory adc is not encodable on x86; bounce through scratch1
				mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
				adc_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
			}
		}
		//store flags from the host adc result; N is cleared for additions
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		//unlike ADD, S/Z/PV are stored for both 8- and 16-bit forms
		setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
		setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
		setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_SUB:
		//SUB src - subtract from A (8-bit only, so no SZ_W timing branch)
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		if (dst_op.mode == MODE_REG_DIRECT) {
			if (src_op.mode == MODE_REG_DIRECT) {
				sub_rr(code, src_op.base, dst_op.base, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				sub_ir(code, src_op.disp, dst_op.base, z80_size(inst));
			} else {
				sub_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
			}
		} else {
			if (src_op.mode == MODE_REG_DIRECT) {
				sub_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				sub_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
			} else {
				//memory-to-memory sub is not encodable on x86; bounce through scratch1
				mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
				sub_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
			}
		}
		//store flags from the host sub; N is set for subtractions
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
		//TODO: Implement half-carry flag
		setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
		setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_SBC:
		//SBC A,src / SBC HL,rr - subtract with carry (borrow)
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		//bt of the emulated carry byte loads it into the host carry flag for sbb below
		bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
		if (dst_op.mode == MODE_REG_DIRECT) {
			if (src_op.mode == MODE_REG_DIRECT) {
				sbb_rr(code, src_op.base, dst_op.base, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				sbb_ir(code, src_op.disp, dst_op.base, z80_size(inst));
			} else {
				sbb_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
			}
		} else {
			if (src_op.mode == MODE_REG_DIRECT) {
				sbb_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
			} else if (src_op.mode == MODE_IMMED) {
				sbb_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
			} else {
				//memory-to-memory sbb is not encodable on x86; bounce through scratch1
				mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
				sbb_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
			}
		}
		//store flags from the host sbb; N is set for subtractions
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
		setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
		setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_AND:
		//AND src - bitwise AND into A
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		//only the dst-in-register forms are handled here; dst is presumably always
		//A (register-allocated) for AND - NOTE(review): confirm against decoder
		if (src_op.mode == MODE_REG_DIRECT) {
			and_rr(code, src_op.base, dst_op.base, z80_size(inst));
		} else if (src_op.mode == MODE_IMMED) {
			and_ir(code, src_op.disp, dst_op.base, z80_size(inst));
		} else {
			and_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
		}
		//TODO: Cleanup flags
		//host AND always clears CF, so this stores 0 into the emulated carry
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		//logical ops use parity (CC_P) for PV rather than overflow
		if (z80_size(inst) == SZ_B) {
			setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_OR:
		//OR src - bitwise OR into A
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		if (src_op.mode == MODE_REG_DIRECT) {
			or_rr(code, src_op.base, dst_op.base, z80_size(inst));
		} else if (src_op.mode == MODE_IMMED) {
			or_ir(code, src_op.disp, dst_op.base, z80_size(inst));
		} else {
			or_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
		}
		//TODO: Cleanup flags
		//host OR always clears CF, so this stores 0 into the emulated carry
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		//logical ops use parity (CC_P) for PV rather than overflow
		if (z80_size(inst) == SZ_B) {
			setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_XOR:
		//XOR src - bitwise XOR into A
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		if (src_op.mode == MODE_REG_DIRECT) {
			xor_rr(code, src_op.base, dst_op.base, z80_size(inst));
		} else if (src_op.mode == MODE_IMMED) {
			xor_ir(code, src_op.disp, dst_op.base, z80_size(inst));
		} else {
			xor_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
		}
		//TODO: Cleanup flags
		//host XOR always clears CF, so this stores 0 into the emulated carry
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		//logical ops use parity (CC_P) for PV rather than overflow
		if (z80_size(inst) == SZ_B) {
			setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_CP:
		//CP src - compare src against A: flags as for SUB, A unchanged
		num_cycles = 4;
		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 12;
		} else if(inst->addr_mode == Z80_IMMED) {
			num_cycles += 3;
		}
		cycles(&opts->gen, num_cycles);
		translate_z80_reg(inst, &dst_op, opts);
		translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
		//cmp sets host flags without modifying the destination
		if (src_op.mode == MODE_REG_DIRECT) {
			cmp_rr(code, src_op.base, dst_op.base, z80_size(inst));
		} else if (src_op.mode == MODE_IMMED) {
			cmp_ir(code, src_op.disp, dst_op.base, z80_size(inst));
		} else {
			cmp_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
		}
		//store flags from the host cmp; N is set (CP is a subtraction)
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
		//TODO: Implement half-carry flag
		setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
		setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		break;
	case Z80_INC:
		//INC reg / INC (HL) / INC (IX/IY+d)
		num_cycles = 4;
		if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
			num_cycles += 6;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 2;
		} else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		//register form first; fall back to a memory EA for the (HL)/(IX+d)/(IY+d) forms
		translate_z80_reg(inst, &dst_op, opts);
		if (dst_op.mode == MODE_UNUSED) {
			translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
		}
		if (dst_op.mode == MODE_REG_DIRECT) {
			add_ir(code, 1, dst_op.base, z80_size(inst));
		} else {
			add_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst));
		}
		//16-bit INC leaves all flags alone; only the 8-bit form updates them
		//(carry is never touched by INC, so ZF_C is deliberately not written)
		if (z80_size(inst) == SZ_B) {
			mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
			//TODO: Implement half-carry flag
			setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		z80_save_result(opts, inst);
		break;
	case Z80_DEC:
		//DEC reg / DEC (HL) / DEC (IX/IY+d) - mirror of INC with sub and N set
		num_cycles = 4;
		if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
			num_cycles += 6;
		} else if(z80_size(inst) == SZ_W) {
			num_cycles += 2;
		} else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
			num_cycles += 4;
		}
		cycles(&opts->gen, num_cycles);
		//register form first; fall back to a memory EA for the (HL)/(IX+d)/(IY+d) forms
		translate_z80_reg(inst, &dst_op, opts);
		if (dst_op.mode == MODE_UNUSED) {
			translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
		}
		if (dst_op.mode == MODE_REG_DIRECT) {
			sub_ir(code, 1, dst_op.base, z80_size(inst));
		} else {
			sub_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst));
		}

		//16-bit DEC leaves all flags alone; only the 8-bit form updates them
		//(carry is never touched by DEC, so ZF_C is deliberately not written)
		if (z80_size(inst) == SZ_B) {
			mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
			//TODO: Implement half-carry flag
			setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		z80_save_reg(inst, opts);
		z80_save_ea(code, inst, opts);
		z80_save_result(opts, inst);
		break;
	//case Z80_DAA:
	case Z80_CPL:
		//CPL - complement accumulator; sets N (and H on real hardware)
		cycles(&opts->gen, 4);
		not_r(code, opts->regs[Z80_A], SZ_B);
		//TODO: Implement half-carry flag
		mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		break;
	case Z80_NEG:
		//NEG - two's complement negate of A; flags as for 0 - A
		cycles(&opts->gen, 8);
		neg_r(code, opts->regs[Z80_A], SZ_B);
		//TODO: Implement half-carry flag
		setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
		setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
		//N set: NEG is a subtraction
		mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		break;
	case Z80_CCF:
		//CCF - complement carry flag (flag bytes hold 0/1, so xor with 1 flips it)
		cycles(&opts->gen, 4);
		xor_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		break;
	case Z80_SCF:
		//SCF - set carry flag, clear N
		cycles(&opts->gen, 4);
		mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		break;
	case Z80_NOP:
		//immed 42 is a debug hook: dump Z80 register state and exit instead of
		//executing the NOP; otherwise immed is a run-length of consecutive NOPs
		if (inst->immed == 42) {
			call(code, opts->gen.save_context);
			call_args(code, (code_ptr)z80_print_regs_exit, 1, opts->gen.context_reg);
		} else {
			cycles(&opts->gen, 4 * inst->immed);
		}
		break;
	case Z80_HALT: {
		//HALT: spin burning 4 cycles per iteration, checking for a pending
		//interrupt or sync point each time around
		code_ptr loop_top = code->cur;
		//this isn't terribly efficient, but it's good enough for now
		cycles(&opts->gen, 4);
		check_cycles_int(&opts->gen, address);
		jmp(code, loop_top);
		break;
	}
	case Z80_DI:
		//DI - clear both interrupt enable flip-flops and push the interrupt
		//trigger out to "never" so the core only stops at the next sync point
		cycles(&opts->gen, 4);
		mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
		mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
		//with interrupts off, the cycle limit falls back to the sync cycle
		mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D);
		mov_irdisp(code, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D);
		break;
	case Z80_EI:
		//EI - set both interrupt enable flip-flops and record when interrupts
		//actually become live, then sync so int_cycle gets recomputed
		cycles(&opts->gen, 4);
		mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
		mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
		mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
		//interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles
		add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
		call(code, opts->do_sync);
		break;
	case Z80_IM:
		//IM n - store the interrupt mode (0/1/2) in the context
		cycles(&opts->gen, 8);
		mov_irdisp(code, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B);
		break;
	case Z80_RLC:
		//RLC/RLCA - rotate left circular. immed == 0 is the 4-cycle RLCA form;
		//otherwise it is the CB-prefixed (or DD/FD CB indexed) variant
		num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
		cycles(&opts->gen, num_cycles);
		if (inst->addr_mode != Z80_UNUSED) {
			translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
			translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
			cycles(&opts->gen, 1);
		} else {
			src_op.mode = MODE_UNUSED;
			translate_z80_reg(inst, &dst_op, opts);
		}
		if (dst_op.mode == MODE_REG_DIRECT) {
			rol_ir(code, 1, dst_op.base, SZ_B);
		} else {
			rol_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
		}
		//undocumented indexed variants also copy the result into a register
		if (src_op.mode == MODE_REG_DIRECT) {
			mov_rr(code, dst_op.base, src_op.base, SZ_B);
		} else if(src_op.mode == MODE_REG_DISPLACE8) {
			mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
		}
		//host rol sets CF to the rotated-out bit
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		if (inst->immed) {
			//rlca does not set these flags
			//x86 rotates leave S/Z/P untouched, so compare against 0 to refresh them
			if (dst_op.mode == MODE_REG_DIRECT) {
				cmp_ir(code, 0, dst_op.base, SZ_B);
			} else {
				cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
			}
			setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		if (inst->addr_mode != Z80_UNUSED) {
			z80_save_result(opts, inst);
			if (src_op.mode != MODE_UNUSED) {
				z80_save_reg(inst, opts);
			}
		} else {
			z80_save_reg(inst, opts);
		}
		break;
	case Z80_RL:
		//RL/RLA - rotate left through carry. immed == 0 is the 4-cycle RLA form
		num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
		cycles(&opts->gen, num_cycles);
		if (inst->addr_mode != Z80_UNUSED) {
			translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
			translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
			cycles(&opts->gen, 1);
		} else {
			src_op.mode = MODE_UNUSED;
			translate_z80_reg(inst, &dst_op, opts);
		}
		//load the emulated carry into the host carry flag, then rotate through it
		bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
		if (dst_op.mode == MODE_REG_DIRECT) {
			rcl_ir(code, 1, dst_op.base, SZ_B);
		} else {
			rcl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
		}
		//undocumented indexed variants also copy the result into a register
		if (src_op.mode == MODE_REG_DIRECT) {
			mov_rr(code, dst_op.base, src_op.base, SZ_B);
		} else if(src_op.mode == MODE_REG_DISPLACE8) {
			mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
		}
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		if (inst->immed) {
			//rla does not set these flags
			//x86 rotates leave S/Z/P untouched, so compare against 0 to refresh them
			if (dst_op.mode == MODE_REG_DIRECT) {
				cmp_ir(code, 0, dst_op.base, SZ_B);
			} else {
				cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
			}
			setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		if (inst->addr_mode != Z80_UNUSED) {
			z80_save_result(opts, inst);
			if (src_op.mode != MODE_UNUSED) {
				z80_save_reg(inst, opts);
			}
		} else {
			z80_save_reg(inst, opts);
		}
		break;
	case Z80_RRC:
		//RRC/RRCA - rotate right circular. immed == 0 is the 4-cycle RRCA form
		num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
		cycles(&opts->gen, num_cycles);
		if (inst->addr_mode != Z80_UNUSED) {
			translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
			translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
			cycles(&opts->gen, 1);
		} else {
			src_op.mode = MODE_UNUSED;
			translate_z80_reg(inst, &dst_op, opts);
		}
		if (dst_op.mode == MODE_REG_DIRECT) {
			ror_ir(code, 1, dst_op.base, SZ_B);
		} else {
			ror_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
		}
		//undocumented indexed variants also copy the result into a register
		if (src_op.mode == MODE_REG_DIRECT) {
			mov_rr(code, dst_op.base, src_op.base, SZ_B);
		} else if(src_op.mode == MODE_REG_DISPLACE8) {
			mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
		}
		//host ror sets CF to the rotated-out bit
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		if (inst->immed) {
			//rrca does not set these flags
			//x86 rotates leave S/Z/P untouched, so compare against 0 to refresh them
			if (dst_op.mode == MODE_REG_DIRECT) {
				cmp_ir(code, 0, dst_op.base, SZ_B);
			} else {
				cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
			}
			setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		if (inst->addr_mode != Z80_UNUSED) {
			z80_save_result(opts, inst);
			if (src_op.mode != MODE_UNUSED) {
				z80_save_reg(inst, opts);
			}
		} else {
			z80_save_reg(inst, opts);
		}
		break;
	case Z80_RR:
		//RR/RRA - rotate right through carry. immed == 0 is the 4-cycle RRA form
		num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
		cycles(&opts->gen, num_cycles);
		if (inst->addr_mode != Z80_UNUSED) {
			translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
			translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
			cycles(&opts->gen, 1);
		} else {
			src_op.mode = MODE_UNUSED;
			translate_z80_reg(inst, &dst_op, opts);
		}
		//load the emulated carry into the host carry flag, then rotate through it
		bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
		if (dst_op.mode == MODE_REG_DIRECT) {
			rcr_ir(code, 1, dst_op.base, SZ_B);
		} else {
			rcr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
		}
		//undocumented indexed variants also copy the result into a register
		if (src_op.mode == MODE_REG_DIRECT) {
			mov_rr(code, dst_op.base, src_op.base, SZ_B);
		} else if(src_op.mode == MODE_REG_DISPLACE8) {
			mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
		}
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		if (inst->immed) {
			//rra does not set these flags
			//x86 rotates leave S/Z/P untouched, so compare against 0 to refresh them
			if (dst_op.mode == MODE_REG_DIRECT) {
				cmp_ir(code, 0, dst_op.base, SZ_B);
			} else {
				cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
			}
			setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
			setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
			setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		}
		if (inst->addr_mode != Z80_UNUSED) {
			z80_save_result(opts, inst);
			if (src_op.mode != MODE_UNUSED) {
				z80_save_reg(inst, opts);
			}
		} else {
			z80_save_reg(inst, opts);
		}
		break;
	case Z80_SLA:
	case Z80_SLL:
		//SLA - shift left arithmetic; SLL - undocumented shift left that also
		//sets bit 0 of the result. Both are CB-prefixed only
		num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
		cycles(&opts->gen, num_cycles);
		if (inst->addr_mode != Z80_UNUSED) {
			translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
			translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
			cycles(&opts->gen, 1);
		} else {
			src_op.mode = MODE_UNUSED;
			translate_z80_reg(inst, &dst_op, opts);
		}
		if (dst_op.mode == MODE_REG_DIRECT) {
			shl_ir(code, 1, dst_op.base, SZ_B);
		} else {
			shl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
		}
		//capture the shifted-out bit before the OR below clobbers host CF
		setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
		if (inst->op == Z80_SLL) {
			//SLL shifts a 1 into bit 0
			if (dst_op.mode == MODE_REG_DIRECT) {
				or_ir(code, 1, dst_op.base, SZ_B);
			} else {
				or_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
			}
		}
		//undocumented indexed variants also copy the result into a register
		if (src_op.mode == MODE_REG_DIRECT) {
			mov_rr(code, dst_op.base, src_op.base, SZ_B);
		} else if(src_op.mode == MODE_REG_DISPLACE8) {
			mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
		}
		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
		//TODO: Implement half-carry flag
		//refresh host S/Z/P from the final result, then store them
		if (dst_op.mode == MODE_REG_DIRECT) {
			cmp_ir(code, 0, dst_op.base, SZ_B);
		} else {
			cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
		}
		setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
		setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
		setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
		if (inst->addr_mode != Z80_UNUSED) {
			z80_save_result(opts, inst);
			if (src_op.mode != MODE_UNUSED) {
				z80_save_reg(inst, opts);
			}
		} else {
			z80_save_reg(inst, opts);
		}
		break;
pavone@213 1318 case Z80_SRA:
pavone@591 1319 num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
pavone@591 1320 cycles(&opts->gen, num_cycles);
pavone@299 1321 if (inst->addr_mode != Z80_UNUSED) {
pavone@591 1322 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
pavone@591 1323 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
pavone@591 1324 cycles(&opts->gen, 1);
pavone@275 1325 } else {
pavone@302 1326 src_op.mode = MODE_UNUSED;
pavone@591 1327 translate_z80_reg(inst, &dst_op, opts);
pavone@275 1328 }
pavone@730 1329 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@730 1330 sar_ir(code, 1, dst_op.base, SZ_B);
pavone@730 1331 } else {
pavone@730 1332 sar_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
pavone@730 1333 }
pavone@730 1334 if (src_op.mode == MODE_REG_DIRECT) {
pavone@591 1335 mov_rr(code, dst_op.base, src_op.base, SZ_B);
pavone@730 1336 } else if(src_op.mode == MODE_REG_DISPLACE8) {
pavone@730 1337 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
pavone@299 1338 }
pavone@591 1339 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
pavone@591 1340 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@275 1341 //TODO: Implement half-carry flag
pavone@731 1342 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@731 1343 cmp_ir(code, 0, dst_op.base, SZ_B);
pavone@731 1344 } else {
pavone@731 1345 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1346 }
pavone@591 1347 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
pavone@591 1348 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
pavone@591 1349 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
pavone@299 1350 if (inst->addr_mode != Z80_UNUSED) {
pavone@593 1351 z80_save_result(opts, inst);
pavone@299 1352 if (src_op.mode != MODE_UNUSED) {
pavone@591 1353 z80_save_reg(inst, opts);
pavone@299 1354 }
pavone@275 1355 } else {
pavone@591 1356 z80_save_reg(inst, opts);
pavone@275 1357 }
pavone@275 1358 break;
pavone@213 1359 case Z80_SRL:
pavone@591 1360 num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
pavone@591 1361 cycles(&opts->gen, num_cycles);
pavone@299 1362 if (inst->addr_mode != Z80_UNUSED) {
pavone@591 1363 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
pavone@591 1364 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
pavone@591 1365 cycles(&opts->gen, 1);
pavone@275 1366 } else {
pavone@302 1367 src_op.mode = MODE_UNUSED;
pavone@591 1368 translate_z80_reg(inst, &dst_op, opts);
pavone@275 1369 }
pavone@730 1370 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@730 1371 shr_ir(code, 1, dst_op.base, SZ_B);
pavone@730 1372 } else {
pavone@730 1373 shr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
pavone@730 1374 }
pavone@730 1375 if (src_op.mode == MODE_REG_DIRECT) {
pavone@591 1376 mov_rr(code, dst_op.base, src_op.base, SZ_B);
pavone@730 1377 } else if(src_op.mode == MODE_REG_DISPLACE8) {
pavone@730 1378 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
pavone@299 1379 }
pavone@591 1380 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
pavone@591 1381 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@275 1382 //TODO: Implement half-carry flag
pavone@731 1383 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@731 1384 cmp_ir(code, 0, dst_op.base, SZ_B);
pavone@731 1385 } else {
pavone@731 1386 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1387 }
pavone@591 1388 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
pavone@591 1389 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
pavone@591 1390 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
pavone@299 1391 if (inst->addr_mode != Z80_UNUSED) {
pavone@593 1392 z80_save_result(opts, inst);
pavone@299 1393 if (src_op.mode != MODE_UNUSED) {
pavone@591 1394 z80_save_reg(inst, opts);
pavone@299 1395 }
pavone@275 1396 } else {
pavone@591 1397 z80_save_reg(inst, opts);
pavone@275 1398 }
pavone@310 1399 break;
pavone@286 1400 case Z80_RLD:
pavone@591 1401 cycles(&opts->gen, 8);
pavone@734 1402 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
pavone@593 1403 call(code, opts->read_8);
pavone@286 1404 //Before: (HL) = 0x12, A = 0x34
pavone@286 1405 //After: (HL) = 0x24, A = 0x31
pavone@734 1406 zreg_to_native(opts, Z80_A, opts->gen.scratch2);
pavone@591 1407 shl_ir(code, 4, opts->gen.scratch1, SZ_W);
pavone@591 1408 and_ir(code, 0xF, opts->gen.scratch2, SZ_W);
pavone@591 1409 and_ir(code, 0xFFF, opts->gen.scratch1, SZ_W);
pavone@591 1410 and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B);
pavone@591 1411 or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W);
pavone@590 1412 //opts->gen.scratch1 = 0x0124
pavone@591 1413 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
pavone@591 1414 cycles(&opts->gen, 4);
pavone@591 1415 or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
pavone@287 1416 //set flags
pavone@287 1417 //TODO: Implement half-carry flag
pavone@591 1418 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@591 1419 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
pavone@591 1420 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
pavone@591 1421 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
pavone@505 1422
pavone@734 1423 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
pavone@591 1424 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
pavone@593 1425 call(code, opts->write_8);
pavone@286 1426 break;
pavone@287 1427 case Z80_RRD:
pavone@591 1428 cycles(&opts->gen, 8);
pavone@734 1429 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
pavone@593 1430 call(code, opts->read_8);
pavone@287 1431 //Before: (HL) = 0x12, A = 0x34
pavone@287 1432 //After: (HL) = 0x41, A = 0x32
pavone@734 1433 zreg_to_native(opts, Z80_A, opts->gen.scratch2);
pavone@591 1434 ror_ir(code, 4, opts->gen.scratch1, SZ_W);
pavone@591 1435 shl_ir(code, 4, opts->gen.scratch2, SZ_W);
pavone@591 1436 and_ir(code, 0xF00F, opts->gen.scratch1, SZ_W);
pavone@591 1437 and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B);
pavone@967 1438 and_ir(code, 0xF0, opts->gen.scratch2, SZ_W);
pavone@590 1439 //opts->gen.scratch1 = 0x2001
pavone@590 1440 //opts->gen.scratch2 = 0x0040
pavone@591 1441 or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W);
pavone@590 1442 //opts->gen.scratch1 = 0x2041
pavone@591 1443 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
pavone@591 1444 cycles(&opts->gen, 4);
pavone@591 1445 shr_ir(code, 4, opts->gen.scratch1, SZ_B);
pavone@591 1446 or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
pavone@287 1447 //set flags
pavone@287 1448 //TODO: Implement half-carry flag
pavone@591 1449 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@591 1450 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
pavone@591 1451 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
pavone@591 1452 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
pavone@505 1453
pavone@734 1454 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
pavone@591 1455 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
pavone@593 1456 call(code, opts->write_8);
pavone@287 1457 break;
pavone@308 1458 case Z80_BIT: {
pavone@591 1459 num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
pavone@591 1460 cycles(&opts->gen, num_cycles);
pavone@308 1461 uint8_t bit;
pavone@308 1462 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
pavone@308 1463 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
pavone@737 1464 src_op.mode = MODE_REG_DIRECT;
pavone@308 1465 size = SZ_W;
pavone@308 1466 bit = inst->immed + 8;
pavone@308 1467 } else {
pavone@308 1468 size = SZ_B;
pavone@308 1469 bit = inst->immed;
pavone@591 1470 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
pavone@308 1471 }
pavone@239 1472 if (inst->addr_mode != Z80_REG) {
pavone@239 1473 //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4
pavone@591 1474 cycles(&opts->gen, 1);
pavone@239 1475 }
pavone@731 1476 if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1477 bt_ir(code, bit, src_op.base, size);
pavone@731 1478 } else {
pavone@731 1479 bt_irdisp(code, bit, src_op.base, src_op.disp, size);
pavone@731 1480 }
pavone@591 1481 setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_Z));
pavone@591 1482 setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_PV));
pavone@591 1483 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
pavone@307 1484 if (inst->immed == 7) {
pavone@731 1485 if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1486 cmp_ir(code, 0, src_op.base, size);
pavone@731 1487 } else {
pavone@731 1488 cmp_irdisp(code, 0, src_op.base, src_op.disp, size);
pavone@731 1489 }
pavone@591 1490 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
pavone@307 1491 } else {
pavone@591 1492 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
pavone@307 1493 }
pavone@239 1494 break;
pavone@308 1495 }
pavone@308 1496 case Z80_SET: {
pavone@591 1497 num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
pavone@591 1498 cycles(&opts->gen, num_cycles);
pavone@308 1499 uint8_t bit;
pavone@308 1500 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
pavone@308 1501 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
pavone@737 1502 src_op.mode = MODE_REG_DIRECT;
pavone@308 1503 size = SZ_W;
pavone@308 1504 bit = inst->immed + 8;
pavone@308 1505 } else {
pavone@308 1506 size = SZ_B;
pavone@308 1507 bit = inst->immed;
pavone@591 1508 translate_z80_ea(inst, &src_op, opts, READ, MODIFY);
pavone@308 1509 }
pavone@299 1510 if (inst->reg != Z80_USE_IMMED) {
pavone@591 1511 translate_z80_reg(inst, &dst_op, opts);
pavone@299 1512 }
pavone@247 1513 if (inst->addr_mode != Z80_REG) {
pavone@247 1514 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4
pavone@591 1515 cycles(&opts->gen, 1);
pavone@247 1516 }
pavone@731 1517 if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1518 bts_ir(code, bit, src_op.base, size);
pavone@731 1519 } else {
pavone@731 1520 bts_irdisp(code, bit, src_op.base, src_op.disp, size);
pavone@731 1521 }
pavone@299 1522 if (inst->reg != Z80_USE_IMMED) {
pavone@308 1523 if (size == SZ_W) {
pavone@666 1524 #ifdef X86_64
pavone@308 1525 if (dst_op.base >= R8) {
pavone@591 1526 ror_ir(code, 8, src_op.base, SZ_W);
pavone@591 1527 mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
pavone@591 1528 ror_ir(code, 8, src_op.base, SZ_W);
pavone@308 1529 } else {
pavone@666 1530 #endif
pavone@731 1531 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@731 1532 zreg_to_native(opts, inst->ea_reg, dst_op.base);
pavone@731 1533 } else {
pavone@731 1534 zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1);
pavone@731 1535 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1536 }
pavone@666 1537 #ifdef X86_64
pavone@505 1538 }
pavone@666 1539 #endif
pavone@308 1540 } else {
pavone@731 1541 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@731 1542 if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1543 mov_rr(code, src_op.base, dst_op.base, SZ_B);
pavone@731 1544 } else {
pavone@731 1545 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_B);
pavone@731 1546 }
pavone@731 1547 } else if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1548 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1549 } else {
pavone@731 1550 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
pavone@731 1551 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1552 }
pavone@308 1553 }
pavone@308 1554 }
pavone@308 1555 if ((inst->addr_mode & 0x1F) != Z80_REG) {
pavone@593 1556 z80_save_result(opts, inst);
pavone@308 1557 if (inst->reg != Z80_USE_IMMED) {
pavone@591 1558 z80_save_reg(inst, opts);
pavone@308 1559 }
pavone@308 1560 }
pavone@308 1561 break;
pavone@308 1562 }
pavone@308 1563 case Z80_RES: {
pavone@591 1564 num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
pavone@591 1565 cycles(&opts->gen, num_cycles);
pavone@308 1566 uint8_t bit;
pavone@308 1567 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
pavone@308 1568 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
pavone@737 1569 src_op.mode = MODE_REG_DIRECT;
pavone@308 1570 size = SZ_W;
pavone@308 1571 bit = inst->immed + 8;
pavone@308 1572 } else {
pavone@308 1573 size = SZ_B;
pavone@308 1574 bit = inst->immed;
pavone@591 1575 translate_z80_ea(inst, &src_op, opts, READ, MODIFY);
pavone@308 1576 }
pavone@308 1577 if (inst->reg != Z80_USE_IMMED) {
pavone@591 1578 translate_z80_reg(inst, &dst_op, opts);
pavone@308 1579 }
pavone@308 1580 if (inst->addr_mode != Z80_REG) {
pavone@308 1581 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4
pavone@591 1582 cycles(&opts->gen, 1);
pavone@308 1583 }
pavone@731 1584 if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1585 btr_ir(code, bit, src_op.base, size);
pavone@731 1586 } else {
pavone@731 1587 btr_irdisp(code, bit, src_op.base, src_op.disp, size);
pavone@731 1588 }
pavone@308 1589 if (inst->reg != Z80_USE_IMMED) {
pavone@308 1590 if (size == SZ_W) {
pavone@666 1591 #ifdef X86_64
pavone@308 1592 if (dst_op.base >= R8) {
pavone@591 1593 ror_ir(code, 8, src_op.base, SZ_W);
pavone@591 1594 mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
pavone@591 1595 ror_ir(code, 8, src_op.base, SZ_W);
pavone@308 1596 } else {
pavone@666 1597 #endif
pavone@731 1598 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@731 1599 zreg_to_native(opts, inst->ea_reg, dst_op.base);
pavone@731 1600 } else {
pavone@731 1601 zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1);
pavone@731 1602 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1603 }
pavone@666 1604 #ifdef X86_64
pavone@505 1605 }
pavone@666 1606 #endif
pavone@308 1607 } else {
pavone@731 1608 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@731 1609 if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1610 mov_rr(code, src_op.base, dst_op.base, SZ_B);
pavone@731 1611 } else {
pavone@731 1612 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_B);
pavone@731 1613 }
pavone@731 1614 } else if (src_op.mode == MODE_REG_DIRECT) {
pavone@731 1615 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1616 } else {
pavone@731 1617 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
pavone@731 1618 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
pavone@731 1619 }
pavone@308 1620 }
pavone@299 1621 }
pavone@247 1622 if (inst->addr_mode != Z80_REG) {
pavone@593 1623 z80_save_result(opts, inst);
pavone@299 1624 if (inst->reg != Z80_USE_IMMED) {
pavone@591 1625 z80_save_reg(inst, opts);
pavone@299 1626 }
pavone@247 1627 }
pavone@247 1628 break;
pavone@308 1629 }
pavone@236 1630 case Z80_JP: {
pavone@591 1631 num_cycles = 4;
pavone@506 1632 if (inst->addr_mode != Z80_REG_INDIRECT) {
pavone@591 1633 num_cycles += 6;
pavone@236 1634 } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) {
pavone@591 1635 num_cycles += 4;
pavone@236 1636 }
pavone@591 1637 cycles(&opts->gen, num_cycles);
pavone@653 1638 if (inst->addr_mode != Z80_REG_INDIRECT) {
pavone@591 1639 code_ptr call_dst = z80_get_native_address(context, inst->immed);
pavone@236 1640 if (!call_dst) {
pavone@591 1641 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
pavone@236 1642 //fake address to force large displacement
pavone@601 1643 call_dst = code->cur + 256;
pavone@236 1644 }
pavone@591 1645 jmp(code, call_dst);
pavone@236 1646 } else {
pavone@239 1647 if (inst->addr_mode == Z80_REG_INDIRECT) {
pavone@729 1648 zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1);
pavone@236 1649 } else {
pavone@591 1650 mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W);
pavone@236 1651 }
pavone@593 1652 call(code, opts->native_addr);
pavone@591 1653 jmp_r(code, opts->gen.scratch1);
pavone@236 1654 }
pavone@236 1655 break;
pavone@236 1656 }
pavone@236 1657 case Z80_JPCC: {
pavone@591 1658 cycles(&opts->gen, 7);//T States: 4,3
pavone@236 1659 uint8_t cond = CC_Z;
pavone@236 1660 switch (inst->reg)
pavone@236 1661 {
pavone@236 1662 case Z80_CC_NZ:
pavone@236 1663 cond = CC_NZ;
pavone@236 1664 case Z80_CC_Z:
pavone@591 1665 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
pavone@236 1666 break;
pavone@236 1667 case Z80_CC_NC:
pavone@236 1668 cond = CC_NZ;
pavone@236 1669 case Z80_CC_C:
pavone@591 1670 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
pavone@236 1671 break;
pavone@238 1672 case Z80_CC_PO:
pavone@238 1673 cond = CC_NZ;
pavone@238 1674 case Z80_CC_PE:
pavone@591 1675 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
pavone@238 1676 break;
pavone@238 1677 case Z80_CC_P:
pavone@367 1678 cond = CC_NZ;
pavone@238 1679 case Z80_CC_M:
pavone@591 1680 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
pavone@238 1681 break;
pavone@236 1682 }
pavone@591 1683 uint8_t *no_jump_off = code->cur+1;
pavone@591 1684 jcc(code, cond, code->cur+2);
pavone@591 1685 cycles(&opts->gen, 5);//T States: 5
pavone@236 1686 uint16_t dest_addr = inst->immed;
pavone@653 1687 code_ptr call_dst = z80_get_native_address(context, dest_addr);
pavone@236 1688 if (!call_dst) {
pavone@653 1689 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
pavone@236 1690 //fake address to force large displacement
pavone@653 1691 call_dst = code->cur + 256;
pavone@236 1692 }
pavone@653 1693 jmp(code, call_dst);
pavone@591 1694 *no_jump_off = code->cur - (no_jump_off+1);
pavone@236 1695 break;
pavone@236 1696 }
pavone@236 1697 case Z80_JR: {
pavone@591 1698 cycles(&opts->gen, 12);//T States: 4,3,5
pavone@236 1699 uint16_t dest_addr = address + inst->immed + 2;
pavone@653 1700 code_ptr call_dst = z80_get_native_address(context, dest_addr);
pavone@236 1701 if (!call_dst) {
pavone@653 1702 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
pavone@236 1703 //fake address to force large displacement
pavone@653 1704 call_dst = code->cur + 256;
pavone@236 1705 }
pavone@653 1706 jmp(code, call_dst);
pavone@236 1707 break;
pavone@236 1708 }
pavone@235 1709 case Z80_JRCC: {
pavone@591 1710 cycles(&opts->gen, 7);//T States: 4,3
pavone@235 1711 uint8_t cond = CC_Z;
pavone@235 1712 switch (inst->reg)
pavone@235 1713 {
pavone@235 1714 case Z80_CC_NZ:
pavone@235 1715 cond = CC_NZ;
pavone@235 1716 case Z80_CC_Z:
pavone@591 1717 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
pavone@235 1718 break;
pavone@235 1719 case Z80_CC_NC:
pavone@235 1720 cond = CC_NZ;
pavone@235 1721 case Z80_CC_C:
pavone@591 1722 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
pavone@235 1723 break;
pavone@235 1724 }
pavone@591 1725 uint8_t *no_jump_off = code->cur+1;
pavone@591 1726 jcc(code, cond, code->cur+2);
pavone@591 1727 cycles(&opts->gen, 5);//T States: 5
pavone@235 1728 uint16_t dest_addr = address + inst->immed + 2;
pavone@653 1729 code_ptr call_dst = z80_get_native_address(context, dest_addr);
pavone@235 1730 if (!call_dst) {
pavone@653 1731 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
pavone@235 1732 //fake address to force large displacement
pavone@653 1733 call_dst = code->cur + 256;
pavone@235 1734 }
pavone@653 1735 jmp(code, call_dst);
pavone@591 1736 *no_jump_off = code->cur - (no_jump_off+1);
pavone@235 1737 break;
pavone@235 1738 }
pavone@653 1739 case Z80_DJNZ: {
pavone@591 1740 cycles(&opts->gen, 8);//T States: 5,3
pavone@730 1741 if (opts->regs[Z80_B] >= 0) {
pavone@730 1742 sub_ir(code, 1, opts->regs[Z80_B], SZ_B);
pavone@730 1743 } else {
pavone@730 1744 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_B), SZ_B);
pavone@730 1745 }
pavone@591 1746 uint8_t *no_jump_off = code->cur+1;
pavone@591 1747 jcc(code, CC_Z, code->cur+2);
pavone@591 1748 cycles(&opts->gen, 5);//T States: 5
pavone@239 1749 uint16_t dest_addr = address + inst->immed + 2;
pavone@653 1750 code_ptr call_dst = z80_get_native_address(context, dest_addr);
pavone@239 1751 if (!call_dst) {
pavone@653 1752 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
pavone@239 1753 //fake address to force large displacement
pavone@653 1754 call_dst = code->cur + 256;
pavone@239 1755 }
pavone@653 1756 jmp(code, call_dst);
pavone@591 1757 *no_jump_off = code->cur - (no_jump_off+1);
pavone@506 1758 break;
pavone@239 1759 }
pavone@235 1760 case Z80_CALL: {
pavone@591 1761 cycles(&opts->gen, 11);//T States: 4,3,4
pavone@591 1762 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@591 1763 mov_ir(code, address + 3, opts->gen.scratch1, SZ_W);
pavone@591 1764 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
pavone@593 1765 call(code, opts->write_16_highfirst);//T States: 3, 3
pavone@653 1766 code_ptr call_dst = z80_get_native_address(context, inst->immed);
pavone@235 1767 if (!call_dst) {
pavone@653 1768 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
pavone@235 1769 //fake address to force large displacement
pavone@653 1770 call_dst = code->cur + 256;
pavone@235 1771 }
pavone@653 1772 jmp(code, call_dst);
pavone@235 1773 break;
pavone@235 1774 }
pavone@653 1775 case Z80_CALLCC: {
pavone@591 1776 cycles(&opts->gen, 10);//T States: 4,3,3 (false case)
pavone@238 1777 uint8_t cond = CC_Z;
pavone@238 1778 switch (inst->reg)
pavone@238 1779 {
pavone@238 1780 case Z80_CC_NZ:
pavone@238 1781 cond = CC_NZ;
pavone@238 1782 case Z80_CC_Z:
pavone@591 1783 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
pavone@238 1784 break;
pavone@238 1785 case Z80_CC_NC:
pavone@238 1786 cond = CC_NZ;
pavone@238 1787 case Z80_CC_C:
pavone@591 1788 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
pavone@238 1789 break;
pavone@238 1790 case Z80_CC_PO:
pavone@238 1791 cond = CC_NZ;
pavone@238 1792 case Z80_CC_PE:
pavone@591 1793 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
pavone@238 1794 break;
pavone@238 1795 case Z80_CC_P:
pavone@367 1796 cond = CC_NZ;
pavone@238 1797 case Z80_CC_M:
pavone@591 1798 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
pavone@238 1799 break;
pavone@238 1800 }
pavone@591 1801 uint8_t *no_call_off = code->cur+1;
pavone@591 1802 jcc(code, cond, code->cur+2);
pavone@591 1803 cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case
pavone@591 1804 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@591 1805 mov_ir(code, address + 3, opts->gen.scratch1, SZ_W);
pavone@591 1806 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
pavone@593 1807 call(code, opts->write_16_highfirst);//T States: 3, 3
pavone@653 1808 code_ptr call_dst = z80_get_native_address(context, inst->immed);
pavone@238 1809 if (!call_dst) {
pavone@653 1810 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
pavone@238 1811 //fake address to force large displacement
pavone@653 1812 call_dst = code->cur + 256;
pavone@238 1813 }
pavone@653 1814 jmp(code, call_dst);
pavone@591 1815 *no_call_off = code->cur - (no_call_off+1);
pavone@506 1816 break;
pavone@238 1817 }
pavone@213 1818 case Z80_RET:
pavone@591 1819 cycles(&opts->gen, 4);//T States: 4
pavone@591 1820 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
pavone@593 1821 call(code, opts->read_16);//T STates: 3, 3
pavone@591 1822 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@593 1823 call(code, opts->native_addr);
pavone@591 1824 jmp_r(code, opts->gen.scratch1);
pavone@235 1825 break;
pavone@246 1826 case Z80_RETCC: {
pavone@591 1827 cycles(&opts->gen, 5);//T States: 5
pavone@246 1828 uint8_t cond = CC_Z;
pavone@246 1829 switch (inst->reg)
pavone@246 1830 {
pavone@246 1831 case Z80_CC_NZ:
pavone@246 1832 cond = CC_NZ;
pavone@246 1833 case Z80_CC_Z:
pavone@591 1834 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
pavone@246 1835 break;
pavone@246 1836 case Z80_CC_NC:
pavone@246 1837 cond = CC_NZ;
pavone@246 1838 case Z80_CC_C:
pavone@591 1839 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
pavone@246 1840 break;
pavone@246 1841 case Z80_CC_PO:
pavone@246 1842 cond = CC_NZ;
pavone@246 1843 case Z80_CC_PE:
pavone@591 1844 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
pavone@246 1845 break;
pavone@246 1846 case Z80_CC_P:
pavone@367 1847 cond = CC_NZ;
pavone@246 1848 case Z80_CC_M:
pavone@591 1849 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
pavone@246 1850 break;
pavone@246 1851 }
pavone@591 1852 uint8_t *no_call_off = code->cur+1;
pavone@591 1853 jcc(code, cond, code->cur+2);
pavone@591 1854 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
pavone@593 1855 call(code, opts->read_16);//T STates: 3, 3
pavone@591 1856 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@593 1857 call(code, opts->native_addr);
pavone@591 1858 jmp_r(code, opts->gen.scratch1);
pavone@591 1859 *no_call_off = code->cur - (no_call_off+1);
pavone@246 1860 break;
pavone@246 1861 }
pavone@283 1862 case Z80_RETI:
pavone@283 1863 //For some systems, this may need a callback for signalling interrupt routine completion
pavone@591 1864 cycles(&opts->gen, 8);//T States: 4, 4
pavone@591 1865 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
pavone@593 1866 call(code, opts->read_16);//T STates: 3, 3
pavone@591 1867 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@593 1868 call(code, opts->native_addr);
pavone@591 1869 jmp_r(code, opts->gen.scratch1);
pavone@283 1870 break;
pavone@283 1871 case Z80_RETN:
pavone@591 1872 cycles(&opts->gen, 8);//T States: 4, 4
pavone@591 1873 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B);
pavone@591 1874 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
pavone@591 1875 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
pavone@593 1876 call(code, opts->read_16);//T STates: 3, 3
pavone@591 1877 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@593 1878 call(code, opts->native_addr);
pavone@591 1879 jmp_r(code, opts->gen.scratch1);
pavone@283 1880 break;
pavone@241 1881 case Z80_RST: {
pavone@241 1882 //RST is basically CALL to an address in page 0
pavone@591 1883 cycles(&opts->gen, 5);//T States: 5
pavone@591 1884 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
pavone@591 1885 mov_ir(code, address + 1, opts->gen.scratch1, SZ_W);
pavone@591 1886 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
pavone@593 1887 call(code, opts->write_16_highfirst);//T States: 3, 3
pavone@591 1888 code_ptr call_dst = z80_get_native_address(context, inst->immed);
pavone@241 1889 if (!call_dst) {
pavone@591 1890 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
pavone@241 1891 //fake address to force large displacement
pavone@601 1892 call_dst = code->cur + 256;
pavone@241 1893 }
pavone@591 1894 jmp(code, call_dst);
pavone@241 1895 break;
pavone@241 1896 }
pavone@284 1897 case Z80_IN:
pavone@591 1898 cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4
pavone@284 1899 if (inst->addr_mode == Z80_IMMED_INDIRECT) {
pavone@591 1900 mov_ir(code, inst->immed, opts->gen.scratch1, SZ_B);
pavone@284 1901 } else {
pavone@591 1902 mov_rr(code, opts->regs[Z80_C], opts->gen.scratch1, SZ_B);
pavone@284 1903 }
pavone@593 1904 call(code, opts->read_io);
pavone@591 1905 translate_z80_reg(inst, &dst_op, opts);
pavone@735 1906 if (dst_op.mode == MODE_REG_DIRECT) {
pavone@735 1907 mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B);
pavone@735 1908 } else {
pavone@735 1909 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
pavone@735 1910 }
pavone@591 1911 z80_save_reg(inst, opts);
pavone@284 1912 break;
pavone@284 1913 /*case Z80_INI:
pavone@213 1914 case Z80_INIR:
pavone@213 1915 case Z80_IND:
pavone@284 1916 case Z80_INDR:*/
pavone@213 1917 case Z80_OUT:
pavone@591 1918 cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4
pavone@284 1919 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) {
pavone@591 1920 mov_ir(code, inst->immed, opts->gen.scratch2, SZ_B);
pavone@284 1921 } else {
pavone@735 1922 zreg_to_native(opts, Z80_C, opts->gen.scratch2);
pavone@591 1923 mov_rr(code, opts->regs[Z80_C], opts->gen.scratch2, SZ_B);
pavone@284 1924 }
pavone@591 1925 translate_z80_reg(inst, &src_op, opts);
pavone@735 1926 if (src_op.mode == MODE_REG_DIRECT) {
pavone@735 1927 mov_rr(code, src_op.base, opts->gen.scratch1, SZ_B);
pavone@735 1928 } else if (src_op.mode == MODE_IMMED) {
pavone@735 1929 mov_ir(code, src_op.disp, opts->gen.scratch1, SZ_B);
pavone@735 1930 } else {
pavone@735 1931 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
pavone@735 1932 }
pavone@593 1933 call(code, opts->write_io);
pavone@591 1934 z80_save_reg(inst, opts);
pavone@284 1935 break;
pavone@284 1936 /*case Z80_OUTI:
pavone@213 1937 case Z80_OTIR:
pavone@213 1938 case Z80_OUTD:
pavone@213 1939 case Z80_OTDR:*/
pavone@235 1940 default: {
pavone@235 1941 char disbuf[80];
pavone@314 1942 z80_disasm(inst, disbuf, address);
pavone@259 1943 FILE * f = fopen("zram.bin", "wb");
pavone@259 1944 fwrite(context->mem_pointers[0], 1, 8 * 1024, f);
pavone@259 1945 fclose(f);
pavone@792 1946 fatal_error("unimplemented Z80 instruction: %s at %X\nZ80 RAM has been saved to zram.bin for debugging", disbuf, address);
pavone@213 1947 }
pavone@235 1948 }
pavone@235 1949 }
pavone@235 1950
pavone@627 1951 uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context)
pavone@627 1952 {
pavone@627 1953 if (!context->interp_code[opcode]) {
pavone@755 1954 if (opcode == 0xCB || (opcode >= 0xDD && (opcode & 0xF) == 0xD)) {
pavone@792 1955 fatal_error("Encountered prefix byte %X at address %X. Z80 interpeter doesn't support those yet.", opcode, context->pc);
pavone@627 1956 }
pavone@627 1957 uint8_t codebuf[8];
pavone@627 1958 memset(codebuf, 0, sizeof(codebuf));
pavone@627 1959 codebuf[0] = opcode;
pavone@627 1960 z80inst inst;
pavone@627 1961 uint8_t * after = z80_decode(codebuf, &inst);
pavone@627 1962 if (after - codebuf > 1) {
pavone@792 1963 fatal_error("Encountered multi-byte Z80 instruction at %X. Z80 interpeter doesn't support those yet.", context->pc);
pavone@627 1964 }
pavone@652 1965
pavone@652 1966 z80_options * opts = context->options;
pavone@652 1967 code_info *code = &opts->gen.code;
pavone@652 1968 check_alloc_code(code, ZMAX_NATIVE_SIZE);
pavone@652 1969 context->interp_code[opcode] = code->cur;
pavone@652 1970 translate_z80inst(&inst, context, 0, 1);
pavone@652 1971 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, pc), opts->gen.scratch1, SZ_W);
pavone@652 1972 add_ir(code, after - codebuf, opts->gen.scratch1, SZ_W);
pavone@652 1973 call(code, opts->native_addr);
pavone@652 1974 jmp_r(code, opts->gen.scratch1);
pavone@715 1975 z80_handle_deferred(context);
pavone@627 1976 }
pavone@627 1977 return context->interp_code[opcode];
pavone@627 1978 }
pavone@627 1979
pavone@652 1980 code_info z80_make_interp_stub(z80_context * context, uint16_t address)
pavone@627 1981 {
pavone@652 1982 z80_options *opts = context->options;
pavone@652 1983 code_info * code = &opts->gen.code;
pavone@652 1984 check_alloc_code(code, 32);
pavone@652 1985 code_info stub = {code->cur, NULL};
pavone@627 1986 //TODO: make this play well with the breakpoint code
pavone@652 1987 mov_ir(code, address, opts->gen.scratch1, SZ_W);
pavone@652 1988 call(code, opts->read_8);
pavone@627 1989 //normal opcode fetch is already factored into instruction timing
pavone@627 1990 //back out the base 3 cycles from a read here
pavone@627 1991 //not quite perfect, but it will have to do for now
pavone@652 1992 cycles(&opts->gen, -3);
pavone@652 1993 check_cycles_int(&opts->gen, address);
pavone@652 1994 call(code, opts->gen.save_context);
pavone@652 1995 mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W);
pavone@652 1996 push_r(code, opts->gen.context_reg);
pavone@712 1997 call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.context_reg);
pavone@664 1998 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR);
pavone@652 1999 pop_r(code, opts->gen.context_reg);
pavone@652 2000 call(code, opts->gen.load_context);
pavone@652 2001 jmp_r(code, opts->gen.scratch1);
pavone@652 2002 stub.last = code->cur;
pavone@652 2003 return stub;
pavone@627 2004 }
pavone@627 2005
pavone@627 2006
pavone@235 2007 uint8_t * z80_get_native_address(z80_context * context, uint32_t address)
pavone@235 2008 {
pavone@235 2009 native_map_slot *map;
pavone@235 2010 if (address < 0x4000) {
pavone@235 2011 address &= 0x1FFF;
pavone@235 2012 map = context->static_code_map;
pavone@235 2013 } else {
pavone@627 2014 address -= 0x4000;
pavone@627 2015 map = context->banked_code_map;
pavone@235 2016 }
pavone@268 2017 if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) {
pavone@313 2018 //dprintf("z80_get_native_address: %X NULL\n", address);
pavone@235 2019 return NULL;
pavone@235 2020 }
pavone@313 2021 //dprintf("z80_get_native_address: %X %p\n", address, map->base + map->offsets[address]);
pavone@235 2022 return map->base + map->offsets[address];
pavone@235 2023 }
pavone@235 2024
pavone@590 2025 uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address)
pavone@235 2026 {
pavone@627 2027 //TODO: Fix for addresses >= 0x4000
pavone@252 2028 if (address >= 0x4000) {
pavone@252 2029 return 0;
pavone@252 2030 }
pavone@591 2031 return opts->gen.ram_inst_sizes[0][address & 0x1FFF];
pavone@252 2032 }
pavone@252 2033
pavone@252 2034 void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size)
pavone@252 2035 {
pavone@252 2036 uint32_t orig_address = address;
pavone@235 2037 native_map_slot *map;
pavone@590 2038 z80_options * opts = context->options;
pavone@235 2039 if (address < 0x4000) {
pavone@235 2040 address &= 0x1FFF;
pavone@235 2041 map = context->static_code_map;
pavone@591 2042 opts->gen.ram_inst_sizes[0][address] = native_size;
pavone@252 2043 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7);
pavone@252 2044 context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7);
pavone@627 2045 } else {
pavone@627 2046 //HERE
pavone@627 2047 address -= 0x4000;
pavone@627 2048 map = context->banked_code_map;
pavone@235 2049 if (!map->offsets) {
pavone@627 2050 map->offsets = malloc(sizeof(int32_t) * 0xC000);
pavone@627 2051 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000);
pavone@235 2052 }
pavone@235 2053 }
pavone@235 2054 if (!map->base) {
pavone@235 2055 map->base = native_address;
pavone@235 2056 }
pavone@235 2057 map->offsets[address] = native_address - map->base;
pavone@253 2058 for(--size, orig_address++; size; --size, orig_address++) {
pavone@252 2059 address = orig_address;
pavone@252 2060 if (address < 0x4000) {
pavone@252 2061 address &= 0x1FFF;
pavone@252 2062 map = context->static_code_map;
pavone@252 2063 } else {
pavone@627 2064 address -= 0x4000;
pavone@627 2065 map = context->banked_code_map;
pavone@252 2066 }
pavone@252 2067 if (!map->offsets) {
pavone@627 2068 map->offsets = malloc(sizeof(int32_t) * 0xC000);
pavone@627 2069 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000);
pavone@252 2070 }
pavone@252 2071 map->offsets[address] = EXTENSION_WORD;
pavone@252 2072 }
pavone@252 2073 }
pavone@252 2074
pavone@252 2075 #define INVALID_INSTRUCTION_START 0xFEEDFEED
pavone@252 2076
pavone@252 2077 uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address)
pavone@252 2078 {
pavone@627 2079 //TODO: Fixme for address >= 0x4000
pavone@252 2080 if (!static_code_map->base || address >= 0x4000) {
pavone@252 2081 return INVALID_INSTRUCTION_START;
pavone@252 2082 }
pavone@252 2083 address &= 0x1FFF;
pavone@252 2084 if (static_code_map->offsets[address] == INVALID_OFFSET) {
pavone@252 2085 return INVALID_INSTRUCTION_START;
pavone@252 2086 }
pavone@252 2087 while (static_code_map->offsets[address] == EXTENSION_WORD) {
pavone@252 2088 --address;
pavone@252 2089 address &= 0x1FFF;
pavone@252 2090 }
pavone@252 2091 return address;
pavone@252 2092 }
pavone@252 2093
pavone@252 2094 z80_context * z80_handle_code_write(uint32_t address, z80_context * context)
pavone@252 2095 {
pavone@252 2096 uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address);
pavone@252 2097 if (inst_start != INVALID_INSTRUCTION_START) {
pavone@591 2098 code_ptr dst = z80_get_native_address(context, inst_start);
pavone@899 2099 code_info code = {dst, dst+32, 0};
pavone@591 2100 z80_options * opts = context->options;
pavone@668 2101 dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code.cur, inst_start, address);
pavone@591 2102 mov_ir(&code, inst_start, opts->gen.scratch1, SZ_D);
pavone@593 2103 call(&code, opts->retrans_stub);
pavone@252 2104 }
pavone@252 2105 return context;
pavone@252 2106 }
pavone@252 2107
pavone@264 2108 uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address)
pavone@264 2109 {
pavone@264 2110 uint8_t * addr = z80_get_native_address(context, address);
pavone@264 2111 if (!addr) {
pavone@264 2112 translate_z80_stream(context, address);
pavone@264 2113 addr = z80_get_native_address(context, address);
pavone@264 2114 if (!addr) {
pavone@264 2115 printf("Failed to translate %X to native code\n", address);
pavone@264 2116 }
pavone@264 2117 }
pavone@264 2118 return addr;
pavone@264 2119 }
pavone@264 2120
pavone@266 2121 void z80_handle_deferred(z80_context * context)
pavone@266 2122 {
pavone@590 2123 z80_options * opts = context->options;
pavone@591 2124 process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address);
pavone@591 2125 if (opts->gen.deferred) {
pavone@591 2126 translate_z80_stream(context, opts->gen.deferred->address);
pavone@266 2127 }
pavone@266 2128 }
pavone@266 2129
pavone@559 2130 extern void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) asm("z80_retranslate_inst");
pavone@390 2131 void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start)
pavone@252 2132 {
pavone@266 2133 char disbuf[80];
pavone@590 2134 z80_options * opts = context->options;
pavone@252 2135 uint8_t orig_size = z80_get_native_inst_size(opts, address);
pavone@591 2136 code_info *code = &opts->gen.code;
pavone@653 2137 uint8_t *after, *inst = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
pavone@252 2138 z80inst instbuf;
pavone@268 2139 dprintf("Retranslating code at Z80 address %X, native address %p\n", address, orig_start);
pavone@252 2140 after = z80_decode(inst, &instbuf);
pavone@268 2141 #ifdef DO_DEBUG_PRINT
pavone@314 2142 z80_disasm(&instbuf, disbuf, address);
pavone@266 2143 if (instbuf.op == Z80_NOP) {
pavone@266 2144 printf("%X\t%s(%d)\n", address, disbuf, instbuf.immed);
pavone@266 2145 } else {
pavone@266 2146 printf("%X\t%s\n", address, disbuf);
pavone@267 2147 }
pavone@268 2148 #endif
pavone@252 2149 if (orig_size != ZMAX_NATIVE_SIZE) {
pavone@597 2150 check_alloc_code(code, ZMAX_NATIVE_SIZE);
pavone@591 2151 code_ptr start = code->cur;
pavone@591 2152 deferred_addr * orig_deferred = opts->gen.deferred;
pavone@652 2153 translate_z80inst(&instbuf, context, address, 0);
pavone@644 2154 /*
pavone@252 2155 if ((native_end - dst) <= orig_size) {
pavone@264 2156 uint8_t * native_next = z80_get_native_address(context, address + after-inst);
pavone@264 2157 if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) {
pavone@591 2158 remove_deferred_until(&opts->gen.deferred, orig_deferred);
pavone@627 2159 native_end = translate_z80inst(&instbuf, orig_start, context, address, 0);
pavone@266 2160 if (native_next == orig_start + orig_size && (native_next-native_end) < 2) {
pavone@264 2161 while (native_end < orig_start + orig_size) {
pavone@264 2162 *(native_end++) = 0x90; //NOP
pavone@264 2163 }
pavone@264 2164 } else {
pavone@264 2165 jmp(native_end, native_next);
pavone@264 2166 }
pavone@266 2167 z80_handle_deferred(context);
pavone@264 2168 return orig_start;
pavone@252 2169 }
pavone@591 2170 }*/
pavone@591 2171 z80_map_native_address(context, address, start, after-inst, ZMAX_NATIVE_SIZE);
pavone@591 2172 code_info tmp_code = {orig_start, orig_start + 16};
pavone@591 2173 jmp(&tmp_code, start);
pavone@597 2174 tmp_code = *code;
pavone@597 2175 code->cur = start + ZMAX_NATIVE_SIZE;
pavone@283 2176 if (!z80_is_terminal(&instbuf)) {
pavone@597 2177 jmp(&tmp_code, z80_get_native_address_trans(context, address + after-inst));
pavone@264 2178 }
pavone@266 2179 z80_handle_deferred(context);
pavone@591 2180 return start;
pavone@252 2181 } else {
pavone@591 2182 code_info tmp_code = *code;
pavone@591 2183 code->cur = orig_start;
pavone@591 2184 code->last = orig_start + ZMAX_NATIVE_SIZE;
pavone@652 2185 translate_z80inst(&instbuf, context, address, 0);
pavone@601 2186 code_info tmp2 = *code;
pavone@601 2187 *code = tmp_code;
pavone@283 2188 if (!z80_is_terminal(&instbuf)) {
pavone@652 2189
pavone@601 2190 jmp(&tmp2, z80_get_native_address_trans(context, address + after-inst));
pavone@252 2191 }
pavone@266 2192 z80_handle_deferred(context);
pavone@252 2193 return orig_start;
pavone@252 2194 }
pavone@235 2195 }
pavone@235 2196
pavone@235 2197 void translate_z80_stream(z80_context * context, uint32_t address)
pavone@235 2198 {
pavone@235 2199 char disbuf[80];
pavone@235 2200 if (z80_get_native_address(context, address)) {
pavone@235 2201 return;
pavone@235 2202 }
pavone@590 2203 z80_options * opts = context->options;
pavone@505 2204 uint32_t start_address = address;
pavone@627 2205
pavone@653 2206 do
pavone@235 2207 {
pavone@235 2208 z80inst inst;
pavone@268 2209 dprintf("translating Z80 code at address %X\n", address);
pavone@235 2210 do {
pavone@506 2211 uint8_t * existing = z80_get_native_address(context, address);
pavone@506 2212 if (existing) {
pavone@591 2213 jmp(&opts->gen.code, existing);
pavone@235 2214 break;
pavone@235 2215 }
pavone@653 2216 uint8_t * encoded, *next;
pavone@653 2217 encoded = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
pavone@653 2218 if (!encoded) {
pavone@653 2219 code_info stub = z80_make_interp_stub(context, address);
pavone@653 2220 z80_map_native_address(context, address, stub.cur, 1, stub.last - stub.cur);
pavone@235 2221 break;
pavone@235 2222 }
pavone@601 2223 //make sure prologue is in a contiguous chunk of code
pavone@601 2224 check_code_prologue(&opts->gen.code);
pavone@235 2225 next = z80_decode(encoded, &inst);
pavone@268 2226 #ifdef DO_DEBUG_PRINT
pavone@314 2227 z80_disasm(&inst, disbuf, address);
pavone@235 2228 if (inst.op == Z80_NOP) {
pavone@235 2229 printf("%X\t%s(%d)\n", address, disbuf, inst.immed);
pavone@235 2230 } else {
pavone@235 2231 printf("%X\t%s\n", address, disbuf);
pavone@235 2232 }
pavone@268 2233 #endif
pavone@591 2234 code_ptr start = opts->gen.code.cur;
pavone@652 2235 translate_z80inst(&inst, context, address, 0);
pavone@591 2236 z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start);
pavone@235 2237 address += next-encoded;
pavone@255 2238 address &= 0xFFFF;
pavone@506 2239 } while (!z80_is_terminal(&inst));
pavone@591 2240 process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address);
pavone@591 2241 if (opts->gen.deferred) {
pavone@591 2242 address = opts->gen.deferred->address;
pavone@506 2243 dprintf("defferred address: %X\n", address);
pavone@792 2244 }
pavone@653 2245 } while (opts->gen.deferred);
pavone@213 2246 }
pavone@213 2247
pavone@819 2248 void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, memmap_chunk const * io_chunks, uint32_t num_io_chunks, uint32_t clock_divider)
pavone@213 2249 {
pavone@590 2250 memset(options, 0, sizeof(*options));
pavone@590 2251
pavone@653 2252 options->gen.memmap = chunks;
pavone@653 2253 options->gen.memmap_chunks = num_chunks;
pavone@590 2254 options->gen.address_size = SZ_W;
pavone@590 2255 options->gen.address_mask = 0xFFFF;
pavone@590 2256 options->gen.max_address = 0x10000;
pavone@590 2257 options->gen.bus_cycles = 3;
pavone@667 2258 options->gen.clock_divider = clock_divider;
pavone@590 2259 options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers);
pavone@590 2260 options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags);
pavone@620 2261 options->gen.ram_flags_shift = 7;
pavone@590 2262
pavone@235 2263 options->flags = 0;
pavone@666 2264 #ifdef X86_64
pavone@235 2265 options->regs[Z80_B] = BH;
pavone@235 2266 options->regs[Z80_C] = RBX;
pavone@235 2267 options->regs[Z80_D] = CH;
pavone@235 2268 options->regs[Z80_E] = RCX;
pavone@235 2269 options->regs[Z80_H] = AH;
pavone@235 2270 options->regs[Z80_L] = RAX;
pavone@235 2271 options->regs[Z80_IXH] = DH;
pavone@235 2272 options->regs[Z80_IXL] = RDX;
pavone@235 2273 options->regs[Z80_IYH] = -1;
pavone@239 2274 options->regs[Z80_IYL] = R8;
pavone@235 2275 options->regs[Z80_I] = -1;
pavone@235 2276 options->regs[Z80_R] = -1;
pavone@235 2277 options->regs[Z80_A] = R10;
pavone@235 2278 options->regs[Z80_BC] = RBX;
pavone@235 2279 options->regs[Z80_DE] = RCX;
pavone@235 2280 options->regs[Z80_HL] = RAX;
pavone@235 2281 options->regs[Z80_SP] = R9;
pavone@235 2282 options->regs[Z80_AF] = -1;
pavone@235 2283 options->regs[Z80_IX] = RDX;
pavone@235 2284 options->regs[Z80_IY] = R8;
pavone@667 2285
pavone@666 2286 options->gen.scratch1 = R13;
pavone@666 2287 options->gen.scratch2 = R14;
pavone@666 2288 #else
pavone@666 2289 memset(options->regs, -1, sizeof(options->regs));
pavone@666 2290 options->regs[Z80_A] = RAX;
pavone@729 2291 options->regs[Z80_SP] = RBX;
pavone@667 2292
pavone@666 2293 options->gen.scratch1 = RCX;
pavone@666 2294 options->gen.scratch2 = RDX;
pavone@666 2295 #endif
pavone@667 2296
pavone@590 2297 options->gen.context_reg = RSI;
pavone@590 2298 options->gen.cycles = RBP;
pavone@590 2299 options->gen.limit = RDI;
pavone@590 2300
pavone@590 2301 options->gen.native_code_map = malloc(sizeof(native_map_slot));
pavone@590 2302 memset(options->gen.native_code_map, 0, sizeof(native_map_slot));
pavone@590 2303 options->gen.deferred = NULL;
pavone@591 2304 options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000 + sizeof(uint8_t *));
pavone@591 2305 options->gen.ram_inst_sizes[0] = (uint8_t *)(options->gen.ram_inst_sizes + 1);
pavone@591 2306 memset(options->gen.ram_inst_sizes[0], 0, sizeof(uint8_t) * 0x2000);
pavone@590 2307
pavone@590 2308 code_info *code = &options->gen.code;
pavone@590 2309 init_code_info(code);
pavone@590 2310
pavone@590 2311 options->save_context_scratch = code->cur;
pavone@590 2312 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
pavone@590 2313 mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_W);
pavone@590 2314
pavone@590 2315 options->gen.save_context = code->cur;
pavone@590 2316 for (int i = 0; i <= Z80_A; i++)
pavone@590 2317 {
pavone@590 2318 int reg;
pavone@590 2319 uint8_t size;
pavone@590 2320 if (i < Z80_I) {
pavone@594 2321 reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0);
pavone@590 2322 size = SZ_W;
pavone@590 2323 } else {
pavone@590 2324 reg = i;
pavone@590 2325 size = SZ_B;
pavone@652 2326 }
pavone@590 2327 if (options->regs[reg] >= 0) {
pavone@590 2328 mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size);
pavone@590 2329 }
pavone@594 2330 if (size == SZ_W) {
pavone@594 2331 i++;
pavone@594 2332 }
pavone@590 2333 }
pavone@590 2334 if (options->regs[Z80_SP] >= 0) {
pavone@590 2335 mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W);
pavone@590 2336 }
pavone@590 2337 mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
pavone@590 2338 mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D);
pavone@593 2339 retn(code);
pavone@590 2340
pavone@590 2341 options->load_context_scratch = code->cur;
pavone@590 2342 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W);
pavone@590 2343 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch2, SZ_W);
pavone@590 2344 options->gen.load_context = code->cur;
pavone@590 2345 for (int i = 0; i <= Z80_A; i++)
pavone@590 2346 {
pavone@590 2347 int reg;
pavone@590 2348 uint8_t size;
pavone@590 2349 if (i < Z80_I) {
pavone@594 2350 reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0);
pavone@590 2351 size = SZ_W;
pavone@590 2352 } else {
pavone@590 2353 reg = i;
pavone@590 2354 size = SZ_B;
pavone@590 2355 }
pavone@590 2356 if (options->regs[reg] >= 0) {
pavone@590 2357 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + i, options->regs[reg], size);
pavone@590 2358 }
pavone@594 2359 if (size == SZ_W) {
pavone@594 2360 i++;
pavone@594 2361 }
pavone@590 2362 }
pavone@590 2363 if (options->regs[Z80_SP] >= 0) {
pavone@590 2364 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W);
pavone@590 2365 }
pavone@590 2366 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D);
pavone@590 2367 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D);
pavone@593 2368 retn(code);
pavone@593 2369
pavone@593 2370 options->native_addr = code->cur;
pavone@593 2371 call(code, options->gen.save_context);
pavone@593 2372 push_r(code, options->gen.context_reg);
pavone@657 2373 movzx_rr(code, options->gen.scratch1, options->gen.scratch1, SZ_W, SZ_D);
pavone@657 2374 call_args(code, (code_ptr)z80_get_native_address_trans, 2, options->gen.context_reg, options->gen.scratch1);
pavone@593 2375 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
pavone@593 2376 pop_r(code, options->gen.context_reg);
pavone@593 2377 call(code, options->gen.load_context);
pavone@593 2378 retn(code);
pavone@590 2379
pavone@895 2380 uint32_t tmp_stack_off;
pavone@895 2381
pavone@590 2382 options->gen.handle_cycle_limit = code->cur;
pavone@900 2383 //calculate call/stack adjust size
pavone@900 2384 sub_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
pavone@900 2385 call_noalign(code, options->gen.handle_cycle_limit);
pavone@900 2386 uint32_t call_adjust_size = code->cur - options->gen.handle_cycle_limit;
pavone@900 2387 code->cur = options->gen.handle_cycle_limit;
pavone@900 2388
pavone@590 2389 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
pavone@590 2390 code_ptr no_sync = code->cur+1;
pavone@590 2391 jcc(code, CC_B, no_sync);
pavone@590 2392 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, pc), SZ_W);
pavone@590 2393 call(code, options->save_context_scratch);
pavone@895 2394 tmp_stack_off = code->stack_off;
pavone@590 2395 pop_r(code, RAX); //return address in read/write func
pavone@895 2396 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
pavone@590 2397 pop_r(code, RBX); //return address in translated code
pavone@895 2398 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
pavone@900 2399 sub_ir(code, call_adjust_size, RAX, SZ_PTR); //adjust return address to point to the call + stack adjust that got us here
pavone@590 2400 mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
pavone@590 2401 mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
pavone@665 2402 restore_callee_save_regs(code);
pavone@598 2403 *no_sync = code->cur - (no_sync + 1);
pavone@590 2404 //return to caller of z80_run
pavone@590 2405 retn(code);
pavone@895 2406 code->stack_off = tmp_stack_off;
pavone@652 2407
pavone@594 2408 options->gen.handle_code_write = (code_ptr)z80_handle_code_write;
pavone@590 2409
pavone@594 2410 options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc);
pavone@591 2411 options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc);
pavone@590 2412
pavone@895 2413 code_ptr skip_int = code->cur;
pavone@900 2414 //calculate adjust size
pavone@900 2415 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
pavone@900 2416 uint32_t adjust_size = code->cur - skip_int;
pavone@900 2417 code->cur = skip_int;
pavone@900 2418
pavone@895 2419 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
pavone@895 2420 code_ptr skip_sync = code->cur + 1;
pavone@895 2421 jcc(code, CC_B, skip_sync);
pavone@895 2422 //save PC
pavone@895 2423 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, pc), SZ_D);
pavone@895 2424 options->do_sync = code->cur;
pavone@895 2425 call(code, options->gen.save_context);
pavone@895 2426 tmp_stack_off = code->stack_off;
pavone@895 2427 //pop return address off the stack and save for resume later
pavone@898 2428 //pop_rind(code, options->gen.context_reg);
pavone@898 2429 pop_r(code, RAX);
pavone@900 2430 add_ir(code, adjust_size, RAX, SZ_PTR);
pavone@895 2431 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
pavone@898 2432 mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
pavone@898 2433
pavone@895 2434 //restore callee saved registers
pavone@895 2435 restore_callee_save_regs(code);
pavone@895 2436 //return to caller of z80_run
pavone@895 2437 *skip_sync = code->cur - (skip_sync+1);
pavone@895 2438 retn(code);
pavone@895 2439 code->stack_off = tmp_stack_off;
pavone@895 2440
pavone@590 2441 options->gen.handle_cycle_limit_int = code->cur;
pavone@590 2442 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D);
pavone@590 2443 jcc(code, CC_B, skip_int);
pavone@590 2444 //set limit to the cycle limit
pavone@590 2445 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D);
pavone@590 2446 //disable interrupts
pavone@591 2447 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
pavone@591 2448 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
pavone@590 2449 cycles(&options->gen, 7);
pavone@590 2450 //save return address (in scratch1) to Z80 stack
pavone@590 2451 sub_ir(code, 2, options->regs[Z80_SP], SZ_W);
pavone@590 2452 mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
pavone@590 2453 //we need to do check_cycles and cycles outside of the write_8 call
pavone@590 2454 //so that the stack has the correct depth if we need to return to C
pavone@590 2455 //for a synchronization
pavone@590 2456 check_cycles(&options->gen);
pavone@590 2457 cycles(&options->gen, 3);
pavone@590 2458 //save word to write before call to write_8_noinc
pavone@590 2459 push_r(code, options->gen.scratch1);
pavone@590 2460 call(code, options->write_8_noinc);
pavone@590 2461 //restore word to write
pavone@590 2462 pop_r(code, options->gen.scratch1);
pavone@590 2463 //write high byte to SP+1
pavone@590 2464 mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
pavone@590 2465 add_ir(code, 1, options->gen.scratch2, SZ_W);
pavone@590 2466 shr_ir(code, 8, options->gen.scratch1, SZ_W);
pavone@590 2467 check_cycles(&options->gen);
pavone@590 2468 cycles(&options->gen, 3);
pavone@590 2469 call(code, options->write_8_noinc);
pavone@590 2470 //dispose of return address as we'll be jumping somewhere else
pavone@895 2471 add_ir(code, 16, RSP, SZ_PTR);
pavone@590 2472 //TODO: Support interrupt mode 0 and 2
pavone@590 2473 mov_ir(code, 0x38, options->gen.scratch1, SZ_W);
pavone@593 2474 call(code, options->native_addr);
pavone@663 2475 mov_rrind(code, options->gen.scratch1, options->gen.context_reg, SZ_PTR);
pavone@895 2476 tmp_stack_off = code->stack_off;
pavone@665 2477 restore_callee_save_regs(code);
pavone@663 2478 //return to caller of z80_run to sync
pavone@663 2479 retn(code);
pavone@895 2480 code->stack_off = tmp_stack_off;
pavone@593 2481
pavone@819 2482 //HACK
pavone@819 2483 options->gen.address_size = SZ_D;
pavone@819 2484 options->gen.address_mask = 0xFF;
pavone@819 2485 options->read_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, READ_8, NULL);
pavone@819 2486 options->write_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, WRITE_8, NULL);
pavone@819 2487 options->gen.address_size = SZ_W;
pavone@819 2488 options->gen.address_mask = 0xFFFF;
pavone@652 2489
pavone@594 2490 options->read_16 = code->cur;
pavone@594 2491 cycles(&options->gen, 3);
pavone@594 2492 check_cycles(&options->gen);
pavone@594 2493 //TODO: figure out how to handle the extra wait state for word reads to bank area
pavone@657 2494 //may also need special handling to avoid too much stack depth when access is blocked
pavone@594 2495 push_r(code, options->gen.scratch1);
pavone@594 2496 call(code, options->read_8_noinc);
pavone@594 2497 mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B);
pavone@735 2498 #ifndef X86_64
pavone@735 2499 //scratch 2 is a caller save register in 32-bit builds and may be clobbered by something called from the read8 fun
pavone@735 2500 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_B);
pavone@735 2501 #endif
pavone@594 2502 pop_r(code, options->gen.scratch1);
pavone@594 2503 add_ir(code, 1, options->gen.scratch1, SZ_W);
pavone@594 2504 cycles(&options->gen, 3);
pavone@594 2505 check_cycles(&options->gen);
pavone@594 2506 call(code, options->read_8_noinc);
pavone@594 2507 shl_ir(code, 8, options->gen.scratch1, SZ_W);
pavone@735 2508 #ifdef X86_64
pavone@594 2509 mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B);
pavone@735 2510 #else
pavone@735 2511 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch1, SZ_B);
pavone@735 2512 #endif
pavone@594 2513 retn(code);
pavone@652 2514
pavone@594 2515 options->write_16_highfirst = code->cur;
pavone@594 2516 cycles(&options->gen, 3);
pavone@594 2517 check_cycles(&options->gen);
pavone@594 2518 push_r(code, options->gen.scratch2);
pavone@594 2519 push_r(code, options->gen.scratch1);
pavone@594 2520 add_ir(code, 1, options->gen.scratch2, SZ_W);
pavone@594 2521 shr_ir(code, 8, options->gen.scratch1, SZ_W);
pavone@594 2522 call(code, options->write_8_noinc);
pavone@594 2523 pop_r(code, options->gen.scratch1);
pavone@594 2524 pop_r(code, options->gen.scratch2);
pavone@594 2525 cycles(&options->gen, 3);
pavone@594 2526 check_cycles(&options->gen);
pavone@594 2527 //TODO: Check if we can get away with TCO here
pavone@594 2528 call(code, options->write_8_noinc);
pavone@594 2529 retn(code);
pavone@652 2530
pavone@594 2531 options->write_16_lowfirst = code->cur;
pavone@594 2532 cycles(&options->gen, 3);
pavone@594 2533 check_cycles(&options->gen);
pavone@594 2534 push_r(code, options->gen.scratch2);
pavone@594 2535 push_r(code, options->gen.scratch1);
pavone@594 2536 call(code, options->write_8_noinc);
pavone@594 2537 pop_r(code, options->gen.scratch1);
pavone@594 2538 pop_r(code, options->gen.scratch2);
pavone@594 2539 add_ir(code, 1, options->gen.scratch2, SZ_W);
pavone@594 2540 shr_ir(code, 8, options->gen.scratch1, SZ_W);
pavone@594 2541 cycles(&options->gen, 3);
pavone@594 2542 check_cycles(&options->gen);
pavone@594 2543 //TODO: Check if we can get away with TCO here
pavone@594 2544 call(code, options->write_8_noinc);
pavone@594 2545 retn(code);
pavone@593 2546
pavone@593 2547 options->retrans_stub = code->cur;
pavone@895 2548 tmp_stack_off = code->stack_off;
pavone@899 2549 //calculate size of patch
pavone@899 2550 mov_ir(code, 0x7FFF, options->gen.scratch1, SZ_D);
pavone@899 2551 code->stack_off += sizeof(void *);
pavone@899 2552 if (code->stack_off & 0xF) {
pavone@899 2553 sub_ir(code, 16 - (code->stack_off & 0xF), RSP, SZ_PTR);
pavone@899 2554 }
pavone@899 2555 call_noalign(code, options->retrans_stub);
pavone@899 2556 uint32_t patch_size = code->cur - options->retrans_stub;
pavone@899 2557 code->cur = options->retrans_stub;
pavone@899 2558 code->stack_off = tmp_stack_off;
pavone@899 2559
pavone@593 2560 //pop return address
pavone@593 2561 pop_r(code, options->gen.scratch2);
pavone@895 2562 add_ir(code, 16-sizeof(void*), RSP, SZ_PTR);
pavone@895 2563 code->stack_off = tmp_stack_off;
pavone@593 2564 call(code, options->gen.save_context);
pavone@593 2565 //adjust pointer before move and call instructions that got us here
pavone@899 2566 sub_ir(code, patch_size, options->gen.scratch2, SZ_PTR);
pavone@593 2567 push_r(code, options->gen.context_reg);
pavone@657 2568 call_args(code, (code_ptr)z80_retranslate_inst, 3, options->gen.scratch1, options->gen.context_reg, options->gen.scratch2);
pavone@593 2569 pop_r(code, options->gen.context_reg);
pavone@593 2570 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
pavone@593 2571 call(code, options->gen.load_context);
pavone@593 2572 jmp_r(code, options->gen.scratch1);
pavone@593 2573
pavone@593 2574 options->run = (z80_run_fun)code->cur;
pavone@895 2575 tmp_stack_off = code->stack_off;
pavone@665 2576 save_callee_save_regs(code);
pavone@729 2577 #ifdef X86_64
pavone@593 2578 mov_rr(code, RDI, options->gen.context_reg, SZ_PTR);
pavone@729 2579 #else
pavone@729 2580 mov_rdispr(code, RSP, 5 * sizeof(int32_t), options->gen.context_reg, SZ_PTR);
pavone@729 2581 #endif
pavone@594 2582 call(code, options->load_context_scratch);
pavone@593 2583 cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
pavone@593 2584 code_ptr no_extra = code->cur+1;
pavone@593 2585 jcc(code, CC_Z, no_extra);
pavone@898 2586 sub_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
pavone@593 2587 push_rdisp(code, options->gen.context_reg, offsetof(z80_context, extra_pc));
pavone@593 2588 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
pavone@593 2589 *no_extra = code->cur - (no_extra + 1);
pavone@593 2590 jmp_rind(code, options->gen.context_reg);
pavone@895 2591 code->stack_off = tmp_stack_off;
pavone@213 2592 }
pavone@235 2593
pavone@590 2594 void init_z80_context(z80_context * context, z80_options * options)
pavone@235 2595 {
pavone@235 2596 memset(context, 0, sizeof(*context));
pavone@360 2597 context->static_code_map = malloc(sizeof(*context->static_code_map));
pavone@259 2598 context->static_code_map->base = NULL;
pavone@235 2599 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000);
pavone@235 2600 memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000);
pavone@627 2601 context->banked_code_map = malloc(sizeof(native_map_slot));
pavone@627 2602 memset(context->banked_code_map, 0, sizeof(native_map_slot));
pavone@235 2603 context->options = options;
pavone@668 2604 context->int_cycle = CYCLE_NEVER;
pavone@668 2605 context->int_pulse_start = CYCLE_NEVER;
pavone@668 2606 context->int_pulse_end = CYCLE_NEVER;
pavone@235 2607 }
pavone@235 2608
pavone@668 2609 void z80_run(z80_context * context, uint32_t target_cycle)
pavone@235 2610 {
pavone@668 2611 if (context->reset || context->busack) {
pavone@668 2612 context->current_cycle = target_cycle;
pavone@668 2613 } else {
pavone@668 2614 if (context->current_cycle < target_cycle) {
pavone@668 2615 //busreq is sampled at the end of an m-cycle
pavone@668 2616 //we can approximate that by running for a single m-cycle after a bus request
pavone@668 2617 context->sync_cycle = context->busreq ? context->current_cycle + 3*context->options->gen.clock_divider : target_cycle;
pavone@668 2618 if (!context->native_pc) {
pavone@668 2619 context->native_pc = z80_get_native_address_trans(context, context->pc);
pavone@668 2620 }
pavone@668 2621 while (context->current_cycle < context->sync_cycle)
pavone@668 2622 {
pavone@668 2623 if (context->int_pulse_end < context->current_cycle || context->int_pulse_end == CYCLE_NEVER) {
pavone@668 2624 z80_next_int_pulse(context);
pavone@668 2625 }
pavone@668 2626 if (context->iff1) {
pavone@668 2627 context->int_cycle = context->int_pulse_start < context->int_enable_cycle ? context->int_enable_cycle : context->int_pulse_start;
pavone@668 2628 } else {
pavone@668 2629 context->int_cycle = CYCLE_NEVER;
pavone@668 2630 }
pavone@668 2631 context->target_cycle = context->sync_cycle < context->int_cycle ? context->sync_cycle : context->int_cycle;
pavone@670 2632 dprintf("Running Z80 from cycle %d to cycle %d. Int cycle: %d (%d - %d)\n", context->current_cycle, context->sync_cycle, context->int_cycle, context->int_pulse_start, context->int_pulse_end);
pavone@668 2633 context->options->run(context);
pavone@668 2634 dprintf("Z80 ran to cycle %d\n", context->current_cycle);
pavone@668 2635 }
pavone@668 2636 if (context->busreq) {
pavone@668 2637 context->busack = 1;
pavone@668 2638 context->current_cycle = target_cycle;
pavone@668 2639 }
pavone@668 2640 }
pavone@668 2641 }
pavone@668 2642 }
pavone@668 2643
pavone@884 2644 void z80_options_free(z80_options *opts)
pavone@884 2645 {
pavone@884 2646 free(opts->gen.native_code_map);
pavone@884 2647 free(opts->gen.ram_inst_sizes);
pavone@884 2648 free(opts);
pavone@884 2649 }
pavone@884 2650
pavone@668 2651 void z80_assert_reset(z80_context * context, uint32_t cycle)
pavone@668 2652 {
pavone@668 2653 z80_run(context, cycle);
pavone@668 2654 context->reset = 1;
pavone@668 2655 }
pavone@668 2656
pavone@668 2657 void z80_clear_reset(z80_context * context, uint32_t cycle)
pavone@668 2658 {
pavone@668 2659 z80_run(context, cycle);
pavone@668 2660 if (context->reset) {
pavone@668 2661 //TODO: Handle case where reset is not asserted long enough
pavone@701 2662 context->im = 0;
pavone@701 2663 context->iff1 = context->iff2 = 0;
pavone@668 2664 context->native_pc = NULL;
pavone@701 2665 context->extra_pc = NULL;
pavone@668 2666 context->pc = 0;
pavone@668 2667 context->reset = 0;
pavone@676 2668 if (context->busreq) {
pavone@676 2669 //TODO: Figure out appropriate delay
pavone@676 2670 context->busack = 1;
pavone@676 2671 }
pavone@668 2672 }
pavone@668 2673 }
pavone@668 2674
pavone@668 2675 void z80_assert_busreq(z80_context * context, uint32_t cycle)
pavone@668 2676 {
pavone@668 2677 z80_run(context, cycle);
pavone@668 2678 context->busreq = 1;
pavone@854 2679 //this is an imperfect aproximation since most M-cycles take less tstates than the max
pavone@854 2680 //and a short 3-tstate m-cycle can take an unbounded number due to wait states
pavone@854 2681 if (context->current_cycle - cycle > MAX_MCYCLE_LENGTH * context->options->gen.clock_divider) {
pavone@854 2682 context->busack = 1;
pavone@854 2683 }
pavone@701 2684 }
pavone@668 2685
pavone@668 2686 void z80_clear_busreq(z80_context * context, uint32_t cycle)
pavone@668 2687 {
pavone@668 2688 z80_run(context, cycle);
pavone@668 2689 context->busreq = 0;
pavone@668 2690 context->busack = 0;
pavone@844 2691 //there appears to be at least a 1 Z80 cycle delay between busreq
pavone@844 2692 //being released and resumption of execution
pavone@844 2693 context->current_cycle += context->options->gen.clock_divider;
pavone@668 2694 }
pavone@668 2695
pavone@668 2696 uint8_t z80_get_busack(z80_context * context, uint32_t cycle)
pavone@668 2697 {
pavone@668 2698 z80_run(context, cycle);
pavone@668 2699 return context->busack;
pavone@668 2700 }
pavone@668 2701
pavone@668 2702 void z80_adjust_cycles(z80_context * context, uint32_t deduction)
pavone@668 2703 {
pavone@668 2704 if (context->current_cycle < deduction) {
pavone@668 2705 fprintf(stderr, "WARNING: Deduction of %u cycles when Z80 cycle counter is only %u\n", deduction, context->current_cycle);
pavone@668 2706 context->current_cycle = 0;
pavone@668 2707 } else {
pavone@668 2708 context->current_cycle -= deduction;
pavone@668 2709 }
pavone@668 2710 if (context->int_enable_cycle != CYCLE_NEVER) {
pavone@668 2711 if (context->int_enable_cycle < deduction) {
pavone@668 2712 context->int_enable_cycle = 0;
pavone@668 2713 } else {
pavone@668 2714 context->int_enable_cycle -= deduction;
pavone@668 2715 }
pavone@668 2716 }
pavone@668 2717 if (context->int_pulse_start != CYCLE_NEVER) {
pavone@668 2718 if (context->int_pulse_end < deduction) {
pavone@668 2719 context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER;
pavone@668 2720 } else {
pavone@668 2721 context->int_pulse_end -= deduction;
pavone@668 2722 if (context->int_pulse_start < deduction) {
pavone@668 2723 context->int_pulse_start = 0;
pavone@668 2724 } else {
pavone@668 2725 context->int_pulse_start -= deduction;
pavone@668 2726 }
pavone@668 2727 }
pavone@668 2728 }
pavone@506 2729 }
pavone@506 2730
pavone@652 2731 uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst)
pavone@626 2732 {
pavone@966 2733 code_info code = {dst, dst+32};
pavone@659 2734 mov_ir(&code, address, context->options->gen.scratch1, SZ_W);
pavone@652 2735 call(&code, context->bp_stub);
pavone@652 2736 return code.cur-dst;
pavone@626 2737 }
pavone@626 2738
pavone@626 2739 void zcreate_stub(z80_context * context)
pavone@626 2740 {
pavone@652 2741 z80_options * opts = context->options;
pavone@652 2742 code_info *code = &opts->gen.code;
pavone@894 2743 uint32_t start_stack_off = code->stack_off;
pavone@652 2744 check_code_prologue(code);
pavone@652 2745 context->bp_stub = code->cur;
pavone@626 2746
pavone@682 2747 //Calculate length of prologue
pavone@652 2748 check_cycles_int(&opts->gen, 0);
pavone@652 2749 int check_int_size = code->cur-context->bp_stub;
pavone@652 2750 code->cur = context->bp_stub;
pavone@626 2751
pavone@626 2752 //Calculate length of patch
pavone@652 2753 int patch_size = zbreakpoint_patch(context, 0, code->cur);
pavone@626 2754
pavone@682 2755 //Save context and call breakpoint handler
pavone@652 2756 call(code, opts->gen.save_context);
pavone@652 2757 push_r(code, opts->gen.scratch1);
pavone@657 2758 call_args_abi(code, context->bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1);
pavone@664 2759 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
pavone@682 2760 //Restore context
pavone@652 2761 call(code, opts->gen.load_context);
pavone@652 2762 pop_r(code, opts->gen.scratch1);
pavone@682 2763 //do prologue stuff
pavone@652 2764 cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D);
pavone@652 2765 uint8_t * jmp_off = code->cur+1;
pavone@652 2766 jcc(code, CC_NC, code->cur + 7);
pavone@652 2767 pop_r(code, opts->gen.scratch1);
pavone@664 2768 add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
pavone@652 2769 push_r(code, opts->gen.scratch1);
pavone@652 2770 jmp(code, opts->gen.handle_cycle_limit_int);
pavone@652 2771 *jmp_off = code->cur - (jmp_off+1);
pavone@682 2772 //jump back to body of translated instruction
pavone@652 2773 pop_r(code, opts->gen.scratch1);
pavone@664 2774 add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
pavone@652 2775 jmp_r(code, opts->gen.scratch1);
pavone@894 2776 code->stack_off = start_stack_off;
pavone@235 2777 }
pavone@235 2778
pavone@366 2779 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler)
pavone@366 2780 {
pavone@626 2781 context->bp_handler = bp_handler;
pavone@819 2782 uint8_t bit = 1 << (address % 8);
pavone@819 2783 if (!(bit & context->breakpoint_flags[address / 8])) {
pavone@819 2784 context->breakpoint_flags[address / 8] |= bit;
pavone@626 2785 if (!context->bp_stub) {
pavone@626 2786 zcreate_stub(context);
pavone@366 2787 }
pavone@626 2788 uint8_t * native = z80_get_native_address(context, address);
pavone@626 2789 if (native) {
pavone@626 2790 zbreakpoint_patch(context, address, native);
pavone@626 2791 }
pavone@366 2792 }
pavone@366 2793 }
pavone@235 2794
pavone@366 2795 void zremove_breakpoint(z80_context * context, uint16_t address)
pavone@366 2796 {
pavone@819 2797 context->breakpoint_flags[address / 8] &= ~(1 << (address % 8));
pavone@366 2798 uint8_t * native = z80_get_native_address(context, address);
pavone@626 2799 if (native) {
pavone@652 2800 z80_options * opts = context->options;
pavone@652 2801 code_info tmp_code = opts->gen.code;
pavone@652 2802 opts->gen.code.cur = native;
pavone@652 2803 opts->gen.code.last = native + 16;
pavone@652 2804 check_cycles_int(&opts->gen, address);
pavone@652 2805 opts->gen.code = tmp_code;
pavone@792 2806 }
pavone@366 2807 }
pavone@366 2808