comparison z80_to_x86.c @ 729:9ef6db986982

Fix a bunch of assumptions about which Z80 registers are stored in native registers to make the x86-32 build less broken
author Michael Pavone <pavone@retrodev.com>
date Sat, 23 May 2015 20:26:20 -0700
parents b707a8ddc202
children 38e9bee03749
comparison
equal deleted inserted replaced
728:cb1c005880e7 729:9ef6db986982
113 ea->mode = MODE_REG_DIRECT; 113 ea->mode = MODE_REG_DIRECT;
114 areg = read ? opts->gen.scratch1 : opts->gen.scratch2; 114 areg = read ? opts->gen.scratch1 : opts->gen.scratch2;
115 switch(inst->addr_mode & 0x1F) 115 switch(inst->addr_mode & 0x1F)
116 { 116 {
117 case Z80_REG: 117 case Z80_REG:
118 if (inst->ea_reg == Z80_IYH) { 118 if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IY] >= 0) {
119 if (inst->reg == Z80_IYL) { 119 if (inst->reg == Z80_IYL) {
120 mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); 120 mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W);
121 ror_ir(code, 8, opts->gen.scratch1, SZ_W); 121 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
122 ea->base = opts->gen.scratch1; 122 ea->base = opts->gen.scratch1;
123 } else { 123 } else {
141 ea->base = opts->gen.context_reg; 141 ea->base = opts->gen.context_reg;
142 ea->disp = offsetof(z80_context, regs) + inst->ea_reg; 142 ea->disp = offsetof(z80_context, regs) + inst->ea_reg;
143 } 143 }
144 break; 144 break;
145 case Z80_REG_INDIRECT: 145 case Z80_REG_INDIRECT:
146 mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W); 146 if (opts->regs[inst->ea_reg] >= 0) {
147 mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W);
148 } else {
149 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, regs) + z80_low_reg(inst->ea_reg), areg, SZ_W);
150 }
147 size = z80_size(inst); 151 size = z80_size(inst);
148 if (read) { 152 if (read) {
149 if (modify) { 153 if (modify) {
150 //push_r(code, opts->gen.scratch1); 154 //push_r(code, opts->gen.scratch1);
151 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); 155 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
186 ea->base = opts->gen.scratch1; 190 ea->base = opts->gen.scratch1;
187 break; 191 break;
188 case Z80_IX_DISPLACE: 192 case Z80_IX_DISPLACE:
189 case Z80_IY_DISPLACE: 193 case Z80_IY_DISPLACE:
190 reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY]; 194 reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY];
191 mov_rr(code, reg, areg, SZ_W); 195 if (reg >= 0) {
196 mov_rr(code, reg, areg, SZ_W);
197 } else {
198 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, regs) + ((inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IXL : Z80_IYL), areg, SZ_W);
199 }
192 add_ir(code, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); 200 add_ir(code, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W);
193 size = z80_size(inst); 201 size = z80_size(inst);
194 if (read) { 202 if (read) {
195 if (modify) { 203 if (modify) {
196 //push_r(code, opts->gen.scratch1); 204 //push_r(code, opts->gen.scratch1);
274 uint8_t zaf_off(uint8_t flag) 282 uint8_t zaf_off(uint8_t flag)
275 { 283 {
276 return offsetof(z80_context, alt_flags) + flag; 284 return offsetof(z80_context, alt_flags) + flag;
277 } 285 }
278 286
287 uint8_t zr_off(uint8_t reg)
288 {
289 if (reg > Z80_A) {
290 reg = z80_low_reg(reg);
291 }
292 return offsetof(z80_context, regs) + reg;
293 }
294
279 uint8_t zar_off(uint8_t reg) 295 uint8_t zar_off(uint8_t reg)
280 { 296 {
297 if (reg > Z80_A) {
298 reg = z80_low_reg(reg);
299 }
281 return offsetof(z80_context, alt_regs) + reg; 300 return offsetof(z80_context, alt_regs) + reg;
301 }
302
303 void zreg_to_native(z80_options *opts, uint8_t reg, uint8_t native_reg)
304 {
305 if (opts->regs[reg] >= 0) {
306 mov_rr(&opts->gen.code, opts->regs[reg], native_reg, reg > Z80_A ? SZ_W : SZ_B);
307 } else {
308 mov_rdispr(&opts->gen.code, opts->gen.context_reg, zr_off(reg), native_reg, reg > Z80_A ? SZ_W : SZ_B);
309 }
310 }
311
312 void native_to_zreg(z80_options *opts, uint8_t native_reg, uint8_t reg)
313 {
314 if (opts->regs[reg] >= 0) {
315 mov_rr(&opts->gen.code, native_reg, opts->regs[reg], reg > Z80_A ? SZ_W : SZ_B);
316 } else {
317 mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, zr_off(reg), reg > Z80_A ? SZ_W : SZ_B);
318 }
282 } 319 }
283 320
284 void z80_print_regs_exit(z80_context * context) 321 void z80_print_regs_exit(z80_context * context)
285 { 322 {
286 printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n", 323 printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n",
382 break; 419 break;
383 case Z80_PUSH: 420 case Z80_PUSH:
384 cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); 421 cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5);
385 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); 422 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
386 if (inst->reg == Z80_AF) { 423 if (inst->reg == Z80_AF) {
387 mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); 424 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
388 shl_ir(code, 8, opts->gen.scratch1, SZ_W); 425 shl_ir(code, 8, opts->gen.scratch1, SZ_W);
389 mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B); 426 mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B);
390 shl_ir(code, 1, opts->gen.scratch1, SZ_B); 427 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
391 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B); 428 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B);
392 shl_ir(code, 2, opts->gen.scratch1, SZ_B); 429 shl_ir(code, 2, opts->gen.scratch1, SZ_B);
424 bt_ir(code, 6, opts->gen.scratch1, SZ_W); 461 bt_ir(code, 6, opts->gen.scratch1, SZ_W);
425 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_Z)); 462 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_Z));
426 bt_ir(code, 7, opts->gen.scratch1, SZ_W); 463 bt_ir(code, 7, opts->gen.scratch1, SZ_W);
427 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S)); 464 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S));
428 shr_ir(code, 8, opts->gen.scratch1, SZ_W); 465 shr_ir(code, 8, opts->gen.scratch1, SZ_W);
429 mov_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); 466 native_to_zreg(opts, opts->gen.scratch1, Z80_A);
430 } else { 467 } else {
431 translate_z80_reg(inst, &src_op, opts); 468 translate_z80_reg(inst, &src_op, opts);
432 mov_rr(code, opts->gen.scratch1, src_op.base, SZ_W); 469 mov_rr(code, opts->gen.scratch1, src_op.base, SZ_W);
433 } 470 }
434 //no call to save_z80_reg needed since there's no chance we'll use the only 471 //no call to save_z80_reg needed since there's no chance we'll use the only
441 num_cycles = 8; 478 num_cycles = 8;
442 } 479 }
443 cycles(&opts->gen, num_cycles); 480 cycles(&opts->gen, num_cycles);
444 if (inst->addr_mode == Z80_REG) { 481 if (inst->addr_mode == Z80_REG) {
445 if(inst->reg == Z80_AF) { 482 if(inst->reg == Z80_AF) {
446 mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); 483 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
447 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); 484 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->gen.scratch2, SZ_B);
448 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B); 485 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B);
486 native_to_zreg(opts, opts->gen.scratch2, Z80_A);
449 487
450 //Flags are currently word aligned, so we can move 488 //Flags are currently word aligned, so we can move
451 //them efficiently a word at a time 489 //them efficiently a word at a time
452 for (int f = ZF_C; f < ZF_NUM; f+=2) { 490 for (int f = ZF_C; f < ZF_NUM; f+=2) {
453 mov_rdispr(code, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W); 491 mov_rdispr(code, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W);
454 mov_rdispr(code, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W); 492 mov_rdispr(code, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W);
455 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W); 493 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W);
456 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W); 494 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W);
457 } 495 }
458 } else { 496 } else {
459 xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); 497 if (opts->regs[Z80_DE] >= 0 && opts->regs[Z80_HL] >= 0) {
498 xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W);
499 } else {
500 zreg_to_native(opts, Z80_DE, opts->gen.scratch1);
501 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
502 native_to_zreg(opts, opts->gen.scratch1, Z80_HL);
503 native_to_zreg(opts, opts->gen.scratch2, Z80_DE);
504 }
460 } 505 }
461 } else { 506 } else {
462 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); 507 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
463 call(code, opts->read_8); 508 call(code, opts->read_8);
464 xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); 509 if (opts->regs[inst->reg] >= 0) {
510 xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B);
511 } else {
512 zreg_to_native(opts, inst->reg, opts->gen.scratch2);
513 xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B);
514 native_to_zreg(opts, opts->gen.scratch2, inst->reg);
515 }
465 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); 516 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
466 call(code, opts->write_8); 517 call(code, opts->write_8);
467 cycles(&opts->gen, 1); 518 cycles(&opts->gen, 1);
468 uint8_t high_reg = z80_high_reg(inst->reg); 519 uint8_t high_reg = z80_high_reg(inst->reg);
469 uint8_t use_reg;
470 //even though some of the upper halves can be used directly
471 //the limitations on mixing *H regs with the REX prefix
472 //prevent us from taking advantage of it
473 use_reg = opts->regs[inst->reg];
474 ror_ir(code, 8, use_reg, SZ_W);
475 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); 520 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
476 add_ir(code, 1, opts->gen.scratch1, SZ_W); 521 add_ir(code, 1, opts->gen.scratch1, SZ_W);
477 call(code, opts->read_8); 522 call(code, opts->read_8);
478 xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B); 523 if (opts->regs[inst->reg] >= 0) {
524 //even though some of the upper halves can be used directly
525 //the limitations on mixing *H regs with the REX prefix
526 //prevent us from taking advantage of it
527 uint8_t use_reg = opts->regs[inst->reg];
528 ror_ir(code, 8, use_reg, SZ_W);
529 xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B);
530 //restore reg to normal rotation
531 ror_ir(code, 8, use_reg, SZ_W);
532 } else {
533 zreg_to_native(opts, high_reg, opts->gen.scratch2);
534 xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B);
535 native_to_zreg(opts, opts->gen.scratch2, high_reg);
536 }
479 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); 537 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
480 add_ir(code, 1, opts->gen.scratch2, SZ_W); 538 add_ir(code, 1, opts->gen.scratch2, SZ_W);
481 call(code, opts->write_8); 539 call(code, opts->write_8);
482 //restore reg to normal rotation
483 ror_ir(code, 8, use_reg, SZ_W);
484 cycles(&opts->gen, 2); 540 cycles(&opts->gen, 2);
485 } 541 }
486 break; 542 break;
487 case Z80_EXX: 543 case Z80_EXX:
488 cycles(&opts->gen, 4); 544 cycles(&opts->gen, 4);
489 mov_rr(code, opts->regs[Z80_BC], opts->gen.scratch1, SZ_W); 545 zreg_to_native(opts, Z80_BC, opts->gen.scratch1);
490 mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); 546 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_BC), opts->gen.scratch2, SZ_W);
491 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); 547 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_BC), SZ_W);
492 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); 548 native_to_zreg(opts, opts->gen.scratch2, Z80_BC);
493 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_C), SZ_W); 549
494 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zar_off(Z80_L), SZ_W); 550 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
495 mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch1, SZ_W); 551 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_HL), opts->gen.scratch2, SZ_W);
496 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); 552 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_HL), SZ_W);
497 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_E), SZ_W); 553 native_to_zreg(opts, opts->gen.scratch2, Z80_HL);
554
555 zreg_to_native(opts, Z80_DE, opts->gen.scratch1);
556 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_DE), opts->gen.scratch2, SZ_W);
557 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_DE), SZ_W);
558 native_to_zreg(opts, opts->gen.scratch2, Z80_DE);
498 break; 559 break;
499 case Z80_LDI: { 560 case Z80_LDI: {
500 cycles(&opts->gen, 8); 561 cycles(&opts->gen, 8);
501 mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); 562 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
502 call(code, opts->read_8); 563 call(code, opts->read_8);
503 mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); 564 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
504 call(code, opts->write_8); 565 call(code, opts->write_8);
505 cycles(&opts->gen, 2); 566 cycles(&opts->gen, 2);
506 add_ir(code, 1, opts->regs[Z80_DE], SZ_W); 567 if (opts->regs[Z80_DE] >= 0) {
507 add_ir(code, 1, opts->regs[Z80_HL], SZ_W); 568 add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
508 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); 569 } else {
570 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
571 }
572 if (opts->regs[Z80_HL] >= 0) {
573 add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
574 } else {
575 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
576 }
577 if (opts->regs[Z80_BC] >= 0) {
578 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
579 } else {
580 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
581 }
509 //TODO: Implement half-carry 582 //TODO: Implement half-carry
510 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); 583 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
511 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); 584 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
512 break; 585 break;
513 } 586 }
514 case Z80_LDIR: { 587 case Z80_LDIR: {
515 cycles(&opts->gen, 8); 588 cycles(&opts->gen, 8);
516 mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); 589 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
517 call(code, opts->read_8); 590 call(code, opts->read_8);
518 mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); 591 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
519 call(code, opts->write_8); 592 call(code, opts->write_8);
520 add_ir(code, 1, opts->regs[Z80_DE], SZ_W); 593 if (opts->regs[Z80_DE] >= 0) {
521 add_ir(code, 1, opts->regs[Z80_HL], SZ_W); 594 add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
522 595 } else {
523 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); 596 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
597 }
598 if (opts->regs[Z80_HL] >= 0) {
599 add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
600 } else {
601 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
602 }
603 if (opts->regs[Z80_BC] >= 0) {
604 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
605 } else {
606 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
607 }
524 uint8_t * cont = code->cur+1; 608 uint8_t * cont = code->cur+1;
525 jcc(code, CC_Z, code->cur+2); 609 jcc(code, CC_Z, code->cur+2);
526 cycles(&opts->gen, 7); 610 cycles(&opts->gen, 7);
527 //TODO: Figure out what the flag state should be here 611 //TODO: Figure out what the flag state should be here
528 //TODO: Figure out whether an interrupt can interrupt this 612 //TODO: Figure out whether an interrupt can interrupt this
534 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); 618 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
535 break; 619 break;
536 } 620 }
537 case Z80_LDD: { 621 case Z80_LDD: {
538 cycles(&opts->gen, 8); 622 cycles(&opts->gen, 8);
539 mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); 623 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
540 call(code, opts->read_8); 624 call(code, opts->read_8);
541 mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); 625 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
542 call(code, opts->write_8); 626 call(code, opts->write_8);
543 cycles(&opts->gen, 2); 627 cycles(&opts->gen, 2);
544 sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); 628 if (opts->regs[Z80_DE] >= 0) {
545 sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); 629 sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
546 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); 630 } else {
631 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
632 }
633 if (opts->regs[Z80_HL] >= 0) {
634 sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
635 } else {
636 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
637 }
638 if (opts->regs[Z80_BC] >= 0) {
639 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
640 } else {
641 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
642 }
547 //TODO: Implement half-carry 643 //TODO: Implement half-carry
548 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); 644 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
549 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); 645 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
550 break; 646 break;
551 } 647 }
552 case Z80_LDDR: { 648 case Z80_LDDR: {
553 cycles(&opts->gen, 8); 649 cycles(&opts->gen, 8);
554 mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); 650 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
555 call(code, opts->read_8); 651 call(code, opts->read_8);
556 mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); 652 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
557 call(code, opts->write_8); 653 call(code, opts->write_8);
558 sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); 654 if (opts->regs[Z80_DE] >= 0) {
559 sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); 655 sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
560 656 } else {
561 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); 657 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
658 }
659 if (opts->regs[Z80_HL] >= 0) {
660 sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
661 } else {
662 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
663 }
664 if (opts->regs[Z80_BC] >= 0) {
665 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
666 } else {
667 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
668 }
562 uint8_t * cont = code->cur+1; 669 uint8_t * cont = code->cur+1;
563 jcc(code, CC_Z, code->cur+2); 670 jcc(code, CC_Z, code->cur+2);
564 cycles(&opts->gen, 7); 671 cycles(&opts->gen, 7);
565 //TODO: Figure out what the flag state should be here 672 //TODO: Figure out what the flag state should be here
566 //TODO: Figure out whether an interrupt can interrupt this 673 //TODO: Figure out whether an interrupt can interrupt this
1340 call_dst = code->cur + 256; 1447 call_dst = code->cur + 256;
1341 } 1448 }
1342 jmp(code, call_dst); 1449 jmp(code, call_dst);
1343 } else { 1450 } else {
1344 if (inst->addr_mode == Z80_REG_INDIRECT) { 1451 if (inst->addr_mode == Z80_REG_INDIRECT) {
1345 mov_rr(code, opts->regs[inst->ea_reg], opts->gen.scratch1, SZ_W); 1452 zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1);
1346 } else { 1453 } else {
1347 mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); 1454 mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W);
1348 } 1455 }
1349 call(code, opts->native_addr); 1456 call(code, opts->native_addr);
1350 jmp_r(code, opts->gen.scratch1); 1457 jmp_r(code, opts->gen.scratch1);
1971 options->gen.scratch1 = R13; 2078 options->gen.scratch1 = R13;
1972 options->gen.scratch2 = R14; 2079 options->gen.scratch2 = R14;
1973 #else 2080 #else
1974 memset(options->regs, -1, sizeof(options->regs)); 2081 memset(options->regs, -1, sizeof(options->regs));
1975 options->regs[Z80_A] = RAX; 2082 options->regs[Z80_A] = RAX;
1976 options->regx[Z80_SP] = RBX; 2083 options->regs[Z80_SP] = RBX;
1977 2084
1978 options->gen.scratch1 = RCX; 2085 options->gen.scratch1 = RCX;
1979 options->gen.scratch2 = RDX; 2086 options->gen.scratch2 = RDX;
1980 #endif 2087 #endif
1981 2088
2212 call(code, options->gen.load_context); 2319 call(code, options->gen.load_context);
2213 jmp_r(code, options->gen.scratch1); 2320 jmp_r(code, options->gen.scratch1);
2214 2321
2215 options->run = (z80_run_fun)code->cur; 2322 options->run = (z80_run_fun)code->cur;
2216 save_callee_save_regs(code); 2323 save_callee_save_regs(code);
2324 #ifdef X86_64
2217 mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); 2325 mov_rr(code, RDI, options->gen.context_reg, SZ_PTR);
2326 #else
2327 mov_rdispr(code, RSP, 5 * sizeof(int32_t), options->gen.context_reg, SZ_PTR);
2328 #endif
2218 call(code, options->load_context_scratch); 2329 call(code, options->load_context_scratch);
2219 cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); 2330 cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
2220 code_ptr no_extra = code->cur+1; 2331 code_ptr no_extra = code->cur+1;
2221 jcc(code, CC_Z, no_extra); 2332 jcc(code, CC_Z, no_extra);
2222 push_rdisp(code, options->gen.context_reg, offsetof(z80_context, extra_pc)); 2333 push_rdisp(code, options->gen.context_reg, offsetof(z80_context, extra_pc));