comparison z80_to_x86.c @ 1044:1625555e346e

Properly handle redundant prefixes
author Michael Pavone <pavone@retrodev.com>
date Mon, 25 Jul 2016 23:16:04 -0700
parents 3980ef0f6307
children e0489abfdab0
comparing 1043:3980ef0f6307 with 1044:1625555e346e
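
What this changeset does: num_cycles is now seeded before the opcode switch with the instruction's fetch time (4 T-states per opcode byte), so a redundant DD or FD prefix is simply charged one extra 4-cycle fetch instead of being special-cased in every handler; the individual cases then only add addressing-mode deltas on top of that base. A minimal sketch of the new pattern, assuming simplified stand-in names (fake_inst, opcode_bytes, is_ix_iy_disp and is_immed are illustrative, not the real z80inst interface):

#include <stdint.h>

/* Illustrative stand-ins only; the real code works on z80inst and z80_options. */
typedef struct {
	uint8_t opcode_bytes;  /* 1 for a plain opcode, 2+ when DD/FD/ED/CB prefixes are present */
	int     is_ix_iy_disp; /* non-zero for (IX+d)/(IY+d) operands */
	int     is_immed;      /* non-zero for immediate operands */
} fake_inst;

/* Seed the cycle count from the opcode fetch (4 T-states per byte), then add
 * addressing-mode costs; a redundant prefix just contributes one more fetch. */
static uint32_t base_cycles(const fake_inst *inst)
{
	uint32_t num_cycles = 4 * inst->opcode_bytes;
	if (inst->is_ix_iy_disp) {
		num_cycles += 8; /* 3 for the displacement byte, 5 for the address addition */
	} else if (inst->is_immed) {
		num_cycles += 3; /* immediate operand fetch */
	}
	return num_cycles;
}

In the real translator this running total is kept in num_cycles and handed to cycles(&opts->gen, ...) once the addressing mode has been classified, as the hunks below show.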
@@ -341 +341 @@
 if (!interp) {
 check_cycles_int(&opts->gen, address);
 if (context->breakpoint_flags[address / 8] & (1 << (address % 8))) {
 zbreakpoint_patch(context, address, start);
 }
+num_cycles = 4 * inst->opcode_bytes;
+//TODO: increment R register once for every opcode byte
 #ifdef Z80_LOG_ADDRESS
 log_address(&opts->gen, address, "Z80: %X @ %d\n");
 #endif
 }
 switch(inst->op)
@@ -353 +355 @@
 size = z80_size(inst);
 switch (inst->addr_mode & 0x1F)
 {
 case Z80_REG:
 case Z80_REG_INDIRECT:
-num_cycles = size == SZ_B ? 4 : 6;
-if (inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY || (inst->ea_reg >= Z80_IXL && inst->ea_reg <= Z80_IYH)) {
-num_cycles += 4;
+if (size != SZ_B) {
+num_cycles += 2;
 }
 if (inst->reg == Z80_I || inst->ea_reg == Z80_I || inst->reg == Z80_R || inst->ea_reg == Z80_R) {
-num_cycles += 5;
+num_cycles += 1;
 }
 break;
 case Z80_IMMED:
-num_cycles = size == SZ_B ? 7 : 10;
+num_cycles += size == SZ_B ? 3 : 6;
 break;
 case Z80_IMMED_INDIRECT:
-num_cycles = 10;
+num_cycles += 6;
 break;
 case Z80_IX_DISPLACE:
 case Z80_IY_DISPLACE:
-num_cycles = 16;
+num_cycles = 8; //3 for displacement, 5 for address addition
 break;
-}
-if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) {
-num_cycles += 4;
 }
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode & Z80_DIR) {
 translate_z80_ea(inst, &dst_op, opts, DONT_READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts);
@@ -420 +418 @@
 if (inst->addr_mode & Z80_DIR) {
 z80_save_result(opts, inst);
 }
 break;
 case Z80_PUSH:
-cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5);
+cycles(&opts->gen, num_cycles + 1);
 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
 if (inst->reg == Z80_AF) {
 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
 shl_ir(code, 8, opts->gen.scratch1, SZ_W);
 mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B);
@@ -445 +443 @@
 call(code, opts->write_16_highfirst);
 //no call to save_z80_reg needed since there's no chance we'll use the only
 //the upper half of a register pair
 break;
 case Z80_POP:
-cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4);
+cycles(&opts->gen, num_cycles);
 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
 call(code, opts->read_16);
 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
 if (inst->reg == Z80_AF) {
 
@@ -472 +470 @@
 }
 //no call to save_z80_reg needed since there's no chance we'll use the only
 //the upper half of a register pair
 break;
 case Z80_EX:
-if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) {
-num_cycles = 4;
-} else {
-num_cycles = 8;
-}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode == Z80_REG) {
 if(inst->reg == Z80_AF) {
 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->gen.scratch2, SZ_B);
@@ -541 +534 @@
 call(code, opts->write_8);
 cycles(&opts->gen, 2);
 }
 break;
 case Z80_EXX:
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 zreg_to_native(opts, Z80_BC, opts->gen.scratch1);
 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_BC), opts->gen.scratch2, SZ_W);
 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_BC), SZ_W);
 native_to_zreg(opts, opts->gen.scratch2, Z80_BC);
 
@@ -558 +551 @@
 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_DE), opts->gen.scratch2, SZ_W);
 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_DE), SZ_W);
 native_to_zreg(opts, opts->gen.scratch2, Z80_DE);
 break;
 case Z80_LDI: {
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);
 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
 call(code, opts->write_8);
 cycles(&opts->gen, 2);
@@ -585 +578 @@
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
 break;
 }
 case Z80_LDIR: {
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);
 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
 call(code, opts->write_8);
 if (opts->regs[Z80_DE] >= 0) {
@@ -618 +611 @@
 cycles(&opts->gen, 2);
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
 break;
 }
 case Z80_LDD: {
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);
 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
 call(code, opts->write_8);
 cycles(&opts->gen, 2);
@@ -645 +638 @@
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
 break;
 }
 case Z80_LDDR: {
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);
 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
 call(code, opts->write_8);
 if (opts->regs[Z80_DE] >= 0) {
@@ -678 +671 @@
 cycles(&opts->gen, 2);
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
 break;
 }
 case Z80_CPI:
-cycles(&opts->gen, 8);//T-States 4,4
+cycles(&opts->gen, num_cycles);//T-States 4,4
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T-States 3
 cmp_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
@@ -700 +693 @@
 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
 }
 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
 break;
 case Z80_CPIR: {
-cycles(&opts->gen, 8);//T-States 4,4
+cycles(&opts->gen, num_cycles);//T-States 4,4
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T-States 3
 cmp_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
@@ -733 +726 @@
 *cont = code->cur - (cont + 1);
 *cont2 = code->cur - (cont2 + 1);
 break;
 }
 case Z80_CPD:
-cycles(&opts->gen, 8);//T-States 4,4
+cycles(&opts->gen, num_cycles);//T-States 4,4
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T-States 3
 cmp_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
@@ -755 +748 @@
 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
 }
 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
 break;
 case Z80_CPDR: {
-cycles(&opts->gen, 8);//T-States 4,4
+cycles(&opts->gen, num_cycles);//T-States 4,4
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T-States 3
 cmp_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
@@ -788 +781 @@
 *cont = code->cur - (cont + 1);
 *cont2 = code->cur - (cont2 + 1);
 break;
 }
 case Z80_ADD:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 } else if(z80_size(inst) == SZ_W) {
 num_cycles += 4;
 }
@@ -829 +821 @@
 }
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_ADC:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 } else if(z80_size(inst) == SZ_W) {
 num_cycles += 4;
 }
@@ -869 +860 @@
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_SUB:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 }
 cycles(&opts->gen, num_cycles);
 translate_z80_reg(inst, &dst_op, opts);
@@ -906 +896 @@
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_SBC:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 } else if(z80_size(inst) == SZ_W) {
 num_cycles += 4;
 }
@@ -946 +935 @@
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_AND:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 } else if(z80_size(inst) == SZ_W) {
 num_cycles += 4;
 }
@@ -974 +962 @@
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_OR:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 } else if(z80_size(inst) == SZ_W) {
 num_cycles += 4;
 }
@@ -1002 +989 @@
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_XOR:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 } else if(z80_size(inst) == SZ_W) {
 num_cycles += 4;
 }
@@ -1030 +1016 @@
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_CP:
-num_cycles = 4;
 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 12;
+num_cycles += 8;
 } else if(inst->addr_mode == Z80_IMMED) {
 num_cycles += 3;
 }
 cycles(&opts->gen, num_cycles);
 translate_z80_reg(inst, &dst_op, opts);
@@ -1056 +1041 @@
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 break;
 case Z80_INC:
-num_cycles = 4;
-if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
-num_cycles += 6;
-} else if(z80_size(inst) == SZ_W) {
+if(z80_size(inst) == SZ_W) {
 num_cycles += 2;
-} else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 4;
 }
 cycles(&opts->gen, num_cycles);
 translate_z80_reg(inst, &dst_op, opts);
 if (dst_op.mode == MODE_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
@@ -1086 +1066 @@
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 z80_save_result(opts, inst);
 break;
 case Z80_DEC:
-num_cycles = 4;
-if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
-num_cycles += 6;
-} else if(z80_size(inst) == SZ_W) {
+if(z80_size(inst) == SZ_W) {
 num_cycles += 2;
-} else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-num_cycles += 4;
 }
 cycles(&opts->gen, num_cycles);
 translate_z80_reg(inst, &dst_op, opts);
 if (dst_op.mode == MODE_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
@@ -1117 +1092 @@
 z80_save_reg(inst, opts);
 z80_save_ea(code, inst, opts);
 z80_save_result(opts, inst);
 break;
 case Z80_DAA:
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 xor_rr(code, opts->gen.scratch2, opts->gen.scratch2, SZ_B);
 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
 code_ptr corf_low = code->cur+1;
 jcc(code, CC_NZ, code->cur+2);
 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
@@ -1164 +1139 @@
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
 *no_carry = code->cur - (no_carry + 1);
 //TODO: Implement half-carry flag
 break;
 case Z80_CPL:
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 not_r(code, opts->regs[Z80_A], SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 break;
 case Z80_NEG:
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 neg_r(code, opts->regs[Z80_A], SZ_B);
 //TODO: Implement half-carry flag
 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 break;
 case Z80_CCF:
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B);
 xor_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
 break;
 case Z80_SCF:
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_H), SZ_B);
 break;
 case Z80_NOP:
-if (inst->immed == 42) {
-call(code, opts->gen.save_context);
-call_args(code, (code_ptr)z80_print_regs_exit, 1, opts->gen.context_reg);
-} else {
-cycles(&opts->gen, 4 * inst->immed);
-}
+cycles(&opts->gen, num_cycles);
 break;
 case Z80_HALT: {
 code_ptr loop_top = code->cur;
 //this isn't terribly efficient, but it's good enough for now
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 check_cycles_int(&opts->gen, address);
 jmp(code, loop_top);
 break;
 }
 case Z80_DI:
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D);
 mov_irdisp(code, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D);
 break;
 case Z80_EI:
-cycles(&opts->gen, 4);
+cycles(&opts->gen, num_cycles);
 mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
 //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles
 add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
 call(code, opts->do_sync);
 break;
 case Z80_IM:
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 mov_irdisp(code, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B);
 break;
 case Z80_RLC:
-num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
 cycles(&opts->gen, 1);
@@ -1273 +1245 @@
 } else {
 z80_save_reg(inst, opts);
 }
 break;
 case Z80_RL:
-num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
 cycles(&opts->gen, 1);
@@ -1318 +1292 @@
 } else {
 z80_save_reg(inst, opts);
 }
 break;
 case Z80_RRC:
-num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
 cycles(&opts->gen, 1);
@@ -1362 +1338 @@
 } else {
 z80_save_reg(inst, opts);
 }
 break;
 case Z80_RR:
-num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
 cycles(&opts->gen, 1);
@@ -1408 +1386 @@
 z80_save_reg(inst, opts);
 }
 break;
 case Z80_SLA:
 case Z80_SLL:
-num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
 cycles(&opts->gen, 1);
@@ -1456 +1436 @@
 } else {
 z80_save_reg(inst, opts);
 }
 break;
 case Z80_SRA:
-num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
 cycles(&opts->gen, 1);
@@ -1497 +1479 @@
 } else {
 z80_save_reg(inst, opts);
 }
 break;
 case Z80_SRL:
-num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_UNUSED) {
 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
 cycles(&opts->gen, 1);
@@ -1538 +1522 @@
 } else {
 z80_save_reg(inst, opts);
 }
 break;
 case Z80_RLD:
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);
 //Before: (HL) = 0x12, A = 0x34
 //After: (HL) = 0x24, A = 0x31
 zreg_to_native(opts, Z80_A, opts->gen.scratch2);
@@ -1565 +1549 @@
 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
 call(code, opts->write_8);
 break;
 case Z80_RRD:
-cycles(&opts->gen, 8);
+cycles(&opts->gen, num_cycles);
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);
 //Before: (HL) = 0x12, A = 0x34
 //After: (HL) = 0x41, A = 0x32
 zreg_to_native(opts, Z80_A, opts->gen.scratch2);
@@ -1596 +1580 @@
 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
 call(code, opts->write_8);
 break;
 case Z80_BIT: {
-num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 uint8_t bit;
 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
 src_op.mode = MODE_REG_DIRECT;
@@ -1635 +1621 @@
 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
 }
 break;
 }
 case Z80_SET: {
-num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 uint8_t bit;
 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
 src_op.mode = MODE_REG_DIRECT;
@@ -1702 +1690 @@
 }
 }
 break;
 }
 case Z80_RES: {
-num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
+if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+num_cycles += 8;
+}
 cycles(&opts->gen, num_cycles);
 uint8_t bit;
 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
 src_op.mode = MODE_REG_DIRECT;
@@ -1769 +1759 @@
 }
 }
 break;
 }
 case Z80_JP: {
-num_cycles = 4;
 if (inst->addr_mode != Z80_REG_INDIRECT) {
 num_cycles += 6;
-} else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) {
-num_cycles += 4;
 }
 cycles(&opts->gen, num_cycles);
 if (inst->addr_mode != Z80_REG_INDIRECT) {
 code_ptr call_dst = z80_get_native_address(context, inst->immed);
 if (!call_dst) {
@@ -1796 +1783 @@
 jmp_r(code, opts->gen.scratch1);
 }
 break;
 }
 case Z80_JPCC: {
-cycles(&opts->gen, 7);//T States: 4,3
+cycles(&opts->gen, num_cycles + 3);//T States: 4,3
 uint8_t cond = CC_Z;
 switch (inst->reg)
 {
 case Z80_CC_NZ:
 cond = CC_NZ;
@@ -1836 +1823 @@
 jmp(code, call_dst);
 *no_jump_off = code->cur - (no_jump_off+1);
 break;
 }
 case Z80_JR: {
-cycles(&opts->gen, 12);//T States: 4,3,5
+cycles(&opts->gen, num_cycles + 8);//T States: 4,3,5
 uint16_t dest_addr = address + inst->immed + 2;
 code_ptr call_dst = z80_get_native_address(context, dest_addr);
 if (!call_dst) {
 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
 //fake address to force large displacement
@@ -1848 +1835 @@
 }
 jmp(code, call_dst);
 break;
 }
 case Z80_JRCC: {
-cycles(&opts->gen, 7);//T States: 4,3
+cycles(&opts->gen, num_cycles + 3);//T States: 4,3
 uint8_t cond = CC_Z;
 switch (inst->reg)
 {
 case Z80_CC_NZ:
 cond = CC_NZ;
@@ -1878 +1865 @@
 jmp(code, call_dst);
 *no_jump_off = code->cur - (no_jump_off+1);
 break;
 }
 case Z80_DJNZ: {
-cycles(&opts->gen, 8);//T States: 5,3
+cycles(&opts->gen, num_cycles + 4);//T States: 5,3
 if (opts->regs[Z80_B] >= 0) {
 sub_ir(code, 1, opts->regs[Z80_B], SZ_B);
 } else {
 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_B), SZ_B);
 }
@@ -1899 +1886 @@
 jmp(code, call_dst);
 *no_jump_off = code->cur - (no_jump_off+1);
 break;
 }
 case Z80_CALL: {
-cycles(&opts->gen, 11);//T States: 4,3,4
+cycles(&opts->gen, num_cycles + 7);//T States: 4,3,4
 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
 mov_ir(code, address + 3, opts->gen.scratch1, SZ_W);
 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
 call(code, opts->write_16_highfirst);//T States: 3, 3
 code_ptr call_dst = z80_get_native_address(context, inst->immed);
@@ -1914 +1901 @@
 }
 jmp(code, call_dst);
 break;
 }
 case Z80_CALLCC: {
-cycles(&opts->gen, 10);//T States: 4,3,3 (false case)
+cycles(&opts->gen, num_cycles + 6);//T States: 4,3,3 (false case)
 uint8_t cond = CC_Z;
 switch (inst->reg)
 {
 case Z80_CC_NZ:
 cond = CC_NZ;
@@ -1957 +1944 @@
 jmp(code, call_dst);
 *no_call_off = code->cur - (no_call_off+1);
 break;
 }
 case Z80_RET:
-cycles(&opts->gen, 4);//T States: 4
+cycles(&opts->gen, num_cycles);//T States: 4
 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
 call(code, opts->read_16);//T STates: 3, 3
 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
 call(code, opts->native_addr);
 jmp_r(code, opts->gen.scratch1);
 break;
 case Z80_RETCC: {
-cycles(&opts->gen, 5);//T States: 5
+cycles(&opts->gen, num_cycles + 1);//T States: 5
 uint8_t cond = CC_Z;
 switch (inst->reg)
 {
 case Z80_CC_NZ:
 cond = CC_NZ;
@@ -2002 +1989 @@
 *no_call_off = code->cur - (no_call_off+1);
 break;
 }
 case Z80_RETI:
 //For some systems, this may need a callback for signalling interrupt routine completion
-cycles(&opts->gen, 8);//T States: 4, 4
+cycles(&opts->gen, num_cycles);//T States: 4, 4
 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
 call(code, opts->read_16);//T STates: 3, 3
 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
 call(code, opts->native_addr);
 jmp_r(code, opts->gen.scratch1);
 break;
 case Z80_RETN:
-cycles(&opts->gen, 8);//T States: 4, 4
+cycles(&opts->gen, num_cycles);//T States: 4, 4
 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B);
 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 call(code, opts->read_16);//T STates: 3, 3
 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
 call(code, opts->native_addr);
 jmp_r(code, opts->gen.scratch1);
 break;
 case Z80_RST: {
 //RST is basically CALL to an address in page 0
-cycles(&opts->gen, 5);//T States: 5
+cycles(&opts->gen, num_cycles + 1);//T States: 5
 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
 mov_ir(code, address + 1, opts->gen.scratch1, SZ_W);
 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
 call(code, opts->write_16_highfirst);//T States: 3, 3
 code_ptr call_dst = z80_get_native_address(context, inst->immed);
@@ -2036 +2023 @@
 }
 jmp(code, call_dst);
 break;
 }
 case Z80_IN:
-cycles(&opts->gen, inst->reg == inst->addr_mode == Z80_IMMED_INDIRECT ? 7 : 8);//T States: 4 3/4
+if (inst->addr_mode == Z80_IMMED_INDIRECT) {
+num_cycles += 3;
+}
+cycles(&opts->gen, num_cycles);//T States: 4 3/4
 if (inst->addr_mode == Z80_IMMED_INDIRECT) {
 mov_ir(code, inst->immed, opts->gen.scratch1, SZ_B);
 } else {
 mov_rr(code, opts->regs[Z80_C], opts->gen.scratch1, SZ_B);
 }
@@ -2066 +2056 @@
 /*case Z80_INI:
 case Z80_INIR:
 case Z80_IND:
 case Z80_INDR:*/
 case Z80_OUT:
-cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4
+if (inst->reg == Z80_A) {
+num_cycles += 3;
+}
+cycles(&opts->gen, num_cycles);//T States: 4 3/4
 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) {
 mov_ir(code, inst->immed, opts->gen.scratch2, SZ_B);
 } else {
 zreg_to_native(opts, Z80_C, opts->gen.scratch2);
 mov_rr(code, opts->regs[Z80_C], opts->gen.scratch2, SZ_B);
@@ -2085 +2078 @@
 }
 call(code, opts->write_io);
 z80_save_reg(inst, opts);
 break;
 case Z80_OUTI:
-cycles(&opts->gen, 9);//T States: 4, 5
+cycles(&opts->gen, num_cycles + 1);//T States: 4, 5
 //read from (HL)
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T states 3
 //undocumented N flag behavior
 //flag set on bit 7 of value written
@@ -2130 +2123 @@
 }
 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
 break;
 case Z80_OTIR: {
 code_ptr start = code->cur;
-cycles(&opts->gen, 9);//T States: 4, 5
+cycles(&opts->gen, num_cycles + 1);//T States: 4, 5
 //read from (HL)
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T states 3
 //undocumented N flag behavior
 //flag set on bit 7 of value written
@@ -2185 +2178 @@
 jmp(code, start);
 *done = code->cur - (done + 1);
 break;
 }
 case Z80_OUTD:
-cycles(&opts->gen, 9);//T States: 4, 5
+cycles(&opts->gen, num_cycles + 1);//T States: 4, 5
 //read from (HL)
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T states 3
 //undocumented N flag behavior
 //flag set on bit 7 of value written
@@ -2230 +2223 @@
 }
 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
 break;
 case Z80_OTDR: {
 code_ptr start = code->cur;
-cycles(&opts->gen, 9);//T States: 4, 5
+cycles(&opts->gen, num_cycles + 1);//T States: 4, 5
 //read from (HL)
 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
 call(code, opts->read_8);//T states 3
 //undocumented N flag behavior
 //flag set on bit 7 of value written
@@ -2333 +2326 @@
 check_alloc_code(code, 32);
 code_info stub = {code->cur, NULL};
 //TODO: make this play well with the breakpoint code
 mov_ir(code, address, opts->gen.scratch1, SZ_W);
 call(code, opts->read_8);
-//normal opcode fetch is already factored into instruction timing
-//back out the base 3 cycles from a read here
-//not quite perfect, but it will have to do for now
-cycles(&opts->gen, -3);
+//opcode fetch M-cycles have one extra T-state
+cycles(&opts->gen, 1);
+//TODO: increment R
 check_cycles_int(&opts->gen, address);
 call(code, opts->gen.save_context);
 mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W);
 push_r(code, opts->gen.context_reg);
 call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.context_reg);
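
The final hunk applies the same idea to the interpreter fallback stub: an M1 opcode fetch is 4 T-states and the generic read_8 helper already accounts for 3 of them, so the stub now charges the single remaining T-state after the read instead of crediting back 3 cycles as the old code did. A tiny sketch of that arithmetic, using made-up constant names that are not part of the real code base:

/* Hypothetical names for illustration only. */
enum {
	Z80_M1_TSTATES   = 4, /* full opcode-fetch machine cycle */
	Z80_READ_TSTATES = 3  /* portion already charged by the read helper */
};
/* extra cycles the stub adds after calling the read helper: 4 - 3 == 1 */
enum { Z80_FETCH_EXTRA = Z80_M1_TSTATES - Z80_READ_TSTATES };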