comparison m68k_core_x86.c @ 894:a7774fc2de4b

Partially working change to do proper stack alignment rather than doing a lame alignment check when calling a C compiled function. 68K core seems okay, but Z80 is busted.
author Michael Pavone <pavone@retrodev.com>
date Wed, 25 Nov 2015 08:40:45 -0800
parents fb4d09f874dd
children 6011409ded0d
comparison
equal deleted inserted replaced
893:4f46b4cd5035 894:a7774fc2de4b
1672 //TODO: cycle exact division 1672 //TODO: cycle exact division
1673 cycles(&opts->gen, inst->op == M68K_DIVS ? 158 : 140); 1673 cycles(&opts->gen, inst->op == M68K_DIVS ? 158 : 140);
1674 set_flag(opts, 0, FLAG_C); 1674 set_flag(opts, 0, FLAG_C);
1675 push_r(code, RDX); 1675 push_r(code, RDX);
1676 push_r(code, RAX); 1676 push_r(code, RAX);
1677 uint32_t tmp_stack_off = code->stack_off;
1677 if (dst_op->mode == MODE_REG_DIRECT) { 1678 if (dst_op->mode == MODE_REG_DIRECT) {
1678 mov_rr(code, dst_op->base, RAX, SZ_D); 1679 mov_rr(code, dst_op->base, RAX, SZ_D);
1679 } else { 1680 } else {
1680 mov_rdispr(code, dst_op->base, dst_op->disp, RAX, SZ_D); 1681 mov_rdispr(code, dst_op->base, dst_op->disp, RAX, SZ_D);
1681 } 1682 }
1715 pop_r(code, RAX); 1716 pop_r(code, RAX);
1716 pop_r(code, RDX); 1717 pop_r(code, RDX);
1717 mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D); 1718 mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D);
1718 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D); 1719 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
1719 jmp(code, opts->trap); 1720 jmp(code, opts->trap);
1721
1722 code->stack_off = tmp_stack_off;
1720 *not_zero = code->cur - (not_zero+1); 1723 *not_zero = code->cur - (not_zero+1);
1721 if (inst->op == M68K_DIVS) { 1724 if (inst->op == M68K_DIVS) {
1722 cdq(code); 1725 cdq(code);
1723 } else { 1726 } else {
1724 xor_rr(code, RDX, RDX, SZ_D); 1727 xor_rr(code, RDX, RDX, SZ_D);
1759 pop_r(code, RDX); 1762 pop_r(code, RDX);
1760 update_flags(opts, V0|Z|N); 1763 update_flags(opts, V0|Z|N);
1761 } 1764 }
1762 code_ptr end_off = code->cur + 1; 1765 code_ptr end_off = code->cur + 1;
1763 jmp(code, code->cur + 2); 1766 jmp(code, code->cur + 2);
1767 code->stack_off = tmp_stack_off;
1764 *norm_off = code->cur - (norm_off + 1); 1768 *norm_off = code->cur - (norm_off + 1);
1765 if (inst->op == M68K_DIVS) { 1769 if (inst->op == M68K_DIVS) {
1766 *skip_sec_check = code->cur - (skip_sec_check+1); 1770 *skip_sec_check = code->cur - (skip_sec_check+1);
1767 } 1771 }
1768 pop_r(code, RAX); 1772 pop_r(code, RAX);
2512 cmp_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, should_return), SZ_B); 2516 cmp_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, should_return), SZ_B);
2513 code_ptr do_ret = code->cur + 1; 2517 code_ptr do_ret = code->cur + 1;
2514 jcc(code, CC_NZ, do_ret); 2518 jcc(code, CC_NZ, do_ret);
2515 retn(code); 2519 retn(code);
2516 *do_ret = code->cur - (do_ret+1); 2520 *do_ret = code->cur - (do_ret+1);
2521 uint32_t tmp_stack_off = code->stack_off;
2522 //fetch return address and adjust RSP
2517 pop_r(code, opts->gen.scratch1); 2523 pop_r(code, opts->gen.scratch1);
2524 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
2525 //save return address for restoring later
2518 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, resume_pc), SZ_PTR); 2526 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, resume_pc), SZ_PTR);
2519 retn(code); 2527 retn(code);
2528 code->stack_off = tmp_stack_off;
2520 *do_int = code->cur - (do_int+1); 2529 *do_int = code->cur - (do_int+1);
2521 //implement 1 instruction latency 2530 //implement 1 instruction latency
2522 cmp_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); 2531 cmp_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B);
2523 do_int = code->cur + 1; 2532 do_int = code->cur + 1;
2524 jcc(code, CC_NZ, do_int); 2533 jcc(code, CC_NZ, do_int);
2591 add_ir(code, 0x60, opts->gen.scratch1, SZ_D); 2600 add_ir(code, 0x60, opts->gen.scratch1, SZ_D);
2592 call(code, opts->read_32); 2601 call(code, opts->read_32);
2593 call(code, opts->native_addr_and_sync); 2602 call(code, opts->native_addr_and_sync);
2594 //2 prefetch bus operations + 2 idle bus cycles 2603 //2 prefetch bus operations + 2 idle bus cycles
2595 cycles(&opts->gen, 10); 2604 cycles(&opts->gen, 10);
2605 tmp_stack_off = code->stack_off;
2596 //discard function return address 2606 //discard function return address
2597 pop_r(code, opts->gen.scratch2); 2607 pop_r(code, opts->gen.scratch2);
2608 add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
2598 jmp_r(code, opts->gen.scratch1); 2609 jmp_r(code, opts->gen.scratch1);
2610 code->stack_off = tmp_stack_off;
2599 2611
2600 opts->trap = code->cur; 2612 opts->trap = code->cur;
2601 push_r(code, opts->gen.scratch2); 2613 push_r(code, opts->gen.scratch2);
2602 //swap USP and SSP if not already in supervisor mode 2614 //swap USP and SSP if not already in supervisor mode
2603 check_user_mode_swap_ssp_usp(opts); 2615 check_user_mode_swap_ssp_usp(opts);