# HG changeset patch
# User Michael Pavone
# Date 1481786772 28800
# Node ID 4bc27caa6e2097eeb4a880b67a75fd8e35e072b7
# Parent  87114df913ec021c2b3f28d1ff012d611f3f3c21
Fix a subtle bug in interrupt handling introduced with the move to a single cycle register in the Z80 core. Fixes regression in Puyo Puyo 2

diff -r 87114df913ec -r 4bc27caa6e20 backend_x86.c
--- a/backend_x86.c	Wed Dec 14 20:20:34 2016 -0800
+++ b/backend_x86.c	Wed Dec 14 23:26:12 2016 -0800
@@ -13,13 +13,16 @@
 void check_cycles_int(cpu_options *opts, uint32_t address)
 {
 	code_info *code = &opts->code;
+	uint8_t cc;
 	if (opts->limit < 0) {
-		or_rr(code, opts->cycles, opts->cycles, SZ_D);
+		cmp_ir(code, 1, opts->cycles, SZ_D);
+		cc = CC_NS;
 	} else {
 		cmp_rr(code, opts->cycles, opts->limit, SZ_D);
+		cc = CC_A;
 	}
 	code_ptr jmp_off = code->cur+1;
-	jcc(code, CC_NS, jmp_off+1);
+	jcc(code, cc, jmp_off+1);
 	mov_ir(code, address, opts->scratch1, SZ_D);
 	call(code, opts->handle_cycle_limit_int);
 	*jmp_off = code->cur - (jmp_off+1);
@@ -28,14 +31,17 @@
 void check_cycles(cpu_options * opts)
 {
 	code_info *code = &opts->code;
+	uint8_t cc;
 	if (opts->limit < 0) {
-		or_rr(code, opts->cycles, opts->cycles, SZ_D);
+		cmp_ir(code, 1, opts->cycles, SZ_D);
+		cc = CC_NS;
 	} else {
 		cmp_rr(code, opts->cycles, opts->limit, SZ_D);
+		cc = CC_A;
 	}
 	check_alloc_code(code, MAX_INST_LEN*2);
 	code_ptr jmp_off = code->cur+1;
-	jcc(code, CC_NS, jmp_off+1);
+	jcc(code, cc, jmp_off+1);
 	call(code, opts->handle_cycle_limit);
 	*jmp_off = code->cur - (jmp_off+1);
 }