comparison z80_to_x86.c @ 803:236a184bf6f0

Merge
author Michael Pavone <pavone@retrodev.com>
date Sun, 26 Jul 2015 16:51:03 -0700
parents 724bbec47f86
children ab017fb09e77
comparing 802:6811f601008f with 803:236a184bf6f0
5 */ 5 */
6 #include "z80inst.h" 6 #include "z80inst.h"
7 #include "z80_to_x86.h" 7 #include "z80_to_x86.h"
8 #include "gen_x86.h" 8 #include "gen_x86.h"
9 #include "mem.h" 9 #include "mem.h"
10 #include "util.h"
10 #include <stdio.h> 11 #include <stdio.h>
11 #include <stdlib.h> 12 #include <stdlib.h>
12 #include <stddef.h> 13 #include <stddef.h>
13 #include <string.h> 14 #include <string.h>
14 15
15 #define MODE_UNUSED (MODE_IMMED-1) 16 #define MODE_UNUSED (MODE_IMMED-1)
16
17 #define ZCYCLES RBP
18 #define ZLIMIT RDI
19 #define SCRATCH1 R13
20 #define SCRATCH2 R14
21 #define CONTEXT RSI
22 17
23 //#define DO_DEBUG_PRINT 18 //#define DO_DEBUG_PRINT
24 19
25 #ifdef DO_DEBUG_PRINT 20 #ifdef DO_DEBUG_PRINT
26 #define dprintf printf 21 #define dprintf printf
27 #else 22 #else
28 #define dprintf 23 #define dprintf
29 #endif 24 #endif
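
A note on the empty dprintf definition: with DO_DEBUG_PRINT undefined, the
macro expands to nothing, so a call site must still compile as plain C. A
minimal sketch of what the preprocessor leaves behind:

    dprintf("translating %X, %d cycles\n", address, num_cycles);
    /* becomes the expression statement below: the arguments are still
       evaluated (comma operator), they just aren't printed */
    ("translating %X, %d cycles\n", address, num_cycles);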
30 25
31 void z80_read_byte(); 26 uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst);
32 void z80_read_word(); 27 void z80_handle_deferred(z80_context * context);
33 void z80_write_byte();
34 void z80_write_word_highfirst();
35 void z80_write_word_lowfirst();
36 void z80_save_context();
37 void z80_native_addr();
38 void z80_do_sync();
39 void z80_handle_cycle_limit_int();
40 void z80_retrans_stub();
41 void z80_io_read();
42 void z80_io_write();
43 void z80_halt();
44 void z80_save_context();
45 void z80_load_context();
46 28
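
The dropped forward declarations are the structural point of this merge: the
translator stops calling fixed assembly stubs (z80_read_byte,
z80_write_word_highfirst, ...) and instead calls helper routines generated
per context and stored in z80_options. A rough sketch of the shape this
implies — field names are taken from their uses later in this diff, but the
gen sub-struct type and the array bound are assumptions; the real layout
lives in z80_to_x86.h, not in this file:

    typedef struct {
        cpu_options gen;    /* assumed: code buffer, context_reg, scratch1/2 */
        code_ptr read_8, write_8;
        code_ptr read_16, write_16_lowfirst, write_16_highfirst;
        int8_t regs[Z80_UNUSED];  /* host reg per Z80 reg, negative = spilled */
    } z80_options;

    call(code, opts->read_8);  /* replaces call(dst, (uint8_t *)z80_read_byte) */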
47 uint8_t z80_size(z80inst * inst) 29 uint8_t z80_size(z80inst * inst)
48 { 30 {
49 uint8_t reg = (inst->reg & 0x1F); 31 uint8_t reg = (inst->reg & 0x1F);
50 if (reg != Z80_UNUSED && reg != Z80_USE_IMMED) { 32 if (reg != Z80_UNUSED && reg != Z80_USE_IMMED) {
51 return reg < Z80_BC ? SZ_B : SZ_W; 33 return reg < Z80_BC ? SZ_B : SZ_W;
52 } 34 }
53 //TODO: Handle any necessary special cases 35 //TODO: Handle any necessary special cases
54 return SZ_B; 36 return SZ_B;
55 } 37 }
56 38
57 uint8_t * zcycles(uint8_t * dst, uint32_t num_cycles) 39 uint8_t zf_off(uint8_t flag)
58 { 40 {
59 return add_ir(dst, num_cycles, ZCYCLES, SZ_D); 41 return offsetof(z80_context, flags) + flag;
60 } 42 }
61 43
62 uint8_t * z80_check_cycles_int(uint8_t * dst, uint16_t address) 44 uint8_t zaf_off(uint8_t flag)
63 { 45 {
64 dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D); 46 return offsetof(z80_context, alt_flags) + flag;
65 uint8_t * jmp_off = dst+1; 47 }
66 dst = jcc(dst, CC_NC, dst + 7); 48
67 dst = mov_ir(dst, address, SCRATCH1, SZ_W); 49 uint8_t zr_off(uint8_t reg)
68 dst = call(dst, (uint8_t *)z80_handle_cycle_limit_int); 50 {
69 *jmp_off = dst - (jmp_off+1); 51 if (reg > Z80_A) {
70 return dst; 52 reg = z80_low_reg(reg);
71 } 53 }
72 54 return offsetof(z80_context, regs) + reg;
73 uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts) 55 }
74 { 56
57 uint8_t zar_off(uint8_t reg)
58 {
59 if (reg > Z80_A) {
60 reg = z80_low_reg(reg);
61 }
62 return offsetof(z80_context, alt_regs) + reg;
63 }
64
65 void zreg_to_native(z80_options *opts, uint8_t reg, uint8_t native_reg)
66 {
67 if (opts->regs[reg] >= 0) {
68 mov_rr(&opts->gen.code, opts->regs[reg], native_reg, reg > Z80_A ? SZ_W : SZ_B);
69 } else {
70 mov_rdispr(&opts->gen.code, opts->gen.context_reg, zr_off(reg), native_reg, reg > Z80_A ? SZ_W : SZ_B);
71 }
72 }
73
74 void native_to_zreg(z80_options *opts, uint8_t native_reg, uint8_t reg)
75 {
76 if (opts->regs[reg] >= 0) {
77 mov_rr(&opts->gen.code, native_reg, opts->regs[reg], reg > Z80_A ? SZ_W : SZ_B);
78 } else {
79 mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, zr_off(reg), reg > Z80_A ? SZ_W : SZ_B);
80 }
81 }
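
Together, zr_off/zar_off and this zreg_to_native/native_to_zreg pair
implement the register-allocation fallback used throughout the rewritten
translator: a Z80 register either lives in a host register
(opts->regs[reg] >= 0) or in the context structure at the offset zr_off
computes. A usage sketch; both paths yield the same effect:

    zreg_to_native(opts, Z80_BC, opts->gen.scratch1);   /* load BC        */
    /* ... operate on scratch1 ... */
    native_to_zreg(opts, opts->gen.scratch1, Z80_BC);   /* store it back  */

Note the size selection: anything above Z80_A is a 16-bit pair, so the
helpers use SZ_W, and zr_off first maps the pair to its low half via
z80_low_reg so the displacement lands on the first byte of the pair.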
82
83 void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts)
84 {
85 code_info *code = &opts->gen.code;
75 if (inst->reg == Z80_USE_IMMED) { 86 if (inst->reg == Z80_USE_IMMED) {
76 ea->mode = MODE_IMMED; 87 ea->mode = MODE_IMMED;
77 ea->disp = inst->immed; 88 ea->disp = inst->immed;
78 } else if ((inst->reg & 0x1F) == Z80_UNUSED) { 89 } else if ((inst->reg & 0x1F) == Z80_UNUSED) {
79 ea->mode = MODE_UNUSED; 90 ea->mode = MODE_UNUSED;
80 } else { 91 } else {
81 ea->mode = MODE_REG_DIRECT; 92 ea->mode = MODE_REG_DIRECT;
82 if (inst->reg == Z80_IYH) { 93 if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
83 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { 94 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) {
84 dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W); 95 mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W);
85 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); 96 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
86 ea->base = SCRATCH1; 97 ea->base = opts->gen.scratch1;
87 } else { 98 } else {
88 ea->base = opts->regs[Z80_IYL]; 99 ea->base = opts->regs[Z80_IYL];
89 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 100 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
90 } 101 }
91 } else if(opts->regs[inst->reg] >= 0) { 102 } else if(opts->regs[inst->reg] >= 0) {
92 ea->base = opts->regs[inst->reg]; 103 ea->base = opts->regs[inst->reg];
93 if (ea->base >= AH && ea->base <= BH) { 104 if (ea->base >= AH && ea->base <= BH) {
94 if ((inst->addr_mode & 0x1F) == Z80_REG) { 105 if ((inst->addr_mode & 0x1F) == Z80_REG) {
95 uint8_t other_reg = opts->regs[inst->ea_reg]; 106 uint8_t other_reg = opts->regs[inst->ea_reg];
96 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { 107 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
97 //we can't mix an *H reg with a register that requires the REX prefix 108 //we can't mix an *H reg with a register that requires the REX prefix
98 ea->base = opts->regs[z80_low_reg(inst->reg)]; 109 ea->base = opts->regs[z80_low_reg(inst->reg)];
99 dst = ror_ir(dst, 8, ea->base, SZ_W); 110 ror_ir(code, 8, ea->base, SZ_W);
100 } 111 }
101 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { 112 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) {
102 //temp regs require REX prefix too 113 //temp regs require REX prefix too
103 ea->base = opts->regs[z80_low_reg(inst->reg)]; 114 ea->base = opts->regs[z80_low_reg(inst->reg)];
104 dst = ror_ir(dst, 8, ea->base, SZ_W); 115 ror_ir(code, 8, ea->base, SZ_W);
105 } 116 }
106 } 117 }
107 } else { 118 } else {
108 ea->mode = MODE_REG_DISPLACE8; 119 ea->mode = MODE_REG_DISPLACE8;
109 ea->base = CONTEXT; 120 ea->base = opts->gen.context_reg;
110 ea->disp = offsetof(z80_context, regs) + inst->reg; 121 ea->disp = zr_off(inst->reg);
111 } 122 }
112 } 123 }
113 return dst; 124 }
114 } 125
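
The recurring ror_ir(..., 8, ..., SZ_W) pairs are the translator's trick for
addressing IYH (and other high halves) on x86: rotate the 16-bit register
image by 8 so the high byte becomes the directly encodable low byte, operate
on it, then rotate back before anything else observes the register. In
isolation:

    ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);  /* IYH now in the low byte */
    /* ... byte ops on opts->regs[Z80_IYL], which aliases that low byte ... */
    ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);  /* restore normal rotation */

z80_save_reg and z80_save_ea below exist largely to emit that restoring
rotation for the cases where translate_z80_reg/translate_z80_ea left a
register rotated.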
115 126 void z80_save_reg(z80inst * inst, z80_options * opts)
116 uint8_t * z80_save_reg(uint8_t * dst, z80inst * inst, x86_z80_options * opts) 127 {
117 { 128 code_info *code = &opts->gen.code;
118 if (inst->reg == Z80_IYH) { 129 if (inst->reg == Z80_USE_IMMED || inst->reg == Z80_UNUSED) {
130 return;
131 }
132 if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
119 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { 133 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) {
120 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 134 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
121 dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B); 135 mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B);
122 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 136 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
123 } else { 137 } else {
124 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 138 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
125 } 139 }
126 } else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) { 140 } else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) {
127 if ((inst->addr_mode & 0x1F) == Z80_REG) { 141 if ((inst->addr_mode & 0x1F) == Z80_REG) {
128 uint8_t other_reg = opts->regs[inst->ea_reg]; 142 uint8_t other_reg = opts->regs[inst->ea_reg];
129 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { 143 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
130 //we can't mix an *H reg with a register that requires the REX prefix 144 //we can't mix an *H reg with a register that requires the REX prefix
131 dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); 145 ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
132 } 146 }
133 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { 147 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) {
134 //temp regs require REX prefix too 148 //temp regs require REX prefix too
135 dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); 149 ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
136 } 150 }
137 } 151 }
138 return dst; 152 }
139 } 153
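
The "can't mix an *H reg with a register that requires the REX prefix"
comments refer to an x86-64 encoding rule: once an instruction carries a REX
prefix (required for R8-R15, or for byte access to RSP-RDI as
SPL/BPL/SIL/DIL), the encodings that normally select AH/CH/DH/BH are
reinterpreted as those new low-byte registers, so the *H registers simply
cannot appear in such an instruction. A hypothetical illustration, assuming
BC were mapped to host BX so that BH held Z80 B:

    mov_rr(code, BH, R8, SZ_B);   /* no valid encoding: REX + BH impossible */
    /* the fallback used here instead: */
    ror_ir(code, 8, RBX, SZ_W);   /* rotate B down into BL                  */
    mov_rr(code, BL, R8, SZ_B);   /* BL is encodable alongside R8           */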
140 154 void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t read, uint8_t modify)
141 uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts, uint8_t read, uint8_t modify) 155 {
142 { 156 code_info *code = &opts->gen.code;
143 uint8_t size, reg, areg; 157 uint8_t size, areg;
158 int8_t reg;
144 ea->mode = MODE_REG_DIRECT; 159 ea->mode = MODE_REG_DIRECT;
145 areg = read ? SCRATCH1 : SCRATCH2; 160 areg = read ? opts->gen.scratch1 : opts->gen.scratch2;
146 switch(inst->addr_mode & 0x1F) 161 switch(inst->addr_mode & 0x1F)
147 { 162 {
148 case Z80_REG: 163 case Z80_REG:
149 if (inst->ea_reg == Z80_IYH) { 164 if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
150 if (inst->reg == Z80_IYL) { 165 if (inst->reg == Z80_IYL) {
151 dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W); 166 mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W);
152 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); 167 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
153 ea->base = SCRATCH1; 168 ea->base = opts->gen.scratch1;
154 } else { 169 } else {
155 ea->base = opts->regs[Z80_IYL]; 170 ea->base = opts->regs[Z80_IYL];
156 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 171 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
157 } 172 }
158 } else { 173 } else if(opts->regs[inst->ea_reg] >= 0) {
159 ea->base = opts->regs[inst->ea_reg]; 174 ea->base = opts->regs[inst->ea_reg];
160 if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { 175 if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) {
161 uint8_t other_reg = opts->regs[inst->reg]; 176 uint8_t other_reg = opts->regs[inst->reg];
177 #ifdef X86_64
162 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { 178 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
163 //we can't mix an *H reg with a register that requires the REX prefix 179 //we can't mix an *H reg with a register that requires the REX prefix
164 ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; 180 ea->base = opts->regs[z80_low_reg(inst->ea_reg)];
165 dst = ror_ir(dst, 8, ea->base, SZ_W); 181 ror_ir(code, 8, ea->base, SZ_W);
166 } 182 }
167 } 183 #endif
184 }
185 } else {
186 ea->mode = MODE_REG_DISPLACE8;
187 ea->base = opts->gen.context_reg;
188 ea->disp = zr_off(inst->ea_reg);
168 } 189 }
169 break; 190 break;
170 case Z80_REG_INDIRECT: 191 case Z80_REG_INDIRECT:
171 dst = mov_rr(dst, opts->regs[inst->ea_reg], areg, SZ_W); 192 zreg_to_native(opts, inst->ea_reg, areg);
172 size = z80_size(inst); 193 size = z80_size(inst);
173 if (read) { 194 if (read) {
174 if (modify) { 195 if (modify) {
175 //dst = push_r(dst, SCRATCH1); 196 //push_r(code, opts->gen.scratch1);
176 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W); 197 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
177 } 198 }
178 if (size == SZ_B) { 199 if (size == SZ_B) {
179 dst = call(dst, (uint8_t *)z80_read_byte); 200 call(code, opts->read_8);
180 } else { 201 } else {
181 dst = call(dst, (uint8_t *)z80_read_word); 202 call(code, opts->read_16);
182 } 203 }
183 if (modify) { 204 if (modify) {
184 //dst = pop_r(dst, SCRATCH2); 205 //pop_r(code, opts->gen.scratch2);
185 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W); 206 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W);
186 } 207 }
187 } 208 }
188 ea->base = SCRATCH1; 209 ea->base = opts->gen.scratch1;
189 break; 210 break;
190 case Z80_IMMED: 211 case Z80_IMMED:
191 ea->mode = MODE_IMMED; 212 ea->mode = MODE_IMMED;
192 ea->disp = inst->immed; 213 ea->disp = inst->immed;
193 break; 214 break;
194 case Z80_IMMED_INDIRECT: 215 case Z80_IMMED_INDIRECT:
195 dst = mov_ir(dst, inst->immed, areg, SZ_W); 216 mov_ir(code, inst->immed, areg, SZ_W);
196 size = z80_size(inst); 217 size = z80_size(inst);
197 if (read) { 218 if (read) {
198 /*if (modify) { 219 /*if (modify) {
199 dst = push_r(dst, SCRATCH1); 220 push_r(code, opts->gen.scratch1);
200 }*/ 221 }*/
201 if (size == SZ_B) { 222 if (size == SZ_B) {
202 dst = call(dst, (uint8_t *)z80_read_byte); 223 call(code, opts->read_8);
203 } else { 224 } else {
204 dst = call(dst, (uint8_t *)z80_read_word); 225 call(code, opts->read_16);
205 } 226 }
206 if (modify) { 227 if (modify) {
207 //dst = pop_r(dst, SCRATCH2); 228 //pop_r(code, opts->gen.scratch2);
208 dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_W); 229 mov_ir(code, inst->immed, opts->gen.scratch2, SZ_W);
209 } 230 }
210 } 231 }
211 ea->base = SCRATCH1; 232 ea->base = opts->gen.scratch1;
212 break; 233 break;
213 case Z80_IX_DISPLACE: 234 case Z80_IX_DISPLACE:
214 case Z80_IY_DISPLACE: 235 case Z80_IY_DISPLACE:
215 reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY]; 236 zreg_to_native(opts, (inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY, areg);
216 dst = mov_rr(dst, reg, areg, SZ_W); 237 add_ir(code, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W);
217 dst = add_ir(dst, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W);
218 size = z80_size(inst); 238 size = z80_size(inst);
219 if (read) { 239 if (read) {
220 if (modify) { 240 if (modify) {
221 //dst = push_r(dst, SCRATCH1); 241 //push_r(code, opts->gen.scratch1);
222 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W); 242 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
223 } 243 }
224 if (size == SZ_B) { 244 if (size == SZ_B) {
225 dst = call(dst, (uint8_t *)z80_read_byte); 245 call(code, opts->read_8);
226 } else { 246 } else {
227 dst = call(dst, (uint8_t *)z80_read_word); 247 call(code, opts->read_16);
228 } 248 }
229 if (modify) { 249 if (modify) {
230 //dst = pop_r(dst, SCRATCH2); 250 //pop_r(code, opts->gen.scratch2);
231 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W); 251 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W);
232 } 252 }
233 } 253 }
234 ea->base = SCRATCH1; 254 ea->base = opts->gen.scratch1;
235 break; 255 break;
236 case Z80_UNUSED: 256 case Z80_UNUSED:
237 ea->mode = MODE_UNUSED; 257 ea->mode = MODE_UNUSED;
238 break; 258 break;
239 default: 259 default:
240 fprintf(stderr, "Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F); 260 fatal_error("Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F);
241 exit(1); 261 }
242 } 262 }
243 return dst; 263
244 } 264 void z80_save_ea(code_info *code, z80inst * inst, z80_options * opts)
245
246 uint8_t * z80_save_ea(uint8_t * dst, z80inst * inst, x86_z80_options * opts)
247 { 265 {
248 if ((inst->addr_mode & 0x1F) == Z80_REG) { 266 if ((inst->addr_mode & 0x1F) == Z80_REG) {
249 if (inst->ea_reg == Z80_IYH) { 267 if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) {
250 if (inst->reg == Z80_IYL) { 268 if (inst->reg == Z80_IYL) {
251 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 269 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
252 dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B); 270 mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B);
253 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 271 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
254 } else { 272 } else {
255 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); 273 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
256 } 274 }
257 } else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { 275 } else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
258 uint8_t other_reg = opts->regs[inst->reg]; 276 uint8_t other_reg = opts->regs[inst->reg];
277 #ifdef X86_64
259 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { 278 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
260 //we can't mix an *H reg with a register that requires the REX prefix 279 //we can't mix an *H reg with a register that requires the REX prefix
261 dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W); 280 ror_ir(code, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W);
262 } 281 }
263 } 282 #endif
264 } 283 }
265 return dst; 284 }
266 } 285 }
267 286
268 uint8_t * z80_save_result(uint8_t * dst, z80inst * inst) 287 void z80_save_result(z80_options *opts, z80inst * inst)
269 { 288 {
270 switch(inst->addr_mode & 0x1f) 289 switch(inst->addr_mode & 0x1f)
271 { 290 {
272 case Z80_REG_INDIRECT: 291 case Z80_REG_INDIRECT:
273 case Z80_IMMED_INDIRECT: 292 case Z80_IMMED_INDIRECT:
274 case Z80_IX_DISPLACE: 293 case Z80_IX_DISPLACE:
275 case Z80_IY_DISPLACE: 294 case Z80_IY_DISPLACE:
276 if (z80_size(inst) == SZ_B) { 295 if (z80_size(inst) == SZ_B) {
277 dst = call(dst, (uint8_t *)z80_write_byte); 296 call(&opts->gen.code, opts->write_8);
278 } else { 297 } else {
279 dst = call(dst, (uint8_t *)z80_write_word_lowfirst); 298 call(&opts->gen.code, opts->write_16_lowfirst);
280 } 299 }
281 } 300 }
282 return dst;
283 } 301 }
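
One subtlety shared by the indirect modes above: for read-modify-write
forms, the effective address has to survive the read helper, which may
clobber both scratch registers. The generated sequence therefore parks the
address in the context before the read and reloads it into scratch2 for the
write that z80_save_result later emits. Roughly:

    mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg,
               offsetof(z80_context, scratch1), SZ_W);   /* save address    */
    call(code, opts->read_8);                            /* value->scratch1 */
    mov_rdispr(code, opts->gen.context_reg,
               offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W);
    /* ... modify the value ... */
    call(code, opts->write_8);                           /* addr in scratch2 */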
284 302
285 enum { 303 enum {
286 DONT_READ=0, 304 DONT_READ=0,
287 READ 305 READ
288 }; 306 };
289 307
290 enum { 308 enum {
291 DONT_MODIFY=0, 309 DONT_MODIFY=0,
292 MODIFY 310 MODIFY
293 }; 311 };
294
295 uint8_t zf_off(uint8_t flag)
296 {
297 return offsetof(z80_context, flags) + flag;
298 }
299
300 uint8_t zaf_off(uint8_t flag)
301 {
302 return offsetof(z80_context, alt_flags) + flag;
303 }
304
305 uint8_t zar_off(uint8_t reg)
306 {
307 return offsetof(z80_context, alt_regs) + reg;
308 }
309 312
310 void z80_print_regs_exit(z80_context * context) 313 void z80_print_regs_exit(z80_context * context)
311 { 314 {
312 printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n", 315 printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n",
313 context->regs[Z80_A], context->regs[Z80_B], context->regs[Z80_C], 316 context->regs[Z80_A], context->regs[Z80_B], context->regs[Z80_C],
324 (context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL], 327 (context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL],
325 (context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]); 328 (context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]);
326 exit(0); 329 exit(0);
327 } 330 }
328 331
329 uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context, uint16_t address) 332 void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t interp)
330 { 333 {
331 uint32_t cycles; 334 uint32_t num_cycles;
332 x86_ea src_op, dst_op; 335 host_ea src_op, dst_op;
333 uint8_t size; 336 uint8_t size;
334 x86_z80_options *opts = context->options; 337 z80_options *opts = context->options;
335 uint8_t * start = dst; 338 uint8_t * start = opts->gen.code.cur;
336 dst = z80_check_cycles_int(dst, address); 339 code_info *code = &opts->gen.code;
340 if (!interp) {
341 check_cycles_int(&opts->gen, address);
342 if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) {
343 zbreakpoint_patch(context, address, start);
344 }
345 #ifdef Z80_LOG_ADDRESS
346 log_address(&opts->gen, address, "Z80: %X @ %d\n");
347 #endif
348 }
337 switch(inst->op) 349 switch(inst->op)
338 { 350 {
339 case Z80_LD: 351 case Z80_LD:
340 size = z80_size(inst); 352 size = z80_size(inst);
341 switch (inst->addr_mode & 0x1F) 353 switch (inst->addr_mode & 0x1F)
342 { 354 {
343 case Z80_REG: 355 case Z80_REG:
344 case Z80_REG_INDIRECT: 356 case Z80_REG_INDIRECT:
345 cycles = size == SZ_B ? 4 : 6; 357 num_cycles = size == SZ_B ? 4 : 6;
346 if (inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { 358 if (inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) {
347 cycles += 4; 359 num_cycles += 4;
348 } 360 }
349 if (inst->reg == Z80_I || inst->ea_reg == Z80_I) { 361 if (inst->reg == Z80_I || inst->ea_reg == Z80_I) {
350 cycles += 5; 362 num_cycles += 5;
351 } 363 }
352 break; 364 break;
353 case Z80_IMMED: 365 case Z80_IMMED:
354 cycles = size == SZ_B ? 7 : 10; 366 num_cycles = size == SZ_B ? 7 : 10;
355 break; 367 break;
356 case Z80_IMMED_INDIRECT: 368 case Z80_IMMED_INDIRECT:
357 cycles = 10; 369 num_cycles = 10;
358 break; 370 break;
359 case Z80_IX_DISPLACE: 371 case Z80_IX_DISPLACE:
360 case Z80_IY_DISPLACE: 372 case Z80_IY_DISPLACE:
361 cycles = 16; 373 num_cycles = 16;
362 break; 374 break;
363 } 375 }
364 if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) { 376 if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) {
365 cycles += 4; 377 num_cycles += 4;
366 } 378 }
367 dst = zcycles(dst, cycles); 379 cycles(&opts->gen, num_cycles);
368 if (inst->addr_mode & Z80_DIR) { 380 if (inst->addr_mode & Z80_DIR) {
369 dst = translate_z80_ea(inst, &dst_op, dst, opts, DONT_READ, MODIFY); 381 translate_z80_ea(inst, &dst_op, opts, DONT_READ, MODIFY);
370 dst = translate_z80_reg(inst, &src_op, dst, opts); 382 translate_z80_reg(inst, &src_op, opts);
371 } else { 383 } else {
372 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); 384 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
373 dst = translate_z80_reg(inst, &dst_op, dst, opts); 385 translate_z80_reg(inst, &dst_op, opts);
374 } 386 }
375 if (src_op.mode == MODE_REG_DIRECT) { 387 if (src_op.mode == MODE_REG_DIRECT) {
376 if(dst_op.mode == MODE_REG_DISPLACE8) { 388 if(dst_op.mode == MODE_REG_DISPLACE8) {
377 dst = mov_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size); 389 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
378 } else { 390 } else {
379 dst = mov_rr(dst, src_op.base, dst_op.base, size); 391 mov_rr(code, src_op.base, dst_op.base, size);
380 } 392 }
381 } else if(src_op.mode == MODE_IMMED) { 393 } else if(src_op.mode == MODE_IMMED) {
382 dst = mov_ir(dst, src_op.disp, dst_op.base, size); 394 if(dst_op.mode == MODE_REG_DISPLACE8) {
383 } else { 395 mov_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, size);
384 dst = mov_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, size); 396 } else {
385 } 397 mov_ir(code, src_op.disp, dst_op.base, size);
386 dst = z80_save_reg(dst, inst, opts); 398 }
387 dst = z80_save_ea(dst, inst, opts); 399 } else {
400 if(dst_op.mode == MODE_REG_DISPLACE8) {
401 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, size);
402 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, size);
403 } else {
404 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size);
405 }
406 }
407 if (inst->ea_reg == Z80_I && inst->addr_mode == Z80_REG) {
408 //ld a, i sets some flags
409 //TODO: Implement half-carry flag
410 cmp_ir(code, 0, dst_op.base, SZ_B);
411 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
412 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
413 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
414 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch1, SZ_B);
415 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
416 }
417 z80_save_reg(inst, opts);
418 z80_save_ea(code, inst, opts);
388 if (inst->addr_mode & Z80_DIR) { 419 if (inst->addr_mode & Z80_DIR) {
389 dst = z80_save_result(dst, inst); 420 z80_save_result(opts, inst);
390 } 421 }
391 break; 422 break;
392 case Z80_PUSH: 423 case Z80_PUSH:
393 dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); 424 cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5);
394 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 425 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
395 if (inst->reg == Z80_AF) { 426 if (inst->reg == Z80_AF) {
396 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B); 427 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
397 dst = shl_ir(dst, 8, SCRATCH1, SZ_W); 428 shl_ir(code, 8, opts->gen.scratch1, SZ_W);
398 dst = mov_rdisp8r(dst, CONTEXT, zf_off(ZF_S), SCRATCH1, SZ_B); 429 mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B);
399 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); 430 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
400 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_Z), SCRATCH1, SZ_B); 431 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B);
401 dst = shl_ir(dst, 2, SCRATCH1, SZ_B); 432 shl_ir(code, 2, opts->gen.scratch1, SZ_B);
402 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_H), SCRATCH1, SZ_B); 433 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B);
403 dst = shl_ir(dst, 2, SCRATCH1, SZ_B); 434 shl_ir(code, 2, opts->gen.scratch1, SZ_B);
404 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_PV), SCRATCH1, SZ_B); 435 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B);
405 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); 436 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
406 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_N), SCRATCH1, SZ_B); 437 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B);
407 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); 438 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
408 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_C), SCRATCH1, SZ_B); 439 or_rdispr(code, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B);
409 } else { 440 } else {
410 dst = translate_z80_reg(inst, &src_op, dst, opts); 441 zreg_to_native(opts, inst->reg, opts->gen.scratch1);
411 dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_W); 442 }
412 } 443 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
413 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); 444 call(code, opts->write_16_highfirst);
414 dst = call(dst, (uint8_t *)z80_write_word_highfirst);
415 //no call to save_z80_reg needed since there's no chance we'll use only 445 //no call to save_z80_reg needed since there's no chance we'll use only
416 //the upper half of a register pair 446 //the upper half of a register pair
417 break; 447 break;
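
PUSH AF cannot push a 16-bit register directly because the emulated flags
are stored unpacked, one byte each, in the context. The shift/or chain above
rebuilds the architectural F layout: with shifts of 1, 2, 2, 1, 1 between
the ors, each flag lands on its documented bit. Equivalent C, as a sketch:

    /* bit:  7 6 5 4 3 2  1 0
             S Z 0 H 0 PV N C   (undocumented bits 5 and 3 stay clear) */
    uint8_t f = (s << 7) | (z << 6) | (h << 4) | (pv << 2) | (n << 1) | c;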
418 case Z80_POP: 448 case Z80_POP:
419 dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); 449 cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4);
420 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); 450 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
421 dst = call(dst, (uint8_t *)z80_read_word); 451 call(code, opts->read_16);
422 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 452 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
423 if (inst->reg == Z80_AF) { 453 if (inst->reg == Z80_AF) {
424 454
425 dst = bt_ir(dst, 0, SCRATCH1, SZ_W); 455 bt_ir(code, 0, opts->gen.scratch1, SZ_W);
426 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 456 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
427 dst = bt_ir(dst, 1, SCRATCH1, SZ_W); 457 bt_ir(code, 1, opts->gen.scratch1, SZ_W);
428 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_N)); 458 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_N));
429 dst = bt_ir(dst, 2, SCRATCH1, SZ_W); 459 bt_ir(code, 2, opts->gen.scratch1, SZ_W);
430 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_PV)); 460 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_PV));
431 dst = bt_ir(dst, 4, SCRATCH1, SZ_W); 461 bt_ir(code, 4, opts->gen.scratch1, SZ_W);
432 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_H)); 462 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_H));
433 dst = bt_ir(dst, 6, SCRATCH1, SZ_W); 463 bt_ir(code, 6, opts->gen.scratch1, SZ_W);
434 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_Z)); 464 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_Z));
435 dst = bt_ir(dst, 7, SCRATCH1, SZ_W); 465 bt_ir(code, 7, opts->gen.scratch1, SZ_W);
436 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_S)); 466 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S));
437 dst = shr_ir(dst, 8, SCRATCH1, SZ_W); 467 shr_ir(code, 8, opts->gen.scratch1, SZ_W);
438 dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); 468 native_to_zreg(opts, opts->gen.scratch1, Z80_A);
439 } else { 469 } else {
440 dst = translate_z80_reg(inst, &src_op, dst, opts); 470 native_to_zreg(opts, opts->gen.scratch1, inst->reg);
441 dst = mov_rr(dst, SCRATCH1, src_op.base, SZ_W);
442 } 471 }
443 //no call to save_z80_reg needed since there's no chance we'll use only 472 //no call to save_z80_reg needed since there's no chance we'll use only
444 //the upper half of a register pair 473 //the upper half of a register pair
445 break; 474 break;
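
POP AF is the inverse: instead of shifting and masking, the generated code
tests each bit of the popped word with bt_ir, which copies the selected bit
into the host carry flag, and setcc_rdisp(CC_C, ...) then stores that bit
straight into the matching context flag byte. The bit-to-flag map used
above, for reference:

    /* bit 0 -> ZF_C, 1 -> ZF_N, 2 -> ZF_PV, 4 -> ZF_H, 6 -> ZF_Z, 7 -> ZF_S */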
446 case Z80_EX: 475 case Z80_EX:
447 if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) { 476 if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) {
448 cycles = 4; 477 num_cycles = 4;
449 } else { 478 } else {
450 cycles = 8; 479 num_cycles = 8;
451 } 480 }
452 dst = zcycles(dst, cycles); 481 cycles(&opts->gen, num_cycles);
453 if (inst->addr_mode == Z80_REG) { 482 if (inst->addr_mode == Z80_REG) {
454 if(inst->reg == Z80_AF) { 483 if(inst->reg == Z80_AF) {
455 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B); 484 zreg_to_native(opts, Z80_A, opts->gen.scratch1);
456 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); 485 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->gen.scratch2, SZ_B);
457 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_A), SZ_B); 486 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B);
487 native_to_zreg(opts, opts->gen.scratch2, Z80_A);
458 488
459 //Flags are currently word aligned, so we can move 489 //Flags are currently word aligned, so we can move
460 //them efficiently a word at a time 490 //them efficiently a word at a time
461 for (int f = ZF_C; f < ZF_NUM; f+=2) { 491 for (int f = ZF_C; f < ZF_NUM; f+=2) {
462 dst = mov_rdisp8r(dst, CONTEXT, zf_off(f), SCRATCH1, SZ_W); 492 mov_rdispr(code, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W);
463 dst = mov_rdisp8r(dst, CONTEXT, zaf_off(f), SCRATCH2, SZ_W); 493 mov_rdispr(code, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W);
464 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zaf_off(f), SZ_W); 494 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W);
465 dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zf_off(f), SZ_W); 495 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W);
466 } 496 }
467 } else { 497 } else {
468 dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); 498 if (opts->regs[Z80_DE] >= 0 && opts->regs[Z80_HL] >= 0) {
469 } 499 xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W);
470 } else { 500 } else {
471 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); 501 zreg_to_native(opts, Z80_DE, opts->gen.scratch1);
472 dst = call(dst, (uint8_t *)z80_read_byte); 502 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
473 dst = xchg_rr(dst, opts->regs[inst->reg], SCRATCH1, SZ_B); 503 native_to_zreg(opts, opts->gen.scratch1, Z80_HL);
474 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); 504 native_to_zreg(opts, opts->gen.scratch2, Z80_DE);
475 dst = call(dst, (uint8_t *)z80_write_byte); 505 }
476 dst = zcycles(dst, 1); 506 }
507 } else {
508 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
509 call(code, opts->read_8);
510 if (opts->regs[inst->reg] >= 0) {
511 xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B);
512 } else {
513 zreg_to_native(opts, inst->reg, opts->gen.scratch2);
514 xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B);
515 native_to_zreg(opts, opts->gen.scratch2, inst->reg);
516 }
517 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
518 call(code, opts->write_8);
519 cycles(&opts->gen, 1);
477 uint8_t high_reg = z80_high_reg(inst->reg); 520 uint8_t high_reg = z80_high_reg(inst->reg);
478 uint8_t use_reg; 521 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
479 //even though some of the upper halves can be used directly 522 add_ir(code, 1, opts->gen.scratch1, SZ_W);
480 //the limitations on mixing *H regs with the REX prefix 523 call(code, opts->read_8);
481 //prevent us from taking advantage of it 524 if (opts->regs[inst->reg] >= 0) {
482 use_reg = opts->regs[inst->reg]; 525 //even though some of the upper halves can be used directly
483 dst = ror_ir(dst, 8, use_reg, SZ_W); 526 //the limitations on mixing *H regs with the REX prefix
484 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); 527 //prevent us from taking advantage of it
485 dst = add_ir(dst, 1, SCRATCH1, SZ_W); 528 uint8_t use_reg = opts->regs[inst->reg];
486 dst = call(dst, (uint8_t *)z80_read_byte); 529 ror_ir(code, 8, use_reg, SZ_W);
487 dst = xchg_rr(dst, use_reg, SCRATCH1, SZ_B); 530 xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B);
488 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); 531 //restore reg to normal rotation
489 dst = add_ir(dst, 1, SCRATCH2, SZ_W); 532 ror_ir(code, 8, use_reg, SZ_W);
490 dst = call(dst, (uint8_t *)z80_write_byte); 533 } else {
491 //restore reg to normal rotation 534 zreg_to_native(opts, high_reg, opts->gen.scratch2);
492 dst = ror_ir(dst, 8, use_reg, SZ_W); 535 xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B);
493 dst = zcycles(dst, 2); 536 native_to_zreg(opts, opts->gen.scratch2, high_reg);
537 }
538 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
539 add_ir(code, 1, opts->gen.scratch2, SZ_W);
540 call(code, opts->write_8);
541 cycles(&opts->gen, 2);
494 } 542 }
495 break; 543 break;
496 case Z80_EXX: 544 case Z80_EXX:
497 dst = zcycles(dst, 4); 545 cycles(&opts->gen, 4);
498 dst = mov_rr(dst, opts->regs[Z80_BC], SCRATCH1, SZ_W); 546 zreg_to_native(opts, Z80_BC, opts->gen.scratch1);
499 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); 547 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_BC), opts->gen.scratch2, SZ_W);
500 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); 548 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_BC), SZ_W);
501 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); 549 native_to_zreg(opts, opts->gen.scratch2, Z80_BC);
502 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_C), SZ_W); 550
503 dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zar_off(Z80_L), SZ_W); 551 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
504 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH1, SZ_W); 552 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_HL), opts->gen.scratch2, SZ_W);
505 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); 553 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_HL), SZ_W);
506 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_E), SZ_W); 554 native_to_zreg(opts, opts->gen.scratch2, Z80_HL);
555
556 zreg_to_native(opts, Z80_DE, opts->gen.scratch1);
557 mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_DE), opts->gen.scratch2, SZ_W);
558 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_DE), SZ_W);
559 native_to_zreg(opts, opts->gen.scratch2, Z80_DE);
507 break; 560 break;
508 case Z80_LDI: { 561 case Z80_LDI: {
509 dst = zcycles(dst, 8); 562 cycles(&opts->gen, 8);
510 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); 563 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
511 dst = call(dst, (uint8_t *)z80_read_byte); 564 call(code, opts->read_8);
512 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); 565 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
513 dst = call(dst, (uint8_t *)z80_write_byte); 566 call(code, opts->write_8);
514 dst = zcycles(dst, 2); 567 cycles(&opts->gen, 2);
515 dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); 568 if (opts->regs[Z80_DE] >= 0) {
516 dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); 569 add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
517 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); 570 } else {
571 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
572 }
573 if (opts->regs[Z80_HL] >= 0) {
574 add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
575 } else {
576 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
577 }
578 if (opts->regs[Z80_BC] >= 0) {
579 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
580 } else {
581 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
582 }
518 //TODO: Implement half-carry 583 //TODO: Implement half-carry
519 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 584 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
520 dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV)); 585 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
521 break; 586 break;
522 } 587 }
523 case Z80_LDIR: { 588 case Z80_LDIR: {
524 dst = zcycles(dst, 8); 589 cycles(&opts->gen, 8);
525 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); 590 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
526 dst = call(dst, (uint8_t *)z80_read_byte); 591 call(code, opts->read_8);
527 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); 592 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
528 dst = call(dst, (uint8_t *)z80_write_byte); 593 call(code, opts->write_8);
529 dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); 594 if (opts->regs[Z80_DE] >= 0) {
530 dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); 595 add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
531 596 } else {
532 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); 597 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
533 uint8_t * cont = dst+1; 598 }
534 dst = jcc(dst, CC_Z, dst+2); 599 if (opts->regs[Z80_HL] >= 0) {
535 dst = zcycles(dst, 7); 600 add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
601 } else {
602 add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
603 }
604 if (opts->regs[Z80_BC] >= 0) {
605 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
606 } else {
607 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
608 }
609 uint8_t * cont = code->cur+1;
610 jcc(code, CC_Z, code->cur+2);
611 cycles(&opts->gen, 7);
536 //TODO: Figure out what the flag state should be here 612 //TODO: Figure out what the flag state should be here
537 //TODO: Figure out whether an interrupt can interrupt this 613 //TODO: Figure out whether an interrupt can interrupt this
538 dst = jmp(dst, start); 614 jmp(code, start);
539 *cont = dst - (cont + 1); 615 *cont = code->cur - (cont + 1);
540 dst = zcycles(dst, 2); 616 cycles(&opts->gen, 2);
541 //TODO: Implement half-carry 617 //TODO: Implement half-carry
542 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 618 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
543 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); 619 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
544 break; 620 break;
545 } 621 }
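
LDIR (and LDDR below) shows the translator's manual rel8 backpatching: emit
a forward jcc whose 8-bit displacement is a placeholder, remember the
address of that displacement byte, emit the repeat path ending in a jmp back
to the start of the instruction, then patch the byte once the fall-through
target is known. The pattern in isolation:

    uint8_t *cont = code->cur + 1;    /* address of the jcc's rel8 byte    */
    jcc(code, CC_Z, code->cur + 2);   /* displacement is a dummy for now   */
    /* ... repeat path: 7 extra cycles ... */
    jmp(code, start);                 /* re-run the whole instruction      */
    *cont = code->cur - (cont + 1);   /* patch: skip the loop when BC == 0 */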
546 case Z80_LDD: { 622 case Z80_LDD: {
547 dst = zcycles(dst, 8); 623 cycles(&opts->gen, 8);
548 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); 624 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
549 dst = call(dst, (uint8_t *)z80_read_byte); 625 call(code, opts->read_8);
550 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); 626 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
551 dst = call(dst, (uint8_t *)z80_write_byte); 627 call(code, opts->write_8);
552 dst = zcycles(dst, 2); 628 cycles(&opts->gen, 2);
553 dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); 629 if (opts->regs[Z80_DE] >= 0) {
554 dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); 630 sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
555 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); 631 } else {
632 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
633 }
634 if (opts->regs[Z80_HL] >= 0) {
635 sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
636 } else {
637 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
638 }
639 if (opts->regs[Z80_BC] >= 0) {
640 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
641 } else {
642 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
643 }
556 //TODO: Implement half-carry 644 //TODO: Implement half-carry
557 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 645 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
558 dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV)); 646 setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
559 break; 647 break;
560 } 648 }
561 case Z80_LDDR: { 649 case Z80_LDDR: {
562 dst = zcycles(dst, 8); 650 cycles(&opts->gen, 8);
563 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); 651 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
564 dst = call(dst, (uint8_t *)z80_read_byte); 652 call(code, opts->read_8);
565 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); 653 zreg_to_native(opts, Z80_DE, opts->gen.scratch2);
566 dst = call(dst, (uint8_t *)z80_write_byte); 654 call(code, opts->write_8);
567 dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); 655 if (opts->regs[Z80_DE] >= 0) {
568 dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); 656 sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
569 657 } else {
570 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); 658 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W);
571 uint8_t * cont = dst+1; 659 }
572 dst = jcc(dst, CC_Z, dst+2); 660 if (opts->regs[Z80_HL] >= 0) {
573 dst = zcycles(dst, 7); 661 sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
662 } else {
663 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W);
664 }
665 if (opts->regs[Z80_BC] >= 0) {
666 sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
667 } else {
668 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W);
669 }
670 uint8_t * cont = code->cur+1;
671 jcc(code, CC_Z, code->cur+2);
672 cycles(&opts->gen, 7);
574 //TODO: Figure out what the flag state should be here 673 //TODO: Figure out what the flag state should be here
575 //TODO: Figure out whether an interrupt can interrupt this 674 //TODO: Figure out whether an interrupt can interrupt this
576 dst = jmp(dst, start); 675 jmp(code, start);
577 *cont = dst - (cont + 1); 676 *cont = code->cur - (cont + 1);
578 dst = zcycles(dst, 2); 677 cycles(&opts->gen, 2);
579 //TODO: Implement half-carry 678 //TODO: Implement half-carry
580 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 679 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
581 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); 680 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
582 break; 681 break;
583 } 682 }
584 /*case Z80_CPI: 683 /*case Z80_CPI:
585 case Z80_CPIR: 684 case Z80_CPIR:
586 case Z80_CPD: 685 case Z80_CPD:
587 case Z80_CPDR: 686 case Z80_CPDR:
588 break;*/ 687 break;*/
589 case Z80_ADD: 688 case Z80_ADD:
590 cycles = 4; 689 num_cycles = 4;
591 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { 690 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
592 cycles += 12; 691 num_cycles += 12;
593 } else if(inst->addr_mode == Z80_IMMED) { 692 } else if(inst->addr_mode == Z80_IMMED) {
594 cycles += 3; 693 num_cycles += 3;
595 } else if(z80_size(inst) == SZ_W) { 694 } else if(z80_size(inst) == SZ_W) {
596 cycles += 4; 695 num_cycles += 4;
597 } 696 }
598 dst = zcycles(dst, cycles); 697 cycles(&opts->gen, num_cycles);
599 dst = translate_z80_reg(inst, &dst_op, dst, opts); 698 translate_z80_reg(inst, &dst_op, opts);
600 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); 699 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
601 if (src_op.mode == MODE_REG_DIRECT) { 700 if (dst_op.mode == MODE_REG_DIRECT) {
602 dst = add_rr(dst, src_op.base, dst_op.base, z80_size(inst)); 701 if (src_op.mode == MODE_REG_DIRECT) {
603 } else { 702 add_rr(code, src_op.base, dst_op.base, z80_size(inst));
604 dst = add_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); 703 } else if (src_op.mode == MODE_IMMED) {
605 } 704 add_ir(code, src_op.disp, dst_op.base, z80_size(inst));
606 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 705 } else {
607 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 706 add_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
707 }
708 } else {
709 if (src_op.mode == MODE_REG_DIRECT) {
710 add_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
711 } else if (src_op.mode == MODE_IMMED) {
712 add_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
713 } else {
714 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
715 add_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
716 }
717 }
718 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
719 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
608 //TODO: Implement half-carry flag 720 //TODO: Implement half-carry flag
609 if (z80_size(inst) == SZ_B) { 721 if (z80_size(inst) == SZ_B) {
610 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); 722 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
611 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 723 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
612 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 724 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
613 } 725 }
614 dst = z80_save_reg(dst, inst, opts); 726 z80_save_reg(inst, opts);
615 dst = z80_save_ea(dst, inst, opts); 727 z80_save_ea(code, inst, opts);
616 break; 728 break;
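
The rewritten ADD (and every ALU op after it) enumerates a full 2x3 operand
matrix instead of assuming register-resident operands: the destination is
either a host register or a context slot (MODE_REG_DISPLACE8), and the
source is a register, an immediate, or a context slot. The one combination
x86 cannot encode directly is memory-to-memory, so that case stages the
source through scratch1, as in:

    mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
    add_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));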
617 case Z80_ADC: 729 case Z80_ADC:
618 cycles = 4; 730 num_cycles = 4;
619 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { 731 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
620 cycles += 12; 732 num_cycles += 12;
621 } else if(inst->addr_mode == Z80_IMMED) { 733 } else if(inst->addr_mode == Z80_IMMED) {
622 cycles += 3; 734 num_cycles += 3;
623 } else if(z80_size(inst) == SZ_W) { 735 } else if(z80_size(inst) == SZ_W) {
624 cycles += 4; 736 num_cycles += 4;
625 } 737 }
626 dst = zcycles(dst, cycles); 738 cycles(&opts->gen, num_cycles);
627 dst = translate_z80_reg(inst, &dst_op, dst, opts); 739 translate_z80_reg(inst, &dst_op, opts);
628 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); 740 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
629 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); 741 bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
742 if (dst_op.mode == MODE_REG_DIRECT) {
743 if (src_op.mode == MODE_REG_DIRECT) {
744 adc_rr(code, src_op.base, dst_op.base, z80_size(inst));
745 } else if (src_op.mode == MODE_IMMED) {
746 adc_ir(code, src_op.disp, dst_op.base, z80_size(inst));
747 } else {
748 adc_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
749 }
750 } else {
751 if (src_op.mode == MODE_REG_DIRECT) {
752 adc_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
753 } else if (src_op.mode == MODE_IMMED) {
754 adc_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
755 } else {
756 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
757 adc_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
758 }
759 }
760 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
761 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
762 //TODO: Implement half-carry flag
763 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
764 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
765 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
766 z80_save_reg(inst, opts);
767 z80_save_ea(code, inst, opts);
768 break;
769 case Z80_SUB:
770 num_cycles = 4;
771 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
772 num_cycles += 12;
773 } else if(inst->addr_mode == Z80_IMMED) {
774 num_cycles += 3;
775 }
776 cycles(&opts->gen, num_cycles);
777 translate_z80_reg(inst, &dst_op, opts);
778 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
779 if (dst_op.mode == MODE_REG_DIRECT) {
780 if (src_op.mode == MODE_REG_DIRECT) {
781 sub_rr(code, src_op.base, dst_op.base, z80_size(inst));
782 } else if (src_op.mode == MODE_IMMED) {
783 sub_ir(code, src_op.disp, dst_op.base, z80_size(inst));
784 } else {
785 sub_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
786 }
787 } else {
788 if (src_op.mode == MODE_REG_DIRECT) {
789 sub_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
790 } else if (src_op.mode == MODE_IMMED) {
791 sub_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
792 } else {
793 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
794 sub_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
795 }
796 }
797 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
798 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
799 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
800 //TODO: Implement half-carry flag
801 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
802 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
803 z80_save_reg(inst, opts);
804 z80_save_ea(code, inst, opts);
805 break;
806 case Z80_SBC:
807 num_cycles = 4;
808 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
809 num_cycles += 12;
810 } else if(inst->addr_mode == Z80_IMMED) {
811 num_cycles += 3;
812 } else if(z80_size(inst) == SZ_W) {
813 num_cycles += 4;
814 }
815 cycles(&opts->gen, num_cycles);
816 translate_z80_reg(inst, &dst_op, opts);
817 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
818 bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
819 if (dst_op.mode == MODE_REG_DIRECT) {
820 if (src_op.mode == MODE_REG_DIRECT) {
821 sbb_rr(code, src_op.base, dst_op.base, z80_size(inst));
822 } else if (src_op.mode == MODE_IMMED) {
823 sbb_ir(code, src_op.disp, dst_op.base, z80_size(inst));
824 } else {
825 sbb_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
826 }
827 } else {
828 if (src_op.mode == MODE_REG_DIRECT) {
829 sbb_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst));
830 } else if (src_op.mode == MODE_IMMED) {
831 sbb_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst));
832 } else {
833 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst));
834 sbb_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst));
835 }
836 }
837 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
838 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
839 //TODO: Implement half-carry flag
840 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
841 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
842 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
843 z80_save_reg(inst, opts);
844 z80_save_ea(code, inst, opts);
845 break;
846 case Z80_AND:
847 num_cycles = 4;
848 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
849 num_cycles += 12;
850 } else if(inst->addr_mode == Z80_IMMED) {
851 num_cycles += 3;
852 } else if(z80_size(inst) == SZ_W) {
853 num_cycles += 4;
854 }
855 cycles(&opts->gen, num_cycles);
856 translate_z80_reg(inst, &dst_op, opts);
857 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
630 if (src_op.mode == MODE_REG_DIRECT) { 858 if (src_op.mode == MODE_REG_DIRECT) {
631 dst = adc_rr(dst, src_op.base, dst_op.base, z80_size(inst)); 859 and_rr(code, src_op.base, dst_op.base, z80_size(inst));
632 } else { 860 } else if (src_op.mode == MODE_IMMED) {
633 dst = adc_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); 861 and_ir(code, src_op.disp, dst_op.base, z80_size(inst));
634 } 862 } else {
635 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 863 and_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
636 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
637 //TODO: Implement half-carry flag
638 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
639 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
640 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
641 dst = z80_save_reg(dst, inst, opts);
642 dst = z80_save_ea(dst, inst, opts);
643 break;
644 case Z80_SUB:
645 cycles = 4;
646 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
647 cycles += 12;
648 } else if(inst->addr_mode == Z80_IMMED) {
649 cycles += 3;
650 }
651 dst = zcycles(dst, cycles);
652 dst = translate_z80_reg(inst, &dst_op, dst, opts);
653 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
654 if (src_op.mode == MODE_REG_DIRECT) {
655 dst = sub_rr(dst, src_op.base, dst_op.base, z80_size(inst));
656 } else {
657 dst = sub_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
658 }
659 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
660 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
661 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
662 //TODO: Implement half-carry flag
663 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
664 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
665 dst = z80_save_reg(dst, inst, opts);
666 dst = z80_save_ea(dst, inst, opts);
667 break;
668 case Z80_SBC:
669 cycles = 4;
670 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
671 cycles += 12;
672 } else if(inst->addr_mode == Z80_IMMED) {
673 cycles += 3;
674 } else if(z80_size(inst) == SZ_W) {
675 cycles += 4;
676 }
677 dst = zcycles(dst, cycles);
678 dst = translate_z80_reg(inst, &dst_op, dst, opts);
679 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
680 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
681 if (src_op.mode == MODE_REG_DIRECT) {
682 dst = sbb_rr(dst, src_op.base, dst_op.base, z80_size(inst));
683 } else {
684 dst = sbb_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
685 }
686 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
687 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
688 //TODO: Implement half-carry flag
689 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
690 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
691 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
692 dst = z80_save_reg(dst, inst, opts);
693 dst = z80_save_ea(dst, inst, opts);
694 break;
695 case Z80_AND:
696 cycles = 4;
697 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
698 cycles += 12;
699 } else if(inst->addr_mode == Z80_IMMED) {
700 cycles += 3;
701 } else if(z80_size(inst) == SZ_W) {
702 cycles += 4;
703 }
704 dst = zcycles(dst, cycles);
705 dst = translate_z80_reg(inst, &dst_op, dst, opts);
706 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
707 if (src_op.mode == MODE_REG_DIRECT) {
708 dst = and_rr(dst, src_op.base, dst_op.base, z80_size(inst));
709 } else {
710 dst = and_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
711 } 864 }
712 //TODO: Cleanup flags 865 //TODO: Cleanup flags
713 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 866 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
714 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 867 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
715 //TODO: Implement half-carry flag 868 //TODO: Implement half-carry flag
716 if (z80_size(inst) == SZ_B) { 869 if (z80_size(inst) == SZ_B) {
717 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 870 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
718 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 871 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
719 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 872 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
720 } 873 }
721 dst = z80_save_reg(dst, inst, opts); 874 z80_save_reg(inst, opts);
722 dst = z80_save_ea(dst, inst, opts); 875 z80_save_ea(code, inst, opts);
723 break; 876 break;
724 case Z80_OR: 877 case Z80_OR:
725 cycles = 4; 878 num_cycles = 4;
726 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { 879 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
727 cycles += 12; 880 num_cycles += 12;
728 } else if(inst->addr_mode == Z80_IMMED) { 881 } else if(inst->addr_mode == Z80_IMMED) {
729 cycles += 3; 882 num_cycles += 3;
730 } else if(z80_size(inst) == SZ_W) { 883 } else if(z80_size(inst) == SZ_W) {
731 cycles += 4; 884 num_cycles += 4;
732 } 885 }
733 dst = zcycles(dst, cycles); 886 cycles(&opts->gen, num_cycles);
734 dst = translate_z80_reg(inst, &dst_op, dst, opts); 887 translate_z80_reg(inst, &dst_op, opts);
735 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); 888 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
736 if (src_op.mode == MODE_REG_DIRECT) { 889 if (src_op.mode == MODE_REG_DIRECT) {
737 dst = or_rr(dst, src_op.base, dst_op.base, z80_size(inst)); 890 or_rr(code, src_op.base, dst_op.base, z80_size(inst));
738 } else { 891 } else if (src_op.mode == MODE_IMMED) {
739 dst = or_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); 892 or_ir(code, src_op.disp, dst_op.base, z80_size(inst));
893 } else {
894 or_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
740 } 895 }
741 //TODO: Cleanup flags 896 //TODO: Cleanup flags
742 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 897 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
743 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 898 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
744 //TODO: Implement half-carry flag 899 //TODO: Implement half-carry flag
745 if (z80_size(inst) == SZ_B) { 900 if (z80_size(inst) == SZ_B) {
746 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 901 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
747 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 902 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
748 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 903 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
749 } 904 }
750 dst = z80_save_reg(dst, inst, opts); 905 z80_save_reg(inst, opts);
751 dst = z80_save_ea(dst, inst, opts); 906 z80_save_ea(code, inst, opts);
752 break; 907 break;
753 case Z80_XOR: 908 case Z80_XOR:
754 cycles = 4; 909 num_cycles = 4;
755 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { 910 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
756 cycles += 12; 911 num_cycles += 12;
757 } else if(inst->addr_mode == Z80_IMMED) { 912 } else if(inst->addr_mode == Z80_IMMED) {
758 cycles += 3; 913 num_cycles += 3;
759 } else if(z80_size(inst) == SZ_W) { 914 } else if(z80_size(inst) == SZ_W) {
760 cycles += 4; 915 num_cycles += 4;
761 } 916 }
762 dst = zcycles(dst, cycles); 917 cycles(&opts->gen, num_cycles);
763 dst = translate_z80_reg(inst, &dst_op, dst, opts); 918 translate_z80_reg(inst, &dst_op, opts);
764 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); 919 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
765 if (src_op.mode == MODE_REG_DIRECT) { 920 if (src_op.mode == MODE_REG_DIRECT) {
766 dst = xor_rr(dst, src_op.base, dst_op.base, z80_size(inst)); 921 xor_rr(code, src_op.base, dst_op.base, z80_size(inst));
767 } else { 922 } else if (src_op.mode == MODE_IMMED) {
768 dst = xor_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); 923 xor_ir(code, src_op.disp, dst_op.base, z80_size(inst));
924 } else {
925 xor_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
769 } 926 }
770 //TODO: Cleanup flags 927 //TODO: Cleanup flags
771 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 928 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
772 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 929 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
773 //TODO: Implement half-carry flag 930 //TODO: Implement half-carry flag
774 if (z80_size(inst) == SZ_B) { 931 if (z80_size(inst) == SZ_B) {
775 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 932 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
776 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 933 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
777 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 934 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
778 } 935 }
779 dst = z80_save_reg(dst, inst, opts); 936 z80_save_reg(inst, opts);
780 dst = z80_save_ea(dst, inst, opts); 937 z80_save_ea(code, inst, opts);
781 break; 938 break;
782 case Z80_CP: 939 case Z80_CP:
783 cycles = 4; 940 num_cycles = 4;
784 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { 941 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
785 cycles += 12; 942 num_cycles += 12;
786 } else if(inst->addr_mode == Z80_IMMED) { 943 } else if(inst->addr_mode == Z80_IMMED) {
787 cycles += 3; 944 num_cycles += 3;
788 } 945 }
789 dst = zcycles(dst, cycles); 946 cycles(&opts->gen, num_cycles);
790 dst = translate_z80_reg(inst, &dst_op, dst, opts); 947 translate_z80_reg(inst, &dst_op, opts);
791 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); 948 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
792 if (src_op.mode == MODE_REG_DIRECT) { 949 if (src_op.mode == MODE_REG_DIRECT) {
793 dst = cmp_rr(dst, src_op.base, dst_op.base, z80_size(inst)); 950 cmp_rr(code, src_op.base, dst_op.base, z80_size(inst));
794 } else { 951 } else if (src_op.mode == MODE_IMMED) {
795 dst = cmp_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); 952 cmp_ir(code, src_op.disp, dst_op.base, z80_size(inst));
796 } 953 } else {
797 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 954 cmp_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst));
798 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); 955 }
799 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); 956 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
957 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
958 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
800 //TODO: Implement half-carry flag 959 //TODO: Implement half-carry flag
801 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 960 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
802 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 961 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
803 dst = z80_save_reg(dst, inst, opts); 962 z80_save_reg(inst, opts);
804 dst = z80_save_ea(dst, inst, opts); 963 z80_save_ea(code, inst, opts);
805 break; 964 break;
806 case Z80_INC: 965 case Z80_INC:
807 cycles = 4; 966 num_cycles = 4;
808 if (inst->reg == Z80_IX || inst->reg == Z80_IY) { 967 if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
809 cycles += 6; 968 num_cycles += 6;
810 } else if(z80_size(inst) == SZ_W) { 969 } else if(z80_size(inst) == SZ_W) {
811 cycles += 2; 970 num_cycles += 2;
812 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { 971 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
813 cycles += 4; 972 num_cycles += 4;
814 } 973 }
815 dst = zcycles(dst, cycles); 974 cycles(&opts->gen, num_cycles);
816 dst = translate_z80_reg(inst, &dst_op, dst, opts); 975 translate_z80_reg(inst, &dst_op, opts);
817 if (dst_op.mode == MODE_UNUSED) { 976 if (dst_op.mode == MODE_UNUSED) {
818 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 977 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
819 } 978 }
820 dst = add_ir(dst, 1, dst_op.base, z80_size(inst)); 979 if (dst_op.mode == MODE_REG_DIRECT) {
980 add_ir(code, 1, dst_op.base, z80_size(inst));
981 } else {
982 add_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst));
983 }
821 if (z80_size(inst) == SZ_B) { 984 if (z80_size(inst) == SZ_B) {
822 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 985 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
823 //TODO: Implement half-carry flag 986 //TODO: Implement half-carry flag
824 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); 987 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
825 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 988 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
826 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 989 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
827 } 990 }
828 dst = z80_save_reg(dst, inst, opts); 991 z80_save_reg(inst, opts);
829 dst = z80_save_ea(dst, inst, opts); 992 z80_save_ea(code, inst, opts);
830 dst = z80_save_result(dst, inst); 993 z80_save_result(opts, inst);
831 break; 994 break;
832 case Z80_DEC: 995 case Z80_DEC:
833 cycles = 4; 996 num_cycles = 4;
834 if (inst->reg == Z80_IX || inst->reg == Z80_IY) { 997 if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
835 cycles += 6; 998 num_cycles += 6;
836 } else if(z80_size(inst) == SZ_W) { 999 } else if(z80_size(inst) == SZ_W) {
837 cycles += 2; 1000 num_cycles += 2;
838 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { 1001 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
839 cycles += 4; 1002 num_cycles += 4;
840 } 1003 }
841 dst = zcycles(dst, cycles); 1004 cycles(&opts->gen, num_cycles);
842 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1005 translate_z80_reg(inst, &dst_op, opts);
843 if (dst_op.mode == MODE_UNUSED) { 1006 if (dst_op.mode == MODE_UNUSED) {
844 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1007 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
845 } 1008 }
846 dst = sub_ir(dst, 1, dst_op.base, z80_size(inst)); 1009 if (dst_op.mode == MODE_REG_DIRECT) {
1010 sub_ir(code, 1, dst_op.base, z80_size(inst));
1011 } else {
1012 sub_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst));
1013 }
1014
847 if (z80_size(inst) == SZ_B) { 1015 if (z80_size(inst) == SZ_B) {
848 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); 1016 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
849 //TODO: Implement half-carry flag 1017 //TODO: Implement half-carry flag
850 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); 1018 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
851 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1019 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
852 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1020 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
853 } 1021 }
854 dst = z80_save_reg(dst, inst, opts); 1022 z80_save_reg(inst, opts);
855 dst = z80_save_ea(dst, inst, opts); 1023 z80_save_ea(code, inst, opts);
856 dst = z80_save_result(dst, inst); 1024 z80_save_result(opts, inst);
857 break; 1025 break;
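//Neither INC nor DEC writes ZF_C above because the Z80 leaves carry untouched
//for both; only N, P/V, Z and S change. A sketch of the 8-bit DEC case, with
//hypothetical flag bytes standing in for the context's flag array:
static uint8_t dec8_sketch(uint8_t v, uint8_t *flag_n, uint8_t *flag_pv)
{
	*flag_n = 1;            //DEC counts as a subtraction
	*flag_pv = (v == 0x80); //overflow only when wrapping past the signed minimum
	return (uint8_t)(v - 1);
}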
858 //case Z80_DAA: 1026 //case Z80_DAA:
859 case Z80_CPL: 1027 case Z80_CPL:
860 dst = zcycles(dst, 4); 1028 cycles(&opts->gen, 4);
861 dst = not_r(dst, opts->regs[Z80_A], SZ_B); 1029 not_r(code, opts->regs[Z80_A], SZ_B);
862 //TODO: Implement half-carry flag 1030 //TODO: Implement half-carry flag
863 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); 1031 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
864 break; 1032 break;
865 case Z80_NEG: 1033 case Z80_NEG:
866 dst = zcycles(dst, 8); 1034 cycles(&opts->gen, 8);
867 dst = neg_r(dst, opts->regs[Z80_A], SZ_B); 1035 neg_r(code, opts->regs[Z80_A], SZ_B);
868 //TODO: Implement half-carry flag 1036 //TODO: Implement half-carry flag
869 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1037 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
870 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1038 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
871 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1039 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
872 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); 1040 setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
873 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); 1041 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
874 break; 1042 break;
875 case Z80_CCF: 1043 case Z80_CCF:
876 dst = zcycles(dst, 4); 1044 cycles(&opts->gen, 4);
877 dst = xor_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B); 1045 xor_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
878 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1046 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
879 //TODO: Implement half-carry flag 1047 //TODO: Implement half-carry flag
880 break; 1048 break;
881 case Z80_SCF: 1049 case Z80_SCF:
882 dst = zcycles(dst, 4); 1050 cycles(&opts->gen, 4);
883 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B); 1051 mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
884 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1052 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
885 //TODO: Implement half-carry flag 1053 //TODO: Implement half-carry flag
886 break; 1054 break;
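//Since the emulated flags live as plain bytes in the context, CCF and SCF
//reduce to single byte operations, as the xor and mov above show. The same
//thing as a sketch, with hypothetical flag-byte pointers:
static void ccf_sketch(uint8_t *flag_c, uint8_t *flag_n) { *flag_c ^= 1; *flag_n = 0; }
static void scf_sketch(uint8_t *flag_c, uint8_t *flag_n) { *flag_c = 1;  *flag_n = 0; }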
887 case Z80_NOP: 1055 case Z80_NOP:
888 if (inst->immed == 42) { 1056 if (inst->immed == 42) {
889 dst = call(dst, (uint8_t *)z80_save_context); 1057 call(code, opts->gen.save_context);
890 dst = mov_rr(dst, CONTEXT, RDI, SZ_Q); 1058 call_args(code, (code_ptr)z80_print_regs_exit, 1, opts->gen.context_reg);
891 dst = jmp(dst, (uint8_t *)z80_print_regs_exit); 1059 } else {
892 } else { 1060 cycles(&opts->gen, 4 * inst->immed);
893 dst = zcycles(dst, 4 * inst->immed); 1061 }
894 } 1062 break;
895 break; 1063 case Z80_HALT: {
896 case Z80_HALT: 1064 code_ptr loop_top = code->cur;
897 dst = zcycles(dst, 4); 1065 //this isn't terribly efficient, but it's good enough for now
898 dst = mov_ir(dst, address, SCRATCH1, SZ_W); 1066 cycles(&opts->gen, 4);
899 uint8_t * call_inst = dst; 1067 check_cycles_int(&opts->gen, address);
900 dst = call(dst, (uint8_t *)z80_halt); 1068 jmp(code, loop_top);
901 dst = jmp(dst, call_inst); 1069 break;
902 break; 1070 }
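//The rewritten HALT compiles to a self-loop: every pass charges 4 cycles and
//re-runs the interrupt/limit check, so control leaves the loop through
//check_cycles_int once an interrupt or the sync point is due. A rough runtime
//model, assuming current_cycle is the counter the cycle register shadows:
static void halt_sketch(z80_context *context)
{
	while (context->current_cycle < context->sync_cycle) {
		context->current_cycle += 4; //one idle iteration per emitted loop pass
	}
	//at the limit, check_cycles_int hands off with `address` as the halt PC
}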
903 case Z80_DI: 1071 case Z80_DI:
904 dst = zcycles(dst, 4); 1072 cycles(&opts->gen, 4);
905 dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff1), SZ_B); 1073 mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
906 dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff2), SZ_B); 1074 mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
907 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, sync_cycle), ZLIMIT, SZ_D); 1075 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D);
908 dst = mov_irdisp8(dst, 0xFFFFFFFF, CONTEXT, offsetof(z80_context, int_cycle), SZ_D); 1076 mov_irdisp(code, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D);
909 break; 1077 break;
910 case Z80_EI: 1078 case Z80_EI:
911 dst = zcycles(dst, 4); 1079 cycles(&opts->gen, 4);
912 dst = mov_rrdisp32(dst, ZCYCLES, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D); 1080 mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
913 dst = mov_irdisp8(dst, 1, CONTEXT, offsetof(z80_context, iff1), SZ_B); 1081 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
914 dst = mov_irdisp8(dst, 1, CONTEXT, offsetof(z80_context, iff2), SZ_B); 1082 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
915 //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles 1083 //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles
916 dst = add_irdisp32(dst, 4, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D); 1084 add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
917 dst = call(dst, (uint8_t *)z80_do_sync); 1085 call(code, opts->do_sync);
918 break; 1086 break;
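//EI does not make interrupts deliverable immediately: int_enable_cycle is set
//to the cycle count at EI plus one minimum-length (4-cycle) instruction,
//scaled by the clock divider, so the earliest delivery point is after the
//instruction following EI. A sketch of the gate the interrupt path applies,
//again assuming current_cycle is the counter the cycle register shadows:
static uint8_t int_gate_sketch(z80_context *context)
{
	return context->iff1 && context->current_cycle >= context->int_enable_cycle;
}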
919 case Z80_IM: 1087 case Z80_IM:
920 dst = zcycles(dst, 4); 1088 cycles(&opts->gen, 4);
921 dst = mov_irdisp8(dst, inst->immed, CONTEXT, offsetof(z80_context, im), SZ_B); 1089 mov_irdisp(code, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B);
922 break; 1090 break;
923 case Z80_RLC: 1091 case Z80_RLC:
924 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); 1092 num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
925 dst = zcycles(dst, cycles); 1093 cycles(&opts->gen, num_cycles);
926 if (inst->addr_mode != Z80_UNUSED) { 1094 if (inst->addr_mode != Z80_UNUSED) {
927 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1095 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
928 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register 1096 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
929 dst = zcycles(dst, 1); 1097 cycles(&opts->gen, 1);
930 } else { 1098 } else {
931 src_op.mode = MODE_UNUSED; 1099 src_op.mode = MODE_UNUSED;
932 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1100 translate_z80_reg(inst, &dst_op, opts);
933 } 1101 }
934 dst = rol_ir(dst, 1, dst_op.base, SZ_B); 1102 if (dst_op.mode == MODE_REG_DIRECT) {
935 if (src_op.mode != MODE_UNUSED) { 1103 rol_ir(code, 1, dst_op.base, SZ_B);
936 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); 1104 } else {
937 } 1105 rol_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
938 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1106 }
939 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1107 if (src_op.mode == MODE_REG_DIRECT) {
1108 mov_rr(code, dst_op.base, src_op.base, SZ_B);
1109 } else if(src_op.mode == MODE_REG_DISPLACE8) {
1110 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
1111 }
1112 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
1113 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
940 //TODO: Implement half-carry flag 1114 //TODO: Implement half-carry flag
941 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); 1115 if (inst->immed) {
942 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1116 //rlca does not set these flags
943 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1117 if (dst_op.mode == MODE_REG_DIRECT) {
944 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1118 cmp_ir(code, 0, dst_op.base, SZ_B);
1119 } else {
1120 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1121 }
1122 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1123 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1124 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1125 }
945 if (inst->addr_mode != Z80_UNUSED) { 1126 if (inst->addr_mode != Z80_UNUSED) {
946 dst = z80_save_result(dst, inst); 1127 z80_save_result(opts, inst);
947 if (src_op.mode != MODE_UNUSED) { 1128 if (src_op.mode != MODE_UNUSED) {
948 dst = z80_save_reg(dst, inst, opts); 1129 z80_save_reg(inst, opts);
949 } 1130 }
950 } else { 1131 } else {
951 dst = z80_save_reg(dst, inst, opts); 1132 z80_save_reg(inst, opts);
952 } 1133 }
953 break; 1134 break;
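//immed == 0 marks the unprefixed RLCA form, which is why the new code gates
//the P/Z/S updates on inst->immed: RLCA only touches C, N and H. The explicit
//cmp against 0 is needed because x86 rotates update only CF and OF, leaving
//SF/ZF/PF stale. The rotation itself, as a sketch:
static uint8_t rlc8_sketch(uint8_t v, uint8_t *flag_c)
{
	*flag_c = v >> 7;                     //bit 7 out to carry
	return (uint8_t)((v << 1) | *flag_c); //and back in at bit 0
}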
954 case Z80_RL: 1135 case Z80_RL:
955 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); 1136 num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
956 dst = zcycles(dst, cycles); 1137 cycles(&opts->gen, num_cycles);
957 if (inst->addr_mode != Z80_UNUSED) { 1138 if (inst->addr_mode != Z80_UNUSED) {
958 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1139 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
959 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register 1140 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
960 dst = zcycles(dst, 1); 1141 cycles(&opts->gen, 1);
961 } else { 1142 } else {
962 src_op.mode = MODE_UNUSED; 1143 src_op.mode = MODE_UNUSED;
963 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1144 translate_z80_reg(inst, &dst_op, opts);
964 } 1145 }
965 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); 1146 bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
966 dst = rcl_ir(dst, 1, dst_op.base, SZ_B); 1147 if (dst_op.mode == MODE_REG_DIRECT) {
967 if (src_op.mode != MODE_UNUSED) { 1148 rcl_ir(code, 1, dst_op.base, SZ_B);
968 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); 1149 } else {
969 } 1150 rcl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
970 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1151 }
971 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1152 if (src_op.mode == MODE_REG_DIRECT) {
1153 mov_rr(code, dst_op.base, src_op.base, SZ_B);
1154 } else if(src_op.mode == MODE_REG_DISPLACE8) {
1155 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
1156 }
1157 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
1158 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
972 //TODO: Implement half-carry flag 1159 //TODO: Implement half-carry flag
973 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); 1160 if (inst->immed) {
974 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1161 //rla does not set these flags
975 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1162 if (dst_op.mode == MODE_REG_DIRECT) {
976 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1163 cmp_ir(code, 0, dst_op.base, SZ_B);
1164 } else {
1165 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1166 }
1167 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1168 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1169 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1170 }
977 if (inst->addr_mode != Z80_UNUSED) { 1171 if (inst->addr_mode != Z80_UNUSED) {
978 dst = z80_save_result(dst, inst); 1172 z80_save_result(opts, inst);
979 if (src_op.mode != MODE_UNUSED) { 1173 if (src_op.mode != MODE_UNUSED) {
980 dst = z80_save_reg(dst, inst, opts); 1174 z80_save_reg(inst, opts);
981 } 1175 }
982 } else { 1176 } else {
983 dst = z80_save_reg(dst, inst, opts); 1177 z80_save_reg(inst, opts);
984 } 1178 }
985 break; 1179 break;
986 case Z80_RRC: 1180 case Z80_RRC:
987 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); 1181 num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
988 dst = zcycles(dst, cycles); 1182 cycles(&opts->gen, num_cycles);
989 if (inst->addr_mode != Z80_UNUSED) { 1183 if (inst->addr_mode != Z80_UNUSED) {
990 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1184 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
991 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register 1185 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
992 dst = zcycles(dst, 1); 1186 cycles(&opts->gen, 1);
993 } else { 1187 } else {
994 src_op.mode = MODE_UNUSED; 1188 src_op.mode = MODE_UNUSED;
995 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1189 translate_z80_reg(inst, &dst_op, opts);
996 } 1190 }
997 dst = ror_ir(dst, 1, dst_op.base, SZ_B); 1191 if (dst_op.mode == MODE_REG_DIRECT) {
998 if (src_op.mode != MODE_UNUSED) { 1192 ror_ir(code, 1, dst_op.base, SZ_B);
999 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); 1193 } else {
1000 } 1194 ror_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
1001 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1195 }
1002 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1196 if (src_op.mode == MODE_REG_DIRECT) {
1197 mov_rr(code, dst_op.base, src_op.base, SZ_B);
1198 } else if(src_op.mode == MODE_REG_DISPLACE8) {
1199 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
1200 }
1201 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
1202 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1003 //TODO: Implement half-carry flag 1203 //TODO: Implement half-carry flag
1004 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); 1204 if (inst->immed) {
1005 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1205 //rrca does not set these flags
1006 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1206 if (dst_op.mode == MODE_REG_DIRECT) {
1007 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1207 cmp_ir(code, 0, dst_op.base, SZ_B);
1208 } else {
1209 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1210 }
1211 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1212 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1213 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1214 }
1008 if (inst->addr_mode != Z80_UNUSED) { 1215 if (inst->addr_mode != Z80_UNUSED) {
1009 dst = z80_save_result(dst, inst); 1216 z80_save_result(opts, inst);
1010 if (src_op.mode != MODE_UNUSED) { 1217 if (src_op.mode != MODE_UNUSED) {
1011 dst = z80_save_reg(dst, inst, opts); 1218 z80_save_reg(inst, opts);
1012 } 1219 }
1013 } else { 1220 } else {
1014 dst = z80_save_reg(dst, inst, opts); 1221 z80_save_reg(inst, opts);
1015 } 1222 }
1016 break; 1223 break;
1017 case Z80_RR: 1224 case Z80_RR:
1018 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); 1225 num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
1019 dst = zcycles(dst, cycles); 1226 cycles(&opts->gen, num_cycles);
1020 if (inst->addr_mode != Z80_UNUSED) { 1227 if (inst->addr_mode != Z80_UNUSED) {
1021 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1228 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
1022 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register 1229 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
1023 dst = zcycles(dst, 1); 1230 cycles(&opts->gen, 1);
1024 } else { 1231 } else {
1025 src_op.mode = MODE_UNUSED; 1232 src_op.mode = MODE_UNUSED;
1026 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1233 translate_z80_reg(inst, &dst_op, opts);
1027 } 1234 }
1028 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); 1235 bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
1029 dst = rcr_ir(dst, 1, dst_op.base, SZ_B); 1236 if (dst_op.mode == MODE_REG_DIRECT) {
1030 if (src_op.mode != MODE_UNUSED) { 1237 rcr_ir(code, 1, dst_op.base, SZ_B);
1031 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); 1238 } else {
1032 } 1239 rcr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
1033 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1240 }
1034 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1241 if (src_op.mode == MODE_REG_DIRECT) {
1242 mov_rr(code, dst_op.base, src_op.base, SZ_B);
1243 } else if(src_op.mode == MODE_REG_DISPLACE8) {
1244 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
1245 }
1246 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
1247 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1035 //TODO: Implement half-carry flag 1248 //TODO: Implement half-carry flag
1036 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); 1249 if (inst->immed) {
1037 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1250 //rra does not set these flags
1038 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1251 if (dst_op.mode == MODE_REG_DIRECT) {
1039 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1252 cmp_ir(code, 0, dst_op.base, SZ_B);
1253 } else {
1254 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1255 }
1256 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1257 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1258 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1259 }
1040 if (inst->addr_mode != Z80_UNUSED) { 1260 if (inst->addr_mode != Z80_UNUSED) {
1041 dst = z80_save_result(dst, inst); 1261 z80_save_result(opts, inst);
1042 if (src_op.mode != MODE_UNUSED) { 1262 if (src_op.mode != MODE_UNUSED) {
1043 dst = z80_save_reg(dst, inst, opts); 1263 z80_save_reg(inst, opts);
1044 } 1264 }
1045 } else { 1265 } else {
1046 dst = z80_save_reg(dst, inst, opts); 1266 z80_save_reg(inst, opts);
1047 } 1267 }
1048 break; 1268 break;
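//Unlike RLC/RRC, RL and RR rotate through the carry, i.e. a 9-bit rotate: the
//bt above loads the stored C into the host carry, rcl/rcr pulls it into the
//value, and the bit pushed out becomes the new C. The RR direction as a
//sketch, with flag_c standing in for the byte at zf_off(ZF_C):
static uint8_t rr8_sketch(uint8_t v, uint8_t *flag_c)
{
	uint8_t out = v & 1;
	v = (uint8_t)((v >> 1) | (*flag_c << 7)); //old carry enters at bit 7
	*flag_c = out;                            //bit 0 leaves into carry
	return v;
}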
1049 case Z80_SLA: 1269 case Z80_SLA:
1050 case Z80_SLL: 1270 case Z80_SLL:
1051 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; 1271 num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
1052 dst = zcycles(dst, cycles); 1272 cycles(&opts->gen, num_cycles);
1053 if (inst->addr_mode != Z80_UNUSED) { 1273 if (inst->addr_mode != Z80_UNUSED) {
1054 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1274 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
1055 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register 1275 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
1056 dst = zcycles(dst, 1); 1276 cycles(&opts->gen, 1);
1057 } else { 1277 } else {
1058 src_op.mode = MODE_UNUSED; 1278 src_op.mode = MODE_UNUSED;
1059 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1279 translate_z80_reg(inst, &dst_op, opts);
1060 } 1280 }
1061 dst = shl_ir(dst, 1, dst_op.base, SZ_B); 1281 if (dst_op.mode == MODE_REG_DIRECT) {
1062 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1282 shl_ir(code, 1, dst_op.base, SZ_B);
1283 } else {
1284 shl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
1285 }
1286 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
1063 if (inst->op == Z80_SLL) { 1287 if (inst->op == Z80_SLL) {
1064 dst = or_ir(dst, 1, dst_op.base, SZ_B); 1288 if (dst_op.mode == MODE_REG_DIRECT) {
1065 } 1289 or_ir(code, 1, dst_op.base, SZ_B);
1066 if (src_op.mode != MODE_UNUSED) { 1290 } else {
1067 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); 1291 or_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
1068 } 1292 }
1069 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1293 }
1294 if (src_op.mode == MODE_REG_DIRECT) {
1295 mov_rr(code, dst_op.base, src_op.base, SZ_B);
1296 } else if(src_op.mode == MODE_REG_DISPLACE8) {
1297 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
1298 }
1299 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1070 //TODO: Implement half-carry flag 1300 //TODO: Implement half-carry flag
1071 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); 1301 if (dst_op.mode == MODE_REG_DIRECT) {
1072 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1302 cmp_ir(code, 0, dst_op.base, SZ_B);
1073 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1303 } else {
1074 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1304 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1305 }
1306 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1307 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1308 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1075 if (inst->addr_mode != Z80_UNUSED) { 1309 if (inst->addr_mode != Z80_UNUSED) {
1076 dst = z80_save_result(dst, inst); 1310 z80_save_result(opts, inst);
1077 if (src_op.mode != MODE_UNUSED) { 1311 if (src_op.mode != MODE_UNUSED) {
1078 dst = z80_save_reg(dst, inst, opts); 1312 z80_save_reg(inst, opts);
1079 } 1313 }
1080 } else { 1314 } else {
1081 dst = z80_save_reg(dst, inst, opts); 1315 z80_save_reg(inst, opts);
1082 } 1316 }
1083 break; 1317 break;
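//SLL is the undocumented "shift left logical" that fills bit 0 with 1; the
//translation reuses the SLA path and ORs in the low bit afterwards, after
//carry has been captured but before the cmp regenerates P/Z/S. As a sketch:
static uint8_t sll8_sketch(uint8_t v, uint8_t *flag_c)
{
	*flag_c = v >> 7;
	return (uint8_t)((v << 1) | 1); //like SLA, but bit 0 becomes 1
}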
1084 case Z80_SRA: 1318 case Z80_SRA:
1085 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; 1319 num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
1086 dst = zcycles(dst, cycles); 1320 cycles(&opts->gen, num_cycles);
1087 if (inst->addr_mode != Z80_UNUSED) { 1321 if (inst->addr_mode != Z80_UNUSED) {
1088 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1322 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
1089 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register 1323 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
1090 dst = zcycles(dst, 1); 1324 cycles(&opts->gen, 1);
1091 } else { 1325 } else {
1092 src_op.mode = MODE_UNUSED; 1326 src_op.mode = MODE_UNUSED;
1093 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1327 translate_z80_reg(inst, &dst_op, opts);
1094 } 1328 }
1095 dst = sar_ir(dst, 1, dst_op.base, SZ_B); 1329 if (dst_op.mode == MODE_REG_DIRECT) {
1096 if (src_op.mode != MODE_UNUSED) { 1330 sar_ir(code, 1, dst_op.base, SZ_B);
1097 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); 1331 } else {
1098 } 1332 sar_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
1099 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1333 }
1100 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1334 if (src_op.mode == MODE_REG_DIRECT) {
1335 mov_rr(code, dst_op.base, src_op.base, SZ_B);
1336 } else if(src_op.mode == MODE_REG_DISPLACE8) {
1337 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
1338 }
1339 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
1340 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1101 //TODO: Implement half-carry flag 1341 //TODO: Implement half-carry flag
1102 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); 1342 if (dst_op.mode == MODE_REG_DIRECT) {
1103 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1343 cmp_ir(code, 0, dst_op.base, SZ_B);
1104 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1344 } else {
1105 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1345 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1346 }
1347 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1348 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1349 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1106 if (inst->addr_mode != Z80_UNUSED) { 1350 if (inst->addr_mode != Z80_UNUSED) {
1107 dst = z80_save_result(dst, inst); 1351 z80_save_result(opts, inst);
1108 if (src_op.mode != MODE_UNUSED) { 1352 if (src_op.mode != MODE_UNUSED) {
1109 dst = z80_save_reg(dst, inst, opts); 1353 z80_save_reg(inst, opts);
1110 } 1354 }
1111 } else { 1355 } else {
1112 dst = z80_save_reg(dst, inst, opts); 1356 z80_save_reg(inst, opts);
1113 } 1357 }
1114 break; 1358 break;
1115 case Z80_SRL: 1359 case Z80_SRL:
1116 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; 1360 num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
1117 dst = zcycles(dst, cycles); 1361 cycles(&opts->gen, num_cycles);
1118 if (inst->addr_mode != Z80_UNUSED) { 1362 if (inst->addr_mode != Z80_UNUSED) {
1119 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); 1363 translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
1120 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register 1364 translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
1121 dst = zcycles(dst, 1); 1365 cycles(&opts->gen, 1);
1122 } else { 1366 } else {
1123 src_op.mode = MODE_UNUSED; 1367 src_op.mode = MODE_UNUSED;
1124 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1368 translate_z80_reg(inst, &dst_op, opts);
1125 } 1369 }
1126 dst = shr_ir(dst, 1, dst_op.base, SZ_B); 1370 if (dst_op.mode == MODE_REG_DIRECT) {
1127 if (src_op.mode != MODE_UNUSED) { 1371 shr_ir(code, 1, dst_op.base, SZ_B);
1128 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); 1372 } else {
1129 } 1373 shr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B);
1130 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); 1374 }
1131 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1375 if (src_op.mode == MODE_REG_DIRECT) {
1376 mov_rr(code, dst_op.base, src_op.base, SZ_B);
1377 } else if(src_op.mode == MODE_REG_DISPLACE8) {
1378 mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B);
1379 }
1380 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
1381 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1132 //TODO: Implement half-carry flag 1382 //TODO: Implement half-carry flag
1133 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); 1383 if (dst_op.mode == MODE_REG_DIRECT) {
1134 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1384 cmp_ir(code, 0, dst_op.base, SZ_B);
1135 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1385 } else {
1136 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1386 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1387 }
1388 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1389 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1390 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1137 if (inst->addr_mode != Z80_UNUSED) { 1391 if (inst->addr_mode != Z80_UNUSED) {
1138 dst = z80_save_result(dst, inst); 1392 z80_save_result(opts, inst);
1139 if (src_op.mode != MODE_UNUSED) { 1393 if (src_op.mode != MODE_UNUSED) {
1140 dst = z80_save_reg(dst, inst, opts); 1394 z80_save_reg(inst, opts);
1141 } 1395 }
1142 } else { 1396 } else {
1143 dst = z80_save_reg(dst, inst, opts); 1397 z80_save_reg(inst, opts);
1144 } 1398 }
1145 break; 1399 break;
1146 case Z80_RLD: 1400 case Z80_RLD:
1147 dst = zcycles(dst, 8); 1401 cycles(&opts->gen, 8);
1148 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); 1402 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
1149 dst = call(dst, (uint8_t *)z80_read_byte); 1403 call(code, opts->read_8);
1150 //Before: (HL) = 0x12, A = 0x34 1404 //Before: (HL) = 0x12, A = 0x34
1151 //After: (HL) = 0x24, A = 0x31 1405 //After: (HL) = 0x24, A = 0x31
1152 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B); 1406 zreg_to_native(opts, Z80_A, opts->gen.scratch2);
1153 dst = shl_ir(dst, 4, SCRATCH1, SZ_W); 1407 shl_ir(code, 4, opts->gen.scratch1, SZ_W);
1154 dst = and_ir(dst, 0xF, SCRATCH2, SZ_W); 1408 and_ir(code, 0xF, opts->gen.scratch2, SZ_W);
1155 dst = and_ir(dst, 0xFFF, SCRATCH1, SZ_W); 1409 and_ir(code, 0xFFF, opts->gen.scratch1, SZ_W);
1156 dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); 1410 and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B);
1157 dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W); 1411 or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W);
1158 //SCRATCH1 = 0x0124 1412 //opts->gen.scratch1 = 0x0124
1159 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); 1413 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
1160 dst = zcycles(dst, 4); 1414 cycles(&opts->gen, 4);
1161 dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); 1415 or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
1162 //set flags 1416 //set flags
1163 //TODO: Implement half-carry flag 1417 //TODO: Implement half-carry flag
1164 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1418 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1165 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1419 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1166 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1420 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1167 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1421 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1168 1422
1169 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); 1423 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
1170 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); 1424 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
1171 dst = call(dst, (uint8_t *)z80_write_byte); 1425 call(code, opts->write_8);
1172 break; 1426 break;
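//RLD rotates the three nibbles A.low:(HL).high:(HL).low one position left,
//which the word-sized shifts and masks on scratch1 above implement; the
//Before/After comment (0x12/0x34 -> 0x24/0x31) traces one example. The same
//data flow as a sketch, with mem standing in for the byte read from and
//written back to (HL):
static void rld_sketch(uint8_t *a, uint8_t *mem)
{
	uint8_t m = *mem;
	*mem = (uint8_t)((m << 4) | (*a & 0x0F)); //(HL): high <- old low, low <- A.low
	*a = (*a & 0xF0) | (m >> 4);              //A.low <- old (HL).high
}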
1173 case Z80_RRD: 1427 case Z80_RRD:
1174 dst = zcycles(dst, 8); 1428 cycles(&opts->gen, 8);
1175 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); 1429 zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
1176 dst = call(dst, (uint8_t *)z80_read_byte); 1430 call(code, opts->read_8);
1177 //Before: (HL) = 0x12, A = 0x34 1431 //Before: (HL) = 0x12, A = 0x34
1178 //After: (HL) = 0x41, A = 0x32 1432 //After: (HL) = 0x41, A = 0x32
1179 dst = movzx_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B, SZ_W); 1433 zreg_to_native(opts, Z80_A, opts->gen.scratch2);
1180 dst = ror_ir(dst, 4, SCRATCH1, SZ_W); 1434 ror_ir(code, 4, opts->gen.scratch1, SZ_W);
1181 dst = shl_ir(dst, 4, SCRATCH2, SZ_W); 1435 shl_ir(code, 4, opts->gen.scratch2, SZ_W);
1182 dst = and_ir(dst, 0xF00F, SCRATCH1, SZ_W); 1436 and_ir(code, 0xF00F, opts->gen.scratch1, SZ_W);
1183 dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); 1437 and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B);
1184 //SCRATCH1 = 0x2001 1438 //opts->gen.scratch1 = 0x2001
1185 //SCRATCH2 = 0x0340 1439 //opts->gen.scratch2 = 0x0340
1186 dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W); 1440 or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W);
1187 //SCRATCH1 = 0x2341 1441 //opts->gen.scratch1 = 0x2341
1188 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); 1442 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
1189 dst = zcycles(dst, 4); 1443 cycles(&opts->gen, 4);
1190 dst = shr_ir(dst, 4, SCRATCH1, SZ_B); 1444 shr_ir(code, 4, opts->gen.scratch1, SZ_B);
1191 dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); 1445 or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
1192 //set flags 1446 //set flags
1193 //TODO: Implement half-carry flag 1447 //TODO: Implement half-carry flag
1194 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1448 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1195 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); 1449 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
1196 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); 1450 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
1197 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1451 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1198 1452
1199 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); 1453 zreg_to_native(opts, Z80_HL, opts->gen.scratch2);
1200 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); 1454 ror_ir(code, 8, opts->gen.scratch1, SZ_W);
1201 dst = call(dst, (uint8_t *)z80_write_byte); 1455 call(code, opts->write_8);
1202 break; 1456 break;
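//RRD is the inverse nibble rotation (0x12/0x34 -> 0x41/0x32 per the comments
//above); the same scratch registers carry the nibbles the other way:
static void rrd_sketch(uint8_t *a, uint8_t *mem)
{
	uint8_t m = *mem;
	*mem = (uint8_t)((*a << 4) | (m >> 4)); //(HL) <- A.low : old (HL).high
	*a = (*a & 0xF0) | (m & 0x0F);          //A.low <- old (HL).low
}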
1203 case Z80_BIT: { 1457 case Z80_BIT: {
1204 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; 1458 num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
1205 dst = zcycles(dst, cycles); 1459 cycles(&opts->gen, num_cycles);
1206 uint8_t bit; 1460 uint8_t bit;
1207 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { 1461 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
1208 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; 1462 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
1463 src_op.mode = MODE_REG_DIRECT;
1209 size = SZ_W; 1464 size = SZ_W;
1210 bit = inst->immed + 8; 1465 bit = inst->immed + 8;
1211 } else { 1466 } else {
1212 size = SZ_B; 1467 size = SZ_B;
1213 bit = inst->immed; 1468 bit = inst->immed;
1214 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); 1469 translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
1215 } 1470 }
1216 if (inst->addr_mode != Z80_REG) { 1471 if (inst->addr_mode != Z80_REG) {
1217 //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4 1472 //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4
1218 dst = zcycles(dst, 1); 1473 cycles(&opts->gen, 1);
1219 } 1474 }
1220 dst = bt_ir(dst, bit, src_op.base, size); 1475 if (src_op.mode == MODE_REG_DIRECT) {
1221 dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_Z)); 1476 bt_ir(code, bit, src_op.base, size);
1222 dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_PV)); 1477 } else {
1223 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); 1478 bt_irdisp(code, bit, src_op.base, src_op.disp, size);
1479 }
1480 setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_Z));
1481 setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_PV));
1482 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
1224 if (inst->immed == 7) { 1483 if (inst->immed == 7) {
1225 dst = cmp_ir(dst, 0, src_op.base, size); 1484 if (src_op.mode == MODE_REG_DIRECT) {
1226 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); 1485 cmp_ir(code, 0, src_op.base, size);
1227 } else { 1486 } else {
1228 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); 1487 cmp_irdisp(code, 0, src_op.base, src_op.disp, size);
1488 }
1489 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
1490 } else {
1491 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
1229 } 1492 }
1230 break; 1493 break;
1231 } 1494 }
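//When an 8-bit Z80 register is mapped to an x86 high-byte register (AH..BH),
//BIT tests bit immed + 8 of the containing word register instead: bit n of
//the high byte is the same physical bit as bit n + 8 of the word, so no byte
//extraction is needed. The new src_op.mode store also initializes a field the
//old code left unset on this path. The equivalence as a sketch:
static uint8_t bit_via_word_sketch(uint16_t pair, uint8_t bit)
{
	//e.g. bit 3 of B, with B held in the high byte of the pair, is bit 11
	return (uint8_t)((pair >> (bit + 8)) & 1);
}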
1232 case Z80_SET: { 1495 case Z80_SET: {
1233 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; 1496 num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
1234 dst = zcycles(dst, cycles); 1497 cycles(&opts->gen, num_cycles);
1235 uint8_t bit; 1498 uint8_t bit;
1236 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { 1499 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
1237 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; 1500 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
1501 src_op.mode = MODE_REG_DIRECT;
1238 size = SZ_W; 1502 size = SZ_W;
1239 bit = inst->immed + 8; 1503 bit = inst->immed + 8;
1240 } else { 1504 } else {
1241 size = SZ_B; 1505 size = SZ_B;
1242 bit = inst->immed; 1506 bit = inst->immed;
1243 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, MODIFY); 1507 translate_z80_ea(inst, &src_op, opts, READ, MODIFY);
1244 } 1508 }
1245 if (inst->reg != Z80_USE_IMMED) { 1509 if (inst->reg != Z80_USE_IMMED) {
1246 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1510 translate_z80_reg(inst, &dst_op, opts);
1247 } 1511 }
1248 if (inst->addr_mode != Z80_REG) { 1512 if (inst->addr_mode != Z80_REG) {
1249 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 1513 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4
1250 dst = zcycles(dst, 1); 1514 cycles(&opts->gen, 1);
1251 } 1515 }
1252 dst = bts_ir(dst, bit, src_op.base, size); 1516 if (src_op.mode == MODE_REG_DIRECT) {
1517 bts_ir(code, bit, src_op.base, size);
1518 } else {
1519 bts_irdisp(code, bit, src_op.base, src_op.disp, size);
1520 }
1253 if (inst->reg != Z80_USE_IMMED) { 1521 if (inst->reg != Z80_USE_IMMED) {
1254 if (size == SZ_W) { 1522 if (size == SZ_W) {
1523 #ifdef X86_64
1255 if (dst_op.base >= R8) { 1524 if (dst_op.base >= R8) {
1256 dst = ror_ir(dst, 8, src_op.base, SZ_W); 1525 ror_ir(code, 8, src_op.base, SZ_W);
1257 dst = mov_rr(dst, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); 1526 mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
1258 dst = ror_ir(dst, 8, src_op.base, SZ_W); 1527 ror_ir(code, 8, src_op.base, SZ_W);
1259 } else { 1528 } else {
1260 dst = mov_rr(dst, opts->regs[inst->ea_reg], dst_op.base, SZ_B); 1529 #endif
1530 if (dst_op.mode == MODE_REG_DIRECT) {
1531 zreg_to_native(opts, inst->ea_reg, dst_op.base);
1532 } else {
1533 zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1);
1534 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
1535 }
1536 #ifdef X86_64
1261 } 1537 }
1262 } else { 1538 #endif
1263 dst = mov_rr(dst, src_op.base, dst_op.base, SZ_B); 1539 } else {
1540 if (dst_op.mode == MODE_REG_DIRECT) {
1541 if (src_op.mode == MODE_REG_DIRECT) {
1542 mov_rr(code, src_op.base, dst_op.base, SZ_B);
1543 } else {
1544 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_B);
1545 }
1546 } else if (src_op.mode == MODE_REG_DIRECT) {
1547 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B);
1548 } else {
1549 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
1550 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
1551 }
1264 } 1552 }
1265 } 1553 }
1266 if ((inst->addr_mode & 0x1F) != Z80_REG) { 1554 if ((inst->addr_mode & 0x1F) != Z80_REG) {
1267 dst = z80_save_result(dst, inst); 1555 z80_save_result(opts, inst);
1268 if (inst->reg != Z80_USE_IMMED) { 1556 if (inst->reg != Z80_USE_IMMED) {
1269 dst = z80_save_reg(dst, inst, opts); 1557 z80_save_reg(inst, opts);
1270 } 1558 }
1271 } 1559 }
1272 break; 1560 break;
1273 } 1561 }
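//The dst_op.base >= R8 special case exists because x86-64 cannot encode
//AH/BH/CH/DH in an instruction that carries a REX prefix, and reaching R8-R15
//requires one. The emitted sequence therefore rotates the word register so
//the wanted byte sits in the REX-safe low position, copies it, and rotates
//back, roughly (illustrative assembly, not emitted verbatim):
//	ror bx, 8      ;bring the high byte down to bl
//	mov r8b, bl    ;legal, whereas "mov r8b, bh" has no encoding
//	ror bx, 8      ;restore the pair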
1274 case Z80_RES: { 1562 case Z80_RES: {
1275 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; 1563 num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
1276 dst = zcycles(dst, cycles); 1564 cycles(&opts->gen, num_cycles);
1277 uint8_t bit; 1565 uint8_t bit;
1278 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { 1566 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
1279 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; 1567 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
1568 src_op.mode = MODE_REG_DIRECT;
1280 size = SZ_W; 1569 size = SZ_W;
1281 bit = inst->immed + 8; 1570 bit = inst->immed + 8;
1282 } else { 1571 } else {
1283 size = SZ_B; 1572 size = SZ_B;
1284 bit = inst->immed; 1573 bit = inst->immed;
1285 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, MODIFY); 1574 translate_z80_ea(inst, &src_op, opts, READ, MODIFY);
1286 } 1575 }
1287 if (inst->reg != Z80_USE_IMMED) { 1576 if (inst->reg != Z80_USE_IMMED) {
1288 dst = translate_z80_reg(inst, &dst_op, dst, opts); 1577 translate_z80_reg(inst, &dst_op, opts);
1289 } 1578 }
1290 if (inst->addr_mode != Z80_REG) { 1579 if (inst->addr_mode != Z80_REG) {
1291 //Reads normally take 3 cycles, but the read in the middle of a res instruction takes 4 1580 //Reads normally take 3 cycles, but the read in the middle of a res instruction takes 4
1292 dst = zcycles(dst, 1); 1581 cycles(&opts->gen, 1);
1293 } 1582 }
1294 dst = btr_ir(dst, bit, src_op.base, size); 1583 if (src_op.mode == MODE_REG_DIRECT) {
1584 btr_ir(code, bit, src_op.base, size);
1585 } else {
1586 btr_irdisp(code, bit, src_op.base, src_op.disp, size);
1587 }
1295 if (inst->reg != Z80_USE_IMMED) { 1588 if (inst->reg != Z80_USE_IMMED) {
1296 if (size == SZ_W) { 1589 if (size == SZ_W) {
1590 #ifdef X86_64
1297 if (dst_op.base >= R8) { 1591 if (dst_op.base >= R8) {
1298 dst = ror_ir(dst, 8, src_op.base, SZ_W); 1592 ror_ir(code, 8, src_op.base, SZ_W);
1299 dst = mov_rr(dst, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); 1593 mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
1300 dst = ror_ir(dst, 8, src_op.base, SZ_W); 1594 ror_ir(code, 8, src_op.base, SZ_W);
1301 } else { 1595 } else {
1302 dst = mov_rr(dst, opts->regs[inst->ea_reg], dst_op.base, SZ_B); 1596 #endif
1597 if (dst_op.mode == MODE_REG_DIRECT) {
1598 zreg_to_native(opts, inst->ea_reg, dst_op.base);
1599 } else {
1600 zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1);
1601 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
1602 }
1603 #ifdef X86_64
1303 } 1604 }
1304 } else { 1605 #endif
1305 dst = mov_rr(dst, src_op.base, dst_op.base, SZ_B); 1606 } else {
1607 if (dst_op.mode == MODE_REG_DIRECT) {
1608 if (src_op.mode == MODE_REG_DIRECT) {
1609 mov_rr(code, src_op.base, dst_op.base, SZ_B);
1610 } else {
1611 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_B);
1612 }
1613 } else if (src_op.mode == MODE_REG_DIRECT) {
1614 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B);
1615 } else {
1616 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
1617 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
1618 }
1306 } 1619 }
1307 } 1620 }
1308 if (inst->addr_mode != Z80_REG) { 1621 if (inst->addr_mode != Z80_REG) {
1309 dst = z80_save_result(dst, inst); 1622 z80_save_result(opts, inst);
1310 if (inst->reg != Z80_USE_IMMED) { 1623 if (inst->reg != Z80_USE_IMMED) {
1311 dst = z80_save_reg(dst, inst, opts); 1624 z80_save_reg(inst, opts);
1312 } 1625 }
1313 } 1626 }
1314 break; 1627 break;
1315 } 1628 }
1316 case Z80_JP: { 1629 case Z80_JP: {
1317 cycles = 4; 1630 num_cycles = 4;
1318 if (inst->addr_mode != Z80_REG_INDIRECT) { 1631 if (inst->addr_mode != Z80_REG_INDIRECT) {
1319 cycles += 6; 1632 num_cycles += 6;
1320 } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { 1633 } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) {
1321 cycles += 4; 1634 num_cycles += 4;
1322 } 1635 }
1323 dst = zcycles(dst, cycles); 1636 cycles(&opts->gen, num_cycles);
1324 if (inst->addr_mode != Z80_REG_INDIRECT && inst->immed < 0x4000) { 1637 if (inst->addr_mode != Z80_REG_INDIRECT) {
1325 uint8_t * call_dst = z80_get_native_address(context, inst->immed); 1638 code_ptr call_dst = z80_get_native_address(context, inst->immed);
1326 if (!call_dst) { 1639 if (!call_dst) {
1327 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); 1640 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
1328 //fake address to force large displacement 1641 //fake address to force large displacement
1329 call_dst = dst + 256; 1642 call_dst = code->cur + 256;
1330 } 1643 }
1331 dst = jmp(dst, call_dst); 1644 jmp(code, call_dst);
1332 } else { 1645 } else {
1333 if (inst->addr_mode == Z80_REG_INDIRECT) { 1646 if (inst->addr_mode == Z80_REG_INDIRECT) {
1334 dst = mov_rr(dst, opts->regs[inst->ea_reg], SCRATCH1, SZ_W); 1647 zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1);
1335 } else { 1648 } else {
1336 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); 1649 mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W);
1337 } 1650 }
1338 dst = call(dst, (uint8_t *)z80_native_addr); 1651 call(code, opts->native_addr);
1339 dst = jmp_r(dst, SCRATCH1); 1652 jmp_r(code, opts->gen.scratch1);
1340 } 1653 }
1341 break; 1654 break;
1342 } 1655 }
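//Direct jumps to addresses with no native translation yet are deferred:
//defer_address records where the displacement will be written, and aiming the
//jmp 256 bytes ahead forces gen_x86 to emit the 32-bit-displacement form so
//the later patch always fits. Roughly, the fixup pass walks the list like
//this (hypothetical node field names):
//	for (node = opts->gen.deferred; node; node = node->next)
//		patch the rel32 at node->dest to reach
//		z80_get_native_address(context, node->address);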
1343 case Z80_JPCC: { 1656 case Z80_JPCC: {
1344 dst = zcycles(dst, 7);//T States: 4,3 1657 cycles(&opts->gen, 7);//T States: 4,3
1345 uint8_t cond = CC_Z; 1658 uint8_t cond = CC_Z;
1346 switch (inst->reg) 1659 switch (inst->reg)
1347 { 1660 {
1348 case Z80_CC_NZ: 1661 case Z80_CC_NZ:
1349 cond = CC_NZ; 1662 cond = CC_NZ;
1350 case Z80_CC_Z: 1663 case Z80_CC_Z:
1351 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); 1664 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
1352 break; 1665 break;
1353 case Z80_CC_NC: 1666 case Z80_CC_NC:
1354 cond = CC_NZ; 1667 cond = CC_NZ;
1355 case Z80_CC_C: 1668 case Z80_CC_C:
1356 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); 1669 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
1357 break; 1670 break;
1358 case Z80_CC_PO: 1671 case Z80_CC_PO:
1359 cond = CC_NZ; 1672 cond = CC_NZ;
1360 case Z80_CC_PE: 1673 case Z80_CC_PE:
1361 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); 1674 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
1362 break; 1675 break;
1363 case Z80_CC_P: 1676 case Z80_CC_P:
1364 cond = CC_NZ; 1677 cond = CC_NZ;
1365 case Z80_CC_M: 1678 case Z80_CC_M:
1366 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); 1679 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
1367 break; 1680 break;
1368 } 1681 }
1369 uint8_t *no_jump_off = dst+1; 1682 uint8_t *no_jump_off = code->cur+1;
1370 dst = jcc(dst, cond, dst+2); 1683 jcc(code, cond, code->cur+2);
1371 dst = zcycles(dst, 5);//T States: 5 1684 cycles(&opts->gen, 5);//T States: 5
1372 uint16_t dest_addr = inst->immed; 1685 uint16_t dest_addr = inst->immed;
1373 if (dest_addr < 0x4000) { 1686 code_ptr call_dst = z80_get_native_address(context, dest_addr);
1374 uint8_t * call_dst = z80_get_native_address(context, dest_addr);
1375 if (!call_dst) { 1687 if (!call_dst) {
1376 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); 1688 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
1377 //fake address to force large displacement 1689 //fake address to force large displacement
1378 call_dst = dst + 256; 1690 call_dst = code->cur + 256;
1379 } 1691 }
1380 dst = jmp(dst, call_dst); 1692 jmp(code, call_dst);
1381 } else { 1693 *no_jump_off = code->cur - (no_jump_off+1);
1382 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W);
1383 dst = call(dst, (uint8_t *)z80_native_addr);
1384 dst = jmp_r(dst, SCRATCH1);
1385 }
1386 *no_jump_off = dst - (no_jump_off+1);
1387 break; 1694 break;
1388 } 1695 }
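
The conditional cases above all use the same forward-patch idiom: emit a jcc whose one-byte displacement is a placeholder, emit the taken path, then write the real displacement once code->cur is known. A minimal standalone sketch of that idiom (the raw-buffer helpers are illustrative, not the gen_x86 API):

#include <stdint.h>

//emit a Jcc rel8 with a placeholder displacement, returning the slot to patch
static uint8_t *emit_jcc_rel8(uint8_t **cur, uint8_t cc)
{
	*(*cur)++ = 0x70 | cc; //Jcc short form: opcode 0x70 + 4-bit condition code
	uint8_t *disp = (*cur)++;
	*disp = 0;             //placeholder, patched below
	return disp;
}

//after emitting the skipped-over body, fix up the displacement
static void patch_jcc_rel8(uint8_t *disp, uint8_t *target)
{
	*disp = target - (disp + 1); //rel8 is relative to the next instruction
}
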
1389 case Z80_JR: { 1696 case Z80_JR: {
1390 dst = zcycles(dst, 12);//T States: 4,3,5 1697 cycles(&opts->gen, 12);//T States: 4,3,5
1391 uint16_t dest_addr = address + inst->immed + 2; 1698 uint16_t dest_addr = address + inst->immed + 2;
1392 if (dest_addr < 0x4000) { 1699 code_ptr call_dst = z80_get_native_address(context, dest_addr);
1393 uint8_t * call_dst = z80_get_native_address(context, dest_addr);
1394 if (!call_dst) { 1700 if (!call_dst) {
1395 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); 1701 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
1396 //fake address to force large displacement 1702 //fake address to force large displacement
1397 call_dst = dst + 256; 1703 call_dst = code->cur + 256;
1398 } 1704 }
1399 dst = jmp(dst, call_dst); 1705 jmp(code, call_dst);
1400 } else {
1401 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W);
1402 dst = call(dst, (uint8_t *)z80_native_addr);
1403 dst = jmp_r(dst, SCRATCH1);
1404 }
1405 break; 1706 break;
1406 } 1707 }
1407 case Z80_JRCC: { 1708 case Z80_JRCC: {
1408 dst = zcycles(dst, 7);//T States: 4,3 1709 cycles(&opts->gen, 7);//T States: 4,3
1409 uint8_t cond = CC_Z; 1710 uint8_t cond = CC_Z;
1410 switch (inst->reg) 1711 switch (inst->reg)
1411 { 1712 {
1412 case Z80_CC_NZ: 1713 case Z80_CC_NZ:
1413 cond = CC_NZ; 1714 cond = CC_NZ;
1414 case Z80_CC_Z: 1715 case Z80_CC_Z:
1415 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); 1716 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
1416 break; 1717 break;
1417 case Z80_CC_NC: 1718 case Z80_CC_NC:
1418 cond = CC_NZ; 1719 cond = CC_NZ;
1419 case Z80_CC_C: 1720 case Z80_CC_C:
1420 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); 1721 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
1421 break; 1722 break;
1422 } 1723 }
1423 uint8_t *no_jump_off = dst+1; 1724 uint8_t *no_jump_off = code->cur+1;
1424 dst = jcc(dst, cond, dst+2); 1725 jcc(code, cond, code->cur+2);
1425 dst = zcycles(dst, 5);//T States: 5 1726 cycles(&opts->gen, 5);//T States: 5
1426 uint16_t dest_addr = address + inst->immed + 2; 1727 uint16_t dest_addr = address + inst->immed + 2;
1427 if (dest_addr < 0x4000) { 1728 code_ptr call_dst = z80_get_native_address(context, dest_addr);
1428 uint8_t * call_dst = z80_get_native_address(context, dest_addr);
1429 if (!call_dst) { 1729 if (!call_dst) {
1430 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); 1730 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
1431 //fake address to force large displacement 1731 //fake address to force large displacement
1432 call_dst = dst + 256; 1732 call_dst = code->cur + 256;
1433 } 1733 }
1434 dst = jmp(dst, call_dst); 1734 jmp(code, call_dst);
1435 } else { 1735 *no_jump_off = code->cur - (no_jump_off+1);
1436 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); 1736 break;
1437 dst = call(dst, (uint8_t *)z80_native_addr); 1737 }
1438 dst = jmp_r(dst, SCRATCH1); 1738 case Z80_DJNZ: {
1439 } 1739 cycles(&opts->gen, 8);//T States: 5,3
1440 *no_jump_off = dst - (no_jump_off+1); 1740 if (opts->regs[Z80_B] >= 0) {
1441 break; 1741 sub_ir(code, 1, opts->regs[Z80_B], SZ_B);
1442 } 1742 } else {
1443 case Z80_DJNZ: 1743 sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_B), SZ_B);
1444 dst = zcycles(dst, 8);//T States: 5,3 1744 }
1445 dst = sub_ir(dst, 1, opts->regs[Z80_B], SZ_B); 1745 uint8_t *no_jump_off = code->cur+1;
1446 uint8_t *no_jump_off = dst+1; 1746 jcc(code, CC_Z, code->cur+2);
1447 dst = jcc(dst, CC_Z, dst+2); 1747 cycles(&opts->gen, 5);//T States: 5
1448 dst = zcycles(dst, 5);//T States: 5
1449 uint16_t dest_addr = address + inst->immed + 2; 1748 uint16_t dest_addr = address + inst->immed + 2;
1450 if (dest_addr < 0x4000) { 1749 code_ptr call_dst = z80_get_native_address(context, dest_addr);
1451 uint8_t * call_dst = z80_get_native_address(context, dest_addr);
1452 if (!call_dst) { 1750 if (!call_dst) {
1453 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); 1751 opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
1454 //fake address to force large displacement 1752 //fake address to force large displacement
1455 call_dst = dst + 256; 1753 call_dst = code->cur + 256;
1456 } 1754 }
1457 dst = jmp(dst, call_dst); 1755 jmp(code, call_dst);
1458 } else { 1756 *no_jump_off = code->cur - (no_jump_off+1);
1459 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); 1757 break;
1460 dst = call(dst, (uint8_t *)z80_native_addr); 1758 }
1461 dst = jmp_r(dst, SCRATCH1);
1462 }
1463 *no_jump_off = dst - (no_jump_off+1);
1464 break;
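
As a cross-check on the generated sequence, DJNZ's architectural behavior in plain C (the struct is a stand-in for the real z80_context, not its layout):

#include <stdint.h>

typedef struct { uint8_t b; uint16_t pc; uint32_t cycles; } z80_mini;

//decrement B; if nonzero, take the relative branch at an extra 5 T-state cost
void djnz(z80_mini *cpu, int8_t rel)
{
	cpu->cycles += 8;         //T States: 5,3
	cpu->pc += 2;             //DJNZ is a 2-byte instruction
	if (--cpu->b) {
		cpu->cycles += 5; //T States: 5 (taken case only)
		cpu->pc += rel;   //displacement is relative to the next instruction
	}
}
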
1465 case Z80_CALL: { 1759 case Z80_CALL: {
1466 dst = zcycles(dst, 11);//T States: 4,3,4 1760 cycles(&opts->gen, 11);//T States: 4,3,4
1467 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 1761 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
1468 dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); 1762 mov_ir(code, address + 3, opts->gen.scratch1, SZ_W);
1469 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); 1763 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
1470 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 1764 call(code, opts->write_16_highfirst);//T States: 3, 3
1471 if (inst->immed < 0x4000) { 1765 code_ptr call_dst = z80_get_native_address(context, inst->immed);
1472 uint8_t * call_dst = z80_get_native_address(context, inst->immed);
1473 if (!call_dst) { 1766 if (!call_dst) {
1474 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); 1767 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
1475 //fake address to force large displacement 1768 //fake address to force large displacement
1476 call_dst = dst + 256; 1769 call_dst = code->cur + 256;
1477 } 1770 }
1478 dst = jmp(dst, call_dst); 1771 jmp(code, call_dst);
1479 } else { 1772 break;
1480 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); 1773 }
1481 dst = call(dst, (uint8_t *)z80_native_addr); 1774 case Z80_CALLCC: {
1482 dst = jmp_r(dst, SCRATCH1); 1775 cycles(&opts->gen, 10);//T States: 4,3,3 (false case)
1483 }
1484 break;
1485 }
1486 case Z80_CALLCC:
1487 dst = zcycles(dst, 10);//T States: 4,3,3 (false case)
1488 uint8_t cond = CC_Z; 1776 uint8_t cond = CC_Z;
1489 switch (inst->reg) 1777 switch (inst->reg)
1490 { 1778 {
1491 case Z80_CC_NZ: 1779 case Z80_CC_NZ:
1492 cond = CC_NZ; 1780 cond = CC_NZ;
1493 case Z80_CC_Z: 1781 case Z80_CC_Z:
1494 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); 1782 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
1495 break; 1783 break;
1496 case Z80_CC_NC: 1784 case Z80_CC_NC:
1497 cond = CC_NZ; 1785 cond = CC_NZ;
1498 case Z80_CC_C: 1786 case Z80_CC_C:
1499 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); 1787 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
1500 break; 1788 break;
1501 case Z80_CC_PO: 1789 case Z80_CC_PO:
1502 cond = CC_NZ; 1790 cond = CC_NZ;
1503 case Z80_CC_PE: 1791 case Z80_CC_PE:
1504 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); 1792 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
1505 break; 1793 break;
1506 case Z80_CC_P: 1794 case Z80_CC_P:
1507 cond = CC_NZ; 1795 cond = CC_NZ;
1508 case Z80_CC_M: 1796 case Z80_CC_M:
1509 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); 1797 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
1510 break; 1798 break;
1511 } 1799 }
1512 uint8_t *no_call_off = dst+1; 1800 uint8_t *no_call_off = code->cur+1;
1513 dst = jcc(dst, cond, dst+2); 1801 jcc(code, cond, code->cur+2);
1514 dst = zcycles(dst, 1);//Last of the above T states takes an extra cycle in the true case 1802 cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case
1515 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 1803 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
1516 dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); 1804 mov_ir(code, address + 3, opts->gen.scratch1, SZ_W);
1517 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); 1805 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
1518 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 1806 call(code, opts->write_16_highfirst);//T States: 3, 3
1519 if (inst->immed < 0x4000) { 1807 code_ptr call_dst = z80_get_native_address(context, inst->immed);
1520 uint8_t * call_dst = z80_get_native_address(context, inst->immed);
1521 if (!call_dst) { 1808 if (!call_dst) {
1522 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); 1809 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
1523 //fake address to force large displacement 1810 //fake address to force large displacement
1524 call_dst = dst + 256; 1811 call_dst = code->cur + 256;
1525 } 1812 }
1526 dst = jmp(dst, call_dst); 1813 jmp(code, call_dst);
1527 } else { 1814 *no_call_off = code->cur - (no_call_off+1);
1528 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); 1815 break;
1529 dst = call(dst, (uint8_t *)z80_native_addr); 1816 }
1530 dst = jmp_r(dst, SCRATCH1);
1531 }
1532 *no_call_off = dst - (no_call_off+1);
1533 break;
1534 case Z80_RET: 1817 case Z80_RET:
1535 dst = zcycles(dst, 4);//T States: 4 1818 cycles(&opts->gen, 4);//T States: 4
1536 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); 1819 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
1537 dst = call(dst, (uint8_t *)z80_read_word);//T States: 3, 3 1820 call(code, opts->read_16);//T States: 3, 3
1538 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 1821 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
1539 dst = call(dst, (uint8_t *)z80_native_addr); 1822 call(code, opts->native_addr);
1540 dst = jmp_r(dst, SCRATCH1); 1823 jmp_r(code, opts->gen.scratch1);
1541 break; 1824 break;
1542 case Z80_RETCC: { 1825 case Z80_RETCC: {
1543 dst = zcycles(dst, 5);//T States: 5 1826 cycles(&opts->gen, 5);//T States: 5
1544 uint8_t cond = CC_Z; 1827 uint8_t cond = CC_Z;
1545 switch (inst->reg) 1828 switch (inst->reg)
1546 { 1829 {
1547 case Z80_CC_NZ: 1830 case Z80_CC_NZ:
1548 cond = CC_NZ; 1831 cond = CC_NZ;
1549 case Z80_CC_Z: 1832 case Z80_CC_Z:
1550 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); 1833 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
1551 break; 1834 break;
1552 case Z80_CC_NC: 1835 case Z80_CC_NC:
1553 cond = CC_NZ; 1836 cond = CC_NZ;
1554 case Z80_CC_C: 1837 case Z80_CC_C:
1555 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); 1838 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
1556 break; 1839 break;
1557 case Z80_CC_PO: 1840 case Z80_CC_PO:
1558 cond = CC_NZ; 1841 cond = CC_NZ;
1559 case Z80_CC_PE: 1842 case Z80_CC_PE:
1560 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); 1843 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
1561 break; 1844 break;
1562 case Z80_CC_P: 1845 case Z80_CC_P:
1563 cond = CC_NZ; 1846 cond = CC_NZ;
1564 case Z80_CC_M: 1847 case Z80_CC_M:
1565 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); 1848 cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
1566 break; 1849 break;
1567 } 1850 }
1568 uint8_t *no_call_off = dst+1; 1851 uint8_t *no_call_off = code->cur+1;
1569 dst = jcc(dst, cond, dst+2); 1852 jcc(code, cond, code->cur+2);
1570 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); 1853 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
1571 dst = call(dst, (uint8_t *)z80_read_word);//T States: 3, 3 1854 call(code, opts->read_16);//T States: 3, 3
1572 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 1855 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
1573 dst = call(dst, (uint8_t *)z80_native_addr); 1856 call(code, opts->native_addr);
1574 dst = jmp_r(dst, SCRATCH1); 1857 jmp_r(code, opts->gen.scratch1);
1575 *no_call_off = dst - (no_call_off+1); 1858 *no_call_off = code->cur - (no_call_off+1);
1576 break; 1859 break;
1577 } 1860 }
1578 case Z80_RETI: 1861 case Z80_RETI:
1579 //For some systems, this may need a callback for signalling interrupt routine completion 1862 //For some systems, this may need a callback for signalling interrupt routine completion
1580 dst = zcycles(dst, 8);//T States: 4, 4 1863 cycles(&opts->gen, 8);//T States: 4, 4
1581 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); 1864 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
1582 dst = call(dst, (uint8_t *)z80_read_word);//T States: 3, 3 1865 call(code, opts->read_16);//T States: 3, 3
1583 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 1866 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
1584 dst = call(dst, (uint8_t *)z80_native_addr); 1867 call(code, opts->native_addr);
1585 dst = jmp_r(dst, SCRATCH1); 1868 jmp_r(code, opts->gen.scratch1);
1586 break; 1869 break;
1587 case Z80_RETN: 1870 case Z80_RETN:
1588 dst = zcycles(dst, 8);//T States: 4, 4 1871 cycles(&opts->gen, 8);//T States: 4, 4
1589 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, iff2), SCRATCH2, SZ_B); 1872 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B);
1590 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); 1873 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
1591 dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, offsetof(z80_context, iff1), SZ_B); 1874 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
1592 dst = call(dst, (uint8_t *)z80_read_word);//T States: 3, 3 1875 call(code, opts->read_16);//T States: 3, 3
1593 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 1876 add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
1594 dst = call(dst, (uint8_t *)z80_native_addr); 1877 call(code, opts->native_addr);
1595 dst = jmp_r(dst, SCRATCH1); 1878 jmp_r(code, opts->gen.scratch1);
1596 break; 1879 break;
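
All three return forms above share the same core: pop a little-endian word from the Z80 stack, then map it to native code via the native_addr helper. The pop itself, as reference C (read_byte stands in for the emitted read helpers):

#include <stdint.h>

uint16_t z80_pop_word(uint16_t *sp, uint8_t (*read_byte)(uint16_t addr))
{
	uint16_t value = read_byte(*sp);            //low byte at SP
	value |= (uint16_t)read_byte(*sp + 1) << 8; //high byte at SP+1
	*sp += 2;
	return value;
}
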
1597 case Z80_RST: { 1880 case Z80_RST: {
1598 //RST is basically CALL to an address in page 0 1881 //RST is basically CALL to an address in page 0
1599 dst = zcycles(dst, 5);//T States: 5 1882 cycles(&opts->gen, 5);//T States: 5
1600 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); 1883 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
1601 dst = mov_ir(dst, address + 1, SCRATCH1, SZ_W); 1884 mov_ir(code, address + 1, opts->gen.scratch1, SZ_W);
1602 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); 1885 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
1603 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 1886 call(code, opts->write_16_highfirst);//T States: 3, 3
1604 uint8_t * call_dst = z80_get_native_address(context, inst->immed); 1887 code_ptr call_dst = z80_get_native_address(context, inst->immed);
1605 if (!call_dst) { 1888 if (!call_dst) {
1606 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); 1889 opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
1607 //fake address to force large displacement 1890 //fake address to force large displacement
1608 call_dst = dst + 256; 1891 call_dst = code->cur + 256;
1609 } 1892 }
1610 dst = jmp(dst, call_dst); 1893 jmp(code, call_dst);
1611 break; 1894 break;
1612 } 1895 }
1613 case Z80_IN: 1896 case Z80_IN:
1614 dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4, 3/4 1897 cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4, 3/4
1615 if (inst->addr_mode == Z80_IMMED_INDIRECT) { 1898 if (inst->addr_mode == Z80_IMMED_INDIRECT) {
1616 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_B); 1899 mov_ir(code, inst->immed, opts->gen.scratch1, SZ_B);
1617 } else { 1900 } else {
1618 dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH1, SZ_B); 1901 mov_rr(code, opts->regs[Z80_C], opts->gen.scratch1, SZ_B);
1619 } 1902 }
1620 dst = call(dst, (uint8_t *)z80_io_read); 1903 call(code, opts->read_io);
1621 translate_z80_reg(inst, &dst_op, dst, opts); 1904 translate_z80_reg(inst, &dst_op, opts);
1622 dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_B); 1905 if (dst_op.mode == MODE_REG_DIRECT) {
1623 dst = z80_save_reg(dst, inst, opts); 1906 mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B);
1907 } else {
1908 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
1909 }
1910 z80_save_reg(inst, opts);
1624 break; 1911 break;
1625 /*case Z80_INI: 1912 /*case Z80_INI:
1626 case Z80_INIR: 1913 case Z80_INIR:
1627 case Z80_IND: 1914 case Z80_IND:
1628 case Z80_INDR:*/ 1915 case Z80_INDR:*/
1629 case Z80_OUT: 1916 case Z80_OUT:
1630 dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4, 3/4 1917 cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4, 3/4
1631 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) { 1918 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) {
1632 dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_B); 1919 mov_ir(code, inst->immed, opts->gen.scratch2, SZ_B);
1633 } else { 1920 } else {
1634 dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH2, SZ_B); 1921 zreg_to_native(opts, Z80_C, opts->gen.scratch2);
1635 } 1922 mov_rr(code, opts->regs[Z80_C], opts->gen.scratch2, SZ_B);
1636 translate_z80_reg(inst, &src_op, dst, opts); 1923 }
1637 dst = mov_rr(dst, dst_op.base, SCRATCH1, SZ_B); 1924 translate_z80_reg(inst, &src_op, opts);
1638 dst = call(dst, (uint8_t *)z80_io_write); 1925 if (src_op.mode == MODE_REG_DIRECT) {
1639 dst = z80_save_reg(dst, inst, opts); 1926 mov_rr(code, src_op.base, opts->gen.scratch1, SZ_B);
1927 } else if (src_op.mode == MODE_IMMED) {
1928 mov_ir(code, src_op.disp, opts->gen.scratch1, SZ_B);
1929 } else {
1930 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
1931 }
1932 call(code, opts->write_io);
1933 z80_save_reg(inst, opts);
1640 break; 1934 break;
1641 /*case Z80_OUTI: 1935 /*case Z80_OUTI:
1642 case Z80_OTIR: 1936 case Z80_OTIR:
1643 case Z80_OUTD: 1937 case Z80_OUTD:
1644 case Z80_OTDR:*/ 1938 case Z80_OTDR:*/
1645 default: { 1939 default: {
1646 char disbuf[80]; 1940 char disbuf[80];
1647 z80_disasm(inst, disbuf, address); 1941 z80_disasm(inst, disbuf, address);
1648 fprintf(stderr, "unimplemented instruction: %s at %X\n", disbuf, address);
1649 FILE * f = fopen("zram.bin", "wb"); 1942 FILE * f = fopen("zram.bin", "wb");
1650 fwrite(context->mem_pointers[0], 1, 8 * 1024, f); 1943 fwrite(context->mem_pointers[0], 1, 8 * 1024, f);
1651 fclose(f); 1944 fclose(f);
1652 exit(1); 1945 fatal_error("unimplemented Z80 instruction: %s at %X\nZ80 RAM has been saved to zram.bin for debugging", disbuf, address);
1653 } 1946 }
1654 } 1947 }
1655 return dst; 1948 }
1656 } 1949
1950 uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context)
1951 {
1952 if (!context->interp_code[opcode]) {
1953 if (opcode == 0xCB || (opcode >= 0xDD && (opcode & 0xF) == 0xD)) {
1954 fatal_error("Encountered prefix byte %X at address %X. Z80 interpreter doesn't support those yet.", opcode, context->pc);
1955 }
1956 uint8_t codebuf[8];
1957 memset(codebuf, 0, sizeof(codebuf));
1958 codebuf[0] = opcode;
1959 z80inst inst;
1960 uint8_t * after = z80_decode(codebuf, &inst);
1961 if (after - codebuf > 1) {
1962 fatal_error("Encountered multi-byte Z80 instruction at %X. Z80 interpreter doesn't support those yet.", context->pc);
1963 }
1964
1965 z80_options * opts = context->options;
1966 code_info *code = &opts->gen.code;
1967 check_alloc_code(code, ZMAX_NATIVE_SIZE);
1968 context->interp_code[opcode] = code->cur;
1969 translate_z80inst(&inst, context, 0, 1);
1970 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, pc), opts->gen.scratch1, SZ_W);
1971 add_ir(code, after - codebuf, opts->gen.scratch1, SZ_W);
1972 call(code, opts->native_addr);
1973 jmp_r(code, opts->gen.scratch1);
1974 z80_handle_deferred(context);
1975 }
1976 return context->interp_code[opcode];
1977 }
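
The prefix check above is compact enough to misread: every Z80 prefix byte is either 0xCB or has a low nibble of 0xD at or above 0xDD. Spelled out as a predicate:

#include <stdint.h>

//matches exactly 0xCB, 0xDD, 0xED and 0xFD -- the four Z80 prefix bytes
static int is_z80_prefix(uint8_t opcode)
{
	return opcode == 0xCB || (opcode >= 0xDD && (opcode & 0xF) == 0xD);
}
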
1978
1979 code_info z80_make_interp_stub(z80_context * context, uint16_t address)
1980 {
1981 z80_options *opts = context->options;
1982 code_info * code = &opts->gen.code;
1983 check_alloc_code(code, 32);
1984 code_info stub = {code->cur, NULL};
1985 //TODO: make this play well with the breakpoint code
1986 mov_ir(code, address, opts->gen.scratch1, SZ_W);
1987 call(code, opts->read_8);
1988 //normal opcode fetch is already factored into instruction timing
1989 //back out the base 3 cycles from a read here
1990 //not quite perfect, but it will have to do for now
1991 cycles(&opts->gen, -3);
1992 check_cycles_int(&opts->gen, address);
1993 call(code, opts->gen.save_context);
1994 mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W);
1995 push_r(code, opts->gen.context_reg);
1996 call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.context_reg);
1997 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR);
1998 pop_r(code, opts->gen.context_reg);
1999 call(code, opts->gen.load_context);
2000 jmp_r(code, opts->gen.scratch1);
2001 stub.last = code->cur;
2002 return stub;
2003 }
2004
1657 2005
1658 uint8_t * z80_get_native_address(z80_context * context, uint32_t address) 2006 uint8_t * z80_get_native_address(z80_context * context, uint32_t address)
1659 { 2007 {
1660 native_map_slot *map; 2008 native_map_slot *map;
1661 if (address < 0x4000) { 2009 if (address < 0x4000) {
1662 address &= 0x1FFF; 2010 address &= 0x1FFF;
1663 map = context->static_code_map; 2011 map = context->static_code_map;
1664 } else if (address >= 0x8000) {
1665 address &= 0x7FFF;
1666 map = context->banked_code_map + context->bank_reg;
1667 } else { 2012 } else {
1668 //dprintf("z80_get_native_address: %X NULL\n", address); 2013 address -= 0x4000;
1669 return NULL; 2014 map = context->banked_code_map;
1670 } 2015 }
1671 if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) { 2016 if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) {
1672 //dprintf("z80_get_native_address: %X NULL\n", address); 2017 //dprintf("z80_get_native_address: %X NULL\n", address);
1673 return NULL; 2018 return NULL;
1674 } 2019 }
1675 //dprintf("z80_get_native_address: %X %p\n", address, map->base + map->offsets[address]); 2020 //dprintf("z80_get_native_address: %X %p\n", address, map->base + map->offsets[address]);
1676 return map->base + map->offsets[address]; 2021 return map->base + map->offsets[address];
1677 } 2022 }
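
The lookup above reduces to one table probe per address range: offsets[] holds a per-byte offset from map->base, with two sentinels (INVALID_OFFSET for untranslated bytes, EXTENSION_WORD for non-first bytes of an instruction). A standalone version, with the sentinel values passed in since they aren't shown in this hunk:

#include <stdint.h>
#include <stddef.h>

uint8_t *lookup_native(uint8_t *base, uint32_t *offsets, uint32_t index,
                       uint32_t invalid_offset, uint32_t extension_word)
{
	if (!base || !offsets) {
		return NULL;
	}
	if (offsets[index] == invalid_offset || offsets[index] == extension_word) {
		return NULL; //untranslated, or mid-instruction
	}
	return base + offsets[index];
}
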
1678 2023
1679 uint8_t z80_get_native_inst_size(x86_z80_options * opts, uint32_t address) 2024 uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address)
1680 { 2025 {
2026 //TODO: Fix for addresses >= 0x4000
1681 if (address >= 0x4000) { 2027 if (address >= 0x4000) {
1682 return 0; 2028 return 0;
1683 } 2029 }
1684 return opts->ram_inst_sizes[address & 0x1FFF]; 2030 return opts->gen.ram_inst_sizes[0][address & 0x1FFF];
1685 } 2031 }
1686 2032
1687 void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size) 2033 void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size)
1688 { 2034 {
1689 uint32_t orig_address = address; 2035 uint32_t orig_address = address;
1690 native_map_slot *map; 2036 native_map_slot *map;
1691 x86_z80_options * opts = context->options; 2037 z80_options * opts = context->options;
1692 if (address < 0x4000) { 2038 if (address < 0x4000) {
1693 address &= 0x1FFF; 2039 address &= 0x1FFF;
1694 map = context->static_code_map; 2040 map = context->static_code_map;
1695 opts->ram_inst_sizes[address] = native_size; 2041 opts->gen.ram_inst_sizes[0][address] = native_size;
1696 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); 2042 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7);
1697 context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7); 2043 context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7);
1698 } else if (address >= 0x8000) { 2044 } else {
1699 address &= 0x7FFF; 2045 //HERE
1700 map = context->banked_code_map + context->bank_reg; 2046 address -= 0x4000;
2047 map = context->banked_code_map;
1701 if (!map->offsets) { 2048 if (!map->offsets) {
1702 map->offsets = malloc(sizeof(int32_t) * 0x8000); 2049 map->offsets = malloc(sizeof(int32_t) * 0xC000);
1703 memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000); 2050 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000);
1704 } 2051 }
1705 } else {
1706 return;
1707 } 2052 }
1708 if (!map->base) { 2053 if (!map->base) {
1709 map->base = native_address; 2054 map->base = native_address;
1710 } 2055 }
1711 map->offsets[address] = native_address - map->base; 2056 map->offsets[address] = native_address - map->base;
1712 for(--size, orig_address++; size; --size, orig_address++) { 2057 for(--size, orig_address++; size; --size, orig_address++) {
1713 address = orig_address; 2058 address = orig_address;
1714 if (address < 0x4000) { 2059 if (address < 0x4000) {
1715 address &= 0x1FFF; 2060 address &= 0x1FFF;
1716 map = context->static_code_map; 2061 map = context->static_code_map;
1717 } else if (address >= 0x8000) { 2062 } else {
1718 address &= 0x7FFF; 2063 address -= 0x4000;
1719 map = context->banked_code_map + context->bank_reg; 2064 map = context->banked_code_map;
1720 } else {
1721 return;
1722 } 2065 }
1723 if (!map->offsets) { 2066 if (!map->offsets) {
1724 map->offsets = malloc(sizeof(int32_t) * 0x8000); 2067 map->offsets = malloc(sizeof(int32_t) * 0xC000);
1725 memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000); 2068 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000);
1726 } 2069 }
1727 map->offsets[address] = EXTENSION_WORD; 2070 map->offsets[address] = EXTENSION_WORD;
1728 } 2071 }
1729 } 2072 }
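
Tagging every non-first byte with EXTENSION_WORD is what makes write-invalidation possible: given a write anywhere inside a translated instruction, the instruction's first byte can be recovered by scanning backwards, which is what z80_get_instruction_start does below. The scan in isolation (illustrative form):

#include <stdint.h>

uint32_t scan_to_inst_start(uint32_t *offsets, uint32_t address, uint32_t extension_word)
{
	//walk back over continuation bytes until the first byte's real offset
	while (offsets[address] == extension_word) {
		--address;
	}
	return address;
}
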
1730 2073
1731 #define INVALID_INSTRUCTION_START 0xFEEDFEED 2074 #define INVALID_INSTRUCTION_START 0xFEEDFEED
1732 2075
1733 uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address) 2076 uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address)
1734 { 2077 {
2078 //TODO: Fixme for address >= 0x4000
1735 if (!static_code_map->base || address >= 0x4000) { 2079 if (!static_code_map->base || address >= 0x4000) {
1736 return INVALID_INSTRUCTION_START; 2080 return INVALID_INSTRUCTION_START;
1737 } 2081 }
1738 address &= 0x1FFF; 2082 address &= 0x1FFF;
1739 if (static_code_map->offsets[address] == INVALID_OFFSET) { 2083 if (static_code_map->offsets[address] == INVALID_OFFSET) {
1748 2092
1749 z80_context * z80_handle_code_write(uint32_t address, z80_context * context) 2093 z80_context * z80_handle_code_write(uint32_t address, z80_context * context)
1750 { 2094 {
1751 uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address); 2095 uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address);
1752 if (inst_start != INVALID_INSTRUCTION_START) { 2096 if (inst_start != INVALID_INSTRUCTION_START) {
1753 uint8_t * dst = z80_get_native_address(context, inst_start); 2097 code_ptr dst = z80_get_native_address(context, inst_start);
1754 dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", dst, inst_start, address); 2098 code_info code = {dst, dst+16};
1755 dst = mov_ir(dst, inst_start, SCRATCH1, SZ_D); 2099 z80_options * opts = context->options;
1756 dst = call(dst, (uint8_t *)z80_retrans_stub); 2100 dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code.cur, inst_start, address);
2101 mov_ir(&code, inst_start, opts->gen.scratch1, SZ_D);
2102 call(&code, opts->retrans_stub);
1757 } 2103 }
1758 return context; 2104 return context;
1759 } 2105 }
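
A RAM write handler would consult ram_code_flags before paying for this patch; the bit layout matches the shifts in z80_map_native_address above (one flag bit per 128-byte region of the 8KB RAM). Illustrative glue, not code from this file:

#include <stdint.h>

void z80_ram_write(z80_context *context, uint16_t address, uint8_t value)
{
	context->mem_pointers[0][address & 0x1FFF] = value;
	//only invoke the invalidation path if this 128-byte region holds code
	if (context->ram_code_flags[(address & 0x1C00) >> 10] & (1 << ((address & 0x380) >> 7))) {
		z80_handle_code_write(address, context);
	}
}
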
1760 2106
1761 uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address) 2107 uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address)
1771 return addr; 2117 return addr;
1772 } 2118 }
1773 2119
1774 void z80_handle_deferred(z80_context * context) 2120 void z80_handle_deferred(z80_context * context)
1775 { 2121 {
1776 x86_z80_options * opts = context->options; 2122 z80_options * opts = context->options;
1777 process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address); 2123 process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address);
1778 if (opts->deferred) { 2124 if (opts->gen.deferred) {
1779 translate_z80_stream(context, opts->deferred->address); 2125 translate_z80_stream(context, opts->gen.deferred->address);
1780 } 2126 }
1781 } 2127 }
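
The deferral idiom used by every jump and call case earlier also explains the `code->cur + 256` trick: a fake target more than 127 bytes away forces the emitter to pick the rel32 form of jmp, guaranteeing a 4-byte displacement slot exists to patch once the real target is translated. How such a slot would be fixed up (the struct layout here is a guess; defer_address and process_deferred are the real entry points):

#include <stdint.h>
#include <string.h>

typedef struct {
	uint8_t *patch_site; //address of the rel32 field inside the emitted jmp
	uint32_t address;    //Z80 address whose translation is still pending
} deferred_patch;

void apply_deferred_patch(deferred_patch *patch, uint8_t *native_target)
{
	//rel32 displacements are relative to the end of the 4-byte field
	int32_t rel = (int32_t)(native_target - (patch->patch_site + 4));
	memcpy(patch->patch_site, &rel, sizeof(rel));
}
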
1782 2128
2129 extern void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) asm("z80_retranslate_inst");
1783 void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) 2130 void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start)
1784 { 2131 {
1785 char disbuf[80]; 2132 char disbuf[80];
1786 x86_z80_options * opts = context->options; 2133 z80_options * opts = context->options;
1787 uint8_t orig_size = z80_get_native_inst_size(opts, address); 2134 uint8_t orig_size = z80_get_native_inst_size(opts, address);
1788 uint32_t orig = address; 2135 code_info *code = &opts->gen.code;
1789 address &= 0x1FFF; 2136 uint8_t *after, *inst = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
1790 uint8_t * dst = opts->cur_code;
1791 uint8_t * dst_end = opts->code_end;
1792 uint8_t *after, *inst = context->mem_pointers[0] + address;
1793 z80inst instbuf; 2137 z80inst instbuf;
1794 dprintf("Retranslating code at Z80 address %X, native address %p\n", address, orig_start); 2138 dprintf("Retranslating code at Z80 address %X, native address %p\n", address, orig_start);
1795 after = z80_decode(inst, &instbuf); 2139 after = z80_decode(inst, &instbuf);
1796 #ifdef DO_DEBUG_PRINT 2140 #ifdef DO_DEBUG_PRINT
1797 z80_disasm(&instbuf, disbuf, address); 2141 z80_disasm(&instbuf, disbuf, address);
1800 } else { 2144 } else {
1801 printf("%X\t%s\n", address, disbuf); 2145 printf("%X\t%s\n", address, disbuf);
1802 } 2146 }
1803 #endif 2147 #endif
1804 if (orig_size != ZMAX_NATIVE_SIZE) { 2148 if (orig_size != ZMAX_NATIVE_SIZE) {
1805 if (dst_end - dst < ZMAX_NATIVE_SIZE) { 2149 check_alloc_code(code, ZMAX_NATIVE_SIZE);
1806 size_t size = 1024*1024; 2150 code_ptr start = code->cur;
1807 dst = alloc_code(&size); 2151 deferred_addr * orig_deferred = opts->gen.deferred;
1808 opts->code_end = dst_end = dst + size; 2152 translate_z80inst(&instbuf, context, address, 0);
1809 opts->cur_code = dst; 2153 /*
1810 }
1811 deferred_addr * orig_deferred = opts->deferred;
1812 uint8_t * native_end = translate_z80inst(&instbuf, dst, context, address);
1813 if ((native_end - dst) <= orig_size) { 2154 if ((native_end - dst) <= orig_size) {
1814 uint8_t * native_next = z80_get_native_address(context, address + after-inst); 2155 uint8_t * native_next = z80_get_native_address(context, address + after-inst);
1815 if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) { 2156 if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) {
1816 remove_deferred_until(&opts->deferred, orig_deferred); 2157 remove_deferred_until(&opts->gen.deferred, orig_deferred);
1817 native_end = translate_z80inst(&instbuf, orig_start, context, address); 2158 native_end = translate_z80inst(&instbuf, orig_start, context, address, 0);
1818 if (native_next == orig_start + orig_size && (native_next-native_end) < 2) { 2159 if (native_next == orig_start + orig_size && (native_next-native_end) < 2) {
1819 while (native_end < orig_start + orig_size) { 2160 while (native_end < orig_start + orig_size) {
1820 *(native_end++) = 0x90; //NOP 2161 *(native_end++) = 0x90; //NOP
1821 } 2162 }
1822 } else { 2163 } else {
1823 jmp(native_end, native_next); 2164 jmp(native_end, native_next);
1824 } 2165 }
1825 z80_handle_deferred(context); 2166 z80_handle_deferred(context);
1826 return orig_start; 2167 return orig_start;
1827 } 2168 }
1828 } 2169 }*/
1829 z80_map_native_address(context, address, dst, after-inst, ZMAX_NATIVE_SIZE); 2170 z80_map_native_address(context, address, start, after-inst, ZMAX_NATIVE_SIZE);
1830 opts->cur_code = dst+ZMAX_NATIVE_SIZE; 2171 code_info tmp_code = {orig_start, orig_start + 16};
1831 jmp(orig_start, dst); 2172 jmp(&tmp_code, start);
2173 tmp_code = *code;
2174 code->cur = start + ZMAX_NATIVE_SIZE;
1832 if (!z80_is_terminal(&instbuf)) { 2175 if (!z80_is_terminal(&instbuf)) {
1833 jmp(native_end, z80_get_native_address_trans(context, address + after-inst)); 2176 jmp(&tmp_code, z80_get_native_address_trans(context, address + after-inst));
1834 } 2177 }
1835 z80_handle_deferred(context); 2178 z80_handle_deferred(context);
1836 return dst; 2179 return start;
1837 } else { 2180 } else {
1838 dst = translate_z80inst(&instbuf, orig_start, context, address); 2181 code_info tmp_code = *code;
2182 code->cur = orig_start;
2183 code->last = orig_start + ZMAX_NATIVE_SIZE;
2184 translate_z80inst(&instbuf, context, address, 0);
2185 code_info tmp2 = *code;
2186 *code = tmp_code;
1839 if (!z80_is_terminal(&instbuf)) { 2187 if (!z80_is_terminal(&instbuf)) {
1840 dst = jmp(dst, z80_get_native_address_trans(context, address + after-inst)); 2188
2189 jmp(&tmp2, z80_get_native_address_trans(context, address + after-inst));
1841 } 2190 }
1842 z80_handle_deferred(context); 2191 z80_handle_deferred(context);
1843 return orig_start; 2192 return orig_start;
1844 } 2193 }
1845 } 2194 }
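
Net effect of the rewritten path above when the old translation was not already ZMAX_NATIVE_SIZE: the instruction is re-emitted into a fresh fixed-size slot, and the first bytes of the old site become a trampoline, so stale direct jumps still land somewhere valid. In outline (code_info and jmp are the real primitives; the helper itself is illustrative):

#include <stdint.h>

//redirect an abandoned translation to its replacement
void install_trampoline(uint8_t *old_start, uint8_t *new_start)
{
	code_info tmp = {old_start, old_start + 16}; //room for a 5-byte jmp
	jmp(&tmp, new_start);
}
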
1848 { 2197 {
1849 char disbuf[80]; 2198 char disbuf[80];
1850 if (z80_get_native_address(context, address)) { 2199 if (z80_get_native_address(context, address)) {
1851 return; 2200 return;
1852 } 2201 }
1853 x86_z80_options * opts = context->options; 2202 z80_options * opts = context->options;
1854 uint32_t start_address = address; 2203 uint32_t start_address = address;
1855 uint8_t * encoded = NULL, *next; 2204
1856 if (address < 0x4000) { 2205 do
1857 encoded = context->mem_pointers[0] + (address & 0x1FFF);
1858 } else if(address >= 0x8000 && context->mem_pointers[1]) {
1859 printf("attempt to translate Z80 code from banked area at address %X\n", address);
1860 exit(1);
1861 //encoded = context->mem_pointers[1] + (address & 0x7FFF);
1862 }
1863 while (encoded != NULL)
1864 { 2206 {
1865 z80inst inst; 2207 z80inst inst;
1866 dprintf("translating Z80 code at address %X\n", address); 2208 dprintf("translating Z80 code at address %X\n", address);
1867 do { 2209 do {
1868 if (opts->code_end-opts->cur_code < ZMAX_NATIVE_SIZE) {
1869 if (opts->code_end-opts->cur_code < 5) {
1870 puts("out of code memory, not enough space for jmp to next chunk");
1871 exit(1);
1872 }
1873 size_t size = 1024*1024;
1874 opts->cur_code = alloc_code(&size);
1875 opts->code_end = opts->cur_code + size;
1876 jmp(opts->cur_code, opts->cur_code);
1877 }
1878 if (address > 0x4000 && address < 0x8000) {
1879 opts->cur_code = xor_rr(opts->cur_code, RDI, RDI, SZ_D);
1880 opts->cur_code = call(opts->cur_code, (uint8_t *)exit);
1881 break;
1882 }
1883 uint8_t * existing = z80_get_native_address(context, address); 2210 uint8_t * existing = z80_get_native_address(context, address);
1884 if (existing) { 2211 if (existing) {
1885 opts->cur_code = jmp(opts->cur_code, existing); 2212 jmp(&opts->gen.code, existing);
1886 break; 2213 break;
1887 } 2214 }
2215 uint8_t * encoded, *next;
2216 encoded = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
2217 if (!encoded) {
2218 code_info stub = z80_make_interp_stub(context, address);
2219 z80_map_native_address(context, address, stub.cur, 1, stub.last - stub.cur);
2220 break;
2221 }
2222 //make sure prologue is in a contiguous chunk of code
2223 check_code_prologue(&opts->gen.code);
1888 next = z80_decode(encoded, &inst); 2224 next = z80_decode(encoded, &inst);
1889 #ifdef DO_DEBUG_PRINT 2225 #ifdef DO_DEBUG_PRINT
1890 z80_disasm(&inst, disbuf, address); 2226 z80_disasm(&inst, disbuf, address);
1891 if (inst.op == Z80_NOP) { 2227 if (inst.op == Z80_NOP) {
1892 printf("%X\t%s(%d)\n", address, disbuf, inst.immed); 2228 printf("%X\t%s(%d)\n", address, disbuf, inst.immed);
1893 } else { 2229 } else {
1894 printf("%X\t%s\n", address, disbuf); 2230 printf("%X\t%s\n", address, disbuf);
1895 } 2231 }
1896 #endif 2232 #endif
1897 uint8_t *after = translate_z80inst(&inst, opts->cur_code, context, address); 2233 code_ptr start = opts->gen.code.cur;
1898 z80_map_native_address(context, address, opts->cur_code, next-encoded, after - opts->cur_code); 2234 translate_z80inst(&inst, context, address, 0);
1899 opts->cur_code = after; 2235 z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start);
1900 address += next-encoded; 2236 address += next-encoded;
1901 if (address > 0xFFFF) {
1902 address &= 0xFFFF; 2237 address &= 0xFFFF;
1903
1904 } else {
1905 encoded = next;
1906 }
1907 } while (!z80_is_terminal(&inst)); 2238 } while (!z80_is_terminal(&inst));
1908 process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address); 2239 process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address);
1909 if (opts->deferred) { 2240 if (opts->gen.deferred) {
1910 address = opts->deferred->address; 2241 address = opts->gen.deferred->address;
1911 dprintf("deferred address: %X\n", address); 2242 dprintf("deferred address: %X\n", address);
1912 if (address < 0x4000) { 2243 }
1913 encoded = context->mem_pointers[0] + (address & 0x1FFF); 2244 } while (opts->gen.deferred);
1914 } else if (address > 0x8000 && context->mem_pointers[1]) { 2245 }
1915 encoded = context->mem_pointers[1] + (address & 0x7FFF); 2246
1916 } else { 2247 void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, uint32_t clock_divider)
1917 printf("attempt to translate non-memory address: %X\n", address); 2248 {
1918 exit(1); 2249 memset(options, 0, sizeof(*options));
1919 } 2250
1920 } else { 2251 options->gen.memmap = chunks;
1921 encoded = NULL; 2252 options->gen.memmap_chunks = num_chunks;
1922 } 2253 options->gen.address_size = SZ_W;
1923 } 2254 options->gen.address_mask = 0xFFFF;
1924 } 2255 options->gen.max_address = 0x10000;
1925 2256 options->gen.bus_cycles = 3;
1926 void init_x86_z80_opts(x86_z80_options * options) 2257 options->gen.clock_divider = clock_divider;
1927 { 2258 options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers);
2259 options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags);
2260 options->gen.ram_flags_shift = 7;
2261
1928 options->flags = 0; 2262 options->flags = 0;
2263 #ifdef X86_64
1929 options->regs[Z80_B] = BH; 2264 options->regs[Z80_B] = BH;
1930 options->regs[Z80_C] = RBX; 2265 options->regs[Z80_C] = RBX;
1931 options->regs[Z80_D] = CH; 2266 options->regs[Z80_D] = CH;
1932 options->regs[Z80_E] = RCX; 2267 options->regs[Z80_E] = RCX;
1933 options->regs[Z80_H] = AH; 2268 options->regs[Z80_H] = AH;
1944 options->regs[Z80_HL] = RAX; 2279 options->regs[Z80_HL] = RAX;
1945 options->regs[Z80_SP] = R9; 2280 options->regs[Z80_SP] = R9;
1946 options->regs[Z80_AF] = -1; 2281 options->regs[Z80_AF] = -1;
1947 options->regs[Z80_IX] = RDX; 2282 options->regs[Z80_IX] = RDX;
1948 options->regs[Z80_IY] = R8; 2283 options->regs[Z80_IY] = R8;
1949 size_t size = 1024 * 1024; 2284
1950 options->cur_code = alloc_code(&size); 2285 options->gen.scratch1 = R13;
1951 options->code_end = options->cur_code + size; 2286 options->gen.scratch2 = R14;
1952 options->ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000); 2287 #else
1953 memset(options->ram_inst_sizes, 0, sizeof(uint8_t) * 0x2000); 2288 memset(options->regs, -1, sizeof(options->regs));
1954 options->deferred = NULL; 2289 options->regs[Z80_A] = RAX;
1955 } 2290 options->regs[Z80_SP] = RBX;
1956 2291
1957 void init_z80_context(z80_context * context, x86_z80_options * options) 2292 options->gen.scratch1 = RCX;
2293 options->gen.scratch2 = RDX;
2294 #endif
2295
2296 options->gen.context_reg = RSI;
2297 options->gen.cycles = RBP;
2298 options->gen.limit = RDI;
2299
2300 options->gen.native_code_map = malloc(sizeof(native_map_slot));
2301 memset(options->gen.native_code_map, 0, sizeof(native_map_slot));
2302 options->gen.deferred = NULL;
2303 options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000 + sizeof(uint8_t *));
2304 options->gen.ram_inst_sizes[0] = (uint8_t *)(options->gen.ram_inst_sizes + 1);
2305 memset(options->gen.ram_inst_sizes[0], 0, sizeof(uint8_t) * 0x2000);
2306
2307 code_info *code = &options->gen.code;
2308 init_code_info(code);
2309
2310 options->save_context_scratch = code->cur;
2311 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
2312 mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_W);
2313
2314 options->gen.save_context = code->cur;
2315 for (int i = 0; i <= Z80_A; i++)
2316 {
2317 int reg;
2318 uint8_t size;
2319 if (i < Z80_I) {
2320 reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0);
2321 size = SZ_W;
2322 } else {
2323 reg = i;
2324 size = SZ_B;
2325 }
2326 if (options->regs[reg] >= 0) {
2327 mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size);
2328 }
2329 if (size == SZ_W) {
2330 i++;
2331 }
2332 }
2333 if (options->regs[Z80_SP] >= 0) {
2334 mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W);
2335 }
2336 mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
2337 mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D);
2338 retn(code);
2339
2340 options->load_context_scratch = code->cur;
2341 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W);
2342 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch2, SZ_W);
2343 options->gen.load_context = code->cur;
2344 for (int i = 0; i <= Z80_A; i++)
2345 {
2346 int reg;
2347 uint8_t size;
2348 if (i < Z80_I) {
2349 reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0);
2350 size = SZ_W;
2351 } else {
2352 reg = i;
2353 size = SZ_B;
2354 }
2355 if (options->regs[reg] >= 0) {
2356 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + i, options->regs[reg], size);
2357 }
2358 if (size == SZ_W) {
2359 i++;
2360 }
2361 }
2362 if (options->regs[Z80_SP] >= 0) {
2363 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W);
2364 }
2365 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D);
2366 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D);
2367 retn(code);
2368
2369 options->native_addr = code->cur;
2370 call(code, options->gen.save_context);
2371 push_r(code, options->gen.context_reg);
2372 movzx_rr(code, options->gen.scratch1, options->gen.scratch1, SZ_W, SZ_D);
2373 call_args(code, (code_ptr)z80_get_native_address_trans, 2, options->gen.context_reg, options->gen.scratch1);
2374 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
2375 pop_r(code, options->gen.context_reg);
2376 call(code, options->gen.load_context);
2377 retn(code);
2378
2379 options->gen.handle_cycle_limit = code->cur;
2380 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
2381 code_ptr no_sync = code->cur+1;
2382 jcc(code, CC_B, no_sync);
2383 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, pc), SZ_W);
2384 call(code, options->save_context_scratch);
2385 pop_r(code, RAX); //return address in read/write func
2386 pop_r(code, RBX); //return address in translated code
2387 sub_ir(code, 5, RAX, SZ_PTR); //adjust return address to point to the call that got us here
2388 mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
2389 mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
2390 restore_callee_save_regs(code);
2391 *no_sync = code->cur - (no_sync + 1);
2392 //return to caller of z80_run
2393 retn(code);
2394
2395 options->gen.handle_code_write = (code_ptr)z80_handle_code_write;
2396
2397 options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc);
2398 options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc);
2399
2400 options->gen.handle_cycle_limit_int = code->cur;
2401 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D);
2402 code_ptr skip_int = code->cur+1;
2403 jcc(code, CC_B, skip_int);
2404 //set limit to the cycle limit
2405 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D);
2406 //disable interrupts
2407 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
2408 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
2409 cycles(&options->gen, 7);
2410 //save return address (in scratch1) to Z80 stack
2411 sub_ir(code, 2, options->regs[Z80_SP], SZ_W);
2412 mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
2413 //we need to do check_cycles and cycles outside of the write_8 call
2414 //so that the stack has the correct depth if we need to return to C
2415 //for a synchronization
2416 check_cycles(&options->gen);
2417 cycles(&options->gen, 3);
2418 //save word to write before call to write_8_noinc
2419 push_r(code, options->gen.scratch1);
2420 call(code, options->write_8_noinc);
2421 //restore word to write
2422 pop_r(code, options->gen.scratch1);
2423 //write high byte to SP+1
2424 mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
2425 add_ir(code, 1, options->gen.scratch2, SZ_W);
2426 shr_ir(code, 8, options->gen.scratch1, SZ_W);
2427 check_cycles(&options->gen);
2428 cycles(&options->gen, 3);
2429 call(code, options->write_8_noinc);
2430 //dispose of return address as we'll be jumping somewhere else
2431 pop_r(code, options->gen.scratch2);
2432 //TODO: Support interrupt mode 0 and 2
2433 mov_ir(code, 0x38, options->gen.scratch1, SZ_W);
2434 call(code, options->native_addr);
2435 mov_rrind(code, options->gen.scratch1, options->gen.context_reg, SZ_PTR);
2436 restore_callee_save_regs(code);
2437 //return to caller of z80_run to sync
2438 retn(code);
2439 *skip_int = code->cur - (skip_int+1);
2440 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
2441 code_ptr skip_sync = code->cur + 1;
2442 jcc(code, CC_B, skip_sync);
2443 //save PC
2444 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, pc), SZ_D);
2445 options->do_sync = code->cur;
2446 call(code, options->gen.save_context);
2447 pop_rind(code, options->gen.context_reg);
2448 //restore callee saved registers
2449 restore_callee_save_regs(code);
2450 //return to caller of z80_run
2451 *skip_sync = code->cur - (skip_sync+1);
2452 retn(code);
2453
2454 options->read_io = code->cur;
2455 check_cycles(&options->gen);
2456 cycles(&options->gen, 4);
2457 //Genesis has no IO hardware and always returns FF
2458 //eventually this should use a second memory map array
2459 mov_ir(code, 0xFF, options->gen.scratch1, SZ_B);
2460 retn(code);
2461
2462 options->write_io = code->cur;
2463 check_cycles(&options->gen);
2464 cycles(&options->gen, 4);
2465 retn(code);
2466
2467 options->read_16 = code->cur;
2468 cycles(&options->gen, 3);
2469 check_cycles(&options->gen);
2470 //TODO: figure out how to handle the extra wait state for word reads to bank area
2471 //may also need special handling to avoid too much stack depth when access is blocked
2472 push_r(code, options->gen.scratch1);
2473 call(code, options->read_8_noinc);
2474 mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B);
2475 #ifndef X86_64
2476 //scratch2 is a caller-save register in 32-bit builds and may be clobbered by something called from the read_8 function
2477 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_B);
2478 #endif
2479 pop_r(code, options->gen.scratch1);
2480 add_ir(code, 1, options->gen.scratch1, SZ_W);
2481 cycles(&options->gen, 3);
2482 check_cycles(&options->gen);
2483 call(code, options->read_8_noinc);
2484 shl_ir(code, 8, options->gen.scratch1, SZ_W);
2485 #ifdef X86_64
2486 mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B);
2487 #else
2488 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch1, SZ_B);
2489 #endif
2490 retn(code);
2491
2492 options->write_16_highfirst = code->cur;
2493 cycles(&options->gen, 3);
2494 check_cycles(&options->gen);
2495 push_r(code, options->gen.scratch2);
2496 push_r(code, options->gen.scratch1);
2497 add_ir(code, 1, options->gen.scratch2, SZ_W);
2498 shr_ir(code, 8, options->gen.scratch1, SZ_W);
2499 call(code, options->write_8_noinc);
2500 pop_r(code, options->gen.scratch1);
2501 pop_r(code, options->gen.scratch2);
2502 cycles(&options->gen, 3);
2503 check_cycles(&options->gen);
2504 //TODO: Check if we can get away with TCO here
2505 call(code, options->write_8_noinc);
2506 retn(code);
2507
2508 options->write_16_lowfirst = code->cur;
2509 cycles(&options->gen, 3);
2510 check_cycles(&options->gen);
2511 push_r(code, options->gen.scratch2);
2512 push_r(code, options->gen.scratch1);
2513 call(code, options->write_8_noinc);
2514 pop_r(code, options->gen.scratch1);
2515 pop_r(code, options->gen.scratch2);
2516 add_ir(code, 1, options->gen.scratch2, SZ_W);
2517 shr_ir(code, 8, options->gen.scratch1, SZ_W);
2518 cycles(&options->gen, 3);
2519 check_cycles(&options->gen);
2520 //TODO: Check if we can get away with TCO here
2521 call(code, options->write_8_noinc);
2522 retn(code);
2523
2524 options->retrans_stub = code->cur;
2525 //pop return address
2526 pop_r(code, options->gen.scratch2);
2527 call(code, options->gen.save_context);
2528 //adjust pointer back to the start of the mov and call instructions that got us here
2529 sub_ir(code, options->gen.scratch1 >= R8 ? 11 : 10, options->gen.scratch2, SZ_PTR);
2530 push_r(code, options->gen.context_reg);
2531 call_args(code, (code_ptr)z80_retranslate_inst, 3, options->gen.scratch1, options->gen.context_reg, options->gen.scratch2);
2532 pop_r(code, options->gen.context_reg);
2533 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
2534 call(code, options->gen.load_context);
2535 jmp_r(code, options->gen.scratch1);
2536
2537 options->run = (z80_run_fun)code->cur;
2538 save_callee_save_regs(code);
2539 #ifdef X86_64
2540 mov_rr(code, RDI, options->gen.context_reg, SZ_PTR);
2541 #else
2542 mov_rdispr(code, RSP, 5 * sizeof(int32_t), options->gen.context_reg, SZ_PTR);
2543 #endif
2544 call(code, options->load_context_scratch);
2545 cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
2546 code_ptr no_extra = code->cur+1;
2547 jcc(code, CC_Z, no_extra);
2548 push_rdisp(code, options->gen.context_reg, offsetof(z80_context, extra_pc));
2549 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
2550 *no_extra = code->cur - (no_extra + 1);
2551 jmp_rind(code, options->gen.context_reg);
2552 }
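
The interrupt path assembled above implements IM 1 only (modes 0 and 2 are a TODO in the source). Its architectural effect, as reference C (write_byte stands in for write_8_noinc; field names mirror z80_context):

#include <stdint.h>

void z80_take_im1_interrupt(z80_context *context,
                            void (*write_byte)(z80_context *, uint16_t, uint8_t))
{
	context->iff1 = context->iff2 = 0; //interrupts disabled on entry
	context->sp -= 2;                  //push the interrupted PC, low byte first
	write_byte(context, context->sp, context->pc & 0xFF);
	write_byte(context, (uint16_t)(context->sp + 1), context->pc >> 8);
	context->pc = 0x38;                //IM 1 vectors to RST 38h
	//acknowledge + push total 13 T-states (the 7 + 3 + 3 charged above)
}
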
2553
2554 void init_z80_context(z80_context * context, z80_options * options)
1958 { 2555 {
1959 memset(context, 0, sizeof(*context)); 2556 memset(context, 0, sizeof(*context));
1960 context->static_code_map = malloc(sizeof(*context->static_code_map)); 2557 context->static_code_map = malloc(sizeof(*context->static_code_map));
1961 context->static_code_map->base = NULL; 2558 context->static_code_map->base = NULL;
1962 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); 2559 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000);
1963 memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000); 2560 memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000);
1964 context->banked_code_map = malloc(sizeof(native_map_slot) * (1 << 9)); 2561 context->banked_code_map = malloc(sizeof(native_map_slot));
1965 memset(context->banked_code_map, 0, sizeof(native_map_slot) * (1 << 9)); 2562 memset(context->banked_code_map, 0, sizeof(native_map_slot));
1966 context->options = options; 2563 context->options = options;
1967 } 2564 context->int_cycle = CYCLE_NEVER;
1968 2565 context->int_pulse_start = CYCLE_NEVER;
1969 void z80_reset(z80_context * context) 2566 context->int_pulse_end = CYCLE_NEVER;
1970 { 2567 }
1971 context->im = 0; 2568
1972 context->iff1 = context->iff2 = 0; 2569 void z80_run(z80_context * context, uint32_t target_cycle)
1973 context->native_pc = z80_get_native_address_trans(context, 0); 2570 {
1974 context->extra_pc = NULL; 2571 if (context->reset || context->busack) {
2572 context->current_cycle = target_cycle;
2573 } else {
2574 if (context->current_cycle < target_cycle) {
2575 //busreq is sampled at the end of an m-cycle
2576 //we can approximate that by running for a single m-cycle after a bus request
2577 context->sync_cycle = context->busreq ? context->current_cycle + 3*context->options->gen.clock_divider : target_cycle;
2578 if (!context->native_pc) {
2579 context->native_pc = z80_get_native_address_trans(context, context->pc);
2580 }
2581 while (context->current_cycle < context->sync_cycle)
2582 {
2583 if (context->int_pulse_end < context->current_cycle || context->int_pulse_end == CYCLE_NEVER) {
2584 z80_next_int_pulse(context);
2585 }
2586 if (context->iff1) {
2587 context->int_cycle = context->int_pulse_start < context->int_enable_cycle ? context->int_enable_cycle : context->int_pulse_start;
2588 } else {
2589 context->int_cycle = CYCLE_NEVER;
2590 }
2591 context->target_cycle = context->sync_cycle < context->int_cycle ? context->sync_cycle : context->int_cycle;
2592 dprintf("Running Z80 from cycle %d to cycle %d. Int cycle: %d (%d - %d)\n", context->current_cycle, context->sync_cycle, context->int_cycle, context->int_pulse_start, context->int_pulse_end);
2593 context->options->run(context);
2594 dprintf("Z80 ran to cycle %d\n", context->current_cycle);
2595 }
2596 if (context->busreq) {
2597 context->busack = 1;
2598 context->current_cycle = target_cycle;
2599 }
2600 }
2601 }
2602 }
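
The loop above maintains the invariant the generated code relies on: target_cycle is always the nearer of the next sync point and the next interrupt, so translated code only ever compares against a single limit register. That reduction, extracted:

#include <stdint.h>

static uint32_t next_int_cycle(z80_context *context)
{
	if (!context->iff1) {
		return CYCLE_NEVER; //interrupts masked
	}
	//an interrupt can't land before EI's enable delay has elapsed
	return context->int_pulse_start < context->int_enable_cycle
	       ? context->int_enable_cycle : context->int_pulse_start;
}

static uint32_t next_target_cycle(uint32_t sync_cycle, uint32_t int_cycle)
{
	return sync_cycle < int_cycle ? sync_cycle : int_cycle;
}
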
2603
2604 void z80_assert_reset(z80_context * context, uint32_t cycle)
2605 {
2606 z80_run(context, cycle);
2607 context->reset = 1;
2608 }
2609
2610 void z80_clear_reset(z80_context * context, uint32_t cycle)
2611 {
2612 z80_run(context, cycle);
2613 if (context->reset) {
2614 //TODO: Handle case where reset is not asserted long enough
2615 context->im = 0;
2616 context->iff1 = context->iff2 = 0;
2617 context->native_pc = NULL;
2618 context->extra_pc = NULL;
2619 context->pc = 0;
2620 context->reset = 0;
2621 if (context->busreq) {
2622 //TODO: Figure out appropriate delay
2623 context->busack = 1;
2624 }
2625 }
2626 }
2627
2628 void z80_assert_busreq(z80_context * context, uint32_t cycle)
2629 {
2630 z80_run(context, cycle);
2631 context->busreq = 1;
2632 }
2633
2634 void z80_clear_busreq(z80_context * context, uint32_t cycle)
2635 {
2636 z80_run(context, cycle);
2637 context->busreq = 0;
2638 context->busack = 0;
2639 }
2640
2641 uint8_t z80_get_busack(z80_context * context, uint32_t cycle)
2642 {
2643 z80_run(context, cycle);
2644 return context->busack;
2645 }
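
Typical use of the bus-arbitration accessors above from the host machine's scheduler (the cycle offsets are arbitrary; this sequence is illustrative, not from the source):

void grant_and_release_bus(z80_context *z80, uint32_t now)
{
	z80_assert_busreq(z80, now);
	//the Z80 grants at the end of its current M-cycle; poll shortly after
	if (z80_get_busack(z80, now + 30)) {
		//... host-side accesses to Z80 RAM would go here ...
	}
	z80_clear_busreq(z80, now + 60);
}
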
2646
2647 void z80_adjust_cycles(z80_context * context, uint32_t deduction)
2648 {
2649 if (context->current_cycle < deduction) {
2650 fprintf(stderr, "WARNING: Deduction of %u cycles when Z80 cycle counter is only %u\n", deduction, context->current_cycle);
2651 context->current_cycle = 0;
2652 } else {
2653 context->current_cycle -= deduction;
2654 }
2655 if (context->int_enable_cycle != CYCLE_NEVER) {
2656 if (context->int_enable_cycle < deduction) {
2657 context->int_enable_cycle = 0;
2658 } else {
2659 context->int_enable_cycle -= deduction;
2660 }
2661 }
2662 if (context->int_pulse_start != CYCLE_NEVER) {
2663 if (context->int_pulse_end < deduction) {
2664 context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER;
2665 } else {
2666 context->int_pulse_end -= deduction;
2667 if (context->int_pulse_start < deduction) {
2668 context->int_pulse_start = 0;
2669 } else {
2670 context->int_pulse_start -= deduction;
2671 }
2672 }
2673 }
2674 }
2675
2676 uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst)
2677 {
2678 code_info code = {dst, dst+16};
2679 mov_ir(&code, address, context->options->gen.scratch1, SZ_W);
2680 call(&code, context->bp_stub);
2681 return code.cur-dst;
2682 }
2683
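The 16-byte code_info window above is only an upper bound for the emitter; the value returned is the actual patch length, which callers need in order to account for the prologue bytes overwritten. A hypothetical call site:

uint32_t patched = zbreakpoint_patch(context, address, native);
//the first 'patched' bytes of the check_cycles_int prologue at
//'native' are now replaced by: mov $address, scratch1; call bp_stub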
2684 void zcreate_stub(z80_context * context)
2685 {
2686 z80_options * opts = context->options;
2687 code_info *code = &opts->gen.code;
2688 check_code_prologue(code);
2689 context->bp_stub = code->cur;
2690
2691 //Calculate length of prologue
2692 check_cycles_int(&opts->gen, 0);
2693 int check_int_size = code->cur-context->bp_stub;
2694 code->cur = context->bp_stub;
2695
2696 //Calculate length of patch
2697 int patch_size = zbreakpoint_patch(context, 0, code->cur);
2698
2699 //Save context and call breakpoint handler
2700 call(code, opts->gen.save_context);
2701 push_r(code, opts->gen.scratch1);
2702 call_args_abi(code, context->bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1);
2703 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
2704 //Restore context
2705 call(code, opts->gen.load_context);
2706 pop_r(code, opts->gen.scratch1);
2707 //perform the cycle/interrupt check the patched-over prologue would have done
2708 cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D);
2709 uint8_t * jmp_off = code->cur+1;
2710 jcc(code, CC_NC, code->cur + 7);
2711 pop_r(code, opts->gen.scratch1);
2712 add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
2713 push_r(code, opts->gen.scratch1);
2714 jmp(code, opts->gen.handle_cycle_limit_int);
2715 *jmp_off = code->cur - (jmp_off+1);
2716 //jump back to body of translated instruction
2717 pop_r(code, opts->gen.scratch1);
2718 add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
2719 jmp_r(code, opts->gen.scratch1);
1975 } 2720 }
1976 2721
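The return-address arithmetic in the stub above follows from the layout of a patched translation; a sketch (byte offsets illustrative):

//native code for a breakpointed Z80 instruction:
//  +0              mov $address, scratch1; call bp_stub  (patch_size bytes)
//  +patch_size     leftover tail of the original check_cycles_int
//                  prologue, now dead bytes  (check_int_size - patch_size bytes)
//  +check_int_size instruction body
//The call pushes native+patch_size as its return address; adding
//check_int_size - patch_size yields native+check_int_size, the start
//of the instruction body. The stub performs the prologue's cycle and
//interrupt check itself before jumping or returning there.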
1977 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) 2722 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler)
1978 { 2723 {
1979 static uint8_t * bp_stub = NULL; 2724 context->bp_handler = bp_handler;
1980 uint8_t * native = z80_get_native_address_trans(context, address); 2725 uint8_t bit = 1 << (address % sizeof(uint8_t));
1981 uint8_t * start_native = native; 2726 if (!(bit & context->breakpoint_flags[address / sizeof(uint8_t)])) {
1982 native = mov_ir(native, address, SCRATCH1, SZ_W); 2727 context->breakpoint_flags[address / sizeof(uint8_t)] |= bit;
1983 if (!bp_stub) { 2728 if (!context->bp_stub) {
1984 x86_z80_options * opts = context->options; 2729 zcreate_stub(context);
1985 uint8_t * dst = opts->cur_code; 2730 }
1986 uint8_t * dst_end = opts->code_end; 2731 uint8_t * native = z80_get_native_address(context, address);
1987 if (dst_end - dst < 128) { 2732 if (native) {
1988 size_t size = 1024*1024; 2733 zbreakpoint_patch(context, address, native);
1989 dst = alloc_code(&size); 2734 }
1990 opts->code_end = dst_end = dst + size;
1991 }
1992 bp_stub = dst;
1993 native = call(native, bp_stub);
1994
1995 //Calculate length of prologue
1996 dst = z80_check_cycles_int(dst, address);
1997 int check_int_size = dst-bp_stub;
1998 dst = bp_stub;
1999
2000 //Save context and call breakpoint handler
2001 dst = call(dst, (uint8_t *)z80_save_context);
2002 dst = push_r(dst, SCRATCH1);
2003 dst = mov_rr(dst, CONTEXT, RDI, SZ_Q);
2004 dst = mov_rr(dst, SCRATCH1, RSI, SZ_W);
2005 dst = call(dst, bp_handler);
2006 dst = mov_rr(dst, RAX, CONTEXT, SZ_Q);
2007 //Restore context
2008 dst = call(dst, (uint8_t *)z80_load_context);
2009 dst = pop_r(dst, SCRATCH1);
2010 //do prologue stuff
2011 dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D);
2012 uint8_t * jmp_off = dst+1;
2013 dst = jcc(dst, CC_NC, dst + 7);
2014 dst = pop_r(dst, SCRATCH1);
2015 dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q);
2016 dst = push_r(dst, SCRATCH1);
2017 dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int);
2018 *jmp_off = dst - (jmp_off+1);
2019 //jump back to body of translated instruction
2020 dst = pop_r(dst, SCRATCH1);
2021 dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q);
2022 dst = jmp_r(dst, SCRATCH1);
2023 opts->cur_code = dst;
2024 } else {
2025 native = call(native, bp_stub);
2026 } 2735 }
2027 } 2736 }
2028 2737
2029 void zremove_breakpoint(z80_context * context, uint16_t address) 2738 void zremove_breakpoint(z80_context * context, uint16_t address)
2030 { 2739 {
2740 context->breakpoint_flags[address / sizeof(uint8_t)] &= ~(1 << (address % sizeof(uint8_t)));
2031 uint8_t * native = z80_get_native_address(context, address); 2741 uint8_t * native = z80_get_native_address(context, address);
2032 z80_check_cycles_int(native, address); 2742 if (native) {
2033 } 2743 z80_options * opts = context->options;
2034 2744 code_info tmp_code = opts->gen.code;
2035 2745 opts->gen.code.cur = native;
2746 opts->gen.code.last = native + 16;
2747 check_cycles_int(&opts->gen, address);
2748 opts->gen.code = tmp_code;
2749 }
2750 }
2751
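Putting the two entry points together, a hypothetical debugger interaction; my_bp_handler and its signature are assumptions inferred from the call_args_abi invocation in zcreate_stub, which passes the context and the Z80 PC and expects the (possibly relocated) context back in RAX:

z80_context * my_bp_handler(z80_context * context, uint16_t pc)
{
	printf("Z80 breakpoint hit at %X\n", pc);
	return context;
}

//elsewhere in the debugger:
zinsert_breakpoint(context, 0x0038, (uint8_t *)my_bp_handler);
//...run until the handler fires, then:
zremove_breakpoint(context, 0x0038);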