Mercurial > repos > blastem
comparison z80_to_x86.c @ 590:ea80559c67cb
WIP effort to update z80 core for code gen changes
author | Michael Pavone <pavone@retrodev.com> |
---|---|
date | Sun, 14 Dec 2014 16:45:23 -0800 |
parents | a3b48a57e847 |
children | 966b46c68942 |
comparison
equal
deleted
inserted
replaced
589:2dde38c1744f | 590:ea80559c67cb |
---|---|
52 } | 52 } |
53 //TODO: Handle any necessary special cases | 53 //TODO: Handle any necessary special cases |
54 return SZ_B; | 54 return SZ_B; |
55 } | 55 } |
56 | 56 |
57 uint8_t * zcycles(uint8_t * dst, uint32_t num_cycles) | 57 void translate_z80_reg(z80inst * inst, x86_ea * ea, z80_options * opts) |
58 { | 58 { |
59 return add_ir(dst, num_cycles, ZCYCLES, SZ_D); | 59 code_info *code = &opts->gen.code; |
60 } | |
61 | |
62 uint8_t * z80_check_cycles_int(uint8_t * dst, uint16_t address) | |
63 { | |
64 dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D); | |
65 uint8_t * jmp_off = dst+1; | |
66 dst = jcc(dst, CC_NC, dst + 7); | |
67 dst = mov_ir(dst, address, SCRATCH1, SZ_W); | |
68 dst = call(dst, (uint8_t *)z80_handle_cycle_limit_int); | |
69 *jmp_off = dst - (jmp_off+1); | |
70 return dst; | |
71 } | |
72 | |
73 uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts) | |
74 { | |
75 if (inst->reg == Z80_USE_IMMED) { | 60 if (inst->reg == Z80_USE_IMMED) { |
76 ea->mode = MODE_IMMED; | 61 ea->mode = MODE_IMMED; |
77 ea->disp = inst->immed; | 62 ea->disp = inst->immed; |
78 } else if ((inst->reg & 0x1F) == Z80_UNUSED) { | 63 } else if ((inst->reg & 0x1F) == Z80_UNUSED) { |
79 ea->mode = MODE_UNUSED; | 64 ea->mode = MODE_UNUSED; |
80 } else { | 65 } else { |
81 ea->mode = MODE_REG_DIRECT; | 66 ea->mode = MODE_REG_DIRECT; |
82 if (inst->reg == Z80_IYH) { | 67 if (inst->reg == Z80_IYH) { |
83 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { | 68 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { |
84 dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W); | 69 mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); |
85 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); | 70 ror_ir(code, 8, opts->gen.scratch1, SZ_W); |
86 ea->base = SCRATCH1; | 71 ea->base = opts->gen.scratch1; |
87 } else { | 72 } else { |
88 ea->base = opts->regs[Z80_IYL]; | 73 ea->base = opts->regs[Z80_IYL]; |
89 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 74 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); |
90 } | 75 } |
91 } else if(opts->regs[inst->reg] >= 0) { | 76 } else if(opts->regs[inst->reg] >= 0) { |
92 ea->base = opts->regs[inst->reg]; | 77 ea->base = opts->regs[inst->reg]; |
93 if (ea->base >= AH && ea->base <= BH) { | 78 if (ea->base >= AH && ea->base <= BH) { |
94 if ((inst->addr_mode & 0x1F) == Z80_REG) { | 79 if ((inst->addr_mode & 0x1F) == Z80_REG) { |
95 uint8_t other_reg = opts->regs[inst->ea_reg]; | 80 uint8_t other_reg = opts->regs[inst->ea_reg]; |
96 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { | 81 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { |
97 //we can't mix an *H reg with a register that requires the REX prefix | 82 //we can't mix an *H reg with a register that requires the REX prefix |
98 ea->base = opts->regs[z80_low_reg(inst->reg)]; | 83 ea->base = opts->regs[z80_low_reg(inst->reg)]; |
99 dst = ror_ir(dst, 8, ea->base, SZ_W); | 84 ror_ir(code, 8, ea->base, SZ_W); |
100 } | 85 } |
101 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { | 86 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { |
102 //temp regs require REX prefix too | 87 //temp regs require REX prefix too |
103 ea->base = opts->regs[z80_low_reg(inst->reg)]; | 88 ea->base = opts->regs[z80_low_reg(inst->reg)]; |
104 dst = ror_ir(dst, 8, ea->base, SZ_W); | 89 ror_ir(code, 8, ea->base, SZ_W); |
105 } | 90 } |
106 } | 91 } |
107 } else { | 92 } else { |
108 ea->mode = MODE_REG_DISPLACE8; | 93 ea->mode = MODE_REG_DISPLACE8; |
109 ea->base = CONTEXT; | 94 ea->base = opts->gen.context_reg; |
110 ea->disp = offsetof(z80_context, regs) + inst->reg; | 95 ea->disp = offsetof(z80_context, regs) + inst->reg; |
111 } | 96 } |
112 } | 97 } |
113 return dst; | 98 } |
114 } | 99 |
115 | 100 void z80_save_reg(z80inst * inst, z80_options * opts) |
116 uint8_t * z80_save_reg(uint8_t * dst, z80inst * inst, x86_z80_options * opts) | 101 { |
117 { | 102 code_info *code = &opts->gen.code; |
118 if (inst->reg == Z80_IYH) { | 103 if (inst->reg == Z80_IYH) { |
119 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { | 104 if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { |
120 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 105 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); |
121 dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B); | 106 mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); |
122 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 107 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); |
123 } else { | 108 } else { |
124 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 109 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); |
125 } | 110 } |
126 } else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) { | 111 } else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) { |
127 if ((inst->addr_mode & 0x1F) == Z80_REG) { | 112 if ((inst->addr_mode & 0x1F) == Z80_REG) { |
128 uint8_t other_reg = opts->regs[inst->ea_reg]; | 113 uint8_t other_reg = opts->regs[inst->ea_reg]; |
129 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { | 114 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { |
130 //we can't mix an *H reg with a register that requires the REX prefix | 115 //we can't mix an *H reg with a register that requires the REX prefix |
131 dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); | 116 ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); |
132 } | 117 } |
133 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { | 118 } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { |
134 //temp regs require REX prefix too | 119 //temp regs require REX prefix too |
135 dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); | 120 ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); |
136 } | 121 } |
137 } | 122 } |
138 return dst; | 123 } |
139 } | 124 |
140 | 125 void translate_z80_ea(z80inst * inst, x86_ea * ea, z80_options * opts, uint8_t read, uint8_t modify) |
141 uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts, uint8_t read, uint8_t modify) | 126 { |
142 { | 127 code_info *code = &opts->gen.code; |
143 uint8_t size, reg, areg; | 128 uint8_t size, reg, areg; |
144 ea->mode = MODE_REG_DIRECT; | 129 ea->mode = MODE_REG_DIRECT; |
145 areg = read ? SCRATCH1 : SCRATCH2; | 130 areg = read ? opts->gen.scratch1 : opts->gen.scratch2; |
146 switch(inst->addr_mode & 0x1F) | 131 switch(inst->addr_mode & 0x1F) |
147 { | 132 { |
148 case Z80_REG: | 133 case Z80_REG: |
149 if (inst->ea_reg == Z80_IYH) { | 134 if (inst->ea_reg == Z80_IYH) { |
150 if (inst->reg == Z80_IYL) { | 135 if (inst->reg == Z80_IYL) { |
151 dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W); | 136 mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); |
152 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); | 137 ror_ir(code, 8, opts->gen.scratch1, SZ_W); |
153 ea->base = SCRATCH1; | 138 ea->base = opts->gen.scratch1; |
154 } else { | 139 } else { |
155 ea->base = opts->regs[Z80_IYL]; | 140 ea->base = opts->regs[Z80_IYL]; |
156 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 141 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); |
157 } | 142 } |
158 } else { | 143 } else { |
159 ea->base = opts->regs[inst->ea_reg]; | 144 ea->base = opts->regs[inst->ea_reg]; |
160 if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { | 145 if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { |
161 uint8_t other_reg = opts->regs[inst->reg]; | 146 uint8_t other_reg = opts->regs[inst->reg]; |
162 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { | 147 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { |
163 //we can't mix an *H reg with a register that requires the REX prefix | 148 //we can't mix an *H reg with a register that requires the REX prefix |
164 ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; | 149 ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; |
165 dst = ror_ir(dst, 8, ea->base, SZ_W); | 150 ror_ir(code, 8, ea->base, SZ_W); |
166 } | 151 } |
167 } | 152 } |
168 } | 153 } |
169 break; | 154 break; |
170 case Z80_REG_INDIRECT: | 155 case Z80_REG_INDIRECT: |
171 dst = mov_rr(dst, opts->regs[inst->ea_reg], areg, SZ_W); | 156 mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W); |
172 size = z80_size(inst); | 157 size = z80_size(inst); |
173 if (read) { | 158 if (read) { |
174 if (modify) { | 159 if (modify) { |
175 //dst = push_r(dst, SCRATCH1); | 160 //push_r(code, opts->gen.scratch1); |
176 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W); | 161 mov_rrdisp8(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); |
177 } | 162 } |
178 if (size == SZ_B) { | 163 if (size == SZ_B) { |
179 dst = call(dst, (uint8_t *)z80_read_byte); | 164 call(code, opts->read_8); |
180 } else { | 165 } else { |
181 dst = call(dst, (uint8_t *)z80_read_word); | 166 call(code, opts->read_16); |
182 } | 167 } |
183 if (modify) { | 168 if (modify) { |
184 //dst = pop_r(dst, SCRATCH2); | 169 //pop_r(code, opts->gen.scratch2); |
185 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W); | 170 mov_rdisp8r(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); |
186 } | 171 } |
187 } | 172 } |
188 ea->base = SCRATCH1; | 173 ea->base = opts->gen.scratch1; |
189 break; | 174 break; |
190 case Z80_IMMED: | 175 case Z80_IMMED: |
191 ea->mode = MODE_IMMED; | 176 ea->mode = MODE_IMMED; |
192 ea->disp = inst->immed; | 177 ea->disp = inst->immed; |
193 break; | 178 break; |
194 case Z80_IMMED_INDIRECT: | 179 case Z80_IMMED_INDIRECT: |
195 dst = mov_ir(dst, inst->immed, areg, SZ_W); | 180 dst = mov_ir(dst, inst->immed, areg, SZ_W); |
196 size = z80_size(inst); | 181 size = z80_size(inst); |
197 if (read) { | 182 if (read) { |
198 /*if (modify) { | 183 /*if (modify) { |
199 dst = push_r(dst, SCRATCH1); | 184 dst = push_r(dst, opts->gen.scratch1); |
200 }*/ | 185 }*/ |
201 if (size == SZ_B) { | 186 if (size == SZ_B) { |
202 dst = call(dst, (uint8_t *)z80_read_byte); | 187 dst = call(dst, (uint8_t *)z80_read_byte); |
203 } else { | 188 } else { |
204 dst = call(dst, (uint8_t *)z80_read_word); | 189 dst = call(dst, (uint8_t *)z80_read_word); |
205 } | 190 } |
206 if (modify) { | 191 if (modify) { |
207 //dst = pop_r(dst, SCRATCH2); | 192 //dst = pop_r(dst, opts->gen.scratch2); |
208 dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_W); | 193 dst = mov_ir(dst, inst->immed, opts->gen.scratch2, SZ_W); |
209 } | 194 } |
210 } | 195 } |
211 ea->base = SCRATCH1; | 196 ea->base = opts->gen.scratch1; |
212 break; | 197 break; |
213 case Z80_IX_DISPLACE: | 198 case Z80_IX_DISPLACE: |
214 case Z80_IY_DISPLACE: | 199 case Z80_IY_DISPLACE: |
215 reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY]; | 200 reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY]; |
216 dst = mov_rr(dst, reg, areg, SZ_W); | 201 dst = mov_rr(dst, reg, areg, SZ_W); |
217 dst = add_ir(dst, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); | 202 dst = add_ir(dst, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); |
218 size = z80_size(inst); | 203 size = z80_size(inst); |
219 if (read) { | 204 if (read) { |
220 if (modify) { | 205 if (modify) { |
221 //dst = push_r(dst, SCRATCH1); | 206 //dst = push_r(dst, opts->gen.scratch1); |
222 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W); | 207 dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); |
223 } | 208 } |
224 if (size == SZ_B) { | 209 if (size == SZ_B) { |
225 dst = call(dst, (uint8_t *)z80_read_byte); | 210 dst = call(dst, (uint8_t *)z80_read_byte); |
226 } else { | 211 } else { |
227 dst = call(dst, (uint8_t *)z80_read_word); | 212 dst = call(dst, (uint8_t *)z80_read_word); |
228 } | 213 } |
229 if (modify) { | 214 if (modify) { |
230 //dst = pop_r(dst, SCRATCH2); | 215 //dst = pop_r(dst, opts->gen.scratch2); |
231 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W); | 216 dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); |
232 } | 217 } |
233 } | 218 } |
234 ea->base = SCRATCH1; | 219 ea->base = opts->gen.scratch1; |
235 break; | 220 break; |
236 case Z80_UNUSED: | 221 case Z80_UNUSED: |
237 ea->mode = MODE_UNUSED; | 222 ea->mode = MODE_UNUSED; |
238 break; | 223 break; |
239 default: | 224 default: |
240 fprintf(stderr, "Unrecognized Z80 address mode %X\n", inst->addr_mode & 0x1F); | 225 fprintf(stderr, "Unrecognized Z80 address mode %X\n", inst->addr_mode & 0x1F); |
241 exit(1); | 226 exit(1); |
242 } | 227 } |
243 return dst; | 228 return dst; |
244 } | 229 } |
245 | 230 |
246 uint8_t * z80_save_ea(uint8_t * dst, z80inst * inst, x86_z80_options * opts) | 231 uint8_t * z80_save_ea(uint8_t * dst, z80inst * inst, z80_options * opts) |
247 { | 232 { |
248 if ((inst->addr_mode & 0x1F) == Z80_REG) { | 233 if ((inst->addr_mode & 0x1F) == Z80_REG) { |
249 if (inst->ea_reg == Z80_IYH) { | 234 if (inst->ea_reg == Z80_IYH) { |
250 if (inst->reg == Z80_IYL) { | 235 if (inst->reg == Z80_IYL) { |
251 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 236 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); |
252 dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B); | 237 dst = mov_rr(dst, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); |
253 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 238 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); |
254 } else { | 239 } else { |
255 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); | 240 dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); |
256 } | 241 } |
257 } else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { | 242 } else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { |
324 (context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL], | 309 (context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL], |
325 (context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]); | 310 (context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]); |
326 exit(0); | 311 exit(0); |
327 } | 312 } |
328 | 313 |
329 uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context, uint16_t address) | 314 uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) |
330 { | 315 { |
331 uint32_t cycles; | 316 uint32_t cycles; |
332 x86_ea src_op, dst_op; | 317 x86_ea src_op, dst_op; |
333 uint8_t size; | 318 uint8_t size; |
334 x86_z80_options *opts = context->options; | 319 z80_options *opts = context->options; |
335 uint8_t * start = dst; | 320 uint8_t * start = opts->code.cur; |
336 dst = z80_check_cycles_int(dst, address); | 321 check_cycles_int(&opts->gen, address); |
337 switch(inst->op) | 322 switch(inst->op) |
338 { | 323 { |
339 case Z80_LD: | 324 case Z80_LD: |
340 size = z80_size(inst); | 325 size = z80_size(inst); |
341 switch (inst->addr_mode & 0x1F) | 326 switch (inst->addr_mode & 0x1F) |
362 break; | 347 break; |
363 } | 348 } |
364 if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) { | 349 if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) { |
365 cycles += 4; | 350 cycles += 4; |
366 } | 351 } |
367 dst = zcycles(dst, cycles); | 352 dst = cycles(&opts->gen, cycles); |
368 if (inst->addr_mode & Z80_DIR) { | 353 if (inst->addr_mode & Z80_DIR) { |
369 dst = translate_z80_ea(inst, &dst_op, dst, opts, DONT_READ, MODIFY); | 354 dst = translate_z80_ea(inst, &dst_op, dst, opts, DONT_READ, MODIFY); |
370 dst = translate_z80_reg(inst, &src_op, dst, opts); | 355 dst = translate_z80_reg(inst, &src_op, dst, opts); |
371 } else { | 356 } else { |
372 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 357 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
388 if (inst->addr_mode & Z80_DIR) { | 373 if (inst->addr_mode & Z80_DIR) { |
389 dst = z80_save_result(dst, inst); | 374 dst = z80_save_result(dst, inst); |
390 } | 375 } |
391 break; | 376 break; |
392 case Z80_PUSH: | 377 case Z80_PUSH: |
393 dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); | 378 dst = cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); |
394 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 379 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
395 if (inst->reg == Z80_AF) { | 380 if (inst->reg == Z80_AF) { |
396 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B); | 381 dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); |
397 dst = shl_ir(dst, 8, SCRATCH1, SZ_W); | 382 dst = shl_ir(dst, 8, opts->gen.scratch1, SZ_W); |
398 dst = mov_rdisp8r(dst, CONTEXT, zf_off(ZF_S), SCRATCH1, SZ_B); | 383 dst = mov_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B); |
399 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); | 384 dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); |
400 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_Z), SCRATCH1, SZ_B); | 385 dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B); |
401 dst = shl_ir(dst, 2, SCRATCH1, SZ_B); | 386 dst = shl_ir(dst, 2, opts->gen.scratch1, SZ_B); |
402 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_H), SCRATCH1, SZ_B); | 387 dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B); |
403 dst = shl_ir(dst, 2, SCRATCH1, SZ_B); | 388 dst = shl_ir(dst, 2, opts->gen.scratch1, SZ_B); |
404 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_PV), SCRATCH1, SZ_B); | 389 dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B); |
405 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); | 390 dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); |
406 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_N), SCRATCH1, SZ_B); | 391 dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B); |
407 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); | 392 dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); |
408 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_C), SCRATCH1, SZ_B); | 393 dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B); |
409 } else { | 394 } else { |
410 dst = translate_z80_reg(inst, &src_op, dst, opts); | 395 dst = translate_z80_reg(inst, &src_op, dst, opts); |
411 dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_W); | 396 dst = mov_rr(dst, src_op.base, opts->gen.scratch1, SZ_W); |
412 } | 397 } |
413 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); | 398 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); |
414 dst = call(dst, (uint8_t *)z80_write_word_highfirst); | 399 dst = call(dst, (uint8_t *)z80_write_word_highfirst); |
415 //no call to save_z80_reg needed since there's no chance we'll use the only | 400 //no call to save_z80_reg needed since there's no chance we'll use the only |
416 //the upper half of a register pair | 401 //the upper half of a register pair |
417 break; | 402 break; |
418 case Z80_POP: | 403 case Z80_POP: |
419 dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); | 404 dst = cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); |
420 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 405 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); |
421 dst = call(dst, (uint8_t *)z80_read_word); | 406 dst = call(dst, (uint8_t *)z80_read_word); |
422 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 407 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
423 if (inst->reg == Z80_AF) { | 408 if (inst->reg == Z80_AF) { |
424 | 409 |
425 dst = bt_ir(dst, 0, SCRATCH1, SZ_W); | 410 dst = bt_ir(dst, 0, opts->gen.scratch1, SZ_W); |
426 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 411 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
427 dst = bt_ir(dst, 1, SCRATCH1, SZ_W); | 412 dst = bt_ir(dst, 1, opts->gen.scratch1, SZ_W); |
428 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_N)); | 413 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_N)); |
429 dst = bt_ir(dst, 2, SCRATCH1, SZ_W); | 414 dst = bt_ir(dst, 2, opts->gen.scratch1, SZ_W); |
430 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_PV)); | 415 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_PV)); |
431 dst = bt_ir(dst, 4, SCRATCH1, SZ_W); | 416 dst = bt_ir(dst, 4, opts->gen.scratch1, SZ_W); |
432 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_H)); | 417 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_H)); |
433 dst = bt_ir(dst, 6, SCRATCH1, SZ_W); | 418 dst = bt_ir(dst, 6, opts->gen.scratch1, SZ_W); |
434 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_Z)); | 419 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_Z)); |
435 dst = bt_ir(dst, 7, SCRATCH1, SZ_W); | 420 dst = bt_ir(dst, 7, opts->gen.scratch1, SZ_W); |
436 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_S)); | 421 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_S)); |
437 dst = shr_ir(dst, 8, SCRATCH1, SZ_W); | 422 dst = shr_ir(dst, 8, opts->gen.scratch1, SZ_W); |
438 dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); | 423 dst = mov_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); |
439 } else { | 424 } else { |
440 dst = translate_z80_reg(inst, &src_op, dst, opts); | 425 dst = translate_z80_reg(inst, &src_op, dst, opts); |
441 dst = mov_rr(dst, SCRATCH1, src_op.base, SZ_W); | 426 dst = mov_rr(dst, opts->gen.scratch1, src_op.base, SZ_W); |
442 } | 427 } |
443 //no call to save_z80_reg needed since there's no chance we'll use the only | 428 //no call to save_z80_reg needed since there's no chance we'll use the only |
444 //the upper half of a register pair | 429 //the upper half of a register pair |
445 break; | 430 break; |
446 case Z80_EX: | 431 case Z80_EX: |
447 if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) { | 432 if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) { |
448 cycles = 4; | 433 cycles = 4; |
449 } else { | 434 } else { |
450 cycles = 8; | 435 cycles = 8; |
451 } | 436 } |
452 dst = zcycles(dst, cycles); | 437 dst = cycles(&opts->gen, cycles); |
453 if (inst->addr_mode == Z80_REG) { | 438 if (inst->addr_mode == Z80_REG) { |
454 if(inst->reg == Z80_AF) { | 439 if(inst->reg == Z80_AF) { |
455 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B); | 440 dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); |
456 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); | 441 dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); |
457 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_A), SZ_B); | 442 dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B); |
458 | 443 |
459 //Flags are currently word aligned, so we can move | 444 //Flags are currently word aligned, so we can move |
460 //them efficiently a word at a time | 445 //them efficiently a word at a time |
461 for (int f = ZF_C; f < ZF_NUM; f+=2) { | 446 for (int f = ZF_C; f < ZF_NUM; f+=2) { |
462 dst = mov_rdisp8r(dst, CONTEXT, zf_off(f), SCRATCH1, SZ_W); | 447 dst = mov_rdisp8r(dst, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W); |
463 dst = mov_rdisp8r(dst, CONTEXT, zaf_off(f), SCRATCH2, SZ_W); | 448 dst = mov_rdisp8r(dst, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W); |
464 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zaf_off(f), SZ_W); | 449 dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W); |
465 dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zf_off(f), SZ_W); | 450 dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W); |
466 } | 451 } |
467 } else { | 452 } else { |
468 dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); | 453 dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); |
469 } | 454 } |
470 } else { | 455 } else { |
471 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 456 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); |
472 dst = call(dst, (uint8_t *)z80_read_byte); | 457 dst = call(dst, (uint8_t *)z80_read_byte); |
473 dst = xchg_rr(dst, opts->regs[inst->reg], SCRATCH1, SZ_B); | 458 dst = xchg_rr(dst, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); |
474 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); | 459 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); |
475 dst = call(dst, (uint8_t *)z80_write_byte); | 460 dst = call(dst, (uint8_t *)z80_write_byte); |
476 dst = zcycles(dst, 1); | 461 dst = cycles(&opts->gen, 1); |
477 uint8_t high_reg = z80_high_reg(inst->reg); | 462 uint8_t high_reg = z80_high_reg(inst->reg); |
478 uint8_t use_reg; | 463 uint8_t use_reg; |
479 //even though some of the upper halves can be used directly | 464 //even though some of the upper halves can be used directly |
480 //the limitations on mixing *H regs with the REX prefix | 465 //the limitations on mixing *H regs with the REX prefix |
481 //prevent us from taking advantage of it | 466 //prevent us from taking advantage of it |
482 use_reg = opts->regs[inst->reg]; | 467 use_reg = opts->regs[inst->reg]; |
483 dst = ror_ir(dst, 8, use_reg, SZ_W); | 468 dst = ror_ir(dst, 8, use_reg, SZ_W); |
484 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 469 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); |
485 dst = add_ir(dst, 1, SCRATCH1, SZ_W); | 470 dst = add_ir(dst, 1, opts->gen.scratch1, SZ_W); |
486 dst = call(dst, (uint8_t *)z80_read_byte); | 471 dst = call(dst, (uint8_t *)z80_read_byte); |
487 dst = xchg_rr(dst, use_reg, SCRATCH1, SZ_B); | 472 dst = xchg_rr(dst, use_reg, opts->gen.scratch1, SZ_B); |
488 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); | 473 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); |
489 dst = add_ir(dst, 1, SCRATCH2, SZ_W); | 474 dst = add_ir(dst, 1, opts->gen.scratch2, SZ_W); |
490 dst = call(dst, (uint8_t *)z80_write_byte); | 475 dst = call(dst, (uint8_t *)z80_write_byte); |
491 //restore reg to normal rotation | 476 //restore reg to normal rotation |
492 dst = ror_ir(dst, 8, use_reg, SZ_W); | 477 dst = ror_ir(dst, 8, use_reg, SZ_W); |
493 dst = zcycles(dst, 2); | 478 dst = cycles(&opts->gen, 2); |
494 } | 479 } |
495 break; | 480 break; |
496 case Z80_EXX: | 481 case Z80_EXX: |
497 dst = zcycles(dst, 4); | 482 dst = cycles(&opts->gen, 4); |
498 dst = mov_rr(dst, opts->regs[Z80_BC], SCRATCH1, SZ_W); | 483 dst = mov_rr(dst, opts->regs[Z80_BC], opts->gen.scratch1, SZ_W); |
499 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); | 484 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); |
500 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); | 485 dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); |
501 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); | 486 dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); |
502 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_C), SZ_W); | 487 dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_C), SZ_W); |
503 dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zar_off(Z80_L), SZ_W); | 488 dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, zar_off(Z80_L), SZ_W); |
504 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH1, SZ_W); | 489 dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch1, SZ_W); |
505 dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); | 490 dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); |
506 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_E), SZ_W); | 491 dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_E), SZ_W); |
507 break; | 492 break; |
508 case Z80_LDI: { | 493 case Z80_LDI: { |
509 dst = zcycles(dst, 8); | 494 dst = cycles(&opts->gen, 8); |
510 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); | 495 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); |
511 dst = call(dst, (uint8_t *)z80_read_byte); | 496 dst = call(dst, (uint8_t *)z80_read_byte); |
512 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); | 497 dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); |
513 dst = call(dst, (uint8_t *)z80_write_byte); | 498 dst = call(dst, (uint8_t *)z80_write_byte); |
514 dst = zcycles(dst, 2); | 499 dst = cycles(&opts->gen, 2); |
515 dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); | 500 dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); |
516 dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); | 501 dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); |
517 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); | 502 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); |
518 //TODO: Implement half-carry | 503 //TODO: Implement half-carry |
519 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 504 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
520 dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV)); | 505 dst = setcc_rdisp8(dst, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); |
521 break; | 506 break; |
522 } | 507 } |
523 case Z80_LDIR: { | 508 case Z80_LDIR: { |
524 dst = zcycles(dst, 8); | 509 dst = cycles(&opts->gen, 8); |
525 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); | 510 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); |
526 dst = call(dst, (uint8_t *)z80_read_byte); | 511 dst = call(dst, (uint8_t *)z80_read_byte); |
527 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); | 512 dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); |
528 dst = call(dst, (uint8_t *)z80_write_byte); | 513 dst = call(dst, (uint8_t *)z80_write_byte); |
529 dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); | 514 dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); |
530 dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); | 515 dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); |
531 | 516 |
532 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); | 517 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); |
533 uint8_t * cont = dst+1; | 518 uint8_t * cont = dst+1; |
534 dst = jcc(dst, CC_Z, dst+2); | 519 dst = jcc(dst, CC_Z, dst+2); |
535 dst = zcycles(dst, 7); | 520 dst = cycles(&opts->gen, 7); |
536 //TODO: Figure out what the flag state should be here | 521 //TODO: Figure out what the flag state should be here |
537 //TODO: Figure out whether an interrupt can interrupt this | 522 //TODO: Figure out whether an interrupt can interrupt this |
538 dst = jmp(dst, start); | 523 dst = jmp(dst, start); |
539 *cont = dst - (cont + 1); | 524 *cont = dst - (cont + 1); |
540 dst = zcycles(dst, 2); | 525 dst = cycles(&opts->gen, 2); |
541 //TODO: Implement half-carry | 526 //TODO: Implement half-carry |
542 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 527 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
543 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); | 528 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); |
544 break; | 529 break; |
545 } | 530 } |
546 case Z80_LDD: { | 531 case Z80_LDD: { |
547 dst = zcycles(dst, 8); | 532 dst = cycles(&opts->gen, 8); |
548 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); | 533 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); |
549 dst = call(dst, (uint8_t *)z80_read_byte); | 534 dst = call(dst, (uint8_t *)z80_read_byte); |
550 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); | 535 dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); |
551 dst = call(dst, (uint8_t *)z80_write_byte); | 536 dst = call(dst, (uint8_t *)z80_write_byte); |
552 dst = zcycles(dst, 2); | 537 dst = cycles(&opts->gen, 2); |
553 dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); | 538 dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); |
554 dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); | 539 dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); |
555 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); | 540 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); |
556 //TODO: Implement half-carry | 541 //TODO: Implement half-carry |
557 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 542 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
558 dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV)); | 543 dst = setcc_rdisp8(dst, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); |
559 break; | 544 break; |
560 } | 545 } |
561 case Z80_LDDR: { | 546 case Z80_LDDR: { |
562 dst = zcycles(dst, 8); | 547 dst = cycles(&opts->gen, 8); |
563 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); | 548 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); |
564 dst = call(dst, (uint8_t *)z80_read_byte); | 549 dst = call(dst, (uint8_t *)z80_read_byte); |
565 dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); | 550 dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); |
566 dst = call(dst, (uint8_t *)z80_write_byte); | 551 dst = call(dst, (uint8_t *)z80_write_byte); |
567 dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); | 552 dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); |
568 dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); | 553 dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); |
569 | 554 |
570 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); | 555 dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); |
571 uint8_t * cont = dst+1; | 556 uint8_t * cont = dst+1; |
572 dst = jcc(dst, CC_Z, dst+2); | 557 dst = jcc(dst, CC_Z, dst+2); |
573 dst = zcycles(dst, 7); | 558 dst = cycles(&opts->gen, 7); |
574 //TODO: Figure out what the flag state should be here | 559 //TODO: Figure out what the flag state should be here |
575 //TODO: Figure out whether an interrupt can interrupt this | 560 //TODO: Figure out whether an interrupt can interrupt this |
576 dst = jmp(dst, start); | 561 dst = jmp(dst, start); |
577 *cont = dst - (cont + 1); | 562 *cont = dst - (cont + 1); |
578 dst = zcycles(dst, 2); | 563 dst = cycles(&opts->gen, 2); |
579 //TODO: Implement half-carry | 564 //TODO: Implement half-carry |
580 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 565 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
581 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); | 566 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); |
582 break; | 567 break; |
583 } | 568 } |
584 /*case Z80_CPI: | 569 /*case Z80_CPI: |
585 case Z80_CPIR: | 570 case Z80_CPIR: |
586 case Z80_CPD: | 571 case Z80_CPD: |
593 } else if(inst->addr_mode == Z80_IMMED) { | 578 } else if(inst->addr_mode == Z80_IMMED) { |
594 cycles += 3; | 579 cycles += 3; |
595 } else if(z80_size(inst) == SZ_W) { | 580 } else if(z80_size(inst) == SZ_W) { |
596 cycles += 4; | 581 cycles += 4; |
597 } | 582 } |
598 dst = zcycles(dst, cycles); | 583 dst = cycles(&opts->gen, cycles); |
599 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 584 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
600 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 585 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
601 if (src_op.mode == MODE_REG_DIRECT) { | 586 if (src_op.mode == MODE_REG_DIRECT) { |
602 dst = add_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 587 dst = add_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
603 } else { | 588 } else { |
604 dst = add_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 589 dst = add_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
605 } | 590 } |
606 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 591 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
607 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 592 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
608 //TODO: Implement half-carry flag | 593 //TODO: Implement half-carry flag |
609 if (z80_size(inst) == SZ_B) { | 594 if (z80_size(inst) == SZ_B) { |
610 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 595 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
611 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 596 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
612 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 597 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
613 } | 598 } |
614 dst = z80_save_reg(dst, inst, opts); | 599 dst = z80_save_reg(dst, inst, opts); |
615 dst = z80_save_ea(dst, inst, opts); | 600 dst = z80_save_ea(dst, inst, opts); |
616 break; | 601 break; |
617 case Z80_ADC: | 602 case Z80_ADC: |
621 } else if(inst->addr_mode == Z80_IMMED) { | 606 } else if(inst->addr_mode == Z80_IMMED) { |
622 cycles += 3; | 607 cycles += 3; |
623 } else if(z80_size(inst) == SZ_W) { | 608 } else if(z80_size(inst) == SZ_W) { |
624 cycles += 4; | 609 cycles += 4; |
625 } | 610 } |
626 dst = zcycles(dst, cycles); | 611 dst = cycles(&opts->gen, cycles); |
627 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 612 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
628 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 613 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
629 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 614 dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
630 if (src_op.mode == MODE_REG_DIRECT) { | 615 if (src_op.mode == MODE_REG_DIRECT) { |
631 dst = adc_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 616 dst = adc_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
632 } else { | 617 } else { |
633 dst = adc_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 618 dst = adc_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
634 } | 619 } |
635 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 620 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
636 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 621 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
637 //TODO: Implement half-carry flag | 622 //TODO: Implement half-carry flag |
638 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 623 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
639 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 624 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
640 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 625 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
641 dst = z80_save_reg(dst, inst, opts); | 626 dst = z80_save_reg(dst, inst, opts); |
642 dst = z80_save_ea(dst, inst, opts); | 627 dst = z80_save_ea(dst, inst, opts); |
643 break; | 628 break; |
644 case Z80_SUB: | 629 case Z80_SUB: |
645 cycles = 4; | 630 cycles = 4; |
646 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { | 631 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { |
647 cycles += 12; | 632 cycles += 12; |
648 } else if(inst->addr_mode == Z80_IMMED) { | 633 } else if(inst->addr_mode == Z80_IMMED) { |
649 cycles += 3; | 634 cycles += 3; |
650 } | 635 } |
651 dst = zcycles(dst, cycles); | 636 dst = cycles(&opts->gen, cycles); |
652 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 637 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
653 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 638 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
654 if (src_op.mode == MODE_REG_DIRECT) { | 639 if (src_op.mode == MODE_REG_DIRECT) { |
655 dst = sub_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 640 dst = sub_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
656 } else { | 641 } else { |
657 dst = sub_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 642 dst = sub_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
658 } | 643 } |
659 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 644 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
660 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); | 645 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
661 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 646 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
662 //TODO: Implement half-carry flag | 647 //TODO: Implement half-carry flag |
663 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 648 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
664 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 649 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
665 dst = z80_save_reg(dst, inst, opts); | 650 dst = z80_save_reg(dst, inst, opts); |
666 dst = z80_save_ea(dst, inst, opts); | 651 dst = z80_save_ea(dst, inst, opts); |
667 break; | 652 break; |
668 case Z80_SBC: | 653 case Z80_SBC: |
669 cycles = 4; | 654 cycles = 4; |
672 } else if(inst->addr_mode == Z80_IMMED) { | 657 } else if(inst->addr_mode == Z80_IMMED) { |
673 cycles += 3; | 658 cycles += 3; |
674 } else if(z80_size(inst) == SZ_W) { | 659 } else if(z80_size(inst) == SZ_W) { |
675 cycles += 4; | 660 cycles += 4; |
676 } | 661 } |
677 dst = zcycles(dst, cycles); | 662 dst = cycles(&opts->gen, cycles); |
678 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 663 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
679 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 664 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
680 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 665 dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
681 if (src_op.mode == MODE_REG_DIRECT) { | 666 if (src_op.mode == MODE_REG_DIRECT) { |
682 dst = sbb_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 667 dst = sbb_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
683 } else { | 668 } else { |
684 dst = sbb_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 669 dst = sbb_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
685 } | 670 } |
686 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 671 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
687 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); | 672 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
688 //TODO: Implement half-carry flag | 673 //TODO: Implement half-carry flag |
689 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 674 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
690 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 675 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
691 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 676 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
692 dst = z80_save_reg(dst, inst, opts); | 677 dst = z80_save_reg(dst, inst, opts); |
693 dst = z80_save_ea(dst, inst, opts); | 678 dst = z80_save_ea(dst, inst, opts); |
694 break; | 679 break; |
695 case Z80_AND: | 680 case Z80_AND: |
696 cycles = 4; | 681 cycles = 4; |
699 } else if(inst->addr_mode == Z80_IMMED) { | 684 } else if(inst->addr_mode == Z80_IMMED) { |
700 cycles += 3; | 685 cycles += 3; |
701 } else if(z80_size(inst) == SZ_W) { | 686 } else if(z80_size(inst) == SZ_W) { |
702 cycles += 4; | 687 cycles += 4; |
703 } | 688 } |
704 dst = zcycles(dst, cycles); | 689 dst = cycles(&opts->gen, cycles); |
705 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 690 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
706 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 691 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
707 if (src_op.mode == MODE_REG_DIRECT) { | 692 if (src_op.mode == MODE_REG_DIRECT) { |
708 dst = and_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 693 dst = and_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
709 } else { | 694 } else { |
710 dst = and_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 695 dst = and_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
711 } | 696 } |
712 //TODO: Cleanup flags | 697 //TODO: Cleanup flags |
713 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 698 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
714 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 699 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
715 //TODO: Implement half-carry flag | 700 //TODO: Implement half-carry flag |
716 if (z80_size(inst) == SZ_B) { | 701 if (z80_size(inst) == SZ_B) { |
717 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 702 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
718 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 703 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
719 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 704 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
720 } | 705 } |
721 dst = z80_save_reg(dst, inst, opts); | 706 dst = z80_save_reg(dst, inst, opts); |
722 dst = z80_save_ea(dst, inst, opts); | 707 dst = z80_save_ea(dst, inst, opts); |
723 break; | 708 break; |
724 case Z80_OR: | 709 case Z80_OR: |
728 } else if(inst->addr_mode == Z80_IMMED) { | 713 } else if(inst->addr_mode == Z80_IMMED) { |
729 cycles += 3; | 714 cycles += 3; |
730 } else if(z80_size(inst) == SZ_W) { | 715 } else if(z80_size(inst) == SZ_W) { |
731 cycles += 4; | 716 cycles += 4; |
732 } | 717 } |
733 dst = zcycles(dst, cycles); | 718 dst = cycles(&opts->gen, cycles); |
734 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 719 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
735 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 720 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
736 if (src_op.mode == MODE_REG_DIRECT) { | 721 if (src_op.mode == MODE_REG_DIRECT) { |
737 dst = or_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 722 dst = or_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
738 } else { | 723 } else { |
739 dst = or_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 724 dst = or_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
740 } | 725 } |
741 //TODO: Cleanup flags | 726 //TODO: Cleanup flags |
742 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 727 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
743 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 728 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
744 //TODO: Implement half-carry flag | 729 //TODO: Implement half-carry flag |
745 if (z80_size(inst) == SZ_B) { | 730 if (z80_size(inst) == SZ_B) { |
746 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 731 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
747 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 732 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
748 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 733 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
749 } | 734 } |
750 dst = z80_save_reg(dst, inst, opts); | 735 dst = z80_save_reg(dst, inst, opts); |
751 dst = z80_save_ea(dst, inst, opts); | 736 dst = z80_save_ea(dst, inst, opts); |
752 break; | 737 break; |
753 case Z80_XOR: | 738 case Z80_XOR: |
757 } else if(inst->addr_mode == Z80_IMMED) { | 742 } else if(inst->addr_mode == Z80_IMMED) { |
758 cycles += 3; | 743 cycles += 3; |
759 } else if(z80_size(inst) == SZ_W) { | 744 } else if(z80_size(inst) == SZ_W) { |
760 cycles += 4; | 745 cycles += 4; |
761 } | 746 } |
762 dst = zcycles(dst, cycles); | 747 dst = cycles(&opts->gen, cycles); |
763 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 748 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
764 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 749 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
765 if (src_op.mode == MODE_REG_DIRECT) { | 750 if (src_op.mode == MODE_REG_DIRECT) { |
766 dst = xor_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 751 dst = xor_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
767 } else { | 752 } else { |
768 dst = xor_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 753 dst = xor_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
769 } | 754 } |
770 //TODO: Cleanup flags | 755 //TODO: Cleanup flags |
771 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 756 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
772 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 757 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
773 //TODO: Implement half-carry flag | 758 //TODO: Implement half-carry flag |
774 if (z80_size(inst) == SZ_B) { | 759 if (z80_size(inst) == SZ_B) { |
775 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 760 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
776 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 761 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
777 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 762 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
778 } | 763 } |
779 dst = z80_save_reg(dst, inst, opts); | 764 dst = z80_save_reg(dst, inst, opts); |
780 dst = z80_save_ea(dst, inst, opts); | 765 dst = z80_save_ea(dst, inst, opts); |
781 break; | 766 break; |
782 case Z80_CP: | 767 case Z80_CP: |
784 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { | 769 if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { |
785 cycles += 12; | 770 cycles += 12; |
786 } else if(inst->addr_mode == Z80_IMMED) { | 771 } else if(inst->addr_mode == Z80_IMMED) { |
787 cycles += 3; | 772 cycles += 3; |
788 } | 773 } |
789 dst = zcycles(dst, cycles); | 774 dst = cycles(&opts->gen, cycles); |
790 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 775 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
791 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 776 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
792 if (src_op.mode == MODE_REG_DIRECT) { | 777 if (src_op.mode == MODE_REG_DIRECT) { |
793 dst = cmp_rr(dst, src_op.base, dst_op.base, z80_size(inst)); | 778 dst = cmp_rr(dst, src_op.base, dst_op.base, z80_size(inst)); |
794 } else { | 779 } else { |
795 dst = cmp_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); | 780 dst = cmp_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); |
796 } | 781 } |
797 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 782 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
798 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); | 783 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
799 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 784 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
800 //TODO: Implement half-carry flag | 785 //TODO: Implement half-carry flag |
801 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 786 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
802 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 787 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
803 dst = z80_save_reg(dst, inst, opts); | 788 dst = z80_save_reg(dst, inst, opts); |
804 dst = z80_save_ea(dst, inst, opts); | 789 dst = z80_save_ea(dst, inst, opts); |
805 break; | 790 break; |
806 case Z80_INC: | 791 case Z80_INC: |
807 cycles = 4; | 792 cycles = 4; |
810 } else if(z80_size(inst) == SZ_W) { | 795 } else if(z80_size(inst) == SZ_W) { |
811 cycles += 2; | 796 cycles += 2; |
812 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { | 797 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { |
813 cycles += 4; | 798 cycles += 4; |
814 } | 799 } |
815 dst = zcycles(dst, cycles); | 800 dst = cycles(&opts->gen, cycles); |
816 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 801 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
817 if (dst_op.mode == MODE_UNUSED) { | 802 if (dst_op.mode == MODE_UNUSED) { |
818 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 803 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
819 } | 804 } |
820 dst = add_ir(dst, 1, dst_op.base, z80_size(inst)); | 805 dst = add_ir(dst, 1, dst_op.base, z80_size(inst)); |
821 if (z80_size(inst) == SZ_B) { | 806 if (z80_size(inst) == SZ_B) { |
822 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 807 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
823 //TODO: Implement half-carry flag | 808 //TODO: Implement half-carry flag |
824 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 809 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
825 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 810 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
826 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 811 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
827 } | 812 } |
828 dst = z80_save_reg(dst, inst, opts); | 813 dst = z80_save_reg(dst, inst, opts); |
829 dst = z80_save_ea(dst, inst, opts); | 814 dst = z80_save_ea(dst, inst, opts); |
830 dst = z80_save_result(dst, inst); | 815 dst = z80_save_result(dst, inst); |
831 break; | 816 break; |
836 } else if(z80_size(inst) == SZ_W) { | 821 } else if(z80_size(inst) == SZ_W) { |
837 cycles += 2; | 822 cycles += 2; |
838 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { | 823 } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { |
839 cycles += 4; | 824 cycles += 4; |
840 } | 825 } |
841 dst = zcycles(dst, cycles); | 826 dst = cycles(&opts->gen, cycles); |
842 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 827 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
843 if (dst_op.mode == MODE_UNUSED) { | 828 if (dst_op.mode == MODE_UNUSED) { |
844 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 829 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
845 } | 830 } |
846 dst = sub_ir(dst, 1, dst_op.base, z80_size(inst)); | 831 dst = sub_ir(dst, 1, dst_op.base, z80_size(inst)); |
847 if (z80_size(inst) == SZ_B) { | 832 if (z80_size(inst) == SZ_B) { |
848 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); | 833 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
849 //TODO: Implement half-carry flag | 834 //TODO: Implement half-carry flag |
850 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 835 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
851 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 836 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
852 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 837 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
853 } | 838 } |
854 dst = z80_save_reg(dst, inst, opts); | 839 dst = z80_save_reg(dst, inst, opts); |
855 dst = z80_save_ea(dst, inst, opts); | 840 dst = z80_save_ea(dst, inst, opts); |
856 dst = z80_save_result(dst, inst); | 841 dst = z80_save_result(dst, inst); |
857 break; | 842 break; |
858 //case Z80_DAA: | 843 //case Z80_DAA: |
859 case Z80_CPL: | 844 case Z80_CPL: |
860 dst = zcycles(dst, 4); | 845 dst = cycles(&opts->gen, 4); |
861 dst = not_r(dst, opts->regs[Z80_A], SZ_B); | 846 dst = not_r(dst, opts->regs[Z80_A], SZ_B); |
862 //TODO: Implement half-carry flag | 847 //TODO: Implement half-carry flag |
863 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); | 848 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
864 break; | 849 break; |
865 case Z80_NEG: | 850 case Z80_NEG: |
866 dst = zcycles(dst, 8); | 851 dst = cycles(&opts->gen, 8); |
867 dst = neg_r(dst, opts->regs[Z80_A], SZ_B); | 852 dst = neg_r(dst, opts->regs[Z80_A], SZ_B); |
868 //TODO: Implement half-carry flag | 853 //TODO: Implement half-carry flag |
869 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 854 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
870 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 855 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
871 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 856 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
872 dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); | 857 dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); |
873 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); | 858 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
874 break; | 859 break; |
875 case Z80_CCF: | 860 case Z80_CCF: |
876 dst = zcycles(dst, 4); | 861 dst = cycles(&opts->gen, 4); |
877 dst = xor_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B); | 862 dst = xor_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
878 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 863 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
879 //TODO: Implement half-carry flag | 864 //TODO: Implement half-carry flag |
880 break; | 865 break; |
881 case Z80_SCF: | 866 case Z80_SCF: |
882 dst = zcycles(dst, 4); | 867 dst = cycles(&opts->gen, 4); |
883 dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B); | 868 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
884 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 869 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
885 //TODO: Implement half-carry flag | 870 //TODO: Implement half-carry flag |
886 break; | 871 break; |
887 case Z80_NOP: | 872 case Z80_NOP: |
888 if (inst->immed == 42) { | 873 if (inst->immed == 42) { |
889 dst = call(dst, (uint8_t *)z80_save_context); | 874 dst = call(dst, (uint8_t *)z80_save_context); |
890 dst = mov_rr(dst, CONTEXT, RDI, SZ_Q); | 875 dst = mov_rr(dst, opts->gen.context_reg, RDI, SZ_Q); |
891 dst = jmp(dst, (uint8_t *)z80_print_regs_exit); | 876 dst = jmp(dst, (uint8_t *)z80_print_regs_exit); |
892 } else { | 877 } else { |
893 dst = zcycles(dst, 4 * inst->immed); | 878 dst = cycles(&opts->gen, 4 * inst->immed); |
894 } | 879 } |
895 break; | 880 break; |
896 case Z80_HALT: | 881 case Z80_HALT: |
897 dst = zcycles(dst, 4); | 882 dst = cycles(&opts->gen, 4); |
898 dst = mov_ir(dst, address, SCRATCH1, SZ_W); | 883 dst = mov_ir(dst, address, opts->gen.scratch1, SZ_W); |
899 uint8_t * call_inst = dst; | 884 uint8_t * call_inst = dst; |
900 dst = call(dst, (uint8_t *)z80_halt); | 885 dst = call(dst, (uint8_t *)z80_halt); |
901 dst = jmp(dst, call_inst); | 886 dst = jmp(dst, call_inst); |
902 break; | 887 break; |
903 case Z80_DI: | 888 case Z80_DI: |
904 dst = zcycles(dst, 4); | 889 dst = cycles(&opts->gen, 4); |
905 dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff1), SZ_B); | 890 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); |
906 dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff2), SZ_B); | 891 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); |
907 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, sync_cycle), ZLIMIT, SZ_D); | 892 dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D); |
908 dst = mov_irdisp8(dst, 0xFFFFFFFF, CONTEXT, offsetof(z80_context, int_cycle), SZ_D); | 893 dst = mov_irdisp8(dst, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D); |
909 break; | 894 break; |
910 case Z80_EI: | 895 case Z80_EI: |
911 dst = zcycles(dst, 4); | 896 dst = cycles(&opts->gen, 4); |
912 dst = mov_rrdisp32(dst, ZCYCLES, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D); | 897 dst = mov_rrdisp32(dst, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); |
913 dst = mov_irdisp8(dst, 1, CONTEXT, offsetof(z80_context, iff1), SZ_B); | 898 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); |
914 dst = mov_irdisp8(dst, 1, CONTEXT, offsetof(z80_context, iff2), SZ_B); | 899 dst = mov_irdisp8(dst, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); |
915 //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles | 900 //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles |
916 dst = add_irdisp32(dst, 4, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D); | 901 dst = add_irdisp32(dst, 4, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); |
917 dst = call(dst, (uint8_t *)z80_do_sync); | 902 dst = call(dst, (uint8_t *)z80_do_sync); |
918 break; | 903 break; |
919 case Z80_IM: | 904 case Z80_IM: |
920 dst = zcycles(dst, 4); | 905 dst = cycles(&opts->gen, 4); |
921 dst = mov_irdisp8(dst, inst->immed, CONTEXT, offsetof(z80_context, im), SZ_B); | 906 dst = mov_irdisp8(dst, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B); |
922 break; | 907 break; |
923 case Z80_RLC: | 908 case Z80_RLC: |
924 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); | 909 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); |
925 dst = zcycles(dst, cycles); | 910 dst = cycles(&opts->gen, cycles); |
926 if (inst->addr_mode != Z80_UNUSED) { | 911 if (inst->addr_mode != Z80_UNUSED) { |
927 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 912 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
928 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register | 913 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register |
929 dst = zcycles(dst, 1); | 914 dst = cycles(&opts->gen, 1); |
930 } else { | 915 } else { |
931 src_op.mode = MODE_UNUSED; | 916 src_op.mode = MODE_UNUSED; |
932 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 917 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
933 } | 918 } |
934 dst = rol_ir(dst, 1, dst_op.base, SZ_B); | 919 dst = rol_ir(dst, 1, dst_op.base, SZ_B); |
935 if (src_op.mode != MODE_UNUSED) { | 920 if (src_op.mode != MODE_UNUSED) { |
936 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); | 921 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); |
937 } | 922 } |
938 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 923 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
939 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 924 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
940 //TODO: Implement half-carry flag | 925 //TODO: Implement half-carry flag |
941 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); | 926 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); |
942 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 927 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
943 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 928 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
944 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 929 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
945 if (inst->addr_mode != Z80_UNUSED) { | 930 if (inst->addr_mode != Z80_UNUSED) { |
946 dst = z80_save_result(dst, inst); | 931 dst = z80_save_result(dst, inst); |
947 if (src_op.mode != MODE_UNUSED) { | 932 if (src_op.mode != MODE_UNUSED) { |
948 dst = z80_save_reg(dst, inst, opts); | 933 dst = z80_save_reg(dst, inst, opts); |
949 } | 934 } |
951 dst = z80_save_reg(dst, inst, opts); | 936 dst = z80_save_reg(dst, inst, opts); |
952 } | 937 } |
953 break; | 938 break; |
954 case Z80_RL: | 939 case Z80_RL: |
955 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); | 940 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); |
956 dst = zcycles(dst, cycles); | 941 dst = cycles(&opts->gen, cycles); |
957 if (inst->addr_mode != Z80_UNUSED) { | 942 if (inst->addr_mode != Z80_UNUSED) { |
958 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 943 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
959 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register | 944 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register |
960 dst = zcycles(dst, 1); | 945 dst = cycles(&opts->gen, 1); |
961 } else { | 946 } else { |
962 src_op.mode = MODE_UNUSED; | 947 src_op.mode = MODE_UNUSED; |
963 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 948 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
964 } | 949 } |
965 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 950 dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
966 dst = rcl_ir(dst, 1, dst_op.base, SZ_B); | 951 dst = rcl_ir(dst, 1, dst_op.base, SZ_B); |
967 if (src_op.mode != MODE_UNUSED) { | 952 if (src_op.mode != MODE_UNUSED) { |
968 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); | 953 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); |
969 } | 954 } |
970 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 955 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
971 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 956 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
972 //TODO: Implement half-carry flag | 957 //TODO: Implement half-carry flag |
973 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); | 958 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); |
974 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 959 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
975 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 960 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
976 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 961 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
977 if (inst->addr_mode != Z80_UNUSED) { | 962 if (inst->addr_mode != Z80_UNUSED) { |
978 dst = z80_save_result(dst, inst); | 963 dst = z80_save_result(dst, inst); |
979 if (src_op.mode != MODE_UNUSED) { | 964 if (src_op.mode != MODE_UNUSED) { |
980 dst = z80_save_reg(dst, inst, opts); | 965 dst = z80_save_reg(dst, inst, opts); |
981 } | 966 } |
983 dst = z80_save_reg(dst, inst, opts); | 968 dst = z80_save_reg(dst, inst, opts); |
984 } | 969 } |
985 break; | 970 break; |
986 case Z80_RRC: | 971 case Z80_RRC: |
987 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); | 972 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); |
988 dst = zcycles(dst, cycles); | 973 dst = cycles(&opts->gen, cycles); |
989 if (inst->addr_mode != Z80_UNUSED) { | 974 if (inst->addr_mode != Z80_UNUSED) { |
990 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 975 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
991 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register | 976 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register |
992 dst = zcycles(dst, 1); | 977 dst = cycles(&opts->gen, 1); |
993 } else { | 978 } else { |
994 src_op.mode = MODE_UNUSED; | 979 src_op.mode = MODE_UNUSED; |
995 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 980 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
996 } | 981 } |
997 dst = ror_ir(dst, 1, dst_op.base, SZ_B); | 982 dst = ror_ir(dst, 1, dst_op.base, SZ_B); |
998 if (src_op.mode != MODE_UNUSED) { | 983 if (src_op.mode != MODE_UNUSED) { |
999 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); | 984 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); |
1000 } | 985 } |
1001 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 986 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
1002 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 987 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1003 //TODO: Implement half-carry flag | 988 //TODO: Implement half-carry flag |
1004 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); | 989 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); |
1005 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 990 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1006 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 991 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1007 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 992 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1008 if (inst->addr_mode != Z80_UNUSED) { | 993 if (inst->addr_mode != Z80_UNUSED) { |
1009 dst = z80_save_result(dst, inst); | 994 dst = z80_save_result(dst, inst); |
1010 if (src_op.mode != MODE_UNUSED) { | 995 if (src_op.mode != MODE_UNUSED) { |
1011 dst = z80_save_reg(dst, inst, opts); | 996 dst = z80_save_reg(dst, inst, opts); |
1012 } | 997 } |
1014 dst = z80_save_reg(dst, inst, opts); | 999 dst = z80_save_reg(dst, inst, opts); |
1015 } | 1000 } |
1016 break; | 1001 break; |
1017 case Z80_RR: | 1002 case Z80_RR: |
1018 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); | 1003 cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); |
1019 dst = zcycles(dst, cycles); | 1004 dst = cycles(&opts->gen, cycles); |
1020 if (inst->addr_mode != Z80_UNUSED) { | 1005 if (inst->addr_mode != Z80_UNUSED) { |
1021 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 1006 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
1022 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register | 1007 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register |
1023 dst = zcycles(dst, 1); | 1008 dst = cycles(&opts->gen, 1); |
1024 } else { | 1009 } else { |
1025 src_op.mode = MODE_UNUSED; | 1010 src_op.mode = MODE_UNUSED; |
1026 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 1011 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
1027 } | 1012 } |
1028 dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 1013 dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
1029 dst = rcr_ir(dst, 1, dst_op.base, SZ_B); | 1014 dst = rcr_ir(dst, 1, dst_op.base, SZ_B); |
1030 if (src_op.mode != MODE_UNUSED) { | 1015 if (src_op.mode != MODE_UNUSED) { |
1031 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); | 1016 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); |
1032 } | 1017 } |
1033 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 1018 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
1034 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 1019 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1035 //TODO: Implement half-carry flag | 1020 //TODO: Implement half-carry flag |
1036 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); | 1021 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); |
1037 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 1022 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1038 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 1023 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1039 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 1024 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1040 if (inst->addr_mode != Z80_UNUSED) { | 1025 if (inst->addr_mode != Z80_UNUSED) { |
1041 dst = z80_save_result(dst, inst); | 1026 dst = z80_save_result(dst, inst); |
1042 if (src_op.mode != MODE_UNUSED) { | 1027 if (src_op.mode != MODE_UNUSED) { |
1043 dst = z80_save_reg(dst, inst, opts); | 1028 dst = z80_save_reg(dst, inst, opts); |
1044 } | 1029 } |
1047 } | 1032 } |
1048 break; | 1033 break; |
1049 case Z80_SLA: | 1034 case Z80_SLA: |
1050 case Z80_SLL: | 1035 case Z80_SLL: |
1051 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; | 1036 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; |
1052 dst = zcycles(dst, cycles); | 1037 dst = cycles(&opts->gen, cycles); |
1053 if (inst->addr_mode != Z80_UNUSED) { | 1038 if (inst->addr_mode != Z80_UNUSED) { |
1054 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 1039 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
1055 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register | 1040 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register |
1056 dst = zcycles(dst, 1); | 1041 dst = cycles(&opts->gen, 1); |
1057 } else { | 1042 } else { |
1058 src_op.mode = MODE_UNUSED; | 1043 src_op.mode = MODE_UNUSED; |
1059 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 1044 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
1060 } | 1045 } |
1061 dst = shl_ir(dst, 1, dst_op.base, SZ_B); | 1046 dst = shl_ir(dst, 1, dst_op.base, SZ_B); |
1062 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 1047 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
1063 if (inst->op == Z80_SLL) { | 1048 if (inst->op == Z80_SLL) { |
1064 dst = or_ir(dst, 1, dst_op.base, SZ_B); | 1049 dst = or_ir(dst, 1, dst_op.base, SZ_B); |
1065 } | 1050 } |
1066 if (src_op.mode != MODE_UNUSED) { | 1051 if (src_op.mode != MODE_UNUSED) { |
1067 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); | 1052 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); |
1068 } | 1053 } |
1069 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 1054 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1070 //TODO: Implement half-carry flag | 1055 //TODO: Implement half-carry flag |
1071 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); | 1056 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); |
1072 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 1057 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1073 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 1058 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1074 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 1059 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1075 if (inst->addr_mode != Z80_UNUSED) { | 1060 if (inst->addr_mode != Z80_UNUSED) { |
1076 dst = z80_save_result(dst, inst); | 1061 dst = z80_save_result(dst, inst); |
1077 if (src_op.mode != MODE_UNUSED) { | 1062 if (src_op.mode != MODE_UNUSED) { |
1078 dst = z80_save_reg(dst, inst, opts); | 1063 dst = z80_save_reg(dst, inst, opts); |
1079 } | 1064 } |
1081 dst = z80_save_reg(dst, inst, opts); | 1066 dst = z80_save_reg(dst, inst, opts); |
1082 } | 1067 } |
1083 break; | 1068 break; |
1084 case Z80_SRA: | 1069 case Z80_SRA: |
1085 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; | 1070 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; |
1086 dst = zcycles(dst, cycles); | 1071 dst = cycles(&opts->gen, cycles); |
1087 if (inst->addr_mode != Z80_UNUSED) { | 1072 if (inst->addr_mode != Z80_UNUSED) { |
1088 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 1073 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
1089 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register | 1074 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register |
1090 dst = zcycles(dst, 1); | 1075 dst = cycles(&opts->gen, 1); |
1091 } else { | 1076 } else { |
1092 src_op.mode = MODE_UNUSED; | 1077 src_op.mode = MODE_UNUSED; |
1093 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 1078 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
1094 } | 1079 } |
1095 dst = sar_ir(dst, 1, dst_op.base, SZ_B); | 1080 dst = sar_ir(dst, 1, dst_op.base, SZ_B); |
1096 if (src_op.mode != MODE_UNUSED) { | 1081 if (src_op.mode != MODE_UNUSED) { |
1097 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); | 1082 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); |
1098 } | 1083 } |
1099 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 1084 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
1100 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 1085 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1101 //TODO: Implement half-carry flag | 1086 //TODO: Implement half-carry flag |
1102 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); | 1087 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); |
1103 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 1088 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1104 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 1089 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1105 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 1090 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1106 if (inst->addr_mode != Z80_UNUSED) { | 1091 if (inst->addr_mode != Z80_UNUSED) { |
1107 dst = z80_save_result(dst, inst); | 1092 dst = z80_save_result(dst, inst); |
1108 if (src_op.mode != MODE_UNUSED) { | 1093 if (src_op.mode != MODE_UNUSED) { |
1109 dst = z80_save_reg(dst, inst, opts); | 1094 dst = z80_save_reg(dst, inst, opts); |
1110 } | 1095 } |
1112 dst = z80_save_reg(dst, inst, opts); | 1097 dst = z80_save_reg(dst, inst, opts); |
1113 } | 1098 } |
1114 break; | 1099 break; |
1115 case Z80_SRL: | 1100 case Z80_SRL: |
1116 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; | 1101 cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; |
1117 dst = zcycles(dst, cycles); | 1102 dst = cycles(&opts->gen, cycles); |
1118 if (inst->addr_mode != Z80_UNUSED) { | 1103 if (inst->addr_mode != Z80_UNUSED) { |
1119 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); | 1104 dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); |
1120 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register | 1105 dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register |
1121 dst = zcycles(dst, 1); | 1106 dst = cycles(&opts->gen, 1); |
1122 } else { | 1107 } else { |
1123 src_op.mode = MODE_UNUSED; | 1108 src_op.mode = MODE_UNUSED; |
1124 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 1109 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
1125 } | 1110 } |
1126 dst = shr_ir(dst, 1, dst_op.base, SZ_B); | 1111 dst = shr_ir(dst, 1, dst_op.base, SZ_B); |
1127 if (src_op.mode != MODE_UNUSED) { | 1112 if (src_op.mode != MODE_UNUSED) { |
1128 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); | 1113 dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); |
1129 } | 1114 } |
1130 dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); | 1115 dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
1131 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 1116 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1132 //TODO: Implement half-carry flag | 1117 //TODO: Implement half-carry flag |
1133 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); | 1118 dst = cmp_ir(dst, 0, dst_op.base, SZ_B); |
1134 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 1119 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1135 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 1120 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1136 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 1121 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1137 if (inst->addr_mode != Z80_UNUSED) { | 1122 if (inst->addr_mode != Z80_UNUSED) { |
1138 dst = z80_save_result(dst, inst); | 1123 dst = z80_save_result(dst, inst); |
1139 if (src_op.mode != MODE_UNUSED) { | 1124 if (src_op.mode != MODE_UNUSED) { |
1140 dst = z80_save_reg(dst, inst, opts); | 1125 dst = z80_save_reg(dst, inst, opts); |
1141 } | 1126 } |
1142 } else { | 1127 } else { |
1143 dst = z80_save_reg(dst, inst, opts); | 1128 dst = z80_save_reg(dst, inst, opts); |
1144 } | 1129 } |
1145 break; | 1130 break; |
1146 case Z80_RLD: | 1131 case Z80_RLD: |
1147 dst = zcycles(dst, 8); | 1132 dst = cycles(&opts->gen, 8); |
1148 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); | 1133 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); |
1149 dst = call(dst, (uint8_t *)z80_read_byte); | 1134 dst = call(dst, (uint8_t *)z80_read_byte); |
1150 //Before: (HL) = 0x12, A = 0x34 | 1135 //Before: (HL) = 0x12, A = 0x34 |
1151 //After: (HL) = 0x24, A = 0x31 | 1136 //After: (HL) = 0x24, A = 0x31 |
1152 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B); | 1137 dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch2, SZ_B); |
1153 dst = shl_ir(dst, 4, SCRATCH1, SZ_W); | 1138 dst = shl_ir(dst, 4, opts->gen.scratch1, SZ_W); |
1154 dst = and_ir(dst, 0xF, SCRATCH2, SZ_W); | 1139 dst = and_ir(dst, 0xF, opts->gen.scratch2, SZ_W); |
1155 dst = and_ir(dst, 0xFFF, SCRATCH1, SZ_W); | 1140 dst = and_ir(dst, 0xFFF, opts->gen.scratch1, SZ_W); |
1156 dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); | 1141 dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); |
1157 dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W); | 1142 dst = or_rr(dst, opts->gen.scratch2, opts->gen.scratch1, SZ_W); |
1158 //SCRATCH1 = 0x0124 | 1143 //opts->gen.scratch1 = 0x0124 |
1159 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); | 1144 dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); |
1160 dst = zcycles(dst, 4); | 1145 dst = cycles(&opts->gen, 4); |
1161 dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); | 1146 dst = or_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); |
1162 //set flags | 1147 //set flags |
1163 //TODO: Implement half-carry flag | 1148 //TODO: Implement half-carry flag |
1164 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 1149 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1165 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 1150 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1166 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 1151 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1167 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 1152 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1168 | 1153 |
1169 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); | 1154 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); |
1170 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); | 1155 dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); |
1171 dst = call(dst, (uint8_t *)z80_write_byte); | 1156 dst = call(dst, (uint8_t *)z80_write_byte); |
1172 break; | 1157 break; |
1173 case Z80_RRD: | 1158 case Z80_RRD: |
1174 dst = zcycles(dst, 8); | 1159 dst = cycles(&opts->gen, 8); |
1175 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); | 1160 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); |
1176 dst = call(dst, (uint8_t *)z80_read_byte); | 1161 dst = call(dst, (uint8_t *)z80_read_byte); |
1177 //Before: (HL) = 0x12, A = 0x34 | 1162 //Before: (HL) = 0x12, A = 0x34 |
1178 //After: (HL) = 0x41, A = 0x32 | 1163 //After: (HL) = 0x41, A = 0x32 |
1179 dst = movzx_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B, SZ_W); | 1164 dst = movzx_rr(dst, opts->regs[Z80_A], opts->gen.scratch2, SZ_B, SZ_W); |
1180 dst = ror_ir(dst, 4, SCRATCH1, SZ_W); | 1165 dst = ror_ir(dst, 4, opts->gen.scratch1, SZ_W); |
1181 dst = shl_ir(dst, 4, SCRATCH2, SZ_W); | 1166 dst = shl_ir(dst, 4, opts->gen.scratch2, SZ_W); |
1182 dst = and_ir(dst, 0xF00F, SCRATCH1, SZ_W); | 1167 dst = and_ir(dst, 0xF00F, opts->gen.scratch1, SZ_W); |
1183 dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); | 1168 dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); |
1184 //SCRATCH1 = 0x2001 | 1169 //opts->gen.scratch1 = 0x2001 |
1185 //SCRATCH2 = 0x0040 | 1170 //opts->gen.scratch2 = 0x0040 |
1186 dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W); | 1171 dst = or_rr(dst, opts->gen.scratch2, opts->gen.scratch1, SZ_W); |
1187 //SCRATCH1 = 0x2041 | 1172 //opts->gen.scratch1 = 0x2041 |
1188 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); | 1173 dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); |
1189 dst = zcycles(dst, 4); | 1174 dst = cycles(&opts->gen, 4); |
1190 dst = shr_ir(dst, 4, SCRATCH1, SZ_B); | 1175 dst = shr_ir(dst, 4, opts->gen.scratch1, SZ_B); |
1191 dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); | 1176 dst = or_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); |
1192 //set flags | 1177 //set flags |
1193 //TODO: Implement half-carry flag | 1178 //TODO: Implement half-carry flag |
1194 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 1179 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1195 dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); | 1180 dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1196 dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); | 1181 dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1197 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 1182 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1198 | 1183 |
1199 dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); | 1184 dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); |
1200 dst = ror_ir(dst, 8, SCRATCH1, SZ_W); | 1185 dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); |
1201 dst = call(dst, (uint8_t *)z80_write_byte); | 1186 dst = call(dst, (uint8_t *)z80_write_byte); |
1202 break; | 1187 break; |
1203 case Z80_BIT: { | 1188 case Z80_BIT: { |
1204 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; | 1189 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; |
1205 dst = zcycles(dst, cycles); | 1190 dst = cycles(&opts->gen, cycles); |
1206 uint8_t bit; | 1191 uint8_t bit; |
1207 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { | 1192 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { |
1208 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; | 1193 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; |
1209 size = SZ_W; | 1194 size = SZ_W; |
1210 bit = inst->immed + 8; | 1195 bit = inst->immed + 8; |
1213 bit = inst->immed; | 1198 bit = inst->immed; |
1214 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); | 1199 dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); |
1215 } | 1200 } |
1216 if (inst->addr_mode != Z80_REG) { | 1201 if (inst->addr_mode != Z80_REG) { |
1217 //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4 | 1202 //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4 |
1218 dst = zcycles(dst, 1); | 1203 dst = cycles(&opts->gen, 1); |
1219 } | 1204 } |
1220 dst = bt_ir(dst, bit, src_op.base, size); | 1205 dst = bt_ir(dst, bit, src_op.base, size); |
1221 dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_Z)); | 1206 dst = setcc_rdisp8(dst, CC_NC, opts->gen.context_reg, zf_off(ZF_Z)); |
1222 dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_PV)); | 1207 dst = setcc_rdisp8(dst, CC_NC, opts->gen.context_reg, zf_off(ZF_PV)); |
1223 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); | 1208 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1224 if (inst->immed == 7) { | 1209 if (inst->immed == 7) { |
1225 dst = cmp_ir(dst, 0, src_op.base, size); | 1210 dst = cmp_ir(dst, 0, src_op.base, size); |
1226 dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); | 1211 dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1227 } else { | 1212 } else { |
1228 dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); | 1213 dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); |
1229 } | 1214 } |
1230 break; | 1215 break; |
1231 } | 1216 } |
1232 case Z80_SET: { | 1217 case Z80_SET: { |
1233 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; | 1218 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; |
1234 dst = zcycles(dst, cycles); | 1219 dst = cycles(&opts->gen, cycles); |
1235 uint8_t bit; | 1220 uint8_t bit; |
1236 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { | 1221 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { |
1237 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; | 1222 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; |
1238 size = SZ_W; | 1223 size = SZ_W; |
1239 bit = inst->immed + 8; | 1224 bit = inst->immed + 8; |
1245 if (inst->reg != Z80_USE_IMMED) { | 1230 if (inst->reg != Z80_USE_IMMED) { |
1246 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 1231 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
1247 } | 1232 } |
1248 if (inst->addr_mode != Z80_REG) { | 1233 if (inst->addr_mode != Z80_REG) { |
1249 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 | 1234 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 |
1250 dst = zcycles(dst, 1); | 1235 dst = cycles(&opts->gen, 1); |
1251 } | 1236 } |
1252 dst = bts_ir(dst, bit, src_op.base, size); | 1237 dst = bts_ir(dst, bit, src_op.base, size); |
1253 if (inst->reg != Z80_USE_IMMED) { | 1238 if (inst->reg != Z80_USE_IMMED) { |
1254 if (size == SZ_W) { | 1239 if (size == SZ_W) { |
1255 if (dst_op.base >= R8) { | 1240 if (dst_op.base >= R8) { |
1271 } | 1256 } |
1272 break; | 1257 break; |
1273 } | 1258 } |
1274 case Z80_RES: { | 1259 case Z80_RES: { |
1275 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; | 1260 cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; |
1276 dst = zcycles(dst, cycles); | 1261 dst = cycles(&opts->gen, cycles); |
1277 uint8_t bit; | 1262 uint8_t bit; |
1278 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { | 1263 if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { |
1279 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; | 1264 src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; |
1280 size = SZ_W; | 1265 size = SZ_W; |
1281 bit = inst->immed + 8; | 1266 bit = inst->immed + 8; |
1287 if (inst->reg != Z80_USE_IMMED) { | 1272 if (inst->reg != Z80_USE_IMMED) { |
1288 dst = translate_z80_reg(inst, &dst_op, dst, opts); | 1273 dst = translate_z80_reg(inst, &dst_op, dst, opts); |
1289 } | 1274 } |
1290 if (inst->addr_mode != Z80_REG) { | 1275 if (inst->addr_mode != Z80_REG) { |
1291 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 | 1276 //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 |
1292 dst = zcycles(dst, 1); | 1277 dst = cycles(&opts->gen, 1); |
1293 } | 1278 } |
1294 dst = btr_ir(dst, bit, src_op.base, size); | 1279 dst = btr_ir(dst, bit, src_op.base, size); |
1295 if (inst->reg != Z80_USE_IMMED) { | 1280 if (inst->reg != Z80_USE_IMMED) { |
1296 if (size == SZ_W) { | 1281 if (size == SZ_W) { |
1297 if (dst_op.base >= R8) { | 1282 if (dst_op.base >= R8) { |
1318 if (inst->addr_mode != Z80_REG_INDIRECT) { | 1303 if (inst->addr_mode != Z80_REG_INDIRECT) { |
1319 cycles += 6; | 1304 cycles += 6; |
1320 } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { | 1305 } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { |
1321 cycles += 4; | 1306 cycles += 4; |
1322 } | 1307 } |
1323 dst = zcycles(dst, cycles); | 1308 dst = cycles(&opts->gen, cycles); |
1324 if (inst->addr_mode != Z80_REG_INDIRECT && inst->immed < 0x4000) { | 1309 if (inst->addr_mode != Z80_REG_INDIRECT && inst->immed < 0x4000) { |
1325 uint8_t * call_dst = z80_get_native_address(context, inst->immed); | 1310 uint8_t * call_dst = z80_get_native_address(context, inst->immed); |
1326 if (!call_dst) { | 1311 if (!call_dst) { |
1327 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); | 1312 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); |
1328 //fake address to force large displacement | 1313 //fake address to force large displacement |
1329 call_dst = dst + 256; | 1314 call_dst = dst + 256; |
1330 } | 1315 } |
1331 dst = jmp(dst, call_dst); | 1316 dst = jmp(dst, call_dst); |
1332 } else { | 1317 } else { |
1333 if (inst->addr_mode == Z80_REG_INDIRECT) { | 1318 if (inst->addr_mode == Z80_REG_INDIRECT) { |
1334 dst = mov_rr(dst, opts->regs[inst->ea_reg], SCRATCH1, SZ_W); | 1319 dst = mov_rr(dst, opts->regs[inst->ea_reg], opts->gen.scratch1, SZ_W); |
1335 } else { | 1320 } else { |
1336 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); | 1321 dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); |
1337 } | 1322 } |
1338 dst = call(dst, (uint8_t *)z80_native_addr); | 1323 dst = call(dst, (uint8_t *)z80_native_addr); |
1339 dst = jmp_r(dst, SCRATCH1); | 1324 dst = jmp_r(dst, opts->gen.scratch1); |
1340 } | 1325 } |
1341 break; | 1326 break; |
1342 } | 1327 } |
1343 case Z80_JPCC: { | 1328 case Z80_JPCC: { |
1344 dst = zcycles(dst, 7);//T States: 4,3 | 1329 dst = cycles(&opts->gen, 7);//T States: 4,3 |
1345 uint8_t cond = CC_Z; | 1330 uint8_t cond = CC_Z; |
1346 switch (inst->reg) | 1331 switch (inst->reg) |
1347 { | 1332 { |
1348 case Z80_CC_NZ: | 1333 case Z80_CC_NZ: |
1349 cond = CC_NZ; | 1334 cond = CC_NZ; |
1350 case Z80_CC_Z: | 1335 case Z80_CC_Z: |
1351 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); | 1336 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); |
1352 break; | 1337 break; |
1353 case Z80_CC_NC: | 1338 case Z80_CC_NC: |
1354 cond = CC_NZ; | 1339 cond = CC_NZ; |
1355 case Z80_CC_C: | 1340 case Z80_CC_C: |
1356 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 1341 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
1357 break; | 1342 break; |
1358 case Z80_CC_PO: | 1343 case Z80_CC_PO: |
1359 cond = CC_NZ; | 1344 cond = CC_NZ; |
1360 case Z80_CC_PE: | 1345 case Z80_CC_PE: |
1361 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); | 1346 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); |
1362 break; | 1347 break; |
1363 case Z80_CC_P: | 1348 case Z80_CC_P: |
1364 cond = CC_NZ; | 1349 cond = CC_NZ; |
1365 case Z80_CC_M: | 1350 case Z80_CC_M: |
1366 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); | 1351 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); |
1367 break; | 1352 break; |
1368 } | 1353 } |
1369 uint8_t *no_jump_off = dst+1; | 1354 uint8_t *no_jump_off = dst+1; |
1370 dst = jcc(dst, cond, dst+2); | 1355 dst = jcc(dst, cond, dst+2); |
1371 dst = zcycles(dst, 5);//T States: 5 | 1356 dst = cycles(&opts->gen, 5);//T States: 5 |
1372 uint16_t dest_addr = inst->immed; | 1357 uint16_t dest_addr = inst->immed; |
1373 if (dest_addr < 0x4000) { | 1358 if (dest_addr < 0x4000) { |
1374 uint8_t * call_dst = z80_get_native_address(context, dest_addr); | 1359 uint8_t * call_dst = z80_get_native_address(context, dest_addr); |
1375 if (!call_dst) { | 1360 if (!call_dst) { |
1376 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); | 1361 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); |
1377 //fake address to force large displacement | 1362 //fake address to force large displacement |
1378 call_dst = dst + 256; | 1363 call_dst = dst + 256; |
1379 } | 1364 } |
1380 dst = jmp(dst, call_dst); | 1365 dst = jmp(dst, call_dst); |
1381 } else { | 1366 } else { |
1382 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); | 1367 dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); |
1383 dst = call(dst, (uint8_t *)z80_native_addr); | 1368 dst = call(dst, (uint8_t *)z80_native_addr); |
1384 dst = jmp_r(dst, SCRATCH1); | 1369 dst = jmp_r(dst, opts->gen.scratch1); |
1385 } | 1370 } |
1386 *no_jump_off = dst - (no_jump_off+1); | 1371 *no_jump_off = dst - (no_jump_off+1); |
1387 break; | 1372 break; |
1388 } | 1373 } |
1389 case Z80_JR: { | 1374 case Z80_JR: { |
1390 dst = zcycles(dst, 12);//T States: 4,3,5 | 1375 dst = cycles(&opts->gen, 12);//T States: 4,3,5 |
1391 uint16_t dest_addr = address + inst->immed + 2; | 1376 uint16_t dest_addr = address + inst->immed + 2; |
1392 if (dest_addr < 0x4000) { | 1377 if (dest_addr < 0x4000) { |
1393 uint8_t * call_dst = z80_get_native_address(context, dest_addr); | 1378 uint8_t * call_dst = z80_get_native_address(context, dest_addr); |
1394 if (!call_dst) { | 1379 if (!call_dst) { |
1395 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); | 1380 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); |
1396 //fake address to force large displacement | 1381 //fake address to force large displacement |
1397 call_dst = dst + 256; | 1382 call_dst = dst + 256; |
1398 } | 1383 } |
1399 dst = jmp(dst, call_dst); | 1384 dst = jmp(dst, call_dst); |
1400 } else { | 1385 } else { |
1401 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); | 1386 dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); |
1402 dst = call(dst, (uint8_t *)z80_native_addr); | 1387 dst = call(dst, (uint8_t *)z80_native_addr); |
1403 dst = jmp_r(dst, SCRATCH1); | 1388 dst = jmp_r(dst, opts->gen.scratch1); |
1404 } | 1389 } |
1405 break; | 1390 break; |
1406 } | 1391 } |
1407 case Z80_JRCC: { | 1392 case Z80_JRCC: { |
1408 dst = zcycles(dst, 7);//T States: 4,3 | 1393 dst = cycles(&opts->gen, 7);//T States: 4,3 |
1409 uint8_t cond = CC_Z; | 1394 uint8_t cond = CC_Z; |
1410 switch (inst->reg) | 1395 switch (inst->reg) |
1411 { | 1396 { |
1412 case Z80_CC_NZ: | 1397 case Z80_CC_NZ: |
1413 cond = CC_NZ; | 1398 cond = CC_NZ; |
1414 case Z80_CC_Z: | 1399 case Z80_CC_Z: |
1415 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); | 1400 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); |
1416 break; | 1401 break; |
1417 case Z80_CC_NC: | 1402 case Z80_CC_NC: |
1418 cond = CC_NZ; | 1403 cond = CC_NZ; |
1419 case Z80_CC_C: | 1404 case Z80_CC_C: |
1420 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 1405 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
1421 break; | 1406 break; |
1422 } | 1407 } |
1423 uint8_t *no_jump_off = dst+1; | 1408 uint8_t *no_jump_off = dst+1; |
1424 dst = jcc(dst, cond, dst+2); | 1409 dst = jcc(dst, cond, dst+2); |
1425 dst = zcycles(dst, 5);//T States: 5 | 1410 dst = cycles(&opts->gen, 5);//T States: 5 |
1426 uint16_t dest_addr = address + inst->immed + 2; | 1411 uint16_t dest_addr = address + inst->immed + 2; |
1427 if (dest_addr < 0x4000) { | 1412 if (dest_addr < 0x4000) { |
1428 uint8_t * call_dst = z80_get_native_address(context, dest_addr); | 1413 uint8_t * call_dst = z80_get_native_address(context, dest_addr); |
1429 if (!call_dst) { | 1414 if (!call_dst) { |
1430 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); | 1415 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); |
1431 //fake address to force large displacement | 1416 //fake address to force large displacement |
1432 call_dst = dst + 256; | 1417 call_dst = dst + 256; |
1433 } | 1418 } |
1434 dst = jmp(dst, call_dst); | 1419 dst = jmp(dst, call_dst); |
1435 } else { | 1420 } else { |
1436 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); | 1421 dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); |
1437 dst = call(dst, (uint8_t *)z80_native_addr); | 1422 dst = call(dst, (uint8_t *)z80_native_addr); |
1438 dst = jmp_r(dst, SCRATCH1); | 1423 dst = jmp_r(dst, opts->gen.scratch1); |
1439 } | 1424 } |
1440 *no_jump_off = dst - (no_jump_off+1); | 1425 *no_jump_off = dst - (no_jump_off+1); |
1441 break; | 1426 break; |
1442 } | 1427 } |
1443 case Z80_DJNZ: | 1428 case Z80_DJNZ: |
1444 dst = zcycles(dst, 8);//T States: 5,3 | 1429 dst = cycles(&opts->gen, 8);//T States: 5,3 |
1445 dst = sub_ir(dst, 1, opts->regs[Z80_B], SZ_B); | 1430 dst = sub_ir(dst, 1, opts->regs[Z80_B], SZ_B); |
1446 uint8_t *no_jump_off = dst+1; | 1431 uint8_t *no_jump_off = dst+1; |
1447 dst = jcc(dst, CC_Z, dst+2); | 1432 dst = jcc(dst, CC_Z, dst+2); |
1448 dst = zcycles(dst, 5);//T States: 5 | 1433 dst = cycles(&opts->gen, 5);//T States: 5 |
1449 uint16_t dest_addr = address + inst->immed + 2; | 1434 uint16_t dest_addr = address + inst->immed + 2; |
1450 if (dest_addr < 0x4000) { | 1435 if (dest_addr < 0x4000) { |
1451 uint8_t * call_dst = z80_get_native_address(context, dest_addr); | 1436 uint8_t * call_dst = z80_get_native_address(context, dest_addr); |
1452 if (!call_dst) { | 1437 if (!call_dst) { |
1453 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); | 1438 opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); |
1454 //fake address to force large displacement | 1439 //fake address to force large displacement |
1455 call_dst = dst + 256; | 1440 call_dst = dst + 256; |
1456 } | 1441 } |
1457 dst = jmp(dst, call_dst); | 1442 dst = jmp(dst, call_dst); |
1458 } else { | 1443 } else { |
1459 dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); | 1444 dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); |
1460 dst = call(dst, (uint8_t *)z80_native_addr); | 1445 dst = call(dst, (uint8_t *)z80_native_addr); |
1461 dst = jmp_r(dst, SCRATCH1); | 1446 dst = jmp_r(dst, opts->gen.scratch1); |
1462 } | 1447 } |
1463 *no_jump_off = dst - (no_jump_off+1); | 1448 *no_jump_off = dst - (no_jump_off+1); |
1464 break; | 1449 break; |
1465 case Z80_CALL: { | 1450 case Z80_CALL: { |
1466 dst = zcycles(dst, 11);//T States: 4,3,4 | 1451 dst = cycles(&opts->gen, 11);//T States: 4,3,4 |
1467 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1452 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1468 dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); | 1453 dst = mov_ir(dst, address + 3, opts->gen.scratch1, SZ_W); |
1469 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); | 1454 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); |
1470 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 | 1455 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 |
1471 if (inst->immed < 0x4000) { | 1456 if (inst->immed < 0x4000) { |
1472 uint8_t * call_dst = z80_get_native_address(context, inst->immed); | 1457 uint8_t * call_dst = z80_get_native_address(context, inst->immed); |
1473 if (!call_dst) { | 1458 if (!call_dst) { |
1474 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); | 1459 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); |
1475 //fake address to force large displacement | 1460 //fake address to force large displacement |
1476 call_dst = dst + 256; | 1461 call_dst = dst + 256; |
1477 } | 1462 } |
1478 dst = jmp(dst, call_dst); | 1463 dst = jmp(dst, call_dst); |
1479 } else { | 1464 } else { |
1480 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); | 1465 dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); |
1481 dst = call(dst, (uint8_t *)z80_native_addr); | 1466 dst = call(dst, (uint8_t *)z80_native_addr); |
1482 dst = jmp_r(dst, SCRATCH1); | 1467 dst = jmp_r(dst, opts->gen.scratch1); |
1483 } | 1468 } |
1484 break; | 1469 break; |
1485 } | 1470 } |
1486 case Z80_CALLCC: | 1471 case Z80_CALLCC: |
1487 dst = zcycles(dst, 10);//T States: 4,3,3 (false case) | 1472 dst = cycles(&opts->gen, 10);//T States: 4,3,3 (false case) |
1488 uint8_t cond = CC_Z; | 1473 uint8_t cond = CC_Z; |
1489 switch (inst->reg) | 1474 switch (inst->reg) |
1490 { | 1475 { |
1491 case Z80_CC_NZ: | 1476 case Z80_CC_NZ: |
1492 cond = CC_NZ; | 1477 cond = CC_NZ; |
1493 case Z80_CC_Z: | 1478 case Z80_CC_Z: |
1494 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); | 1479 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); |
1495 break; | 1480 break; |
1496 case Z80_CC_NC: | 1481 case Z80_CC_NC: |
1497 cond = CC_NZ; | 1482 cond = CC_NZ; |
1498 case Z80_CC_C: | 1483 case Z80_CC_C: |
1499 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 1484 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
1500 break; | 1485 break; |
1501 case Z80_CC_PO: | 1486 case Z80_CC_PO: |
1502 cond = CC_NZ; | 1487 cond = CC_NZ; |
1503 case Z80_CC_PE: | 1488 case Z80_CC_PE: |
1504 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); | 1489 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); |
1505 break; | 1490 break; |
1506 case Z80_CC_P: | 1491 case Z80_CC_P: |
1507 cond = CC_NZ; | 1492 cond = CC_NZ; |
1508 case Z80_CC_M: | 1493 case Z80_CC_M: |
1509 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); | 1494 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); |
1510 break; | 1495 break; |
1511 } | 1496 } |
1512 uint8_t *no_call_off = dst+1; | 1497 uint8_t *no_call_off = dst+1; |
1513 dst = jcc(dst, cond, dst+2); | 1498 dst = jcc(dst, cond, dst+2); |
1514 dst = zcycles(dst, 1);//Last of the above T states takes an extra cycle in the true case | 1499 dst = cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case |
1515 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1500 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1516 dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); | 1501 dst = mov_ir(dst, address + 3, opts->gen.scratch1, SZ_W); |
1517 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); | 1502 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); |
1518 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 | 1503 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 |
1519 if (inst->immed < 0x4000) { | 1504 if (inst->immed < 0x4000) { |
1520 uint8_t * call_dst = z80_get_native_address(context, inst->immed); | 1505 uint8_t * call_dst = z80_get_native_address(context, inst->immed); |
1521 if (!call_dst) { | 1506 if (!call_dst) { |
1522 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); | 1507 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); |
1523 //fake address to force large displacement | 1508 //fake address to force large displacement |
1524 call_dst = dst + 256; | 1509 call_dst = dst + 256; |
1525 } | 1510 } |
1526 dst = jmp(dst, call_dst); | 1511 dst = jmp(dst, call_dst); |
1527 } else { | 1512 } else { |
1528 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); | 1513 dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); |
1529 dst = call(dst, (uint8_t *)z80_native_addr); | 1514 dst = call(dst, (uint8_t *)z80_native_addr); |
1530 dst = jmp_r(dst, SCRATCH1); | 1515 dst = jmp_r(dst, opts->gen.scratch1); |
1531 } | 1516 } |
1532 *no_call_off = dst - (no_call_off+1); | 1517 *no_call_off = dst - (no_call_off+1); |
1533 break; | 1518 break; |
1534 case Z80_RET: | 1519 case Z80_RET: |
1535 dst = zcycles(dst, 4);//T States: 4 | 1520 dst = cycles(&opts->gen, 4);//T States: 4 |
1536 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 1521 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); |
1537 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 | 1522 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 |
1538 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1523 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1539 dst = call(dst, (uint8_t *)z80_native_addr); | 1524 dst = call(dst, (uint8_t *)z80_native_addr); |
1540 dst = jmp_r(dst, SCRATCH1); | 1525 dst = jmp_r(dst, opts->gen.scratch1); |
1541 break; | 1526 break; |
1542 case Z80_RETCC: { | 1527 case Z80_RETCC: { |
1543 dst = zcycles(dst, 5);//T States: 5 | 1528 dst = cycles(&opts->gen, 5);//T States: 5 |
1544 uint8_t cond = CC_Z; | 1529 uint8_t cond = CC_Z; |
1545 switch (inst->reg) | 1530 switch (inst->reg) |
1546 { | 1531 { |
1547 case Z80_CC_NZ: | 1532 case Z80_CC_NZ: |
1548 cond = CC_NZ; | 1533 cond = CC_NZ; |
1549 case Z80_CC_Z: | 1534 case Z80_CC_Z: |
1550 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); | 1535 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); |
1551 break; | 1536 break; |
1552 case Z80_CC_NC: | 1537 case Z80_CC_NC: |
1553 cond = CC_NZ; | 1538 cond = CC_NZ; |
1554 case Z80_CC_C: | 1539 case Z80_CC_C: |
1555 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); | 1540 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); |
1556 break; | 1541 break; |
1557 case Z80_CC_PO: | 1542 case Z80_CC_PO: |
1558 cond = CC_NZ; | 1543 cond = CC_NZ; |
1559 case Z80_CC_PE: | 1544 case Z80_CC_PE: |
1560 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); | 1545 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); |
1561 break; | 1546 break; |
1562 case Z80_CC_P: | 1547 case Z80_CC_P: |
1563 cond = CC_NZ; | 1548 cond = CC_NZ; |
1564 case Z80_CC_M: | 1549 case Z80_CC_M: |
1565 dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); | 1550 dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); |
1566 break; | 1551 break; |
1567 } | 1552 } |
1568 uint8_t *no_call_off = dst+1; | 1553 uint8_t *no_call_off = dst+1; |
1569 dst = jcc(dst, cond, dst+2); | 1554 dst = jcc(dst, cond, dst+2); |
1570 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 1555 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); |
1571 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 | 1556 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 |
1572 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1557 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1573 dst = call(dst, (uint8_t *)z80_native_addr); | 1558 dst = call(dst, (uint8_t *)z80_native_addr); |
1574 dst = jmp_r(dst, SCRATCH1); | 1559 dst = jmp_r(dst, opts->gen.scratch1); |
1575 *no_call_off = dst - (no_call_off+1); | 1560 *no_call_off = dst - (no_call_off+1); |
1576 break; | 1561 break; |
1577 } | 1562 } |
1578 case Z80_RETI: | 1563 case Z80_RETI: |
1579 //For some systems, this may need a callback for signalling interrupt routine completion | 1564 //For some systems, this may need a callback for signalling interrupt routine completion |
1580 dst = zcycles(dst, 8);//T States: 4, 4 | 1565 dst = cycles(&opts->gen, 8);//T States: 4, 4 |
1581 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 1566 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); |
1582 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 | 1567 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 |
1583 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1568 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1584 dst = call(dst, (uint8_t *)z80_native_addr); | 1569 dst = call(dst, (uint8_t *)z80_native_addr); |
1585 dst = jmp_r(dst, SCRATCH1); | 1570 dst = jmp_r(dst, opts->gen.scratch1); |
1586 break; | 1571 break; |
1587 case Z80_RETN: | 1572 case Z80_RETN: |
1588 dst = zcycles(dst, 8);//T States: 4, 4 | 1573 dst = cycles(&opts->gen, 8);//T States: 4, 4 |
1589 dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, iff2), SCRATCH2, SZ_B); | 1574 dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B); |
1590 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 1575 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); |
1591 dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, offsetof(z80_context, iff1), SZ_B); | 1576 dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); |
1592 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 | 1577 dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 |
1593 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1578 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1594 dst = call(dst, (uint8_t *)z80_native_addr); | 1579 dst = call(dst, (uint8_t *)z80_native_addr); |
1595 dst = jmp_r(dst, SCRATCH1); | 1580 dst = jmp_r(dst, opts->gen.scratch1); |
1596 break; | 1581 break; |
1597 case Z80_RST: { | 1582 case Z80_RST: { |
1598 //RST is basically CALL to an address in page 0 | 1583 //RST is basically CALL to an address in page 0 |
1599 dst = zcycles(dst, 5);//T States: 5 | 1584 dst = cycles(&opts->gen, 5);//T States: 5 |
1600 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1585 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1601 dst = mov_ir(dst, address + 1, SCRATCH1, SZ_W); | 1586 dst = mov_ir(dst, address + 1, opts->gen.scratch1, SZ_W); |
1602 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); | 1587 dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); |
1603 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 | 1588 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 |
1604 uint8_t * call_dst = z80_get_native_address(context, inst->immed); | 1589 uint8_t * call_dst = z80_get_native_address(context, inst->immed); |
1605 if (!call_dst) { | 1590 if (!call_dst) { |
1606 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); | 1591 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); |
1607 //fake address to force large displacement | 1592 //fake address to force large displacement |
1609 } | 1594 } |
1610 dst = jmp(dst, call_dst); | 1595 dst = jmp(dst, call_dst); |
1611 break; | 1596 break; |
1612 } | 1597 } |
1613 case Z80_IN: | 1598 case Z80_IN: |
1614 dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 | 1599 dst = cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 |
1615 if (inst->addr_mode == Z80_IMMED_INDIRECT) { | 1600 if (inst->addr_mode == Z80_IMMED_INDIRECT) { |
1616 dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_B); | 1601 dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_B); |
1617 } else { | 1602 } else { |
1618 dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH1, SZ_B); | 1603 dst = mov_rr(dst, opts->regs[Z80_C], opts->gen.scratch1, SZ_B); |
1619 } | 1604 } |
1620 dst = call(dst, (uint8_t *)z80_io_read); | 1605 dst = call(dst, (uint8_t *)z80_io_read); |
1621 translate_z80_reg(inst, &dst_op, dst, opts); | 1606 translate_z80_reg(inst, &dst_op, dst, opts); |
1622 dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_B); | 1607 dst = mov_rr(dst, opts->gen.scratch1, dst_op.base, SZ_B); |
1623 dst = z80_save_reg(dst, inst, opts); | 1608 dst = z80_save_reg(dst, inst, opts); |
1624 break; | 1609 break; |
1625 /*case Z80_INI: | 1610 /*case Z80_INI: |
1626 case Z80_INIR: | 1611 case Z80_INIR: |
1627 case Z80_IND: | 1612 case Z80_IND: |
1628 case Z80_INDR:*/ | 1613 case Z80_INDR:*/ |
1629 case Z80_OUT: | 1614 case Z80_OUT: |
1630 dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 | 1615 dst = cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 |
1631 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) { | 1616 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) { |
1632 dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_B); | 1617 dst = mov_ir(dst, inst->immed, opts->gen.scratch2, SZ_B); |
1633 } else { | 1618 } else { |
1634 dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH2, SZ_B); | 1619 dst = mov_rr(dst, opts->regs[Z80_C], opts->gen.scratch2, SZ_B); |
1635 } | 1620 } |
1636 translate_z80_reg(inst, &src_op, dst, opts); | 1621 translate_z80_reg(inst, &src_op, dst, opts); |
1637 dst = mov_rr(dst, dst_op.base, SCRATCH1, SZ_B); | 1622 dst = mov_rr(dst, dst_op.base, opts->gen.scratch1, SZ_B); |
1638 dst = call(dst, (uint8_t *)z80_io_write); | 1623 dst = call(dst, (uint8_t *)z80_io_write); |
1639 dst = z80_save_reg(dst, inst, opts); | 1624 dst = z80_save_reg(dst, inst, opts); |
1640 break; | 1625 break; |
1641 /*case Z80_OUTI: | 1626 /*case Z80_OUTI: |
1642 case Z80_OTIR: | 1627 case Z80_OTIR: |
1674 } | 1659 } |
1675 //dprintf("z80_get_native_address: %X %p\n", address, map->base + map->offsets[address]); | 1660 //dprintf("z80_get_native_address: %X %p\n", address, map->base + map->offsets[address]); |
1676 return map->base + map->offsets[address]; | 1661 return map->base + map->offsets[address]; |
1677 } | 1662 } |
1678 | 1663 |
1679 uint8_t z80_get_native_inst_size(x86_z80_options * opts, uint32_t address) | 1664 uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address) |
1680 { | 1665 { |
1681 if (address >= 0x4000) { | 1666 if (address >= 0x4000) { |
1682 return 0; | 1667 return 0; |
1683 } | 1668 } |
1684 return opts->ram_inst_sizes[address & 0x1FFF]; | 1669 return opts->ram_inst_sizes[address & 0x1FFF]; |
1686 | 1671 |
1687 void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size) | 1672 void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size) |
1688 { | 1673 { |
1689 uint32_t orig_address = address; | 1674 uint32_t orig_address = address; |
1690 native_map_slot *map; | 1675 native_map_slot *map; |
1691 x86_z80_options * opts = context->options; | 1676 z80_options * opts = context->options; |
1692 if (address < 0x4000) { | 1677 if (address < 0x4000) { |
1693 address &= 0x1FFF; | 1678 address &= 0x1FFF; |
1694 map = context->static_code_map; | 1679 map = context->static_code_map; |
1695 opts->ram_inst_sizes[address] = native_size; | 1680 opts->ram_inst_sizes[address] = native_size; |
1696 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); | 1681 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); |
1750 { | 1735 { |
1751 uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address); | 1736 uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address); |
1752 if (inst_start != INVALID_INSTRUCTION_START) { | 1737 if (inst_start != INVALID_INSTRUCTION_START) { |
1753 uint8_t * dst = z80_get_native_address(context, inst_start); | 1738 uint8_t * dst = z80_get_native_address(context, inst_start); |
1754 dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", dst, inst_start, address); | 1739 dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", dst, inst_start, address); |
1755 dst = mov_ir(dst, inst_start, SCRATCH1, SZ_D); | 1740 dst = mov_ir(dst, inst_start, opts->gen.scratch1, SZ_D); |
1756 dst = call(dst, (uint8_t *)z80_retrans_stub); | 1741 dst = call(dst, (uint8_t *)z80_retrans_stub); |
1757 } | 1742 } |
1758 return context; | 1743 return context; |
1759 } | 1744 } |
1760 | 1745 |
1771 return addr; | 1756 return addr; |
1772 } | 1757 } |
1773 | 1758 |
1774 void z80_handle_deferred(z80_context * context) | 1759 void z80_handle_deferred(z80_context * context) |
1775 { | 1760 { |
1776 x86_z80_options * opts = context->options; | 1761 z80_options * opts = context->options; |
1777 process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address); | 1762 process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address); |
1778 if (opts->deferred) { | 1763 if (opts->deferred) { |
1779 translate_z80_stream(context, opts->deferred->address); | 1764 translate_z80_stream(context, opts->deferred->address); |
1780 } | 1765 } |
1781 } | 1766 } |
1782 | 1767 |
1783 void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) | 1768 void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) |
1784 { | 1769 { |
1785 char disbuf[80]; | 1770 char disbuf[80]; |
1786 x86_z80_options * opts = context->options; | 1771 z80_options * opts = context->options; |
1787 uint8_t orig_size = z80_get_native_inst_size(opts, address); | 1772 uint8_t orig_size = z80_get_native_inst_size(opts, address); |
1788 uint32_t orig = address; | 1773 uint32_t orig = address; |
1789 address &= 0x1FFF; | 1774 address &= 0x1FFF; |
1790 uint8_t * dst = opts->cur_code; | 1775 uint8_t * dst = opts->cur_code; |
1791 uint8_t * dst_end = opts->code_end; | 1776 uint8_t * dst_end = opts->code_end; |
1848 { | 1833 { |
1849 char disbuf[80]; | 1834 char disbuf[80]; |
1850 if (z80_get_native_address(context, address)) { | 1835 if (z80_get_native_address(context, address)) { |
1851 return; | 1836 return; |
1852 } | 1837 } |
1853 x86_z80_options * opts = context->options; | 1838 z80_options * opts = context->options; |
1854 uint32_t start_address = address; | 1839 uint32_t start_address = address; |
1855 uint8_t * encoded = NULL, *next; | 1840 uint8_t * encoded = NULL, *next; |
1856 if (address < 0x4000) { | 1841 if (address < 0x4000) { |
1857 encoded = context->mem_pointers[0] + (address & 0x1FFF); | 1842 encoded = context->mem_pointers[0] + (address & 0x1FFF); |
1858 } else if(address >= 0x8000 && context->mem_pointers[1]) { | 1843 } else if(address >= 0x8000 && context->mem_pointers[1]) { |
1921 encoded = NULL; | 1906 encoded = NULL; |
1922 } | 1907 } |
1923 } | 1908 } |
1924 } | 1909 } |
1925 | 1910 |
1926 void init_x86_z80_opts(x86_z80_options * options) | 1911 void init_x86_z80_opts(z80_options * options, memmap_chunk * chunks, uint32_t num_chunks) |
1927 { | 1912 { |
1913 memset(options, 0, sizeof(*options)); | |
1914 | |
1915 options->gen.address_size = SZ_W; | |
1916 options->gen.address_mask = 0xFFFF; | |
1917 options->gen.max_address = 0x10000; | |
1918 options->gen.bus_cycles = 3; | |
1919 options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers); | |
1920 options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags); | |
1921 | |
1928 options->flags = 0; | 1922 options->flags = 0; |
1929 options->regs[Z80_B] = BH; | 1923 options->regs[Z80_B] = BH; |
1930 options->regs[Z80_C] = RBX; | 1924 options->regs[Z80_C] = RBX; |
1931 options->regs[Z80_D] = CH; | 1925 options->regs[Z80_D] = CH; |
1932 options->regs[Z80_E] = RCX; | 1926 options->regs[Z80_E] = RCX; |
1944 options->regs[Z80_HL] = RAX; | 1938 options->regs[Z80_HL] = RAX; |
1945 options->regs[Z80_SP] = R9; | 1939 options->regs[Z80_SP] = R9; |
1946 options->regs[Z80_AF] = -1; | 1940 options->regs[Z80_AF] = -1; |
1947 options->regs[Z80_IX] = RDX; | 1941 options->regs[Z80_IX] = RDX; |
1948 options->regs[Z80_IY] = R8; | 1942 options->regs[Z80_IY] = R8; |
1949 size_t size = 1024 * 1024; | 1943 |
1950 options->cur_code = alloc_code(&size); | 1944 options->bank_reg = R15; |
1951 options->code_end = options->cur_code + size; | 1945 options->bank_pointer = R12; |
1952 options->ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000); | 1946 |
1947 options->gen.context_reg = RSI; | |
1948 options->gen.cycles = RBP; | |
1949 options->gen.limit = RDI; | |
1950 options->gen.scratch1 = R13; | |
1951 options->gen.scratch2 = R14; | |
1952 | |
1953 options->gen.native_code_map = malloc(sizeof(native_map_slot)); | |
1954 memset(options->gen.native_code_map, 0, sizeof(native_map_slot)); | |
1955 options->gen.deferred = NULL; | |
1956 options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000); | |
1953 memset(options->ram_inst_sizes, 0, sizeof(uint8_t) * 0x2000); | 1957 memset(options->ram_inst_sizes, 0, sizeof(uint8_t) * 0x2000); |
1954 options->deferred = NULL; | 1958 |
1955 } | 1959 code_info *code = &options->gen.code; |
1956 | 1960 init_code_info(code); |
1957 void init_z80_context(z80_context * context, x86_z80_options * options) | 1961 |
1962 options->save_context_scratch = code->cur; | |
1963 mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); | |
1964 mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_W); | |
1965 | |
1966 options->gen.save_context = code->cur; | |
1967 for (int i = 0; i <= Z80_A; i++) | |
1968 { | |
1969 int reg; | |
1970 uint8_t size; | |
1971 if (i < Z80_I) { | |
1972 int reg = i /2 + Z80_BC; | |
1973 size = SZ_W; | |
1974 | |
1975 } else { | |
1976 reg = i; | |
1977 size = SZ_B; | |
1978 } | |
1979 if (options->regs[reg] >= 0) { | |
1980 mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size); | |
1981 } | |
1982 } | |
1983 if (options->regs[Z80_SP] >= 0) { | |
1984 mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W); | |
1985 } | |
1986 mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D); | |
1987 mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D); | |
//NOTE(review): this chunk is a two-column hg diff rendering (old revision line
//numbers on the left of each '|', new revision on the right); the lines below
//are new-revision-only additions. This is the TAIL of the z80 options init
//function (its header is outside this chunk): it emits the shared glue code
//used by all translated Z80 code — context save/load, the sync-cycle limit
//handler, the 8-bit memory access helpers, and the interrupt entry stub.
1988 mov_rrdisp(code, options->bank_reg, options->gen.context_reg, offsetof(z80_context, bank_reg), SZ_W); | |
1989 mov_rrdisp(code, options->bank_pointer, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, SZ_PTR); | |
1990 | |
//load_context_scratch: entry point that first restores scratch1/scratch2 from
//the context before falling through into the normal load_context sequence
//(mirror of save_context_scratch, generated above this chunk — not visible here).
1991 options->load_context_scratch = code->cur; | |
1992 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W); | |
1993 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch2, SZ_W); | |
//load_context: copy each Z80 register that has a native-register mapping
//(options->regs[reg] >= 0) from the context struct into its host register.
//Registers below Z80_I are loaded pairwise as 16-bit values (BC/DE/HL...).
1994 options->gen.load_context = code->cur; | |
1995 for (int i = 0; i <= Z80_A; i++) | |
1996 { | |
1997 int reg; | |
1998 uint8_t size; | |
1999 if (i < Z80_I) { | |
//BUG(review): the 'int' here declares a NEW 'reg' that shadows the outer one,
//so for i < Z80_I the outer 'reg' used below stays uninitialized (UB when read
//at the regs[reg] lookups). Drop the 'int' so this assigns the outer variable.
2000 int reg = i /2 + Z80_BC; | |
2001 size = SZ_W; | |
2002 | |
2003 } else { | |
2004 reg = i; | |
2005 size = SZ_B; | |
2006 } | |
2007 if (options->regs[reg] >= 0) { | |
2008 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + i, options->regs[reg], size); | |
2009 } | |
2010 } | |
2011 if (options->regs[Z80_SP] >= 0) { | |
2012 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W); | |
2013 } | |
2014 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D); | |
2015 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D); | |
2016 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, bank_reg), options->bank_reg, SZ_W); | |
2017 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, options->bank_pointer, SZ_PTR); | |
2018 | |
//handle_cycle_limit: if current_cycle (gen.cycles) has reached sync_cycle,
//record pc=0 as "not at instruction boundary", save the context, stash the two
//return addresses (read/write helper + translated code) in the context, pop
//the callee-saved registers pushed by z80_run's prologue, and return to the C
//caller of z80_run. Otherwise skip straight back to the translated code.
2019 options->gen.handle_cycle_limit = code->cur; | |
2020 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); | |
2021 code_ptr no_sync = code->cur+1; | |
2022 jcc(code, CC_B, no_sync); | |
2023 mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, pc), SZ_W); | |
2024 call(code, options->save_context_scratch); | |
2025 pop_r(code, RAX); //return address in read/write func | |
2026 pop_r(code, RBX); //return address in translated code | |
2027 sub_ir(code, 5, RAX, SZ_PTR); //adjust return address to point to the call that got us here | |
2028 mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); | |
2029 mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR); | |
2030 //restore callee saved registers | |
//BUG(review): the six pop_r calls below are missing semicolons — this WIP
//revision will not compile as-is.
2031 pop_r(code, R15) | |
2032 pop_r(code, R14) | |
2033 pop_r(code, R13) | |
2034 pop_r(code, R12) | |
2035 pop_r(code, RBP) | |
2036 pop_r(code, RBX) | |
//NOTE(review): the established rel8 patch convention elsewhere in this file is
//'*jmp_off = dst - (jmp_off+1)'; this should presumably be
//'*no_sync = code->cur - (no_sync+1)' — as written the displacement is off by
//one. Confirm against the jcc patching helpers in gen_x86.
2037 *no_sync = code->cur - no_sync; | |
2038 //return to caller of z80_run | |
2039 retn(code); | |
2040 | |
//Generate the memory-map-driven 8-bit read/write helpers; write_8 also yields
//a no-post-increment variant used by the interrupt stack pushes below.
2041 options->gen.read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, NULL); | |
2042 options->gen.write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); | |
2043 | |
//handle_cycle_limit_int: interrupt dispatch. If current_cycle has reached
//int_cycle, take the interrupt: clear IFF1/IFF2, charge 7 cycles, push the
//return address (scratch1) onto the Z80 stack one byte at a time, then jump
//to the translated code for the mode-1 vector 0x38.
2044 options->gen.handle_cycle_limit_int = code->cur; | |
2045 cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D); | |
2046 code_ptr skip_int = code->cur+1; | |
2047 jcc(code, CC_B, skip_int); | |
2048 //set limit to the cycle limit | |
2049 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D); | |
2050 //disable interrupts | |
//BUG(review): 'move_irdisp' on the next two lines is presumably a typo for
//'mov_irdisp' (cf. the pc store above) — will not link as written.
2051 move_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B); | |
2052 move_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B); | |
2053 cycles(&options->gen, 7); | |
2054 //save return address (in scratch1) to Z80 stack | |
2055 sub_ir(code, 2, options->regs[Z80_SP], SZ_W); | |
2056 mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W); | |
2057 //we need to do check_cycles and cycles outside of the write_8 call | |
2058 //so that the stack has the correct depth if we need to return to C | |
2059 //for a synchronization | |
2060 check_cycles(&options->gen); | |
2061 cycles(&options->gen, 3); | |
2062 //save word to write before call to write_8_noinc | |
2063 push_r(code, options->gen.scratch1); | |
2064 call(code, options->write_8_noinc); | |
2065 //restore word to write | |
2066 pop_r(code, options->gen.scratch1); | |
2067 //write high byte to SP+1 | |
2068 mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W); | |
2069 add_ir(code, 1, options->gen.scratch2, SZ_W); | |
2070 shr_ir(code, 8, options->gen.scratch1, SZ_W); | |
2071 check_cycles(&options->gen); | |
2072 cycles(&options->gen, 3); | |
2073 call(code, options->write_8_noinc); | |
2074 //dispose of return address as we'll be jumping somewhere else | |
//BUG(review): pop_r is missing its first 'code' argument (cf. pop_r(code, ...)
//above) — will not compile as written.
2075 pop_r(options->gen.scratch2); | |
2076 //TODO: Support interrupt mode 0 and 2 | |
2077 mov_ir(code, 0x38, options->gen.scratch1, SZ_W); | |
2078 call(code, (code_ptr)z80_native_addr); | |
2079 jmp_r(code, options->gen.scratch1); | |
//NOTE(review): the skip_int forward branch from above is never patched in the
//visible code, and nothing follows the jmp_r — confirm the fall-through path
//for the not-yet-time-for-interrupt case before this function's closing brace.
2080 } | |
2081 | |
//Initialize a z80_context for use with a previously-built z80_options.
//Zeroes the entire context, then allocates the static code map (a base pointer
//plus 0x2000 int32_t offset slots) used to translate Z80 addresses to native
//code addresses.
//NOTE(review): the malloc results are not checked before being dereferenced.
//The diff rendering elides the remainder of this function (source lines
//1963-1976 / 2088-2101), so only the prefix is visible in this chunk.
2082 void init_z80_context(z80_context * context, z80_options * options) | |
1958 { | 2083 { |
1959 memset(context, 0, sizeof(*context)); | 2084 memset(context, 0, sizeof(*context)); |
1960 context->static_code_map = malloc(sizeof(*context->static_code_map)); | 2085 context->static_code_map = malloc(sizeof(*context->static_code_map)); |
1961 context->static_code_map->base = NULL; | 2086 context->static_code_map->base = NULL; |
1962 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); | 2087 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); |
//Patch the translated code for Z80 'address' so it calls bp_handler first.
//The patch site is overwritten to load the Z80 PC into scratch1 and call a
//lazily-generated shared stub (bp_stub). The stub saves the Z80 context, calls
//the C handler (context in RDI, address in RSI, updated context returned in
//RAX), reloads the context, re-runs the cycle-limit check the patched-over
//prologue would have performed, and finally jumps back into the body of the
//translated instruction.
//NOTE(review): this chunk is an hg diff rendering (old column 1977-2027 | new
//column 2102-2152). Diff context lines 1990-1996 / 2115-2121 are elided, so
//the code-buffer bookkeeping and the initial stub emission between alloc_code
//and the check_int_size measurement are not visible here.
1977 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) | 2102 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) | 
1978 { | 2103 { | 
1979 static uint8_t * bp_stub = NULL; | 2104 static uint8_t * bp_stub = NULL; | 
1980 uint8_t * native = z80_get_native_address_trans(context, address); | 2105 uint8_t * native = z80_get_native_address_trans(context, address); | 
1981 uint8_t * start_native = native; | 2106 uint8_t * start_native = native; | 
//NOTE(review): the new-revision column references opts->gen.scratch1 here, but
//'opts' is only declared below, inside the if (!bp_stub) block — a scope bug
//in this WIP revision; the declaration needs hoisting. Confirm against the
//full file.
1982 native = mov_ir(native, address, SCRATCH1, SZ_W); | 2107 native = mov_ir(native, address, opts->gen.scratch1, SZ_W); | 
1983 if (!bp_stub) { | 2108 if (!bp_stub) { | 
1984 x86_z80_options * opts = context->options; | 2109 z80_options * opts = context->options; | 
1985 uint8_t * dst = opts->cur_code; | 2110 uint8_t * dst = opts->cur_code; | 
1986 uint8_t * dst_end = opts->code_end; | 2111 uint8_t * dst_end = opts->code_end; | 
//Make sure there is room in the code buffer for the stub; otherwise grab a
//fresh 1MB chunk (continuation elided by the diff).
1987 if (dst_end - dst < 128) { | 2112 if (dst_end - dst < 128) { | 
1988 size_t size = 1024*1024; | 2113 size_t size = 1024*1024; | 
1989 dst = alloc_code(&size); | 2114 dst = alloc_code(&size); | 
//check_int_size: byte length of the sequence emitted into the elided region
//above — presumably the standard cycle-check-with-interrupt preamble that the
//breakpoint patch overwrote; used below to compute resume addresses. Confirm
//against the elided lines.
1997 int check_int_size = dst-bp_stub; | 2122 int check_int_size = dst-bp_stub; | 
1998 dst = bp_stub; | 2123 dst = bp_stub; | 
1999 | 2124 | 
2000 //Save context and call breakpoint handler | 2125 //Save context and call breakpoint handler | 
2001 dst = call(dst, (uint8_t *)z80_save_context); | 2126 dst = call(dst, (uint8_t *)z80_save_context); | 
2002 dst = push_r(dst, SCRATCH1); | 2127 dst = push_r(dst, opts->gen.scratch1); | 
2003 dst = mov_rr(dst, CONTEXT, RDI, SZ_Q); | 2128 dst = mov_rr(dst, opts->gen.context_reg, RDI, SZ_Q); | 
2004 dst = mov_rr(dst, SCRATCH1, RSI, SZ_W); | 2129 dst = mov_rr(dst, opts->gen.scratch1, RSI, SZ_W); | 
2005 dst = call(dst, bp_handler); | 2130 dst = call(dst, bp_handler); | 
2006 dst = mov_rr(dst, RAX, CONTEXT, SZ_Q); | 2131 dst = mov_rr(dst, RAX, opts->gen.context_reg, SZ_Q); | 
2007 //Restore context | 2132 //Restore context | 
2008 dst = call(dst, (uint8_t *)z80_load_context); | 2133 dst = call(dst, (uint8_t *)z80_load_context); | 
2009 dst = pop_r(dst, SCRATCH1); | 2134 dst = pop_r(dst, opts->gen.scratch1); | 
2010 //do prologue stuff | 2135 //do prologue stuff | 
//Re-run the cycle check: if the limit was reached, adjust the return address
//saved on the host stack (skip the patched-over preamble of check_int_size
//bytes minus the patch length) and tail-jump to the interrupt/limit handler.
2011 dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D); | 2136 dst = cmp_rr(dst, opts->gen.cycles, opts->gen.limit, SZ_D); | 
2012 uint8_t * jmp_off = dst+1; | 2137 uint8_t * jmp_off = dst+1; | 
2013 dst = jcc(dst, CC_NC, dst + 7); | 2138 dst = jcc(dst, CC_NC, dst + 7); | 
2014 dst = pop_r(dst, SCRATCH1); | 2139 dst = pop_r(dst, opts->gen.scratch1); | 
2015 dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q); | 2140 dst = add_ir(dst, check_int_size - (native-start_native), opts->gen.scratch1, SZ_Q); | 
2016 dst = push_r(dst, SCRATCH1); | 2141 dst = push_r(dst, opts->gen.scratch1); | 
2017 dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int); | 2142 dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int); | 
//Back-patch the rel8 displacement of the jcc above to land here.
2018 *jmp_off = dst - (jmp_off+1); | 2143 *jmp_off = dst - (jmp_off+1); | 
2019 //jump back to body of translated instruction | 2144 //jump back to body of translated instruction | 
2020 dst = pop_r(dst, SCRATCH1); | 2145 dst = pop_r(dst, opts->gen.scratch1); | 
2021 dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q); | 2146 dst = add_ir(dst, check_int_size - (native-start_native), opts->gen.scratch1, SZ_Q); | 
2022 dst = jmp_r(dst, SCRATCH1); | 2147 dst = jmp_r(dst, opts->gen.scratch1); | 
2023 opts->cur_code = dst; | 2148 opts->cur_code = dst; | 
2024 } else { | 2149 } else { | 
//Stub already exists: just finish the patch with a call to it.
2025 native = call(native, bp_stub); | 2150 native = call(native, bp_stub); | 
2026 } | 2151 } | 
2027 } | 2152 } | 