Mercurial > repos > blastem
comparison z80_to_x86.c @ 253:3b34deba4ca0
Squashing some bugs introduced when I switched the register assignments for z80_write_byte around.
author | Mike Pavone <pavone@retrodev.com> |
---|---|
date | Mon, 29 Apr 2013 22:32:21 -0700 |
parents | 63b9a500a00b |
children | 64feb6b67244 |
comparison
equal
deleted
inserted
replaced
252:63b9a500a00b | 253:3b34deba4ca0 |
---|---|
196 return dst; | 196 return dst; |
197 } | 197 } |
198 | 198 |
199 uint8_t * z80_save_result(uint8_t * dst, z80inst * inst) | 199 uint8_t * z80_save_result(uint8_t * dst, z80inst * inst) |
200 { | 200 { |
201 if (z80_size(inst) == SZ_B) { | 201 switch(inst->addr_mode & 0x1f) |
202 dst = call(dst, (uint8_t *)z80_write_byte); | 202 { |
203 } else { | 203 case Z80_REG_INDIRECT: |
204 dst = call(dst, (uint8_t *)z80_write_word_lowfirst); | 204 case Z80_IMMED_INDIRECT: |
205 case Z80_IX_DISPLACE: | |
206 case Z80_IY_DISPLACE: | |
207 if (z80_size(inst) == SZ_B) { | |
208 dst = call(dst, (uint8_t *)z80_write_byte); | |
209 } else { | |
210 dst = call(dst, (uint8_t *)z80_write_word_lowfirst); | |
211 } | |
205 } | 212 } |
206 return dst; | 213 return dst; |
207 } | 214 } |
208 | 215 |
209 enum { | 216 enum { |
305 break; | 312 break; |
306 case Z80_PUSH: | 313 case Z80_PUSH: |
307 dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); | 314 dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); |
308 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 315 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
309 if (inst->reg == Z80_AF) { | 316 if (inst->reg == Z80_AF) { |
310 dst = mov_rdisp8r(dst, CONTEXT, zf_off(ZF_S), SCRATCH2, SZ_B); | 317 dst = mov_rdisp8r(dst, CONTEXT, zf_off(ZF_S), SCRATCH1, SZ_B); |
311 dst = shl_ir(dst, 1, SCRATCH2, SZ_B); | 318 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); |
312 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_Z), SCRATCH2, SZ_B); | 319 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_Z), SCRATCH1, SZ_B); |
313 dst = shl_ir(dst, 2, SCRATCH2, SZ_B); | 320 dst = shl_ir(dst, 2, SCRATCH1, SZ_B); |
314 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_H), SCRATCH2, SZ_B); | 321 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_H), SCRATCH1, SZ_B); |
315 dst = shl_ir(dst, 2, SCRATCH2, SZ_B); | 322 dst = shl_ir(dst, 2, SCRATCH1, SZ_B); |
316 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_PV), SCRATCH2, SZ_B); | 323 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_PV), SCRATCH1, SZ_B); |
317 dst = shl_ir(dst, 1, SCRATCH2, SZ_B); | 324 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); |
318 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_N), SCRATCH2, SZ_B); | 325 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_N), SCRATCH1, SZ_B); |
319 dst = shl_ir(dst, 1, SCRATCH2, SZ_B); | 326 dst = shl_ir(dst, 1, SCRATCH1, SZ_B); |
320 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_C), SCRATCH2, SZ_B); | 327 dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_C), SCRATCH1, SZ_B); |
321 dst = shl_ir(dst, 8, SCRATCH2, SZ_W); | 328 dst = shl_ir(dst, 8, SCRATCH1, SZ_W); |
322 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B); | 329 dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B); |
323 } else { | 330 } else { |
324 dst = translate_z80_reg(inst, &src_op, dst, opts); | 331 dst = translate_z80_reg(inst, &src_op, dst, opts); |
325 dst = mov_rr(dst, src_op.base, SCRATCH2, SZ_W); | 332 dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_W); |
326 } | 333 } |
327 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 334 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); |
328 dst = call(dst, (uint8_t *)z80_write_word_highfirst); | 335 dst = call(dst, (uint8_t *)z80_write_word_highfirst); |
329 //no call to save_z80_reg needed since there's no chance we'll use only | 336 //no call to save_z80_reg needed since there's no chance we'll use only |
330 //the upper half of a register pair | 337 //the upper half of a register pair |
331 break; | 338 break; |
332 case Z80_POP: | 339 case Z80_POP: |
372 //them efficiently a word at a time | 379 //them efficiently a word at a time |
373 for (int f = ZF_C; f < ZF_NUM; f+=2) { | 380 for (int f = ZF_C; f < ZF_NUM; f+=2) { |
374 dst = mov_rdisp8r(dst, CONTEXT, zf_off(f), SCRATCH1, SZ_W); | 381 dst = mov_rdisp8r(dst, CONTEXT, zf_off(f), SCRATCH1, SZ_W); |
375 dst = mov_rdisp8r(dst, CONTEXT, zaf_off(f), SCRATCH2, SZ_W); | 382 dst = mov_rdisp8r(dst, CONTEXT, zaf_off(f), SCRATCH2, SZ_W); |
376 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zaf_off(f), SZ_W); | 383 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zaf_off(f), SZ_W); |
377 dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zf_off(f), SZ_W); | 384 dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zf_off(f), SZ_W); |
378 } | 385 } |
379 } else { | 386 } else { |
380 dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); | 387 dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); |
381 } | 388 } |
382 } else { | 389 } else { |
383 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 390 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); |
384 dst = call(dst, (uint8_t *)z80_read_byte); | 391 dst = call(dst, (uint8_t *)z80_read_byte); |
385 dst = mov_rr(dst, opts->regs[inst->reg], SCRATCH2, SZ_B); | 392 dst = xchg_rr(dst, opts->regs[inst->reg], SCRATCH1, SZ_B); |
386 dst = mov_rr(dst, SCRATCH1, opts->regs[inst->reg], SZ_B); | 393 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); |
387 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | |
388 dst = call(dst, (uint8_t *)z80_write_byte); | 394 dst = call(dst, (uint8_t *)z80_write_byte); |
389 dst = zcycles(dst, 1); | 395 dst = zcycles(dst, 1); |
390 uint8_t high_reg = z80_high_reg(inst->reg); | 396 uint8_t high_reg = z80_high_reg(inst->reg); |
391 uint8_t use_reg; | 397 uint8_t use_reg; |
392 //even though some of the upper halves can be used directly | 398 //even though some of the upper halves can be used directly |
395 use_reg = opts->regs[inst->reg]; | 401 use_reg = opts->regs[inst->reg]; |
396 dst = ror_ir(dst, 8, use_reg, SZ_W); | 402 dst = ror_ir(dst, 8, use_reg, SZ_W); |
397 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 403 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); |
398 dst = add_ir(dst, 1, SCRATCH1, SZ_W); | 404 dst = add_ir(dst, 1, SCRATCH1, SZ_W); |
399 dst = call(dst, (uint8_t *)z80_read_byte); | 405 dst = call(dst, (uint8_t *)z80_read_byte); |
400 dst = mov_rr(dst, use_reg, SCRATCH2, SZ_B); | 406 dst = xchg_rr(dst, use_reg, SCRATCH1, SZ_B); |
401 dst = mov_rr(dst, SCRATCH1, use_reg, SZ_B); | 407 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); |
402 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 408 dst = add_ir(dst, 1, SCRATCH2, SZ_W); |
403 dst = add_ir(dst, 1, SCRATCH1, SZ_W); | |
404 dst = call(dst, (uint8_t *)z80_write_byte); | 409 dst = call(dst, (uint8_t *)z80_write_byte); |
405 //restore reg to normal rotation | 410 //restore reg to normal rotation |
406 dst = ror_ir(dst, 8, use_reg, SZ_W); | 411 dst = ror_ir(dst, 8, use_reg, SZ_W); |
407 dst = zcycles(dst, 2); | 412 dst = zcycles(dst, 2); |
408 } | 413 } |
1009 *no_jump_off = dst - (no_jump_off+1); | 1014 *no_jump_off = dst - (no_jump_off+1); |
1010 break; | 1015 break; |
1011 case Z80_CALL: { | 1016 case Z80_CALL: { |
1012 dst = zcycles(dst, 11);//T States: 4,3,4 | 1017 dst = zcycles(dst, 11);//T States: 4,3,4 |
1013 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1018 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1014 dst = mov_ir(dst, address + 3, SCRATCH2, SZ_W); | 1019 dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); |
1015 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 1020 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); |
1016 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 | 1021 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 |
1017 if (inst->immed < 0x4000) { | 1022 if (inst->immed < 0x4000) { |
1018 uint8_t * call_dst = z80_get_native_address(context, inst->immed); | 1023 uint8_t * call_dst = z80_get_native_address(context, inst->immed); |
1019 if (!call_dst) { | 1024 if (!call_dst) { |
1020 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); | 1025 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); |
1056 } | 1061 } |
1057 uint8_t *no_call_off = dst+1; | 1062 uint8_t *no_call_off = dst+1; |
1058 dst = jcc(dst, cond, dst+2); | 1063 dst = jcc(dst, cond, dst+2); |
1059 dst = zcycles(dst, 1);//Last of the above T states takes an extra cycle in the true case | 1064 dst = zcycles(dst, 1);//Last of the above T states takes an extra cycle in the true case |
1060 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1065 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1061 dst = mov_ir(dst, address + 3, SCRATCH2, SZ_W); | 1066 dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); |
1062 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 1067 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); |
1063 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 | 1068 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 |
1064 if (inst->immed < 0x4000) { | 1069 if (inst->immed < 0x4000) { |
1065 uint8_t * call_dst = z80_get_native_address(context, inst->immed); | 1070 uint8_t * call_dst = z80_get_native_address(context, inst->immed); |
1066 if (!call_dst) { | 1071 if (!call_dst) { |
1067 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); | 1072 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); |
1123 case Z80_RETN:*/ | 1128 case Z80_RETN:*/ |
1124 case Z80_RST: { | 1129 case Z80_RST: { |
1125 //RST is basically CALL to an address in page 0 | 1130 //RST is basically CALL to an address in page 0 |
1126 dst = zcycles(dst, 5);//T States: 5 | 1131 dst = zcycles(dst, 5);//T States: 5 |
1127 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); | 1132 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); |
1128 dst = mov_ir(dst, address + 3, SCRATCH2, SZ_W); | 1133 dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); |
1129 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); | 1134 dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); |
1130 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 | 1135 dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 |
1131 uint8_t * call_dst = z80_get_native_address(context, inst->immed); | 1136 uint8_t * call_dst = z80_get_native_address(context, inst->immed); |
1132 if (!call_dst) { | 1137 if (!call_dst) { |
1133 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); | 1138 opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); |
1134 //fake address to force large displacement | 1139 //fake address to force large displacement |
1206 } | 1211 } |
1207 if (!map->base) { | 1212 if (!map->base) { |
1208 map->base = native_address; | 1213 map->base = native_address; |
1209 } | 1214 } |
1210 map->offsets[address] = native_address - map->base; | 1215 map->offsets[address] = native_address - map->base; |
1211 for(--size; size; --size, orig_address++) { | 1216 for(--size, orig_address++; size; --size, orig_address++) { |
1212 address = orig_address; | 1217 address = orig_address; |
1213 if (address < 0x4000) { | 1218 if (address < 0x4000) { |
1214 address &= 0x1FFF; | 1219 address &= 0x1FFF; |
1215 map = context->static_code_map; | 1220 map = context->static_code_map; |
1216 } else if (address >= 0x8000) { | 1221 } else if (address >= 0x8000) { |