comparison m68k_core_x86.c @ 574:1594525e2157

More 68K core refactoring to both reduce the amount of code and better split the host-CPU-specific parts from the generic parts
author Michael Pavone <pavone@retrodev.com>
date Mon, 03 Mar 2014 22:17:20 -0800
parents c90fc522e7e3
children a6f2db4df70d
573:29d99db6f55d 574:1594525e2157
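The bulk of this changeset replaces open-coded register-allocation checks with small accessor helpers, so individual instruction translators no longer need to know whether a given 68K register is held in a native x86 register or spilled to the context structure. As a condensed illustration of the pattern (not itself part of the diff), the recurring five-line check on the old side collapses to a single call on the new side:

	/* before: each call site tested whether the address register is register-allocated */
	if (opts->aregs[op->params.regs.pri] >= 0) {
		mov_rr(code, opts->aregs[op->params.regs.pri], opts->gen.scratch1, SZ_D);
	} else {
		mov_rdispr(code, opts->gen.context_reg, reg_offset(op), opts->gen.scratch1, SZ_D);
	}

	/* after: the check lives in exactly one place */
	areg_to_native(opts, op->params.regs.pri, opts->gen.scratch1);

The same treatment covers data registers (dreg_to_native), stores back to 68K registers (native_to_areg, native_to_dreg), immediates (ldi_areg), and address arithmetic (addi_areg, subi_areg, calc_areg_displace, calc_index_disp8).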
12 #include <stdio.h> 12 #include <stdio.h>
13 #include <stddef.h> 13 #include <stddef.h>
14 #include <stdlib.h> 14 #include <stdlib.h>
15 #include <string.h> 15 #include <string.h>
16 16
17 #define BUS 4
18 #define PREDEC_PENALTY 2
19
20 #define CYCLES RAX 17 #define CYCLES RAX
21 #define LIMIT RBP 18 #define LIMIT RBP
22 #define CONTEXT RSI 19 #define CONTEXT RSI
23 #define SCRATCH1 RCX 20 #define SCRATCH1 RCX
24 21
33 FLAG_N, 30 FLAG_N,
34 FLAG_Z, 31 FLAG_Z,
35 FLAG_V, 32 FLAG_V,
36 FLAG_C 33 FLAG_C
37 }; 34 };
38
39 char disasm_buf[1024];
40
41 m68k_context * sync_components(m68k_context * context, uint32_t address);
42
43 void m68k_invalid();
44 void bcd_add();
45 void bcd_sub();
46
47 35
48 void set_flag(m68k_options * opts, uint8_t val, uint8_t flag) 36 void set_flag(m68k_options * opts, uint8_t val, uint8_t flag)
49 { 37 {
50 if (opts->flag_regs[flag] >= 0) { 38 if (opts->flag_regs[flag] >= 0) {
51 mov_ir(&opts->gen.code, val, opts->flag_regs[flag], SZ_B); 39 mov_ir(&opts->gen.code, val, opts->flag_regs[flag], SZ_B);
177 cmp_rrdisp(code, opts->flag_regs[flag1], opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B); 165 cmp_rrdisp(code, opts->flag_regs[flag1], opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
178 } else { 166 } else {
179 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->gen.scratch1, SZ_B); 167 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->gen.scratch1, SZ_B);
180 cmp_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B); 168 cmp_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
181 } 169 }
170 }
171
172 void areg_to_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
173 {
174 if (opts->aregs[reg] >= 0) {
175 mov_rr(&opts->gen.code, opts->aregs[reg], native_reg, SZ_D);
176 } else {
177 mov_rdispr(&opts->gen.code, opts->gen.context_reg, areg_offset(reg), native_reg, SZ_D);
178 }
179 }
180
181 void dreg_to_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
182 {
183 if (opts->dregs[reg] >= 0) {
184 mov_rr(&opts->gen.code, opts->dregs[reg], native_reg, SZ_D);
185 } else {
186 mov_rdispr(&opts->gen.code, opts->gen.context_reg, dreg_offset(reg), native_reg, SZ_D);
187 }
188 }
189
190 void areg_to_native_sx(m68k_options *opts, uint8_t reg, uint8_t native_reg)
191 {
192 if (opts->aregs[reg] >= 0) {
193 movsx_rr(&opts->gen.code, opts->aregs[reg], native_reg, SZ_W, SZ_D);
194 } else {
195 movsx_rdispr(&opts->gen.code, opts->gen.context_reg, areg_offset(reg), native_reg, SZ_W, SZ_D);
196 }
197 }
198
199 void dreg_to_native_sx(m68k_options *opts, uint8_t reg, uint8_t native_reg)
200 {
201 if (opts->dregs[reg] >= 0) {
202 movsx_rr(&opts->gen.code, opts->dregs[reg], native_reg, SZ_W, SZ_D);
203 } else {
204 movsx_rdispr(&opts->gen.code, opts->gen.context_reg, dreg_offset(reg), native_reg, SZ_W, SZ_D);
205 }
206 }
207
208 void native_to_areg(m68k_options *opts, uint8_t native_reg, uint8_t reg)
209 {
210 if (opts->aregs[reg] >= 0) {
211 mov_rr(&opts->gen.code, native_reg, opts->aregs[reg], SZ_D);
212 } else {
213 mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, areg_offset(reg), SZ_D);
214 }
215 }
216
217 void native_to_dreg(m68k_options *opts, uint8_t native_reg, uint8_t reg)
218 {
219 if (opts->dregs[reg] >= 0) {
220 mov_rr(&opts->gen.code, native_reg, opts->dregs[reg], SZ_D);
221 } else {
222 mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, dreg_offset(reg), SZ_D);
223 }
224 }
225
226 void ldi_areg(m68k_options *opts, int32_t value, uint8_t reg)
227 {
228 if (opts->aregs[reg] >= 0) {
229 mov_ir(&opts->gen.code, value, opts->aregs[reg], SZ_D);
230 } else {
231 mov_irdisp(&opts->gen.code, value, opts->gen.context_reg, areg_offset(reg), SZ_D);
232 }
233 }
234
235 void ldi_native(m68k_options *opts, int32_t value, uint8_t reg)
236 {
237 mov_ir(&opts->gen.code, value, reg, SZ_D);
238 }
239
240 void addi_areg(m68k_options *opts, int32_t val, uint8_t reg)
241 {
242 if (opts->aregs[reg] >= 0) {
243 add_ir(&opts->gen.code, val, opts->aregs[reg], SZ_D);
244 } else {
245 add_irdisp(&opts->gen.code, val, opts->gen.context_reg, areg_offset(reg), SZ_D);
246 }
247 }
248
249 void subi_areg(m68k_options *opts, int32_t val, uint8_t reg)
250 {
251 if (opts->aregs[reg] >= 0) {
252 sub_ir(&opts->gen.code, val, opts->aregs[reg], SZ_D);
253 } else {
254 sub_irdisp(&opts->gen.code, val, opts->gen.context_reg, areg_offset(reg), SZ_D);
255 }
256 }
257
258 void add_areg_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
259 {
260 if (opts->aregs[reg] >= 0) {
261 add_rr(&opts->gen.code, opts->aregs[reg], native_reg, SZ_D);
262 } else {
263 add_rdispr(&opts->gen.code, opts->gen.context_reg, areg_offset(reg), native_reg, SZ_D);
264 }
265 }
266
267 void add_dreg_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
268 {
269 if (opts->dregs[reg] >= 0) {
270 add_rr(&opts->gen.code, opts->dregs[reg], native_reg, SZ_D);
271 } else {
272 add_rdispr(&opts->gen.code, opts->gen.context_reg, dreg_offset(reg), native_reg, SZ_D);
273 }
274 }
275
276 void calc_areg_displace(m68k_options *opts, m68k_op_info *op, uint8_t native_reg)
277 {
278 areg_to_native(opts, op->params.regs.pri, native_reg);
279 add_ir(&opts->gen.code, op->params.regs.displacement, native_reg, SZ_D);
280 }
281
282 void calc_index_disp8(m68k_options *opts, m68k_op_info *op, uint8_t native_reg)
283 {
284 uint8_t sec_reg = (op->params.regs.sec >> 1) & 0x7;
285 if (op->params.regs.sec & 1) {
286 if (op->params.regs.sec & 0x10) {
287 add_areg_native(opts, sec_reg, native_reg);
288 } else {
289 add_dreg_native(opts, sec_reg, native_reg);
290 }
291 } else {
292 uint8_t other_reg = native_reg == opts->gen.scratch1 ? opts->gen.scratch2 : opts->gen.scratch1;
293 if (op->params.regs.sec & 0x10) {
294 areg_to_native_sx(opts, sec_reg, other_reg);
295 } else {
296 dreg_to_native_sx(opts, sec_reg, other_reg);
297 }
298 add_rr(&opts->gen.code, other_reg, native_reg, SZ_D);
299 }
300 if (op->params.regs.displacement) {
301 add_ir(&opts->gen.code, op->params.regs.displacement, native_reg, SZ_D);
302 }
303 }
304
305 void calc_areg_index_disp8(m68k_options *opts, m68k_op_info *op, uint8_t native_reg)
306 {
307 areg_to_native(opts, op->params.regs.pri, native_reg);
308 calc_index_disp8(opts, op, native_reg);
182 } 309 }
183 310
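The index helpers above centralize decoding of the 68K brief extension word stored in params.regs.sec: bits 1-3 give the index register number, bit 4 (0x10) selects an address register rather than a data register, and bit 0 distinguishes a full 32-bit index from a sign-extended 16-bit one. When a word index is used, calc_index_disp8 borrows whichever scratch register it is not writing to. A sketch (call site hypothetical) of how a translator computes a (d8,An,Xn) effective address with these helpers:

	/* address of (d8,An,Xn) ends up in scratch1 */
	calc_areg_index_disp8(opts, &inst->src, opts->gen.scratch1);
	/* which is simply: load the base register ... */
	areg_to_native(opts, inst->src.params.regs.pri, opts->gen.scratch1);
	/* ... then add the (possibly sign-extended) index and the 8-bit displacement */
	calc_index_disp8(opts, &inst->src, opts->gen.scratch1);

The word-index path clobbering the other scratch register is why some callers later in this file save scratch1 around these calls.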
184 void translate_m68k_op(m68kinst * inst, x86_ea * ea, m68k_options * opts, uint8_t dst) 311 void translate_m68k_op(m68kinst * inst, x86_ea * ea, m68k_options * opts, uint8_t dst)
185 { 312 {
186 code_info *code = &opts->gen.code; 313 code_info *code = &opts->gen.code;
228 } 355 }
229 dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (op->params.regs.pri == 7 ? 2 :1)); 356 dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (op->params.regs.pri == 7 ? 2 :1));
230 if (!dst) { 357 if (!dst) {
231 cycles(&opts->gen, PREDEC_PENALTY); 358 cycles(&opts->gen, PREDEC_PENALTY);
232 } 359 }
233 if (opts->aregs[op->params.regs.pri] >= 0) { 360 subi_areg(opts, dec_amount, op->params.regs.pri);
234 sub_ir(code, dec_amount, opts->aregs[op->params.regs.pri], SZ_D);
235 } else {
236 sub_irdisp(code, dec_amount, opts->gen.context_reg, reg_offset(op), SZ_D);
237 }
238 case MODE_AREG_INDIRECT: 361 case MODE_AREG_INDIRECT:
239 case MODE_AREG_POSTINC: 362 case MODE_AREG_POSTINC:
240 if (opts->aregs[op->params.regs.pri] >= 0) { 363 areg_to_native(opts, op->params.regs.pri, opts->gen.scratch1);
241 mov_rr(code, opts->aregs[op->params.regs.pri], opts->gen.scratch1, SZ_D);
242 } else {
243 mov_rdispr(code, opts->gen.context_reg, reg_offset(op), opts->gen.scratch1, SZ_D);
244 }
245 m68k_read_size(opts, inst->extra.size); 364 m68k_read_size(opts, inst->extra.size);
246 365
247 if (dst) { 366 if (dst) {
248 if (inst->src.addr_mode == MODE_AREG_PREDEC) { 367 if (inst->src.addr_mode == MODE_AREG_PREDEC) {
249 //restore src operand to opts->gen.scratch2 368 //restore src operand to opts->gen.scratch2
250 pop_r(code, opts->gen.scratch2); 369 pop_r(code, opts->gen.scratch2);
251 } else { 370 } else {
252 //save reg value in opts->gen.scratch2 so we can use it to save the result in memory later 371 //save reg value in opts->gen.scratch2 so we can use it to save the result in memory later
253 if (opts->aregs[op->params.regs.pri] >= 0) { 372 areg_to_native(opts, op->params.regs.pri, opts->gen.scratch2);
254 mov_rr(code, opts->aregs[op->params.regs.pri], opts->gen.scratch2, SZ_D);
255 } else {
256 mov_rdispr(code, opts->gen.context_reg, reg_offset(op), opts->gen.scratch2, SZ_D);
257 }
258 } 373 }
259 } 374 }
260 375
261 if (op->addr_mode == MODE_AREG_POSTINC) { 376 if (op->addr_mode == MODE_AREG_POSTINC) {
262 inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (op->params.regs.pri == 7 ? 2 : 1)); 377 inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (op->params.regs.pri == 7 ? 2 : 1));
263 if (opts->aregs[op->params.regs.pri] >= 0) { 378 addi_areg(opts, inc_amount, op->params.regs.pri);
264 add_ir(code, inc_amount, opts->aregs[op->params.regs.pri], SZ_D);
265 } else {
266 add_irdisp(code, inc_amount, opts->gen.context_reg, reg_offset(op), SZ_D);
267 }
268 } 379 }
269 ea->mode = MODE_REG_DIRECT; 380 ea->mode = MODE_REG_DIRECT;
270 ea->base = (!dst && inst->dst.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) ? opts->gen.scratch2 : opts->gen.scratch1; 381 ea->base = (!dst && inst->dst.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) ? opts->gen.scratch2 : opts->gen.scratch1;
271 break; 382 break;
272 case MODE_AREG_DISPLACE: 383 case MODE_AREG_DISPLACE:
273 cycles(&opts->gen, BUS); 384 cycles(&opts->gen, BUS);
274 if (opts->aregs[op->params.regs.pri] >= 0) { 385 calc_areg_displace(opts, op, opts->gen.scratch1);
275 mov_rr(code, opts->aregs[op->params.regs.pri], opts->gen.scratch1, SZ_D);
276 } else {
277 mov_rdispr(code, opts->gen.context_reg, reg_offset(op), opts->gen.scratch1, SZ_D);
278 }
279 add_ir(code, op->params.regs.displacement, opts->gen.scratch1, SZ_D);
280 if (dst) { 386 if (dst) {
281 push_r(code, opts->gen.scratch1); 387 push_r(code, opts->gen.scratch1);
282 } 388 }
283 m68k_read_size(opts, inst->extra.size); 389 m68k_read_size(opts, inst->extra.size);
284 if (dst) { 390 if (dst) {
288 ea->mode = MODE_REG_DIRECT; 394 ea->mode = MODE_REG_DIRECT;
289 ea->base = opts->gen.scratch1; 395 ea->base = opts->gen.scratch1;
290 break; 396 break;
291 case MODE_AREG_INDEX_DISP8: 397 case MODE_AREG_INDEX_DISP8:
292 cycles(&opts->gen, 6); 398 cycles(&opts->gen, 6);
293 if (opts->aregs[op->params.regs.pri] >= 0) { 399 calc_areg_index_disp8(opts, op, opts->gen.scratch1);
294 mov_rr(code, opts->aregs[op->params.regs.pri], opts->gen.scratch1, SZ_D);
295 } else {
296 mov_rdispr(code, opts->gen.context_reg, reg_offset(op), opts->gen.scratch1, SZ_D);
297 }
298 sec_reg = (op->params.regs.sec >> 1) & 0x7;
299 if (op->params.regs.sec & 1) {
300 if (op->params.regs.sec & 0x10) {
301 if (opts->aregs[sec_reg] >= 0) {
302 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
303 } else {
304 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
305 }
306 } else {
307 if (opts->dregs[sec_reg] >= 0) {
308 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
309 } else {
310 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
311 }
312 }
313 } else {
314 if (op->params.regs.sec & 0x10) {
315 if (opts->aregs[sec_reg] >= 0) {
316 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
317 } else {
318 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
319 }
320 } else {
321 if (opts->dregs[sec_reg] >= 0) {
322 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
323 } else {
324 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
325 }
326 }
327 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
328 }
329 if (op->params.regs.displacement) {
330 add_ir(code, op->params.regs.displacement, opts->gen.scratch1, SZ_D);
331 }
332 if (dst) { 400 if (dst) {
333 push_r(code, opts->gen.scratch1); 401 push_r(code, opts->gen.scratch1);
334 } 402 }
335 m68k_read_size(opts, inst->extra.size); 403 m68k_read_size(opts, inst->extra.size);
336 if (dst) { 404 if (dst) {
355 ea->base = opts->gen.scratch1; 423 ea->base = opts->gen.scratch1;
356 break; 424 break;
357 case MODE_PC_INDEX_DISP8: 425 case MODE_PC_INDEX_DISP8:
358 cycles(&opts->gen, 6); 426 cycles(&opts->gen, 6);
359 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D); 427 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
360 sec_reg = (op->params.regs.sec >> 1) & 0x7; 428 calc_index_disp8(opts, op, opts->gen.scratch1);
361 if (op->params.regs.sec & 1) {
362 if (op->params.regs.sec & 0x10) {
363 if (opts->aregs[sec_reg] >= 0) {
364 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
365 } else {
366 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
367 }
368 } else {
369 if (opts->dregs[sec_reg] >= 0) {
370 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
371 } else {
372 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
373 }
374 }
375 } else {
376 if (op->params.regs.sec & 0x10) {
377 if (opts->aregs[sec_reg] >= 0) {
378 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
379 } else {
380 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
381 }
382 } else {
383 if (opts->dregs[sec_reg] >= 0) {
384 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
385 } else {
386 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
387 }
388 }
389 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
390 }
391 if (op->params.regs.displacement) {
392 add_ir(code, op->params.regs.displacement, opts->gen.scratch1, SZ_D);
393 }
394 if (dst) { 429 if (dst) {
395 push_r(code, opts->gen.scratch1); 430 push_r(code, opts->gen.scratch1);
396 } 431 }
397 m68k_read_size(opts, inst->extra.size); 432 m68k_read_size(opts, inst->extra.size);
398 if (dst) { 433 if (dst) {
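m68k_save_result below follows the calling convention the memory-write helpers use throughout this file: the destination address is computed into scratch2, the value to store is moved into scratch1, and then the size-appropriate write routine is emitted. A minimal sketch of that flow for a (d16,An) destination, using only calls that appear in this diff:

	calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);      /* address -> scratch2 */
	mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);  /* value   -> scratch1 */
	m68k_write_size(opts, inst->extra.size);                       /* emits the size-appropriate write call */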
448 void m68k_save_result(m68kinst * inst, m68k_options * opts) 483 void m68k_save_result(m68kinst * inst, m68k_options * opts)
449 { 484 {
450 code_info *code = &opts->gen.code; 485 code_info *code = &opts->gen.code;
451 if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG) { 486 if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG) {
452 if (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->src.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) { 487 if (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->src.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) {
453 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 488 areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
454 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
455 } else {
456 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
457 }
458 } 489 }
459 switch (inst->extra.size) 490 switch (inst->extra.size)
460 { 491 {
461 case OPSIZE_BYTE: 492 case OPSIZE_BYTE:
462 call(code, opts->write_8); 493 call(code, opts->write_8);
529 set_flag_cond(opts, CC_S, FLAG_N); 560 set_flag_cond(opts, CC_S, FLAG_N);
530 } 561 }
531 break; 562 break;
532 case MODE_AREG_PREDEC: 563 case MODE_AREG_PREDEC:
533 dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1)); 564 dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
534 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 565 subi_areg(opts, dec_amount, inst->dst.params.regs.pri);
535 sub_ir(code, dec_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
536 } else {
537 sub_irdisp(code, dec_amount, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
538 }
539 case MODE_AREG_INDIRECT: 566 case MODE_AREG_INDIRECT:
540 case MODE_AREG_POSTINC: 567 case MODE_AREG_POSTINC:
541 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 568 areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
542 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
543 } else {
544 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
545 }
546 if (src.mode == MODE_REG_DIRECT) { 569 if (src.mode == MODE_REG_DIRECT) {
547 if (src.base != opts->gen.scratch1) { 570 if (src.base != opts->gen.scratch1) {
548 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size); 571 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
549 } 572 }
550 } else if (src.mode == MODE_REG_DISPLACE8) { 573 } else if (src.mode == MODE_REG_DISPLACE8) {
559 } 582 }
560 m68k_write_size(opts, inst->extra.size); 583 m68k_write_size(opts, inst->extra.size);
561 584
562 if (inst->dst.addr_mode == MODE_AREG_POSTINC) { 585 if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
563 inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1)); 586 inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
564 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 587 addi_areg(opts, inc_amount, inst->dst.params.regs.pri);
565 add_ir(code, inc_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
566 } else {
567 add_irdisp(code, inc_amount, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
568 }
569 } 588 }
570 break; 589 break;
571 case MODE_AREG_DISPLACE: 590 case MODE_AREG_DISPLACE:
572 cycles(&opts->gen, BUS); 591 cycles(&opts->gen, BUS);
573 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 592 calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);
574 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
575 } else {
576 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
577 }
578 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
579 if (src.mode == MODE_REG_DIRECT) { 593 if (src.mode == MODE_REG_DIRECT) {
580 if (src.base != opts->gen.scratch1) { 594 if (src.base != opts->gen.scratch1) {
581 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size); 595 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
582 } 596 }
583 } else if (src.mode == MODE_REG_DISPLACE8) { 597 } else if (src.mode == MODE_REG_DISPLACE8) {
592 } 606 }
593 m68k_write_size(opts, inst->extra.size); 607 m68k_write_size(opts, inst->extra.size);
594 break; 608 break;
595 case MODE_AREG_INDEX_DISP8: 609 case MODE_AREG_INDEX_DISP8:
596 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct 610 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
597 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 611 //calc_areg_index_disp8 will clobber scratch1 when a 16-bit index is used
598 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D); 612 if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
599 } else { 613 push_r(code, opts->gen.scratch1);
600 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D); 614 }
601 } 615 calc_areg_index_disp8(opts, &inst->dst, opts->gen.scratch2);
602 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7; 616 if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
603 if (inst->dst.params.regs.sec & 1) { 617 pop_r(code, opts->gen.scratch1);
604 if (inst->dst.params.regs.sec & 0x10) {
605 if (opts->aregs[sec_reg] >= 0) {
606 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
607 } else {
608 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
609 }
610 } else {
611 if (opts->dregs[sec_reg] >= 0) {
612 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
613 } else {
614 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
615 }
616 }
617 } else {
618 if (src.base == opts->gen.scratch1) {
619 push_r(code, opts->gen.scratch1);
620 }
621 if (inst->dst.params.regs.sec & 0x10) {
622 if (opts->aregs[sec_reg] >= 0) {
623 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
624 } else {
625 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
626 }
627 } else {
628 if (opts->dregs[sec_reg] >= 0) {
629 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
630 } else {
631 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
632 }
633 }
634 add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
635 if (src.base == opts->gen.scratch1) {
636 pop_r(code, opts->gen.scratch1);
637 }
638 }
639 if (inst->dst.params.regs.displacement) {
640 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
641 } 618 }
642 if (src.mode == MODE_REG_DIRECT) { 619 if (src.mode == MODE_REG_DIRECT) {
643 if (src.base != opts->gen.scratch1) { 620 if (src.base != opts->gen.scratch1) {
644 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size); 621 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
645 } 622 }
675 m68k_write_size(opts, inst->extra.size); 652 m68k_write_size(opts, inst->extra.size);
676 break; 653 break;
677 case MODE_PC_INDEX_DISP8: 654 case MODE_PC_INDEX_DISP8:
678 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct 655 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
679 mov_ir(code, inst->address, opts->gen.scratch2, SZ_D); 656 mov_ir(code, inst->address, opts->gen.scratch2, SZ_D);
680 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7; 657 if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
681 if (inst->dst.params.regs.sec & 1) { 658 push_r(code, opts->gen.scratch1);
682 if (inst->dst.params.regs.sec & 0x10) { 659 }
683 if (opts->aregs[sec_reg] >= 0) { 660 calc_index_disp8(opts, &inst->dst, opts->gen.scratch2);
684 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D); 661 if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
685 } else { 662 pop_r(code, opts->gen.scratch1);
686 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
687 }
688 } else {
689 if (opts->dregs[sec_reg] >= 0) {
690 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
691 } else {
692 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
693 }
694 }
695 } else {
696 if (src.base == opts->gen.scratch1) {
697 push_r(code, opts->gen.scratch1);
698 }
699 if (inst->dst.params.regs.sec & 0x10) {
700 if (opts->aregs[sec_reg] >= 0) {
701 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
702 } else {
703 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
704 }
705 } else {
706 if (opts->dregs[sec_reg] >= 0) {
707 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
708 } else {
709 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
710 }
711 }
712 add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
713 if (src.base == opts->gen.scratch1) {
714 pop_r(code, opts->gen.scratch1);
715 }
716 }
717 if (inst->dst.params.regs.displacement) {
718 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
719 } 663 }
720 if (src.mode == MODE_REG_DIRECT) { 664 if (src.mode == MODE_REG_DIRECT) {
721 if (src.base != opts->gen.scratch1) { 665 if (src.base != opts->gen.scratch1) {
722 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size); 666 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
723 } 667 }
778 int8_t dir; 722 int8_t dir;
779 switch (inst->dst.addr_mode) 723 switch (inst->dst.addr_mode)
780 { 724 {
781 case MODE_AREG_INDIRECT: 725 case MODE_AREG_INDIRECT:
782 case MODE_AREG_PREDEC: 726 case MODE_AREG_PREDEC:
783 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 727 areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
784 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
785 } else {
786 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
787 }
788 break; 728 break;
789 case MODE_AREG_DISPLACE: 729 case MODE_AREG_DISPLACE:
790 early_cycles += BUS; 730 early_cycles += BUS;
791 reg = opts->gen.scratch2; 731 calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);
792 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
793 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
794 } else {
795 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
796 }
797 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
798 break; 732 break;
799 case MODE_AREG_INDEX_DISP8: 733 case MODE_AREG_INDEX_DISP8:
800 early_cycles += 6; 734 early_cycles += 6;
801 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 735 calc_areg_index_disp8(opts, &inst->dst, opts->gen.scratch2);
802 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
803 } else {
804 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
805 }
806 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
807 if (inst->dst.params.regs.sec & 1) {
808 if (inst->dst.params.regs.sec & 0x10) {
809 if (opts->aregs[sec_reg] >= 0) {
810 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
811 } else {
812 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
813 }
814 } else {
815 if (opts->dregs[sec_reg] >= 0) {
816 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
817 } else {
818 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
819 }
820 }
821 } else {
822 if (inst->dst.params.regs.sec & 0x10) {
823 if (opts->aregs[sec_reg] >= 0) {
824 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
825 } else {
826 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
827 }
828 } else {
829 if (opts->dregs[sec_reg] >= 0) {
830 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
831 } else {
832 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
833 }
834 }
835 add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
836 }
837 if (inst->dst.params.regs.displacement) {
838 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
839 }
840 break; 736 break;
841 case MODE_PC_DISPLACE: 737 case MODE_PC_DISPLACE:
842 early_cycles += BUS; 738 early_cycles += BUS;
843 mov_ir(code, inst->dst.params.regs.displacement + inst->address+2, opts->gen.scratch2, SZ_D); 739 mov_ir(code, inst->dst.params.regs.displacement + inst->address+2, opts->gen.scratch2, SZ_D);
844 break; 740 break;
845 case MODE_PC_INDEX_DISP8: 741 case MODE_PC_INDEX_DISP8:
846 early_cycles += 6; 742 early_cycles += 6;
847 mov_ir(code, inst->address+2, opts->gen.scratch2, SZ_D); 743 mov_ir(code, inst->address+2, opts->gen.scratch2, SZ_D);
848 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7; 744 calc_index_disp8(opts, &inst->dst, opts->gen.scratch2);
849 if (inst->dst.params.regs.sec & 1) {
850 if (inst->dst.params.regs.sec & 0x10) {
851 if (opts->aregs[sec_reg] >= 0) {
852 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
853 } else {
854 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
855 }
856 } else {
857 if (opts->dregs[sec_reg] >= 0) {
858 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
859 } else {
860 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
861 }
862 }
863 } else {
864 if (inst->dst.params.regs.sec & 0x10) {
865 if (opts->aregs[sec_reg] >= 0) {
866 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
867 } else {
868 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
869 }
870 } else {
871 if (opts->dregs[sec_reg] >= 0) {
872 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
873 } else {
874 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
875 }
876 }
877 add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
878 }
879 if (inst->dst.params.regs.displacement) {
880 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
881 }
882 break;
883 case MODE_ABSOLUTE: 745 case MODE_ABSOLUTE:
884 early_cycles += 4; 746 early_cycles += 4;
885 case MODE_ABSOLUTE_SHORT: 747 case MODE_ABSOLUTE_SHORT:
886 early_cycles += 4; 748 early_cycles += 4;
887 mov_ir(code, inst->dst.params.immed, opts->gen.scratch2, SZ_D); 749 mov_ir(code, inst->dst.params.immed, opts->gen.scratch2, SZ_D);
904 if (inst->dst.addr_mode == MODE_AREG_PREDEC) { 766 if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
905 sub_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2, SZ_D); 767 sub_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2, SZ_D);
906 } 768 }
907 push_r(code, opts->gen.scratch2); 769 push_r(code, opts->gen.scratch2);
908 if (reg > 7) { 770 if (reg > 7) {
909 if (opts->aregs[reg-8] >= 0) { 771 areg_to_native(opts, reg-8, opts->gen.scratch1);
910 mov_rr(code, opts->aregs[reg-8], opts->gen.scratch1, inst->extra.size);
911 } else {
912 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * (reg-8), opts->gen.scratch1, inst->extra.size);
913 }
914 } else { 772 } else {
915 if (opts->dregs[reg] >= 0) { 773 dreg_to_native(opts, reg, opts->gen.scratch1);
916 mov_rr(code, opts->dregs[reg], opts->gen.scratch1, inst->extra.size);
917 } else {
918 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * (reg), opts->gen.scratch1, inst->extra.size);
919 }
920 } 774 }
921 if (inst->extra.size == OPSIZE_LONG) { 775 if (inst->extra.size == OPSIZE_LONG) {
922 call(code, opts->write_32_lowfirst); 776 call(code, opts->write_32_lowfirst);
923 } else { 777 } else {
924 call(code, opts->write_16); 778 call(code, opts->write_16);
928 add_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2, SZ_D); 782 add_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2, SZ_D);
929 } 783 }
930 } 784 }
931 } 785 }
932 if (inst->dst.addr_mode == MODE_AREG_PREDEC) { 786 if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
933 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 787 native_to_areg(opts, opts->gen.scratch2, inst->dst.params.regs.pri);
934 mov_rr(code, opts->gen.scratch2, opts->aregs[inst->dst.params.regs.pri], SZ_D);
935 } else {
936 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
937 }
938 } 788 }
939 } else { 789 } else {
940 //mem to reg 790 //mem to reg
941 early_cycles = 4; 791 early_cycles = 4;
942 switch (inst->src.addr_mode) 792 switch (inst->src.addr_mode)
943 { 793 {
944 case MODE_AREG_INDIRECT: 794 case MODE_AREG_INDIRECT:
945 case MODE_AREG_POSTINC: 795 case MODE_AREG_POSTINC:
946 if (opts->aregs[inst->src.params.regs.pri] >= 0) { 796 areg_to_native(opts, inst->src.params.regs.pri, opts->gen.scratch1);
947 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
948 } else {
949 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
950 }
951 break; 797 break;
952 case MODE_AREG_DISPLACE: 798 case MODE_AREG_DISPLACE:
953 early_cycles += BUS; 799 early_cycles += BUS;
954 reg = opts->gen.scratch2; 800 reg = opts->gen.scratch2;
955 if (opts->aregs[inst->src.params.regs.pri] >= 0) { 801 calc_areg_displace(opts, &inst->src, opts->gen.scratch1);
956 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
957 } else {
958 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
959 }
960 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
961 break; 802 break;
962 case MODE_AREG_INDEX_DISP8: 803 case MODE_AREG_INDEX_DISP8:
963 early_cycles += 6; 804 early_cycles += 6;
964 if (opts->aregs[inst->src.params.regs.pri] >= 0) { 805 calc_areg_index_disp8(opts, &inst->src, opts->gen.scratch1);
965 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
966 } else {
967 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
968 }
969 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
970 if (inst->src.params.regs.sec & 1) {
971 if (inst->src.params.regs.sec & 0x10) {
972 if (opts->aregs[sec_reg] >= 0) {
973 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
974 } else {
975 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
976 }
977 } else {
978 if (opts->dregs[sec_reg] >= 0) {
979 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
980 } else {
981 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
982 }
983 }
984 } else {
985 if (inst->src.params.regs.sec & 0x10) {
986 if (opts->aregs[sec_reg] >= 0) {
987 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
988 } else {
989 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
990 }
991 } else {
992 if (opts->dregs[sec_reg] >= 0) {
993 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
994 } else {
995 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
996 }
997 }
998 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
999 }
1000 if (inst->src.params.regs.displacement) {
1001 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1002 }
1003 break; 806 break;
1004 case MODE_PC_DISPLACE: 807 case MODE_PC_DISPLACE:
1005 early_cycles += BUS; 808 early_cycles += BUS;
1006 mov_ir(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.scratch1, SZ_D); 809 mov_ir(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.scratch1, SZ_D);
1007 break; 810 break;
1008 case MODE_PC_INDEX_DISP8: 811 case MODE_PC_INDEX_DISP8:
1009 early_cycles += 6; 812 early_cycles += 6;
1010 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D); 813 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
1011 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7; 814 calc_index_disp8(opts, &inst->src, opts->gen.scratch1);
1012 if (inst->src.params.regs.sec & 1) {
1013 if (inst->src.params.regs.sec & 0x10) {
1014 if (opts->aregs[sec_reg] >= 0) {
1015 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1016 } else {
1017 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1018 }
1019 } else {
1020 if (opts->dregs[sec_reg] >= 0) {
1021 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1022 } else {
1023 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1024 }
1025 }
1026 } else {
1027 if (inst->src.params.regs.sec & 0x10) {
1028 if (opts->aregs[sec_reg] >= 0) {
1029 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1030 } else {
1031 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1032 }
1033 } else {
1034 if (opts->dregs[sec_reg] >= 0) {
1035 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1036 } else {
1037 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1038 }
1039 }
1040 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1041 }
1042 if (inst->src.params.regs.displacement) {
1043 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1044 }
1045 break; 815 break;
1046 case MODE_ABSOLUTE: 816 case MODE_ABSOLUTE:
1047 early_cycles += 4; 817 early_cycles += 4;
1048 case MODE_ABSOLUTE_SHORT: 818 case MODE_ABSOLUTE_SHORT:
1049 early_cycles += 4; 819 early_cycles += 4;
1065 } 835 }
1066 if (inst->extra.size == OPSIZE_WORD) { 836 if (inst->extra.size == OPSIZE_WORD) {
1067 movsx_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_W, SZ_D); 837 movsx_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_W, SZ_D);
1068 } 838 }
1069 if (reg > 7) { 839 if (reg > 7) {
1070 if (opts->aregs[reg-8] >= 0) { 840 native_to_areg(opts, opts->gen.scratch1, reg-8);
1071 mov_rr(code, opts->gen.scratch1, opts->aregs[reg-8], SZ_D);
1072 } else {
1073 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * (reg-8), SZ_D);
1074 }
1075 } else { 841 } else {
1076 if (opts->dregs[reg] >= 0) { 842 native_to_dreg(opts, opts->gen.scratch1, reg);
1077 mov_rr(code, opts->gen.scratch1, opts->dregs[reg], SZ_D);
1078 } else {
1079 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * (reg), SZ_D);
1080 }
1081 } 843 }
1082 pop_r(code, opts->gen.scratch1); 844 pop_r(code, opts->gen.scratch1);
1083 add_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch1, SZ_D); 845 add_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch1, SZ_D);
1084 } 846 }
1085 } 847 }
1086 if (inst->src.addr_mode == MODE_AREG_POSTINC) { 848 if (inst->src.addr_mode == MODE_AREG_POSTINC) {
1087 if (opts->aregs[inst->src.params.regs.pri] >= 0) { 849 native_to_areg(opts, opts->gen.scratch1, inst->src.params.regs.pri);
1088 mov_rr(code, opts->gen.scratch1, opts->aregs[inst->src.params.regs.pri], SZ_D);
1089 } else {
1090 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->src)), SZ_D);
1091 }
1092 } 850 }
1093 } 851 }
1094 //prefetch 852 //prefetch
1095 cycles(&opts->gen, 4); 853 cycles(&opts->gen, 4);
1096 } 854 }
1139 set_flag_cond(opts, CC_Z, FLAG_Z); 897 set_flag_cond(opts, CC_Z, FLAG_Z);
1140 set_flag_cond(opts, CC_S, FLAG_N); 898 set_flag_cond(opts, CC_S, FLAG_N);
1141 //M68K EXT only operates on registers so no need for a call to save result here 899 //M68K EXT only operates on registers so no need for a call to save result here
1142 } 900 }
1143 901
1144 void translate_m68k_lea(m68k_options * opts, m68kinst * inst)
1145 {
1146 code_info *code = &opts->gen.code;
1147 int8_t dst_reg = native_reg(&(inst->dst), opts), sec_reg;
1148 switch(inst->src.addr_mode)
1149 {
1150 case MODE_AREG_INDIRECT:
1151 cycles(&opts->gen, BUS);
1152 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1153 if (dst_reg >= 0) {
1154 mov_rr(code, opts->aregs[inst->src.params.regs.pri], dst_reg, SZ_D);
1155 } else {
1156 mov_rrdisp(code, opts->aregs[inst->src.params.regs.pri], opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
1157 }
1158 } else {
1159 if (dst_reg >= 0) {
1160 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, dst_reg, SZ_D);
1161 } else {
1162 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
1163 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
1164 }
1165 }
1166 break;
1167 case MODE_AREG_DISPLACE:
1168 cycles(&opts->gen, 8);
1169 if (dst_reg >= 0) {
1170 if (inst->src.params.regs.pri != inst->dst.params.regs.pri) {
1171 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1172 mov_rr(code, opts->aregs[inst->src.params.regs.pri], dst_reg, SZ_D);
1173 } else {
1174 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), dst_reg, SZ_D);
1175 }
1176 }
1177 add_ir(code, inst->src.params.regs.displacement, dst_reg, SZ_D);
1178 } else {
1179 if (inst->src.params.regs.pri != inst->dst.params.regs.pri) {
1180 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1181 mov_rrdisp(code, opts->aregs[inst->src.params.regs.pri], opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
1182 } else {
1183 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1184 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
1185 }
1186 }
1187 add_irdisp(code, inst->src.params.regs.displacement, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
1188 }
1189 break;
1190 case MODE_AREG_INDEX_DISP8:
1191 cycles(&opts->gen, 12);
1192 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1193 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch2, SZ_D);
1194 } else {
1195 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch2, SZ_D);
1196 }
1197 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1198 if (inst->src.params.regs.sec & 1) {
1199 if (inst->src.params.regs.sec & 0x10) {
1200 if (opts->aregs[sec_reg] >= 0) {
1201 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
1202 } else {
1203 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
1204 }
1205 } else {
1206 if (opts->dregs[sec_reg] >= 0) {
1207 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
1208 } else {
1209 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
1210 }
1211 }
1212 } else {
1213 if (inst->src.params.regs.sec & 0x10) {
1214 if (opts->aregs[sec_reg] >= 0) {
1215 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
1216 } else {
1217 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
1218 }
1219 } else {
1220 if (opts->dregs[sec_reg] >= 0) {
1221 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
1222 } else {
1223 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
1224 }
1225 }
1226 add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
1227 }
1228 if (inst->src.params.regs.displacement) {
1229 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch2, SZ_D);
1230 }
1231 if (dst_reg >= 0) {
1232 mov_rr(code, opts->gen.scratch2, dst_reg, SZ_D);
1233 } else {
1234 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
1235 }
1236 break;
1237 case MODE_PC_DISPLACE:
1238 cycles(&opts->gen, 8);
1239 if (dst_reg >= 0) {
1240 mov_ir(code, inst->src.params.regs.displacement + inst->address+2, dst_reg, SZ_D);
1241 } else {
1242 mov_irdisp(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
1243 }
1244 break;
1245 case MODE_PC_INDEX_DISP8:
1246 cycles(&opts->gen, BUS*3);
1247 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
1248 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1249 if (inst->src.params.regs.sec & 1) {
1250 if (inst->src.params.regs.sec & 0x10) {
1251 if (opts->aregs[sec_reg] >= 0) {
1252 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1253 } else {
1254 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1255 }
1256 } else {
1257 if (opts->dregs[sec_reg] >= 0) {
1258 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1259 } else {
1260 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1261 }
1262 }
1263 } else {
1264 if (inst->src.params.regs.sec & 0x10) {
1265 if (opts->aregs[sec_reg] >= 0) {
1266 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1267 } else {
1268 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1269 }
1270 } else {
1271 if (opts->dregs[sec_reg] >= 0) {
1272 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1273 } else {
1274 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1275 }
1276 }
1277 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1278 }
1279 if (inst->src.params.regs.displacement) {
1280 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1281 }
1282 if (dst_reg >= 0) {
1283 mov_rr(code, opts->gen.scratch1, dst_reg, SZ_D);
1284 } else {
1285 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
1286 }
1287 break;
1288 case MODE_ABSOLUTE:
1289 case MODE_ABSOLUTE_SHORT:
1290 cycles(&opts->gen, (inst->src.addr_mode == MODE_ABSOLUTE) ? BUS * 3 : BUS * 2);
1291 if (dst_reg >= 0) {
1292 mov_ir(code, inst->src.params.immed, dst_reg, SZ_D);
1293 } else {
1294 mov_irdisp(code, inst->src.params.immed, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
1295 }
1296 break;
1297 default:
1298 m68k_disasm(inst, disasm_buf);
1299 printf("%X: %s\naddress mode %d not implemented (lea src)\n", inst->address, disasm_buf, inst->src.addr_mode);
1300 exit(1);
1301 }
1302 }
1303
1304 void translate_m68k_pea(m68k_options * opts, m68kinst * inst)
1305 {
1306 code_info *code = &opts->gen.code;
1307 uint8_t sec_reg;
1308 switch(inst->src.addr_mode)
1309 {
1310 case MODE_AREG_INDIRECT:
1311 cycles(&opts->gen, BUS);
1312 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1313 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1314 } else {
1315 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
1316 }
1317 break;
1318 case MODE_AREG_DISPLACE:
1319 cycles(&opts->gen, 8);
1320 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1321 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1322 } else {
1323 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1324 }
1325 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1326 break;
1327 case MODE_AREG_INDEX_DISP8:
1328 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
1329 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1330 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1331 } else {
1332 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1333 }
1334 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1335 if (inst->src.params.regs.sec & 1) {
1336 if (inst->src.params.regs.sec & 0x10) {
1337 if (opts->aregs[sec_reg] >= 0) {
1338 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1339 } else {
1340 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1341 }
1342 } else {
1343 if (opts->dregs[sec_reg] >= 0) {
1344 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1345 } else {
1346 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1347 }
1348 }
1349 } else {
1350 if (inst->src.params.regs.sec & 0x10) {
1351 if (opts->aregs[sec_reg] >= 0) {
1352 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1353 } else {
1354 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1355 }
1356 } else {
1357 if (opts->dregs[sec_reg] >= 0) {
1358 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1359 } else {
1360 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1361 }
1362 }
1363 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1364 }
1365 if (inst->src.params.regs.displacement) {
1366 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1367 }
1368 break;
1369 case MODE_PC_DISPLACE:
1370 cycles(&opts->gen, 8);
1371 mov_ir(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.scratch1, SZ_D);
1372 break;
1373 case MODE_ABSOLUTE:
1374 case MODE_ABSOLUTE_SHORT:
1375 cycles(&opts->gen, (inst->src.addr_mode == MODE_ABSOLUTE) ? BUS * 3 : BUS * 2);
1376 mov_ir(code, inst->src.params.immed, opts->gen.scratch1, SZ_D);
1377 break;
1378 default:
1379 m68k_disasm(inst, disasm_buf);
1380 printf("%X: %s\naddress mode %d not implemented (lea src)\n", inst->address, disasm_buf, inst->src.addr_mode);
1381 exit(1);
1382 }
1383 sub_ir(code, 4, opts->aregs[7], SZ_D);
1384 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1385 call(code, opts->write_32_lowfirst);
1386 }
1387
1388 void translate_m68k_bsr(m68k_options * opts, m68kinst * inst)
1389 {
1390 code_info *code = &opts->gen.code;
1391 int32_t disp = inst->src.params.immed;
1392 uint32_t after = inst->address + (inst->variant == VAR_BYTE ? 2 : 4);
1393 //TODO: Add cycles in the right place relative to pushing the return address on the stack
1394 cycles(&opts->gen, 10);
1395 mov_ir(code, after, opts->gen.scratch1, SZ_D);
1396 sub_ir(code, 4, opts->aregs[7], SZ_D);
1397 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1398 call(code, opts->write_32_highfirst);
1399 code_ptr dest_addr = get_native_address(opts->gen.native_code_map, (inst->address+2) + disp);
1400 if (!dest_addr) {
1401 opts->gen.deferred = defer_address(opts->gen.deferred, (inst->address+2) + disp, code->cur + 1);
1402 //dummy address to be replaced later
1403 dest_addr = code->cur + 256;
1404 }
1405 jmp(code, dest_addr);
1406 }
1407
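In translate_m68k_bcc below, the unconditional (COND_TRUE) path now calls a jump_m68k_abs helper rather than repeating the deferred-address lookup inline. That helper is defined outside this file, but a rough reconstruction from the inline sequence it replaces would look like:

	/* sketch only: mirrors the removed inline code, the actual definition lives elsewhere */
	void jump_m68k_abs(m68k_options * opts, uint32_t address)
	{
		code_info *code = &opts->gen.code;
		code_ptr dest_addr = get_native_address(opts->gen.native_code_map, address);
		if (!dest_addr) {
			//record the location so it can be patched once the target is translated
			opts->gen.deferred = defer_address(opts->gen.deferred, address, code->cur + 1);
			//dummy destination that guarantees a 4-byte displacement for the patch
			dest_addr = code->cur + 256;
		}
		jmp(code, dest_addr);
	}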
1408 uint8_t m68k_eval_cond(m68k_options * opts, uint8_t cc) 902 uint8_t m68k_eval_cond(m68k_options * opts, uint8_t cc)
1409 { 903 {
1410 uint8_t cond = CC_NZ; 904 uint8_t cond = CC_NZ;
1411 switch (cc) 905 switch (cc)
1412 { 906 {
1456 { 950 {
1457 code_info *code = &opts->gen.code; 951 code_info *code = &opts->gen.code;
1458 cycles(&opts->gen, 10);//TODO: Adjust this for branch not taken case 952 cycles(&opts->gen, 10);//TODO: Adjust this for branch not taken case
1459 int32_t disp = inst->src.params.immed; 953 int32_t disp = inst->src.params.immed;
1460 uint32_t after = inst->address + 2; 954 uint32_t after = inst->address + 2;
1461 code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + disp);
1462 if (inst->extra.cond == COND_TRUE) { 955 if (inst->extra.cond == COND_TRUE) {
1463 if (!dest_addr) { 956 jump_m68k_abs(opts, after + disp);
1464 opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 1); 957 } else {
1465 //dummy address to be replaced later, make sure it generates a 4-byte displacement 958 code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + disp);
1466 dest_addr = code->cur + 256;
1467 }
1468 jmp(code, dest_addr);
1469 } else {
1470 uint8_t cond = m68k_eval_cond(opts, inst->extra.cond); 959 uint8_t cond = m68k_eval_cond(opts, inst->extra.cond);
1471 if (!dest_addr) { 960 if (!dest_addr) {
1472 opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 2); 961 opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 2);
1473 //dummy address to be replaced later, make sure it generates a 4-byte displacement 962 //dummy address to be replaced later, make sure it generates a 4-byte displacement
1474 dest_addr = code->cur + 256; 963 dest_addr = code->cur + 256;
1516 mov_irdisp(code, 0xFF, dst_op.base, dst_op.disp, SZ_B); 1005 mov_irdisp(code, 0xFF, dst_op.base, dst_op.disp, SZ_B);
1517 } 1006 }
1518 *end_off = code->cur - (end_off+1); 1007 *end_off = code->cur - (end_off+1);
1519 } 1008 }
1520 m68k_save_result(inst, opts); 1009 m68k_save_result(inst, opts);
1521 }
1522
1523 void translate_m68k_jmp_jsr(m68k_options * opts, m68kinst * inst)
1524 {
1525 uint8_t is_jsr = inst->op == M68K_JSR;
1526 code_info *code = &opts->gen.code;
1527 code_ptr dest_addr;
1528 uint8_t sec_reg;
1529 uint32_t after;
1530 uint32_t m68k_addr;
1531 switch(inst->src.addr_mode)
1532 {
1533 case MODE_AREG_INDIRECT:
1534 cycles(&opts->gen, BUS*2);
1535 if (is_jsr) {
1536 mov_ir(code, inst->address + 2, opts->gen.scratch1, SZ_D);
1537 sub_ir(code, 4, opts->aregs[7], SZ_D);
1538 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1539 call(code, opts->write_32_highfirst);
1540 }
1541 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1542 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1543 } else {
1544 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
1545 }
1546 call(code, opts->native_addr);
1547 jmp_r(code, opts->gen.scratch1);
1548 break;
1549 case MODE_AREG_DISPLACE:
1550 cycles(&opts->gen, BUS*2);
1551 if (is_jsr) {
1552 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1553 sub_ir(code, 4, opts->aregs[7], SZ_D);
1554 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1555 call(code, opts->write_32_highfirst);
1556 }
1557 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1558 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1559 } else {
1560 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
1561 }
1562 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1563 call(code, opts->native_addr);
1564 jmp_r(code, opts->gen.scratch1);
1565 break;
1566 case MODE_AREG_INDEX_DISP8:
1567 cycles(&opts->gen, BUS*3);//TODO: CHeck that this is correct
1568 if (is_jsr) {
1569 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1570 sub_ir(code, 4, opts->aregs[7], SZ_D);
1571 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1572 call(code, opts->write_32_highfirst);
1573 }
1574 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1575 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1576 } else {
1577 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1578 }
1579 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1580 if (inst->src.params.regs.sec & 1) {
1581 //32-bit index register
1582 if (inst->src.params.regs.sec & 0x10) {
1583 if (opts->aregs[sec_reg] >= 0) {
1584 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1585 } else {
1586 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1587 }
1588 } else {
1589 if (opts->dregs[sec_reg] >= 0) {
1590 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1591 } else {
1592 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1593 }
1594 }
1595 } else {
1596 //16-bit index register
1597 if (inst->src.params.regs.sec & 0x10) {
1598 if (opts->aregs[sec_reg] >= 0) {
1599 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1600 } else {
1601 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1602 }
1603 } else {
1604 if (opts->dregs[sec_reg] >= 0) {
1605 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1606 } else {
1607 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1608 }
1609 }
1610 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1611 }
1612 if (inst->src.params.regs.displacement) {
1613 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1614 }
1615 call(code, opts->native_addr);
1616 jmp_r(code, opts->gen.scratch1);
1617 break;
1618 case MODE_PC_DISPLACE:
1619 //TODO: Add cycles in the right place relative to pushing the return address on the stack
1620 cycles(&opts->gen, 10);
1621 if (is_jsr) {
1622 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1623 sub_ir(code, 4, opts->aregs[7], SZ_D);
1624 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1625 call(code, opts->write_32_highfirst);
1626 }
1627 m68k_addr = inst->src.params.regs.displacement + inst->address + 2;
1628 if ((m68k_addr & 0xFFFFFF) < 0x400000) {
1629 dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
1630 if (!dest_addr) {
1631 opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, code->cur + 1);
1632 //dummy address to be replaced later, make sure it generates a 4-byte displacement
1633 dest_addr = code->cur + 256;
1634 }
1635 jmp(code, dest_addr);
1636 } else {
1637 mov_ir(code, m68k_addr, opts->gen.scratch1, SZ_D);
1638 call(code, opts->native_addr);
1639 jmp_r(code, opts->gen.scratch1);
1640 }
1641 break;
1642 case MODE_PC_INDEX_DISP8:
1643 cycles(&opts->gen, BUS*3);//TODO: Check that this is correct
1644 if (is_jsr) {
1645 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1646 sub_ir(code, 4, opts->aregs[7], SZ_D);
1647 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1648 call(code, opts->write_32_highfirst);
1649 }
1650 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
1651 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1652 if (inst->src.params.regs.sec & 1) {
1653 if (inst->src.params.regs.sec & 0x10) {
1654 if (opts->aregs[sec_reg] >= 0) {
1655 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1656 } else {
1657 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1658 }
1659 } else {
1660 if (opts->dregs[sec_reg] >= 0) {
1661 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1662 } else {
1663 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1664 }
1665 }
1666 } else {
1667 if (inst->src.params.regs.sec & 0x10) {
1668 if (opts->aregs[sec_reg] >= 0) {
1669 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1670 } else {
1671 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1672 }
1673 } else {
1674 if (opts->dregs[sec_reg] >= 0) {
1675 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1676 } else {
1677 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1678 }
1679 }
1680 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1681 }
1682 if (inst->src.params.regs.displacement) {
1683 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1684 }
1685 call(code, opts->native_addr);
1686 jmp_r(code, opts->gen.scratch1);
1687 break;
1688 case MODE_ABSOLUTE:
1689 case MODE_ABSOLUTE_SHORT:
1690 //TODO: Add cycles in the right place relative to pushing the return address on the stack
1691 cycles(&opts->gen, inst->src.addr_mode == MODE_ABSOLUTE ? 12 : 10);
1692 if (is_jsr) {
1693 mov_ir(code, inst->address + (inst->src.addr_mode == MODE_ABSOLUTE ? 6 : 4), opts->gen.scratch1, SZ_D);
1694 sub_ir(code, 4, opts->aregs[7], SZ_D);
1695 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1696 call(code, opts->write_32_highfirst);
1697 }
1698 m68k_addr = inst->src.params.immed;
1699 if ((m68k_addr & 0xFFFFFF) < 0x400000) {
1700 dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
1701 if (!dest_addr) {
1702 opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, code->cur + 1);
1703 //dummy address to be replaced later, make sure it generates a 4-byte displacement
1704 dest_addr = code->cur + 256;
1705 }
1706 jmp(code, dest_addr);
1707 } else {
1708 mov_ir(code, m68k_addr, opts->gen.scratch1, SZ_D);
1709 call(code, opts->native_addr);
1710 jmp_r(code, opts->gen.scratch1);
1711 }
1712 break;
1713 default:
1714 m68k_disasm(inst, disasm_buf);
1715 printf("%s\naddress mode %d not yet supported (%s)\n", disasm_buf, inst->src.addr_mode, is_jsr ? "jsr" : "jmp");
1716 exit(1);
1717 }
1718 }
1719
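The jmp/jsr translation above (and the Scc/DBcc tails earlier in this hunk) all lean on the same patch-later convention: when the target 68K address is below 0x400000 and no native translation exists yet, defer_address records code->cur + 1 and the jump is emitted against a dummy target 256 bytes ahead, which makes the emitter encode a 4-byte displacement that can later be rewritten in place. Below is a minimal stand-alone sketch of that rel32 backpatching arithmetic, using a plain byte buffer and invented helper names rather than the project's code_info emitter.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-alone sketch (not the project's emitter API): emit an x86 JMP rel32
 * with a placeholder displacement, then patch it once the real target is
 * known. The displacement is relative to the end of the 5-byte instruction. */
static uint8_t buf[64];

static uint8_t *emit_jmp_rel32(uint8_t *cur)
{
	*cur++ = 0xE9;       /* JMP rel32 opcode */
	memset(cur, 0, 4);   /* dummy displacement, to be patched later */
	return cur + 4;
}

static void patch_jmp_rel32(uint8_t *operand, const uint8_t *target)
{
	int32_t disp = (int32_t)(target - (operand + 4));
	memcpy(operand, &disp, sizeof disp);
}

int main(void)
{
	uint8_t *next = emit_jmp_rel32(buf);   /* displacement bytes live at buf+1 */
	patch_jmp_rel32(buf + 1, buf + 32);    /* target becomes known later */
	int32_t disp;
	memcpy(&disp, buf + 1, sizeof disp);
	printf("emitted %td bytes, patched displacement %d\n",
	       (ptrdiff_t)(next - buf), disp); /* 5 bytes, displacement 27 */
	return 0;
}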
1720 void translate_m68k_rts(m68k_options * opts, m68kinst * inst)
1721 {
1722 code_info *code = &opts->gen.code;
1723 //TODO: Add cycles
1724 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
1725 add_ir(code, 4, opts->aregs[7], SZ_D);
1726 call(code, opts->read_32);
1727 call(code, opts->native_addr);
1728 jmp_r(code, opts->gen.scratch1);
1729 } 1010 }
1730 1011
1731 void translate_m68k_dbcc(m68k_options * opts, m68kinst * inst) 1012 void translate_m68k_dbcc(m68k_options * opts, m68kinst * inst)
1732 { 1013 {
1733 code_info *code = &opts->gen.code; 1014 code_info *code = &opts->gen.code;
1750 cmp_irdisp(code, -1, opts->gen.context_reg, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W); 1031 cmp_irdisp(code, -1, opts->gen.context_reg, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W);
1751 } 1032 }
1752 code_ptr loop_end_loc = code->cur + 1; 1033 code_ptr loop_end_loc = code->cur + 1;
1753 jcc(code, CC_Z, code->cur + 2); 1034 jcc(code, CC_Z, code->cur + 2);
1754 uint32_t after = inst->address + 2; 1035 uint32_t after = inst->address + 2;
1755 code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + inst->src.params.immed); 1036 jump_m68k_abs(opts, after + inst->src.params.immed);
1756 if (!dest_addr) {
1757 opts->gen.deferred = defer_address(opts->gen.deferred, after + inst->src.params.immed, code->cur + 1);
1758 //dummy address to be replaced later, make sure it generates a 4-byte displacement
1759 dest_addr = code->cur + 256;
1760 }
1761 jmp(code, dest_addr);
1762 *loop_end_loc = code->cur - (loop_end_loc+1); 1037 *loop_end_loc = code->cur - (loop_end_loc+1);
1763 if (skip_loc) { 1038 if (skip_loc) {
1764 cycles(&opts->gen, 2); 1039 cycles(&opts->gen, 2);
1765 *skip_loc = code->cur - (skip_loc+1); 1040 *skip_loc = code->cur - (skip_loc+1);
1766 cycles(&opts->gen, 2); 1041 cycles(&opts->gen, 2);
1767 } else { 1042 } else {
1768 cycles(&opts->gen, 4); 1043 cycles(&opts->gen, 4);
1769 } 1044 }
1770 } 1045 }
1771 1046
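translate_m68k_dbcc keeps the forward-patch idiom used throughout this file: save code->cur + 1 (the byte that will hold the jcc's 8-bit displacement), emit the conditional jump against a throwaway target, generate the not-taken path, then store code->cur - (loc + 1) through the saved pointer. The arithmetic in isolation, as a toy sketch over a plain byte buffer rather than the code_info emitter:

#include <stdint.h>
#include <stdio.h>

/* Toy sketch of the jcc-then-backpatch idiom: the rel8 displacement counts
 * bytes from the end of the 2-byte jcc to the point the jump should reach. */
int main(void)
{
	uint8_t code[32];
	uint8_t *cur = code;

	*cur++ = 0x74;                 /* JZ rel8 (the CC_Z case above) */
	uint8_t *loop_end_loc = cur;   /* this byte gets the displacement */
	*cur++ = 0;                    /* dummy value for now */

	cur += 6;                      /* pretend 6 bytes of fall-through code */

	*loop_end_loc = (uint8_t)(cur - (loop_end_loc + 1));
	printf("patched rel8 displacement: %u\n", (unsigned)*loop_end_loc); /* 6 */
	return 0;
}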
1772 void translate_m68k_link(m68k_options * opts, m68kinst * inst)
1773 {
1774 code_info *code = &opts->gen.code;
1775 int8_t reg = native_reg(&(inst->src), opts);
1776 //compensate for displacement word
1777 cycles(&opts->gen, BUS);
1778 sub_ir(code, 4, opts->aregs[7], SZ_D);
1779 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1780 if (reg >= 0) {
1781 mov_rr(code, reg, opts->gen.scratch1, SZ_D);
1782 } else {
1783 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1784 }
1785 call(code, opts->write_32_highfirst);
1786 if (reg >= 0) {
1787 mov_rr(code, opts->aregs[7], reg, SZ_D);
1788 } else {
1789 mov_rrdisp(code, opts->aregs[7], opts->gen.context_reg, reg_offset(&(inst->src)), SZ_D);
1790 }
1791 add_ir(code, inst->dst.params.immed, opts->aregs[7], SZ_D);
1792 //prefetch
1793 cycles(&opts->gen, BUS);
1794 }
1795
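For reference while reading the LINK translation removed here: the 68K LINK An,#disp instruction pushes An, copies the updated stack pointer into An, and then adds the (normally negative) signed displacement to A7, which is the write_32_highfirst / register move / add_ir sequence above. A behavioural toy model in plain C, with made-up types standing in for the real context:

#include <stdint.h>
#include <stdio.h>

/* Toy model of LINK An,#disp: push An, An = SP, SP += disp. Names are
 * invented for illustration; the JIT performs the same steps through its
 * memory-write helpers. */
typedef struct {
	uint32_t aregs[8];   /* aregs[7] is the active stack pointer */
	uint8_t ram[256];    /* big-endian toy memory */
} toy_m68k;

static void push32(toy_m68k *m, uint32_t val)
{
	m->aregs[7] -= 4;
	uint32_t sp = m->aregs[7];
	m->ram[sp]     = (uint8_t)(val >> 24);
	m->ram[sp + 1] = (uint8_t)(val >> 16);
	m->ram[sp + 2] = (uint8_t)(val >> 8);
	m->ram[sp + 3] = (uint8_t)val;
}

static void m68k_link(toy_m68k *m, int reg, int16_t disp)
{
	push32(m, m->aregs[reg]);     /* save the old frame pointer */
	m->aregs[reg] = m->aregs[7];  /* new frame pointer = SP after the push */
	m->aregs[7] += disp;          /* reserve space for locals */
}

int main(void)
{
	toy_m68k m = {{0}, {0}};
	m.aregs[7] = 0x100;
	m.aregs[6] = 0xCAFE;
	m68k_link(&m, 6, -16);
	printf("A6=%X SP=%X\n", (unsigned)m.aregs[6], (unsigned)m.aregs[7]); /* A6=FC SP=EC */
	return 0;
}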
1796 void translate_m68k_movep(m68k_options * opts, m68kinst * inst) 1047 void translate_m68k_movep(m68k_options * opts, m68kinst * inst)
1797 { 1048 {
1798 code_info *code = &opts->gen.code; 1049 code_info *code = &opts->gen.code;
1799 int8_t reg; 1050 int8_t reg;
1800 cycles(&opts->gen, BUS*2); 1051 cycles(&opts->gen, BUS*2);
1801 if (inst->src.addr_mode == MODE_REG) { 1052 if (inst->src.addr_mode == MODE_REG) {
1802 if (opts->aregs[inst->dst.params.regs.pri] >= 0) { 1053 calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);
1803 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
1804 } else {
1805 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
1806 }
1807 if (inst->dst.params.regs.displacement) {
1808 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
1809 }
1810 reg = native_reg(&(inst->src), opts); 1054 reg = native_reg(&(inst->src), opts);
1811 if (inst->extra.size == OPSIZE_LONG) { 1055 if (inst->extra.size == OPSIZE_LONG) {
1812 if (reg >= 0) { 1056 if (reg >= 0) {
1813 mov_rr(code, reg, opts->gen.scratch1, SZ_D); 1057 mov_rr(code, reg, opts->gen.scratch1, SZ_D);
1814 shr_ir(code, 24, opts->gen.scratch1, SZ_D); 1058 shr_ir(code, 24, opts->gen.scratch1, SZ_D);
1846 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_B); 1090 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_B);
1847 } 1091 }
1848 add_ir(code, 2, opts->gen.scratch2, SZ_D); 1092 add_ir(code, 2, opts->gen.scratch2, SZ_D);
1849 call(code, opts->write_8); 1093 call(code, opts->write_8);
1850 } else { 1094 } else {
1851 if (opts->aregs[inst->src.params.regs.pri] >= 0) { 1095 calc_areg_displace(opts, &inst->src, opts->gen.scratch1);
1852 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1853 } else {
1854 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1855 }
1856 if (inst->src.params.regs.displacement) {
1857 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1858 }
1859 reg = native_reg(&(inst->dst), opts); 1096 reg = native_reg(&(inst->dst), opts);
1860 if (inst->extra.size == OPSIZE_LONG) { 1097 if (inst->extra.size == OPSIZE_LONG) {
1861 if (reg >= 0) { 1098 if (reg >= 0) {
1862 push_r(code, opts->gen.scratch1); 1099 push_r(code, opts->gen.scratch1);
1863 call(code, opts->read_8); 1100 call(code, opts->read_8);
2142 return translate_m68k_clr(opts, inst); 1379 return translate_m68k_clr(opts, inst);
2143 } else if(inst->op == M68K_MOVEM) { 1380 } else if(inst->op == M68K_MOVEM) {
2144 return translate_m68k_movem(opts, inst); 1381 return translate_m68k_movem(opts, inst);
2145 } else if(inst->op == M68K_LINK) { 1382 } else if(inst->op == M68K_LINK) {
2146 return translate_m68k_link(opts, inst); 1383 return translate_m68k_link(opts, inst);
1384 } else if(inst->op == M68K_UNLK) {
1385 return translate_m68k_unlk(opts, inst);
2147 } else if(inst->op == M68K_EXT) { 1386 } else if(inst->op == M68K_EXT) {
2148 return translate_m68k_ext(opts, inst); 1387 return translate_m68k_ext(opts, inst);
2149 } else if(inst->op == M68K_SCC) { 1388 } else if(inst->op == M68K_SCC) {
2150 return translate_m68k_scc(opts, inst); 1389 return translate_m68k_scc(opts, inst);
2151 } else if(inst->op == M68K_MOVEP) { 1390 } else if(inst->op == M68K_MOVEP) {
2721 case M68K_MOVE_USP: 1960 case M68K_MOVE_USP:
2722 cycles(&opts->gen, BUS); 1961 cycles(&opts->gen, BUS);
2723 //TODO: Trap if not in supervisor mode 1962 //TODO: Trap if not in supervisor mode
2724 //bt_irdisp(code, BIT_SUPERVISOR, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); 1963 //bt_irdisp(code, BIT_SUPERVISOR, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
2725 if (inst->src.addr_mode == MODE_UNUSED) { 1964 if (inst->src.addr_mode == MODE_UNUSED) {
2726 if (dst_op.mode == MODE_REG_DIRECT) { 1965 areg_to_native(opts, 8, dst_op.mode == MODE_REG_DIRECT ? dst_op.base : opts->gen.scratch1);
2727 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, dst_op.base, SZ_D); 1966 if (dst_op.mode != MODE_REG_DIRECT) {
2728 } else {
2729 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->gen.scratch1, SZ_D);
2730 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_D); 1967 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_D);
2731 } 1968 }
2732 } else { 1969 } else {
2733 if (src_op.mode == MODE_REG_DIRECT) { 1970 if (src_op.mode != MODE_REG_DIRECT) {
2734 mov_rrdisp(code, src_op.base, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
2735 } else {
2736 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_D); 1971 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_D);
2737 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D); 1972 src_op.base = opts->gen.scratch1;
2738 } 1973 }
1974 native_to_areg(opts, src_op.base, 8);
2739 } 1975 }
2740 break; 1976 break;
2741 //case M68K_MOVEP: 1977 //case M68K_MOVEP:
2742 case M68K_MULS: 1978 case M68K_MULS:
2743 case M68K_MULU: 1979 case M68K_MULU:
3131 } 2367 }
3132 break; 2368 break;
3133 case M68K_RTE: 2369 case M68K_RTE:
3134 //TODO: Trap if not in system mode 2370 //TODO: Trap if not in system mode
3135 //Read saved SR 2371 //Read saved SR
3136 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D); 2372 areg_to_native(opts, 7, opts->gen.scratch1);
3137 call(code, opts->read_16); 2373 call(code, opts->read_16);
3138 add_ir(code, 2, opts->aregs[7], SZ_D); 2374 addi_areg(opts, 2, 7);
3139 call(code, opts->set_sr); 2375 call(code, opts->set_sr);
3140 //Read saved PC 2376 //Read saved PC
3141 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D); 2377 areg_to_native(opts, 7, opts->gen.scratch1);
3142 call(code, opts->read_32); 2378 call(code, opts->read_32);
3143 add_ir(code, 4, opts->aregs[7], SZ_D); 2379 addi_areg(opts, 4, 7);
3144 //Check if we've switched to user mode and swap stack pointers if needed 2380 //Check if we've switched to user mode and swap stack pointers if needed
3145 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); 2381 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
3146 end_off = code->cur + 1; 2382 end_off = code->cur + 1;
3147 jcc(code, CC_C, code->cur + 2); 2383 jcc(code, CC_C, code->cur + 2);
3148 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D); 2384 swap_ssp_usp(opts);
3149 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
3150 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
3151 *end_off = code->cur - (end_off+1); 2385 *end_off = code->cur - (end_off+1);
3152 //Get native address, sync components, recalculate interrupt points and jump to returned address 2386 //Get native address, sync components, recalculate interrupt points and jump to returned address
3153 call(code, opts->native_addr_and_sync); 2387 call(code, opts->native_addr_and_sync);
3154 jmp_r(code, opts->gen.scratch1); 2388 jmp_r(code, opts->gen.scratch1);
3155 break; 2389 break;
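The swap_ssp_usp helper introduced by this change replaces the three moves on the left-hand side: the inactive stack pointer lives in slot 8 of the context's aregs array, so entering or leaving supervisor mode is just an exchange between aregs[7] and that slot. Behaviourally (a toy sketch with made-up values, not the emitted x86):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the USP/SSP swap: aregs[7] is whichever stack pointer is
 * active, aregs[8] holds the inactive one. */
static void swap_ssp_usp_model(uint32_t aregs[9])
{
	uint32_t tmp = aregs[7];
	aregs[7] = aregs[8];
	aregs[8] = tmp;
}

int main(void)
{
	uint32_t aregs[9] = {0};
	aregs[7] = 0xFFFE00;          /* active SSP while handling an exception */
	aregs[8] = 0xFFC000;          /* saved USP */
	swap_ssp_usp_model(aregs);    /* e.g. RTE dropping back to user mode */
	printf("SP=%X inactive=%X\n", (unsigned)aregs[7], (unsigned)aregs[8]);
	return 0;
}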
3156 case M68K_RTR: 2390 case M68K_RTR:
3157 //Read saved CCR 2391 translate_m68k_rtr(opts, inst);
3158 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3159 call(code, opts->read_16);
3160 add_ir(code, 2, opts->aregs[7], SZ_D);
3161 call(code, opts->set_ccr);
3162 //Read saved PC
3163 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3164 call(code, opts->read_32);
3165 add_ir(code, 4, opts->aregs[7], SZ_D);
3166 //Get native address and jump to it
3167 call(code, opts->native_addr);
3168 jmp_r(code, opts->gen.scratch1);
3169 break; 2392 break;
3170 case M68K_SBCD: { 2393 case M68K_SBCD: {
3171 if (src_op.base != opts->gen.scratch2) { 2394 if (src_op.base != opts->gen.scratch2) {
3172 if (src_op.mode == MODE_REG_DIRECT) { 2395 if (src_op.mode == MODE_REG_DIRECT) {
3173 mov_rr(code, src_op.base, opts->gen.scratch2, SZ_B); 2396 mov_rr(code, src_op.base, opts->gen.scratch2, SZ_B);
3214 set_flag(opts, (src_op.disp >> 3) & 0x1, FLAG_N); 2437 set_flag(opts, (src_op.disp >> 3) & 0x1, FLAG_N);
3215 set_flag(opts, (src_op.disp >> 4) & 0x1, FLAG_X); 2438 set_flag(opts, (src_op.disp >> 4) & 0x1, FLAG_X);
3216 mov_irdisp(code, (src_op.disp >> 8), opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); 2439 mov_irdisp(code, (src_op.disp >> 8), opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
3217 if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) { 2440 if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
3218 //leave supervisor mode 2441 //leave supervisor mode
3219 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D); 2442 swap_ssp_usp(opts);
3220 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
3221 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
3222 } 2443 }
3223 code_ptr loop_top = code->cur; 2444 code_ptr loop_top = code->cur;
3224 call(code, opts->do_sync); 2445 call(code, opts->do_sync);
3225 cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D); 2446 cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D);
3226 code_ptr normal_cycle_up = code->cur + 1; 2447 code_ptr normal_cycle_up = code->cur + 1;
3315 set_flag_cond(opts, CC_S, FLAG_N); 2536 set_flag_cond(opts, CC_S, FLAG_N);
3316 set_flag(opts, 0, FLAG_V); 2537 set_flag(opts, 0, FLAG_V);
3317 break; 2538 break;
3318 //case M68K_TAS: 2539 //case M68K_TAS:
3319 case M68K_TRAP: 2540 case M68K_TRAP:
3320 mov_ir(code, src_op.disp + VECTOR_TRAP_0, opts->gen.scratch2, SZ_D); 2541 translate_m68k_trap(opts, inst);
3321 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
3322 jmp(code, opts->trap);
3323 break; 2542 break;
3324 //case M68K_TRAPV: 2543 //case M68K_TRAPV:
3325 case M68K_TST: 2544 case M68K_TST:
3326 cycles(&opts->gen, BUS); 2545 cycles(&opts->gen, BUS);
3327 if (src_op.mode == MODE_REG_DIRECT) { 2546 if (src_op.mode == MODE_REG_DIRECT) {
3331 } 2550 }
3332 set_flag(opts, 0, FLAG_C); 2551 set_flag(opts, 0, FLAG_C);
3333 set_flag_cond(opts, CC_Z, FLAG_Z); 2552 set_flag_cond(opts, CC_Z, FLAG_Z);
3334 set_flag_cond(opts, CC_S, FLAG_N); 2553 set_flag_cond(opts, CC_S, FLAG_N);
3335 set_flag(opts, 0, FLAG_V); 2554 set_flag(opts, 0, FLAG_V);
3336 break;
3337 case M68K_UNLK:
3338 cycles(&opts->gen, BUS);
3339 if (dst_op.mode == MODE_REG_DIRECT) {
3340 mov_rr(code, dst_op.base, opts->aregs[7], SZ_D);
3341 } else {
3342 mov_rdispr(code, dst_op.base, dst_op.disp, opts->aregs[7], SZ_D);
3343 }
3344 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3345 call(code, opts->read_32);
3346 if (dst_op.mode == MODE_REG_DIRECT) {
3347 mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_D);
3348 } else {
3349 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_D);
3350 }
3351 add_ir(code, 4, opts->aregs[7], SZ_D);
3352 break; 2555 break;
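The UNLK case deleted here is handled by the new translate_m68k_unlk entry in the dispatcher (added earlier in this diff). The behaviour it has to preserve is the inverse of LINK: SP = An, then An = the 32-bit value popped from the stack, matching the read_32 round trip in the removed code. A toy behavioural model with invented names, not the project's API:

#include <stdint.h>
#include <stdio.h>

/* Toy model of UNLK An: SP = An, An = pop32(). Big-endian toy memory. */
static uint32_t read32(const uint8_t *ram, uint32_t addr)
{
	return (uint32_t)ram[addr] << 24 | (uint32_t)ram[addr + 1] << 16
	     | (uint32_t)ram[addr + 2] << 8 | ram[addr + 3];
}

static void m68k_unlk(uint32_t aregs[8], const uint8_t *ram, int reg)
{
	aregs[7] = aregs[reg];               /* tear down the frame */
	aregs[reg] = read32(ram, aregs[7]);  /* restore the saved frame pointer */
	aregs[7] += 4;                       /* complete the pop */
}

int main(void)
{
	uint8_t ram[256] = {0};
	uint32_t aregs[8] = {0};
	aregs[6] = 0xFC;     /* frame pointer left by an earlier LINK */
	aregs[7] = 0xEC;     /* SP somewhere below it */
	ram[0xFE] = 0x01;    /* saved A6 value 0x100 sits at 0xFC..0xFF */
	m68k_unlk(aregs, ram, 6);
	printf("A6=%X SP=%X\n", (unsigned)aregs[6], (unsigned)aregs[7]); /* A6=100 SP=100 */
	return 0;
}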
3353 default: 2556 default:
3354 m68k_disasm(inst, disasm_buf); 2557 m68k_disasm(inst, disasm_buf);
3355 printf("%X: %s\ninstruction %d not yet implemented\n", inst->address, disasm_buf, inst->op); 2558 printf("%X: %s\ninstruction %d not yet implemented\n", inst->address, disasm_buf, inst->op);
3356 exit(1); 2559 exit(1);
4139 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), LIMIT, SZ_D); 3342 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), LIMIT, SZ_D);
4140 //swap USP and SSP if not already in supervisor mode 3343 //swap USP and SSP if not already in supervisor mode
4141 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); 3344 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4142 code_ptr already_supervisor = code->cur + 1; 3345 code_ptr already_supervisor = code->cur + 1;
4143 jcc(code, CC_C, code->cur + 2); 3346 jcc(code, CC_C, code->cur + 2);
4144 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->gen.scratch2, SZ_D); 3347 swap_ssp_usp(opts);
4145 mov_rrdisp(code, opts->aregs[7], opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
4146 mov_rr(code, opts->gen.scratch2, opts->aregs[7], SZ_D);
4147 *already_supervisor = code->cur - (already_supervisor+1); 3348 *already_supervisor = code->cur - (already_supervisor+1);
4148 //save PC 3349 //save PC
4149 sub_ir(code, 4, opts->aregs[7], SZ_D); 3350 subi_areg(opts, 4, 7);
4150 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D); 3351 areg_to_native(opts, 7, opts->gen.scratch2);
4151 call(code, opts->write_32_lowfirst); 3352 call(code, opts->write_32_lowfirst);
4152 //save status register 3353 //save status register
4153 sub_ir(code, 2, opts->aregs[7], SZ_D); 3354 subi_areg(opts, 2, 7);
4154 call(code, opts->get_sr); 3355 call(code, opts->get_sr);
4155 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D); 3356 areg_to_native(opts, 7, opts->gen.scratch2);
4156 call(code, opts->write_16); 3357 call(code, opts->write_16);
4157 //update status register 3358 //update status register
4158 and_irdisp(code, 0xF8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); 3359 and_irdisp(code, 0xF8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4159 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch1, SZ_B); 3360 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch1, SZ_B);
4160 or_ir(code, 0x20, opts->gen.scratch1, SZ_B); 3361 or_ir(code, 0x20, opts->gen.scratch1, SZ_B);
4175 push_r(code, opts->gen.scratch2); 3376 push_r(code, opts->gen.scratch2);
4176 //swap USP and SSP if not already in supervisor mode 3377 //swap USP and SSP if not already in supervisor mode
4177 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); 3378 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4178 already_supervisor = code->cur + 1; 3379 already_supervisor = code->cur + 1;
4179 jcc(code, CC_C, code->cur + 2); 3380 jcc(code, CC_C, code->cur + 2);
4180 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->gen.scratch2, SZ_D); 3381 swap_ssp_usp(opts);
4181 mov_rrdisp(code, opts->aregs[7], opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
4182 mov_rr(code, opts->gen.scratch2, opts->aregs[7], SZ_D);
4183 *already_supervisor = code->cur - (already_supervisor+1); 3382 *already_supervisor = code->cur - (already_supervisor+1);
4184 //save PC 3383 //save PC
4185 sub_ir(code, 4, opts->aregs[7], SZ_D); 3384 subi_areg(opts, 4, 7);
4186 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D); 3385 areg_to_native(opts, 7, opts->gen.scratch2);
4187 call(code, opts->write_32_lowfirst); 3386 call(code, opts->write_32_lowfirst);
4188 //save status register 3387 //save status register
4189 sub_ir(code, 2, opts->aregs[7], SZ_D); 3388 subi_areg(opts, 2, 7);
4190 call(code, opts->get_sr); 3389 call(code, opts->get_sr);
4191 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D); 3390 areg_to_native(opts, 7, opts->gen.scratch2);
4192 call(code, opts->write_16); 3391 call(code, opts->write_16);
4193 //set supervisor bit 3392 //set supervisor bit
4194 or_irdisp(code, 0x20, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); 3393 or_irdisp(code, 0x20, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4195 //calculate vector address 3394 //calculate vector address
4196 pop_r(code, opts->gen.scratch1); 3395 pop_r(code, opts->gen.scratch1);
4198 call(code, opts->read_32); 3397 call(code, opts->read_32);
4199 call(code, opts->native_addr_and_sync); 3398 call(code, opts->native_addr_and_sync);
4200 cycles(&opts->gen, 18); 3399 cycles(&opts->gen, 18);
4201 jmp_r(code, opts->gen.scratch1); 3400 jmp_r(code, opts->gen.scratch1);
4202 } 3401 }
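Both the interrupt entry and the trap entry generated above build the standard 68000 exception frame: swap in the supervisor stack if the CPU was in user mode, push the return PC (32 bits), push the status register (16 bits), force the supervisor bit on, then fetch the handler address from the vector table and jump to its translation. The interrupt path additionally folds the pending interrupt level into the status byte, which the toy model below leaves out; everything here uses invented structures, not the real m68k_context:

#include <stdint.h>
#include <stdio.h>

/* Toy model of 68000 exception entry (group 1/2 frame, no extra words). */
typedef struct {
	uint32_t aregs[9];   /* [7] active SP, [8] inactive SP */
	uint16_t sr;         /* full status register; bit 13 = supervisor */
	uint32_t pc;
	uint8_t  ram[1024];  /* vector table at address 0, big-endian */
} toy_m68k;

static void write16(toy_m68k *m, uint32_t a, uint16_t v)
{
	m->ram[a] = (uint8_t)(v >> 8);
	m->ram[a + 1] = (uint8_t)v;
}

static void write32(toy_m68k *m, uint32_t a, uint32_t v)
{
	write16(m, a, (uint16_t)(v >> 16));
	write16(m, a + 2, (uint16_t)v);
}

static uint32_t read32(toy_m68k *m, uint32_t a)
{
	return (uint32_t)m->ram[a] << 24 | (uint32_t)m->ram[a + 1] << 16
	     | (uint32_t)m->ram[a + 2] << 8 | m->ram[a + 3];
}

static void enter_exception(toy_m68k *m, uint32_t vector_num)
{
	if (!(m->sr & 0x2000)) {            /* user mode: swap in the SSP */
		uint32_t tmp = m->aregs[7];
		m->aregs[7] = m->aregs[8];
		m->aregs[8] = tmp;
	}
	m->aregs[7] -= 4;
	write32(m, m->aregs[7], m->pc);     /* push return PC */
	m->aregs[7] -= 2;
	write16(m, m->aregs[7], m->sr);     /* push status register */
	m->sr |= 0x2000;                    /* set the supervisor bit */
	m->pc = read32(m, vector_num * 4);  /* fetch handler from the table */
}

int main(void)
{
	toy_m68k m = {{0}, 0, 0, {0}};
	m.aregs[7] = 0x200;         /* USP */
	m.aregs[8] = 0x300;         /* SSP */
	m.pc = 0x1234;
	write32(&m, 26 * 4, 0x400); /* vector 26: level 2 autovector */
	enter_exception(&m, 26);
	printf("PC=%X SP=%X\n", (unsigned)m.pc, (unsigned)m.aregs[7]); /* PC=400 SP=2FA */
	return 0;
}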
4203
4204 void init_68k_context(m68k_context * context, native_map_slot * native_code_map, void * opts)
4205 {
4206 memset(context, 0, sizeof(m68k_context));
4207 context->native_code_map = native_code_map;
4208 context->options = opts;
4209 context->int_cycle = 0xFFFFFFFF;
4210 context->status = 0x27;
4211 }
4212