comparison m68k_core_x86.c @ 570:76bba9ffe351

Initial stab at separating the generic parts of the 68K core from the host-cpu specific parts.
author Michael Pavone <pavone@retrodev.com>
date Sun, 02 Mar 2014 16:34:29 -0800
parents m68k_to_x86.c@9b7fcf748be0
children c90fc522e7e3
comparison
equal deleted inserted replaced
569:9b7fcf748be0 570:76bba9ffe351
1 /*
2 Copyright 2013 Michael Pavone
3 This file is part of BlastEm.
4 BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
5 */
6 #include "gen_x86.h"
7 #include "m68k_core.h"
8 #include "m68k_internal.h"
9 #include "68kinst.h"
10 #include "mem.h"
11 #include "backend.h"
12 #include <stdio.h>
13 #include <stddef.h>
14 #include <stdlib.h>
15 #include <string.h>
16
//Cycles consumed by one 68K bus access
#define BUS 4
//Extra cycles charged for the pre-decrement addressing mode
#define PREDEC_PENALTY 2

//Fixed x86 register assignments used by the generated code
#define CYCLES RAX
#define LIMIT RBP
#define CONTEXT RSI
#define SCRATCH1 RCX

#ifdef X86_64
#define SCRATCH2 RDI
#else
//32-bit builds use RBX as the second scratch register instead of RDI
#define SCRATCH2 RBX
#endif

//Indices of the 68K condition-code flags; used both to index
//m68k_options.flag_regs and as offsets into the flags array in m68k_context
enum {
	FLAG_X,
	FLAG_N,
	FLAG_Z,
	FLAG_V,
	FLAG_C
};

//Shared buffer for disassembly text used in error messages
char disasm_buf[1024];

m68k_context * sync_components(m68k_context * context, uint32_t address);

//defined outside this file
void m68k_invalid();
void bcd_add();
void bcd_sub();
47
48 void set_flag(m68k_options * opts, uint8_t val, uint8_t flag)
49 {
50 if (opts->flag_regs[flag] >= 0) {
51 mov_ir(&opts->gen.code, val, opts->flag_regs[flag], SZ_B);
52 } else {
53 int8_t offset = offsetof(m68k_context, flags) + flag;
54 if (offset) {
55 mov_irdisp(&opts->gen.code, val, opts->gen.context_reg, offset, SZ_B);
56 } else {
57 mov_irind(&opts->gen.code, val, opts->gen.context_reg, SZ_B);
58 }
59 }
60 }
61
62 void set_flag_cond(m68k_options *opts, uint8_t cond, uint8_t flag)
63 {
64 if (opts->flag_regs[flag] >= 0) {
65 setcc_r(&opts->gen.code, cond, opts->flag_regs[flag]);
66 } else {
67 int8_t offset = offsetof(m68k_context, flags) + flag;
68 if (offset) {
69 setcc_rdisp(&opts->gen.code, cond, opts->gen.context_reg, offset);
70 } else {
71 setcc_rind(&opts->gen.code, cond, opts->gen.context_reg);
72 }
73 }
74 }
75
76 void check_flag(m68k_options *opts, uint8_t flag)
77 {
78 if (opts->flag_regs[flag] >= 0) {
79 cmp_ir(&opts->gen.code, 0, opts->flag_regs[flag], SZ_B);
80 } else {
81 cmp_irdisp(&opts->gen.code, 0, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, SZ_B);
82 }
83 }
84
85 void flag_to_reg(m68k_options *opts, uint8_t flag, uint8_t reg)
86 {
87 if (opts->flag_regs[flag] >= 0) {
88 mov_rr(&opts->gen.code, opts->flag_regs[flag], reg, SZ_B);
89 } else {
90 int8_t offset = offsetof(m68k_context, flags) + flag;
91 if (offset) {
92 mov_rdispr(&opts->gen.code, opts->gen.context_reg, offset, reg, SZ_B);
93 } else {
94 mov_rindr(&opts->gen.code, opts->gen.context_reg, reg, SZ_B);
95 }
96 }
97 }
98
99 void reg_to_flag(m68k_options *opts, uint8_t reg, uint8_t flag)
100 {
101 if (opts->flag_regs[flag] >= 0) {
102 mov_rr(&opts->gen.code, reg, opts->flag_regs[flag], SZ_B);
103 } else {
104 int8_t offset = offsetof(m68k_context, flags) + flag;
105 if (offset) {
106 mov_rrdisp(&opts->gen.code, reg, opts->gen.context_reg, offset, SZ_B);
107 } else {
108 mov_rrind(&opts->gen.code, reg, opts->gen.context_reg, SZ_B);
109 }
110 }
111 }
112
113 void flag_to_flag(m68k_options *opts, uint8_t flag1, uint8_t flag2)
114 {
115 code_info *code = &opts->gen.code;
116 if (opts->flag_regs[flag1] >= 0 && opts->flag_regs[flag2] >= 0) {
117 mov_rr(code, opts->flag_regs[flag1], opts->flag_regs[flag2], SZ_B);
118 } else if(opts->flag_regs[flag1] >= 0) {
119 mov_rrdisp(code, opts->flag_regs[flag1], opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
120 } else if (opts->flag_regs[flag2] >= 0) {
121 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->flag_regs[flag2], SZ_B);
122 } else {
123 push_r(code, opts->gen.scratch1);
124 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->gen.scratch1, SZ_B);
125 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
126 pop_r(code, opts->gen.scratch1);
127 }
128 }
129
130 void flag_to_carry(m68k_options * opts, uint8_t flag)
131 {
132 if (opts->flag_regs[flag] >= 0) {
133 bt_ir(&opts->gen.code, 0, opts->flag_regs[flag], SZ_B);
134 } else {
135 bt_irdisp(&opts->gen.code, 0, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, SZ_B);
136 }
137 }
138
139 void or_flag_to_reg(m68k_options *opts, uint8_t flag, uint8_t reg)
140 {
141 if (opts->flag_regs[flag] >= 0) {
142 or_rr(&opts->gen.code, opts->flag_regs[flag], reg, SZ_B);
143 } else {
144 or_rdispr(&opts->gen.code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, reg, SZ_B);
145 }
146 }
147
148 void xor_flag_to_reg(m68k_options *opts, uint8_t flag, uint8_t reg)
149 {
150 if (opts->flag_regs[flag] >= 0) {
151 xor_rr(&opts->gen.code, opts->flag_regs[flag], reg, SZ_B);
152 } else {
153 xor_rdispr(&opts->gen.code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, reg, SZ_B);
154 }
155 }
156
157 void xor_flag(m68k_options *opts, uint8_t val, uint8_t flag)
158 {
159 if (opts->flag_regs[flag] >= 0) {
160 xor_ir(&opts->gen.code, val, opts->flag_regs[flag], SZ_B);
161 } else {
162 xor_irdisp(&opts->gen.code, val, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, SZ_B);
163 }
164 }
165
166 void cmp_flags(m68k_options *opts, uint8_t flag1, uint8_t flag2)
167 {
168 code_info *code = &opts->gen.code;
169 if (opts->flag_regs[flag1] >= 0 && opts->flag_regs[flag2] >= 0) {
170 cmp_rr(code, opts->flag_regs[flag1], opts->flag_regs[flag2], SZ_B);
171 } else if(opts->flag_regs[flag1] >= 0 || opts->flag_regs[flag2] >= 0) {
172 if (opts->flag_regs[flag2] >= 0) {
173 uint8_t tmp = flag1;
174 flag1 = flag2;
175 flag2 = tmp;
176 }
177 cmp_rrdisp(code, opts->flag_regs[flag1], opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
178 } else {
179 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->gen.scratch1, SZ_B);
180 cmp_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
181 }
182 }
183
/*
 * Translate the source operand of a 68K instruction into an x86 effective
 * address description (ea). For memory addressing modes this emits host code
 * that computes the 68K address, performs the read, and leaves the value in
 * scratch1; ea then describes where the value (or immediate) lives.
 * Effective-address calculation cycles are charged here via cycles().
 */
void translate_m68k_src(m68kinst * inst, x86_ea * ea, m68k_options * opts)
{
	code_info *code = &opts->gen.code;
	int8_t reg = native_reg(&(inst->src), opts);
	uint8_t sec_reg;
	int32_t dec_amount,inc_amount;
	if (reg >= 0) {
		//source 68K register is allocated to a host register
		ea->mode = MODE_REG_DIRECT;
		if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
			//word ops targeting an address register use the source sign-extended to 32 bits
			movsx_rr(code, reg, opts->gen.scratch1, SZ_W, SZ_D);
			ea->base = opts->gen.scratch1;
		} else {
			ea->base = reg;
		}
		return;
	}
	switch (inst->src.addr_mode)
	{
	case MODE_REG:
	case MODE_AREG:
		//We only get one memory parameter, so if the dst operand is a register in memory,
		//we need to copy this to a temp register first
		reg = native_reg(&(inst->dst), opts);
		if (reg >= 0 || inst->dst.addr_mode == MODE_UNUSED || !(inst->dst.addr_mode == MODE_REG || inst->dst.addr_mode == MODE_AREG)
		    || inst->op == M68K_EXG) {

			ea->mode = MODE_REG_DISPLACE8;
			ea->base = opts->gen.context_reg;
			ea->disp = reg_offset(&(inst->src));
		} else {
			if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
				//word ops targeting an address register sign-extend the source
				movsx_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_W, SZ_D);
			} else {
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, inst->extra.size);
			}
			ea->mode = MODE_REG_DIRECT;
			ea->base = opts->gen.scratch1;
			//we're explicitly handling the areg dest here, so we exit immediately
			return;
		}
		break;
	case MODE_AREG_PREDEC:
		//byte-sized ops on A7 still move the stack pointer by 2 (keeps SP even)
		dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->src.params.regs.pri == 7 ? 2 : 1));
		cycles(&opts->gen, PREDEC_PENALTY);
		if (opts->aregs[inst->src.params.regs.pri] >= 0) {
			sub_ir(code, dec_amount, opts->aregs[inst->src.params.regs.pri], SZ_D);
		} else {
			sub_irdisp(code, dec_amount, opts->gen.context_reg, reg_offset(&(inst->src)), SZ_D);
		}
		//fallthrough: after the decrement, predec reads like plain indirect
	case MODE_AREG_INDIRECT:
	case MODE_AREG_POSTINC:
		if (opts->aregs[inst->src.params.regs.pri] >= 0) {
			mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
		} else {
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
		}
		//read the value at the address in scratch1
		m68k_read_size(opts, inst->extra.size);

		if (inst->src.addr_mode == MODE_AREG_POSTINC) {
			//byte-sized ops on A7 still move the stack pointer by 2 (keeps SP even)
			inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->src.params.regs.pri == 7 ? 2 : 1));
			if (opts->aregs[inst->src.params.regs.pri] >= 0) {
				add_ir(code, inc_amount, opts->aregs[inst->src.params.regs.pri], SZ_D);
			} else {
				add_irdisp(code, inc_amount, opts->gen.context_reg, reg_offset(&(inst->src)), SZ_D);
			}
		}
		ea->mode = MODE_REG_DIRECT;
		//NOTE(review): when the dst is also predec (and not a MOVE), translate_m68k_dst
		//restores the saved source into scratch2, so the ea points there instead
		ea->base = (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) ? opts->gen.scratch2 : opts->gen.scratch1;
		break;
	case MODE_AREG_DISPLACE:
		cycles(&opts->gen, BUS);
		if (opts->aregs[inst->src.params.regs.pri] >= 0) {
			mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
		} else {
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
		}
		add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
		m68k_read_size(opts, inst->extra.size);

		ea->mode = MODE_REG_DIRECT;
		ea->base = opts->gen.scratch1;
		break;
	case MODE_AREG_INDEX_DISP8:
		cycles(&opts->gen, 6);
		if (opts->aregs[inst->src.params.regs.pri] >= 0) {
			mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
		} else {
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
		}
		//decode the index register from the extension word: bit 4 selects
		//address vs data register, bit 0 selects long vs word index
		sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
		if (inst->src.params.regs.sec & 1) {
			//32-bit index register: add directly
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
				}
			}
		} else {
			//16-bit index register: sign extend into scratch2 first
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
				}
			}
			add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
		}
		if (inst->src.params.regs.displacement) {
			add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
		}
		m68k_read_size(opts, inst->extra.size);

		ea->mode = MODE_REG_DIRECT;
		ea->base = opts->gen.scratch1;
		break;
	case MODE_PC_DISPLACE:
		cycles(&opts->gen, BUS);
		//PC-relative base is the address of the extension word (inst->address + 2)
		mov_ir(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.scratch1, SZ_D);
		m68k_read_size(opts, inst->extra.size);

		ea->mode = MODE_REG_DIRECT;
		ea->base = opts->gen.scratch1;
		break;
	case MODE_PC_INDEX_DISP8:
		cycles(&opts->gen, 6);
		mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
		//decode the index register from the extension word (see AREG_INDEX_DISP8)
		sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
		if (inst->src.params.regs.sec & 1) {
			//32-bit index register: add directly
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
				}
			}
		} else {
			//16-bit index register: sign extend into scratch2 first
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
				}
			}
			add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
		}
		if (inst->src.params.regs.displacement) {
			add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
		}
		m68k_read_size(opts, inst->extra.size);

		ea->mode = MODE_REG_DIRECT;
		ea->base = opts->gen.scratch1;
		break;
	case MODE_ABSOLUTE:
	case MODE_ABSOLUTE_SHORT:
		//long absolute requires two extension-word fetches, short only one
		cycles(&opts->gen, inst->src.addr_mode == MODE_ABSOLUTE ? BUS*2 : BUS);
		mov_ir(code, inst->src.params.immed, opts->gen.scratch1, SZ_D);
		m68k_read_size(opts, inst->extra.size);

		ea->mode = MODE_REG_DIRECT;
		ea->base = opts->gen.scratch1;
		break;
	case MODE_IMMEDIATE:
	case MODE_IMMEDIATE_WORD:
		if (inst->variant != VAR_QUICK) {
			//quick variants encode the immediate in the opcode; others fetch it
			cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG && inst->src.addr_mode == MODE_IMMEDIATE) ? BUS*2 : BUS);
		}
		ea->mode = MODE_IMMED;
		ea->disp = inst->src.params.immed;
		if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD && ea->disp & 0x8000) {
			//sign extend word immediates for address register destinations
			ea->disp |= 0xFFFF0000;
		}
		return;
	default:
		m68k_disasm(inst, disasm_buf);
		printf("%X: %s\naddress mode %d not implemented (src)\n", inst->address, disasm_buf, inst->src.addr_mode);
		exit(1);
	}
	//common tail: word ops targeting an address register sign extend the source
	if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
		if (ea->mode == MODE_REG_DIRECT) {
			movsx_rr(code, ea->base, opts->gen.scratch1, SZ_W, SZ_D);
		} else {
			movsx_rdispr(code, ea->base, ea->disp, opts->gen.scratch1, SZ_W, SZ_D);
			ea->mode = MODE_REG_DIRECT;
		}
		ea->base = opts->gen.scratch1;
	}
}
397
398 void translate_m68k_dst(m68kinst * inst, x86_ea * ea, m68k_options * opts, uint8_t fake_read)
399 {
400 code_info *code = &opts->gen.code;
401 int8_t reg = native_reg(&(inst->dst), opts), sec_reg;
402 int32_t dec_amount, inc_amount;
403 if (reg >= 0) {
404 ea->mode = MODE_REG_DIRECT;
405 ea->base = reg;
406 return;
407 }
408 switch (inst->dst.addr_mode)
409 {
410 case MODE_REG:
411 case MODE_AREG:
412 ea->mode = MODE_REG_DISPLACE8;
413 ea->base = opts->gen.context_reg;
414 ea->disp = reg_offset(&(inst->dst));
415 break;
416 case MODE_AREG_PREDEC:
417 if (inst->src.addr_mode == MODE_AREG_PREDEC) {
418 push_r(code, opts->gen.scratch1);
419 }
420 dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
421 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
422 sub_ir(code, dec_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
423 } else {
424 sub_irdisp(code, dec_amount, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
425 }
426 case MODE_AREG_INDIRECT:
427 case MODE_AREG_POSTINC:
428 if (fake_read) {
429 cycles(&opts->gen, inst->extra.size == OPSIZE_LONG ? 8 : 4);
430 } else {
431 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
432 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch1, SZ_D);
433 } else {
434 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch1, SZ_D);
435 }
436 m68k_read_size(opts, inst->extra.size);
437 }
438 if (inst->src.addr_mode == MODE_AREG_PREDEC) {
439 //restore src operand to opts->gen.scratch2
440 pop_r(code, opts->gen.scratch2);
441 } else {
442 //save reg value in opts->gen.scratch2 so we can use it to save the result in memory later
443 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
444 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
445 } else {
446 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
447 }
448 }
449
450 if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
451 inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
452 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
453 add_ir(code, inc_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
454 } else {
455 add_irdisp(code, inc_amount, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
456 }
457 }
458 ea->mode = MODE_REG_DIRECT;
459 ea->base = opts->gen.scratch1;
460 break;
461 case MODE_AREG_DISPLACE:
462 cycles(&opts->gen, fake_read ? BUS+(inst->extra.size == OPSIZE_LONG ? BUS*2 : BUS) : BUS);
463 reg = fake_read ? opts->gen.scratch2 : opts->gen.scratch1;
464 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
465 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], reg, SZ_D);
466 } else {
467 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), reg, SZ_D);
468 }
469 add_ir(code, inst->dst.params.regs.displacement, reg, SZ_D);
470 if (!fake_read) {
471 push_r(code, opts->gen.scratch1);
472 m68k_read_size(opts, inst->extra.size);
473 pop_r(code, opts->gen.scratch2);
474 }
475 ea->mode = MODE_REG_DIRECT;
476 ea->base = opts->gen.scratch1;
477 break;
478 case MODE_AREG_INDEX_DISP8:
479 cycles(&opts->gen, fake_read ? (6 + inst->extra.size == OPSIZE_LONG ? 8 : 4) : 6);
480 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
481 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch1, SZ_D);
482 } else {
483 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch1, SZ_D);
484 }
485 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
486 if (inst->dst.params.regs.sec & 1) {
487 if (inst->dst.params.regs.sec & 0x10) {
488 if (opts->aregs[sec_reg] >= 0) {
489 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
490 } else {
491 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
492 }
493 } else {
494 if (opts->dregs[sec_reg] >= 0) {
495 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
496 } else {
497 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
498 }
499 }
500 } else {
501 if (inst->dst.params.regs.sec & 0x10) {
502 if (opts->aregs[sec_reg] >= 0) {
503 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
504 } else {
505 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
506 }
507 } else {
508 if (opts->dregs[sec_reg] >= 0) {
509 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
510 } else {
511 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
512 }
513 }
514 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
515 }
516 if (inst->dst.params.regs.displacement) {
517 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch1, SZ_D);
518 }
519 if (fake_read) {
520 mov_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
521 } else {
522 push_r(code, opts->gen.scratch1);
523 m68k_read_size(opts, inst->extra.size);
524 pop_r(code, opts->gen.scratch2);
525 }
526 ea->mode = MODE_REG_DIRECT;
527 ea->base = opts->gen.scratch1;
528 break;
529 case MODE_PC_DISPLACE:
530 cycles(&opts->gen, fake_read ? BUS+(inst->extra.size == OPSIZE_LONG ? BUS*2 : BUS) : BUS);
531 mov_ir(code, inst->dst.params.regs.displacement + inst->address+2, fake_read ? opts->gen.scratch2 : opts->gen.scratch1, SZ_D);
532 if (!fake_read) {
533 push_r(code, opts->gen.scratch1);
534 m68k_read_size(opts, inst->extra.size);
535 pop_r(code, opts->gen.scratch2);
536 }
537 ea->mode = MODE_REG_DIRECT;
538 ea->base = opts->gen.scratch1;
539 break;
540 case MODE_PC_INDEX_DISP8:
541 cycles(&opts->gen, fake_read ? (6 + inst->extra.size == OPSIZE_LONG ? 8 : 4) : 6);
542 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
543 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
544 if (inst->dst.params.regs.sec & 1) {
545 if (inst->dst.params.regs.sec & 0x10) {
546 if (opts->aregs[sec_reg] >= 0) {
547 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
548 } else {
549 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
550 }
551 } else {
552 if (opts->dregs[sec_reg] >= 0) {
553 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
554 } else {
555 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
556 }
557 }
558 } else {
559 if (inst->dst.params.regs.sec & 0x10) {
560 if (opts->aregs[sec_reg] >= 0) {
561 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
562 } else {
563 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
564 }
565 } else {
566 if (opts->dregs[sec_reg] >= 0) {
567 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
568 } else {
569 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
570 }
571 }
572 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
573 }
574 if (inst->dst.params.regs.displacement) {
575 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch1, SZ_D);
576 }
577 if (fake_read) {
578 mov_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
579 } else {
580 push_r(code, opts->gen.scratch1);
581 m68k_read_size(opts, inst->extra.size);
582 pop_r(code, opts->gen.scratch2);
583 }
584 ea->mode = MODE_REG_DIRECT;
585 ea->base = opts->gen.scratch1;
586 break;
587 case MODE_ABSOLUTE:
588 case MODE_ABSOLUTE_SHORT:
589 //Add cycles for reading address from instruction stream
590 cycles(&opts->gen, (inst->dst.addr_mode == MODE_ABSOLUTE ? BUS*2 : BUS) + (fake_read ? (inst->extra.size == OPSIZE_LONG ? BUS*2 : BUS) : 0));
591 mov_ir(code, inst->dst.params.immed, fake_read ? opts->gen.scratch2 : opts->gen.scratch1, SZ_D);
592 if (!fake_read) {
593 push_r(code, opts->gen.scratch1);
594 m68k_read_size(opts, inst->extra.size);
595 pop_r(code, opts->gen.scratch2);
596 }
597 ea->mode = MODE_REG_DIRECT;
598 ea->base = opts->gen.scratch1;
599 break;
600 default:
601 m68k_disasm(inst, disasm_buf);
602 printf("%X: %s\naddress mode %d not implemented (dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
603 exit(1);
604 }
605 }
606
607 void m68k_save_result(m68kinst * inst, m68k_options * opts)
608 {
609 code_info *code = &opts->gen.code;
610 if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG) {
611 if (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->src.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) {
612 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
613 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
614 } else {
615 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
616 }
617 }
618 switch (inst->extra.size)
619 {
620 case OPSIZE_BYTE:
621 call(code, opts->write_8);
622 break;
623 case OPSIZE_WORD:
624 call(code, opts->write_16);
625 break;
626 case OPSIZE_LONG:
627 call(code, opts->write_32_lowfirst);
628 break;
629 }
630 }
631 }
632
633 void translate_m68k_move(m68k_options * opts, m68kinst * inst)
634 {
635 code_info *code = &opts->gen.code;
636 int8_t reg, flags_reg, sec_reg;
637 uint8_t dir = 0;
638 int32_t offset;
639 int32_t inc_amount, dec_amount;
640 x86_ea src;
641 translate_m68k_src(inst, &src, opts);
642 reg = native_reg(&(inst->dst), opts);
643 if (inst->dst.addr_mode != MODE_AREG) {
644 //update statically set flags
645 set_flag(opts, 0, FLAG_V);
646 set_flag(opts, 0, FLAG_C);
647 }
648
649 if (inst->dst.addr_mode != MODE_AREG) {
650 if (src.mode == MODE_REG_DIRECT) {
651 flags_reg = src.base;
652 } else {
653 if (reg >= 0) {
654 flags_reg = reg;
655 } else {
656 if(src.mode == MODE_REG_DISPLACE8) {
657 mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
658 } else {
659 mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
660 }
661 src.mode = MODE_REG_DIRECT;
662 flags_reg = src.base = opts->gen.scratch1;
663 }
664 }
665 }
666 uint8_t size = inst->extra.size;
667 switch(inst->dst.addr_mode)
668 {
669 case MODE_AREG:
670 size = OPSIZE_LONG;
671 case MODE_REG:
672 if (reg >= 0) {
673 if (src.mode == MODE_REG_DIRECT) {
674 mov_rr(code, src.base, reg, size);
675 } else if (src.mode == MODE_REG_DISPLACE8) {
676 mov_rdispr(code, src.base, src.disp, reg, size);
677 } else {
678 mov_ir(code, src.disp, reg, size);
679 }
680 } else if(src.mode == MODE_REG_DIRECT) {
681 mov_rrdisp(code, src.base, opts->gen.context_reg, reg_offset(&(inst->dst)), size);
682 } else {
683 mov_irdisp(code, src.disp, opts->gen.context_reg, reg_offset(&(inst->dst)), size);
684 }
685 if (inst->dst.addr_mode != MODE_AREG) {
686 cmp_ir(code, 0, flags_reg, size);
687 set_flag_cond(opts, CC_Z, FLAG_Z);
688 set_flag_cond(opts, CC_S, FLAG_N);
689 }
690 break;
691 case MODE_AREG_PREDEC:
692 dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
693 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
694 sub_ir(code, dec_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
695 } else {
696 sub_irdisp(code, dec_amount, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
697 }
698 case MODE_AREG_INDIRECT:
699 case MODE_AREG_POSTINC:
700 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
701 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
702 } else {
703 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
704 }
705 if (src.mode == MODE_REG_DIRECT) {
706 if (src.base != opts->gen.scratch1) {
707 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
708 }
709 } else if (src.mode == MODE_REG_DISPLACE8) {
710 mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
711 } else {
712 mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
713 }
714 if (inst->dst.addr_mode != MODE_AREG) {
715 cmp_ir(code, 0, flags_reg, inst->extra.size);
716 set_flag_cond(opts, CC_Z, FLAG_Z);
717 set_flag_cond(opts, CC_S, FLAG_N);
718 }
719 m68k_write_size(opts, inst->extra.size);
720
721 if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
722 inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
723 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
724 add_ir(code, inc_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
725 } else {
726 add_irdisp(code, inc_amount, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
727 }
728 }
729 break;
730 case MODE_AREG_DISPLACE:
731 cycles(&opts->gen, BUS);
732 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
733 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
734 } else {
735 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
736 }
737 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
738 if (src.mode == MODE_REG_DIRECT) {
739 if (src.base != opts->gen.scratch1) {
740 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
741 }
742 } else if (src.mode == MODE_REG_DISPLACE8) {
743 mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
744 } else {
745 mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
746 }
747 if (inst->dst.addr_mode != MODE_AREG) {
748 cmp_ir(code, 0, flags_reg, inst->extra.size);
749 set_flag_cond(opts, CC_Z, FLAG_Z);
750 set_flag_cond(opts, CC_S, FLAG_N);
751 }
752 m68k_write_size(opts, inst->extra.size);
753 break;
754 case MODE_AREG_INDEX_DISP8:
755 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
756 if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
757 mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
758 } else {
759 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
760 }
761 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
762 if (inst->dst.params.regs.sec & 1) {
763 if (inst->dst.params.regs.sec & 0x10) {
764 if (opts->aregs[sec_reg] >= 0) {
765 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
766 } else {
767 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
768 }
769 } else {
770 if (opts->dregs[sec_reg] >= 0) {
771 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
772 } else {
773 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
774 }
775 }
776 } else {
777 if (src.base == opts->gen.scratch1) {
778 push_r(code, opts->gen.scratch1);
779 }
780 if (inst->dst.params.regs.sec & 0x10) {
781 if (opts->aregs[sec_reg] >= 0) {
782 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
783 } else {
784 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
785 }
786 } else {
787 if (opts->dregs[sec_reg] >= 0) {
788 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
789 } else {
790 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
791 }
792 }
793 add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
794 if (src.base == opts->gen.scratch1) {
795 pop_r(code, opts->gen.scratch1);
796 }
797 }
798 if (inst->dst.params.regs.displacement) {
799 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
800 }
801 if (src.mode == MODE_REG_DIRECT) {
802 if (src.base != opts->gen.scratch1) {
803 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
804 }
805 } else if (src.mode == MODE_REG_DISPLACE8) {
806 mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
807 } else {
808 mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
809 }
810 if (inst->dst.addr_mode != MODE_AREG) {
811 cmp_ir(code, 0, flags_reg, inst->extra.size);
812 set_flag_cond(opts, CC_Z, FLAG_Z);
813 set_flag_cond(opts, CC_S, FLAG_N);
814 }
815 m68k_write_size(opts, inst->extra.size);
816 break;
817 case MODE_PC_DISPLACE:
818 cycles(&opts->gen, BUS);
819 mov_ir(code, inst->dst.params.regs.displacement + inst->address+2, opts->gen.scratch2, SZ_D);
820 if (src.mode == MODE_REG_DIRECT) {
821 if (src.base != opts->gen.scratch1) {
822 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
823 }
824 } else if (src.mode == MODE_REG_DISPLACE8) {
825 mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
826 } else {
827 mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
828 }
829 if (inst->dst.addr_mode != MODE_AREG) {
830 cmp_ir(code, 0, flags_reg, inst->extra.size);
831 set_flag_cond(opts, CC_Z, FLAG_Z);
832 set_flag_cond(opts, CC_S, FLAG_N);
833 }
834 m68k_write_size(opts, inst->extra.size);
835 break;
836 case MODE_PC_INDEX_DISP8:
837 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
838 mov_ir(code, inst->address, opts->gen.scratch2, SZ_D);
839 sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
840 if (inst->dst.params.regs.sec & 1) {
841 if (inst->dst.params.regs.sec & 0x10) {
842 if (opts->aregs[sec_reg] >= 0) {
843 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
844 } else {
845 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
846 }
847 } else {
848 if (opts->dregs[sec_reg] >= 0) {
849 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
850 } else {
851 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
852 }
853 }
854 } else {
855 if (src.base == opts->gen.scratch1) {
856 push_r(code, opts->gen.scratch1);
857 }
858 if (inst->dst.params.regs.sec & 0x10) {
859 if (opts->aregs[sec_reg] >= 0) {
860 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
861 } else {
862 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
863 }
864 } else {
865 if (opts->dregs[sec_reg] >= 0) {
866 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
867 } else {
868 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
869 }
870 }
871 add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
872 if (src.base == opts->gen.scratch1) {
873 pop_r(code, opts->gen.scratch1);
874 }
875 }
876 if (inst->dst.params.regs.displacement) {
877 add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
878 }
879 if (src.mode == MODE_REG_DIRECT) {
880 if (src.base != opts->gen.scratch1) {
881 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
882 }
883 } else if (src.mode == MODE_REG_DISPLACE8) {
884 mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
885 } else {
886 mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
887 }
888 if (inst->dst.addr_mode != MODE_AREG) {
889 cmp_ir(code, 0, flags_reg, inst->extra.size);
890 set_flag_cond(opts, CC_Z, FLAG_Z);
891 set_flag_cond(opts, CC_S, FLAG_N);
892 }
893 m68k_write_size(opts, inst->extra.size);
894 break;
895 case MODE_ABSOLUTE:
896 case MODE_ABSOLUTE_SHORT:
897 if (src.mode == MODE_REG_DIRECT) {
898 if (src.base != opts->gen.scratch1) {
899 mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
900 }
901 } else if (src.mode == MODE_REG_DISPLACE8) {
902 mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
903 } else {
904 mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
905 }
906 if (inst->dst.addr_mode == MODE_ABSOLUTE) {
907 cycles(&opts->gen, BUS*2);
908 } else {
909 cycles(&opts->gen, BUS);
910 }
911 mov_ir(code, inst->dst.params.immed, opts->gen.scratch2, SZ_D);
912 if (inst->dst.addr_mode != MODE_AREG) {
913 cmp_ir(code, 0, flags_reg, inst->extra.size);
914 set_flag_cond(opts, CC_Z, FLAG_Z);
915 set_flag_cond(opts, CC_S, FLAG_N);
916 }
917 m68k_write_size(opts, inst->extra.size);
918 break;
919 default:
920 m68k_disasm(inst, disasm_buf);
921 printf("%X: %s\naddress mode %d not implemented (move dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
922 exit(1);
923 }
924
925 //add cycles for prefetch
926 cycles(&opts->gen, BUS);
927 }
928
//Translates the M68K MOVEM instruction (move multiple registers to/from memory).
//The effective address is computed into a scratch register up front, then one
//write (reg-to-mem) or read (mem-to-reg) is emitted per register whose bit is
//set in the 16-bit register mask, advancing the address between accesses.
void translate_m68k_movem(m68k_options * opts, m68kinst * inst)
{
	code_info *code = &opts->gen.code;
	int8_t bit,reg,sec_reg;
	uint8_t early_cycles;
	if(inst->src.addr_mode == MODE_REG) {
		//reg to mem
		early_cycles = 8;
		int8_t dir;
		switch (inst->dst.addr_mode)
		{
		case MODE_AREG_INDIRECT:
		case MODE_AREG_PREDEC:
			//base address comes straight from the address register
			if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
				mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
			} else {
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
			}
			break;
		case MODE_AREG_DISPLACE:
			early_cycles += BUS;
			//NOTE(review): dead store -- reg is unconditionally reassigned below before the loop
			reg = opts->gen.scratch2;
			if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
				mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
			} else {
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
			}
			add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
			break;
		case MODE_AREG_INDEX_DISP8:
			early_cycles += 6;
			if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
				mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
			} else {
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
			}
			//extension word decode: bit 4 selects address vs data register as index,
			//bit 0 selects long (add directly) vs word (sign extend first)
			sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
			if (inst->dst.params.regs.sec & 1) {
				//index register used at long width
				if (inst->dst.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
					}
				}
			} else {
				//index register used at word width: sign extend into scratch1, then add
				if (inst->dst.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
					}
				}
				add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
			}
			if (inst->dst.params.regs.displacement) {
				add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
			}
			break;
		case MODE_PC_DISPLACE:
			early_cycles += BUS;
			//PC-relative: the 68K PC at EA-calculation time is instruction address + 2
			mov_ir(code, inst->dst.params.regs.displacement + inst->address+2, opts->gen.scratch2, SZ_D);
			break;
		case MODE_PC_INDEX_DISP8:
			early_cycles += 6;
			mov_ir(code, inst->address+2, opts->gen.scratch2, SZ_D);
			sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
			if (inst->dst.params.regs.sec & 1) {
				if (inst->dst.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
					}
				}
			} else {
				if (inst->dst.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
					}
				}
				add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
			}
			if (inst->dst.params.regs.displacement) {
				add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
			}
			break;
		case MODE_ABSOLUTE:
			early_cycles += 4;
			//fallthrough: absolute long pays the short cost plus an extra 4 cycles
		case MODE_ABSOLUTE_SHORT:
			early_cycles += 4;
			mov_ir(code, inst->dst.params.immed, opts->gen.scratch2, SZ_D);
			break;
		default:
			m68k_disasm(inst, disasm_buf);
			printf("%X: %s\naddress mode %d not implemented (movem dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
			exit(1);
		}
		//predecrement mode walks the mask in the opposite direction: reg 15 (A7)
		//corresponds to mask bit 0 and registers are stored at descending addresses
		if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
			reg = 15;
			dir = -1;
		} else {
			reg = 0;
			dir = 1;
		}
		cycles(&opts->gen, early_cycles);
		for(bit=0; reg < 16 && reg >= 0; reg += dir, bit++) {
			if (inst->src.params.immed & (1 << bit)) {
				if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
					sub_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2, SZ_D);
				}
				//preserve the running address across the write call, which clobbers scratch2
				push_r(code, opts->gen.scratch2);
				//regs 8-15 are address registers, 0-7 are data registers
				if (reg > 7) {
					if (opts->aregs[reg-8] >= 0) {
						mov_rr(code, opts->aregs[reg-8], opts->gen.scratch1, inst->extra.size);
					} else {
						mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * (reg-8), opts->gen.scratch1, inst->extra.size);
					}
				} else {
					if (opts->dregs[reg] >= 0) {
						mov_rr(code, opts->dregs[reg], opts->gen.scratch1, inst->extra.size);
					} else {
						mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * (reg), opts->gen.scratch1, inst->extra.size);
					}
				}
				if (inst->extra.size == OPSIZE_LONG) {
					call(code, opts->write_32_lowfirst);
				} else {
					call(code, opts->write_16);
				}
				pop_r(code, opts->gen.scratch2);
				if (inst->dst.addr_mode != MODE_AREG_PREDEC) {
					add_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2, SZ_D);
				}
			}
		}
		//predecrement writes the final (lowest) address back to the base register
		if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
			if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
				mov_rr(code, opts->gen.scratch2, opts->aregs[inst->dst.params.regs.pri], SZ_D);
			} else {
				mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
			}
		}
	} else {
		//mem to reg
		early_cycles = 4;
		switch (inst->src.addr_mode)
		{
		case MODE_AREG_INDIRECT:
		case MODE_AREG_POSTINC:
			if (opts->aregs[inst->src.params.regs.pri] >= 0) {
				mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
			} else {
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
			}
			break;
		case MODE_AREG_DISPLACE:
			early_cycles += BUS;
			//NOTE(review): dead store -- reg is unconditionally reassigned by the read loop below
			reg = opts->gen.scratch2;
			if (opts->aregs[inst->src.params.regs.pri] >= 0) {
				mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
			} else {
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
			}
			add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
			break;
		case MODE_AREG_INDEX_DISP8:
			early_cycles += 6;
			if (opts->aregs[inst->src.params.regs.pri] >= 0) {
				mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
			} else {
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
			}
			//extension word decode, mirroring the dst path but building into scratch1
			sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
			if (inst->src.params.regs.sec & 1) {
				if (inst->src.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
					}
				}
			} else {
				if (inst->src.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
					}
				}
				add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
			}
			if (inst->src.params.regs.displacement) {
				add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
			}
			break;
		case MODE_PC_DISPLACE:
			early_cycles += BUS;
			mov_ir(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.scratch1, SZ_D);
			break;
		case MODE_PC_INDEX_DISP8:
			early_cycles += 6;
			mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
			sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
			if (inst->src.params.regs.sec & 1) {
				if (inst->src.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
					} else {
						add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
					}
				}
			} else {
				if (inst->src.params.regs.sec & 0x10) {
					if (opts->aregs[sec_reg] >= 0) {
						movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
					}
				} else {
					if (opts->dregs[sec_reg] >= 0) {
						movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
					} else {
						movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
					}
				}
				add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
			}
			if (inst->src.params.regs.displacement) {
				add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
			}
			break;
		case MODE_ABSOLUTE:
			early_cycles += 4;
			//fallthrough: absolute long pays the short cost plus an extra 4 cycles
		case MODE_ABSOLUTE_SHORT:
			early_cycles += 4;
			mov_ir(code, inst->src.params.immed, opts->gen.scratch1, SZ_D);
			break;
		default:
			m68k_disasm(inst, disasm_buf);
			printf("%X: %s\naddress mode %d not implemented (movem src)\n", inst->address, disasm_buf, inst->src.addr_mode);
			exit(1);
		}
		cycles(&opts->gen, early_cycles);
		//mem-to-reg always walks the mask forward: bit 0 = D0, bit 15 = A7
		for(reg = 0; reg < 16; reg ++) {
			if (inst->dst.params.immed & (1 << reg)) {
				//preserve the running address across the read call, which clobbers scratch1
				push_r(code, opts->gen.scratch1);
				if (inst->extra.size == OPSIZE_LONG) {
					call(code, opts->read_32);
				} else {
					call(code, opts->read_16);
				}
				//word-sized MOVEM sign extends each value to 32 bits before storing
				if (inst->extra.size == OPSIZE_WORD) {
					movsx_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_W, SZ_D);
				}
				if (reg > 7) {
					if (opts->aregs[reg-8] >= 0) {
						mov_rr(code, opts->gen.scratch1, opts->aregs[reg-8], SZ_D);
					} else {
						mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * (reg-8), SZ_D);
					}
				} else {
					if (opts->dregs[reg] >= 0) {
						mov_rr(code, opts->gen.scratch1, opts->dregs[reg], SZ_D);
					} else {
						mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * (reg), SZ_D);
					}
				}
				pop_r(code, opts->gen.scratch1);
				add_ir(code, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch1, SZ_D);
			}
		}
		//postincrement writes the final address back to the base register
		if (inst->src.addr_mode == MODE_AREG_POSTINC) {
			if (opts->aregs[inst->src.params.regs.pri] >= 0) {
				mov_rr(code, opts->gen.scratch1, opts->aregs[inst->src.params.regs.pri], SZ_D);
			} else {
				mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->src)), SZ_D);
			}
		}
	}
	//prefetch
	cycles(&opts->gen, 4);
}
1256
1257 void translate_m68k_clr(m68k_options * opts, m68kinst * inst)
1258 {
1259 code_info *code = &opts->gen.code;
1260 set_flag(opts, 0, FLAG_N);
1261 set_flag(opts, 0, FLAG_V);
1262 set_flag(opts, 0, FLAG_C);
1263 set_flag(opts, 1, FLAG_Z);
1264 int8_t reg = native_reg(&(inst->dst), opts);
1265 if (reg >= 0) {
1266 cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 6 : 4));
1267 xor_rr(code, reg, reg, inst->extra.size);
1268 return;
1269 }
1270 x86_ea dst_op;
1271 translate_m68k_dst(inst, &dst_op, opts, 1);
1272 if (dst_op.mode == MODE_REG_DIRECT) {
1273 xor_rr(code, dst_op.base, dst_op.base, inst->extra.size);
1274 } else {
1275 mov_irdisp(code, 0, dst_op.base, dst_op.disp, inst->extra.size);
1276 }
1277 m68k_save_result(inst, opts);
1278 }
1279
1280 void translate_m68k_ext(m68k_options * opts, m68kinst * inst)
1281 {
1282 code_info *code = &opts->gen.code;
1283 x86_ea dst_op;
1284 uint8_t dst_size = inst->extra.size;
1285 inst->extra.size--;
1286 translate_m68k_dst(inst, &dst_op, opts, 0);
1287 if (dst_op.mode == MODE_REG_DIRECT) {
1288 movsx_rr(code, dst_op.base, dst_op.base, inst->extra.size, dst_size);
1289 cmp_ir(code, 0, dst_op.base, dst_size);
1290 } else {
1291 movsx_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch1, inst->extra.size, dst_size);
1292 cmp_ir(code, 0, opts->gen.scratch1, dst_size);
1293 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, dst_size);
1294 }
1295 inst->extra.size = dst_size;
1296 set_flag(opts, 0, FLAG_V);
1297 set_flag(opts, 0, FLAG_C);
1298 set_flag_cond(opts, CC_Z, FLAG_Z);
1299 set_flag_cond(opts, CC_S, FLAG_N);
1300 //M68K EXT only operates on registers so no need for a call to save result here
1301 }
1302
//Translates the M68K LEA instruction: compute the effective address of the
//source operand and load it into the destination address register. No memory
//access to the operand itself is performed and no flags are affected.
void translate_m68k_lea(m68k_options * opts, m68kinst * inst)
{
	code_info *code = &opts->gen.code;
	//dst_reg is the host register holding the destination areg, or negative if
	//that areg lives in the context struct
	int8_t dst_reg = native_reg(&(inst->dst), opts), sec_reg;
	switch(inst->src.addr_mode)
	{
	case MODE_AREG_INDIRECT:
		cycles(&opts->gen, BUS);
		if (opts->aregs[inst->src.params.regs.pri] >= 0) {
			if (dst_reg >= 0) {
				mov_rr(code, opts->aregs[inst->src.params.regs.pri], dst_reg, SZ_D);
			} else {
				mov_rrdisp(code, opts->aregs[inst->src.params.regs.pri], opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
			}
		} else {
			if (dst_reg >= 0) {
				mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, dst_reg, SZ_D);
			} else {
				//both registers live in the context struct; bounce through scratch1
				mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
				mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
			}
		}
		break;
	case MODE_AREG_DISPLACE:
		cycles(&opts->gen, 8);
		if (dst_reg >= 0) {
			//when source and destination are the same areg, skip the copy and
			//just add the displacement in place
			if (inst->src.params.regs.pri != inst->dst.params.regs.pri) {
				if (opts->aregs[inst->src.params.regs.pri] >= 0) {
					mov_rr(code, opts->aregs[inst->src.params.regs.pri], dst_reg, SZ_D);
				} else {
					mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), dst_reg, SZ_D);
				}
			}
			add_ir(code, inst->src.params.regs.displacement, dst_reg, SZ_D);
		} else {
			if (inst->src.params.regs.pri != inst->dst.params.regs.pri) {
				if (opts->aregs[inst->src.params.regs.pri] >= 0) {
					mov_rrdisp(code, opts->aregs[inst->src.params.regs.pri], opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
				} else {
					mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
					mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
				}
			}
			add_irdisp(code, inst->src.params.regs.displacement, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
		}
		break;
	case MODE_AREG_INDEX_DISP8:
		cycles(&opts->gen, 12);
		//build the address in scratch2: base areg, plus index register
		//(long, or word sign extended per the extension word), plus displacement
		if (opts->aregs[inst->src.params.regs.pri] >= 0) {
			mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch2, SZ_D);
		} else {
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch2, SZ_D);
		}
		sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
		if (inst->src.params.regs.sec & 1) {
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					add_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					add_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_D);
				}
			}
		} else {
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_W, SZ_D);
				}
			}
			add_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_D);
		}
		if (inst->src.params.regs.displacement) {
			add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch2, SZ_D);
		}
		if (dst_reg >= 0) {
			mov_rr(code, opts->gen.scratch2, dst_reg, SZ_D);
		} else {
			mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
		}
		break;
	case MODE_PC_DISPLACE:
		cycles(&opts->gen, 8);
		//PC-relative address is a translate-time constant: PC value is address + 2
		if (dst_reg >= 0) {
			mov_ir(code, inst->src.params.regs.displacement + inst->address+2, dst_reg, SZ_D);
		} else {
			mov_irdisp(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
		}
		break;
	case MODE_PC_INDEX_DISP8:
		cycles(&opts->gen, BUS*3);
		//same as AREG_INDEX_DISP8, but starting from the constant PC value and
		//building into scratch1 (scratch2 serves as the index temporary here)
		mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
		sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
		if (inst->src.params.regs.sec & 1) {
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
				} else {
					add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
				}
			}
		} else {
			if (inst->src.params.regs.sec & 0x10) {
				if (opts->aregs[sec_reg] >= 0) {
					movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
				}
			} else {
				if (opts->dregs[sec_reg] >= 0) {
					movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
				} else {
					movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
				}
			}
			add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
		}
		if (inst->src.params.regs.displacement) {
			add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
		}
		if (dst_reg >= 0) {
			mov_rr(code, opts->gen.scratch1, dst_reg, SZ_D);
		} else {
			mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
		}
		break;
	case MODE_ABSOLUTE:
	case MODE_ABSOLUTE_SHORT:
		cycles(&opts->gen, (inst->src.addr_mode == MODE_ABSOLUTE) ? BUS * 3 : BUS * 2);
		if (dst_reg >= 0) {
			mov_ir(code, inst->src.params.immed, dst_reg, SZ_D);
		} else {
			mov_irdisp(code, inst->src.params.immed, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_D);
		}
		break;
	default:
		m68k_disasm(inst, disasm_buf);
		printf("%X: %s\naddress mode %d not implemented (lea src)\n", inst->address, disasm_buf, inst->src.addr_mode);
		exit(1);
	}
}
1462
1463 void translate_m68k_pea(m68k_options * opts, m68kinst * inst)
1464 {
1465 code_info *code = &opts->gen.code;
1466 uint8_t sec_reg;
1467 switch(inst->src.addr_mode)
1468 {
1469 case MODE_AREG_INDIRECT:
1470 cycles(&opts->gen, BUS);
1471 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1472 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1473 } else {
1474 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
1475 }
1476 break;
1477 case MODE_AREG_DISPLACE:
1478 cycles(&opts->gen, 8);
1479 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1480 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1481 } else {
1482 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1483 }
1484 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1485 break;
1486 case MODE_AREG_INDEX_DISP8:
1487 cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
1488 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1489 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1490 } else {
1491 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1492 }
1493 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1494 if (inst->src.params.regs.sec & 1) {
1495 if (inst->src.params.regs.sec & 0x10) {
1496 if (opts->aregs[sec_reg] >= 0) {
1497 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1498 } else {
1499 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1500 }
1501 } else {
1502 if (opts->dregs[sec_reg] >= 0) {
1503 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1504 } else {
1505 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1506 }
1507 }
1508 } else {
1509 if (inst->src.params.regs.sec & 0x10) {
1510 if (opts->aregs[sec_reg] >= 0) {
1511 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1512 } else {
1513 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1514 }
1515 } else {
1516 if (opts->dregs[sec_reg] >= 0) {
1517 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1518 } else {
1519 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1520 }
1521 }
1522 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1523 }
1524 if (inst->src.params.regs.displacement) {
1525 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1526 }
1527 break;
1528 case MODE_PC_DISPLACE:
1529 cycles(&opts->gen, 8);
1530 mov_ir(code, inst->src.params.regs.displacement + inst->address+2, opts->gen.scratch1, SZ_D);
1531 break;
1532 case MODE_ABSOLUTE:
1533 case MODE_ABSOLUTE_SHORT:
1534 cycles(&opts->gen, (inst->src.addr_mode == MODE_ABSOLUTE) ? BUS * 3 : BUS * 2);
1535 mov_ir(code, inst->src.params.immed, opts->gen.scratch1, SZ_D);
1536 break;
1537 default:
1538 m68k_disasm(inst, disasm_buf);
1539 printf("%X: %s\naddress mode %d not implemented (lea src)\n", inst->address, disasm_buf, inst->src.addr_mode);
1540 exit(1);
1541 }
1542 sub_ir(code, 4, opts->aregs[7], SZ_D);
1543 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1544 call(code, opts->write_32_lowfirst);
1545 }
1546
1547 void translate_m68k_bsr(m68k_options * opts, m68kinst * inst)
1548 {
1549 code_info *code = &opts->gen.code;
1550 int32_t disp = inst->src.params.immed;
1551 uint32_t after = inst->address + (inst->variant == VAR_BYTE ? 2 : 4);
1552 //TODO: Add cycles in the right place relative to pushing the return address on the stack
1553 cycles(&opts->gen, 10);
1554 mov_ir(code, after, opts->gen.scratch1, SZ_D);
1555 sub_ir(code, 4, opts->aregs[7], SZ_D);
1556 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1557 call(code, opts->write_32_highfirst);
1558 code_ptr dest_addr = get_native_address(opts->gen.native_code_map, (inst->address+2) + disp);
1559 if (!dest_addr) {
1560 opts->gen.deferred = defer_address(opts->gen.deferred, (inst->address+2) + disp, code->cur + 1);
1561 //dummy address to be replaced later
1562 dest_addr = code->cur + 256;
1563 }
1564 jmp(code, dest_addr);
1565 }
1566
//Emits code that evaluates the M68K condition code cc and returns the x86
//condition code the caller should branch on. Each pair of inverse conditions
//shares the same flag-test code: the "inverted" member of the pair sets cond
//to CC_Z and then falls through into its counterpart, while the default CC_NZ
//is used for the non-inverted member.
uint8_t m68k_eval_cond(m68k_options * opts, uint8_t cc)
{
	uint8_t cond = CC_NZ;
	switch (cc)
	{
	case COND_HIGH:
		cond = CC_Z;
		//fallthrough
	case COND_LOW_SAME:
		//LS is true when C | Z is set; HI is its inverse
		flag_to_reg(opts, FLAG_Z, opts->gen.scratch1);
		or_flag_to_reg(opts, FLAG_C, opts->gen.scratch1);
		break;
	case COND_CARRY_CLR:
		cond = CC_Z;
		//fallthrough
	case COND_CARRY_SET:
		check_flag(opts, FLAG_C);
		break;
	case COND_NOT_EQ:
		cond = CC_Z;
		//fallthrough
	case COND_EQ:
		check_flag(opts, FLAG_Z);
		break;
	case COND_OVERF_CLR:
		cond = CC_Z;
		//fallthrough
	case COND_OVERF_SET:
		check_flag(opts, FLAG_V);
		break;
	case COND_PLUS:
		cond = CC_Z;
		//fallthrough
	case COND_MINUS:
		check_flag(opts, FLAG_N);
		break;
	case COND_GREATER_EQ:
		cond = CC_Z;
		//fallthrough
	case COND_LESS:
		//LT is true when N != V; GE is its inverse
		cmp_flags(opts, FLAG_N, FLAG_V);
		break;
	case COND_GREATER:
		cond = CC_Z;
		//fallthrough
	case COND_LESS_EQ:
		//LE is true when Z | (N ^ V) is set; GT is its inverse
		flag_to_reg(opts, FLAG_V, opts->gen.scratch1);
		xor_flag_to_reg(opts, FLAG_N, opts->gen.scratch1);
		or_flag_to_reg(opts, FLAG_Z, opts->gen.scratch1);
		break;
	}
	return cond;
}
1613
//Translates the M68K Bcc/BRA instructions. If the branch target has not been
//translated yet, a deferred-address record is created pointing at the
//displacement field of the emitted jump so it can be patched later.
void translate_m68k_bcc(m68k_options * opts, m68kinst * inst)
{
	code_info *code = &opts->gen.code;
	cycles(&opts->gen, 10);//TODO: Adjust this for branch not taken case
	int32_t disp = inst->src.params.immed;
	//branch displacement is relative to the instruction address + 2
	uint32_t after = inst->address + 2;
	code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + disp);
	if (inst->extra.cond == COND_TRUE) {
		//BRA: unconditional jmp
		if (!dest_addr) {
			//code->cur + 1 is where the jmp's displacement field will be emitted
			opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 1);
			//dummy address to be replaced later, make sure it generates a 4-byte displacement
			dest_addr = code->cur + 256;
		}
		jmp(code, dest_addr);
	} else {
		uint8_t cond = m68k_eval_cond(opts, inst->extra.cond);
		if (!dest_addr) {
			//code->cur + 2 is where the jcc's displacement field will be emitted
			opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 2);
			//dummy address to be replaced later, make sure it generates a 4-byte displacement
			dest_addr = code->cur + 256;
		}
		jcc(code, cond, dest_addr);
	}
}
1638
1639 void translate_m68k_scc(m68k_options * opts, m68kinst * inst)
1640 {
1641 code_info *code = &opts->gen.code;
1642 uint8_t cond = inst->extra.cond;
1643 x86_ea dst_op;
1644 inst->extra.size = OPSIZE_BYTE;
1645 translate_m68k_dst(inst, &dst_op, opts, 1);
1646 if (cond == COND_TRUE || cond == COND_FALSE) {
1647 if ((inst->dst.addr_mode == MODE_REG || inst->dst.addr_mode == MODE_AREG) && inst->extra.cond == COND_TRUE) {
1648 cycles(&opts->gen, 6);
1649 } else {
1650 cycles(&opts->gen, BUS);
1651 }
1652 if (dst_op.mode == MODE_REG_DIRECT) {
1653 mov_ir(code, cond == COND_TRUE ? 0xFF : 0, dst_op.base, SZ_B);
1654 } else {
1655 mov_irdisp(code, cond == COND_TRUE ? 0xFF : 0, dst_op.base, dst_op.disp, SZ_B);
1656 }
1657 } else {
1658 uint8_t cc = m68k_eval_cond(opts, cond);
1659 check_alloc_code(code, 6*MAX_INST_LEN);
1660 code_ptr true_off = code->cur + 1;
1661 jcc(code, cc, code->cur+2);
1662 cycles(&opts->gen, BUS);
1663 if (dst_op.mode == MODE_REG_DIRECT) {
1664 mov_ir(code, 0, dst_op.base, SZ_B);
1665 } else {
1666 mov_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
1667 }
1668 code_ptr end_off = code->cur+1;
1669 jmp(code, code->cur+2);
1670 *true_off = code->cur - (true_off+1);
1671 cycles(&opts->gen, 6);
1672 if (dst_op.mode == MODE_REG_DIRECT) {
1673 mov_ir(code, 0xFF, dst_op.base, SZ_B);
1674 } else {
1675 mov_irdisp(code, 0xFF, dst_op.base, dst_op.disp, SZ_B);
1676 }
1677 *end_off = code->cur - (end_off+1);
1678 }
1679 m68k_save_result(inst, opts);
1680 }
1681
1682 void translate_m68k_jmp_jsr(m68k_options * opts, m68kinst * inst)
1683 {
1684 uint8_t is_jsr = inst->op == M68K_JSR;
1685 code_info *code = &opts->gen.code;
1686 code_ptr dest_addr;
1687 uint8_t sec_reg;
1688 uint32_t after;
1689 uint32_t m68k_addr;
1690 switch(inst->src.addr_mode)
1691 {
1692 case MODE_AREG_INDIRECT:
1693 cycles(&opts->gen, BUS*2);
1694 if (is_jsr) {
1695 mov_ir(code, inst->address + 2, opts->gen.scratch1, SZ_D);
1696 sub_ir(code, 4, opts->aregs[7], SZ_D);
1697 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1698 call(code, opts->write_32_highfirst);
1699 }
1700 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1701 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1702 } else {
1703 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
1704 }
1705 call(code, opts->native_addr);
1706 jmp_r(code, opts->gen.scratch1);
1707 break;
1708 case MODE_AREG_DISPLACE:
1709 cycles(&opts->gen, BUS*2);
1710 if (is_jsr) {
1711 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1712 sub_ir(code, 4, opts->aregs[7], SZ_D);
1713 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1714 call(code, opts->write_32_highfirst);
1715 }
1716 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1717 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1718 } else {
1719 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, opts->gen.scratch1, SZ_D);
1720 }
1721 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1722 call(code, opts->native_addr);
1723 jmp_r(code, opts->gen.scratch1);
1724 break;
1725 case MODE_AREG_INDEX_DISP8:
1726 cycles(&opts->gen, BUS*3);//TODO: CHeck that this is correct
1727 if (is_jsr) {
1728 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1729 sub_ir(code, 4, opts->aregs[7], SZ_D);
1730 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1731 call(code, opts->write_32_highfirst);
1732 }
1733 if (opts->aregs[inst->src.params.regs.pri] >= 0) {
1734 mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
1735 } else {
1736 mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
1737 }
1738 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1739 if (inst->src.params.regs.sec & 1) {
1740 //32-bit index register
1741 if (inst->src.params.regs.sec & 0x10) {
1742 if (opts->aregs[sec_reg] >= 0) {
1743 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1744 } else {
1745 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1746 }
1747 } else {
1748 if (opts->dregs[sec_reg] >= 0) {
1749 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1750 } else {
1751 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1752 }
1753 }
1754 } else {
1755 //16-bit index register
1756 if (inst->src.params.regs.sec & 0x10) {
1757 if (opts->aregs[sec_reg] >= 0) {
1758 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1759 } else {
1760 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1761 }
1762 } else {
1763 if (opts->dregs[sec_reg] >= 0) {
1764 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1765 } else {
1766 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1767 }
1768 }
1769 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1770 }
1771 if (inst->src.params.regs.displacement) {
1772 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1773 }
1774 call(code, opts->native_addr);
1775 jmp_r(code, opts->gen.scratch1);
1776 break;
1777 case MODE_PC_DISPLACE:
1778 //TODO: Add cycles in the right place relative to pushing the return address on the stack
1779 cycles(&opts->gen, 10);
1780 if (is_jsr) {
1781 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1782 sub_ir(code, 4, opts->aregs[7], SZ_D);
1783 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1784 call(code, opts->write_32_highfirst);
1785 }
1786 m68k_addr = inst->src.params.regs.displacement + inst->address + 2;
1787 if ((m68k_addr & 0xFFFFFF) < 0x400000) {
1788 dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
1789 if (!dest_addr) {
1790 opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, code->cur + 1);
1791 //dummy address to be replaced later, make sure it generates a 4-byte displacement
1792 dest_addr = code->cur + 256;
1793 }
1794 jmp(code, dest_addr);
1795 } else {
1796 mov_ir(code, m68k_addr, opts->gen.scratch1, SZ_D);
1797 call(code, opts->native_addr);
1798 jmp_r(code, opts->gen.scratch1);
1799 }
1800 break;
1801 case MODE_PC_INDEX_DISP8:
1802 cycles(&opts->gen, BUS*3);//TODO: CHeck that this is correct
1803 if (is_jsr) {
1804 mov_ir(code, inst->address + 4, opts->gen.scratch1, SZ_D);
1805 sub_ir(code, 4, opts->aregs[7], SZ_D);
1806 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1807 call(code, opts->write_32_highfirst);
1808 }
1809 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
1810 sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
1811 if (inst->src.params.regs.sec & 1) {
1812 if (inst->src.params.regs.sec & 0x10) {
1813 if (opts->aregs[sec_reg] >= 0) {
1814 add_rr(code, opts->aregs[sec_reg], opts->gen.scratch1, SZ_D);
1815 } else {
1816 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1817 }
1818 } else {
1819 if (opts->dregs[sec_reg] >= 0) {
1820 add_rr(code, opts->dregs[sec_reg], opts->gen.scratch1, SZ_D);
1821 } else {
1822 add_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch1, SZ_D);
1823 }
1824 }
1825 } else {
1826 if (inst->src.params.regs.sec & 0x10) {
1827 if (opts->aregs[sec_reg] >= 0) {
1828 movsx_rr(code, opts->aregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1829 } else {
1830 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1831 }
1832 } else {
1833 if (opts->dregs[sec_reg] >= 0) {
1834 movsx_rr(code, opts->dregs[sec_reg], opts->gen.scratch2, SZ_W, SZ_D);
1835 } else {
1836 movsx_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, opts->gen.scratch2, SZ_W, SZ_D);
1837 }
1838 }
1839 add_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
1840 }
1841 if (inst->src.params.regs.displacement) {
1842 add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
1843 }
1844 call(code, opts->native_addr);
1845 jmp_r(code, opts->gen.scratch1);
1846 break;
1847 case MODE_ABSOLUTE:
1848 case MODE_ABSOLUTE_SHORT:
1849 //TODO: Add cycles in the right place relative to pushing the return address on the stack
1850 cycles(&opts->gen, inst->src.addr_mode == MODE_ABSOLUTE ? 12 : 10);
1851 if (is_jsr) {
1852 mov_ir(code, inst->address + (inst->src.addr_mode == MODE_ABSOLUTE ? 6 : 4), opts->gen.scratch1, SZ_D);
1853 sub_ir(code, 4, opts->aregs[7], SZ_D);
1854 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
1855 call(code, opts->write_32_highfirst);
1856 }
1857 m68k_addr = inst->src.params.immed;
1858 if ((m68k_addr & 0xFFFFFF) < 0x400000) {
1859 dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
1860 if (!dest_addr) {
1861 opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, code->cur + 1);
1862 //dummy address to be replaced later, make sure it generates a 4-byte displacement
1863 dest_addr = code->cur + 256;
1864 }
1865 jmp(code, dest_addr);
1866 } else {
1867 mov_ir(code, m68k_addr, opts->gen.scratch1, SZ_D);
1868 call(code, opts->native_addr);
1869 jmp_r(code, opts->gen.scratch1);
1870 }
1871 break;
1872 default:
1873 m68k_disasm(inst, disasm_buf);
1874 printf("%s\naddress mode %d not yet supported (%s)\n", disasm_buf, inst->src.addr_mode, is_jsr ? "jsr" : "jmp");
1875 exit(1);
1876 }
1877 }
1878
1879 void translate_m68k_rts(m68k_options * opts, m68kinst * inst)
1880 {
1881 code_info *code = &opts->gen.code;
1882 //TODO: Add cycles
1883 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
1884 add_ir(code, 4, opts->aregs[7], SZ_D);
1885 call(code, opts->read_32);
1886 call(code, opts->native_addr);
1887 jmp_r(code, opts->gen.scratch1);
1888 }
1889
//Translates DBcc: when the condition is false, decrement Dn.w and branch
//back unless the counter has reached -1
void translate_m68k_dbcc(m68k_options * opts, m68kinst * inst)
{
	code_info *code = &opts->gen.code;
	//best case duration
	cycles(&opts->gen, 10);
	code_ptr skip_loc = NULL;
	//TODO: Check if COND_TRUE technically valid here even though
	//it's basically a slow NOP
	if (inst->extra.cond != COND_FALSE) {
		uint8_t cond = m68k_eval_cond(opts, inst->extra.cond);
		check_alloc_code(code, 6*MAX_INST_LEN);
		//jump over the decrement/branch when the condition holds;
		//the 1-byte displacement is patched below once the target is known
		skip_loc = code->cur + 1;
		jcc(code, cond, code->cur + 2);
	}
	//decrement the low word of the counter register and test for -1
	if (opts->dregs[inst->dst.params.regs.pri] >= 0) {
		sub_ir(code, 1, opts->dregs[inst->dst.params.regs.pri], SZ_W);
		cmp_ir(code, -1, opts->dregs[inst->dst.params.regs.pri], SZ_W);
	} else {
		sub_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W);
		cmp_irdisp(code, -1, opts->gen.context_reg, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W);
	}
	//counter hit -1: fall out of the loop
	code_ptr loop_end_loc = code->cur + 1;
	jcc(code, CC_Z, code->cur + 2);
	//branch displacement is relative to the word after the opcode
	uint32_t after = inst->address + 2;
	code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + inst->src.params.immed);
	if (!dest_addr) {
		opts->gen.deferred = defer_address(opts->gen.deferred, after + inst->src.params.immed, code->cur + 1);
		//dummy address to be replaced later, make sure it generates a 4-byte displacement
		dest_addr = code->cur + 256;
	}
	jmp(code, dest_addr);
	*loop_end_loc = code->cur - (loop_end_loc+1);
	if (skip_loc) {
		//the condition-true jump is patched to land between the two cycle adds,
		//so loop exit via the counter accrues 4 extra cycles, condition-true only 2
		cycles(&opts->gen, 2);
		*skip_loc = code->cur - (skip_loc+1);
		cycles(&opts->gen, 2);
	} else {
		cycles(&opts->gen, 4);
	}
}
1930
//Translates LINK An,#disp: push An, copy the updated SP into An,
//then adjust SP by the signed displacement
void translate_m68k_link(m68k_options * opts, m68kinst * inst)
{
	code_info *code = &opts->gen.code;
	//link register may be cached in a native register, or live in the context
	int8_t reg = native_reg(&(inst->src), opts);
	//compensate for displacement word
	cycles(&opts->gen, BUS);
	//push the current value of the link register
	//(SP is decremented first, so LINK A7 stores the decremented SP)
	sub_ir(code, 4, opts->aregs[7], SZ_D);
	mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
	if (reg >= 0) {
		mov_rr(code, reg, opts->gen.scratch1, SZ_D);
	} else {
		mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
	}
	call(code, opts->write_32_highfirst);
	//the link register now points at the saved value
	if (reg >= 0) {
		mov_rr(code, opts->aregs[7], reg, SZ_D);
	} else {
		mov_rrdisp(code, opts->aregs[7], opts->gen.context_reg, reg_offset(&(inst->src)), SZ_D);
	}
	//adjust SP by the displacement to reserve the stack frame
	add_ir(code, inst->dst.params.immed, opts->aregs[7], SZ_D);
	//prefetch
	cycles(&opts->gen, BUS);
}
1954
//Translates MOVEP: moves a data register to or from every other byte of
//memory starting at (An)+d16, high byte first
void translate_m68k_movep(m68k_options * opts, m68kinst * inst)
{
	code_info *code = &opts->gen.code;
	int8_t reg;
	cycles(&opts->gen, BUS*2);
	if (inst->src.addr_mode == MODE_REG) {
		//register to memory: build the destination address in scratch2
		if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
			mov_rr(code, opts->aregs[inst->dst.params.regs.pri], opts->gen.scratch2, SZ_D);
		} else {
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->dst)), opts->gen.scratch2, SZ_D);
		}
		if (inst->dst.params.regs.displacement) {
			add_ir(code, inst->dst.params.regs.displacement, opts->gen.scratch2, SZ_D);
		}
		reg = native_reg(&(inst->src), opts);
		if (inst->extra.size == OPSIZE_LONG) {
			//long transfer: write the two most significant bytes first;
			//scratch2 is saved around each write_8 call, presumably because the
			//memory handler may clobber it — confirm against the write_8 contract
			if (reg >= 0) {
				mov_rr(code, reg, opts->gen.scratch1, SZ_D);
				shr_ir(code, 24, opts->gen.scratch1, SZ_D);
				push_r(code, opts->gen.scratch2);
				call(code, opts->write_8);
				pop_r(code, opts->gen.scratch2);
				mov_rr(code, reg, opts->gen.scratch1, SZ_D);
				shr_ir(code, 16, opts->gen.scratch1, SZ_D);

			} else {
				//register lives in the context struct; load single bytes directly
				//NOTE(review): the +3/+2 byte offsets assume a little-endian host
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src))+3, opts->gen.scratch1, SZ_B);
				push_r(code, opts->gen.scratch2);
				call(code, opts->write_8);
				pop_r(code, opts->gen.scratch2);
				mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src))+2, opts->gen.scratch1, SZ_B);
			}
			add_ir(code, 2, opts->gen.scratch2, SZ_D);
			push_r(code, opts->gen.scratch2);
			call(code, opts->write_8);
			pop_r(code, opts->gen.scratch2);
			add_ir(code, 2, opts->gen.scratch2, SZ_D);
		}
		//write the low word (two more bytes, high byte of the word first)
		if (reg >= 0) {
			mov_rr(code, reg, opts->gen.scratch1, SZ_W);
			shr_ir(code, 8, opts->gen.scratch1, SZ_W);
			push_r(code, opts->gen.scratch2);
			call(code, opts->write_8);
			pop_r(code, opts->gen.scratch2);
			mov_rr(code, reg, opts->gen.scratch1, SZ_W);
		} else {
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src))+1, opts->gen.scratch1, SZ_B);
			push_r(code, opts->gen.scratch2);
			call(code, opts->write_8);
			pop_r(code, opts->gen.scratch2);
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_B);
		}
		add_ir(code, 2, opts->gen.scratch2, SZ_D);
		call(code, opts->write_8);
	} else {
		//memory to register: build the source address in scratch1
		if (opts->aregs[inst->src.params.regs.pri] >= 0) {
			mov_rr(code, opts->aregs[inst->src.params.regs.pri], opts->gen.scratch1, SZ_D);
		} else {
			mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_D);
		}
		if (inst->src.params.regs.displacement) {
			add_ir(code, inst->src.params.regs.displacement, opts->gen.scratch1, SZ_D);
		}
		reg = native_reg(&(inst->dst), opts);
		if (inst->extra.size == OPSIZE_LONG) {
			//long transfer: read the two most significant bytes first;
			//the address is saved on the stack around each read_8 call since
			//the result comes back in scratch1
			if (reg >= 0) {
				push_r(code, opts->gen.scratch1);
				call(code, opts->read_8);
				shl_ir(code, 24, opts->gen.scratch1, SZ_D);
				mov_rr(code, opts->gen.scratch1, reg, SZ_D);
				pop_r(code, opts->gen.scratch1);
				add_ir(code, 2, opts->gen.scratch1, SZ_D);
				push_r(code, opts->gen.scratch1);
				call(code, opts->read_8);
				shl_ir(code, 16, opts->gen.scratch1, SZ_D);
				or_rr(code, opts->gen.scratch1, reg, SZ_D);
			} else {
				//NOTE(review): the +3/+2 byte offsets assume a little-endian host
				push_r(code, opts->gen.scratch1);
				call(code, opts->read_8);
				mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst))+3, SZ_B);
				pop_r(code, opts->gen.scratch1);
				add_ir(code, 2, opts->gen.scratch1, SZ_D);
				push_r(code, opts->gen.scratch1);
				call(code, opts->read_8);
				mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst))+2, SZ_B);
			}
			pop_r(code, opts->gen.scratch1);
			add_ir(code, 2, opts->gen.scratch1, SZ_D);
		}
		//read the low word into the destination register
		push_r(code, opts->gen.scratch1);
		call(code, opts->read_8);
		if (reg >= 0) {

			shl_ir(code, 8, opts->gen.scratch1, SZ_W);
			mov_rr(code, opts->gen.scratch1, reg, SZ_W);
			pop_r(code, opts->gen.scratch1);
			add_ir(code, 2, opts->gen.scratch1, SZ_D);
			call(code, opts->read_8);
			mov_rr(code, opts->gen.scratch1, reg, SZ_B);
		} else {
			mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst))+1, SZ_B);
			pop_r(code, opts->gen.scratch1);
			add_ir(code, 2, opts->gen.scratch1, SZ_D);
			call(code, opts->read_8);
			mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_B);
		}
	}
}
2063
2064 void translate_m68k_cmp(m68k_options * opts, m68kinst * inst)
2065 {
2066 code_info *code = &opts->gen.code;
2067 uint8_t size = inst->extra.size;
2068 x86_ea src_op, dst_op;
2069 translate_m68k_src(inst, &src_op, opts);
2070 if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
2071 push_r(code, opts->gen.scratch1);
2072 translate_m68k_dst(inst, &dst_op, opts, 0);
2073 pop_r(code, opts->gen.scratch2);
2074 src_op.base = opts->gen.scratch2;
2075 } else {
2076 translate_m68k_dst(inst, &dst_op, opts, 0);
2077 if (inst->dst.addr_mode == MODE_AREG && size == OPSIZE_WORD) {
2078 size = OPSIZE_LONG;
2079 }
2080 }
2081 cycles(&opts->gen, BUS);
2082 if (src_op.mode == MODE_REG_DIRECT) {
2083 if (dst_op.mode == MODE_REG_DIRECT) {
2084 cmp_rr(code, src_op.base, dst_op.base, size);
2085 } else {
2086 cmp_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
2087 }
2088 } else if (src_op.mode == MODE_REG_DISPLACE8) {
2089 cmp_rdispr(code, src_op.base, src_op.disp, dst_op.base, size);
2090 } else {
2091 if (dst_op.mode == MODE_REG_DIRECT) {
2092 cmp_ir(code, src_op.disp, dst_op.base, size);
2093 } else {
2094 cmp_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, size);
2095 }
2096 }
2097 set_flag_cond(opts, CC_C, FLAG_C);
2098 set_flag_cond(opts, CC_Z, FLAG_Z);
2099 set_flag_cond(opts, CC_S, FLAG_N);
2100 set_flag_cond(opts, CC_O, FLAG_V);
2101 }
2102
//Emitter callback types used by translate_shift so a single routine can
//service all of the shift/rotate variants
//shift a register destination by an immediate count
typedef void (*shift_ir_t)(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
//shift a memory destination at [dst_base + disp] by an immediate count
typedef void (*shift_irdisp_t)(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
//shift a register destination by the count previously loaded into RCX
typedef void (*shift_clr_t)(code_info *code, uint8_t dst, uint8_t size);
//shift a memory destination at [dst_base + disp] by the count in RCX
typedef void (*shift_clrdisp_t)(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
2107
//Common translation for the shift instructions. The shift_* callbacks supply
//the x86 instruction for the specific direction/type; special/special_disp
//supply the opposite-direction shift used to emulate 68K shifts of >= 32 bits
//(NULL for ASR, which saturates naturally via repeated 31+1 bit shifts).
void translate_shift(m68k_options * opts, m68kinst * inst, x86_ea *src_op, x86_ea * dst_op, shift_ir_t shift_ir, shift_irdisp_t shift_irdisp, shift_clr_t shift_clr, shift_clrdisp_t shift_clrdisp, shift_ir_t special, shift_irdisp_t special_disp)
{
	code_info *code = &opts->gen.code;
	//forward-jump displacements patched once their targets are known
	code_ptr end_off = NULL;
	code_ptr nz_off = NULL;
	code_ptr z_off = NULL;
	if (inst->src.addr_mode == MODE_UNUSED) {
		cycles(&opts->gen, BUS);
		//Memory shift
		shift_ir(code, 1, dst_op->base, SZ_W);
	} else {
		cycles(&opts->gen, inst->extra.size == OPSIZE_LONG ? 8 : 6);
		if (src_op->mode == MODE_IMMED) {
			//immediate count known at translation time
			if (src_op->disp != 1 && inst->op == M68K_ASL) {
				//ASL's V flag reflects every bit shifted through the MSB,
				//so emit one single-bit shift per count and OR the overflow in
				set_flag(opts, 0, FLAG_V);
				for (int i = 0; i < src_op->disp; i++) {
					if (dst_op->mode == MODE_REG_DIRECT) {
						shift_ir(code, 1, dst_op->base, inst->extra.size);
					} else {
						shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size);
					}
					check_alloc_code(code, 2*MAX_INST_LEN);
					code_ptr after_flag_set = code->cur + 1;
					jcc(code, CC_NO, code->cur + 2);
					set_flag(opts, 1, FLAG_V);
					*after_flag_set = code->cur - (after_flag_set+1);
				}
			} else {
				if (dst_op->mode == MODE_REG_DIRECT) {
					shift_ir(code, src_op->disp, dst_op->base, inst->extra.size);
				} else {
					shift_irdisp(code, src_op->disp, dst_op->base, dst_op->disp, inst->extra.size);
				}
				set_flag_cond(opts, CC_O, FLAG_V);
			}
		} else {
			//variable count: move it into RCX for the x86 CL-count shift forms
			if (src_op->base != RCX) {
				if (src_op->mode == MODE_REG_DIRECT) {
					mov_rr(code, src_op->base, RCX, SZ_B);
				} else {
					mov_rdispr(code, src_op->base, src_op->disp, RCX, SZ_B);
				}

			}
			//68K shift counts are modulo 64
			and_ir(code, 63, RCX, SZ_D);
			check_alloc_code(code, 7*MAX_INST_LEN);
			nz_off = code->cur + 1;
			jcc(code, CC_NZ, code->cur + 2);
			//Flag behavior for shift count of 0 is different for x86 than 68K
			if (dst_op->mode == MODE_REG_DIRECT) {
				cmp_ir(code, 0, dst_op->base, inst->extra.size);
			} else {
				cmp_irdisp(code, 0, dst_op->base, dst_op->disp, inst->extra.size);
			}
			set_flag_cond(opts, CC_Z, FLAG_Z);
			set_flag_cond(opts, CC_S, FLAG_N);
			set_flag(opts, 0, FLAG_C);
			//For other instructions, this flag will be set below
			if (inst->op == M68K_ASL) {
				set_flag(opts, 0, FLAG_V);
			}
			z_off = code->cur + 1;
			jmp(code, code->cur + 2);
			*nz_off = code->cur - (nz_off + 1);
			//add 2 cycles for every bit shifted
			add_rr(code, RCX, CYCLES, SZ_D);
			add_rr(code, RCX, CYCLES, SZ_D);
			if (inst->op == M68K_ASL) {
				//ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB
				//Easiest way to deal with this is to shift one bit at a time
				set_flag(opts, 0, FLAG_V);
				check_alloc_code(code, 5*MAX_INST_LEN);
				code_ptr loop_start = code->cur;
				if (dst_op->mode == MODE_REG_DIRECT) {
					shift_ir(code, 1, dst_op->base, inst->extra.size);
				} else {
					shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size);
				}
				code_ptr after_flag_set = code->cur + 1;
				jcc(code, CC_NO, code->cur + 2);
				set_flag(opts, 1, FLAG_V);
				*after_flag_set = code->cur - (after_flag_set+1);
				loop(code, loop_start);
			} else {
				//x86 shifts modulo 32 for operand sizes less than 64-bits
				//but M68K shifts modulo 64, so we need to check for large shifts here
				cmp_ir(code, 32, RCX, SZ_B);
				check_alloc_code(code, 14*MAX_INST_LEN);
				code_ptr norm_shift_off = code->cur + 1;
				jcc(code, CC_L, code->cur + 2);
				if (special) {
					//count >= 32: result is all zeros; a long shift of exactly 32
					//still needs C set from the last bit shifted out
					code_ptr after_flag_set = NULL;
					if (inst->extra.size == OPSIZE_LONG) {
						code_ptr neq_32_off = code->cur + 1;
						jcc(code, CC_NZ, code->cur + 2);

						//set the carry bit to the lsb
						if (dst_op->mode == MODE_REG_DIRECT) {
							special(code, 1, dst_op->base, SZ_D);
						} else {
							special_disp(code, 1, dst_op->base, dst_op->disp, SZ_D);
						}
						set_flag_cond(opts, CC_C, FLAG_C);
						after_flag_set = code->cur + 1;
						jmp(code, code->cur + 2);
						*neq_32_off = code->cur - (neq_32_off+1);
					}
					set_flag(opts, 0, FLAG_C);
					if (after_flag_set) {
						*after_flag_set = code->cur - (after_flag_set+1);
					}
					set_flag(opts, 1, FLAG_Z);
					set_flag(opts, 0, FLAG_N);
					if (dst_op->mode == MODE_REG_DIRECT) {
						xor_rr(code, dst_op->base, dst_op->base, inst->extra.size);
					} else {
						mov_irdisp(code, 0, dst_op->base, dst_op->disp, inst->extra.size);
					}
				} else {
					//no special handler (ASR): a 31-bit then 1-bit shift produces
					//the correct sign-fill result for any count >= 32
					if (dst_op->mode == MODE_REG_DIRECT) {
						shift_ir(code, 31, dst_op->base, inst->extra.size);
						shift_ir(code, 1, dst_op->base, inst->extra.size);
					} else {
						shift_irdisp(code, 31, dst_op->base, dst_op->disp, inst->extra.size);
						shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size);
					}

				}
				end_off = code->cur + 1;
				jmp(code, code->cur + 2);
				*norm_shift_off = code->cur - (norm_shift_off+1);
				//normal case: count < 32, do the shift in one go with the count in CL
				if (dst_op->mode == MODE_REG_DIRECT) {
					shift_clr(code, dst_op->base, inst->extra.size);
				} else {
					shift_clrdisp(code, dst_op->base, dst_op->disp, inst->extra.size);
				}
			}
		}

	}
	//in the no-special case the >= 32 path already has correct x86 flags,
	//so it rejoins before the flag stores; with special it rejoins after
	if (!special && end_off) {
		*end_off = code->cur - (end_off + 1);
	}
	set_flag_cond(opts, CC_C, FLAG_C);
	set_flag_cond(opts, CC_Z, FLAG_Z);
	set_flag_cond(opts, CC_S, FLAG_N);
	if (special && end_off) {
		*end_off = code->cur - (end_off + 1);
	}
	//set X flag to same as C flag
	if (opts->flag_regs[FLAG_C] >= 0) {
		flag_to_flag(opts, FLAG_C, FLAG_X);
	} else {
		set_flag_cond(opts, CC_C, FLAG_X);
	}
	//zero-count path rejoins here, skipping the C/X updates above
	if (z_off) {
		*z_off = code->cur - (z_off + 1);
	}
	if (inst->op != M68K_ASL) {
		set_flag(opts, 0, FLAG_V);
	}
	if (inst->src.addr_mode == MODE_UNUSED) {
		m68k_save_result(inst, opts);
	}
}
2273
//Bit position of the supervisor flag within the high byte of the 68K status register
#define BIT_SUPERVISOR 5
2275
2276 void translate_m68k(m68k_options * opts, m68kinst * inst)
2277 {
2278 code_ptr end_off, zero_off, norm_off;
2279 uint8_t dst_reg;
2280 code_info *code = &opts->gen.code;
2281 check_cycles_int(&opts->gen, inst->address);
2282 if (inst->op == M68K_MOVE) {
2283 return translate_m68k_move(opts, inst);
2284 } else if(inst->op == M68K_LEA) {
2285 return translate_m68k_lea(opts, inst);
2286 } else if(inst->op == M68K_PEA) {
2287 return translate_m68k_pea(opts, inst);
2288 } else if(inst->op == M68K_BSR) {
2289 return translate_m68k_bsr(opts, inst);
2290 } else if(inst->op == M68K_BCC) {
2291 return translate_m68k_bcc(opts, inst);
2292 } else if(inst->op == M68K_JMP) {
2293 return translate_m68k_jmp_jsr(opts, inst);
2294 } else if(inst->op == M68K_JSR) {
2295 return translate_m68k_jmp_jsr(opts, inst);
2296 } else if(inst->op == M68K_RTS) {
2297 return translate_m68k_rts(opts, inst);
2298 } else if(inst->op == M68K_DBCC) {
2299 return translate_m68k_dbcc(opts, inst);
2300 } else if(inst->op == M68K_CLR) {
2301 return translate_m68k_clr(opts, inst);
2302 } else if(inst->op == M68K_MOVEM) {
2303 return translate_m68k_movem(opts, inst);
2304 } else if(inst->op == M68K_LINK) {
2305 return translate_m68k_link(opts, inst);
2306 } else if(inst->op == M68K_EXT) {
2307 return translate_m68k_ext(opts, inst);
2308 } else if(inst->op == M68K_SCC) {
2309 return translate_m68k_scc(opts, inst);
2310 } else if(inst->op == M68K_MOVEP) {
2311 return translate_m68k_movep(opts, inst);
2312 } else if(inst->op == M68K_INVALID) {
2313 if (inst->src.params.immed == 0x7100) {
2314 return retn(code);
2315 }
2316 mov_ir(code, inst->address, opts->gen.scratch1, SZ_D);
2317 return call(code, (code_ptr)m68k_invalid);
2318 } else if(inst->op == M68K_CMP) {
2319 return translate_m68k_cmp(opts, inst);
2320 }
2321 x86_ea src_op, dst_op;
2322 if (inst->src.addr_mode != MODE_UNUSED) {
2323 translate_m68k_src(inst, &src_op, opts);
2324 }
2325 if (inst->dst.addr_mode != MODE_UNUSED) {
2326 translate_m68k_dst(inst, &dst_op, opts, 0);
2327 }
2328 uint8_t size;
2329 switch(inst->op)
2330 {
2331 case M68K_ABCD:
2332 if (src_op.base != opts->gen.scratch2) {
2333 if (src_op.mode == MODE_REG_DIRECT) {
2334 mov_rr(code, src_op.base, opts->gen.scratch2, SZ_B);
2335 } else {
2336 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch2, SZ_B);
2337 }
2338 }
2339 if (dst_op.base != opts->gen.scratch1) {
2340 if (dst_op.mode == MODE_REG_DIRECT) {
2341 mov_rr(code, dst_op.base, opts->gen.scratch1, SZ_B);
2342 } else {
2343 mov_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch1, SZ_B);
2344 }
2345 }
2346 flag_to_carry(opts, FLAG_X);
2347 jcc(code, CC_NC, code->cur + 5);
2348 add_ir(code, 1, opts->gen.scratch1, SZ_B);
2349 call(code, (code_ptr)bcd_add);
2350 reg_to_flag(opts, CH, FLAG_C);
2351 reg_to_flag(opts, CH, FLAG_X);
2352 cmp_ir(code, 0, opts->gen.scratch1, SZ_B);
2353 jcc(code, CC_Z, code->cur + 4);
2354 set_flag(opts, 0, FLAG_Z);
2355 if (dst_op.base != opts->gen.scratch1) {
2356 if (dst_op.mode == MODE_REG_DIRECT) {
2357 mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B);
2358 } else {
2359 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
2360 }
2361 }
2362 m68k_save_result(inst, opts);
2363 break;
2364 case M68K_ADD:
2365 cycles(&opts->gen, BUS);
2366 size = inst->dst.addr_mode == MODE_AREG ? OPSIZE_LONG : inst->extra.size;
2367 if (src_op.mode == MODE_REG_DIRECT) {
2368 if (dst_op.mode == MODE_REG_DIRECT) {
2369 add_rr(code, src_op.base, dst_op.base, size);
2370 } else {
2371 add_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
2372 }
2373 } else if (src_op.mode == MODE_REG_DISPLACE8) {
2374 add_rdispr(code, src_op.base, src_op.disp, dst_op.base, size);
2375 } else {
2376 if (dst_op.mode == MODE_REG_DIRECT) {
2377 add_ir(code, src_op.disp, dst_op.base, size);
2378 } else {
2379 add_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, size);
2380 }
2381 }
2382 if (inst->dst.addr_mode != MODE_AREG) {
2383 set_flag_cond(opts, CC_C, FLAG_C);
2384 set_flag_cond(opts, CC_Z, FLAG_Z);
2385 set_flag_cond(opts, CC_S, FLAG_N);
2386 set_flag_cond(opts, CC_O, FLAG_V);
2387 if (opts->flag_regs[FLAG_C] >= 0) {
2388 flag_to_flag(opts, FLAG_C, FLAG_X);
2389 } else {
2390 set_flag_cond(opts, CC_C, FLAG_X);
2391 }
2392 }
2393 m68k_save_result(inst, opts);
2394 break;
2395 case M68K_ADDX: {
2396 cycles(&opts->gen, BUS);
2397 flag_to_carry(opts, FLAG_X);
2398 if (src_op.mode == MODE_REG_DIRECT) {
2399 if (dst_op.mode == MODE_REG_DIRECT) {
2400 adc_rr(code, src_op.base, dst_op.base, inst->extra.size);
2401 } else {
2402 adc_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
2403 }
2404 } else if (src_op.mode == MODE_REG_DISPLACE8) {
2405 adc_rdispr(code, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
2406 } else {
2407 if (dst_op.mode == MODE_REG_DIRECT) {
2408 adc_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2409 } else {
2410 adc_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2411 }
2412 }
2413 set_flag_cond(opts, CC_C, FLAG_C);
2414
2415 check_alloc_code(code, 2*MAX_INST_LEN);
2416 code_ptr after_flag_set = code->cur + 1;
2417 jcc(code, CC_Z, code->cur + 2);
2418 set_flag(opts, 0, FLAG_Z);
2419 *after_flag_set = code->cur - (after_flag_set+1);
2420 set_flag_cond(opts, CC_S, FLAG_N);
2421 set_flag_cond(opts, CC_O, FLAG_V);
2422 if (opts->flag_regs[FLAG_C] >= 0) {
2423 flag_to_flag(opts, FLAG_C, FLAG_X);
2424 } else {
2425 set_flag_cond(opts, CC_C, FLAG_X);
2426 }
2427 m68k_save_result(inst, opts);
2428 break;
2429 }
2430 case M68K_AND:
2431 cycles(&opts->gen, BUS);
2432 if (src_op.mode == MODE_REG_DIRECT) {
2433 if (dst_op.mode == MODE_REG_DIRECT) {
2434 and_rr(code, src_op.base, dst_op.base, inst->extra.size);
2435 } else {
2436 and_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
2437 }
2438 } else if (src_op.mode == MODE_REG_DISPLACE8) {
2439 and_rdispr(code, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
2440 } else {
2441 if (dst_op.mode == MODE_REG_DIRECT) {
2442 and_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2443 } else {
2444 and_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2445 }
2446 }
2447 set_flag(opts, 0, FLAG_C);
2448 set_flag_cond(opts, CC_Z, FLAG_Z);
2449 set_flag_cond(opts, CC_S, FLAG_N);
2450 set_flag(opts, 0, FLAG_V);
2451 m68k_save_result(inst, opts);
2452 break;
2453 case M68K_ANDI_CCR:
2454 case M68K_ANDI_SR:
//ANDI to CCR/SR: clear any flag whose mask bit is 0 in the immediate.
//Flag bits are tracked individually, so each is cleared at translation time
//rather than emitting a runtime AND of a packed CCR.
2455 cycles(&opts->gen, 20);
2456 //TODO: If ANDI to SR, trap if not in supervisor mode
2457 if (!(inst->src.params.immed & 0x1)) {
2458 set_flag(opts, 0, FLAG_C);
2459 }
2460 if (!(inst->src.params.immed & 0x2)) {
2461 set_flag(opts, 0, FLAG_V);
2462 }
2463 if (!(inst->src.params.immed & 0x4)) {
2464 set_flag(opts, 0, FLAG_Z);
2465 }
2466 if (!(inst->src.params.immed & 0x8)) {
2467 set_flag(opts, 0, FLAG_N);
2468 }
2469 if (!(inst->src.params.immed & 0x10)) {
2470 set_flag(opts, 0, FLAG_X);
2471 }
2472 if (inst->op == M68K_ANDI_SR) {
//apply the high byte of the immediate to the status register byte
2473 and_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
2474 if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
2475 //leave supervisor mode
//Swap A7 with the saved user stack pointer (stored after the 8 aregs).
//Fixed: these swaps used SZ_B, which only exchanged the low byte of the
//32-bit stack pointers; every other supervisor/user switch in this file
//(MOVE to SR, RTE, STOP, MOVE USP) swaps the full SZ_D value.
2476 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
2477 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
2478 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
2479 }
2480 if (inst->src.params.immed & 0x700) {
//interrupt mask may have been lowered; resync so a pending interrupt can fire
2481 call(code, opts->do_sync);
2482 }
2483 }
2484 break;
2485 case M68K_ASL:
2486 case M68K_LSL:
//ASL and LSL are identical bit-wise; only flag/overflow semantics differ,
//which translate_shift handles. The last two args are the opposite-direction
//ops used for counts >= 32 handling inside translate_shift.
2487 translate_shift(opts, inst, &src_op, &dst_op, shl_ir, shl_irdisp, shl_clr, shl_clrdisp, shr_ir, shr_irdisp);
2488 break;
2489 case M68K_ASR:
//arithmetic right shift: no opposite-direction op supplied (NULL, NULL)
2490 translate_shift(opts, inst, &src_op, &dst_op, sar_ir, sar_irdisp, sar_clr, sar_clrdisp, NULL, NULL);
2491 break;
2492 case M68K_LSR:
2493 translate_shift(opts, inst, &src_op, &dst_op, shr_ir, shr_irdisp, shr_clr, shr_clrdisp, shl_ir, shl_irdisp);
2494 break;
2495 case M68K_BCHG:
2496 case M68K_BCLR:
2497 case M68K_BSET:
2498 case M68K_BTST:
//Bit test/set/clear/change. Maps directly onto x86 bt/bts/btr/btc.
//Base timing depends on op and size (byte ops hit memory, long ops are
//register-only on the 68K).
2499 cycles(&opts->gen, inst->extra.size == OPSIZE_BYTE ? 4 : (
2500 inst->op == M68K_BTST ? 6 : (inst->op == M68K_BCLR ? 10 : 8))
2501 );
2502 if (src_op.mode == MODE_IMMED) {
//immediate bit number: byte-sized ops address bits modulo 8
2503 if (inst->extra.size == OPSIZE_BYTE) {
2504 src_op.disp &= 0x7;
2505 }
2506 if (inst->op == M68K_BTST) {
2507 if (dst_op.mode == MODE_REG_DIRECT) {
2508 bt_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2509 } else {
2510 bt_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2511 }
2512 } else if (inst->op == M68K_BSET) {
2513 if (dst_op.mode == MODE_REG_DIRECT) {
2514 bts_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2515 } else {
2516 bts_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2517 }
2518 } else if (inst->op == M68K_BCLR) {
2519 if (dst_op.mode == MODE_REG_DIRECT) {
2520 btr_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2521 } else {
2522 btr_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2523 }
2524 } else {
//M68K_BCHG
2525 if (dst_op.mode == MODE_REG_DIRECT) {
2526 btc_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2527 } else {
2528 btc_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2529 }
2530 }
2531 } else {
//Dynamic bit number: it must end up in a register the bt* emitters can use,
//without clobbering a destination that may already occupy scratch1.
2532 if (src_op.mode == MODE_REG_DISPLACE8 || (inst->dst.addr_mode != MODE_REG && src_op.base != opts->gen.scratch1 && src_op.base != opts->gen.scratch2)) {
2533 if (dst_op.base == opts->gen.scratch1) {
//scratch1 is taken by the destination, so borrow scratch2 (saved/restored)
2534 push_r(code, opts->gen.scratch2);
2535 if (src_op.mode == MODE_REG_DIRECT) {
2536 mov_rr(code, src_op.base, opts->gen.scratch2, SZ_B);
2537 } else {
2538 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch2, SZ_B);
2539 }
2540 src_op.base = opts->gen.scratch2;
2541 } else {
2542 if (src_op.mode == MODE_REG_DIRECT) {
2543 mov_rr(code, src_op.base, opts->gen.scratch1, SZ_B);
2544 } else {
2545 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
2546 }
2547 src_op.base = opts->gen.scratch1;
2548 }
2549 }
2550 uint8_t size = inst->extra.size;
2551 if (dst_op.mode == MODE_REG_DISPLACE8) {
2552 if (src_op.base != opts->gen.scratch1 && src_op.base != opts->gen.scratch2) {
2553 if (src_op.mode == MODE_REG_DIRECT) {
2554 mov_rr(code, src_op.base, opts->gen.scratch1, SZ_D);
2555 } else {
2556 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_D);
2557 src_op.mode = MODE_REG_DIRECT;
2558 }
2559 src_op.base = opts->gen.scratch1;
2560 }
2561 //b### with register destination is modulo 32
2562 //x86 with a memory destination isn't modulo anything
2563 //so use an and here to force the value to be modulo 32
2564 and_ir(code, 31, opts->gen.scratch1, SZ_D);
2565 } else if(inst->dst.addr_mode != MODE_REG) {
2566 //b### with memory destination is modulo 8
2567 //x86-64 doesn't support 8-bit bit operations
2568 //so we fake it by forcing the bit number to be modulo 8
2569 and_ir(code, 7, src_op.base, SZ_D);
2570 size = SZ_D;
2571 }
2572 if (inst->op == M68K_BTST) {
2573 if (dst_op.mode == MODE_REG_DIRECT) {
2574 bt_rr(code, src_op.base, dst_op.base, size);
2575 } else {
2576 bt_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
2577 }
2578 } else if (inst->op == M68K_BSET) {
2579 if (dst_op.mode == MODE_REG_DIRECT) {
2580 bts_rr(code, src_op.base, dst_op.base, size);
2581 } else {
2582 bts_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
2583 }
2584 } else if (inst->op == M68K_BCLR) {
2585 if (dst_op.mode == MODE_REG_DIRECT) {
2586 btr_rr(code, src_op.base, dst_op.base, size);
2587 } else {
2588 btr_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
2589 }
2590 } else {
2591 if (dst_op.mode == MODE_REG_DIRECT) {
2592 btc_rr(code, src_op.base, dst_op.base, size);
2593 } else {
2594 btc_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
2595 }
2596 }
2597 if (src_op.base == opts->gen.scratch2) {
//undo the earlier push when scratch2 was borrowed for the bit number
2598 pop_r(code, opts->gen.scratch2);
2599 }
2600 }
2601 //x86 sets the carry flag to the value of the bit tested
2602 //68K sets the zero flag to the complement of the bit tested
2603 set_flag_cond(opts, CC_NC, FLAG_Z);
2604 if (inst->op != M68K_BTST) {
2605 m68k_save_result(inst, opts);
2606 }
2607 break;
2608 case M68K_CHK:
2609 {
//CHK Dn, <ea>: trap through VECTOR_CHK if Dn < 0 (N set) or Dn > bound (N cleared).
2610 cycles(&opts->gen, 6);
2611 if (dst_op.mode == MODE_REG_DIRECT) {
2612 cmp_ir(code, 0, dst_op.base, inst->extra.size);
2613 } else {
2614 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, inst->extra.size);
2615 }
//isize = encoded instruction length, needed for the return PC pushed by the trap
2616 uint32_t isize;
2617 switch(inst->src.addr_mode)
2618 {
2619 case MODE_AREG_DISPLACE:
2620 case MODE_AREG_INDEX_DISP8:
2621 case MODE_ABSOLUTE_SHORT:
2622 case MODE_PC_INDEX_DISP8:
2623 case MODE_PC_DISPLACE:
2624 case MODE_IMMEDIATE:
2625 isize = 4;
2626 break;
2627 case MODE_ABSOLUTE:
2628 isize = 6;
2629 break;
2630 default:
2631 isize = 2;
2632 }
2633 //make sure we won't start a new chunk in the middle of these branches
2634 check_alloc_code(code, MAX_INST_LEN * 11);
//lower-bound check: branch over the trap sequence when value >= 0
2635 code_ptr passed = code->cur + 1;
2636 jcc(code, CC_GE, code->cur + 2);
2637 set_flag(opts, 1, FLAG_N);
2638 mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D);
2639 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
2640 jmp(code, opts->trap);
//patch the forward branch displacement now that the target is known
2641 *passed = code->cur - (passed+1);
//upper-bound check: compare against the <ea> bound operand
2642 if (dst_op.mode == MODE_REG_DIRECT) {
2643 if (src_op.mode == MODE_REG_DIRECT) {
2644 cmp_rr(code, src_op.base, dst_op.base, inst->extra.size);
2645 } else if(src_op.mode == MODE_REG_DISPLACE8) {
2646 cmp_rdispr(code, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
2647 } else {
2648 cmp_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2649 }
2650 } else if(dst_op.mode == MODE_REG_DISPLACE8) {
2651 if (src_op.mode == MODE_REG_DIRECT) {
2652 cmp_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
2653 } else {
2654 cmp_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2655 }
2656 }
2657 passed = code->cur + 1;
2658 jcc(code, CC_LE, code->cur + 2);
2659 set_flag(opts, 0, FLAG_N);
2660 mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D);
2661 mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
2662 jmp(code, opts->trap);
2663 *passed = code->cur - (passed+1);
2664 cycles(&opts->gen, 4);
2665 break;
2666 }
2667 case M68K_DIVS:
2668 case M68K_DIVU:
2669 {
//Signed/unsigned 32/16 -> 16:16 divide using x86 idiv/div.
//RAX/RDX are saved because the divide hardwires them on x86.
//norm_off/end_off are forward-branch patch pointers declared earlier in
//this function (outside this excerpt).
2670 check_alloc_code(code, MAX_NATIVE_SIZE);
2671 //TODO: cycle exact division
2672 cycles(&opts->gen, inst->op == M68K_DIVS ? 158 : 140);
2673 set_flag(opts, 0, FLAG_C);
2674 push_r(code, RDX);
2675 push_r(code, RAX);
2676 if (dst_op.mode == MODE_REG_DIRECT) {
2677 mov_rr(code, dst_op.base, RAX, SZ_D);
2678 } else {
2679 mov_rdispr(code, dst_op.base, dst_op.disp, RAX, SZ_D);
2680 }
//load the 16-bit divisor sign- or zero-extended to 32 bits into scratch2
2681 if (src_op.mode == MODE_IMMED) {
2682 mov_ir(code, (src_op.disp & 0x8000) && inst->op == M68K_DIVS ? src_op.disp | 0xFFFF0000 : src_op.disp, opts->gen.scratch2, SZ_D);
2683 } else if (src_op.mode == MODE_REG_DIRECT) {
2684 if (inst->op == M68K_DIVS) {
2685 movsx_rr(code, src_op.base, opts->gen.scratch2, SZ_W, SZ_D);
2686 } else {
2687 movzx_rr(code, src_op.base, opts->gen.scratch2, SZ_W, SZ_D);
2688 }
2689 } else if (src_op.mode == MODE_REG_DISPLACE8) {
2690 if (inst->op == M68K_DIVS) {
2691 movsx_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch2, SZ_W, SZ_D);
2692 } else {
2693 movzx_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch2, SZ_W, SZ_D);
2694 }
2695 }
//divide-by-zero: restore saved regs and trap through the zero-divide vector
2696 cmp_ir(code, 0, opts->gen.scratch2, SZ_D);
2697 check_alloc_code(code, 6*MAX_INST_LEN);
2698 code_ptr not_zero = code->cur + 1;
2699 jcc(code, CC_NZ, code->cur + 2);
2700 pop_r(code, RAX);
2701 pop_r(code, RDX);
2702 mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D);
2703 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
2704 jmp(code, opts->trap);
2705 *not_zero = code->cur - (not_zero+1);
2706 if (inst->op == M68K_DIVS) {
//sign-extend RAX into RDX:RAX for idiv
2707 cdq(code);
2708 } else {
2709 xor_rr(code, RDX, RDX, SZ_D);
2710 }
2711 if (inst->op == M68K_DIVS) {
2712 idiv_r(code, opts->gen.scratch2, SZ_D);
2713 } else {
2714 div_r(code, opts->gen.scratch2, SZ_D);
2715 }
//overflow check: 68K sets V (result discarded) when quotient won't fit 16 bits.
//DIVS needs both signed bounds checked; DIVU just one unsigned bound.
2716 code_ptr skip_sec_check;
2717 if (inst->op == M68K_DIVS) {
2718 cmp_ir(code, 0x8000, RAX, SZ_D);
2719 skip_sec_check = code->cur + 1;
2720 jcc(code, CC_GE, code->cur + 2);
2721 cmp_ir(code, -0x8000, RAX, SZ_D);
2722 norm_off = code->cur + 1;
2723 jcc(code, CC_L, code->cur + 2);
2724 } else {
2725 cmp_ir(code, 0x10000, RAX, SZ_D);
2726 norm_off = code->cur + 1;
2727 jcc(code, CC_NC, code->cur + 2);
2728 }
//pack result: remainder in the high word, quotient in the low word
2729 if (dst_op.mode == MODE_REG_DIRECT) {
2730 mov_rr(code, RDX, dst_op.base, SZ_W);
2731 shl_ir(code, 16, dst_op.base, SZ_D);
2732 mov_rr(code, RAX, dst_op.base, SZ_W);
2733 } else {
2734 mov_rrdisp(code, RDX, dst_op.base, dst_op.disp, SZ_W);
2735 shl_irdisp(code, 16, dst_op.base, dst_op.disp, SZ_D);
2736 mov_rrdisp(code, RAX, dst_op.base, dst_op.disp, SZ_W);
2737 }
//N/Z are based on the 16-bit quotient
2738 cmp_ir(code, 0, RAX, SZ_W);
2739 pop_r(code, RAX);
2740 pop_r(code, RDX);
2741 set_flag(opts, 0, FLAG_V);
2742 set_flag_cond(opts, CC_Z, FLAG_Z);
2743 set_flag_cond(opts, CC_S, FLAG_N);
2744 end_off = code->cur + 1;
2745 jmp(code, code->cur + 2);
//overflow path: leave destination untouched, just set V
2746 *norm_off = code->cur - (norm_off + 1);
2747 if (inst->op == M68K_DIVS) {
2748 *skip_sec_check = code->cur - (skip_sec_check+1);
2749 }
2750 pop_r(code, RAX);
2751 pop_r(code, RDX);
2752 set_flag(opts, 1, FLAG_V);
2753 *end_off = code->cur - (end_off + 1);
2754 break;
2755 }
2756 case M68K_EOR:
//EOR: exclusive OR into destination. N/Z from result, V/C cleared, X unaffected.
2757 cycles(&opts->gen, BUS);
2758 if (src_op.mode == MODE_REG_DIRECT) {
2759 if (dst_op.mode == MODE_REG_DIRECT) {
2760 xor_rr(code, src_op.base, dst_op.base, inst->extra.size);
2761 } else {
2762 xor_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
2763 }
2764 } else if (src_op.mode == MODE_REG_DISPLACE8) {
2765 xor_rdispr(code, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
2766 } else {
//immediate source
2767 if (dst_op.mode == MODE_REG_DIRECT) {
2768 xor_ir(code, src_op.disp, dst_op.base, inst->extra.size);
2769 } else {
2770 xor_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
2771 }
2772 }
2773 set_flag(opts, 0, FLAG_C);
2774 set_flag_cond(opts, CC_Z, FLAG_Z);
2775 set_flag_cond(opts, CC_S, FLAG_N);
2776 set_flag(opts, 0, FLAG_V);
2777 m68k_save_result(inst, opts);
2778 break;
2779 case M68K_EORI_CCR:
2780 case M68K_EORI_SR:
//EORI to CCR/SR: toggle any flag whose mask bit is 1 in the immediate.
2781 cycles(&opts->gen, 20);
2782 //TODO: If EORI to SR, trap if not in supervisor mode
2783 if (inst->src.params.immed & 0x1) {
2784 xor_flag(opts, 1, FLAG_C);
2785 }
2786 if (inst->src.params.immed & 0x2) {
2787 xor_flag(opts, 1, FLAG_V);
2788 }
2789 if (inst->src.params.immed & 0x4) {
2790 xor_flag(opts, 1, FLAG_Z);
2791 }
2792 if (inst->src.params.immed & 0x8) {
2793 xor_flag(opts, 1, FLAG_N);
2794 }
2795 if (inst->src.params.immed & 0x10) {
2796 xor_flag(opts, 1, FLAG_X);
2797 }
//Fixed: this compared against M68K_ORI_SR (copy-paste from the ORI case),
//which can never match inside the EORI case, so EORI to SR silently skipped
//the status-byte update and the interrupt-mask resync.
2798 if (inst->op == M68K_EORI_SR) {
2799 xor_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
//NOTE(review): toggling the supervisor bit should also swap USP/SSP like the
//ANDI/MOVE to SR paths do — not handled here; confirm and address separately.
2800 if (inst->src.params.immed & 0x700) {
2801 call(code, opts->do_sync);
2802 }
2803 }
2804 break;
2805 case M68K_EXG:
//EXG: swap two full 32-bit registers via scratch2 as the temporary.
2806 cycles(&opts->gen, 6);
2807 if (dst_op.mode == MODE_REG_DIRECT) {
2808 mov_rr(code, dst_op.base, opts->gen.scratch2, SZ_D);
2809 if (src_op.mode == MODE_REG_DIRECT) {
2810 mov_rr(code, src_op.base, dst_op.base, SZ_D);
2811 mov_rr(code, opts->gen.scratch2, src_op.base, SZ_D);
2812 } else {
//source lives in the context struct rather than a host register
2813 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_D);
2814 mov_rrdisp(code, opts->gen.scratch2, src_op.base, src_op.disp, SZ_D);
2815 }
2816 } else {
2817 mov_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch2, SZ_D);
2818 if (src_op.mode == MODE_REG_DIRECT) {
2819 mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_D);
2820 mov_rr(code, opts->gen.scratch2, src_op.base, SZ_D);
2821 } else {
//both operands in memory: use scratch1 for the second temporary
2822 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_D);
2823 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_D);
2824 mov_rrdisp(code, opts->gen.scratch2, src_op.base, src_op.disp, SZ_D);
2825 }
2826 }
2827 break;
2828 case M68K_ILLEGAL:
//Debug stub: dump context and exit instead of taking the illegal-instruction trap.
2829 call(code, opts->gen.save_context);
2830 #ifdef X86_64
//context pointer is the first SysV argument register
2831 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
2832 #else
2833 push_r(code, opts->gen.context_reg);
2834 #endif
2835 call(code, (code_ptr)print_regs_exit);
2836 break;
2837 case M68K_MOVE_FROM_SR:
//MOVE from SR: opts->get_sr assembles the packed SR word into scratch1.
2838 //TODO: Trap if not in system mode
2839 call(code, opts->get_sr);
2840 if (dst_op.mode == MODE_REG_DIRECT) {
2841 mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_W);
2842 } else {
2843 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_W);
2844 }
2845 m68k_save_result(inst, opts);
2846 break;
2847 case M68K_MOVE_CCR:
2848 case M68K_MOVE_SR:
2849 //TODO: Privilege check for MOVE to SR
2850 if (src_op.mode == MODE_IMMED) {
//immediate source: unpack the flag bits at translation time
2851 set_flag(opts, src_op.disp & 0x1, FLAG_C);
2852 set_flag(opts, (src_op.disp >> 1) & 0x1, FLAG_V);
2853 set_flag(opts, (src_op.disp >> 2) & 0x1, FLAG_Z);
2854 set_flag(opts, (src_op.disp >> 3) & 0x1, FLAG_N);
2855 set_flag(opts, (src_op.disp >> 4) & 0x1, FLAG_X);
2856 if (inst->op == M68K_MOVE_SR) {
2857 mov_irdisp(code, (src_op.disp >> 8), opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
2858 if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
2859 //leave supervisor mode
//swap A7 with the saved user stack pointer (stored after the 8 aregs)
2860 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
2861 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
2862 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
2863 }
//interrupt mask may have changed; resync so pending interrupts can fire
2864 call(code, opts->do_sync);
2865 }
2866 cycles(&opts->gen, 12);
2867 } else {
//dynamic source: hand the value to the runtime set_sr/set_ccr helper
2868 if (src_op.base != opts->gen.scratch1) {
2869 if (src_op.mode == MODE_REG_DIRECT) {
2870 mov_rr(code, src_op.base, opts->gen.scratch1, SZ_W);
2871 } else {
2872 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_W);
2873 }
2874 }
2875 call(code, inst->op == M68K_MOVE_SR ? opts->set_sr : opts->set_ccr);
2876 cycles(&opts->gen, 12);
2877 
2878 }
2879 break;
2880 case M68K_MOVE_USP:
//MOVE USP: transfer between An and the saved user stack pointer slot,
//which lives in the context immediately after the 8 address registers.
2881 cycles(&opts->gen, BUS);
2882 //TODO: Trap if not in supervisor mode
2883 //bt_irdisp(code, BIT_SUPERVISOR, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
2884 if (inst->src.addr_mode == MODE_UNUSED) {
//MOVE USP, An
2885 if (dst_op.mode == MODE_REG_DIRECT) {
2886 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, dst_op.base, SZ_D);
2887 } else {
2888 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->gen.scratch1, SZ_D);
2889 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_D);
2890 }
2891 } else {
//MOVE An, USP
2892 if (src_op.mode == MODE_REG_DIRECT) {
2893 mov_rrdisp(code, src_op.base, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
2894 } else {
2895 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_D);
2896 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
2897 }
2898 }
2899 break;
2900 //case M68K_MOVEP:
2901 case M68K_MULS:
2902 case M68K_MULU:
//MULS/MULU: 16x16 -> 32 multiply. Both operands are widened (sign- or
//zero-extended) to 32 bits and a single 32-bit imul produces the product.
2903 cycles(&opts->gen, 70); //TODO: Calculate the actual value based on the value of the <ea> parameter
2904 if (src_op.mode == MODE_IMMED) {
2905 mov_ir(code, inst->op == M68K_MULU ? (src_op.disp & 0xFFFF) : ((src_op.disp & 0x8000) ? src_op.disp | 0xFFFF0000 : src_op.disp), opts->gen.scratch1, SZ_D);
2906 } else if (src_op.mode == MODE_REG_DIRECT) {
2907 if (inst->op == M68K_MULS) {
2908 movsx_rr(code, src_op.base, opts->gen.scratch1, SZ_W, SZ_D);
2909 } else {
2910 movzx_rr(code, src_op.base, opts->gen.scratch1, SZ_W, SZ_D);
2911 }
2912 } else {
2913 if (inst->op == M68K_MULS) {
2914 movsx_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_W, SZ_D);
2915 } else {
2916 movzx_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_W, SZ_D);
2917 }
2918 }
//destination widened in place (register) or via scratch2 (memory)
2919 if (dst_op.mode == MODE_REG_DIRECT) {
2920 dst_reg = dst_op.base;
2921 if (inst->op == M68K_MULS) {
2922 movsx_rr(code, dst_reg, dst_reg, SZ_W, SZ_D);
2923 } else {
2924 movzx_rr(code, dst_reg, dst_reg, SZ_W, SZ_D);
2925 }
2926 } else {
2927 dst_reg = opts->gen.scratch2;
2928 if (inst->op == M68K_MULS) {
2929 movsx_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch2, SZ_W, SZ_D);
2930 } else {
2931 movzx_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch2, SZ_W, SZ_D);
2932 }
2933 }
2934 imul_rr(code, opts->gen.scratch1, dst_reg, SZ_D);
2935 if (dst_op.mode == MODE_REG_DISPLACE8) {
2936 mov_rrdisp(code, dst_reg, dst_op.base, dst_op.disp, SZ_D);
2937 }
//68K MUL: V and C always cleared, N/Z from the 32-bit product
2938 set_flag(opts, 0, FLAG_V);
2939 set_flag(opts, 0, FLAG_C);
2940 cmp_ir(code, 0, dst_reg, SZ_D);
2941 set_flag_cond(opts, CC_Z, FLAG_Z);
2942 set_flag_cond(opts, CC_S, FLAG_N);
2943 break;
2944 //case M68K_NBCD:
2945 case M68K_NEG:
//NEG: two's-complement negate. All of C/Z/N/V come from the x86 neg,
//and X is copied from C per 68K semantics.
2946 cycles(&opts->gen, BUS);
2947 if (dst_op.mode == MODE_REG_DIRECT) {
2948 neg_r(code, dst_op.base, inst->extra.size);
2949 } else {
2950 neg_rdisp(code, dst_op.base, dst_op.disp, inst->extra.size);
2951 }
2952 set_flag_cond(opts, CC_C, FLAG_C);
2953 set_flag_cond(opts, CC_Z, FLAG_Z);
2954 set_flag_cond(opts, CC_S, FLAG_N);
2955 set_flag_cond(opts, CC_O, FLAG_V);
2956 if (opts->flag_regs[FLAG_C] >= 0) {
//C lives in a host register: cheap register-to-register copy into X
2957 flag_to_flag(opts, FLAG_C, FLAG_X);
2958 } else {
2959 set_flag_cond(opts, CC_C, FLAG_X);
2960 }
2961 m68k_save_result(inst, opts);
2962 break;
2963 case M68K_NEGX: {
//NEGX: 0 - dst - X, computed as sbb from a zeroed scratch register so the
//incoming X flag participates as the x86 carry.
2964 cycles(&opts->gen, BUS);
2965 if (dst_op.mode == MODE_REG_DIRECT) {
2966 if (dst_op.base == opts->gen.scratch1) {
//destination already occupies scratch1; borrow scratch2 (saved/restored)
2967 push_r(code, opts->gen.scratch2);
2968 xor_rr(code, opts->gen.scratch2, opts->gen.scratch2, inst->extra.size);
2969 flag_to_carry(opts, FLAG_X);
2970 sbb_rr(code, dst_op.base, opts->gen.scratch2, inst->extra.size);
2971 mov_rr(code, opts->gen.scratch2, dst_op.base, inst->extra.size);
2972 pop_r(code, opts->gen.scratch2);
2973 } else {
2974 xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, inst->extra.size);
2975 flag_to_carry(opts, FLAG_X);
2976 sbb_rr(code, dst_op.base, opts->gen.scratch1, inst->extra.size);
2977 mov_rr(code, opts->gen.scratch1, dst_op.base, inst->extra.size);
2978 }
2979 } else {
2980 xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, inst->extra.size);
2981 flag_to_carry(opts, FLAG_X);
2982 sbb_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch1, inst->extra.size);
2983 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, inst->extra.size);
2984 }
2985 set_flag_cond(opts, CC_C, FLAG_C);
//extended-op Z semantics: Z is only ever cleared, never set, so multi-
//precision sequences accumulate "all results were zero" correctly
2986 code_ptr after_flag_set = code->cur + 1;
2987 jcc(code, CC_Z, code->cur + 2);
2988 set_flag(opts, 0, FLAG_Z);
2989 *after_flag_set = code->cur - (after_flag_set+1);
2990 set_flag_cond(opts, CC_S, FLAG_N);
2991 set_flag_cond(opts, CC_O, FLAG_V);
2992 if (opts->flag_regs[FLAG_C] >= 0) {
2993 flag_to_flag(opts, FLAG_C, FLAG_X);
2994 } else {
2995 set_flag_cond(opts, CC_C, FLAG_X);
2996 }
2997 m68k_save_result(inst, opts);
2998 break;
2999 }
3000 case M68K_NOP:
3001 cycles(&opts->gen, BUS);
3002 break;
3003 case M68K_NOT:
//NOT: ones-complement. x86 not does not update flags, so an explicit cmp
//against zero establishes Z/S for the flag latching below.
//Fixed: this case charged no cycles at all; every other single-operand ALU
//case here (NEG, AND, EOR, OR) charges the base BUS time first.
cycles(&opts->gen, BUS);
3004 if (dst_op.mode == MODE_REG_DIRECT) {
3005 not_r(code, dst_op.base, inst->extra.size);
3006 cmp_ir(code, 0, dst_op.base, inst->extra.size);
3007 } else {
3008 not_rdisp(code, dst_op.base, dst_op.disp, inst->extra.size);
3009 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, inst->extra.size);
3010 }
3011 
3012 set_flag(opts, 0, FLAG_C);
3013 set_flag_cond(opts, CC_Z, FLAG_Z);
3014 set_flag_cond(opts, CC_S, FLAG_N);
3015 set_flag(opts, 0, FLAG_V);
3016 m68k_save_result(inst, opts);
3017 break;
3018 case M68K_OR:
//OR: inclusive OR into destination. N/Z from result, V/C cleared, X unaffected.
3019 cycles(&opts->gen, BUS);
3020 if (src_op.mode == MODE_REG_DIRECT) {
3021 if (dst_op.mode == MODE_REG_DIRECT) {
3022 or_rr(code, src_op.base, dst_op.base, inst->extra.size);
3023 } else {
3024 or_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
3025 }
3026 } else if (src_op.mode == MODE_REG_DISPLACE8) {
3027 or_rdispr(code, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
3028 } else {
//immediate source
3029 if (dst_op.mode == MODE_REG_DIRECT) {
3030 or_ir(code, src_op.disp, dst_op.base, inst->extra.size);
3031 } else {
3032 or_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
3033 }
3034 }
3035 set_flag(opts, 0, FLAG_C);
3036 set_flag_cond(opts, CC_Z, FLAG_Z);
3037 set_flag_cond(opts, CC_S, FLAG_N);
3038 set_flag(opts, 0, FLAG_V);
3039 m68k_save_result(inst, opts);
3040 break;
3041 case M68K_ORI_CCR:
3042 case M68K_ORI_SR:
//ORI to CCR/SR: set any flag whose mask bit is 1 in the immediate.
//No supervisor-exit handling is needed: OR can only set the supervisor bit,
//never clear it.
3043 cycles(&opts->gen, 20);
3044 //TODO: If ORI to SR, trap if not in supervisor mode
3045 if (inst->src.params.immed & 0x1) {
3046 set_flag(opts, 1, FLAG_C);
3047 }
3048 if (inst->src.params.immed & 0x2) {
3049 set_flag(opts, 1, FLAG_V);
3050 }
3051 if (inst->src.params.immed & 0x4) {
3052 set_flag(opts, 1, FLAG_Z);
3053 }
3054 if (inst->src.params.immed & 0x8) {
3055 set_flag(opts, 1, FLAG_N);
3056 }
3057 if (inst->src.params.immed & 0x10) {
3058 set_flag(opts, 1, FLAG_X);
3059 }
3060 if (inst->op == M68K_ORI_SR) {
3061 or_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
3062 if (inst->src.params.immed & 0x700) {
//interrupt mask changed; resync so interrupt state is re-evaluated
3063 call(code, opts->do_sync);
3064 }
3065 }
3066 break;
3067 case M68K_RESET:
//Debug stub: dump context and exit rather than asserting the RESET line.
3068 call(code, opts->gen.save_context);
3069 #ifdef X86_64
//context pointer is the first SysV argument register
3070 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
3071 #else
3072 push_r(code, opts->gen.context_reg);
3073 #endif
3074 call(code, (code_ptr)print_regs_exit);
3075 break;
3076 case M68K_ROL:
3077 case M68K_ROR:
//ROL/ROR: rotate without extend. V is always cleared; C gets the last bit
//rotated out (or 0 for a zero count); X is NOT affected, unlike ROXL/ROXR.
3078 set_flag(opts, 0, FLAG_V);
3079 if (inst->src.addr_mode == MODE_UNUSED) {
3080 cycles(&opts->gen, BUS);
3081 //Memory rotate
//memory form always rotates by exactly 1
3082 if (inst->op == M68K_ROL) {
3083 rol_ir(code, 1, dst_op.base, inst->extra.size);
3084 } else {
3085 ror_ir(code, 1, dst_op.base, inst->extra.size);
3086 }
3087 set_flag_cond(opts, CC_C, FLAG_C);
//x86 rotates don't set ZF/SF, so cmp re-derives them from the result
3088 cmp_ir(code, 0, dst_op.base, inst->extra.size);
3089 set_flag_cond(opts, CC_Z, FLAG_Z);
3090 set_flag_cond(opts, CC_S, FLAG_N);
3091 m68k_save_result(inst, opts);
3092 } else {
3093 if (src_op.mode == MODE_IMMED) {
//immediate count form: 2 extra cycles per rotated bit
3094 cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + src_op.disp*2);
3095 if (dst_op.mode == MODE_REG_DIRECT) {
3096 if (inst->op == M68K_ROL) {
3097 rol_ir(code, src_op.disp, dst_op.base, inst->extra.size);
3098 } else {
3099 ror_ir(code, src_op.disp, dst_op.base, inst->extra.size);
3100 }
3101 } else {
3102 if (inst->op == M68K_ROL) {
3103 rol_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
3104 } else {
3105 ror_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
3106 }
3107 }
3108 set_flag_cond(opts, CC_C, FLAG_C);
3109 } else {
//register count form: count is taken modulo 64 per the 68K, but x86
//rotate counts are modulo 32, so counts >= 32 are handled in two steps
3110 if (src_op.mode == MODE_REG_DIRECT) {
3111 if (src_op.base != opts->gen.scratch1) {
3112 mov_rr(code, src_op.base, opts->gen.scratch1, SZ_B);
3113 }
3114 } else {
3115 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
3116 }
3117 and_ir(code, 63, opts->gen.scratch1, SZ_D);
3118 zero_off = code->cur + 1;
3119 jcc(code, CC_Z, code->cur + 2);
//charge 2 cycles per rotated bit (count added twice to the cycle register)
3120 add_rr(code, opts->gen.scratch1, CYCLES, SZ_D);
3121 add_rr(code, opts->gen.scratch1, CYCLES, SZ_D);
3122 cmp_ir(code, 32, opts->gen.scratch1, SZ_B);
3123 norm_off = code->cur + 1;
3124 jcc(code, CC_L, code->cur + 2);
//count >= 32: pre-rotate by 32 (as 31+1, since x86 masks the count) then
//fall through to rotate by the remaining count
3125 sub_ir(code, 32, opts->gen.scratch1, SZ_B);
3126 if (dst_op.mode == MODE_REG_DIRECT) {
3127 if (inst->op == M68K_ROL) {
3128 rol_ir(code, 31, dst_op.base, inst->extra.size);
3129 rol_ir(code, 1, dst_op.base, inst->extra.size);
3130 } else {
3131 ror_ir(code, 31, dst_op.base, inst->extra.size);
3132 ror_ir(code, 1, dst_op.base, inst->extra.size);
3133 }
3134 } else {
3135 if (inst->op == M68K_ROL) {
3136 rol_irdisp(code, 31, dst_op.base, dst_op.disp, inst->extra.size);
3137 rol_irdisp(code, 1, dst_op.base, dst_op.disp, inst->extra.size);
3138 } else {
3139 ror_irdisp(code, 31, dst_op.base, dst_op.disp, inst->extra.size);
3140 ror_irdisp(code, 1, dst_op.base, dst_op.disp, inst->extra.size);
3141 }
3142 }
3143 *norm_off = code->cur - (norm_off+1);
//rotate by the (remaining) count in CL
3144 if (dst_op.mode == MODE_REG_DIRECT) {
3145 if (inst->op == M68K_ROL) {
3146 rol_clr(code, dst_op.base, inst->extra.size);
3147 } else {
3148 ror_clr(code, dst_op.base, inst->extra.size);
3149 }
3150 } else {
3151 if (inst->op == M68K_ROL) {
3152 rol_clrdisp(code, dst_op.base, dst_op.disp, inst->extra.size);
3153 } else {
3154 ror_clrdisp(code, dst_op.base, dst_op.disp, inst->extra.size);
3155 }
3156 }
3157 set_flag_cond(opts, CC_C, FLAG_C);
3158 end_off = code->cur + 1;
3159 jmp(code, code->cur + 2);
3160 *zero_off = code->cur - (zero_off+1);
//zero count: C is cleared
3161 set_flag(opts, 0, FLAG_C);
3162 *end_off = code->cur - (end_off+1);
3163 }
3164 if (dst_op.mode == MODE_REG_DIRECT) {
3165 cmp_ir(code, 0, dst_op.base, inst->extra.size);
3166 } else {
3167 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, inst->extra.size);
3168 }
3169 set_flag_cond(opts, CC_Z, FLAG_Z);
3170 set_flag_cond(opts, CC_S, FLAG_N);
3171 }
3172 break;
3173 case M68K_ROXL:
3174 case M68K_ROXR:
//ROXL/ROXR: rotate through the extend (X) flag, mapped onto x86 rcl/rcr with
//X loaded into the host carry first. V cleared; C and X both get the last bit
//rotated out; for a zero count C is set to X.
3175 set_flag(opts, 0, FLAG_V);
3176 if (inst->src.addr_mode == MODE_UNUSED) {
3177 cycles(&opts->gen, BUS);
3178 //Memory rotate
3179 flag_to_carry(opts, FLAG_X);
3180 if (inst->op == M68K_ROXL) {
3181 rcl_ir(code, 1, dst_op.base, inst->extra.size);
3182 } else {
3183 rcr_ir(code, 1, dst_op.base, inst->extra.size);
3184 }
3185 set_flag_cond(opts, CC_C, FLAG_C);
//when C is memory-backed, latch X from the host carry now, before cmp
//destroys it; when C is register-backed, copy C into X after the cmp instead
3186 if (opts->flag_regs[FLAG_C] < 0) {
3187 set_flag_cond(opts, CC_C, FLAG_X);
3188 }
3189 cmp_ir(code, 0, dst_op.base, inst->extra.size);
3190 set_flag_cond(opts, CC_Z, FLAG_Z);
3191 set_flag_cond(opts, CC_S, FLAG_N);
3192 if (opts->flag_regs[FLAG_C] >= 0) {
3193 flag_to_flag(opts, FLAG_C, FLAG_X);
3194 }
3195 m68k_save_result(inst, opts);
3196 } else {
3197 if (src_op.mode == MODE_IMMED) {
3198 cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + src_op.disp*2);
3199 flag_to_carry(opts, FLAG_X);
3200 if (dst_op.mode == MODE_REG_DIRECT) {
3201 if (inst->op == M68K_ROXL) {
3202 rcl_ir(code, src_op.disp, dst_op.base, inst->extra.size);
3203 } else {
3204 rcr_ir(code, src_op.disp, dst_op.base, inst->extra.size);
3205 }
3206 } else {
3207 if (inst->op == M68K_ROXL) {
3208 rcl_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
3209 } else {
3210 rcr_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
3211 }
3212 }
3213 set_flag_cond(opts, CC_C, FLAG_C);
3214 if (opts->flag_regs[FLAG_C] >= 0) {
3215 flag_to_flag(opts, FLAG_C, FLAG_X);
3216 } else {
3217 set_flag_cond(opts, CC_C, FLAG_X);
3218 }
3219 } else {
//register count form: 68K count is modulo 64; x86 rc* counts are modulo 32,
//so counts >= 32 are split into a 31+1 rotate plus the remainder
3220 if (src_op.mode == MODE_REG_DIRECT) {
3221 if (src_op.base != opts->gen.scratch1) {
3222 mov_rr(code, src_op.base, opts->gen.scratch1, SZ_B);
3223 }
3224 } else {
3225 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B);
3226 }
3227 and_ir(code, 63, opts->gen.scratch1, SZ_D);
3228 zero_off = code->cur + 1;
3229 jcc(code, CC_Z, code->cur + 2);
//charge 2 cycles per rotated bit
3230 add_rr(code, opts->gen.scratch1, CYCLES, SZ_D);
3231 add_rr(code, opts->gen.scratch1, CYCLES, SZ_D);
3232 cmp_ir(code, 32, opts->gen.scratch1, SZ_B);
3233 norm_off = code->cur + 1;
3234 jcc(code, CC_L, code->cur + 2);
3235 flag_to_carry(opts, FLAG_X);
3236 if (dst_op.mode == MODE_REG_DIRECT) {
3237 if (inst->op == M68K_ROXL) {
3238 rcl_ir(code, 31, dst_op.base, inst->extra.size);
3239 rcl_ir(code, 1, dst_op.base, inst->extra.size);
3240 } else {
3241 rcr_ir(code, 31, dst_op.base, inst->extra.size);
3242 rcr_ir(code, 1, dst_op.base, inst->extra.size);
3243 }
3244 } else {
3245 if (inst->op == M68K_ROXL) {
3246 rcl_irdisp(code, 31, dst_op.base, dst_op.disp, inst->extra.size);
3247 rcl_irdisp(code, 1, dst_op.base, dst_op.disp, inst->extra.size);
3248 } else {
3249 rcr_irdisp(code, 31, dst_op.base, dst_op.disp, inst->extra.size);
3250 rcr_irdisp(code, 1, dst_op.base, dst_op.disp, inst->extra.size);
3251 }
3252 }
//intermediate X must be updated so the second rotate continues through it
3253 set_flag_cond(opts, CC_C, FLAG_X);
3254 sub_ir(code, 32, opts->gen.scratch1, SZ_B);
3255 *norm_off = code->cur - (norm_off+1);
3256 flag_to_carry(opts, FLAG_X);
3257 if (dst_op.mode == MODE_REG_DIRECT) {
3258 if (inst->op == M68K_ROXL) {
3259 rcl_clr(code, dst_op.base, inst->extra.size);
3260 } else {
3261 rcr_clr(code, dst_op.base, inst->extra.size);
3262 }
3263 } else {
3264 if (inst->op == M68K_ROXL) {
3265 rcl_clrdisp(code, dst_op.base, dst_op.disp, inst->extra.size);
3266 } else {
3267 rcr_clrdisp(code, dst_op.base, dst_op.disp, inst->extra.size);
3268 }
3269 }
3270 set_flag_cond(opts, CC_C, FLAG_C);
3271 if (opts->flag_regs[FLAG_C] >= 0) {
3272 flag_to_flag(opts, FLAG_C, FLAG_X);
3273 } else {
3274 set_flag_cond(opts, CC_C, FLAG_X);
3275 }
3276 end_off = code->cur + 1;
3277 jmp(code, code->cur + 2);
3278 *zero_off = code->cur - (zero_off+1);
3279 //Carry flag is set to X flag when count is 0, this is different from ROR/ROL
3280 flag_to_flag(opts, FLAG_X, FLAG_C);
3281 *end_off = code->cur - (end_off+1);
3282 }
3283 if (dst_op.mode == MODE_REG_DIRECT) {
3284 cmp_ir(code, 0, dst_op.base, inst->extra.size);
3285 } else {
3286 cmp_irdisp(code, 0, dst_op.base, dst_op.disp, inst->extra.size);
3287 }
3288 set_flag_cond(opts, CC_Z, FLAG_Z);
3289 set_flag_cond(opts, CC_S, FLAG_N);
3290 }
3291 break;
3292 case M68K_RTE:
//RTE: pop SR then PC from the supervisor stack, swap back to the user stack
//pointer if the restored SR clears the supervisor bit, then resume at the
//restored PC via native_addr_and_sync.
3293 //TODO: Trap if not in system mode
3294 //Read saved SR
3295 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3296 call(code, opts->read_16);
3297 add_ir(code, 2, opts->aregs[7], SZ_D);
3298 call(code, opts->set_sr);
3299 //Read saved PC
3300 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3301 call(code, opts->read_32);
3302 add_ir(code, 4, opts->aregs[7], SZ_D);
3303 //Check if we've switched to user mode and swap stack pointers if needed
//use the named constant instead of the magic number 5, matching the other
//supervisor-bit tests in this file
3304 bt_irdisp(code, BIT_SUPERVISOR, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
3305 end_off = code->cur + 1;
3306 jcc(code, CC_C, code->cur + 2);
//supervisor bit clear: swap A7 with the saved user stack pointer
3307 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
3308 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
3309 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
3310 *end_off = code->cur - (end_off+1);
3311 //Get native address, sync components, recalculate integer points and jump to returned address
3312 call(code, opts->native_addr_and_sync);
3313 jmp_r(code, opts->gen.scratch1);
3314 break;
3315 case M68K_RTR:
//RTR: pop CCR (low byte only, status byte untouched) then PC, and resume.
3316 //Read saved CCR
3317 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3318 call(code, opts->read_16);
3319 add_ir(code, 2, opts->aregs[7], SZ_D);
3320 call(code, opts->set_ccr);
3321 //Read saved PC
3322 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3323 call(code, opts->read_32);
3324 add_ir(code, 4, opts->aregs[7], SZ_D);
3325 //Get native address and jump to it
3326 call(code, opts->native_addr);
3327 jmp_r(code, opts->gen.scratch1);
3328 break;
3329 case M68K_SBCD: {
//SBCD: BCD subtract with extend via the bcd_sub helper, which expects its
//operands in scratch2 (source) and scratch1 (destination).
3330 if (src_op.base != opts->gen.scratch2) {
3331 if (src_op.mode == MODE_REG_DIRECT) {
3332 mov_rr(code, src_op.base, opts->gen.scratch2, SZ_B);
3333 } else {
3334 mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch2, SZ_B);
3335 }
3336 }
3337 if (dst_op.base != opts->gen.scratch1) {
3338 if (dst_op.mode == MODE_REG_DIRECT) {
3339 mov_rr(code, dst_op.base, opts->gen.scratch1, SZ_B);
3340 } else {
3341 mov_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch1, SZ_B);
3342 }
3343 }
//apply the incoming X flag: skip the decrement when X is clear.
//The branch target is a hard-coded offset over the following sub instruction.
3344 flag_to_carry(opts, FLAG_X);
3345 jcc(code, CC_NC, code->cur + 5);
3346 sub_ir(code, 1, opts->gen.scratch1, SZ_B);
3347 call(code, (code_ptr)bcd_sub);
//bcd_sub leaves the borrow in CH; latch it into both C and X
3348 reg_to_flag(opts, CH, FLAG_C);
3349 reg_to_flag(opts, CH, FLAG_X);
3350 cmp_ir(code, 0, opts->gen.scratch1, SZ_B);
//extended-op Z semantics: Z is only cleared on a nonzero result, never set
3351 code_ptr after_flag_set = code->cur+1;
3352 jcc(code, CC_Z, code->cur + 2);
3353 set_flag(opts, 0, FLAG_Z);
3354 *after_flag_set = code->cur - (after_flag_set+1);
3355 if (dst_op.base != opts->gen.scratch1) {
3356 if (dst_op.mode == MODE_REG_DIRECT) {
3357 mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B);
3358 } else {
3359 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B);
3360 }
3361 }
3362 m68k_save_result(inst, opts);
3363 break;
3364 }
3365 case M68K_STOP: {
3366 //TODO: Trap if not in system mode
3367 //manual says 4 cycles, but it has to be at least 8 since it's a 2-word instruction
3368 //possibly even 12 since that's how long MOVE to SR takes
3369 cycles(&opts->gen, BUS*2);
3370 set_flag(opts, src_op.disp & 0x1, FLAG_C);
3371 set_flag(opts, (src_op.disp >> 1) & 0x1, FLAG_V);
3372 set_flag(opts, (src_op.disp >> 2) & 0x1, FLAG_Z);
3373 set_flag(opts, (src_op.disp >> 3) & 0x1, FLAG_N);
3374 set_flag(opts, (src_op.disp >> 4) & 0x1, FLAG_X);
3375 mov_irdisp(code, (src_op.disp >> 8), opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
3376 if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
3377 //leave supervisor mode
3378 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3379 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
3380 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
3381 }
3382 code_ptr loop_top = code->cur;
3383 call(code, opts->do_sync);
3384 cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D);
3385 code_ptr normal_cycle_up = code->cur + 1;
3386 jcc(code, CC_A, code->cur + 2);
3387 cycles(&opts->gen, BUS);
3388 code_ptr after_cycle_up = code->cur + 1;
3389 jmp(code, code->cur + 2);
3390 *normal_cycle_up = code->cur - (normal_cycle_up + 1);
3391 mov_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D);
3392 *after_cycle_up = code->cur - (after_cycle_up+1);
3393 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D);
3394 jcc(code, CC_C, loop_top);
3395 break;
3396 }
3397 case M68K_SUB:
3398 size = inst->dst.addr_mode == MODE_AREG ? OPSIZE_LONG : inst->extra.size;
3399 cycles(&opts->gen, BUS);
3400 if (src_op.mode == MODE_REG_DIRECT) {
3401 if (dst_op.mode == MODE_REG_DIRECT) {
3402 sub_rr(code, src_op.base, dst_op.base, size);
3403 } else {
3404 sub_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
3405 }
3406 } else if (src_op.mode == MODE_REG_DISPLACE8) {
3407 sub_rdispr(code, src_op.base, src_op.disp, dst_op.base, size);
3408 } else {
3409 if (dst_op.mode == MODE_REG_DIRECT) {
3410 sub_ir(code, src_op.disp, dst_op.base, size);
3411 } else {
3412 sub_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, size);
3413 }
3414 }
3415 if (inst->dst.addr_mode != MODE_AREG) {
3416 set_flag_cond(opts, CC_C, FLAG_C);
3417 set_flag_cond(opts, CC_Z, FLAG_Z);
3418 set_flag_cond(opts, CC_S, FLAG_N);
3419 set_flag_cond(opts, CC_O, FLAG_V);
3420 if (opts->flag_regs[FLAG_C] >= 0) {
3421 flag_to_flag(opts, FLAG_C, FLAG_X);
3422 } else {
3423 set_flag_cond(opts, CC_C, FLAG_X);
3424 }
3425 }
3426 m68k_save_result(inst, opts);
3427 break;
3428 case M68K_SUBX: {
3429 cycles(&opts->gen, BUS);
3430 flag_to_carry(opts, FLAG_X);
3431 if (src_op.mode == MODE_REG_DIRECT) {
3432 if (dst_op.mode == MODE_REG_DIRECT) {
3433 sbb_rr(code, src_op.base, dst_op.base, inst->extra.size);
3434 } else {
3435 sbb_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
3436 }
3437 } else if (src_op.mode == MODE_REG_DISPLACE8) {
3438 sbb_rdispr(code, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
3439 } else {
3440 if (dst_op.mode == MODE_REG_DIRECT) {
3441 sbb_ir(code, src_op.disp, dst_op.base, inst->extra.size);
3442 } else {
3443 sbb_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
3444 }
3445 }
3446 set_flag_cond(opts, CC_C, FLAG_C);
3447 if (opts->flag_regs[FLAG_C] < 0) {
3448 set_flag_cond(opts, CC_C, FLAG_X);
3449 }
3450 code_ptr after_flag_set = code->cur + 1;
3451 jcc(code, CC_Z, code->cur + 2);
3452 set_flag(opts, 0, FLAG_Z);
3453 *after_flag_set = code->cur - (after_flag_set+1);
3454 set_flag_cond(opts, CC_S, FLAG_N);
3455 set_flag_cond(opts, CC_O, FLAG_V);
3456 if (opts->flag_regs[FLAG_C] >= 0) {
3457 flag_to_flag(opts, FLAG_C, FLAG_X);
3458 }
3459 m68k_save_result(inst, opts);
3460 break;
3461 }
3462 case M68K_SWAP:
3463 cycles(&opts->gen, BUS);
3464 if (src_op.mode == MODE_REG_DIRECT) {
3465 rol_ir(code, 16, src_op.base, SZ_D);
3466 cmp_ir(code, 0, src_op.base, SZ_D);
3467 } else{
3468 rol_irdisp(code, 16, src_op.base, src_op.disp, SZ_D);
3469 cmp_irdisp(code, 0, src_op.base, src_op.disp, SZ_D);
3470 }
3471
3472 set_flag(opts, 0, FLAG_C);
3473 set_flag_cond(opts, CC_Z, FLAG_Z);
3474 set_flag_cond(opts, CC_S, FLAG_N);
3475 set_flag(opts, 0, FLAG_V);
3476 break;
3477 //case M68K_TAS:
3478 case M68K_TRAP:
3479 mov_ir(code, src_op.disp + VECTOR_TRAP_0, opts->gen.scratch2, SZ_D);
3480 mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
3481 jmp(code, opts->trap);
3482 break;
3483 //case M68K_TRAPV:
3484 case M68K_TST:
3485 cycles(&opts->gen, BUS);
3486 if (src_op.mode == MODE_REG_DIRECT) {
3487 cmp_ir(code, 0, src_op.base, inst->extra.size);
3488 } else { //M68000 doesn't support immedate operand for tst, so this must be MODE_REG_DISPLACE8
3489 cmp_irdisp(code, 0, src_op.base, src_op.disp, inst->extra.size);
3490 }
3491 set_flag(opts, 0, FLAG_C);
3492 set_flag_cond(opts, CC_Z, FLAG_Z);
3493 set_flag_cond(opts, CC_S, FLAG_N);
3494 set_flag(opts, 0, FLAG_V);
3495 break;
3496 case M68K_UNLK:
3497 cycles(&opts->gen, BUS);
3498 if (dst_op.mode == MODE_REG_DIRECT) {
3499 mov_rr(code, dst_op.base, opts->aregs[7], SZ_D);
3500 } else {
3501 mov_rdispr(code, dst_op.base, dst_op.disp, opts->aregs[7], SZ_D);
3502 }
3503 mov_rr(code, opts->aregs[7], opts->gen.scratch1, SZ_D);
3504 call(code, opts->read_32);
3505 if (dst_op.mode == MODE_REG_DIRECT) {
3506 mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_D);
3507 } else {
3508 mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_D);
3509 }
3510 add_ir(code, 4, opts->aregs[7], SZ_D);
3511 break;
3512 default:
3513 m68k_disasm(inst, disasm_buf);
3514 printf("%X: %s\ninstruction %d not yet implemented\n", inst->address, disasm_buf, inst->op);
3515 exit(1);
3516 }
3517 }
3518
//Emits native code that terminates the emulator when execution jumps to a
//68K address outside the translatable range. The generated code calls exit(0):
//RDI is zeroed to pass status 0 in the first x86-64 SysV argument register;
//on 32-bit x86 the zeroed register is pushed as the cdecl stack argument instead.
void translate_out_of_bounds(code_info *code)
{
	xor_rr(code, RDI, RDI, SZ_D); //exit status = 0
#ifdef X86_32
	push_r(code, RDI); //32-bit cdecl passes the argument on the stack
#endif
	call(code, (code_ptr)exit);
}
3527
//Ensures the code buffer has room for a worst-case translation of a single
//68K instruction (MAX_INST_LEN native instructions at up to 4 bytes each is the
//budget used here) before translation begins, so emission never runs off the
//end of the buffer mid-instruction.
void check_code_prologue(code_info *code)
{
	check_alloc_code(code, MAX_INST_LEN*4);
}
3532
//Retranslates the 68K instruction at the given address after its memory has
//been modified (self-modifying code). Attempts to patch the new translation
//over the old one in place; when the new code does not fit, the instruction's
//native-size mapping is promoted to MAX_NATIVE_SIZE and the code is emitted
//elsewhere with a jump stitched in. Returns the native address at which
//execution of this instruction should resume.
void * m68k_retranslate_inst(uint32_t address, m68k_context * context)
{
	m68k_options * opts = context->options;
	code_info *code = &opts->gen.code;
	uint8_t orig_size = get_native_inst_size(opts, address);
	code_ptr orig_start = get_native_address(context->native_code_map, address);
	uint32_t orig = address;
	code_info orig_code;
	orig_code.cur = orig_start;
	//+5 leaves room for a 32-bit relative jmp after the original code
	orig_code.last = orig_start + orig_size + 5;
	//mask down to an offset within the RAM region backing mem_pointers[1]
	//NOTE(review): assumes the modified instruction lives in that 64KB-masked
	//region (presumably work RAM) — confirm against the memory map setup
	address &= 0xFFFF;
	uint16_t *after, *inst = context->mem_pointers[1] + address/2;
	m68kinst instbuf;
	after = m68k_decode(inst, &instbuf, orig);
	if (orig_size != MAX_NATIVE_SIZE) {
		//remember the deferred-address list head so a throwaway translation
		//can be rolled back below
		deferred_addr * orig_deferred = opts->gen.deferred;

		//make sure the beginning of the code for an instruction is contiguous
		check_alloc_code(code, MAX_INST_LEN*4);
		//First, translate into fresh space purely to measure the new size
		code_ptr native_start = code->cur;
		translate_m68k(opts, &instbuf);
		code_ptr native_end = code->cur;
		uint8_t is_terminal = m68k_is_terminal(&instbuf);
		if ((native_end - native_start) <= orig_size) {
			//New translation fits in the old slot; find where control must
			//continue if the instruction falls through
			code_ptr native_next;
			if (!is_terminal) {
				native_next = get_native_address(context->native_code_map, orig + (after-inst)*2);
			}
			//Only patch in place when terminal, or when the successor is known
			//and either immediately follows or there is room for a 5-byte jmp
			if (is_terminal || (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - native_start)) > 5))) {
				//discard fixups recorded by the measuring translation above
				remove_deferred_until(&opts->gen.deferred, orig_deferred);
				//redirect emission into the original slot, translate for real,
				//then restore the main code buffer
				code_info tmp;
				tmp.cur = code->cur;
				tmp.last = code->last;
				code->cur = orig_code.cur;
				code->last = orig_code.last;
				translate_m68k(opts, &instbuf);
				native_end = orig_code.cur = code->cur;
				code->cur = tmp.cur;
				code->last = tmp.last;
				if (!is_terminal) {
					if (native_next == orig_start + orig_size && (native_next-native_end) < 2) {
						//gap too small for a jmp; pad to the successor with NOPs
						while (orig_code.cur < orig_start + orig_size) {
							*(orig_code.cur++) = 0x90; //NOP
						}
					} else {
						jmp(&orig_code, native_next);
					}
				}
				m68k_handle_deferred(context);
				return orig_start;
			}
		}

		//Didn't fit: keep the freshly emitted code and permanently mark this
		//instruction as max-size so future retranslations patch in place
		map_native_address(context, instbuf.address, native_start, (after-inst)*2, MAX_NATIVE_SIZE);

		//old location forwards to the new code
		jmp(&orig_code, native_start);
		if (!m68k_is_terminal(&instbuf)) {
			//emit the fall-through jump at the end of the reserved max-size
			//slot, then restore the emission cursor
			code_ptr native_end = code->cur;
			code->cur = native_start + MAX_NATIVE_SIZE;
			code_ptr rest = get_native_address_trans(context, orig + (after-inst)*2);
			code_ptr tmp = code->cur;
			code->cur = native_end;
			jmp(code, rest);
			code->cur = tmp;
		} else {
			code->cur = native_start + MAX_NATIVE_SIZE;
		}
		m68k_handle_deferred(context);
		return native_start;
	} else {
		//Instruction already occupies a max-size slot: retranslate directly
		//over the existing native code
		code_info tmp;
		tmp.cur = code->cur;
		tmp.last = code->last;
		code->cur = orig_code.cur;
		code->last = orig_code.last;
		translate_m68k(opts, &instbuf);
		if (!m68k_is_terminal(&instbuf)) {
			jmp(code, get_native_address_trans(context, orig + (after-inst)*2));
		}
		code->cur = tmp.cur;
		code->last = tmp.last;
		m68k_handle_deferred(context);
		return orig_start;
	}
}
3618
//Called from generated write handlers when a write lands in memory flagged as
//containing translated code. Patches the start of the affected instruction's
//native code so that the next time it executes it jumps into a (lazily
//created) retranslation stub, which calls m68k_retranslate_inst and resumes at
//the address it returns. Returns the (possibly unchanged) context pointer.
m68k_context * m68k_handle_code_write(uint32_t address, m68k_context * context)
{
	//NOTE(review): OR with 0xFF0000 rebases the masked RAM offset back into
	//the 68K address space (presumably Genesis work RAM) — confirm
	uint32_t inst_start = get_instruction_start(context->native_code_map, address | 0xFF0000);
	if (inst_start) {
		m68k_options * options = context->options;
		code_info *code = &options->gen.code;
		code_ptr dst = get_native_address(context->native_code_map, inst_start);
		//patch window over the existing native code for this instruction
		code_info orig;
		orig.cur = dst;
		orig.last = dst + 128;
		//overwrite the instruction's prologue: load its 68K address into
		//scratch2 (the argument the stub passes to m68k_retranslate_inst)
		mov_ir(&orig, inst_start, options->gen.scratch2, SZ_D);

		//Lazily emit the shared retranslation stub on first use
		if (!options->retrans_stub) {
			options->retrans_stub = code->cur;
			call(code, options->gen.save_context);
			push_r(code, options->gen.context_reg);
#ifdef X86_32
			//32-bit cdecl: push args right-to-left (context, then address)
			push_r(code, options->gen.context_reg);
			push_r(code, options->gen.scratch2);
#endif
			call(code, (code_ptr)m68k_retranslate_inst);
#ifdef X86_32
			add_ir(code, 8, RSP, SZ_D); //pop the two 4-byte arguments
#endif
			pop_r(code, options->gen.context_reg);
			//RAX holds the native address to resume at
			mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
			call(code, options->gen.load_context);
			jmp_r(code, options->gen.scratch1);
		}
		//finish the patch: jump from the instruction site into the stub
		jmp(&orig, options->retrans_stub);
	}
	return context;
}
3652
//Installs a breakpoint at a 68K address by overwriting the start of its native
//translation with a call into a shared stub. The stub saves the CPU context,
//invokes bp_handler(context, address), restores context, re-runs the cycle/
//interrupt prologue the patch clobbered, then resumes the instruction body
//just past the patched bytes. The stub is generated once and reused for all
//breakpoints (bp_handler is baked into it on first use).
void insert_breakpoint(m68k_context * context, uint32_t address, code_ptr bp_handler)
{
	static code_ptr bp_stub = NULL;
	m68k_options * opts = context->options;
	code_info native;
	native.cur = get_native_address_trans(context, address);
	native.last = native.cur + 128;
	code_ptr start_native = native.cur;
	//patch site: load the 68K address for the handler, then call the stub
	mov_ir(&native, address, opts->gen.scratch1, SZ_D);
	if (!bp_stub) {
		code_info *code = &opts->gen.code;
		//NOTE(review): reserves only 5 bytes but the stub emitted below is far
		//larger — presumably check_alloc_code rounds up to a larger chunk;
		//confirm against its implementation
		check_alloc_code(code, 5);
		bp_stub = code->cur;
		call(&native, bp_stub);

		//Calculate length of prologue
		//(emit a throwaway copy of the standard instruction prologue purely to
		//measure how many bytes the patch overwrote, then rewind)
		check_cycles_int(&opts->gen, address);
		int check_int_size = code->cur-bp_stub;
		code->cur = bp_stub;

		//Save context and call breakpoint handler
		call(code, opts->gen.save_context);
		push_r(code, opts->gen.scratch1); //preserve the 68K address across the call
#ifdef X86_64
		mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
		mov_rr(code, opts->gen.scratch1, RSI, SZ_D);
#else
		push_r(code, opts->gen.scratch1);
		push_r(code, opts->gen.context_reg);
#endif
		call(code, bp_handler);
#ifdef X86_32
		add_ir(code, 8, RSP, SZ_D); //pop the two cdecl arguments
#endif
		//handler returns the (possibly replaced) context pointer
		mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
		//Restore context
		call(code, opts->gen.load_context);
		pop_r(code, opts->gen.scratch1);
		//do prologue stuff
		//(replicate the cycle-limit / interrupt check the patch overwrote)
		cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D);
		code_ptr jmp_off = code->cur + 1;
		jcc(code, CC_NC, code->cur + 7);
		call(code, opts->gen.handle_cycle_limit_int);
		*jmp_off = code->cur - (jmp_off+1);
		//jump back to body of translated instruction
		//(return address on the stack points just after the patched call;
		//advance it past the remainder of the original prologue)
		pop_r(code, opts->gen.scratch1);
		add_ir(code, check_int_size - (native.cur-start_native), opts->gen.scratch1, SZ_PTR);
		jmp_r(code, opts->gen.scratch1);
	} else {
		call(&native, bp_stub);
	}
}
3705
//Generates a native read or write handler (8- or 16-bit, per fun_type) that
//dispatches on the 24-bit 68K address across the supplied memory map.
//Generated-code calling convention: scratch1 holds the address for reads and
//the value for writes; scratch2 holds the address for writes; reads return
//the value in scratch1. For each chunk the emitted code range-checks the
//address, then either accesses the backing buffer directly, calls a C handler
//function, or returns a dummy value for unmapped space. Returns the start
//address of the generated handler.
code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk * memmap, uint32_t num_chunks, ftype fun_type)
{
	code_info *code = &opts->code;
	code_ptr start = code->cur;
	check_cycles(opts);
	cycles(opts, BUS); //every bus access costs BUS cycles
	//68K has a 24-bit address bus; discard the upper byte
	and_ir(code, 0xFFFFFF, opts->scratch1, SZ_D);
	//forward branch fixups for the current chunk's lower/upper bound checks
	code_ptr lb_jcc = NULL, ub_jcc = NULL;
	uint8_t is_write = fun_type == WRITE_16 || fun_type == WRITE_8;
	uint8_t adr_reg = is_write ? opts->scratch2 : opts->scratch1;
	uint16_t access_flag = is_write ? MMAP_WRITE : MMAP_READ;
	uint8_t size = (fun_type == READ_16 || fun_type == WRITE_16) ? SZ_W : SZ_B;
	for (uint32_t chunk = 0; chunk < num_chunks; chunk++)
	{
		//skip this chunk if the address is below its start...
		if (memmap[chunk].start > 0) {
			cmp_ir(code, memmap[chunk].start, adr_reg, SZ_D);
			lb_jcc = code->cur + 1;
			jcc(code, CC_C, code->cur + 2);
		}
		//...or at/above its (exclusive) end
		if (memmap[chunk].end < 0x1000000) {
			cmp_ir(code, memmap[chunk].end, adr_reg, SZ_D);
			ub_jcc = code->cur + 1;
			jcc(code, CC_NC, code->cur + 2);
		}

		//apply the chunk's mirroring mask
		if (memmap[chunk].mask != 0xFFFFFF) {
			and_ir(code, memmap[chunk].mask, adr_reg, SZ_D);
		}
		//C fallback handler for this chunk matching the access type, if any
		void * cfun;
		switch (fun_type)
		{
		case READ_16:
			cfun = memmap[chunk].read_16;
			break;
		case READ_8:
			cfun = memmap[chunk].read_8;
			break;
		case WRITE_16:
			cfun = memmap[chunk].write_16;
			break;
		case WRITE_8:
			cfun = memmap[chunk].write_8;
			break;
		default:
			cfun = NULL;
		}
		if(memmap[chunk].buffer && memmap[chunk].flags & access_flag) {
			//Chunk is backed by memory directly accessible for this access type
			if (memmap[chunk].flags & MMAP_PTR_IDX) {
				//buffer is found indirectly through mem_pointers[ptr_index]
				if (memmap[chunk].flags & MMAP_FUNC_NULL) {
					//pointer may be NULL (e.g. banked region currently
					//unmapped); fall back to the C handler in that case
					cmp_irdisp(code, 0, opts->context_reg, offsetof(m68k_context, mem_pointers) + sizeof(void*) * memmap[chunk].ptr_index, SZ_PTR);
					code_ptr not_null = code->cur + 1;
					jcc(code, CC_NZ, code->cur + 2);
					call(code, opts->save_context);
#ifdef X86_64
					//SysV args: RDI = address, RSI = context (already there),
					//RDX = value for writes
					if (is_write) {
						if (opts->scratch2 != RDI) {
							mov_rr(code, opts->scratch2, RDI, SZ_D);
						}
						mov_rr(code, opts->scratch1, RDX, size);
					} else {
						push_r(code, opts->context_reg);
						mov_rr(code, opts->scratch1, RDI, SZ_D);
					}
					//keep RSP 16-byte aligned at the call per the SysV ABI:
					//test alignment at runtime and pad by 8 when needed
					test_ir(code, 8, RSP, SZ_D);
					code_ptr adjust_rsp = code->cur + 1;
					jcc(code, CC_NZ, code->cur + 2);
					call(code, cfun);
					code_ptr no_adjust = code->cur + 1;
					jmp(code, code->cur + 2);
					*adjust_rsp = code->cur - (adjust_rsp + 1);
					sub_ir(code, 8, RSP, SZ_PTR);
					call(code, cfun);
					add_ir(code, 8, RSP, SZ_PTR);
					*no_adjust = code->cur - (no_adjust + 1);
#else
					if (is_write) {
						push_r(code, opts->scratch1);
					} else {
						push_r(code, opts->context_reg);//save opts->context_reg for later
					}
					push_r(code, opts->context_reg);
					push_r(code, is_write ? opts->scratch2 : opts->scratch1);
					call(code, cfun);
					add_ir(code, is_write ? 12 : 8, RSP, SZ_D);
#endif
					if (is_write) {
						//write handlers return the new context pointer
						mov_rr(code, RAX, opts->context_reg, SZ_PTR);
					} else {
						pop_r(code, opts->context_reg);
						mov_rr(code, RAX, opts->scratch1, size);
					}
					//load_context ends in ret, completing the handler
					jmp(code, opts->load_context);

					*not_null = code->cur - (not_null + 1);
				}
				if (size == SZ_B) {
					//flip bit 0: 68K is big-endian, host buffer is
					//little-endian 16-bit words
					xor_ir(code, 1, adr_reg, SZ_D);
				}
				//address += mem_pointers[ptr_index]
				add_rdispr(code, opts->context_reg, offsetof(m68k_context, mem_pointers) + sizeof(void*) * memmap[chunk].ptr_index, adr_reg, SZ_PTR);
				if (is_write) {
					mov_rrind(code, opts->scratch1, opts->scratch2, size);

				} else {
					mov_rindr(code, opts->scratch1, opts->scratch1, size);
				}
			} else {
				//buffer address is known at generation time
				uint8_t tmp_size = size;
				if (size == SZ_B) {
					if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
						//chunk only decodes one byte of each word (e.g. Z80
						//RAM / VRAM-style mappings); bail out for the other
						//byte, returning 0xFF on reads
						bt_ir(code, 0, adr_reg, SZ_D);
						code_ptr good_addr = code->cur + 1;
						jcc(code, (memmap[chunk].flags & MMAP_ONLY_ODD) ? CC_C : CC_NC, code->cur + 2);
						if (!is_write) {
							mov_ir(code, 0xFF, opts->scratch1, SZ_B);
						}
						retn(code);
						*good_addr = code->cur - (good_addr + 1);
						//buffer is packed one byte per 68K word
						shr_ir(code, 1, adr_reg, SZ_D);
					} else {
						//big-endian byte swizzle within 16-bit words
						xor_ir(code, 1, adr_reg, SZ_D);
					}
				} else if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
					//16-bit access to a byte-wide chunk: access a single byte
					tmp_size = SZ_B;
					shr_ir(code, 1, adr_reg, SZ_D);
					if ((memmap[chunk].flags & MMAP_ONLY_EVEN) && is_write) {
						//even-only chunks store the high byte of the word
						shr_ir(code, 8, opts->scratch1, SZ_W);
					}
				}
				if ((intptr_t)memmap[chunk].buffer <= 0x7FFFFFFF && (intptr_t)memmap[chunk].buffer >= -2147483648) {
					//buffer fits in a 32-bit displacement: single mov
					if (is_write) {
						mov_rrdisp(code, opts->scratch1, opts->scratch2, (intptr_t)memmap[chunk].buffer, tmp_size);
					} else {
						mov_rdispr(code, opts->scratch1, (intptr_t)memmap[chunk].buffer, opts->scratch1, tmp_size);
					}
				} else {
					//64-bit buffer address: materialize it in a register first
					if (is_write) {
						push_r(code, opts->scratch1);
						mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch1, SZ_PTR);
						add_rr(code, opts->scratch1, opts->scratch2, SZ_PTR);
						pop_r(code, opts->scratch1);
						mov_rrind(code, opts->scratch1, opts->scratch2, tmp_size);
					} else {
						mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch2, SZ_PTR);
						mov_rindexr(code, opts->scratch2, opts->scratch1, 1, opts->scratch1, tmp_size);
					}
				}
				if (size != tmp_size && !is_write) {
					//widen a byte read from a byte-wide chunk back to 16 bits;
					//the undecoded byte reads as 0xFF (open bus)
					if (memmap[chunk].flags & MMAP_ONLY_EVEN) {
						shl_ir(code, 8, opts->scratch1, SZ_W);
						mov_ir(code, 0xFF, opts->scratch1, SZ_B);
					} else {
						or_ir(code, 0xFF00, opts->scratch1, SZ_W);
					}
				}
			}
			if (is_write && (memmap[chunk].flags & MMAP_CODE)) {
				//writes to regions that may hold translated code: consult the
				//per-2KB ram_code_flags bitmap and invalidate if set
				mov_rr(code, opts->scratch2, opts->scratch1, SZ_D);
				shr_ir(code, 11, opts->scratch1, SZ_D); //2KB granularity
				bt_rrdisp(code, opts->scratch1, opts->context_reg, offsetof(m68k_context, ram_code_flags), SZ_D);
				code_ptr not_code = code->cur + 1;
				jcc(code, CC_NC, code->cur + 2);
				call(code, opts->save_context);
#ifdef X86_32
				push_r(code, opts->context_reg);
				push_r(code, opts->scratch2);
#endif
				//on x86-64 scratch2/context_reg already sit in the argument
				//registers for m68k_handle_code_write(address, context)
				call(code, (code_ptr)m68k_handle_code_write);
#ifdef X86_32
				add_ir(code, 8, RSP, SZ_D);
#endif
				mov_rr(code, RAX, opts->context_reg, SZ_PTR);
				call(code, opts->load_context);
				*not_code = code->cur - (not_code+1);
			}
			retn(code);
		} else if (cfun) {
			//No direct buffer access: delegate to the chunk's C handler
			call(code, opts->save_context);
#ifdef X86_64
			if (is_write) {
				if (opts->scratch2 != RDI) {
					mov_rr(code, opts->scratch2, RDI, SZ_D);
				}
				mov_rr(code, opts->scratch1, RDX, size);
			} else {
				push_r(code, opts->context_reg);
				mov_rr(code, opts->scratch1, RDI, SZ_D);
			}
			//runtime 16-byte stack alignment dance, as above
			test_ir(code, 8, RSP, SZ_D);
			code_ptr adjust_rsp = code->cur + 1;
			jcc(code, CC_NZ, code->cur + 2);
			call(code, cfun);
			code_ptr no_adjust = code->cur + 1;
			jmp(code, code->cur + 2);
			*adjust_rsp = code->cur - (adjust_rsp + 1);
			sub_ir(code, 8, RSP, SZ_PTR);
			call(code, cfun);
			add_ir(code, 8, RSP, SZ_PTR);
			*no_adjust = code->cur - (no_adjust+1);
#else
			if (is_write) {
				push_r(code, opts->scratch1);
			} else {
				push_r(code, opts->context_reg);//save opts->context_reg for later
			}
			push_r(code, opts->context_reg);
			push_r(code, is_write ? opts->scratch2 : opts->scratch1);
			call(code, cfun);
			add_ir(code, is_write ? 12 : 8, RSP, SZ_D);
#endif
			if (is_write) {
				mov_rr(code, RAX, opts->context_reg, SZ_PTR);
			} else {
				pop_r(code, opts->context_reg);
				mov_rr(code, RAX, opts->scratch1, size);
			}
			jmp(code, opts->load_context);
		} else {
			//Not sure the best course of action here
			//(chunk matched but is inaccessible for this access type: ignore
			//writes, return open-bus 0xFF/0xFFFF for reads)
			if (!is_write) {
				mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
			}
			retn(code);
		}
		//resolve this chunk's bound-check branches to fall through to the next
		if (lb_jcc) {
			*lb_jcc = code->cur - (lb_jcc+1);
			lb_jcc = NULL;
		}
		if (ub_jcc) {
			*ub_jcc = code->cur - (ub_jcc+1);
			ub_jcc = NULL;
		}
	}
	//address matched no chunk: open-bus read / ignored write
	if (!is_write) {
		mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
	}
	retn(code);
	return start;
}
3944
3945 void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks)
3946 {
3947 memset(opts, 0, sizeof(*opts));
3948 for (int i = 0; i < 8; i++)
3949 {
3950 opts->dregs[i] = opts->aregs[i] = -1;
3951 }
3952 #ifdef X86_64
3953 opts->dregs[0] = R10;
3954 opts->dregs[1] = R11;
3955 opts->dregs[2] = R12;
3956 opts->dregs[3] = R8;
3957 opts->aregs[0] = R13;
3958 opts->aregs[1] = R14;
3959 opts->aregs[2] = R9;
3960 opts->aregs[7] = R15;
3961
3962 opts->flag_regs[0] = -1;
3963 opts->flag_regs[1] = RBX;
3964 opts->flag_regs[2] = RDX;
3965 opts->flag_regs[3] = BH;
3966 opts->flag_regs[4] = DH;
3967
3968 opts->gen.scratch2 = RDI;
3969 #else
3970 opts->dregs[0] = RDX;
3971 opts->aregs[7] = RDI;
3972
3973 for (int i = 0; i < 5; i++)
3974 {
3975 opts->flag_regs[i] = -1;
3976 }
3977 opts->gen.scratch2 = RBX;
3978 #endif
3979 opts->gen.context_reg = RSI;
3980 opts->gen.cycles = RAX;
3981 opts->gen.limit = RBP;
3982 opts->gen.scratch1 = RCX;
3983
3984
3985 opts->gen.native_code_map = malloc(sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
3986 memset(opts->gen.native_code_map, 0, sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
3987 opts->gen.deferred = NULL;
3988 opts->gen.ram_inst_sizes = malloc(sizeof(uint8_t *) * 64);
3989 memset(opts->gen.ram_inst_sizes, 0, sizeof(uint8_t *) * 64);
3990
3991 code_info *code = &opts->gen.code;
3992 init_code_info(code);
3993
3994 opts->gen.save_context = code->cur;
3995 for (int i = 0; i < 5; i++)
3996 if (opts->flag_regs[i] >= 0) {
3997 mov_rrdisp(code, opts->flag_regs[i], opts->gen.context_reg, offsetof(m68k_context, flags) + i, SZ_B);
3998 }
3999 for (int i = 0; i < 8; i++)
4000 {
4001 if (opts->dregs[i] >= 0) {
4002 mov_rrdisp(code, opts->dregs[i], opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * i, SZ_D);
4003 }
4004 if (opts->aregs[i] >= 0) {
4005 mov_rrdisp(code, opts->aregs[i], opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, SZ_D);
4006 }
4007 }
4008 mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(m68k_context, current_cycle), SZ_D);
4009 retn(code);
4010
4011 opts->gen.load_context = code->cur;
4012 for (int i = 0; i < 5; i++)
4013 if (opts->flag_regs[i] >= 0) {
4014 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + i, opts->flag_regs[i], SZ_B);
4015 }
4016 for (int i = 0; i < 8; i++)
4017 {
4018 if (opts->dregs[i] >= 0) {
4019 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * i, opts->dregs[i], SZ_D);
4020 }
4021 if (opts->aregs[i] >= 0) {
4022 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, opts->aregs[i], SZ_D);
4023 }
4024 }
4025 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, current_cycle), CYCLES, SZ_D);
4026 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, target_cycle), LIMIT, SZ_D);
4027 retn(code);
4028
4029 opts->start_context = (start_fun)code->cur;
4030 #ifdef X86_64
4031 if (opts->gen.scratch2 != RDI) {
4032 mov_rr(code, RDI, opts->gen.scratch2, SZ_PTR);
4033 }
4034 //save callee save registers
4035 push_r(code, RBP);
4036 push_r(code, R12);
4037 push_r(code, R13);
4038 push_r(code, R14);
4039 push_r(code, R15);
4040 #else
4041 //save callee save registers
4042 push_r(code, RBP);
4043 push_r(code, RBX);
4044 push_r(code, RSI);
4045 push_r(code, RDI);
4046
4047 mov_rdispr(code, RSP, 20, opts->gen.scratch2, SZ_D);
4048 mov_rdispr(code, RSP, 24, opts->gen.context_reg, SZ_D);
4049 #endif
4050 call(code, opts->gen.load_context);
4051 call_r(code, opts->gen.scratch2);
4052 call(code, opts->gen.save_context);
4053 #ifdef X86_64
4054 //restore callee save registers
4055 pop_r(code, R15);
4056 pop_r(code, R14);
4057 pop_r(code, R13);
4058 pop_r(code, R12);
4059 pop_r(code, RBP);
4060 #else
4061 pop_r(code, RDI);
4062 pop_r(code, RSI);
4063 pop_r(code, RBX);
4064 pop_r(code, RBP);
4065 #endif
4066 retn(code);
4067
4068 opts->native_addr = code->cur;
4069 call(code, opts->gen.save_context);
4070 push_r(code, opts->gen.context_reg);
4071 #ifdef X86_64
4072 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); //move context to 1st arg reg
4073 mov_rr(code, opts->gen.scratch1, RSI, SZ_D); //move address to 2nd arg reg
4074 #else
4075 push_r(code, opts->gen.scratch1);
4076 push_r(code, opts->gen.context_reg);
4077 #endif
4078 call(code, (code_ptr)get_native_address_trans);
4079 #ifdef X86_32
4080 add_ir(code, 8, RSP, SZ_D);
4081 #endif
4082 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg
4083 pop_r(code, opts->gen.context_reg);
4084 call(code, opts->gen.load_context);
4085 retn(code);
4086
4087 opts->native_addr_and_sync = code->cur;
4088 call(code, opts->gen.save_context);
4089 push_r(code, opts->gen.scratch1);
4090 #ifdef X86_64
4091 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
4092 xor_rr(code, RSI, RSI, SZ_D);
4093 test_ir(code, 8, RSP, SZ_PTR); //check stack alignment
4094 code_ptr do_adjust_rsp = code->cur + 1;
4095 jcc(code, CC_NZ, code->cur + 2);
4096 call(code, (code_ptr)sync_components);
4097 code_ptr no_adjust_rsp = code->cur + 1;
4098 jmp(code, code->cur + 2);
4099 *do_adjust_rsp = code->cur - (do_adjust_rsp+1);
4100 sub_ir(code, 8, RSP, SZ_PTR);
4101 call(code, (code_ptr)sync_components);
4102 add_ir(code, 8, RSP, SZ_PTR);
4103 *no_adjust_rsp = code->cur - (no_adjust_rsp+1);
4104 pop_r(code, RSI);
4105 push_r(code, RAX);
4106 mov_rr(code, RAX, RDI, SZ_PTR);
4107 call(code, (code_ptr)get_native_address_trans);
4108 #else
4109 //TODO: Add support for pushing a constant in gen_x86
4110 xor_rr(code, RAX, RAX, SZ_D);
4111 push_r(code, RAX);
4112 push_r(code, opts->gen.context_reg);
4113 call(code, (code_ptr)sync_components);
4114 add_ir(code, 8, RSP, SZ_D);
4115 pop_r(code, RSI); //restore saved address from opts->gen.scratch1
4116 push_r(code, RAX); //save context pointer for later
4117 push_r(code, RSI); //2nd arg -- address
4118 push_r(code, RAX); //1st arg -- context pointer
4119 call(code, (code_ptr)get_native_address_trans);
4120 add_ir(code, 8, RSP, SZ_D);
4121 #endif
4122
4123 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg
4124 pop_r(code, opts->gen.context_reg);
4125 call(code, opts->gen.load_context);
4126 retn(code);
4127
4128 opts->gen.handle_cycle_limit = code->cur;
4129 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D);
4130 code_ptr skip_sync = code->cur + 1;
4131 jcc(code, CC_C, code->cur + 2);
4132 opts->do_sync = code->cur;
4133 push_r(code, opts->gen.scratch1);
4134 push_r(code, opts->gen.scratch2);
4135 call(code, opts->gen.save_context);
4136 #ifdef X86_64
4137 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
4138 xor_rr(code, RSI, RSI, SZ_D);
4139 test_ir(code, 8, RSP, SZ_D);
4140 code_ptr adjust_rsp = code->cur + 1;
4141 jcc(code, CC_NZ, code->cur + 2);
4142 call(code, (code_ptr)sync_components);
4143 code_ptr no_adjust = code->cur + 1;
4144 jmp(code, code->cur + 2);
4145 *adjust_rsp = code->cur - (adjust_rsp + 1);
4146 sub_ir(code, 8, RSP, SZ_PTR);
4147 call(code, (code_ptr)sync_components);
4148 add_ir(code, 8, RSP, SZ_PTR);
4149 *no_adjust = code->cur - (no_adjust+1);
4150 #else
4151 //TODO: Add support for pushing a constant in gen_x86
4152 xor_rr(code, RAX, RAX, SZ_D);
4153 push_r(code, RAX);
4154 push_r(code, opts->gen.context_reg);
4155 call(code, (code_ptr)sync_components);
4156 add_ir(code, 8, RSP, SZ_D);
4157 #endif
4158 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
4159 call(code, opts->gen.load_context);
4160 pop_r(code, opts->gen.scratch2);
4161 pop_r(code, opts->gen.scratch1);
4162 *skip_sync = code->cur - (skip_sync+1);
4163 retn(code);
4164
4165 opts->read_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_16);
4166 opts->read_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_8);
4167 opts->write_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_16);
4168 opts->write_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_8);
4169
4170 opts->read_32 = code->cur;
4171 push_r(code, opts->gen.scratch1);
4172 call(code, opts->read_16);
4173 mov_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_W);
4174 pop_r(code, opts->gen.scratch1);
4175 push_r(code, opts->gen.scratch2);
4176 add_ir(code, 2, opts->gen.scratch1, SZ_D);
4177 call(code, opts->read_16);
4178 pop_r(code, opts->gen.scratch2);
4179 movzx_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_W, SZ_D);
4180 shl_ir(code, 16, opts->gen.scratch2, SZ_D);
4181 or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
4182 retn(code);
4183
4184 opts->write_32_lowfirst = code->cur;
4185 push_r(code, opts->gen.scratch2);
4186 push_r(code, opts->gen.scratch1);
4187 add_ir(code, 2, opts->gen.scratch2, SZ_D);
4188 call(code, opts->write_16);
4189 pop_r(code, opts->gen.scratch1);
4190 pop_r(code, opts->gen.scratch2);
4191 shr_ir(code, 16, opts->gen.scratch1, SZ_D);
4192 jmp(code, opts->write_16);
4193
4194 opts->write_32_highfirst = code->cur;
4195 push_r(code, opts->gen.scratch1);
4196 push_r(code, opts->gen.scratch2);
4197 shr_ir(code, 16, opts->gen.scratch1, SZ_D);
4198 call(code, opts->write_16);
4199 pop_r(code, opts->gen.scratch2);
4200 pop_r(code, opts->gen.scratch1);
4201 add_ir(code, 2, opts->gen.scratch2, SZ_D);
4202 jmp(code, opts->write_16);
4203
4204 opts->get_sr = code->cur;
4205 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, status), opts->gen.scratch1, SZ_B);
4206 shl_ir(code, 8, opts->gen.scratch1, SZ_W);
4207 if (opts->flag_regs[FLAG_X] >= 0) {
4208 mov_rr(code, opts->flag_regs[FLAG_X], opts->gen.scratch1, SZ_B);
4209 } else {
4210 int8_t offset = offsetof(m68k_context, flags);
4211 if (offset) {
4212 mov_rdispr(code, opts->gen.context_reg, offset, opts->gen.scratch1, SZ_B);
4213 } else {
4214 mov_rindr(code, opts->gen.context_reg, opts->gen.scratch1, SZ_B);
4215 }
4216 }
4217 for (int flag = FLAG_N; flag <= FLAG_C; flag++)
4218 {
4219 shl_ir(code, 1, opts->gen.scratch1, SZ_B);
4220 if (opts->flag_regs[flag] >= 0) {
4221 or_rr(code, opts->flag_regs[flag], opts->gen.scratch1, SZ_B);
4222 } else {
4223 or_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, opts->gen.scratch1, SZ_B);
4224 }
4225 }
4226 retn(code);
4227
4228 opts->set_sr = code->cur;
4229 for (int flag = FLAG_C; flag >= FLAG_X; flag--)
4230 {
4231 rcr_ir(code, 1, opts->gen.scratch1, SZ_B);
4232 if (opts->flag_regs[flag] >= 0) {
4233 setcc_r(code, CC_C, opts->flag_regs[flag]);
4234 } else {
4235 int8_t offset = offsetof(m68k_context, flags) + flag;
4236 if (offset) {
4237 setcc_rdisp(code, CC_C, opts->gen.context_reg, offset);
4238 } else {
4239 setcc_rind(code, CC_C, opts->gen.context_reg);
4240 }
4241 }
4242 }
4243 shr_ir(code, 8, opts->gen.scratch1, SZ_W);
4244 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4245 retn(code);
4246
4247 opts->set_ccr = code->cur;
4248 for (int flag = FLAG_C; flag >= FLAG_X; flag--)
4249 {
4250 rcr_ir(code, 1, opts->gen.scratch1, SZ_B);
4251 if (opts->flag_regs[flag] >= 0) {
4252 setcc_r(code, CC_C, opts->flag_regs[flag]);
4253 } else {
4254 int8_t offset = offsetof(m68k_context, flags) + flag;
4255 if (offset) {
4256 setcc_rdisp(code, CC_C, opts->gen.context_reg, offset);
4257 } else {
4258 setcc_rind(code, CC_C, opts->gen.context_reg);
4259 }
4260 }
4261 }
4262 retn(code);
4263
4264 opts->gen.handle_cycle_limit_int = code->cur;
4265 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), CYCLES, SZ_D);
4266 code_ptr do_int = code->cur + 1;
4267 jcc(code, CC_NC, code->cur + 2);
4268 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D);
4269 skip_sync = code->cur + 1;
4270 jcc(code, CC_C, code->cur + 2);
4271 call(code, opts->gen.save_context);
4272 #ifdef X86_64
4273 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR);
4274 mov_rr(code, opts->gen.scratch1, RSI, SZ_D);
4275 test_ir(code, 8, RSP, SZ_D);
4276 adjust_rsp = code->cur + 1;
4277 jcc(code, CC_NZ, code->cur + 2);
4278 call(code, (code_ptr)sync_components);
4279 no_adjust = code->cur + 1;
4280 jmp(code, code->cur + 2);
4281 *adjust_rsp = code->cur - (adjust_rsp + 1);
4282 sub_ir(code, 8, RSP, SZ_PTR);
4283 call(code, (code_ptr)sync_components);
4284 add_ir(code, 8, RSP, SZ_PTR);
4285 *no_adjust = code->cur - (no_adjust+1);
4286 #else
4287 push_r(code, opts->gen.scratch1);
4288 push_r(code, opts->gen.context_reg);
4289 call(code, (code_ptr)sync_components);
4290 add_ir(code, 8, RSP, SZ_D);
4291 #endif
4292 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
4293 jmp(code, opts->gen.load_context);
4294 *skip_sync = code->cur - (skip_sync+1);
4295 retn(code);
4296 *do_int = code->cur - (do_int+1);
4297 //set target cycle to sync cycle
4298 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), LIMIT, SZ_D);
4299 //swap USP and SSP if not already in supervisor mode
4300 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4301 code_ptr already_supervisor = code->cur + 1;
4302 jcc(code, CC_C, code->cur + 2);
4303 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->gen.scratch2, SZ_D);
4304 mov_rrdisp(code, opts->aregs[7], opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
4305 mov_rr(code, opts->gen.scratch2, opts->aregs[7], SZ_D);
4306 *already_supervisor = code->cur - (already_supervisor+1);
4307 //save PC
4308 sub_ir(code, 4, opts->aregs[7], SZ_D);
4309 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
4310 call(code, opts->write_32_lowfirst);
4311 //save status register
4312 sub_ir(code, 2, opts->aregs[7], SZ_D);
4313 call(code, opts->get_sr);
4314 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
4315 call(code, opts->write_16);
4316 //update status register
4317 and_irdisp(code, 0xF8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4318 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch1, SZ_B);
4319 or_ir(code, 0x20, opts->gen.scratch1, SZ_B);
4320 or_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4321 //calculate interrupt vector address
4322 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch1, SZ_D);
4323 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, int_ack), SZ_W);
4324 shl_ir(code, 2, opts->gen.scratch1, SZ_D);
4325 add_ir(code, 0x60, opts->gen.scratch1, SZ_D);
4326 call(code, opts->read_32);
4327 call(code, opts->native_addr_and_sync);
4328 cycles(&opts->gen, 24);
4329 //discard function return address
4330 pop_r(code, opts->gen.scratch2);
4331 jmp_r(code, opts->gen.scratch1);
4332
4333 opts->trap = code->cur;
4334 push_r(code, opts->gen.scratch2);
4335 //swap USP and SSP if not already in supervisor mode
4336 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4337 already_supervisor = code->cur + 1;
4338 jcc(code, CC_C, code->cur + 2);
4339 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->gen.scratch2, SZ_D);
4340 mov_rrdisp(code, opts->aregs[7], opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
4341 mov_rr(code, opts->gen.scratch2, opts->aregs[7], SZ_D);
4342 *already_supervisor = code->cur - (already_supervisor+1);
4343 //save PC
4344 sub_ir(code, 4, opts->aregs[7], SZ_D);
4345 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
4346 call(code, opts->write_32_lowfirst);
4347 //save status register
4348 sub_ir(code, 2, opts->aregs[7], SZ_D);
4349 call(code, opts->get_sr);
4350 mov_rr(code, opts->aregs[7], opts->gen.scratch2, SZ_D);
4351 call(code, opts->write_16);
4352 //set supervisor bit
4353 or_irdisp(code, 0x20, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
4354 //calculate vector address
4355 pop_r(code, opts->gen.scratch1);
4356 shl_ir(code, 2, opts->gen.scratch1, SZ_D);
4357 call(code, opts->read_32);
4358 call(code, opts->native_addr_and_sync);
4359 cycles(&opts->gen, 18);
4360 jmp_r(code, opts->gen.scratch1);
4361 }
4362
4363 void init_68k_context(m68k_context * context, native_map_slot * native_code_map, void * opts)
4364 {
4365 memset(context, 0, sizeof(m68k_context));
4366 context->native_code_map = native_code_map;
4367 context->options = opts;
4368 context->int_cycle = 0xFFFFFFFF;
4369 context->status = 0x27;
4370 }
4371