comparison backend_x86.c @ 744:fc68992cf18d
Merge windows branch with latest changes
author | Michael Pavone <pavone@retrodev.com>
date | Thu, 28 May 2015 21:19:55 -0700
parents | 7f96bd1cb1be
children | 59b499f6b24f
743:cf78cb045fa4 | 744:fc68992cf18d
1 #include "backend.h" | 1 #include "backend.h" |
2 #include "gen_x86.h" | 2 #include "gen_x86.h" |
3 | 3 |
4 void cycles(cpu_options *opts, uint32_t num) | 4 void cycles(cpu_options *opts, uint32_t num) |
5 { | 5 { |
6 add_ir(&opts->code, num, opts->cycles, SZ_D); | 6 add_ir(&opts->code, num*opts->clock_divider, opts->cycles, SZ_D); |
7 } | 7 } |
8 | 8 |
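The change on the right scales each increment by opts->clock_divider, so the cycle counter now advances in master-clock ticks instead of raw CPU cycles. Note the multiply happens at translation time: the product is baked into the emitted add as an immediate. A minimal C sketch of what the generated instruction does at runtime (the context struct and field name are illustrative, not necessarily blastem's exact ones):

    /* generated: add $(num * clock_divider), <cycle register> */
    context->current_cycle += num * opts->clock_divider;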
9 void check_cycles_int(cpu_options *opts, uint32_t address) | 9 void check_cycles_int(cpu_options *opts, uint32_t address) |
10 { | 10 { |
11 code_info *code = &opts->code; | 11 code_info *code = &opts->code; |
12 cmp_rr(code, opts->cycles, opts->limit, SZ_D); | 12 cmp_rr(code, opts->cycles, opts->limit, SZ_D); |
13 code_ptr jmp_off = code->cur+1; | 13 code_ptr jmp_off = code->cur+1; |
14 jcc(code, CC_NC, jmp_off+1); | 14 jcc(code, CC_A, jmp_off+1); |
15 mov_ir(code, address, opts->scratch1, SZ_D); | 15 mov_ir(code, address, opts->scratch1, SZ_D); |
16 call(code, opts->handle_cycle_limit_int); | 16 call(code, opts->handle_cycle_limit_int); |
17 *jmp_off = code->cur - (jmp_off+1); | 17 *jmp_off = code->cur - (jmp_off+1); |
18 } | 18 } |
19 | 19 |
20 void check_cycles(cpu_options * opts) | 20 void check_cycles(cpu_options * opts) |
21 { | 21 { |
22 code_info *code = &opts->code; | 22 code_info *code = &opts->code; |
23 cmp_rr(code, opts->cycles, opts->limit, SZ_D); | 23 cmp_rr(code, opts->cycles, opts->limit, SZ_D); |
24 check_alloc_code(code, MAX_INST_LEN*2); | 24 check_alloc_code(code, MAX_INST_LEN*2); |
25 code_ptr jmp_off = code->cur+1; | 25 code_ptr jmp_off = code->cur+1; |
26 jcc(code, CC_NC, jmp_off+1); | 26 jcc(code, CC_A, jmp_off+1); |
27 call(code, opts->handle_cycle_limit); | 27 call(code, opts->handle_cycle_limit); |
28 *jmp_off = code->cur - (jmp_off+1); | 28 *jmp_off = code->cur - (jmp_off+1); |
29 } | 29 } |
30 | 30 |
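Both check_cycles_int and check_cycles flip the branch that skips the limit handler from CC_NC to CC_A. After a cmp, NC (carry clear) is the unsigned >= test, while A additionally requires ZF clear, i.e. strictly greater. Assuming gen_x86's register-register helpers take (src, dst), cmp_rr(code, opts->cycles, opts->limit, SZ_D) computes limit - cycles, so the effect is:

    /* old (CC_NC): skip the handler while limit >= cycles,
       i.e. call it only once cycles >  limit               */
    /* new (CC_A):  skip the handler while limit >  cycles,
       i.e. call it as soon as  cycles >= limit             */
    if (cycles >= limit) {
        handle_cycle_limit_int(context, address);
    }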
31 code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk * memmap, uint32_t num_chunks, ftype fun_type) | 31 void log_address(cpu_options *opts, uint32_t address, char * format) |
| 32 { |
| 33 code_info *code = &opts->code; |
| 34 call(code, opts->save_context); |
| 35 push_r(code, opts->context_reg); |
| 36 mov_rr(code, opts->cycles, RDX, SZ_D); |
| 37 mov_ir(code, (int64_t)format, RDI, SZ_PTR); |
| 38 mov_ir(code, address, RSI, SZ_D); |
| 39 call_args_abi(code, (code_ptr)printf, 3, RDI, RSI, RDX); |
| 40 pop_r(code, opts->context_reg); |
| 41 call(code, opts->load_context); |
| 42 } |
| 43 |
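log_address is new in this revision: it saves the live context, then hands three arguments to printf through call_args_abi, which maps them onto the host calling convention. Given the register setup above (cycle count in RDX, format in RDI, address in RSI), the generated stub behaves roughly like this C, with the cycle argument's source named illustratively:

    printf(format, address, context->current_cycle);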
| 44 void check_code_prologue(code_info *code) |
| 45 { |
| 46 check_alloc_code(code, MAX_INST_LEN*4); |
| 47 } |
| 48 |
| 49 code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk const * memmap, uint32_t num_chunks, ftype fun_type, code_ptr *after_inc) |
32 { | 50 { |
33 code_info *code = &opts->code; | 51 code_info *code = &opts->code; |
34 code_ptr start = code->cur; | 52 code_ptr start = code->cur; |
35 check_cycles(opts); | 53 check_cycles(opts); |
36 cycles(opts, opts->bus_cycles); | 54 cycles(opts, opts->bus_cycles); |
| 55 if (after_inc) { |
| 56 *after_inc = code->cur; |
| 57 } |
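The new after_inc out-parameter reports the address just past the cycle bookkeeping in the generated accessor. A plausible use, sketched on the assumption that a caller wants to repeat an access without charging bus time twice:

    code_ptr after_cycles;
    code_ptr read8 = gen_mem_fun(opts, memmap, num_chunks, READ_8, &after_cycles);
    /* entering at after_cycles performs the same access as read8,
       but skips check_cycles()/cycles(), so no time is charged */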
37 if (opts->address_size == SZ_D && opts->address_mask < 0xFFFFFFFF) { | 58 if (opts->address_size == SZ_D && opts->address_mask < 0xFFFFFFFF) { |
38 and_ir(code, opts->address_mask, opts->scratch1, SZ_D); | 59 and_ir(code, opts->address_mask, opts->scratch1, SZ_D); |
39 } | 60 } |
40 code_ptr lb_jcc = NULL, ub_jcc = NULL; | 61 code_ptr lb_jcc = NULL, ub_jcc = NULL; |
41 uint8_t is_write = fun_type == WRITE_16 || fun_type == WRITE_8; | 62 uint8_t is_write = fun_type == WRITE_16 || fun_type == WRITE_8; |
42 uint8_t adr_reg = is_write ? opts->scratch2 : opts->scratch1; | 63 uint8_t adr_reg = is_write ? opts->scratch2 : opts->scratch1; |
43 uint16_t access_flag = is_write ? MMAP_WRITE : MMAP_READ; | 64 uint16_t access_flag = is_write ? MMAP_WRITE : MMAP_READ; |
44 uint8_t size = (fun_type == READ_16 || fun_type == WRITE_16) ? SZ_W : SZ_B; | 65 uint8_t size = (fun_type == READ_16 || fun_type == WRITE_16) ? SZ_W : SZ_B; |
| 66 uint32_t ram_flags_off = opts->ram_flags_off; |
45 for (uint32_t chunk = 0; chunk < num_chunks; chunk++) | 67 for (uint32_t chunk = 0; chunk < num_chunks; chunk++) |
46 { | 68 { |
47 if (memmap[chunk].start > 0) { | 69 if (memmap[chunk].start > 0) { |
48 cmp_ir(code, memmap[chunk].start, adr_reg, opts->address_size); | 70 cmp_ir(code, memmap[chunk].start, adr_reg, opts->address_size); |
49 lb_jcc = code->cur + 1; | 71 lb_jcc = code->cur + 1; |
74 cfun = memmap[chunk].write_8; | 96 cfun = memmap[chunk].write_8; |
75 break; | 97 break; |
76 default: | 98 default: |
77 cfun = NULL; | 99 cfun = NULL; |
78 } | 100 } |
79 if(memmap[chunk].buffer && memmap[chunk].flags & access_flag) { | 101 if(memmap[chunk].flags & access_flag) { |
80 if (memmap[chunk].flags & MMAP_PTR_IDX) { | 102 if (memmap[chunk].flags & MMAP_PTR_IDX) { |
81 if (memmap[chunk].flags & MMAP_FUNC_NULL) { | 103 if (memmap[chunk].flags & MMAP_FUNC_NULL) { |
82 cmp_irdisp(code, 0, opts->context_reg, opts->mem_ptr_off + sizeof(void*) * memmap[chunk].ptr_index, SZ_PTR); | 104 cmp_irdisp(code, 0, opts->context_reg, opts->mem_ptr_off + sizeof(void*) * memmap[chunk].ptr_index, SZ_PTR); |
83 code_ptr not_null = code->cur + 1; | 105 code_ptr not_null = code->cur + 1; |
84 jcc(code, CC_NZ, code->cur + 2); | 106 jcc(code, CC_NZ, code->cur + 2); |
85 call(code, opts->save_context); | 107 call(code, opts->save_context); |
86 #ifdef X86_64 | |
87 if (is_write) { | 108 if (is_write) { |
88 if (opts->scratch2 != RDI) { | 109 call_args_abi(code, cfun, 3, opts->scratch2, opts->context_reg, opts->scratch1); |
89 mov_rr(code, opts->scratch2, RDI, opts->address_size); | 110 mov_rr(code, RAX, opts->context_reg, SZ_PTR); |
90 } | |
91 mov_rr(code, opts->scratch1, RDX, size); | |
92 } else { | 111 } else { |
93 push_r(code, opts->context_reg); | 112 push_r(code, opts->context_reg); |
94 mov_rr(code, opts->scratch1, RDI, opts->address_size); | 113 call_args_abi(code, cfun, 2, opts->scratch1, opts->context_reg); |
95 } | |
96 test_ir(code, 8, RSP, opts->address_size); | |
97 code_ptr adjust_rsp = code->cur + 1; | |
98 jcc(code, CC_NZ, code->cur + 2); | |
99 call(code, cfun); | |
100 code_ptr no_adjust = code->cur + 1; | |
101 jmp(code, code->cur + 2); | |
102 *adjust_rsp = code->cur - (adjust_rsp + 1); | |
103 sub_ir(code, 8, RSP, SZ_PTR); | |
104 call(code, cfun); | |
105 add_ir(code, 8, RSP, SZ_PTR); | |
106 *no_adjust = code->cur - (no_adjust + 1); | |
107 #else | |
108 if (is_write) { | |
109 push_r(code, opts->scratch1); | |
110 } else { | |
111 push_r(code, opts->context_reg);//save opts->context_reg for later | |
112 } | |
113 push_r(code, opts->context_reg); | |
114 push_r(code, is_write ? opts->scratch2 : opts->scratch1); | |
115 call(code, cfun); | |
116 add_ir(code, is_write ? 12 : 8, RSP, opts->address_size); | |
117 #endif | |
118 if (is_write) { | |
119 mov_rr(code, RAX, opts->context_reg, SZ_PTR); | |
120 } else { | |
121 pop_r(code, opts->context_reg); | 114 pop_r(code, opts->context_reg); |
122 mov_rr(code, RAX, opts->scratch1, size); | 115 mov_rr(code, RAX, opts->scratch1, size); |
123 } | 116 } |
124 jmp(code, opts->load_context); | 117 jmp(code, opts->load_context); |
125 | 118 |
126 *not_null = code->cur - (not_null + 1); | 119 *not_null = code->cur - (not_null + 1); |
127 } | 120 } |
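The long deletion on the left is the hand-rolled per-ABI call sequence: SysV register moves plus the 16-byte stack-realignment test on x86-64, and explicit pushes with caller cleanup on x86-32. The right-hand side folds all of that behind call_args_abi, which receives the callee and its arguments in whatever registers currently hold them and emits the ABI-appropriate glue. In C terms, and assuming handler signatures that match the argument order passed here, the two call shapes are:

    /* write: call_args_abi(code, cfun, 3, scratch2, context_reg, scratch1) */
    context = cfun(address, context, value);  /* returns the updated context */
    /* read:  call_args_abi(code, cfun, 2, scratch1, context_reg)            */
    value = cfun(address, context);           /* result lands back in scratch1 */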
128 if (opts->byte_swap && size == SZ_B) { | 121 if ((opts->byte_swap || memmap[chunk].flags & MMAP_BYTESWAP) && size == SZ_B) { |
129 xor_ir(code, 1, adr_reg, opts->address_size); | 122 xor_ir(code, 1, adr_reg, opts->address_size); |
130 } | 123 } |
131 if (opts->address_size != SZ_D) { | 124 if (opts->address_size != SZ_D) { |
132 movzx_rr(code, adr_reg, adr_reg, opts->address_size, SZ_D); | 125 movzx_rr(code, adr_reg, adr_reg, opts->address_size, SZ_D); |
133 } | 126 } |
149 mov_ir(code, 0xFF, opts->scratch1, SZ_B); | 142 mov_ir(code, 0xFF, opts->scratch1, SZ_B); |
150 } | 143 } |
151 retn(code); | 144 retn(code); |
152 *good_addr = code->cur - (good_addr + 1); | 145 *good_addr = code->cur - (good_addr + 1); |
153 shr_ir(code, 1, adr_reg, opts->address_size); | 146 shr_ir(code, 1, adr_reg, opts->address_size); |
154 } else { | 147 } else if (opts->byte_swap || memmap[chunk].flags & MMAP_BYTESWAP) { |
155 xor_ir(code, 1, adr_reg, opts->address_size); | 148 xor_ir(code, 1, adr_reg, opts->address_size); |
156 } | 149 } |
157 } else if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) { | 150 } else if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) { |
158 tmp_size = SZ_B; | 151 tmp_size = SZ_B; |
159 shr_ir(code, 1, adr_reg, opts->address_size); | 152 shr_ir(code, 1, adr_reg, opts->address_size); |
160 if ((memmap[chunk].flags & MMAP_ONLY_EVEN) && is_write) { | 153 if ((memmap[chunk].flags & MMAP_ONLY_EVEN) && is_write) { |
161 shr_ir(code, 8, opts->scratch1, SZ_W); | 154 shr_ir(code, 8, opts->scratch1, SZ_W); |
162 } | 155 } |
| 156 } |
| 157 if (opts->address_size != SZ_D) { |
| 158 movzx_rr(code, adr_reg, adr_reg, opts->address_size, SZ_D); |
163 } | 159 } |
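The xor_ir of the low address bit in the byte-swap paths above covers 8-bit access to guest memory stored as host-native 16-bit words: the even and odd bytes of each word are swapped relative to guest order, so flipping bit 0 selects the correct one. A worked example with an illustrative buffer:

    uint8_t buffer[0x200];               /* host view: byte-swapped 16-bit words  */
    uint32_t address = 0x101;            /* guest asks for the odd byte of a word */
    uint8_t value = buffer[address ^ 1]; /* actually at host index 0x100          */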
164 if ((intptr_t)memmap[chunk].buffer <= 0x7FFFFFFF && (intptr_t)memmap[chunk].buffer >= -2147483648) { | 160 if ((intptr_t)memmap[chunk].buffer <= 0x7FFFFFFF && (intptr_t)memmap[chunk].buffer >= -2147483648) { |
165 if (is_write) { | 161 if (is_write) { |
166 mov_rrdisp(code, opts->scratch1, opts->scratch2, (intptr_t)memmap[chunk].buffer, tmp_size); | 162 mov_rrdisp(code, opts->scratch1, opts->scratch2, (intptr_t)memmap[chunk].buffer, tmp_size); |
167 } else { | 163 } else { |
187 or_ir(code, 0xFF00, opts->scratch1, SZ_W); | 183 or_ir(code, 0xFF00, opts->scratch1, SZ_W); |
188 } | 184 } |
189 } | 185 } |
190 } | 186 } |
191 if (is_write && (memmap[chunk].flags & MMAP_CODE)) { | 187 if (is_write && (memmap[chunk].flags & MMAP_CODE)) { |
192 //TODO: Fixme for Z80 | |
193 mov_rr(code, opts->scratch2, opts->scratch1, opts->address_size); | 188 mov_rr(code, opts->scratch2, opts->scratch1, opts->address_size); |
194 shr_ir(code, 11, opts->scratch1, opts->address_size); | 189 shr_ir(code, opts->ram_flags_shift, opts->scratch1, opts->address_size); |
195 bt_rrdisp(code, opts->scratch1, opts->context_reg, opts->ram_flags_off, opts->address_size); | 190 bt_rrdisp(code, opts->scratch1, opts->context_reg, ram_flags_off, opts->address_size); |
| 191 //FIXME: These adjustments to ram_flags_off need to take into account bits vs bytes and ram_flags_shift |
| 192 if (memmap[chunk].mask == opts->address_mask) { |
| 193 ram_flags_off += memmap[chunk].end - memmap[chunk].start; |
| 194 } else { |
| 195 ram_flags_off += memmap[chunk].mask + 1; |
| 196 } |
196 code_ptr not_code = code->cur + 1; | 197 code_ptr not_code = code->cur + 1; |
197 jcc(code, CC_NC, code->cur + 2); | 198 jcc(code, CC_NC, code->cur + 2); |
198 call(code, opts->save_context); | 199 call(code, opts->save_context); |
199 #ifdef X86_32 | 200 call_args(code, opts->handle_code_write, 2, opts->scratch2, opts->context_reg); |
200 push_r(code, opts->context_reg); | |
201 push_r(code, opts->scratch2); | |
202 #endif | |
203 call(code, opts->handle_code_write); | |
204 #ifdef X86_32 | |
205 add_ir(code, 8, RSP, SZ_D); | |
206 #endif | |
207 mov_rr(code, RAX, opts->context_reg, SZ_PTR); | 201 mov_rr(code, RAX, opts->context_reg, SZ_PTR); |
208 call(code, opts->load_context); | 202 call(code, opts->load_context); |
209 *not_code = code->cur - (not_code+1); | 203 *not_code = code->cur - (not_code+1); |
210 } | 204 } |
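Two related changes in the MMAP_CODE path: the hard-coded shift of 11 becomes opts->ram_flags_shift, making the span of RAM covered by one code-protection bit configurable per CPU instead of fixed at 2KB (presumably what the removed Z80 TODO pointed at), and ram_flags_off now advances per chunk, with the in-diff FIXME conceding that the bits-versus-bytes accounting is still rough. The bt test amounts to the following, with ram_flags standing in for the byte array at context_reg + ram_flags_off:

    uint32_t bit = address >> opts->ram_flags_shift;         /* one bit per (1 << shift) bytes */
    int code_here = (ram_flags[bit >> 3] >> (bit & 7)) & 1;  /* bt semantics on a bit string   */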
211 retn(code); | 205 retn(code); |
212 } else if (cfun) { | 206 } else if (cfun) { |
213 call(code, opts->save_context); | 207 call(code, opts->save_context); |
214 #ifdef X86_64 | |
215 if (is_write) { | 208 if (is_write) { |
216 if (opts->scratch2 != RDI) { | 209 call_args_abi(code, cfun, 3, opts->scratch2, opts->context_reg, opts->scratch1); |
217 mov_rr(code, opts->scratch2, RDI, opts->address_size); | 210 mov_rr(code, RAX, opts->context_reg, SZ_PTR); |
218 } | |
219 mov_rr(code, opts->scratch1, RDX, size); | |
220 } else { | 211 } else { |
221 push_r(code, opts->context_reg); | 212 push_r(code, opts->context_reg); |
222 mov_rr(code, opts->scratch1, RDI, opts->address_size); | 213 call_args_abi(code, cfun, 2, opts->scratch1, opts->context_reg); |
223 } | |
224 test_ir(code, 8, RSP, SZ_D); | |
225 code_ptr adjust_rsp = code->cur + 1; | |
226 jcc(code, CC_NZ, code->cur + 2); | |
227 call(code, cfun); | |
228 code_ptr no_adjust = code->cur + 1; | |
229 jmp(code, code->cur + 2); | |
230 *adjust_rsp = code->cur - (adjust_rsp + 1); | |
231 sub_ir(code, 8, RSP, SZ_PTR); | |
232 call(code, cfun); | |
233 add_ir(code, 8, RSP, SZ_PTR); | |
234 *no_adjust = code->cur - (no_adjust+1); | |
235 #else | |
236 if (is_write) { | |
237 push_r(code, opts->scratch1); | |
238 } else { | |
239 push_r(code, opts->context_reg);//save opts->context_reg for later | |
240 } | |
241 push_r(code, opts->context_reg); | |
242 push_r(code, is_write ? opts->scratch2 : opts->scratch1); | |
243 call(code, cfun); | |
244 add_ir(code, is_write ? 12 : 8, RSP, SZ_D); | |
245 #endif | |
246 if (is_write) { | |
247 mov_rr(code, RAX, opts->context_reg, SZ_PTR); | |
248 } else { | |
249 pop_r(code, opts->context_reg); | 214 pop_r(code, opts->context_reg); |
250 mov_rr(code, RAX, opts->scratch1, size); | 215 mov_rr(code, RAX, opts->scratch1, size); |
251 } | 216 } |
252 jmp(code, opts->load_context); | 217 jmp(code, opts->load_context); |
253 } else { | 218 } else { |