Mercurial > repos > blastem
comparison backend_x86.c @ 589:2dde38c1744f
Split gen_mem_fun out of m68k_core_x86 and make it more generic so it can be used by the Z80 core
| author | Michael Pavone <pavone@retrodev.com> |
|---|---|
| date | Tue, 11 Mar 2014 09:44:47 -0700 |
| parents | 8e395210f50f |
| children | ea80559c67cb |
comparison
equal
deleted
inserted
replaced
588:963d5901f583 | 589:2dde38c1744f |
---|---|
25 code_ptr jmp_off = code->cur+1; | 25 code_ptr jmp_off = code->cur+1; |
26 jcc(code, CC_NC, jmp_off+1); | 26 jcc(code, CC_NC, jmp_off+1); |
27 call(code, opts->handle_cycle_limit); | 27 call(code, opts->handle_cycle_limit); |
28 *jmp_off = code->cur - (jmp_off+1); | 28 *jmp_off = code->cur - (jmp_off+1); |
29 } | 29 } |
30 | |
//Generates a native thunk that dispatches one kind of memory access (read or
//write, 8-bit or 16-bit, selected by fun_type) across the chunks of the given
//memory map. The generated code expects the guest address in opts->scratch1
//for reads, or in opts->scratch2 with the value to store in opts->scratch1
//for writes. Returns a pointer to the start of the emitted code.
code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk * memmap, uint32_t num_chunks, ftype fun_type)
{
	code_info *code = &opts->code;
	code_ptr start = code->cur;
	//Sync with the cycle limit and charge the bus access time up front
	check_cycles(opts);
	cycles(opts, opts->bus_cycles);
	//Mask off unused address bits when the guest bus is narrower than 32 bits
	if (opts->address_size == SZ_D && opts->address_mask < 0xFFFFFFFF) {
		and_ir(code, opts->address_mask, opts->scratch1, SZ_D);
	}
	//Locations of the rel8 displacement bytes for each chunk's bound checks;
	//patched after the chunk's dispatch code is emitted
	code_ptr lb_jcc = NULL, ub_jcc = NULL;
	uint8_t is_write = fun_type == WRITE_16 || fun_type == WRITE_8;
	//Writes carry the address in scratch2 (scratch1 holds the value to store);
	//reads carry it in scratch1
	uint8_t adr_reg = is_write ? opts->scratch2 : opts->scratch1;
	uint16_t access_flag = is_write ? MMAP_WRITE : MMAP_READ;
	uint8_t size = (fun_type == READ_16 || fun_type == WRITE_16) ? SZ_W : SZ_B;
	for (uint32_t chunk = 0; chunk < num_chunks; chunk++)
	{
		//Skip this chunk if the address is below its start...
		if (memmap[chunk].start > 0) {
			cmp_ir(code, memmap[chunk].start, adr_reg, opts->address_size);
			lb_jcc = code->cur + 1;
			jcc(code, CC_C, code->cur + 2);
		}
		//...or at/above its end
		if (memmap[chunk].end < opts->max_address) {
			cmp_ir(code, memmap[chunk].end, adr_reg, opts->address_size);
			ub_jcc = code->cur + 1;
			jcc(code, CC_NC, code->cur + 2);
		}

		//Apply the chunk-specific address mask (e.g. for mirrored regions)
		if (memmap[chunk].mask != opts->address_mask) {
			and_ir(code, memmap[chunk].mask, adr_reg, opts->address_size);
		}
		//Select the C handler matching the access type, if the chunk has one
		void * cfun;
		switch (fun_type)
		{
		case READ_16:
			cfun = memmap[chunk].read_16;
			break;
		case READ_8:
			cfun = memmap[chunk].read_8;
			break;
		case WRITE_16:
			cfun = memmap[chunk].write_16;
			break;
		case WRITE_8:
			cfun = memmap[chunk].write_8;
			break;
		default:
			cfun = NULL;
		}
		if(memmap[chunk].buffer && memmap[chunk].flags & access_flag) {
			//Chunk is backed by a direct host-memory buffer for this access type
			if (memmap[chunk].flags & MMAP_PTR_IDX) {
				//Buffer pointer lives in the context's pointer table at ptr_index
				if (memmap[chunk].flags & MMAP_FUNC_NULL) {
					//A NULL table entry means fall back to the C handler
					cmp_irdisp(code, 0, opts->context_reg, opts->mem_ptr_off + sizeof(void*) * memmap[chunk].ptr_index, SZ_PTR);
					code_ptr not_null = code->cur + 1;
					jcc(code, CC_NZ, code->cur + 2);
					call(code, opts->save_context);
#ifdef X86_64
					//x86-64: address goes in RDI, write value in RDX
					//NOTE(review): context_reg is never moved into an argument
					//register here — presumably it already is one; confirm
					//against the context_reg assignment in the core setup
					if (is_write) {
						if (opts->scratch2 != RDI) {
							mov_rr(code, opts->scratch2, RDI, opts->address_size);
						}
						mov_rr(code, opts->scratch1, RDX, size);
					} else {
						push_r(code, opts->context_reg);
						mov_rr(code, opts->scratch1, RDI, opts->address_size);
					}
					//Keep RSP 16-byte aligned at the call: pad 8 bytes if needed
					//NOTE(review): the analogous sequence in the cfun-only path
					//below uses SZ_D for this test_ir — likely should match; confirm
					test_ir(code, 8, RSP, opts->address_size);
					code_ptr adjust_rsp = code->cur + 1;
					jcc(code, CC_NZ, code->cur + 2);
					call(code, cfun);
					code_ptr no_adjust = code->cur + 1;
					jmp(code, code->cur + 2);
					*adjust_rsp = code->cur - (adjust_rsp + 1);
					sub_ir(code, 8, RSP, SZ_PTR);
					call(code, cfun);
					add_ir(code, 8, RSP, SZ_PTR);
					*no_adjust = code->cur - (no_adjust + 1);
#else
					//x86-32 cdecl: push arguments right to left, caller cleans up
					if (is_write) {
						push_r(code, opts->scratch1);
					} else {
						push_r(code, opts->context_reg);//save opts->context_reg for later
					}
					push_r(code, opts->context_reg);
					push_r(code, is_write ? opts->scratch2 : opts->scratch1);
					call(code, cfun);
					add_ir(code, is_write ? 12 : 8, RSP, opts->address_size);
#endif
					//Write handlers return a context pointer in RAX; read handlers
					//return the value read
					if (is_write) {
						mov_rr(code, RAX, opts->context_reg, SZ_PTR);
					} else {
						pop_r(code, opts->context_reg);
						mov_rr(code, RAX, opts->scratch1, size);
					}
					jmp(code, opts->load_context);

					*not_null = code->cur - (not_null + 1);
				}
				//Byte accesses on a byte-swapped buffer flip the low address bit
				if (opts->byte_swap && size == SZ_B) {
					xor_ir(code, 1, adr_reg, opts->address_size);
				}
				//Zero-extend a sub-dword guest address before pointer arithmetic
				if (opts->address_size != SZ_D) {
					movzx_rr(code, adr_reg, adr_reg, opts->address_size, SZ_D);
				}
				//Turn the guest address into a host pointer via the pointer table
				add_rdispr(code, opts->context_reg, opts->mem_ptr_off + sizeof(void*) * memmap[chunk].ptr_index, adr_reg, SZ_PTR);
				if (is_write) {
					mov_rrind(code, opts->scratch1, opts->scratch2, size);

				} else {
					mov_rindr(code, opts->scratch1, opts->scratch1, size);
				}
			} else {
				//Buffer address is fixed and known at codegen time
				uint8_t tmp_size = size;
				if (size == SZ_B) {
					if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
						//Buffer only backs one byte of each guest word: check the
						//address parity and return 0xFF on reads of the missing byte
						bt_ir(code, 0, adr_reg, opts->address_size);
						code_ptr good_addr = code->cur + 1;
						jcc(code, (memmap[chunk].flags & MMAP_ONLY_ODD) ? CC_C : CC_NC, code->cur + 2);
						if (!is_write) {
							mov_ir(code, 0xFF, opts->scratch1, SZ_B);
						}
						retn(code);
						*good_addr = code->cur - (good_addr + 1);
						//Halve the address: the buffer stores one byte per word
						shr_ir(code, 1, adr_reg, opts->address_size);
					} else {
						xor_ir(code, 1, adr_reg, opts->address_size);
					}
				} else if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
					//Word access to a half-populated buffer: downgrade to a byte
					//access on the backed half
					tmp_size = SZ_B;
					shr_ir(code, 1, adr_reg, opts->address_size);
					if ((memmap[chunk].flags & MMAP_ONLY_EVEN) && is_write) {
						//Even-byte buffers store the high byte of the word
						shr_ir(code, 8, opts->scratch1, SZ_W);
					}
				}
				//Use the buffer address directly as a displacement when it fits
				//in a signed 32-bit offset; otherwise materialize it in a register
				if ((intptr_t)memmap[chunk].buffer <= 0x7FFFFFFF && (intptr_t)memmap[chunk].buffer >= -2147483648) {
					if (is_write) {
						mov_rrdisp(code, opts->scratch1, opts->scratch2, (intptr_t)memmap[chunk].buffer, tmp_size);
					} else {
						mov_rdispr(code, opts->scratch1, (intptr_t)memmap[chunk].buffer, opts->scratch1, tmp_size);
					}
				} else {
					if (is_write) {
						//Both scratch registers are live (value + address), so
						//spill the value around the address computation
						push_r(code, opts->scratch1);
						mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch1, SZ_PTR);
						add_rr(code, opts->scratch1, opts->scratch2, SZ_PTR);
						pop_r(code, opts->scratch1);
						mov_rrind(code, opts->scratch1, opts->scratch2, tmp_size);
					} else {
						mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch2, SZ_PTR);
						mov_rindexr(code, opts->scratch2, opts->scratch1, 1, opts->scratch1, tmp_size);
					}
				}
				//Widen a downgraded byte read back to a word, filling the
				//unbacked byte with 0xFF
				if (size != tmp_size && !is_write) {
					if (memmap[chunk].flags & MMAP_ONLY_EVEN) {
						shl_ir(code, 8, opts->scratch1, SZ_W);
						mov_ir(code, 0xFF, opts->scratch1, SZ_B);
					} else {
						or_ir(code, 0xFF00, opts->scratch1, SZ_W);
					}
				}
			}
			//Writes into regions that may hold translated code: check the
			//per-2KB ram_flags bit and invalidate stale translations if set
			if (is_write && (memmap[chunk].flags & MMAP_CODE)) {
				//TODO: Fixme for Z80
				mov_rr(code, opts->scratch2, opts->scratch1, opts->address_size);
				shr_ir(code, 11, opts->scratch1, opts->address_size);
				bt_rrdisp(code, opts->scratch1, opts->context_reg, opts->ram_flags_off, opts->address_size);
				code_ptr not_code = code->cur + 1;
				jcc(code, CC_NC, code->cur + 2);
				call(code, opts->save_context);
#ifdef X86_32
				push_r(code, opts->context_reg);
				push_r(code, opts->scratch2);
#endif
				call(code, opts->handle_code_write);
#ifdef X86_32
				add_ir(code, 8, RSP, SZ_D);
#endif
				mov_rr(code, RAX, opts->context_reg, SZ_PTR);
				call(code, opts->load_context);
				*not_code = code->cur - (not_code+1);
			}
			retn(code);
		} else if (cfun) {
			//No usable direct buffer: route the access through the C handler.
			//Same calling sequence as the MMAP_FUNC_NULL fallback above.
			call(code, opts->save_context);
#ifdef X86_64
			if (is_write) {
				if (opts->scratch2 != RDI) {
					mov_rr(code, opts->scratch2, RDI, opts->address_size);
				}
				mov_rr(code, opts->scratch1, RDX, size);
			} else {
				push_r(code, opts->context_reg);
				mov_rr(code, opts->scratch1, RDI, opts->address_size);
			}
			//Keep RSP 16-byte aligned at the call: pad 8 bytes if needed
			test_ir(code, 8, RSP, SZ_D);
			code_ptr adjust_rsp = code->cur + 1;
			jcc(code, CC_NZ, code->cur + 2);
			call(code, cfun);
			code_ptr no_adjust = code->cur + 1;
			jmp(code, code->cur + 2);
			*adjust_rsp = code->cur - (adjust_rsp + 1);
			sub_ir(code, 8, RSP, SZ_PTR);
			call(code, cfun);
			add_ir(code, 8, RSP, SZ_PTR);
			*no_adjust = code->cur - (no_adjust+1);
#else
			if (is_write) {
				push_r(code, opts->scratch1);
			} else {
				push_r(code, opts->context_reg);//save opts->context_reg for later
			}
			push_r(code, opts->context_reg);
			push_r(code, is_write ? opts->scratch2 : opts->scratch1);
			call(code, cfun);
			add_ir(code, is_write ? 12 : 8, RSP, SZ_D);
#endif
			if (is_write) {
				mov_rr(code, RAX, opts->context_reg, SZ_PTR);
			} else {
				pop_r(code, opts->context_reg);
				mov_rr(code, RAX, opts->scratch1, size);
			}
			jmp(code, opts->load_context);
		} else {
			//Not sure the best course of action here
			if (!is_write) {
				mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
			}
			retn(code);
		}
		//Patch this chunk's bound-check branches to land on the next chunk's code
		if (lb_jcc) {
			*lb_jcc = code->cur - (lb_jcc+1);
			lb_jcc = NULL;
		}
		if (ub_jcc) {
			*ub_jcc = code->cur - (ub_jcc+1);
			ub_jcc = NULL;
		}
	}
	//Address matched no chunk: reads see open bus (all ones)
	if (!is_write) {
		mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
	}
	retn(code);
	return start;
}