comparison m68k_core_x86.c @ 589:2dde38c1744f

Split gen_mem_fun out of m68k_core_x86 and make it more generic so it can be used by the Z80 core
author Michael Pavone <pavone@retrodev.com>
date Tue, 11 Mar 2014 09:44:47 -0700
parents 963d5901f583
children ea80559c67cb
comparison
equal deleted inserted replaced
588:963d5901f583 589:2dde38c1744f
2162 } else { 2162 } else {
2163 call(&native, bp_stub); 2163 call(&native, bp_stub);
2164 } 2164 }
2165 } 2165 }
2166 2166
/* gen_mem_fun: JIT-emits a native x86 handler for one class of guest memory
 * access (fun_type selects READ_8 / READ_16 / WRITE_8 / WRITE_16) that
 * dispatches the guest address to the first matching memmap chunk.
 * Returns a pointer to the start of the generated code.
 *
 * Register contract (as used by the code below):
 *   reads:  scratch1 = guest address on entry, value on return
 *   writes: scratch2 = guest address, scratch1 = value to store
 *
 * NOTE(review): the leading numbers on each line are artifacts of the diff
 * export this file was captured from, not program text.
 */
2167 code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk * memmap, uint32_t num_chunks, ftype fun_type)
2168 {
2169 code_info *code = &opts->code;
2170 code_ptr start = code->cur;
2171 check_cycles(opts);
2172 cycles(opts, BUS); // charge the fixed bus-access cycle cost up front
2173 and_ir(code, 0xFFFFFF, opts->scratch1, SZ_D); // 68K address bus is 24 bits wide
2174 code_ptr lb_jcc = NULL, ub_jcc = NULL; // patch slots for the per-chunk bounds-check branches
2175 uint8_t is_write = fun_type == WRITE_16 || fun_type == WRITE_8;
2176 uint8_t adr_reg = is_write ? opts->scratch2 : opts->scratch1; // writes carry the address in scratch2; scratch1 holds the value
2177 uint16_t access_flag = is_write ? MMAP_WRITE : MMAP_READ;
2178 uint8_t size = (fun_type == READ_16 || fun_type == WRITE_16) ? SZ_W : SZ_B;
2179 for (uint32_t chunk = 0; chunk < num_chunks; chunk++)
2180 {
// Bounds checks: branch past this chunk's handler when the address is below
// start or at/above end.  jcc is emitted with a dummy target (code->cur + 2);
// lb_jcc/ub_jcc record the address of the rel8 displacement byte, which is
// patched after the chunk body is emitted.  NOTE(review): this assumes each
// chunk body stays within a signed 8-bit displacement (< 128 bytes) — confirm.
2181 if (memmap[chunk].start > 0) {
2182 cmp_ir(code, memmap[chunk].start, adr_reg, SZ_D);
2183 lb_jcc = code->cur + 1;
2184 jcc(code, CC_C, code->cur + 2);
2185 }
2186 if (memmap[chunk].end < 0x1000000) {
2187 cmp_ir(code, memmap[chunk].end, adr_reg, SZ_D);
2188 ub_jcc = code->cur + 1;
2189 jcc(code, CC_NC, code->cur + 2);
2190 }
2191
2192 if (memmap[chunk].mask != 0xFFFFFF) {
2193 and_ir(code, memmap[chunk].mask, adr_reg, SZ_D); // mirror the chunk across its address range
2194 }
// Select the C fallback handler matching this access type (may be NULL).
2195 void * cfun;
2196 switch (fun_type)
2197 {
2198 case READ_16:
2199 cfun = memmap[chunk].read_16;
2200 break;
2201 case READ_8:
2202 cfun = memmap[chunk].read_8;
2203 break;
2204 case WRITE_16:
2205 cfun = memmap[chunk].write_16;
2206 break;
2207 case WRITE_8:
2208 cfun = memmap[chunk].write_8;
2209 break;
2210 default:
2211 cfun = NULL;
2212 }
// Fast path: the chunk is backed by a host memory buffer and permits this
// access direction.  (& binds tighter than &&, so the flag test is correct.)
2213 if(memmap[chunk].buffer && memmap[chunk].flags & access_flag) {
// MMAP_PTR_IDX: the buffer base is fetched at run time from
// context->mem_pointers[ptr_index] rather than baked into the code
// (used for banked/switchable regions — presumably; confirm against callers).
2214 if (memmap[chunk].flags & MMAP_PTR_IDX) {
// MMAP_FUNC_NULL: when the run-time pointer is NULL, fall back to calling
// the C handler instead of a direct memory access.
2215 if (memmap[chunk].flags & MMAP_FUNC_NULL) {
2216 cmp_irdisp(code, 0, opts->context_reg, offsetof(m68k_context, mem_pointers) + sizeof(void*) * memmap[chunk].ptr_index, SZ_PTR);
2217 code_ptr not_null = code->cur + 1;
2218 jcc(code, CC_NZ, code->cur + 2);
2219 call(code, opts->save_context);
2220 #ifdef X86_64
// SysV AMD64 ABI: address -> RDI (1st arg), value -> RDX (3rd arg) for writes.
// NOTE(review): the context argument appears to be loaded by save_context or
// the handler convention — not visible here, confirm.
2221 if (is_write) {
2222 if (opts->scratch2 != RDI) {
2223 mov_rr(code, opts->scratch2, RDI, SZ_D);
2224 }
2225 mov_rr(code, opts->scratch1, RDX, size);
2226 } else {
2227 push_r(code, opts->context_reg);
2228 mov_rr(code, opts->scratch1, RDI, SZ_D);
2229 }
// The ABI requires RSP to be 16-byte aligned at the call; test bit 3 of RSP
// and emit the call through one of two paths, padding with 8 bytes if needed.
2230 test_ir(code, 8, RSP, SZ_D);
2231 code_ptr adjust_rsp = code->cur + 1;
2232 jcc(code, CC_NZ, code->cur + 2);
2233 call(code, cfun);
2234 code_ptr no_adjust = code->cur + 1;
2235 jmp(code, code->cur + 2);
2236 *adjust_rsp = code->cur - (adjust_rsp + 1);
2237 sub_ir(code, 8, RSP, SZ_PTR);
2238 call(code, cfun);
2239 add_ir(code, 8, RSP, SZ_PTR);
2240 *no_adjust = code->cur - (no_adjust + 1);
2241 #else
// 32-bit cdecl: arguments pushed right-to-left (value, context, address);
// caller pops them afterwards (12 bytes for writes, 8 for reads).
2242 if (is_write) {
2243 push_r(code, opts->scratch1);
2244 } else {
2245 push_r(code, opts->context_reg);//save opts->context_reg for later
2246 }
2247 push_r(code, opts->context_reg);
2248 push_r(code, is_write ? opts->scratch2 : opts->scratch1);
2249 call(code, cfun);
2250 add_ir(code, is_write ? 12 : 8, RSP, SZ_D);
2251 #endif
// Write handlers return the (possibly reallocated) context pointer in RAX;
// read handlers return the value read.
2252 if (is_write) {
2253 mov_rr(code, RAX, opts->context_reg, SZ_PTR);
2254 } else {
2255 pop_r(code, opts->context_reg);
2256 mov_rr(code, RAX, opts->scratch1, size);
2257 }
2258 jmp(code, opts->load_context); // reload cached CPU state, then return to caller
2259
2260 *not_null = code->cur - (not_null + 1); // patch branch target: pointer was non-NULL
2261 }
// Byte access: flip the low address bit.  NOTE(review): presumably because
// guest memory is stored as byte-swapped 16-bit words on the little-endian
// host — confirm against the buffer layout.
2262 if (size == SZ_B) {
2263 xor_ir(code, 1, adr_reg, SZ_D);
2264 }
// Turn the guest offset into a host pointer by adding the run-time base.
2265 add_rdispr(code, opts->context_reg, offsetof(m68k_context, mem_pointers) + sizeof(void*) * memmap[chunk].ptr_index, adr_reg, SZ_PTR);
2266 if (is_write) {
2267 mov_rrind(code, opts->scratch1, opts->scratch2, size);
2268
2269 } else {
2270 mov_rindr(code, opts->scratch1, opts->scratch1, size);
2271 }
2272 } else {
// Statically-based buffer.  MMAP_ONLY_ODD/MMAP_ONLY_EVEN mark chunks that
// back only one byte of each 16-bit word.
2273 uint8_t tmp_size = size;
2274 if (size == SZ_B) {
2275 if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
// Byte access to a half-populated chunk: test address bit 0; if it selects
// the unpopulated byte, reads return 0xFF (open-bus-like) and writes are
// dropped, then return.  Otherwise halve the address to index the packed bytes.
2276 bt_ir(code, 0, adr_reg, SZ_D);
2277 code_ptr good_addr = code->cur + 1;
2278 jcc(code, (memmap[chunk].flags & MMAP_ONLY_ODD) ? CC_C : CC_NC, code->cur + 2);
2279 if (!is_write) {
2280 mov_ir(code, 0xFF, opts->scratch1, SZ_B);
2281 }
2282 retn(code);
2283 *good_addr = code->cur - (good_addr + 1);
2284 shr_ir(code, 1, adr_reg, SZ_D);
2285 } else {
2286 xor_ir(code, 1, adr_reg, SZ_D); // same byte-swap fixup as the PTR_IDX path
2287 }
2288 } else if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
// Word access to a half-populated chunk: only one byte is actually stored,
// so demote to a byte access at address/2; for even-only writes, shift the
// high byte of the value down into position first.
2289 tmp_size = SZ_B;
2290 shr_ir(code, 1, adr_reg, SZ_D);
2291 if ((memmap[chunk].flags & MMAP_ONLY_EVEN) && is_write) {
2292 shr_ir(code, 8, opts->scratch1, SZ_W);
2293 }
2294 }
// If the buffer base fits a signed 32-bit displacement, fold it into the
// addressing mode; otherwise materialize the 64-bit base in a register first.
2295 if ((intptr_t)memmap[chunk].buffer <= 0x7FFFFFFF && (intptr_t)memmap[chunk].buffer >= -2147483648) {
2296 if (is_write) {
2297 mov_rrdisp(code, opts->scratch1, opts->scratch2, (intptr_t)memmap[chunk].buffer, tmp_size);
2298 } else {
2299 mov_rdispr(code, opts->scratch1, (intptr_t)memmap[chunk].buffer, opts->scratch1, tmp_size);
2300 }
2301 } else {
2302 if (is_write) {
2303 push_r(code, opts->scratch1); // scratch1 (the value) is clobbered to hold the base temporarily
2304 mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch1, SZ_PTR);
2305 add_rr(code, opts->scratch1, opts->scratch2, SZ_PTR);
2306 pop_r(code, opts->scratch1);
2307 mov_rrind(code, opts->scratch1, opts->scratch2, tmp_size);
2308 } else {
2309 mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch2, SZ_PTR);
2310 mov_rindexr(code, opts->scratch2, opts->scratch1, 1, opts->scratch1, tmp_size);
2311 }
2312 }
// Demoted word read: rebuild the 16-bit result, filling the missing byte
// with 0xFF (even-only data goes to the high byte).
2313 if (size != tmp_size && !is_write) {
2314 if (memmap[chunk].flags & MMAP_ONLY_EVEN) {
2315 shl_ir(code, 8, opts->scratch1, SZ_W);
2316 mov_ir(code, 0xFF, opts->scratch1, SZ_B);
2317 } else {
2318 or_ir(code, 0xFF00, opts->scratch1, SZ_W);
2319 }
2320 }
2321 }
// MMAP_CODE: a write landed in RAM that may hold translated code.  Check the
// per-2KB dirty bitmap (address >> 11 indexes ram_code_flags) and, if set,
// call m68k_handle_code_write to invalidate the affected translations.
2322 if (is_write && (memmap[chunk].flags & MMAP_CODE)) {
2323 mov_rr(code, opts->scratch2, opts->scratch1, SZ_D);
2324 shr_ir(code, 11, opts->scratch1, SZ_D);
2325 bt_rrdisp(code, opts->scratch1, opts->context_reg, offsetof(m68k_context, ram_code_flags), SZ_D);
2326 code_ptr not_code = code->cur + 1;
2327 jcc(code, CC_NC, code->cur + 2);
2328 call(code, opts->save_context);
2329 #ifdef X86_32
2330 push_r(code, opts->context_reg);
2331 push_r(code, opts->scratch2);
2332 #endif
2333 call(code, (code_ptr)m68k_handle_code_write);
2334 #ifdef X86_32
2335 add_ir(code, 8, RSP, SZ_D);
2336 #endif
2337 mov_rr(code, RAX, opts->context_reg, SZ_PTR);
2338 call(code, opts->load_context);
2339 *not_code = code->cur - (not_code+1);
2340 }
2341 retn(code);
// Slow path: no buffer (or wrong direction), but a C handler exists.
// Same call sequence as the MMAP_FUNC_NULL fallback above.
2342 } else if (cfun) {
2343 call(code, opts->save_context);
2344 #ifdef X86_64
2345 if (is_write) {
2346 if (opts->scratch2 != RDI) {
2347 mov_rr(code, opts->scratch2, RDI, SZ_D);
2348 }
2349 mov_rr(code, opts->scratch1, RDX, size);
2350 } else {
2351 push_r(code, opts->context_reg);
2352 mov_rr(code, opts->scratch1, RDI, SZ_D);
2353 }
// 16-byte stack alignment dance, as above.
2354 test_ir(code, 8, RSP, SZ_D);
2355 code_ptr adjust_rsp = code->cur + 1;
2356 jcc(code, CC_NZ, code->cur + 2);
2357 call(code, cfun);
2358 code_ptr no_adjust = code->cur + 1;
2359 jmp(code, code->cur + 2);
2360 *adjust_rsp = code->cur - (adjust_rsp + 1);
2361 sub_ir(code, 8, RSP, SZ_PTR);
2362 call(code, cfun);
2363 add_ir(code, 8, RSP, SZ_PTR);
2364 *no_adjust = code->cur - (no_adjust+1);
2365 #else
2366 if (is_write) {
2367 push_r(code, opts->scratch1);
2368 } else {
2369 push_r(code, opts->context_reg);//save opts->context_reg for later
2370 }
2371 push_r(code, opts->context_reg);
2372 push_r(code, is_write ? opts->scratch2 : opts->scratch1);
2373 call(code, cfun);
2374 add_ir(code, is_write ? 12 : 8, RSP, SZ_D);
2375 #endif
2376 if (is_write) {
2377 mov_rr(code, RAX, opts->context_reg, SZ_PTR);
2378 } else {
2379 pop_r(code, opts->context_reg);
2380 mov_rr(code, RAX, opts->scratch1, size);
2381 }
2382 jmp(code, opts->load_context);
2383 } else {
// Chunk matched but has neither buffer access nor handler: reads float to
// all-ones (open-bus behavior), writes are silently dropped.
2384 //Not sure the best course of action here
2385 if (!is_write) {
2386 mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
2387 }
2388 retn(code);
2389 }
// Patch this chunk's bounds-check branches to land here (start of next chunk).
2390 if (lb_jcc) {
2391 *lb_jcc = code->cur - (lb_jcc+1);
2392 lb_jcc = NULL;
2393 }
2394 if (ub_jcc) {
2395 *ub_jcc = code->cur - (ub_jcc+1);
2396 ub_jcc = NULL;
2397 }
2398 }
// Address matched no chunk at all: same open-bus result for reads.
2399 if (!is_write) {
2400 mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
2401 }
2402 retn(code);
2403 return start;
2404 }
2405
2406 void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks) 2167 void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks)
2407 { 2168 {
2408 memset(opts, 0, sizeof(*opts)); 2169 memset(opts, 0, sizeof(*opts));
2170 opts->gen.address_size = SZ_D;
2171 opts->gen.address_mask = 0xFFFFFF;
2172 opts->gen.max_address = 0x1000000;
2173 opts->gen.bus_cycles = BUS;
2174 opts->gen.mem_ptr_off = offsetof(m68k_context, mem_pointers);
2175 opts->gen.ram_flags_off = offsetof(m68k_context, ram_code_flags);
2409 for (int i = 0; i < 8; i++) 2176 for (int i = 0; i < 8; i++)
2410 { 2177 {
2411 opts->dregs[i] = opts->aregs[i] = -1; 2178 opts->dregs[i] = opts->aregs[i] = -1;
2412 } 2179 }
2413 #ifdef X86_64 2180 #ifdef X86_64