comparison backend.c @ 2666:38c281ef57b0
Memory access optimization in the new 68K core that gives a modest speed bump on average and will allow low-cost watchpoints
author    Michael Pavone <pavone@retrodev.com>
date      Fri, 07 Mar 2025 23:40:58 -0800
parents   9caebcfeac72
children  b0b6c6042103
comparing 2665:54ac5fe14cf9 to 2666:38c281ef57b0
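Everything shown below is newly inserted in this revision. As a rough sketch of how these pieces fit together (not code from this changeset): the get_interp_* selectors at the bottom resolve each region of the address space to a (handler, data) pair ahead of time, so the interpreter's hot path becomes a table lookup plus an indirect call instead of a memory map walk, and a watchpoint can later be installed cheaply by swapping a single page's handler for a checking wrapper. The sketch assumes 64KB pages and a 24-bit address space; interp_pages, init_read_pages, and read_16_dispatch are hypothetical names, while cpu_options and get_interp_read_16 refer to the declarations in this file.

#include <stdint.h>

//Mirrors the handler signature used by the functions in this file
typedef uint16_t (*interp_read_16)(uint32_t address, void *context, void *data);

#define PAGE_SHIFT 16  //64KB pages
#define NUM_PAGES  256 //covers the 68K's 24-bit address space

typedef struct {
	interp_read_16 read_16[NUM_PAGES];
	void          *read_16_data[NUM_PAGES];
} interp_pages;

//Resolve every page to its cheapest handler once, up front
static void init_read_pages(interp_pages *pages, void *context, cpu_options *opts)
{
	for (uint32_t page = 0; page < NUM_PAGES; page++)
	{
		uint32_t start = page << PAGE_SHIFT;
		pages->read_16[page] = get_interp_read_16(
			context, opts, start, start + (1 << PAGE_SHIFT),
			pages->read_16_data + page
		);
	}
}

//Hot path: no map walk, just a table load and an indirect call
static uint16_t read_16_dispatch(interp_pages *pages, void *context, uint32_t address)
{
	uint32_t page = (address >> PAGE_SHIFT) & (NUM_PAGES - 1);
	return pages->read_16[page](address, context, pages->read_16_data[page]);
}
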
			}
		}
	}
	return size;
}

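//Fast-path handlers. "Direct" handlers get a precomputed host pointer in
//data, so an access is just an index into that buffer using the low 16 bits
//of the guest address. The ^ 1 on byte accesses accounts for 68K words being
//stored byte-swapped on little-endian hosts.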
uint16_t interp_read_direct_16(uint32_t address, void *context, void *data)
{
	return *(uint16_t *)((address & 0xFFFE) + (uint8_t *)data);
}

uint8_t interp_read_direct_8(uint32_t address, void *context, void *data)
{
	return ((uint8_t *)data)[(address & 0xFFFF) ^ 1];
}

void interp_write_direct_16(uint32_t address, void *context, uint16_t value, void *data)
{
	*(uint16_t *)((address & 0xFFFE) + (uint8_t *)data) = value;
}

void interp_write_direct_8(uint32_t address, void *context, uint8_t value, void *data)
{
	((uint8_t *)data)[(address & 0xFFFF) ^ 1] = value;
}

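//"Indexed" handlers receive a pointer to a slot in the context's
//mem_pointers array instead of the memory itself. The extra indirection on
//each access keeps banked regions correct when the slot is repointed.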
uint16_t interp_read_indexed_16(uint32_t address, void *context, void *data)
{
	return *(uint16_t *)((*(uint8_t **)data) + (address & 0xFFFE));
}

uint8_t interp_read_indexed_8(uint32_t address, void *context, void *data)
{
	return (*(uint8_t **)data)[(address & 0xFFFF) ^ 1];
}

void interp_write_indexed_16(uint32_t address, void *context, uint16_t value, void *data)
{
	*(uint16_t *)((*(uint8_t **)data) + (address & 0xFFFE)) = value;
}

void interp_write_indexed_8(uint32_t address, void *context, uint8_t value, void *data)
{
	(*(uint8_t **)data)[(address & 0xFFFF) ^ 1] = value;
}

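//"Fixed" handlers return a constant that is packed directly into the data
//pointer, e.g. 0xFFFF for unmapped regions, without touching memory.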
uint16_t interp_read_fixed_16(uint32_t address, void *context, void *data)
{
	return (uintptr_t)data;
}

uint8_t interp_read_fixed_8(uint32_t address, void *context, void *data)
{
	uint16_t val = (uintptr_t)data;
	if (address & 1) {
		return val;
	}
	return val >> 8;
}

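//Writes to unmapped regions are silently ignored.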
void interp_write_ignored_16(uint32_t address, void *context, uint16_t value, void *data)
{
}

void interp_write_ignored_8(uint32_t address, void *context, uint8_t value, void *data)
{
}

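//"Map" handlers are the slow path: a full memory map walk, seeded with the
//chunk in data as the most likely match and continuing forward through the
//rest of the map on a miss.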
uint16_t interp_read_map_16(uint32_t address, void *context, void *data)
{
	const memmap_chunk *chunk = data;
	cpu_options *opts = *(cpu_options **)context;
	if (address < chunk->start || address >= chunk->end)
	{
		const memmap_chunk *map_end = opts->memmap + opts->memmap_chunks;
		for (chunk++; chunk < map_end; chunk++)
		{
			if (address >= chunk->start && address < chunk->end) {
				break;
			}
		}
		if (chunk == map_end) {
			return 0xFFFF;
		}
	}
	uint32_t offset = address & chunk->mask;
	if (chunk->flags & MMAP_READ) {
		uint8_t *base;
		if (chunk->flags & MMAP_PTR_IDX) {
			uint8_t **mem_pointers = (uint8_t **)(opts->mem_ptr_off + (uint8_t *)context);
			base = mem_pointers[chunk->ptr_index];
		} else {
			base = chunk->buffer;
		}
		if (base) {
			uint16_t val;
			if (chunk->shift > 0) {
				offset <<= chunk->shift;
			} else if (chunk->shift < 0) {
				offset >>= -chunk->shift;
			}
			if ((chunk->flags & MMAP_ONLY_ODD) || (chunk->flags & MMAP_ONLY_EVEN)) {
				offset /= 2;
				val = base[offset];
				if (chunk->flags & MMAP_ONLY_ODD) {
					val |= 0xFF00;
				} else {
					val = val << 8 | 0xFF;
				}
			} else {
				val = *(uint16_t *)(base + offset);
			}
			return val;
		}
	}
	if ((!(chunk->flags & MMAP_READ) || (chunk->flags & MMAP_FUNC_NULL)) && chunk->read_16) {
		return chunk->read_16(offset, context);
	}
	return 0xFFFF;
}

uint8_t interp_read_map_8(uint32_t address, void *context, void *data)
{
	const memmap_chunk *chunk = data;
	cpu_options *opts = *(cpu_options **)context;
	if (address < chunk->start || address >= chunk->end)
	{
		const memmap_chunk *map_end = opts->memmap + opts->memmap_chunks;
		for (chunk++; chunk < map_end; chunk++)
		{
			if (address >= chunk->start && address < chunk->end) {
				break;
			}
		}
		if (chunk == map_end) {
			return 0xFF;
		}
	}
	uint32_t offset = address & chunk->mask;
	if (chunk->flags & MMAP_READ) {
		uint8_t *base;
		if (chunk->flags & MMAP_PTR_IDX) {
			uint8_t **mem_pointers = (uint8_t **)(opts->mem_ptr_off + (uint8_t *)context);
			base = mem_pointers[chunk->ptr_index];
		} else {
			base = chunk->buffer;
		}
		if (base) {
			if (chunk->shift > 0) {
				offset <<= chunk->shift;
			} else if (chunk->shift < 0) {
				offset >>= -chunk->shift;
			}
			if ((chunk->flags & MMAP_ONLY_ODD) || (chunk->flags & MMAP_ONLY_EVEN)) {
				if (address & 1) {
					if (chunk->flags & MMAP_ONLY_EVEN) {
						return 0xFF;
					}
				} else if (chunk->flags & MMAP_ONLY_ODD) {
					return 0xFF;
				}
				offset /= 2;
			} else if (opts->byte_swap) {
				offset ^= 1;
			}
			return base[offset];
		}
	}
	if ((!(chunk->flags & MMAP_READ) || (chunk->flags & MMAP_FUNC_NULL)) && chunk->read_8) {
		return chunk->read_8(offset, context);
	}
	return 0xFF;
}

void interp_write_map_16(uint32_t address, void *context, uint16_t value, void *data)
{
	const memmap_chunk *chunk = data;
	cpu_options *opts = *(cpu_options **)context;
	if (address < chunk->start || address >= chunk->end)
	{
		const memmap_chunk *map_end = opts->memmap + opts->memmap_chunks;
		for (chunk++; chunk < map_end; chunk++)
		{
			if (address >= chunk->start && address < chunk->end) {
				break;
			}
		}
		if (chunk == map_end) {
			return;
		}
	}
	uint32_t offset = address & chunk->mask;
	if (chunk->flags & MMAP_WRITE) {
		uint8_t *base;
		if (chunk->flags & MMAP_PTR_IDX) {
			uint8_t **mem_pointers = (uint8_t **)(opts->mem_ptr_off + (uint8_t *)context);
			base = mem_pointers[chunk->ptr_index];
		} else {
			base = chunk->buffer;
		}
		if (base) {
			if (chunk->shift > 0) {
				offset <<= chunk->shift;
			} else if (chunk->shift < 0) {
				offset >>= -chunk->shift;
			}
			if ((chunk->flags & MMAP_ONLY_ODD) || (chunk->flags & MMAP_ONLY_EVEN)) {
				offset /= 2;
				if (chunk->flags & MMAP_ONLY_EVEN) {
					//even byte of a big-endian word is the high byte
					value >>= 8;
				}
				base[offset] = value;
			} else {
				*(uint16_t *)(base + offset) = value;
			}
			return;
		}
	}
	if ((!(chunk->flags & MMAP_WRITE) || (chunk->flags & MMAP_FUNC_NULL)) && chunk->write_16) {
		chunk->write_16(offset, context, value);
	}
}

void interp_write_map_8(uint32_t address, void *context, uint8_t value, void *data)
{
	const memmap_chunk *chunk = data;
	cpu_options *opts = *(cpu_options **)context;
	if (address < chunk->start || address >= chunk->end)
	{
		const memmap_chunk *map_end = opts->memmap + opts->memmap_chunks;
		for (chunk++; chunk < map_end; chunk++)
		{
			if (address >= chunk->start && address < chunk->end) {
				break;
			}
		}
		if (chunk == map_end) {
			return;
		}
	}
	uint32_t offset = address & chunk->mask;
	if (chunk->flags & MMAP_WRITE) {
		uint8_t *base;
		if (chunk->flags & MMAP_PTR_IDX) {
			uint8_t **mem_pointers = (uint8_t **)(opts->mem_ptr_off + (uint8_t *)context);
			base = mem_pointers[chunk->ptr_index];
		} else {
			base = chunk->buffer;
		}
		if (base) {
			if (chunk->shift > 0) {
				offset <<= chunk->shift;
			} else if (chunk->shift < 0) {
				offset >>= -chunk->shift;
			}
			if ((chunk->flags & MMAP_ONLY_ODD) || (chunk->flags & MMAP_ONLY_EVEN)) {
				if (address & 1) {
					if (chunk->flags & MMAP_ONLY_EVEN) {
						return;
					}
				} else if (chunk->flags & MMAP_ONLY_ODD) {
					return;
				}
				offset /= 2;
			} else if (opts->byte_swap) {
				offset ^= 1;
			}
			base[offset] = value;
			return;
		}
	}
	if ((!(chunk->flags & MMAP_WRITE) || (chunk->flags & MMAP_FUNC_NULL)) && chunk->write_8) {
		chunk->write_8(offset, context, value);
	}
}

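//Handler selection: given a [start, end) region, pick the cheapest handler
//that is valid for every address in it and compute its data argument.
//Regions that need per-access decisions fall back to the map handlers.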
interp_read_16 get_interp_read_16(void *context, cpu_options *opts, uint32_t start, uint32_t end, void **data_out)
{
	const memmap_chunk *chunk;
	for (chunk = opts->memmap; chunk < opts->memmap + opts->memmap_chunks; chunk++)
	{
		if (chunk->end > start && chunk->start < end) {
			break;
		}
	}
	if (chunk == opts->memmap + opts->memmap_chunks) {
		*data_out = (void *)(uintptr_t)0xFFFF;
		return interp_read_fixed_16;
	}
	if (chunk->end < end || chunk->start > start) {
		goto use_map;
	}
	if (chunk->flags & MMAP_READ) {
		if ((chunk->flags & (MMAP_ONLY_ODD|MMAP_ONLY_EVEN|MMAP_FUNC_NULL)) || chunk->shift) {
			goto use_map;
		}
		if (!chunk->mask && !(chunk->flags & ~MMAP_READ)) {
			uintptr_t value = *(uint16_t *)chunk->buffer;
			*data_out = (void *)value;
			return interp_read_fixed_16;
		}
		if ((chunk->mask & 0xFFFF) != 0xFFFF) {
			goto use_map;
		}
		if (chunk->flags & MMAP_PTR_IDX) {
			if (chunk->mask != 0xFFFF && start > 0) {
				goto use_map;
			}
			*data_out = (void *)(chunk->ptr_index + (void **)(((char *)context) + opts->mem_ptr_off));
			return interp_read_indexed_16;
		} else {
			*data_out = (start & chunk->mask) + (uint8_t *)chunk->buffer;
			return interp_read_direct_16;
		}
	}
	if (chunk->read_16 && chunk->mask == opts->address_mask) {
		*data_out = NULL;
		//This is not safe for all calling conventions due to the extra param,
		//but should work for the ones we actually care about
		return (interp_read_16)chunk->read_16;
	}
use_map:
	*data_out = (void *)chunk;
	return interp_read_map_16;
}

interp_read_8 get_interp_read_8(void *context, cpu_options *opts, uint32_t start, uint32_t end, void **data_out)
{
	const memmap_chunk *chunk;
	for (chunk = opts->memmap; chunk < opts->memmap + opts->memmap_chunks; chunk++)
	{
		if (chunk->end > start && chunk->start < end) {
			break;
		}
	}
	if (chunk == opts->memmap + opts->memmap_chunks) {
		*data_out = (void *)(uintptr_t)0xFFFF;
		return interp_read_fixed_8;
	}
	if (chunk->end != end || chunk->start != start) {
		goto use_map;
	}
	if (chunk->flags & MMAP_READ) {
		if ((chunk->flags & (MMAP_ONLY_ODD|MMAP_ONLY_EVEN|MMAP_FUNC_NULL)) || chunk->shift) {
			goto use_map;
		}
		if (!chunk->mask && !(chunk->flags & ~MMAP_READ)) {
			uintptr_t value = *(uint8_t *)chunk->buffer;
			*data_out = (void *)value;
			return interp_read_fixed_8;
		}
		if ((chunk->mask & 0xFFFF) != 0xFFFF) {
			goto use_map;
		}
		if (chunk->flags & MMAP_PTR_IDX) {
			if (chunk->mask != 0xFFFF && start > 0) {
				goto use_map;
			}
			*data_out = (void *)(chunk->ptr_index + (void **)(((char *)context) + opts->mem_ptr_off));
			return interp_read_indexed_8;
		} else {
			*data_out = (start & chunk->mask) + (uint8_t *)chunk->buffer;
			return interp_read_direct_8;
		}
	}
	if (chunk->read_8 && chunk->mask == opts->address_mask) {
		*data_out = NULL;
		//This is not safe for all calling conventions due to the extra param,
		//but should work for the ones we actually care about
		return (interp_read_8)chunk->read_8;
	}
use_map:
	*data_out = (void *)chunk;
	return interp_read_map_8;
}

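//Write handler selection mirrors the read selection above, but requires
//MMAP_WRITE for the direct and indexed fast paths, and for byte writes a
//byte-swapped layout as well, since interp_write_direct_8 and
//interp_write_indexed_8 hardcode the ^ 1 address adjustment.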
interp_write_16 get_interp_write_16(void *context, cpu_options *opts, uint32_t start, uint32_t end, void **data_out)
{
	const memmap_chunk *chunk;
	for (chunk = opts->memmap; chunk < opts->memmap + opts->memmap_chunks; chunk++)
	{
		if (chunk->end > start && chunk->start < end) {
			break;
		}
	}
	if (chunk == opts->memmap + opts->memmap_chunks) {
		*data_out = NULL;
		return interp_write_ignored_16;
	}
	if (chunk->end != end || chunk->start != start) {
		goto use_map;
	}
	if (chunk->flags & MMAP_WRITE) {
		if ((chunk->flags & (MMAP_ONLY_ODD|MMAP_ONLY_EVEN|MMAP_FUNC_NULL)) || chunk->shift || (chunk->mask & 0xFFFF) != 0xFFFF) {
			goto use_map;
		}
		if (chunk->flags & MMAP_PTR_IDX) {
			if (chunk->mask != 0xFFFF && start > 0) {
				goto use_map;
			}
			*data_out = (void *)(chunk->ptr_index + (void **)(((char *)context) + opts->mem_ptr_off));
			return interp_write_indexed_16;
		} else {
			*data_out = (start & chunk->mask) + (uint8_t *)chunk->buffer;
			return interp_write_direct_16;
		}
	}
	if (chunk->write_16 && chunk->mask == opts->address_mask) {
		*data_out = NULL;
		//This is not safe for all calling conventions due to the extra param,
		//but should work for the ones we actually care about
		return (interp_write_16)chunk->write_16;
	}
use_map:
	*data_out = (void *)chunk;
	return interp_write_map_16;
}

interp_write_8 get_interp_write_8(void *context, cpu_options *opts, uint32_t start, uint32_t end, void **data_out)
{
	const memmap_chunk *chunk;
	for (chunk = opts->memmap; chunk < opts->memmap + opts->memmap_chunks; chunk++)
	{
		if (chunk->end > start && chunk->start < end) {
			break;
		}
	}
	if (chunk == opts->memmap + opts->memmap_chunks) {
		*data_out = NULL;
		return interp_write_ignored_8;
	}
	if (chunk->end != end || chunk->start != start) {
		goto use_map;
	}
	if (chunk->flags & MMAP_WRITE) {
		if ((chunk->flags & (MMAP_ONLY_ODD|MMAP_ONLY_EVEN|MMAP_FUNC_NULL)) || chunk->shift
			|| (chunk->mask & 0xFFFF) != 0xFFFF || !opts->byte_swap
		) {
			goto use_map;
		}
		if (chunk->flags & MMAP_PTR_IDX) {
			if (chunk->mask != 0xFFFF && start > 0) {
				goto use_map;
			}
			*data_out = (void *)(chunk->ptr_index + (void **)(((char *)context) + opts->mem_ptr_off));
			return interp_write_indexed_8;
		} else {
			*data_out = (start & chunk->mask) + (uint8_t *)chunk->buffer;
			return interp_write_direct_8;
		}
	}
	if (chunk->write_8 && chunk->mask == opts->address_mask) {
		*data_out = NULL;
		//This is not safe for all calling conventions due to the extra param,
		//but should work for the ones we actually care about
		return (interp_write_8)chunk->write_8;
	}
use_map:
	*data_out = (void *)chunk;
	return interp_write_map_8;
}