changeset 2675:dbff641a33df

Implement Z80/PSG clock speed test register bit
author Michael Pavone <pavone@retrodev.com>
date Fri, 14 Mar 2025 01:18:11 -0700
parents 07cc0f7109f0
children 7e86ec94c899
files genesis.c vdp.c vdp.h z80.cpu z80_to_x86.c z80_to_x86.h z80_util.c
diffstat 7 files changed, 273 insertions(+), 216 deletions(-) [+]
line wrap: on
line diff
--- a/genesis.c	Sun Mar 09 22:53:04 2025 -0700
+++ b/genesis.c	Fri Mar 14 01:18:11 2025 -0700
@@ -254,6 +254,7 @@
 		segacd_register_section_handlers(cd, buf);
 	}
 	uint8_t tmss_old = gen->tmss;
+	uint8_t old_z80_clock_bit = gen->vdp->test_regs[1] & 1;
 	gen->tmss = 0xFF;
 	while (buf->cur_pos < buf->size)
 	{
@@ -286,6 +287,14 @@
 #endif
 	free(buf->handlers);
 	buf->handlers = NULL;
+	uint8_t new_z80_clock_bit = gen->vdp->test_regs[1] & 1;
+	if (old_z80_clock_bit != new_z80_clock_bit) {
+		gen->z80->Z80_OPTS->gen.clock_divider = new_z80_clock_bit ? MCLKS_PER_YM : MCLKS_PER_Z80;
+		gen->psg->clock_inc = new_z80_clock_bit ? MCLKS_PER_YM * 16 : MCLKS_PER_PSG;
+		psg_adjust_master_clock(gen->psg, gen->master_clock);
+		z80_invalidate_code_range(gen->z80, 0, 0x4000);
+		z80_clock_divider_updated(gen->z80->Z80_OPTS);
+	}
 }
 
 static void deserialize(system_header *sys, uint8_t *data, size_t size)
@@ -1067,8 +1076,19 @@
 		}
 	} else if (vdp_port < 0x18) {
 		psg_write(gen->psg, value);
+	} else if (vdp_port < 0x1C) {
+		vdp_test_port_select(gen->vdp, value);
 	} else {
+		uint8_t old_z80_clock_bit = gen->vdp->test_regs[1] & 1;
 		vdp_test_port_write(gen->vdp, value);
+		uint8_t new_z80_clock_bit = gen->vdp->test_regs[1] & 1;
+		if (old_z80_clock_bit != new_z80_clock_bit) {
+			gen->z80->Z80_OPTS->gen.clock_divider = new_z80_clock_bit ? MCLKS_PER_YM : MCLKS_PER_Z80;
+			gen->psg->clock_inc = new_z80_clock_bit ? MCLKS_PER_YM * 16 : MCLKS_PER_PSG;
+			psg_adjust_master_clock(gen->psg, gen->master_clock);
+			z80_invalidate_code_range(gen->z80, 0, 0x4000);
+			z80_clock_divider_updated(gen->z80->Z80_OPTS);
+		}
 	}
 
 	if (did_dma) {
--- a/vdp.c	Sun Mar 09 22:53:04 2025 -0700
+++ b/vdp.c	Fri Mar 14 01:18:11 2025 -0700
@@ -727,8 +727,8 @@
 		   (context->flags & FLAG_PENDING) ? "word" : (context->flags2 & FLAG2_BYTE_PENDING) ? "byte" : "none",
 		   context->vcounter, context->hslot*2, (context->flags2 & FLAG2_VINT_PENDING) ? "true" : "false",
 		   (context->flags2 & FLAG2_HINT_PENDING) ? "true" : "false", vdp_status(context));
-	printf("\nDebug Register: %X | Output disabled: %s, Force Layer: %d\n", context->test_port,
-		(context->test_port & TEST_BIT_DISABLE)  ? "true" : "false", context->test_port >> 7 & 3
+	printf("\nDebug Register: %X | Output disabled: %s, Force Layer: %d\n", context->test_regs[0],
+		(context->test_regs[0] & TEST_BIT_DISABLE)  ? "true" : "false", context->test_regs[0] >> 7 & 3
 	);
 }
 
@@ -1812,8 +1812,8 @@
 {
 	uint8_t *dst;
 	uint8_t *debug_dst;
-	uint8_t output_disabled = (context->test_port & TEST_BIT_DISABLE) != 0;
-	uint8_t test_layer = context->test_port >> 7 & 3;
+	uint8_t output_disabled = (context->test_regs[0] & TEST_BIT_DISABLE) != 0;
+	uint8_t test_layer = context->test_regs[0] >> 7 & 3;
 	if (context->state == PREPARING && !test_layer) {
 		if (col) {
 			col -= 2;
@@ -2915,10 +2915,10 @@
 {
 	uint8_t *dst = context->compositebuf + BORDER_LEFT + ((context->regs[REG_MODE_4] & BIT_H40) ? 320 : 256);
 	uint8_t pixel = context->regs[REG_BG_COLOR] & 0x3F;
-	if ((context->test_port & TEST_BIT_DISABLE) != 0) {
+	if ((context->test_regs[0] & TEST_BIT_DISABLE) != 0) {
 		pixel = 0x3F;
 	}
-	uint8_t test_layer = context->test_port >> 7 & 3;
+	uint8_t test_layer = context->test_regs[0] >> 7 & 3;
 	if (test_layer) {
 		switch(test_layer)
 			{
@@ -3276,7 +3276,7 @@
 	uint32_t mask;
 	uint32_t const slot_cycles = MCLKS_SLOT_H40;
 	uint8_t bgindex = context->regs[REG_BG_COLOR] & 0x3F;
-	uint8_t test_layer = context->test_port >> 7 & 3;
+	uint8_t test_layer = context->test_regs[0] >> 7 & 3;
 
 	//165
 	if (!(context->regs[REG_MODE_3] & BIT_VSCROLL)) {
@@ -3482,7 +3482,7 @@
 	uint32_t mask;
 	uint32_t const slot_cycles = MCLKS_SLOT_H40;
 	uint8_t bgindex = context->regs[REG_BG_COLOR] & 0x3F;
-	uint8_t test_layer = context->test_port >> 7 & 3;
+	uint8_t test_layer = context->test_regs[0] >> 7 & 3;
 	if (!context->output) {
 		//This shouldn't happen normally, but it can theoretically
 		//happen when doing border busting
@@ -3712,7 +3712,7 @@
 	uint32_t mask;
 	uint32_t const slot_cycles = MCLKS_SLOT_H32;
 	uint8_t bgindex = context->regs[REG_BG_COLOR] & 0x3F;
-	uint8_t test_layer = context->test_port >> 7 & 3;
+	uint8_t test_layer = context->test_regs[0] >> 7 & 3;
 	if (!context->output) {
 		//This shouldn't happen normally, but it can theoretically
 		//happen when doing border busting
@@ -3930,7 +3930,7 @@
 	uint32_t mask;
 	uint32_t const slot_cycles = MCLKS_SLOT_H32;
 	uint8_t bgindex = 0x10 | (context->regs[REG_BG_COLOR] & 0xF) + MODE4_OFFSET;
-	uint8_t test_layer = context->test_port >> 7 & 3;
+	uint8_t test_layer = context->test_regs[0] >> 7 & 3;
 	if (!context->output) {
 		//This shouldn't happen normally, but it can theoretically
 		//happen when doing border busting
@@ -4810,7 +4810,7 @@
 		dst = NULL;
 	}
 
-	uint8_t test_layer = context->test_port >> 7 & 3;
+	uint8_t test_layer = context->test_regs[0] >> 7 & 3;
 
 	while(context->cycles < target_cycles)
 	{
@@ -5301,9 +5301,16 @@
 	increment_address(context);
 }
 
+void vdp_test_port_select(vdp_context * context, uint16_t value)
+{
+	context->selected_test_reg = value >> 8 & 0xF;
+}
+
 void vdp_test_port_write(vdp_context * context, uint16_t value)
 {
-	context->test_port = value;
+	if (context->selected_test_reg < 8) {
+		context->test_regs[context->selected_test_reg] = value;
+	}
 }
 
 uint16_t vdp_status(vdp_context *context)
@@ -5714,7 +5721,7 @@
 	}
 }
 
-#define VDP_STATE_VERSION 4
+#define VDP_STATE_VERSION 5
 void vdp_serialize(vdp_context *context, serialize_buffer *buf)
 {
 	save_int8(buf, VDP_STATE_VERSION);
@@ -5762,7 +5769,7 @@
 	save_int16(buf, context->vscroll_latch[1]);
 	save_int16(buf, context->col_1);
 	save_int16(buf, context->col_2);
-	save_int16(buf, context->test_port);
+	save_int16(buf, context->test_regs[0]);
 	save_buffer8(buf, context->tmp_buf_a, SCROLL_BUFFER_SIZE);
 	save_buffer8(buf, context->tmp_buf_b, SCROLL_BUFFER_SIZE);
 	save_int8(buf, context->buf_a_off);
@@ -5799,6 +5806,8 @@
 	save_int8(buf, context->cd);
 	save_int8(buf, context->window_h_latch);
 	save_int8(buf, context->window_v_latch);
+	save_buffer16(buf, context->test_regs + 1, 7);
+	save_int8(buf, context->selected_test_reg);
 }
 
 void vdp_deserialize(deserialize_buffer *buf, void *vcontext)
@@ -5867,7 +5876,7 @@
 	context->vscroll_latch[1] = load_int16(buf);
 	context->col_1 = load_int16(buf);
 	context->col_2 = load_int16(buf);
-	context->test_port = load_int16(buf);
+	context->test_regs[0] = load_int16(buf);
 	load_buffer8(buf, context->tmp_buf_a, SCROLL_BUFFER_SIZE);
 	load_buffer8(buf, context->tmp_buf_b, SCROLL_BUFFER_SIZE);
 	context->buf_a_off = load_int8(buf) & SCROLL_BUFFER_MASK;
@@ -5946,6 +5955,13 @@
 	} else {
 		context->address_latch = context->address;
 	}
+	if (version > 4) {
+		load_buffer16(buf, context->test_regs + 1, 7);
+		context->selected_test_reg = load_int8(buf);
+	} else {
+		memset(context->test_regs + 1, 0, 7 * sizeof(uint16_t));
+		context->selected_test_reg = 0;
+	}
 	update_video_params(context);
 }
 
--- a/vdp.h	Sun Mar 09 22:53:04 2025 -0700
+++ b/vdp.h	Fri Mar 14 01:18:11 2025 -0700
@@ -236,7 +236,7 @@
 	uint16_t       col_2;
 	uint16_t       hv_latch;
 	uint16_t       prefetch;
-	uint16_t       test_port;
+	uint16_t       test_regs[8];
 	//stores 2-bit palette + 4-bit palette index + priority for current sprite line
 	uint8_t        linebuf[LINEBUF_SIZE];
 	uint8_t        compositebuf[LINEBUF_SIZE];
@@ -269,6 +269,7 @@
 	uint8_t        cram_latch;
 	uint8_t        window_h_latch;
 	uint8_t        window_v_latch;
+	uint8_t        selected_test_reg;
 	int32_t        color_map[1 << 12];
 	uint8_t        vdpmem[];
 };
@@ -289,6 +290,7 @@
 void vdp_control_port_write_pbc(vdp_context * context, uint8_t value);
 void vdp_data_port_write(vdp_context * context, uint16_t value);
 void vdp_data_port_write_pbc(vdp_context * context, uint8_t value);
+void vdp_test_port_select(vdp_context * context, uint16_t value);
 void vdp_test_port_write(vdp_context * context, uint16_t value);
 uint16_t vdp_control_port_read(vdp_context * context);
 uint16_t vdp_data_port_read(vdp_context * context, uint32_t *cpu_cycle, uint32_t cpu_divider);
--- a/z80.cpu	Sun Mar 09 22:53:04 2025 -0700
+++ b/z80.cpu	Fri Mar 14 01:18:11 2025 -0700
@@ -26,6 +26,7 @@
 	void zremove_breakpoint(z80_context * context, uint16_t address);
 	void z80_options_free(z80_options *opts);
 	void z80_sync_cycle(z80_context *context, uint32_t target_cycle);
+	void z80_clock_divider_updated(z80_options *options);
 
 regs
 	main 8 b c d e h l f a
--- a/z80_to_x86.c	Sun Mar 09 22:53:04 2025 -0700
+++ b/z80_to_x86.c	Fri Mar 14 01:18:11 2025 -0700
@@ -3211,6 +3211,9 @@
 	options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers);
 	options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags);
 	options->gen.ram_flags_shift = 7;
+	options->io_memmap = io_chunks;
+	options->io_memmap_chunks = num_io_chunks;
+	options->io_address_mask = io_address_mask;
 
 	options->flags = 0;
 #ifdef X86_64
@@ -3379,206 +3382,7 @@
 
 	options->gen.handle_code_write = (code_ptr)z80_handle_code_write;
 
-	options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc);
-	options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc);
-
-	code_ptr skip_int = code->cur;
-	//calculate adjust size
-	add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
-	uint32_t adjust_size = code->cur - skip_int;
-	code->cur = skip_int;
-
-	cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
-	code_ptr skip_sync = code->cur + 1;
-	jcc(code, CC_B, skip_sync);
-	neg_r(code, options->gen.cycles, SZ_D);
-	add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
-	//save PC
-	mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, pc), SZ_D);
-	options->do_sync = code->cur;
-	call(code, options->gen.save_context);
-	tmp_stack_off = code->stack_off;
-	//pop return address off the stack and save for resume later
-	//pop_rind(code, options->gen.context_reg);
-	pop_r(code, RAX);
-	add_ir(code, adjust_size, RAX, SZ_PTR);
-	add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
-	mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
-
-	//restore callee saved registers
-	restore_callee_save_regs(code);
-	//return to caller of z80_run
-	retn(code);
-	*skip_sync = code->cur - (skip_sync+1);
-	neg_r(code, options->gen.cycles, SZ_D);
-	add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
-	retn(code);
-	code->stack_off = tmp_stack_off;
-
-	options->gen.handle_cycle_limit_int = code->cur;
-	neg_r(code, options->gen.cycles, SZ_D);
-	add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
-	cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D);
-	jcc(code, CC_B, skip_int);
-	//check that we are not past the end of interrupt pulse
-	cmp_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, int_pulse_end), SZ_D);
-	jcc(code, CC_B, skip_int);
-	//set limit to the cycle limit
-	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.scratch2, SZ_D);
-	mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
-	neg_r(code, options->gen.cycles, SZ_D);
-	add_rr(code, options->gen.scratch2, options->gen.cycles, SZ_D);
-	//disable interrupts
-	cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, int_is_nmi), SZ_B);
-	code_ptr is_nmi = code->cur + 1;
-	jcc(code, CC_NZ, is_nmi);
-	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
-	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
-	cycles(&options->gen, 6); //interupt ack cycle
-	code_ptr after_int_disable = code->cur + 1;
-	jmp(code, after_int_disable);
-	*is_nmi = code->cur - (is_nmi + 1);
-	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, iff1), options->gen.scratch2, SZ_B);
-	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
-	mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
-	cycles(&options->gen, 5); //NMI processing cycles
-	*after_int_disable = code->cur - (after_int_disable + 1);
-	//save return address (in scratch1) to Z80 stack
-	sub_ir(code, 2, options->regs[Z80_SP], SZ_W);
-	mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
-	//we need to do check_cycles and cycles outside of the write_8 call
-	//so that the stack has the correct depth if we need to return to C
-	//for a synchronization
-	check_cycles(&options->gen);
-	cycles(&options->gen, 3);
-	//save word to write before call to write_8_noinc
-	push_r(code, options->gen.scratch1);
-	call(code, options->write_8_noinc);
-	//restore word to write
-	pop_r(code, options->gen.scratch1);
-	//write high byte to SP+1
-	mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
-	add_ir(code, 1, options->gen.scratch2, SZ_W);
-	shr_ir(code, 8, options->gen.scratch1, SZ_W);
-	check_cycles(&options->gen);
-	cycles(&options->gen, 3);
-	call(code, options->write_8_noinc);
-	//dispose of return address as we'll be jumping somewhere else
-	add_ir(code, 16, RSP, SZ_PTR);
-	cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, int_is_nmi), SZ_B);
-	is_nmi = code->cur + 1;
-	jcc(code, CC_NZ, is_nmi);
-	//TODO: Support interrupt mode 0, not needed for Genesis sit it seems to read $FF during intack
-	//which is conveniently rst $38, i.e. the same thing that im 1 does
-	//check interrupt mode
-	cmp_irdisp(code, 2, options->gen.context_reg, offsetof(z80_context, im), SZ_B);
-	code_ptr im2 = code->cur + 1;
-	jcc(code, CC_Z, im2);
-	mov_ir(code, 0x38, options->gen.scratch1, SZ_W);
-	cycles(&options->gen, 1); //total time for mode 0/1 is 13 t-states
-	code_ptr after_int_dest = code->cur + 1;
-	jmp(code, after_int_dest);
-	*im2 = code->cur - (im2 + 1);
-	//read vector address from I << 8 | vector
-	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + Z80_I, options->gen.scratch1, SZ_B);
-	shl_ir(code, 8, options->gen.scratch1, SZ_W);
-	movzx_rdispr(code, options->gen.context_reg, offsetof(z80_context, im2_vector), options->gen.scratch2, SZ_B, SZ_W);
-	or_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_W);
-	push_r(code, options->gen.scratch1);
-	cycles(&options->gen, 3);
-	call(code, options->read_8_noinc);
-	pop_r(code, options->gen.scratch2);
-	push_r(code, options->gen.scratch1);
-	mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_W);
-	add_ir(code, 1, options->gen.scratch1, SZ_W);
-	cycles(&options->gen, 3);
-	call(code, options->read_8_noinc);
-	pop_r(code, options->gen.scratch2);
-	shl_ir(code, 8, options->gen.scratch1, SZ_W);
-	movzx_rr(code, options->gen.scratch2, options->gen.scratch2, SZ_B, SZ_W);
-	or_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_W);
-	code_ptr after_int_dest2 = code->cur + 1;
-	jmp(code, after_int_dest2);
-	*is_nmi = code->cur - (is_nmi + 1);
-	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, int_is_nmi), SZ_B);
-	mov_irdisp(code, CYCLE_NEVER, options->gen.context_reg, offsetof(z80_context, nmi_start), SZ_D);
-	mov_ir(code, 0x66, options->gen.scratch1, SZ_W);
-	*after_int_dest = code->cur - (after_int_dest + 1);
-	*after_int_dest2 = code->cur - (after_int_dest2 + 1);
-	call(code, options->native_addr);
-	mov_rrind(code, options->gen.scratch1, options->gen.context_reg, SZ_PTR);
-	tmp_stack_off = code->stack_off;
-	restore_callee_save_regs(code);
-	//return to caller of z80_run to sync
-	retn(code);
-	code->stack_off = tmp_stack_off;
-
-	//HACK
-	options->gen.address_size = SZ_D;
-	options->gen.address_mask = io_address_mask;
-	options->gen.bus_cycles = 4;
-	options->read_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, READ_8, NULL);
-	options->write_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, WRITE_8, NULL);
-	options->gen.address_size = SZ_W;
-	options->gen.address_mask = 0xFFFF;
-	options->gen.bus_cycles = 3;
-
-	options->read_16 = code->cur;
-	cycles(&options->gen, 3);
-	check_cycles(&options->gen);
-	//TODO: figure out how to handle the extra wait state for word reads to bank area
-	//may also need special handling to avoid too much stack depth when access is blocked
-	push_r(code, options->gen.scratch1);
-	call(code, options->read_8_noinc);
-	mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B);
-#ifndef X86_64
-	//scratch 2 is a caller save register in 32-bit builds and may be clobbered by something called from the read8 fun
-	mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_B);
-#endif
-	pop_r(code, options->gen.scratch1);
-	add_ir(code, 1, options->gen.scratch1, SZ_W);
-	cycles(&options->gen, 3);
-	check_cycles(&options->gen);
-	call(code, options->read_8_noinc);
-	shl_ir(code, 8, options->gen.scratch1, SZ_W);
-#ifdef X86_64
-	mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B);
-#else
-	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch1, SZ_B);
-#endif
-	retn(code);
-
-	options->write_16_highfirst = code->cur;
-	cycles(&options->gen, 3);
-	check_cycles(&options->gen);
-	push_r(code, options->gen.scratch2);
-	push_r(code, options->gen.scratch1);
-	add_ir(code, 1, options->gen.scratch2, SZ_W);
-	shr_ir(code, 8, options->gen.scratch1, SZ_W);
-	call(code, options->write_8_noinc);
-	pop_r(code, options->gen.scratch1);
-	pop_r(code, options->gen.scratch2);
-	cycles(&options->gen, 3);
-	check_cycles(&options->gen);
-	//TODO: Check if we can get away with TCO here
-	call(code, options->write_8_noinc);
-	retn(code);
-
-	options->write_16_lowfirst = code->cur;
-	cycles(&options->gen, 3);
-	check_cycles(&options->gen);
-	push_r(code, options->gen.scratch2);
-	push_r(code, options->gen.scratch1);
-	call(code, options->write_8_noinc);
-	pop_r(code, options->gen.scratch1);
-	pop_r(code, options->gen.scratch2);
-	add_ir(code, 1, options->gen.scratch2, SZ_W);
-	shr_ir(code, 8, options->gen.scratch1, SZ_W);
-	cycles(&options->gen, 3);
-	check_cycles(&options->gen);
-	//TODO: Check if we can get away with TCO here
-	call(code, options->write_8_noinc);
-	retn(code);
+	z80_clock_divider_updated(options);
 
 	options->retrans_stub = code->cur;
 	tmp_stack_off = code->stack_off;
@@ -3865,6 +3669,212 @@
 	code->stack_off = start_stack_off;
 }
 
+void z80_clock_divider_updated(z80_options *options)
+{
+	//TODO: make this not leak memory whenever the clock changes
+	options->read_8 = gen_mem_fun(&options->gen, options->gen.memmap, options->gen.memmap_chunks, READ_8, &options->read_8_noinc);
+	options->write_8 = gen_mem_fun(&options->gen, options->gen.memmap, options->gen.memmap_chunks, WRITE_8, &options->write_8_noinc);
+	
+	code_info *code = &options->gen.code;
+	code_ptr skip_int = code->cur;
+	//calculate adjust size
+	add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
+	uint32_t adjust_size = code->cur - skip_int;
+	code->cur = skip_int;
+
+	cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
+	code_ptr skip_sync = code->cur + 1;
+	jcc(code, CC_B, skip_sync);
+	neg_r(code, options->gen.cycles, SZ_D);
+	add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
+	//save PC
+	mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, pc), SZ_D);
+	options->do_sync = code->cur;
+	call(code, options->gen.save_context);
+	uint32_t tmp_stack_off = code->stack_off;
+	//pop return address off the stack and save for resume later
+	//pop_rind(code, options->gen.context_reg);
+	pop_r(code, RAX);
+	add_ir(code, adjust_size, RAX, SZ_PTR);
+	add_ir(code, 16-sizeof(void *), RSP, SZ_PTR);
+	mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
+
+	//restore callee saved registers
+	restore_callee_save_regs(code);
+	//return to caller of z80_run
+	retn(code);
+	*skip_sync = code->cur - (skip_sync+1);
+	neg_r(code, options->gen.cycles, SZ_D);
+	add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
+	retn(code);
+	code->stack_off = tmp_stack_off;
+	
+	options->gen.handle_cycle_limit_int = code->cur;
+	neg_r(code, options->gen.cycles, SZ_D);
+	add_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.cycles, SZ_D);
+	cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D);
+	jcc(code, CC_B, skip_int);
+	//check that we are not past the end of interrupt pulse
+	cmp_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, int_pulse_end), SZ_D);
+	jcc(code, CC_B, skip_int);
+	//set limit to the cycle limit
+	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.scratch2, SZ_D);
+	mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
+	neg_r(code, options->gen.cycles, SZ_D);
+	add_rr(code, options->gen.scratch2, options->gen.cycles, SZ_D);
+	//disable interrupts
+	cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, int_is_nmi), SZ_B);
+	code_ptr is_nmi = code->cur + 1;
+	jcc(code, CC_NZ, is_nmi);
+	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
+	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
+	cycles(&options->gen, 6); //interrupt ack cycle
+	code_ptr after_int_disable = code->cur + 1;
+	jmp(code, after_int_disable);
+	*is_nmi = code->cur - (is_nmi + 1);
+	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, iff1), options->gen.scratch2, SZ_B);
+	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
+	mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
+	cycles(&options->gen, 5); //NMI processing cycles
+	*after_int_disable = code->cur - (after_int_disable + 1);
+	//save return address (in scratch1) to Z80 stack
+	sub_ir(code, 2, options->regs[Z80_SP], SZ_W);
+	mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
+	//we need to do check_cycles and cycles outside of the write_8 call
+	//so that the stack has the correct depth if we need to return to C
+	//for a synchronization
+	check_cycles(&options->gen);
+	cycles(&options->gen, 3);
+	//save word to write before call to write_8_noinc
+	push_r(code, options->gen.scratch1);
+	call(code, options->write_8_noinc);
+	//restore word to write
+	pop_r(code, options->gen.scratch1);
+	//write high byte to SP+1
+	mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
+	add_ir(code, 1, options->gen.scratch2, SZ_W);
+	shr_ir(code, 8, options->gen.scratch1, SZ_W);
+	check_cycles(&options->gen);
+	cycles(&options->gen, 3);
+	call(code, options->write_8_noinc);
+	//dispose of return address as we'll be jumping somewhere else
+	add_ir(code, 16, RSP, SZ_PTR);
+	cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, int_is_nmi), SZ_B);
+	is_nmi = code->cur + 1;
+	jcc(code, CC_NZ, is_nmi);
+	//TODO: Support interrupt mode 0, not needed for Genesis since it seems to read $FF during intack
+	//which is conveniently rst $38, i.e. the same thing that im 1 does
+	//check interrupt mode
+	cmp_irdisp(code, 2, options->gen.context_reg, offsetof(z80_context, im), SZ_B);
+	code_ptr im2 = code->cur + 1;
+	jcc(code, CC_Z, im2);
+	mov_ir(code, 0x38, options->gen.scratch1, SZ_W);
+	cycles(&options->gen, 1); //total time for mode 0/1 is 13 t-states
+	code_ptr after_int_dest = code->cur + 1;
+	jmp(code, after_int_dest);
+	*im2 = code->cur - (im2 + 1);
+	//read vector address from I << 8 | vector
+	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + Z80_I, options->gen.scratch1, SZ_B);
+	shl_ir(code, 8, options->gen.scratch1, SZ_W);
+	movzx_rdispr(code, options->gen.context_reg, offsetof(z80_context, im2_vector), options->gen.scratch2, SZ_B, SZ_W);
+	or_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_W);
+	push_r(code, options->gen.scratch1);
+	cycles(&options->gen, 3);
+	call(code, options->read_8_noinc);
+	pop_r(code, options->gen.scratch2);
+	push_r(code, options->gen.scratch1);
+	mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_W);
+	add_ir(code, 1, options->gen.scratch1, SZ_W);
+	cycles(&options->gen, 3);
+	call(code, options->read_8_noinc);
+	pop_r(code, options->gen.scratch2);
+	shl_ir(code, 8, options->gen.scratch1, SZ_W);
+	movzx_rr(code, options->gen.scratch2, options->gen.scratch2, SZ_B, SZ_W);
+	or_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_W);
+	code_ptr after_int_dest2 = code->cur + 1;
+	jmp(code, after_int_dest2);
+	*is_nmi = code->cur - (is_nmi + 1);
+	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, int_is_nmi), SZ_B);
+	mov_irdisp(code, CYCLE_NEVER, options->gen.context_reg, offsetof(z80_context, nmi_start), SZ_D);
+	mov_ir(code, 0x66, options->gen.scratch1, SZ_W);
+	*after_int_dest = code->cur - (after_int_dest + 1);
+	*after_int_dest2 = code->cur - (after_int_dest2 + 1);
+	call(code, options->native_addr);
+	mov_rrind(code, options->gen.scratch1, options->gen.context_reg, SZ_PTR);
+	tmp_stack_off = code->stack_off;
+	restore_callee_save_regs(code);
+	//return to caller of z80_run to sync
+	retn(code);
+	code->stack_off = tmp_stack_off;
+	
+	//HACK
+	options->gen.address_size = SZ_D;
+	options->gen.address_mask = options->io_address_mask;
+	options->gen.bus_cycles = 4;
+	options->read_io = gen_mem_fun(&options->gen, options->io_memmap, options->io_memmap_chunks, READ_8, NULL);
+	options->write_io = gen_mem_fun(&options->gen, options->io_memmap, options->io_memmap_chunks, WRITE_8, NULL);
+	options->gen.address_size = SZ_W;
+	options->gen.address_mask = 0xFFFF;
+	options->gen.bus_cycles = 3;
+	
+	options->read_16 = code->cur;
+	cycles(&options->gen, 3);
+	check_cycles(&options->gen);
+	//TODO: figure out how to handle the extra wait state for word reads to bank area
+	//may also need special handling to avoid too much stack depth when access is blocked
+	push_r(code, options->gen.scratch1);
+	call(code, options->read_8_noinc);
+	mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B);
+#ifndef X86_64
+	//scratch 2 is a caller save register in 32-bit builds and may be clobbered by something called from the read8 fun
+	mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_B);
+#endif
+	pop_r(code, options->gen.scratch1);
+	add_ir(code, 1, options->gen.scratch1, SZ_W);
+	cycles(&options->gen, 3);
+	check_cycles(&options->gen);
+	call(code, options->read_8_noinc);
+	shl_ir(code, 8, options->gen.scratch1, SZ_W);
+#ifdef X86_64
+	mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B);
+#else
+	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch1, SZ_B);
+#endif
+	retn(code);
+
+	options->write_16_highfirst = code->cur;
+	cycles(&options->gen, 3);
+	check_cycles(&options->gen);
+	push_r(code, options->gen.scratch2);
+	push_r(code, options->gen.scratch1);
+	add_ir(code, 1, options->gen.scratch2, SZ_W);
+	shr_ir(code, 8, options->gen.scratch1, SZ_W);
+	call(code, options->write_8_noinc);
+	pop_r(code, options->gen.scratch1);
+	pop_r(code, options->gen.scratch2);
+	cycles(&options->gen, 3);
+	check_cycles(&options->gen);
+	//TODO: Check if we can get away with TCO here
+	call(code, options->write_8_noinc);
+	retn(code);
+
+	options->write_16_lowfirst = code->cur;
+	cycles(&options->gen, 3);
+	check_cycles(&options->gen);
+	push_r(code, options->gen.scratch2);
+	push_r(code, options->gen.scratch1);
+	call(code, options->write_8_noinc);
+	pop_r(code, options->gen.scratch1);
+	pop_r(code, options->gen.scratch2);
+	add_ir(code, 1, options->gen.scratch2, SZ_W);
+	shr_ir(code, 8, options->gen.scratch1, SZ_W);
+	cycles(&options->gen, 3);
+	check_cycles(&options->gen);
+	//TODO: Check if we can get away with TCO here
+	call(code, options->write_8_noinc);
+	retn(code);
+}
+
 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler)
 {
 	context->bp_handler = bp_handler;
--- a/z80_to_x86.h	Sun Mar 09 22:53:04 2025 -0700
+++ b/z80_to_x86.h	Fri Mar 14 01:18:11 2025 -0700
@@ -52,8 +52,11 @@
 	code_ptr        write_16_lowfirst;
 	code_ptr		read_io;
 	code_ptr		write_io;
+	memmap_chunk const *io_memmap;
+	uint32_t        io_memmap_chunks;
 
 	uint32_t        flags;
+	uint16_t        io_address_mask;
 	int8_t          regs[Z80_UNUSED];
 	z80_ctx_fun     run;
 } z80_options;
@@ -115,6 +118,7 @@
 z80_context * z80_handle_code_write(uint32_t address, z80_context * context);
 void z80_invalidate_code_range(z80_context *context, uint32_t start, uint32_t end);
 void z80_reset(z80_context * context);
+void z80_clock_divider_updated(z80_options *options);
 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler);
 void zremove_breakpoint(z80_context * context, uint16_t address);
 void z80_add_watchpoint(z80_context *context, uint16_t address, uint16_t size);
--- a/z80_util.c	Sun Mar 09 22:53:04 2025 -0700
+++ b/z80_util.c	Fri Mar 14 01:18:11 2025 -0700
@@ -339,3 +339,7 @@
 void zremove_breakpoint(z80_context * context, uint16_t address)
 {
 }
+
+void z80_clock_divider_updated(z80_options *options)
+{
+}