changeset 1769:8fe162bdb038 mame_interp

Merge from default
author Michael Pavone <pavone@retrodev.com>
date Fri, 01 Mar 2019 14:17:29 -0800
parents 63256371046f (current diff) 8a29c250f352 (diff)
children a81db00e171a
files Makefile backend.c backend.h blastcpm.c blastem.c debug.h gdb_remote.c genesis.c genesis.h sms.c sms.h
diffstat 19 files changed, 3692 insertions(+), 224 deletions(-)
--- a/Makefile	Fri Mar 01 08:17:57 2019 -0800
+++ b/Makefile	Fri Mar 01 14:17:29 2019 -0800
@@ -121,7 +121,8 @@
 endif
 
 ifdef PROFILE
-LDFLAGS+= -Wl,--no-as-needed -lprofiler -Wl,--as-needed
+PROFFLAGS:= -Wl,--no-as-needed -lprofiler -Wl,--as-needed
+CFLAGS+= -g3
 endif
 ifdef NOGL
 CFLAGS+= -DDISABLE_OPENGL
@@ -164,8 +165,13 @@
 endif
 endif
 
+ifdef NEW_CORE
+Z80OBJS=z80.o z80inst.o 
+CFLAGS+= -DNEW_CORE
+else
 #Z80OBJS=z80inst.o z80_to_x86.o
 Z80OBJS=z80inst.o mame_z80/z80.o
+endif
 AUDIOOBJS=ym2612.o psg.o wave.o
 CONFIGOBJS=config.o tern.o util.o paths.o 
 NUKLEAROBJS=$(FONT) nuklear_ui/blastem_nuklear.o nuklear_ui/sfnt.o controller_info.o
@@ -238,7 +244,7 @@
 	$(CC) -shared -o $@ $^ $(LDFLAGS)
 
 blastem$(EXE) : $(MAINOBJS)
-	$(CC) -o $@ $^ $(LDFLAGS)
+	$(CC) -o $@ $^ $(LDFLAGS) $(PROFFLAGS)
 	$(FIXUP) ./$@
 	
 blastjag$(EXE) : jaguar.o jag_video.o $(RENDEROBJS) serialize.o $(M68KOBJS) $(TRANSOBJS) $(CONFIGOBJS)
@@ -263,7 +269,7 @@
 	$(CC) -o transz80 transz80.o $(Z80OBJS) $(TRANSOBJS)
 
 ztestrun : ztestrun.o serialize.o $(Z80OBJS) $(TRANSOBJS)
-	$(CC) -o ztestrun ztestrun.o $(Z80OBJS) $(TRANSOBJS) $(OPT)
+	$(CC) -o ztestrun $^ $(OPT)
 
 ztestgen : ztestgen.o z80inst.o
 	$(CC) -ggdb -o ztestgen ztestgen.o z80inst.o
@@ -277,7 +283,7 @@
 	$(FIXUP) ./$@
 
 blastcpm : blastcpm.o util.o serialize.o $(Z80OBJS) $(TRANSOBJS)
-	$(CC) -o $@ $^ $(OPT)
+	$(CC) -o $@ $^ $(OPT) $(PROFFLAGS)
 
 test : test.o vdp.o
 	$(CC) -o test test.o vdp.o
@@ -302,6 +308,9 @@
 
 vos_prog_info : vos_prog_info.o vos_program_module.o
 	$(CC) -o vos_prog_info vos_prog_info.o vos_program_module.o
+	
+%.c : %.cpu cpu_dsl.py
+	./cpu_dsl.py -d goto $< > $@
 
 %.o : %.S
 	$(CC) -c -o $@ $<
--- a/backend.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/backend.c	Fri Mar 01 14:17:29 2019 -0800
@@ -95,6 +95,31 @@
 	return NULL;
 }
 
+void * get_native_write_pointer(uint32_t address, void ** mem_pointers, cpu_options * opts)
+{
+	memmap_chunk const * memmap = opts->memmap;
+	address &= opts->address_mask;
+	for (uint32_t chunk = 0; chunk < opts->memmap_chunks; chunk++)
+	{
+		if (address >= memmap[chunk].start && address < memmap[chunk].end) {
+			if (!(memmap[chunk].flags & (MMAP_WRITE))) {
+				return NULL;
+			}
+			uint8_t * base = memmap[chunk].flags & MMAP_PTR_IDX
+				? mem_pointers[memmap[chunk].ptr_index]
+				: memmap[chunk].buffer;
+			if (!base) {
+				if (memmap[chunk].flags & MMAP_AUX_BUFF) {
+					return memmap[chunk].buffer + (address & memmap[chunk].aux_mask);
+				}
+				return NULL;
+			}
+			return base + (address & memmap[chunk].mask);
+		}
+	}
+	return NULL;
+}
+
 uint16_t read_word(uint32_t address, void **mem_pointers, cpu_options *opts, void *context)
 {
 	memmap_chunk const *chunk = find_map_chunk(address, opts, 0, NULL);
@@ -169,7 +194,7 @@
 	if (!chunk) {
 		return 0xFF;
 	}
-	uint32_t offset = (address - chunk->start) & chunk->mask;
+	uint32_t offset = address & chunk->mask;
 	if (chunk->flags & MMAP_READ) {
 		uint8_t *base;
 		if (chunk->flags & MMAP_PTR_IDX) {
@@ -178,13 +203,17 @@
 			base = chunk->buffer;
 		}
 		if (base) {
-			uint16_t val;
 			if ((chunk->flags & MMAP_ONLY_ODD) || (chunk->flags & MMAP_ONLY_EVEN)) {
+				if (address & 1) {
+					if (chunk->flags & MMAP_ONLY_EVEN) {
+						return 0xFF;
+					}
+				} else if (chunk->flags & MMAP_ONLY_ODD) {
+					return 0xFF;
+				}
 				offset /= 2;
-			} else if (opts->byte_swap || (chunk->flags & MMAP_BYTESWAP)) {
-				offset ^= 1;
 			}
-			return base[offset];;
+			return base[offset];
 		}
 	}
 	if ((!(chunk->flags & MMAP_READ) || (chunk->flags & MMAP_FUNC_NULL)) && chunk->read_8) {
@@ -199,7 +228,7 @@
 	if (!chunk) {
 		return;
 	}
-	uint32_t offset = (address - chunk->start) & chunk->mask;
+	uint32_t offset = address & chunk->mask;
 	if (chunk->flags & MMAP_WRITE) {
 		uint8_t *base;
 		if (chunk->flags & MMAP_PTR_IDX) {
@@ -209,23 +238,16 @@
 		}
 		if (base) {
 			if ((chunk->flags & MMAP_ONLY_ODD) || (chunk->flags & MMAP_ONLY_EVEN)) {
-				
-				if (chunk->flags & MMAP_ONLY_EVEN) {
-					if (offset & 1) {
-						return;
-					}
-				} else {
-					if (!(offset & 1)) {
+				if (address & 1) {
+					if (chunk->flags & MMAP_ONLY_EVEN) {
 						return;
 					}
+				} else if (chunk->flags & MMAP_ONLY_ODD) {
+					return;
 				}
 				offset /= 2;
-				
-			} else if (opts->byte_swap || (chunk->flags & MMAP_BYTESWAP)) {
-				offset ^= 1;
 			}
 			base[offset] = value;
-			return;
 		}
 	}
 	if ((!(chunk->flags & MMAP_WRITE) || (chunk->flags & MMAP_FUNC_NULL)) && chunk->write_8) {
--- a/backend.h	Fri Mar 01 08:17:57 2019 -0800
+++ b/backend.h	Fri Mar 01 14:17:29 2019 -0800
@@ -114,6 +114,7 @@
 code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk const * memmap, uint32_t num_chunks, ftype fun_type, code_ptr *after_inc);
 #endif
 void * get_native_pointer(uint32_t address, void ** mem_pointers, cpu_options * opts);
+void * get_native_write_pointer(uint32_t address, void ** mem_pointers, cpu_options * opts);
 uint16_t read_word(uint32_t address, void **mem_pointers, cpu_options *opts, void *context);
 void write_word(uint32_t address, uint16_t value, void **mem_pointers, cpu_options *opts, void *context);
 uint8_t read_byte(uint32_t address, void **mem_pointers, cpu_options *opts, void *context);
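
backend.h picks up the declaration for the write-side helper added to backend.c above: unlike get_native_pointer, it returns NULL unless the chunk is flagged MMAP_WRITE and resolves to a real host buffer. A minimal usage sketch in C follows; the fallback write_byte() is assumed to be the existing handler-based path (its body is what the byte-write hunks above modify, though its declaration falls outside this excerpt), and the snippet is illustrative rather than part of the changeset.

#include "backend.h"

/* Sketch: write directly into guest memory when the address maps to a
 * writable host buffer; otherwise fall through to the handler-based path
 * (ROM, unmapped space, I/O regions with write_8 handlers, etc.). */
static void poke_guest_byte(uint32_t address, uint8_t value,
                            void **mem_pointers, cpu_options *opts, void *context)
{
	uint8_t *dst = get_native_write_pointer(address, mem_pointers, opts);
	if (dst) {
		*dst = value;
	} else {
		write_byte(address, value, mem_pointers, opts, context);
	}
}
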
--- a/blastcpm.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/blastcpm.c	Fri Mar 01 14:17:29 2019 -0800
@@ -2,13 +2,18 @@
 #include <stdlib.h>
 #include <stddef.h>
 #include <string.h>
+#include <time.h>
 #include <sys/select.h>
 
+#ifdef NEW_CORE
+#include "z80.h"
+#else
 #ifdef USE_NATIVE
 #include "z80_to_x86.h"
 #else
 #include "mame_z80/z80.h"
 #endif
+#endif
 #include "util.h"
 
 uint8_t ram[64 * 1024];
@@ -18,10 +23,12 @@
 #define OS_RESET 0xE403
 int headless = 1;
 
+#ifndef NEW_CORE
 void z80_next_int_pulse(z80_context * context)
 {
 	context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER;
 }
+#endif
 
 void render_errorbox(char *title, char *message)
 {
@@ -59,8 +66,18 @@
 	return select(fileno(stdin)+1, &read_fds, NULL, NULL, &timeout) > 0; 
 }
 
+time_t start;
+uint64_t total_cycles;
 void *exit_write(uint32_t address, void *context, uint8_t value)
 {
+	time_t duration = time(NULL) - start;
+	z80_context *z80 = context;
+#ifdef NEW_CORE
+	total_cycles += z80->cycles;
+#else
+	total_cycles += context->current_cycle;
+#endif
+	printf("Effective clock speed: %f MHz\n", ((double)total_cycles) / (1000000.0 * duration));
 	exit(0);
 	return context;
 }
@@ -69,7 +86,7 @@
 	{ 0x0000, 0x10000,  0xFFFF, 0, 0, MMAP_READ | MMAP_WRITE | MMAP_CODE, ram, NULL, NULL, NULL, NULL},
 };
 
-const memmap_chunk io_map[] = {
+memmap_chunk io_map[] = {
 	{ 0x0, 0x1, 0xFFFF, 0, 0, 0, NULL, NULL, NULL, console_read, console_write},
 	{ 0x1, 0x2, 0xFFFF, 0, 0, 0, NULL, NULL, NULL, console_status_read, console_flush_write},
 	{ 0x2, 0x3, 0xFFFF, 0, 0, 0, NULL, NULL, NULL, NULL, exit_write},
@@ -107,10 +124,19 @@
 	z80_context *context;
 	init_z80_opts(&opts, z80_map, 1, io_map, 3, 1, 0xFF);
 	context = init_z80_context(&opts);
+	start = time(NULL);
 	for(;;)
 	{
+#ifdef NEW_CORE
+		z80_execute(context, 1000000);
+		total_cycles += context->cycles;
+		context->cycles = 0;
+#else
 		z80_run(context, 1000000);
+		total_cycles += context->current_cycle;
 		context->current_cycle = 0;
+#endif
+		
 	}
 	return 0;
 }
\ No newline at end of file
--- a/blastem.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/blastem.c	Fri Mar 01 14:17:29 2019 -0800
@@ -11,11 +11,15 @@
 #include "system.h"
 #include "68kinst.h"
 #include "m68k_core.h"
+#ifdef NEW_CORE
+#include "z80.h"
+#else
 #ifdef USE_NATIVE
 #include "z80_to_x86.h"
 #else
 #include "mame_z80/z80.h"
 #endif
+#endif
 #include "mem.h"
 #include "vdp.h"
 #include "render.h"
--- a/cpu_dsl.py	Fri Mar 01 08:17:57 2019 -0800
+++ b/cpu_dsl.py	Fri Mar 01 14:17:29 2019 -0800
@@ -20,6 +20,14 @@
 			self.addOp(NormalOp(parts))
 		return self
 		
+	def processOps(self, prog, fieldVals, output, otype, oplist):
+		for i in range(0, len(oplist)):
+			if i + 1 < len(oplist) and oplist[i+1].op == 'update_flags':
+				flagUpdates, _ = prog.flags.parseFlagUpdate(oplist[i+1].params[0])
+			else:
+				flagUpdates = None
+			oplist[i].generate(prog, self, fieldVals, output, otype, flagUpdates)
+		
 	def resolveLocal(self, name):
 		return None
 			
@@ -47,7 +55,7 @@
 	def addOp(self, op):
 		if op.op == 'local':
 			name = op.params[0]
-			size = op.params[1]
+			size = int(op.params[1])
 			self.locals[name] = size
 		elif op.op == 'invalid':
 			name = op.params[0]
@@ -121,9 +129,14 @@
 			output.append('\n\tuint{sz}_t {name};'.format(sz=self.locals[var], name=var))
 		self.newLocals = []
 		fieldVals,_ = self.getFieldVals(value)
-		for op in self.implementation:
-			op.generate(prog, self, fieldVals, output, otype)
-		begin = '\nvoid ' + self.generateName(value) + '(' + prog.context_type + ' *context)\n{'
+		self.processOps(prog, fieldVals, output, otype, self.implementation)
+		
+		if prog.dispatch == 'call':
+			begin = '\nvoid ' + self.generateName(value) + '(' + prog.context_type + ' *context)\n{'
+		elif prog.dispatch == 'goto':
+			begin = '\n' + self.generateName(value) + ': {'
+		else:
+			raise Exception('Unsupported dispatch type ' + prog.dispatch)
 		if prog.needFlagCoalesce:
 			begin += prog.flags.coalesceFlags(prog, otype)
 		if prog.needFlagDisperse:
@@ -131,6 +144,8 @@
 		for var in self.newLocals:
 			begin += '\n\tuint{sz}_t {name};'.format(sz=self.locals[var], name=var)
 		prog.popScope()
+		if prog.dispatch == 'goto':
+			output += prog.nextInstruction(otype)
 		return begin + ''.join(output) + '\n}'
 		
 	def __str__(self):
@@ -150,16 +165,17 @@
 		self.arg_map = {}
 		self.locals = {}
 		self.regValues = {}
+		self.argValues = {}
 	
 	def addOp(self, op):
 		if op.op == 'arg':
 			name = op.params[0]
-			size = op.params[1]
+			size = int(op.params[1])
 			self.arg_map[name] = len(self.args)
 			self.args.append((name, size))
 		elif op.op == 'local':
 			name = op.params[0]
-			size = op.params[1]
+			size = int(op.params[1])
 			self.locals[name] = size
 		else:
 			self.implementation.append(op)
@@ -173,7 +189,12 @@
 		self.locals[name] = size
 	
 	def localSize(self, name):
-		return self.locals.get(name)
+		if name in self.locals:
+			return self.locals[name]
+		if name in self.arg_map:
+			argIndex = self.arg_map[name]
+			return self.args[argIndex][1]
+		return None
 			
 	def inline(self, prog, params, output, otype, parent):
 		if len(params) != len(self.args):
@@ -189,8 +210,8 @@
 		for name in self.locals:
 			size = self.locals[name]
 			output.append('\n\tuint{size}_t {sub}_{local};'.format(size=size, sub=self.name, local=name))
-		for op in self.implementation:
-			op.generate(prog, self, argValues, output, otype)
+		self.argValues = argValues
+		self.processOps(prog, argValues, output, otype, self.implementation)
 		prog.popScope()
 		
 	def __str__(self):
@@ -209,23 +230,66 @@
 		self.impls = {}
 		self.outOp = ()
 	def cBinaryOperator(self, op):
-		def _impl(prog, params):
+		def _impl(prog, params, rawParams, flagUpdates):
 			if op == '-':
 				a = params[1]
 				b = params[0]
 			else:
 				a = params[0]
 				b = params[1]
-			return '\n\t{dst} = {a} {op} {b};'.format(
-				dst = params[2], a = a, b = b, op = op
+			needsCarry = needsOflow = needsHalf = False
+			if flagUpdates:
+				for flag in flagUpdates:
+					calc = prog.flags.flagCalc[flag]
+					if calc == 'carry':
+						needsCarry = True
+					elif calc == 'half-carry':
+						needsHalf = True
+					elif calc == 'overflow':
+						needsOflow = True
+			decl = ''
+			if needsCarry or needsOflow or needsHalf:
+				size = prog.paramSize(rawParams[2])
+				if needsCarry and op != 'lsr':
+					size *= 2
+				decl,name = prog.getTemp(size)
+				dst = prog.carryFlowDst = name
+				prog.lastA = a
+				prog.lastB = b
+				prog.lastBFlow = b if op == '-' else '(~{b})'.format(b=b)
+			else:
+				dst = params[2]
+			return decl + '\n\t{dst} = {a} {op} {b};'.format(
+				dst = dst, a = a, b = b, op = op
 			)
 		self.impls['c'] = _impl
 		self.outOp = (2,)
 		return self
 	def cUnaryOperator(self, op):
-		def _impl(prog, params):
-			return '\n\t{dst} = {op}{a};'.format(
-				dst = params[1], a = params[0], op = op
+		def _impl(prog, params, rawParams, flagUpdates):
+			dst = params[1]
+			decl = ''
+			if op == '-':
+				if flagUpdates:
+					for flag in flagUpdates:
+						calc = prog.flags.flagCalc[flag]
+						if calc == 'carry':
+							needsCarry = True
+						elif calc == 'half-carry':
+							needsHalf = True
+						elif calc == 'overflow':
+							needsOflow = True
+				if needsCarry or needsOflow or needsHalf:
+					size = prog.paramSize(rawParams[1])
+					if needsCarry:
+						size *= 2
+					decl,name = prog.getTemp(size)
+					dst = prog.carryFlowDst = name
+					prog.lastA = 0
+					prog.lastB = params[0]
+					prog.lastBFlow = params[0]
+			return decl + '\n\t{dst} = {op}{a};'.format(
+				dst = dst, a = params[0], op = op
 			)
 		self.impls['c'] = _impl
 		self.outOp = (1,)
@@ -244,11 +308,21 @@
 		return not self.evalFun is None
 	def numArgs(self):
 		return self.evalFun.__code__.co_argcount
-	def generate(self, otype, prog, params, rawParams):
+	def numParams(self):
+		if self.outOp:
+			params = max(self.outOp) + 1
+		else:
+			params = 0
+		if self.evalFun:
+			params = max(params, self.numArgs())
+		return params
+	def generate(self, otype, prog, params, rawParams, flagUpdates):
 		if self.impls[otype].__code__.co_argcount == 2:
 			return self.impls[otype](prog, params)
+		elif self.impls[otype].__code__.co_argcount == 3:
+			return self.impls[otype](prog, params, rawParams)
 		else:
-			return self.impls[otype](prog, params, rawParams)
+			return self.impls[otype](prog, params, rawParams, flagUpdates)
 		
 		
 def _xchgCImpl(prog, params, rawParams):
@@ -261,50 +335,54 @@
 		table = 'main'
 	else:
 		table = params[1]
-	return '\n\timpl_{tbl}[{op}](context);'.format(tbl = table, op = params[0])
+	if prog.dispatch == 'call':
+		return '\n\timpl_{tbl}[{op}](context);'.format(tbl = table, op = params[0])
+	elif prog.dispatch == 'goto':
+		return '\n\tgoto *impl_{tbl}[{op}];'.format(tbl = table, op = params[0])
+	else:
+		raise Exception('Unsupported dispatch type ' + prog.dispatch)
 
 def _updateFlagsCImpl(prog, params, rawParams):
-	i = 0
-	last = ''
-	autoUpdate = set()
-	explicit = {}
-	for c in params[0]:
-		if c.isdigit():
-			if last.isalpha():
-				num = int(c)
-				if num > 1:
-					raise Exception(c + ' is not a valid digit for update_flags')
-				explicit[last] = num
-				last = c
-			else:
-				raise Exception('Digit must follow flag letter in update_flags')
-		else:
-			if last.isalpha():
-				autoUpdate.add(last)
-			last = c
-	if last.isalpha():
-		autoUpdate.add(last)
+	autoUpdate, explicit = prog.flags.parseFlagUpdate(params[0])
 	output = []
-	#TODO: handle autoUpdate flags
+	parity = None
+	directFlags = {}
 	for flag in autoUpdate:
 		calc = prog.flags.flagCalc[flag]
 		calc,_,resultBit = calc.partition('-')
-		lastDst = prog.resolveParam(prog.lastDst, None, {})
+		if prog.carryFlowDst:
+			lastDst = prog.carryFlowDst
+		else:
+			lastDst = prog.resolveParam(prog.lastDst, prog.currentScope, {})
 		storage = prog.flags.getStorage(flag)
-		if calc == 'bit' or calc == 'sign':
+		if calc == 'bit' or calc == 'sign' or calc == 'carry' or calc == 'half' or calc == 'overflow':
+			myRes = lastDst
 			if calc == 'sign':
 				resultBit = prog.paramSize(prog.lastDst) - 1
+			elif calc == 'carry':
+				if prog.lastOp.op in ('asr', 'lsr'):
+					resultBit = 0
+					myRes = prog.lastA
+				else:
+					resultBit = prog.paramSize(prog.lastDst)
+					if prog.lastOp.op == 'ror':
+						resultBit -= 1
+			elif calc == 'half':
+				resultBit = prog.paramSize(prog.lastDst) - 4
+				myRes = '({a} ^ {b} ^ {res})'.format(a = prog.lastA, b = prog.lastB, res = lastDst)
+			elif calc == 'overflow':
+				resultBit = prog.paramSize(prog.lastDst) - 1
+				myRes = '((({a} ^ {b})) & ({a} ^ {res}))'.format(a = prog.lastA, b = prog.lastBFlow, res = lastDst)
 			else:
-				resultBit = int(resultBit)
+				#Note: offsetting this by the operation size - 8 makes sense for the Z80
+				#but might not for other CPUs with this kind of fixed bit flag behavior
+				resultBit = int(resultBit) + prog.paramSize(prog.lastDst) - 8
 			if type(storage) is tuple:
 				reg,storageBit = storage
-				reg = prog.resolveParam(reg, None, {})
 				if storageBit == resultBit:
-					#TODO: optimize this case
-					output.append('\n\t{reg} = ({reg} & ~{mask}U) | ({res} & {mask}U);'.format(
-						reg = reg, mask = 1 << resultBit, res = lastDst
-					))
+					directFlags.setdefault((reg, myRes), []).append(resultBit)
 				else:
+					reg = prog.resolveParam(reg, None, {})
 					if resultBit > storageBit:
 						op = '>>'
 						shift = resultBit - storageBit
@@ -312,34 +390,77 @@
 						op = '<<'
 						shift = storageBit - resultBit
 					output.append('\n\t{reg} = ({reg} & ~{mask}U) | ({res} {op} {shift}U & {mask}U);'.format(
-						reg = reg, mask = 1 << storageBit, res = lastDst, op = op, shift = shift
+						reg = reg, mask = 1 << storageBit, res = myRes, op = op, shift = shift
 					))
 			else:
 				reg = prog.resolveParam(storage, None, {})
-				output.append('\n\t{reg} = {res} & {mask}U;'.format(reg=reg, res=lastDst, mask = 1 << resultBit))
+				maxBit = prog.paramSize(storage) - 1
+				if resultBit > maxBit:
+					output.append('\n\t{reg} = {res} >> {shift} & {mask}U;'.format(reg=reg, res=myRes, shift = resultBit - maxBit, mask = 1 << maxBit))
+				else:
+					output.append('\n\t{reg} = {res} & {mask}U;'.format(reg=reg, res=myRes, mask = 1 << resultBit))
 		elif calc == 'zero':
+			if prog.carryFlowDst:
+				realSize = prog.paramSize(prog.lastDst)
+				if realSize != prog.paramSize(prog.carryFlowDst):
+					lastDst = '({res} & {mask})'.format(res=lastDst, mask = (1 << realSize) - 1)
 			if type(storage) is tuple:
 				reg,storageBit = storage
 				reg = prog.resolveParam(reg, None, {})
 				output.append('\n\t{reg} = {res} ? ({reg} & {mask}U) : ({reg} | {bit}U);'.format(
 					reg = reg, mask = ~(1 << storageBit), res = lastDst, bit = 1 << storageBit
 				))
-			elif prog.paramSize(prog.lastDst) > prog.paramSize(storage):
-				reg = prog.resolveParam(storage, None, {})
-				output.append('\n\t{reg} = {res} != 0;'.format(
-					reg = reg, res = lastDst
-				))
 			else:
 				reg = prog.resolveParam(storage, None, {})
-				output.append('\n\t{reg} = {res};'.format(reg = reg, res = lastDst))
-		elif calc == 'half-carry':
-			pass
-		elif calc == 'carry':
-			pass
-		elif calc == 'overflow':
-			pass
+				output.append('\n\t{reg} = {res} == 0;'.format(
+					reg = reg, res = lastDst
+				))
 		elif calc == 'parity':
-			pass
+			parity = storage
+			paritySize = prog.paramSize(prog.lastDst)
+			if prog.carryFlowDst:
+				parityDst = paritySrc = prog.carryFlowDst
+			else:
+				paritySrc = lastDst
+				decl,name = prog.getTemp(paritySize)
+				output.append(decl)
+				parityDst = name
+		else:
+			raise Exception('Unknown flag calc type: ' + calc)
+	for reg, myRes in directFlags:
+		bits = directFlags[(reg, myRes)]
+		resolved = prog.resolveParam(reg, None, {})
+		if len(bits) == len(prog.flags.storageToFlags[reg]):
+			output.append('\n\t{reg} = {res};'.format(reg = resolved, res = myRes))
+		else:
+			mask = 0
+			for bit in bits:
+				mask |= 1 << bit
+			output.append('\n\t{reg} = ({reg} & ~{mask}U) | ({res} & {mask}U);'.format(
+				reg = resolved, mask = mask, res = myRes
+			))
+	if prog.carryFlowDst:
+		if prog.lastOp.op != 'cmp':
+			output.append('\n\t{dst} = {tmpdst};'.format(dst = prog.resolveParam(prog.lastDst, prog.currentScope, {}), tmpdst = prog.carryFlowDst))
+		prog.carryFlowDst = None
+	if parity:
+		if paritySize > 8:
+			if paritySize > 16:
+				output.append('\n\t{dst} = {src} ^ ({src} >> 16);'.format(dst=parityDst, src=paritySrc))
+				paritySrc = parityDst
+			output.append('\n\t{dst} = {src} ^ ({src} >> 8);'.format(dst=parityDst, src=paritySrc))
+			paritySrc = parityDst
+		output.append('\n\t{dst} = ({src} ^ ({src} >> 4)) & 0xF;'.format(dst=parityDst, src=paritySrc))
+		if type(parity) is tuple:
+			reg,bit = parity
+			reg = prog.resolveParam(reg, None, {})
+			output.append('\n\t{flag} = ({flag} & ~{mask}U) | ((0x6996 >> {parity}) << {bit} & {mask}U);'.format(
+				flag=reg, mask = 1 << bit, bit = bit, parity = parityDst
+			))
+		else:
+			reg = prog.resolveParam(parity, None, {})
+			output.append('\n\t{flag} = 0x9669 >> {parity} & 1;'.format(flag=reg, parity=parityDst))
+			
 	#TODO: combine explicit flags targeting the same storage location
 	for flag in explicit:
 		location = prog.flags.getStorage(flag)
@@ -358,31 +479,213 @@
 			output.append('\n\t{reg} = {val};'.format(reg=reg, val=explicit[flag]))
 	return ''.join(output)
 	
-def _cmpCImpl(prog, params):
-	size = prog.paramSize(params[1])
+def _cmpCImpl(prog, params, rawParams, flagUpdates):
+	size = prog.paramSize(rawParams[1])
+	needsCarry = False
+	if flagUpdates:
+		for flag in flagUpdates:
+			calc = prog.flags.flagCalc[flag]
+			if calc == 'carry':
+				needsCarry = True
+				break
+	if needsCarry:
+		size *= 2
 	tmpvar = 'cmp_tmp{sz}__'.format(sz=size)
-	typename = ''
+	if flagUpdates:
+		prog.carryFlowDst = tmpvar
+		prog.lastA = params[1]
+		prog.lastB = params[0]
+		prog.lastBFlow = params[0]
 	scope = prog.getRootScope()
 	if not scope.resolveLocal(tmpvar):
 		scope.addLocal(tmpvar, size)
-	prog.lastDst = tmpvar
+	prog.lastDst = rawParams[1]
 	return '\n\t{var} = {b} - {a};'.format(var = tmpvar, a = params[0], b = params[1])
 
-def _asrCImpl(prog, params, rawParams):
-	shiftSize = prog.paramSize(rawParams[0])
-	mask = 1 << (shiftSize - 1)
-	return '\n\t{dst} = ({a} >> {b}) | ({a} & {mask});'.format(a = params[0], b = params[1], dst = params[2], mask = mask)
-		
+def _asrCImpl(prog, params, rawParams, flagUpdates):
+	needsCarry = False
+	if flagUpdates:
+		for flag in flagUpdates:
+			calc = prog.flags.flagCalc[flag]
+			if calc == 'carry':
+				needsCarry = True
+	decl = ''
+	size = prog.paramSize(rawParams[2])
+	if needsCarry:
+		decl,name = prog.getTemp(size * 2)
+		dst = prog.carryFlowDst = name
+		prog.lastA = params[0]
+	else:
+		dst = params[2]
+	mask = 1 << (size - 1)
+	return decl + '\n\t{dst} = ({a} >> {b}) | ({a} & {mask} ? 0xFFFFFFFFU << ({size} - {b}) : 0);'.format(
+		a = params[0], b = params[1], dst = dst, mask = mask, size=size)
+	
+def _sext(size, src):
+	if size == 16:
+		return src | 0xFF00 if src & 0x80 else src
+	else:
+		return src | 0xFFFF0000 if src & 0x8000 else src
+
+def _sextCImpl(prog, params, rawParms):
+	if params[0] == 16:
+		fmt = '\n\t{dst} = {src} & 0x80 ? {src} | 0xFF00 : {src};'
+	else:
+		fmt = '\n\t{dst} = {src} & 0x8000 ? {src} | 0xFFFF0000 : {src};'
+	return fmt.format(src=params[1], dst=params[2])
+	
+def _getCarryCheck(prog):
+	carryFlag = None
+	for flag in prog.flags.flagCalc:
+		if prog.flags.flagCalc[flag] == 'carry':
+			carryFlag = flag
+	if carryFlag is None:
+		raise Exception('adc requires a defined carry flag')
+	carryStorage = prog.flags.getStorage(carryFlag)
+	if type(carryStorage) is tuple:
+		reg,bit = carryStorage
+		reg = prog.resolveReg(reg, None, (), False)
+		return '({reg} & 1 << {bit})'.format(reg=reg, bit=bit)
+	else:
+		return prog.resolveReg(carryStorage, None, (), False)
+
+def _adcCImpl(prog, params, rawParams, flagUpdates):
+	needsCarry = needsOflow = needsHalf = False
+	if flagUpdates:
+		for flag in flagUpdates:
+			calc = prog.flags.flagCalc[flag]
+			if calc == 'carry':
+				needsCarry = True
+			elif calc == 'half-carry':
+				needsHalf = True
+			elif calc == 'overflow':
+				needsOflow = True
+	decl = ''
+	carryCheck = _getCarryCheck(prog)
+	if needsCarry or needsOflow or needsHalf:
+		size = prog.paramSize(rawParams[2])
+		if needsCarry:
+			size *= 2
+		decl,name = prog.getTemp(size)
+		dst = prog.carryFlowDst = name
+		prog.lastA = params[0]
+		prog.lastB = params[1]
+		prog.lastBFlow = '(~{b})'.format(b=params[1])
+	else:
+		dst = params[2]
+	return decl + '\n\t{dst} = {a} + {b} + ({check} ? 1 : 0);'.format(dst = dst,
+		a = params[0], b = params[1], check = carryCheck
+	)
+
+def _sbcCImpl(prog, params, rawParams, flagUpdates):
+	needsCarry = needsOflow = needsHalf = False
+	if flagUpdates:
+		for flag in flagUpdates:
+			calc = prog.flags.flagCalc[flag]
+			if calc == 'carry':
+				needsCarry = True
+			elif calc == 'half-carry':
+				needsHalf = True
+			elif calc == 'overflow':
+				needsOflow = True
+	decl = ''
+	carryCheck = _getCarryCheck(prog)
+	if needsCarry or needsOflow or needsHalf:
+		size = prog.paramSize(rawParams[2])
+		if needsCarry:
+			size *= 2
+		decl,name = prog.getTemp(size)
+		dst = prog.carryFlowDst = name
+		prog.lastA = params[1]
+		prog.lastB = params[0]
+		prog.lastBFlow = params[0]
+	else:
+		dst = params[2]
+	return decl + '\n\t{dst} = {b} - {a} - ({check} ? 1 : 0);'.format(dst = dst,
+		a = params[0], b = params[1], check=_getCarryCheck(prog)
+	)
+	
+def _rolCImpl(prog, params, rawParams, flagUpdates):
+	needsCarry = False
+	if flagUpdates:
+		for flag in flagUpdates:
+			calc = prog.flags.flagCalc[flag]
+			if calc == 'carry':
+				needsCarry = True
+	decl = ''
+	size = prog.paramSize(rawParams[2])
+	if needsCarry:
+		decl,name = prog.getTemp(size * 2)
+		dst = prog.carryFlowDst = name
+	else:
+		dst = params[2]
+	return decl + '\n\t{dst} = {a} << {b} | {a} >> ({size} - {b});'.format(dst = dst,
+		a = params[0], b = params[1], size=size
+	)
+	
+def _rlcCImpl(prog, params, rawParams, flagUpdates):
+	needsCarry = False
+	if flagUpdates:
+		for flag in flagUpdates:
+			calc = prog.flags.flagCalc[flag]
+			if calc == 'carry':
+				needsCarry = True
+	decl = ''
+	carryCheck = _getCarryCheck(prog)
+	size = prog.paramSize(rawParams[2])
+	if needsCarry:
+		decl,name = prog.getTemp(size * 2)
+		dst = prog.carryFlowDst = name
+	else:
+		dst = params[2]
+	return decl + '\n\t{dst} = {a} << {b} | {a} >> ({size} + 1 - {b}) | ({check} ? 1 : 0) << ({b} - 1);'.format(dst = dst,
+		a = params[0], b = params[1], size=size, check=carryCheck
+	)
+	
+def _rorCImpl(prog, params, rawParams, flagUpdates):
+	size = prog.paramSize(rawParams[2])
+	return '\n\t{dst} = {a} >> {b} | {a} << ({size} - {b});'.format(dst = params[2],
+		a = params[0], b = params[1], size=size
+	)
+
+def _rrcCImpl(prog, params, rawParams, flagUpdates):
+	needsCarry = False
+	if flagUpdates:
+		for flag in flagUpdates:
+			calc = prog.flags.flagCalc[flag]
+			if calc == 'carry':
+				needsCarry = True
+	decl = ''
+	carryCheck = _getCarryCheck(prog)
+	size = prog.paramSize(rawParams[2])
+	if needsCarry:
+		decl,name = prog.getTemp(size * 2)
+		dst = prog.carryFlowDst = name
+	else:
+		dst = params[2]
+	return decl + '\n\t{dst} = {a} >> {b} | {a} << ({size} + 1 - {b}) | ({check} ? 1 : 0) << ({size}-{b});'.format(dst = dst,
+		a = params[0], b = params[1], size=size, check=carryCheck
+	)
+	
+def _updateSyncCImpl(prog, params):
+	return '\n\t{sync}(context, target_cycle);'.format(sync=prog.sync_cycle)
+
 _opMap = {
 	'mov': Op(lambda val: val).cUnaryOperator(''),
 	'not': Op(lambda val: ~val).cUnaryOperator('~'),
 	'lnot': Op(lambda val: 0 if val else 1).cUnaryOperator('!'),
 	'neg': Op(lambda val: -val).cUnaryOperator('-'),
 	'add': Op(lambda a, b: a + b).cBinaryOperator('+'),
+	'adc': Op().addImplementation('c', 2, _adcCImpl),
 	'sub': Op(lambda a, b: b - a).cBinaryOperator('-'),
+	'sbc': Op().addImplementation('c', 2, _sbcCImpl),
 	'lsl': Op(lambda a, b: a << b).cBinaryOperator('<<'),
 	'lsr': Op(lambda a, b: a >> b).cBinaryOperator('>>'),
 	'asr': Op(lambda a, b: a >> b).addImplementation('c', 2, _asrCImpl),
+	'rol': Op().addImplementation('c', 2, _rolCImpl),
+	'rlc': Op().addImplementation('c', 2, _rlcCImpl),
+	'ror': Op().addImplementation('c', 2, _rorCImpl),
+	'rrc': Op().addImplementation('c', 2, _rrcCImpl),
 	'and': Op(lambda a, b: a & b).cBinaryOperator('&'),
 	'or':  Op(lambda a, b: a | b).cBinaryOperator('|'),
 	'xor': Op(lambda a, b: a ^ b).cBinaryOperator('^'),
@@ -390,6 +693,7 @@
 		'c', 1, lambda prog, params: '\n\t{dst} = abs({src});'.format(dst=params[1], src=params[0])
 	),
 	'cmp': Op().addImplementation('c', None, _cmpCImpl),
+	'sext': Op(_sext).addImplementation('c', 2, _sextCImpl),
 	'ocall': Op().addImplementation('c', None, lambda prog, params: '\n\t{pre}{fun}({args});'.format(
 		pre = prog.prefix, fun = params[0], args = ', '.join(['context'] + [str(p) for p in params[1:]])
 	)),
@@ -410,7 +714,8 @@
 	)),
 	'xchg': Op().addImplementation('c', (0,1), _xchgCImpl),
 	'dispatch': Op().addImplementation('c', None, _dispatchCImpl),
-	'update_flags': Op().addImplementation('c', None, _updateFlagsCImpl)
+	'update_flags': Op().addImplementation('c', None, _updateFlagsCImpl),
+	'update_sync': Op().addImplementation('c', None, _updateSyncCImpl)
 }
 
 #represents a simple DSL instruction
@@ -419,13 +724,17 @@
 		self.op = parts[0]
 		self.params = parts[1:]
 		
-	def generate(self, prog, parent, fieldVals, output, otype):
+	def generate(self, prog, parent, fieldVals, output, otype, flagUpdates):
 		procParams = []
-		allParamsConst = True
+		allParamsConst = flagUpdates is None and not prog.conditional
 		opDef = _opMap.get(self.op)
 		for param in self.params:
 			allowConst = (self.op in prog.subroutines or len(procParams) != len(self.params) - 1) and param in parent.regValues
 			isDst = (not opDef is None) and len(procParams) in opDef.outOp
+			if isDst and self.op == 'xchg':
+				#xchg uses its regs as both source and destination
+				#we need to resolve as both so that disperse/coalesce flag stuff gets done
+				prog.resolveParam(param, parent, fieldVals, allowConst, False)
 			param = prog.resolveParam(param, parent, fieldVals, allowConst, isDst)
 			
 			if (not type(param) is int) and len(procParams) != len(self.params) - 1:
@@ -448,6 +757,8 @@
 			#TODO: Disassembler
 			pass
 		elif not opDef is None:
+			if opDef.numParams() > len(procParams):
+				raise Exception('Insufficient params for ' + self.op + ' (' + ', '.join(self.params) + ')')
 			if opDef.canEval() and allParamsConst:
 				#do constant folding
 				if opDef.numArgs() >= len(procParams):
@@ -461,10 +772,29 @@
 					dst = maybeLocal
 				parent.regValues[dst] = result
 				if prog.isReg(dst):
-					output.append(_opMap['mov'].generate(otype, prog, procParams, self.params))
+					shortProc = (procParams[0], procParams[-1])
+					shortParams = (self.params[0], self.params[-1])
+					output.append(_opMap['mov'].generate(otype, prog, shortProc, shortParams, None))
 			else:
-				output.append(opDef.generate(otype, prog, procParams, self.params))
+				output.append(opDef.generate(otype, prog, procParams, self.params, flagUpdates))
+				for dstIdx in opDef.outOp:
+					dst = self.params[dstIdx]
+					while dst in prog.meta:
+						dst = prog.meta[dst]
+					if dst in parent.regValues:
+						del parent.regValues[dst]
+					
 		elif self.op in prog.subroutines:
+			procParams = []
+			for param in self.params:
+				begin,sep,end = param.partition('.')
+				if sep:
+					if end in fieldVals:
+						param = begin + '.' + str(fieldVals[end])
+				else:
+					if param in fieldVals:
+						param = fieldVals[param]
+				procParams.append(param)
 			prog.subroutines[self.op].inline(prog, procParams, output, otype, parent)
 		else:
 			output.append('\n\t' + self.op + '(' + ', '.join([str(p) for p in procParams]) + ');')
@@ -517,7 +847,7 @@
 			return self.current_locals[name]
 		return self.parent.localSize(name)
 			
-	def generate(self, prog, parent, fieldVals, output, otype):
+	def generate(self, prog, parent, fieldVals, output, otype, flagUpdates):
 		prog.pushScope(self)
 		param = prog.resolveParam(self.param, parent, fieldVals)
 		if type(param) is int:
@@ -527,39 +857,42 @@
 				output.append('\n\t{')
 				for local in self.case_locals[param]:
 					output.append('\n\tuint{0}_t {1};'.format(self.case_locals[param][local], local))
-				for op in self.cases[param]:
-					op.generate(prog, self, fieldVals, output, otype)
+				self.processOps(prog, fieldVals, output, otype, self.cases[param])
 				output.append('\n\t}')
 			elif self.default:
 				self.current_locals = self.default_locals
 				output.append('\n\t{')
 				for local in self.default_locals:
 					output.append('\n\tuint{0}_t {1};'.format(self.default[local], local))
-				for op in self.default:
-					op.generate(prog, self, fieldVals, output, otype)
+				self.processOps(prog, fieldVals, output, otype, self.default)
 				output.append('\n\t}')
 		else:
+			oldCond = prog.conditional
+			prog.conditional = True
 			output.append('\n\tswitch(' + param + ')')
 			output.append('\n\t{')
 			for case in self.cases:
+				temp = prog.temp.copy()
 				self.current_locals = self.case_locals[case]
 				self.regValues = dict(self.parent.regValues)
 				output.append('\n\tcase {0}U: '.format(case) + '{')
 				for local in self.case_locals[case]:
 					output.append('\n\tuint{0}_t {1};'.format(self.case_locals[case][local], local))
-				for op in self.cases[case]:
-					op.generate(prog, self, fieldVals, output, otype)
+				self.processOps(prog, fieldVals, output, otype, self.cases[case])
 				output.append('\n\tbreak;')
 				output.append('\n\t}')
+				prog.temp = temp
 			if self.default:
+				temp = prog.temp.copy()
 				self.current_locals = self.default_locals
 				self.regValues = dict(self.parent.regValues)
 				output.append('\n\tdefault: {')
 				for local in self.default_locals:
 					output.append('\n\tuint{0}_t {1};'.format(self.default_locals[local], local))
-				for op in self.default:
-					op.generate(prog, self, fieldVals, output, otype)
+				self.processOps(prog, fieldVals, output, otype, self.default)
+				prog.temp = temp
 			output.append('\n\t}')
+			prog.conditional = oldCond
 		prog.popScope()
 	
 	def __str__(self):
@@ -579,11 +912,19 @@
 		params = [prog.resolveParam(p, parent, fieldVals) for p in prog.lastOp.params]
 		return '\n\tif ({a} >= {b}) '.format(a=params[1], b = params[0]) + '{'
 	else:
-		raise ion(">=U not implemented in the general case yet")
+		raise Exception(">=U not implemented in the general case yet")
+
+def _eqCImpl(prog, parent, fieldVals, output):
+	return '\n\tif (!{a}) {'.format(a=prog.resolveParam(prog.lastDst, None, {}))
+
+def _neqCImpl(prog, parent, fieldVals, output):
+	return '\n\tif ({a}) {'.format(a=prog.resolveParam(prog.lastDst, None, {}))
 	
 _ifCmpImpl = {
 	'c': {
-		'>=U': _geuCImpl
+		'>=U': _geuCImpl,
+		'=': _eqCImpl,
+		'!=': _neqCImpl
 	}
 }
 #represents a DSL conditional construct
@@ -606,7 +947,7 @@
 		if op.op == 'local':
 			name = op.params[0]
 			size = op.params[1]
-			self.locals[name] = size
+			self.curLocals[name] = size
 		elif op.op == 'else':
 			self.curLocals = self.elseLocals
 			self.curBody = self.elseBody
@@ -617,23 +958,25 @@
 		return self.curLocals.get(name)
 		
 	def resolveLocal(self, name):
-		if name in self.locals:
+		if name in self.curLocals:
 			return name
 		return self.parent.resolveLocal(name)
 		
 	def _genTrueBody(self, prog, fieldVals, output, otype):
 		self.curLocals = self.locals
+		subOut = []
+		self.processOps(prog, fieldVals, subOut, otype, self.body)
 		for local in self.locals:
 			output.append('\n\tuint{sz}_t {nm};'.format(sz=self.locals[local], nm=local))
-		for op in self.body:
-			op.generate(prog, self, fieldVals, output, otype)
+		output += subOut
 			
 	def _genFalseBody(self, prog, fieldVals, output, otype):
 		self.curLocals = self.elseLocals
+		subOut = []
+		self.processOps(prog, fieldVals, subOut, otype, self.elseBody)
 		for local in self.elseLocals:
 			output.append('\n\tuint{sz}_t {nm};'.format(sz=self.elseLocals[local], nm=local))
-		for op in self.elseBody:
-			op.generate(prog, self, fieldVals, output, otype)
+		output += subOut
 	
 	def _genConstParam(self, param, prog, fieldVals, output, otype):
 		if param:
@@ -641,29 +984,43 @@
 		else:
 			self._genFalseBody(prog, fieldVals, output, otype)
 			
-	def generate(self, prog, parent, fieldVals, output, otype):
+	def generate(self, prog, parent, fieldVals, output, otype, flagUpdates):
 		self.regValues = parent.regValues
 		try:
 			self._genConstParam(prog.checkBool(self.cond), prog, fieldVals, output, otype)
 		except Exception:
 			if self.cond in _ifCmpImpl[otype]:
+				oldCond = prog.conditional
+				prog.conditional = True
+				temp = prog.temp.copy()
 				output.append(_ifCmpImpl[otype][self.cond](prog, parent, fieldVals, output))
 				self._genTrueBody(prog, fieldVals, output, otype)
+				prog.temp = temp
 				if self.elseBody:
+					temp = prog.temp.copy()
 					output.append('\n\t} else {')
 					self._genFalseBody(prog, fieldVals, output, otype)
+					prog.temp = temp
 				output.append('\n\t}')
+				prog.conditional = oldCond
 			else:
 				cond = prog.resolveParam(self.cond, parent, fieldVals)
 				if type(cond) is int:
 					self._genConstParam(cond, prog, fieldVals, output, otype)
 				else:
+					temp = prog.temp.copy()
 					output.append('\n\tif ({cond}) '.format(cond=cond) + '{')
+					oldCond = prog.conditional
+					prog.conditional = True
 					self._genTrueBody(prog, fieldVals, output, otype)
+					prog.temp = temp
 					if self.elseBody:
+						temp = prog.temp.copy()
 						output.append('\n\t} else {')
 						self._genFalseBody(prog, fieldVals, output, otype)
+						prog.temp = temp
 					output.append('\n\t}')
+					prog.conditional = oldCond
 						
 	
 	def __str__(self):
@@ -679,12 +1036,14 @@
 		self.pointers = {}
 		self.regArrays = {}
 		self.regToArray = {}
+		self.addReg('cycles', 32)
+		self.addReg('sync_cycle', 32)
 	
 	def addReg(self, name, size):
 		self.regs[name] = size
 		
-	def addPointer(self, name, size):
-		self.pointers[name] = size
+	def addPointer(self, name, size, count):
+		self.pointers[name] = (size, count)
 	
 	def addRegArray(self, name, size, regs):
 		self.regArrays[name] = (size, regs)
@@ -721,12 +1080,15 @@
 	
 	def processLine(self, parts):
 		if len(parts) == 3:
-			self.addRegArray(parts[0], int(parts[1]), int(parts[2]))
+			if parts[1].startswith('ptr'):
+				self.addPointer(parts[0], parts[1][3:], int(parts[2]))
+			else:
+				self.addRegArray(parts[0], int(parts[1]), int(parts[2]))
 		elif len(parts) > 2:
 			self.addRegArray(parts[0], int(parts[1]), parts[2:])
 		else:
 			if parts[1].startswith('ptr'):
-				self.addPointer(parts[0], int(parts[1][3:]))
+				self.addPointer(parts[0], parts[1][3:], 1)
 			else:
 				self.addReg(parts[0], int(parts[1]))
 		return self
@@ -734,7 +1096,18 @@
 	def writeHeader(self, otype, hFile):
 		fieldList = []
 		for pointer in self.pointers:
-			hFile.write('\n\tuint{sz}_t *{nm};'.format(nm=pointer, sz=self.pointers[pointer]))
+			stars = '*'
+			ptype, count = self.pointers[pointer]
+			while ptype.startswith('ptr'):
+				stars += '*'
+				ptype = ptype[3:]
+			if ptype.isdigit():
+				ptype = 'uint{sz}_t'.format(sz=ptype)
+			if count > 1:
+				arr = '[{n}]'.format(n=count)
+			else:
+				arr = ''
+			hFile.write('\n\t{ptype} {stars}{nm}{arr};'.format(nm=pointer, ptype=ptype, stars=stars, arr=arr))
 		for reg in self.regs:
 			if not self.isRegArrayMember(reg):
 				fieldList.append((self.regs[reg], 1, reg))
@@ -757,6 +1130,7 @@
 		self.flagCalc = {}
 		self.flagStorage = {}
 		self.flagReg = None
+		self.storageToFlags = {}
 		self.maxBit = -1
 	
 	def processLine(self, parts):
@@ -777,6 +1151,8 @@
 				self.flagBits[flag] = bit
 			self.flagCalc[flag] = calc
 			self.flagStorage[flag] = storage
+			storage,_,storebit = storage.partition('.')
+			self.storageToFlags.setdefault(storage, []).append((storebit, flag))
 		return self
 	
 	def getStorage(self, flag):
@@ -788,6 +1164,28 @@
 		else:
 			return loc 
 	
+	def parseFlagUpdate(self, flagString):
+		last = ''
+		autoUpdate = set()
+		explicit = {}
+		for c in flagString:
+			if c.isdigit():
+				if last.isalpha():
+					num = int(c)
+					if num > 1:
+						raise Exception(c + ' is not a valid digit for update_flags')
+					explicit[last] = num
+					last = c
+				else:
+					raise Exception('Digit must follow flag letter in update_flags')
+			else:
+				if last.isalpha():
+					autoUpdate.add(last)
+				last = c
+		if last.isalpha():
+			autoUpdate.add(last)
+		return (autoUpdate, explicit)
+	
 	def disperseFlags(self, prog, otype):
 		bitToFlag = [None] * (self.maxBit+1)
 		src = prog.resolveReg(self.flagReg, None, {})
@@ -884,7 +1282,7 @@
 						src=src, dst=dst, srcbit=srcbit, dstbit=dstbit
 					))
 			if direct:
-				output.append('\n\t{dst} |= {src} & {mask}'.format(
+				output.append('\n\t{dst} |= {src} & {mask};'.format(
 					dst=dst, src=src, mask=direct
 				))
 		return ''.join(output)
@@ -902,12 +1300,20 @@
 		self.extra_tables = info.get('extra_tables', [])
 		self.context_type = self.prefix + 'context'
 		self.body = info.get('body', [None])[0]
+		self.interrupt = info.get('interrupt', [None])[0]
+		self.sync_cycle = info.get('sync_cycle', [None])[0]
 		self.includes = info.get('include', [])
 		self.flags = flags
 		self.lastDst = None
 		self.scopes = []
 		self.currentScope = None
 		self.lastOp = None
+		self.carryFlowDst = None
+		self.lastA = None
+		self.lastB = None
+		self.lastBFlow = None
+		self.conditional = False
+		self.declares = []
 		
 	def __str__(self):
 		pieces = []
@@ -930,21 +1336,21 @@
 		hFile.write('\n}} {0}options;'.format(self.prefix))
 		hFile.write('\n\ntypedef struct {')
 		hFile.write('\n\t{0}options *opts;'.format(self.prefix))
-		hFile.write('\n\tuint32_t cycles;')
 		self.regs.writeHeader(otype, hFile)
 		hFile.write('\n}} {0}context;'.format(self.prefix))
 		hFile.write('\n')
+		hFile.write('\nvoid {pre}execute({type} *context, uint32_t target_cycle);'.format(pre = self.prefix, type = self.context_type))
+		for decl in self.declares:
+			hFile.write('\n' + decl)
 		hFile.write('\n#endif //{0}_'.format(macro))
 		hFile.write('\n')
 		hFile.close()
-	def build(self, otype):
-		body = []
+		
+	def _buildTable(self, otype, table, body, lateBody):
 		pieces = []
-		for include in self.includes:
-			body.append('#include "{0}"\n'.format(include))
-		for table in self.instructions:
-			opmap = [None] * (1 << self.opsize)
-			bodymap = {}
+		opmap = [None] * (1 << self.opsize)
+		bodymap = {}
+		if table in self.instructions:
 			instructions = self.instructions[table]
 			instructions.sort()
 			for inst in instructions:
@@ -957,8 +1363,8 @@
 						self.lastOp = None
 						opmap[val] = inst.generateName(val)
 						bodymap[val] = inst.generateBody(val, self, otype)
-			
-			pieces.append('\ntypedef void (*impl_fun)({pre}context *context);'.format(pre=self.prefix))
+		
+		if self.dispatch == 'call':
 			pieces.append('\nstatic impl_fun impl_{name}[{sz}] = {{'.format(name = table, sz=len(opmap)))
 			for inst in range(0, len(opmap)):
 				op = opmap[inst]
@@ -968,20 +1374,76 @@
 					pieces.append('\n\t' + op + ',')
 					body.append(bodymap[inst])
 			pieces.append('\n};')
-		if self.body in self.subroutines:
+		elif self.dispatch == 'goto':
+			body.append('\n\tstatic void *impl_{name}[{sz}] = {{'.format(name = table, sz=len(opmap)))
+			for inst in range(0, len(opmap)):
+				op = opmap[inst]
+				if op is None:
+					body.append('\n\t\t&&unimplemented,')
+				else:
+					body.append('\n\t\t&&' + op + ',')
+					lateBody.append(bodymap[inst])
+			body.append('\n\t};')
+		else:
+			raise Exception("unimplmeneted dispatch type " + self.dispatch)
+		body.extend(pieces)
+		
+	def nextInstruction(self, otype):
+		output = []
+		if self.dispatch == 'goto':
+			if self.interrupt in self.subroutines:
+				output.append('\n\tif (context->cycles >= context->sync_cycle) {')
+			output.append('\n\tif (context->cycles >= target_cycle) { return; }')
+			if self.interrupt in self.subroutines:
+				self.meta = {}
+				self.temp = {}
+				self.subroutines[self.interrupt].inline(self, [], output, otype, None)
+				output.append('\n\t}')
+			
+			self.meta = {}
+			self.temp = {}
+			self.subroutines[self.body].inline(self, [], output, otype, None)
+		return output
+	
+	def build(self, otype):
+		body = []
+		pieces = []
+		for include in self.includes:
+			body.append('#include "{0}"\n'.format(include))
+		if self.dispatch == 'call':
+			body.append('\nstatic void unimplemented({pre}context *context)'.format(pre = self.prefix))
+			body.append('\n{')
+			body.append('\n\tfatal_error("Unimplemented instruction\\n");')
+			body.append('\n}\n')
+			body.append('\ntypedef void (*impl_fun)({pre}context *context);'.format(pre=self.prefix))
+			for table in self.extra_tables:
+				body.append('\nstatic impl_fun impl_{name}[{sz}];'.format(name = table, sz=(1 << self.opsize)))
+			body.append('\nstatic impl_fun impl_main[{sz}];'.format(sz=(1 << self.opsize)))
+		elif self.dispatch == 'goto':
+			body.append('\nvoid {pre}execute({type} *context, uint32_t target_cycle)'.format(pre = self.prefix, type = self.context_type))
+			body.append('\n{')
+			
+		for table in self.extra_tables:
+			self._buildTable(otype, table, body, pieces)
+		self._buildTable(otype, 'main', body, pieces)
+		if self.dispatch == 'call' and self.body in self.subroutines:
 			pieces.append('\nvoid {pre}execute({type} *context, uint32_t target_cycle)'.format(pre = self.prefix, type = self.context_type))
 			pieces.append('\n{')
+			pieces.append('\n\t{sync}(context, target_cycle);'.format(sync=self.sync_cycle))
 			pieces.append('\n\twhile (context->cycles < target_cycle)')
 			pieces.append('\n\t{')
+			#TODO: Handle interrupts in call dispatch mode
 			self.meta = {}
 			self.temp = {}
 			self.subroutines[self.body].inline(self, [], pieces, otype, None)
 			pieces.append('\n\t}')
 			pieces.append('\n}')
-		body.append('\nstatic void unimplemented({pre}context *context)'.format(pre = self.prefix))
-		body.append('\n{')
-		body.append('\n\tfatal_error("Unimplemented instruction");')
-		body.append('\n}\n')
+		elif self.dispatch == 'goto':
+			body.append('\n\t{sync}(context, target_cycle);'.format(sync=self.sync_cycle))
+			body += self.nextInstruction(otype)
+			pieces.append('\nunimplemented:')
+			pieces.append('\n\tfatal_error("Unimplemented instruction\\n");')
+			pieces.append('\n}')
 		return ''.join(body) +  ''.join(pieces)
 		
 	def checkBool(self, name):
@@ -992,8 +1454,8 @@
 	def getTemp(self, size):
 		if size in self.temp:
 			return ('', self.temp[size])
-		self.temp[size] = 'tmp{sz}'.format(sz=size);
-		return ('\n\tuint{sz}_t tmp{sz};'.format(sz=size), self.temp[size])
+		self.temp[size] = 'gen_tmp{sz}__'.format(sz=size);
+		return ('\n\tuint{sz}_t gen_tmp{sz}__;'.format(sz=size), self.temp[size])
 		
 	def resolveParam(self, param, parent, fieldVals, allowConstant=True, isdst=False):
 		keepGoing = True
@@ -1013,14 +1475,20 @@
 						return parent.regValues[param]
 					maybeLocal = parent.resolveLocal(param)
 					if maybeLocal:
+						if isdst:
+							self.lastDst = param
 						return maybeLocal
 				if param in fieldVals:
 					param = fieldVals[param]
+					fieldVals = {}
+					keepGoing = True
 				elif param in self.meta:
 					param = self.meta[param]
 					keepGoing = True
 				elif self.isReg(param):
-					param = self.resolveReg(param, parent, fieldVals, isdst)
+					return self.resolveReg(param, parent, fieldVals, isdst)
+		if isdst:
+			self.lastDst = param
 		return param
 	
 	def isReg(self, name):
@@ -1070,9 +1538,12 @@
 	
 	
 	def paramSize(self, name):
-		size = self.currentScope.localSize(name)
-		if size:
-			return size
+		if name in self.meta:
+			return self.paramSize(self.meta[name])
+		for i in range(len(self.scopes) -1, -1, -1):
+			size = self.scopes[i].localSize(name)
+			if size:
+				return size
 		begin,sep,_ = name.partition('.')
 		if sep and self.regs.isRegArray(begin):
 			return self.regs.regArrays[begin][0]
@@ -1092,11 +1563,13 @@
 	def getRootScope(self):
 		return self.scopes[0]
 
-def parse(f):
+def parse(args):
+	f = args.source
 	instructions = {}
 	subroutines = {}
 	registers = None
 	flags = None
+	declares = []
 	errors = []
 	info = {}
 	line_num = 0
@@ -1108,9 +1581,22 @@
 			continue
 		if line[0].isspace():
 			if not cur_object is None:
-				parts = [el.strip() for el in line.split(' ')]
+				sep = True
+				parts = []
+				while sep:
+					before,sep,after = line.partition('"')
+					before = before.strip()
+					if before:
+						parts += [el.strip() for el in before.split(' ')]
+					if sep:
+						#TODO: deal with escaped quotes
+						inside,sep,after = after.partition('"')
+						parts.append('"' + inside + '"')
+					line = after
 				if type(cur_object) is dict:
 					cur_object[parts[0]] = parts[1:]
+				elif type(cur_object) is list:
+					cur_object.append(' '.join(parts))
 				else:
 					cur_object = cur_object.processLine(parts)
 				
@@ -1168,6 +1654,8 @@
 				if flags is None:
 					flags = Flags()
 				cur_object = flags
+			elif line.strip() == 'declare':
+				cur_object = declares
 			else:
 				cur_object = SubRoutine(line.strip())
 				subroutines[cur_object.name] = cur_object
@@ -1175,8 +1663,19 @@
 		print(errors)
 	else:
 		p = Program(registers, instructions, subroutines, info, flags)
+		p.dispatch = args.dispatch
+		p.declares = declares
 		p.booleans['dynarec'] = False
 		p.booleans['interp'] = True
+		if args.define:
+			for define in args.define:
+				name,sep,val = define.partition('=')
+				name = name.strip()
+				val = val.strip()
+				if sep:
+					p.booleans[name] = bool(val)
+				else:
+					p.booleans[name] = True
 		
 		if 'header' in info:
 			print('#include "{0}"'.format(info['header'][0]))
@@ -1186,8 +1685,12 @@
 		print(p.build('c'))
 
 def main(argv):
-	f =  open(argv[1])
-	parse(f)
+	from argparse import ArgumentParser, FileType
+	argParser = ArgumentParser(description='CPU emulator DSL compiler')
+	argParser.add_argument('source', type=FileType('r'))
+	argParser.add_argument('-D', '--define', action='append')
+	argParser.add_argument('-d', '--dispatch', choices=('call', 'switch', 'goto'), default='call')
+	parse(argParser.parse_args(argv[1:]))
 
 if __name__ == '__main__':
 	from sys import argv
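
With the new -d goto option, cpu_dsl.py emits a single {pre}execute(context, target_cycle) function that dispatches via GCC/Clang computed gotos: each opcode body becomes a label, _buildTable() fills a static array of label addresses, the DSL's dispatch op turns into goto *impl_<table>[op], and nextInstruction() re-checks target_cycle (and sync_cycle when an interrupt subroutine is declared) before every jump. The self-contained toy below mirrors that shape; the three-opcode table and field names are invented for illustration and are not the generator's actual output.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t cycles;
	uint32_t pc;
	const uint8_t *prog;
} toy_context;

/* Relies on the computed-goto extension (&&label / goto *ptr), just like the
 * generated cores; build with gcc or clang. */
static void toy_execute(toy_context *context, uint32_t target_cycle)
{
	static void *impl_main[3] = { &&op_nop, &&op_slow, &&op_halt };
	goto *impl_main[context->prog[context->pc++]];
op_nop: {
	context->cycles += 4;
	if (context->cycles >= target_cycle) { return; }
	goto *impl_main[context->prog[context->pc++]];
}
op_slow: {
	context->cycles += 12;
	if (context->cycles >= target_cycle) { return; }
	goto *impl_main[context->prog[context->pc++]];
}
op_halt:
	return;
}

int main(void)
{
	const uint8_t prog[] = { 0, 1, 0, 2 };
	toy_context c = { 0, 0, prog };
	toy_execute(&c, 1000);
	printf("ran %u cycles\n", (unsigned)c.cycles);
	return 0;
}
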
--- a/debug.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/debug.c	Fri Mar 01 14:17:29 2019 -0800
@@ -9,6 +9,13 @@
 #include "render.h"
 #include "util.h"
 #include "terminal.h"
+#include "z80inst.h"
+
+#ifdef NEW_CORE
+#define Z80_OPTS opts
+#else
+#define Z80_OPTS options
+#endif
 
 static bp_def * breakpoints = NULL;
 static bp_def * zbreakpoints = NULL;
@@ -190,6 +197,7 @@
 	}
 	switch (param[0])
 	{
+#ifndef NEW_CORE
 	case 'a':
 		if (param[1] == 'f') {
 			if(param[2] == '\'') {
@@ -331,6 +339,7 @@
 			value = context->im;
 		}
 		break;
+#endif
 	case 's':
 		if (param[1] == 'p') {
 			value = context->sp;
@@ -342,7 +351,7 @@
 			if (p_addr < 0x4000) {
 				value = system->zram[p_addr & 0x1FFF];
 			} else if(p_addr >= 0x8000) {
-				uint32_t v_addr = context->bank_reg << 15;
+				uint32_t v_addr = system->z80_bank_reg << 15;
 				v_addr += p_addr & 0x7FFF;
 				if (v_addr < 0x400000) {
 					value = system->cart[v_addr/2];
@@ -377,7 +386,7 @@
 	} else {
 		zremove_breakpoint(context, address);
 	}
-	uint8_t * pc = get_native_pointer(address, (void **)context->mem_pointers, &context->options->gen);
+	uint8_t * pc = get_native_pointer(address, (void **)context->mem_pointers, &context->Z80_OPTS->gen);
 	if (!pc) {
 		fatal_error("Failed to get native pointer on entering Z80 debugger at address %X\n", address);
 	}
@@ -487,19 +496,21 @@
 					if (inst.addr_mode == Z80_IMMED) {
 						after = inst.immed;
 					} else if (inst.ea_reg == Z80_HL) {
+#ifndef NEW_CORE
 						after = context->regs[Z80_H] << 8 | context->regs[Z80_L];
 					} else if (inst.ea_reg == Z80_IX) {
 						after = context->regs[Z80_IXH] << 8 | context->regs[Z80_IXL];
 					} else if (inst.ea_reg == Z80_IY) {
 						after = context->regs[Z80_IYH] << 8 | context->regs[Z80_IYL];
+#endif
 					}
 				} else if(inst.op == Z80_JR) {
 					after += inst.immed;
 				} else if(inst.op == Z80_RET) {
-					uint8_t *sp = get_native_pointer(context->sp, (void **)context->mem_pointers, &context->options->gen);
+					uint8_t *sp = get_native_pointer(context->sp, (void **)context->mem_pointers, &context->Z80_OPTS->gen);
 					if (sp) {
 						after = *sp;
-						sp = get_native_pointer((context->sp + 1) & 0xFFFF, (void **)context->mem_pointers, &context->options->gen);
+						sp = get_native_pointer((context->sp + 1) & 0xFFFF, (void **)context->mem_pointers, &context->Z80_OPTS->gen);
 						if (sp) {
 							after |= *sp << 8;
 						}
@@ -527,9 +538,9 @@
 					break;
 				}
 				memmap_chunk const *ram_chunk = NULL;
-				for (int i = 0; i < context->options->gen.memmap_chunks; i++)
+				for (int i = 0; i < context->Z80_OPTS->gen.memmap_chunks; i++)
 				{
-					memmap_chunk const *cur = context->options->gen.memmap + i;
+					memmap_chunk const *cur = context->Z80_OPTS->gen.memmap + i;
 					if (cur->flags & MMAP_WRITE) {
 						ram_chunk = cur;
 						break;
@@ -540,7 +551,7 @@
 					if (size > ram_chunk->mask) {
 						size = ram_chunk->mask+1;
 					}
-					uint8_t *buf = get_native_pointer(ram_chunk->start, (void **)context->mem_pointers, &context->options->gen);
+					uint8_t *buf = get_native_pointer(ram_chunk->start, (void **)context->mem_pointers, &context->Z80_OPTS->gen);
 					FILE * f = fopen(param, "wb");
 					if (f) {
 						if(fwrite(buf, 1, size, f) != size) {
@@ -558,8 +569,8 @@
 			}
 			default:
 				if (
-					!context->options->gen.debug_cmd_handler
-					|| !context->options->gen.debug_cmd_handler(&system->header, input_buf)
+					!context->Z80_OPTS->gen.debug_cmd_handler
+					|| !context->Z80_OPTS->gen.debug_cmd_handler(&system->header, input_buf)
 				) {
 					fprintf(stderr, "Unrecognized debugger command %s\n", input_buf);
 				}
--- a/debug.h	Fri Mar 01 08:17:57 2019 -0800
+++ b/debug.h	Fri Mar 01 14:17:29 2019 -0800
@@ -3,11 +3,15 @@
 
 #include <stdint.h>
 #include "m68k_core.h"
+#ifdef NEW_CORE
+#include "z80.h"
+#else
 #ifdef USE_NATIVE
 #include "z80_to_x86.h"
 #else
 #include "mame_z80/z80.h"
 #endif
+#endif
 
 typedef struct disp_def {
 	struct disp_def * next;
--- a/gdb_remote.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/gdb_remote.c	Fri Mar 01 14:17:29 2019 -0800
@@ -170,7 +170,7 @@
 	if (address >= 0xA00000 && address < 0xA04000) {
 		gen->zram[address & 0x1FFF] = value;
 		genesis_context * gen = context->system;
-#ifndef NO_Z80
+#if !defined(NO_Z80) && !defined(NEW_CORE)
 		z80_handle_code_write(address & 0x1FFF, gen->z80);
 #endif
 		return;
--- a/genesis.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/genesis.c	Fri Mar 01 14:17:29 2019 -0800
@@ -39,6 +39,15 @@
 #define MAX_SOUND_CYCLES 100000	
 #endif
 
+#ifdef NEW_CORE
+#define Z80_CYCLE cycles
+#define Z80_OPTS opts
+#define z80_handle_code_write(...)
+#else
+#define Z80_CYCLE current_cycle
+#define Z80_OPTS options
+#endif
+
 void genesis_serialize(genesis_context *gen, serialize_buffer *buf, uint32_t m68k_pc)
 {
 	start_section(buf, SECTION_68000);
@@ -64,7 +73,7 @@
 	start_section(buf, SECTION_GEN_BUS_ARBITER);
 	save_int8(buf, gen->z80->reset);
 	save_int8(buf, gen->z80->busreq);
-	save_int16(buf, gen->z80->bank_reg);
+	save_int16(buf, gen->z80_bank_reg);
 	end_section(buf);
 	
 	start_section(buf, SECTION_SEGA_IO_1);
@@ -141,11 +150,12 @@
 
 static void update_z80_bank_pointer(genesis_context *gen)
 {
-	if (gen->z80->bank_reg < 0x140) {
-		gen->z80->mem_pointers[1] = get_native_pointer(gen->z80->bank_reg << 15, (void **)gen->m68k->mem_pointers, &gen->m68k->options->gen);
+	if (gen->z80_bank_reg < 0x140) {
+		gen->z80->mem_pointers[1] = get_native_pointer(gen->z80_bank_reg << 15, (void **)gen->m68k->mem_pointers, &gen->m68k->options->gen);
 	} else {
 		gen->z80->mem_pointers[1] = NULL;
 	}
+	z80_invalidate_code_range(gen->z80, 0x8000, 0xFFFF);
 }
 
 static void bus_arbiter_deserialize(deserialize_buffer *buf, void *vgen)
@@ -153,7 +163,7 @@
 	genesis_context *gen = vgen;
 	gen->z80->reset = load_int8(buf);
 	gen->z80->busreq = load_int8(buf);
-	gen->z80->bank_reg = load_int16(buf) & 0x1FF;
+	gen->z80_bank_reg = load_int16(buf) & 0x1FF;
 }
 
 static void adjust_int_cycle(m68k_context * context, vdp_context * v_context);
@@ -289,21 +299,32 @@
 static void z80_next_int_pulse(z80_context * z_context)
 {
 	genesis_context * gen = z_context->system;
-	vdp_run_context(gen->vdp, z_context->current_cycle);
+#ifdef NEW_CORE
+	z_context->int_cycle = vdp_next_vint_z80(gen->vdp);
+	z_context->int_end_cycle = z_context->int_cycle + Z80_INT_PULSE_MCLKS;
+	z_context->int_value = 0xFF;
+	z80_sync_cycle(z_context, z_context->sync_cycle);
+#else
 	z_context->int_pulse_start = vdp_next_vint_z80(gen->vdp);
 	z_context->int_pulse_end = z_context->int_pulse_start + Z80_INT_PULSE_MCLKS;
 	z_context->im2_vector = 0xFF;
+#endif
 }
 
 static void sync_z80(z80_context * z_context, uint32_t mclks)
 {
 #ifndef NO_Z80
 	if (z80_enabled) {
+#ifdef NEW_CORE
+		if (z_context->int_cycle == 0xFFFFFFFFU) {
+			z80_next_int_pulse(z_context);
+		}
+#endif
 		z80_run(z_context, mclks);
 	} else
 #endif
 	{
-		z_context->current_cycle = mclks;
+		z_context->Z80_CYCLE = mclks;
 	}
 }
 
@@ -414,9 +435,14 @@
 			gen->header.enter_debugger = 0;
 			debugger(context, address);
 		}
+#ifdef NEW_CORE
+		if (gen->header.save_state) {
+#else
 		if (gen->header.save_state && (z_context->pc || !z_context->native_pc || z_context->reset || !z_context->busreq)) {
+#endif
 			uint8_t slot = gen->header.save_state - 1;
 			gen->header.save_state = 0;
+#ifndef NEW_CORE
 			if (z_context->native_pc && !z_context->reset) {
 				//advance Z80 core to the start of an instruction
 				while (!z_context->pc)
@@ -424,6 +450,7 @@
 					sync_z80(z_context, z_context->current_cycle + MCLKS_PER_Z80);
 				}
 			}
+#endif
 			char *save_path = slot == SERIALIZE_SLOT ? NULL : get_slot_name(&gen->header, slot, use_native_states ? "state" : "gst");
 			if (use_native_states || slot == SERIALIZE_SLOT) {
 				serialize_buffer state;
@@ -576,16 +603,16 @@
 	if (vdp_port < 0x10) {
 		//These probably won't currently interact well with the 68K accessing the VDP
 		if (vdp_port < 4) {
-			vdp_run_context(gen->vdp, context->current_cycle);
+			vdp_run_context(gen->vdp, context->Z80_CYCLE);
 			vdp_data_port_write(gen->vdp, value << 8 | value);
 		} else if (vdp_port < 8) {
-			vdp_run_context_full(gen->vdp, context->current_cycle);
+			vdp_run_context_full(gen->vdp, context->Z80_CYCLE);
 			vdp_control_port_write(gen->vdp, value << 8 | value);
 		} else {
 			fatal_error("Illegal write to HV Counter port %X\n", vdp_port);
 		}
 	} else if (vdp_port < 0x18) {
-		sync_sound(gen, context->current_cycle);
+		sync_sound(gen, context->Z80_CYCLE);
 		psg_write(gen->psg, value);
 	} else {
 		vdp_test_port_write(gen->vdp, value);
@@ -664,7 +691,7 @@
 	genesis_context * gen = context->system;
 	//VDP access goes over the 68K bus like a bank area access
 	//typical delay from bus arbitration
-	context->current_cycle += 3 * MCLKS_PER_Z80;
+	context->Z80_CYCLE += 3 * MCLKS_PER_Z80;
 	//TODO: add cycle for an access right after a previous one
 	//TODO: Below cycle time is an estimate based on the time between 68K !BG goes low and Z80 !MREQ goes high
 	//      Needs a new logic analyzer capture to get the actual delay on the 68K side
@@ -675,7 +702,7 @@
 	uint16_t ret;
 	if (vdp_port < 0x10) {
 		//These probably won't currently interact well with the 68K accessing the VDP
-		vdp_run_context(gen->vdp, context->current_cycle);
+		vdp_run_context(gen->vdp, context->Z80_CYCLE);
 		if (vdp_port < 4) {
 			ret = vdp_data_port_read(gen->vdp);
 		} else if (vdp_port < 8) {
@@ -716,9 +743,9 @@
 					ym_address_write_part1(gen->ym, value);
 				}
 			} else if (location == 0x6000) {
-				gen->z80->bank_reg = (gen->z80->bank_reg >> 1 | value << 8) & 0x1FF;
-				if (gen->z80->bank_reg < 0x80) {
-					gen->z80->mem_pointers[1] = (gen->z80->bank_reg << 15) + ((char *)gen->z80->mem_pointers[2]);
+				gen->z80_bank_reg = (gen->z80_bank_reg >> 1 | value << 8) & 0x1FF;
+				if (gen->z80_bank_reg < 0x80) {
+					gen->z80->mem_pointers[1] = (gen->z80_bank_reg << 15) + ((char *)gen->z80->mem_pointers[2]);
 				} else {
 					gen->z80->mem_pointers[1] = NULL;
 				}
@@ -947,7 +974,7 @@
 {
 	z80_context * context = vcontext;
 	genesis_context * gen = context->system;
-	sync_sound(gen, context->current_cycle);
+	sync_sound(gen, context->Z80_CYCLE);
 	if (location & 1) {
 		ym_data_write(gen->ym, value);
 	} else if (location & 2) {
@@ -962,7 +989,7 @@
 {
 	z80_context * context = vcontext;
 	genesis_context * gen = context->system;
-	sync_sound(gen, context->current_cycle);
+	sync_sound(gen, context->Z80_CYCLE);
 	return ym_read_status(gen->ym);
 }
 
@@ -972,16 +999,16 @@
 	genesis_context *gen = context->system;
 
 	if (gen->bus_busy) {
-#ifdef USE_NATIVE
-		context->current_cycle = context->sync_cycle;
+#if defined(USE_NATIVE) || defined(NEW_CORE)
+		context->Z80_CYCLE = gen->m68k->current_cycle;
 #else
 		context->m_icount = 0;
 #endif
 	}
 
 	//typical delay from bus arbitration
-#ifdef USE_NATIVE
-	context->current_cycle += 3 * MCLKS_PER_Z80;
+#if defined(USE_NATIVE) || defined(NEW_CORE)
+	context->Z80_CYCLE += 3 * MCLKS_PER_Z80;
 #else
 	context->m_icount -= 3;
 #endif
@@ -994,11 +1021,11 @@
 	if (context->mem_pointers[1]) {
 		return context->mem_pointers[1][location ^ 1];
 	}
-	uint32_t address = context->bank_reg << 15 | location;
+	uint32_t address = gen->z80_bank_reg << 15 | location;
 	if (address >= 0xC00000 && address < 0xE00000) {
 		return z80_vdp_port_read(location & 0xFF, context);
 	} else {
-		fprintf(stderr, "Unhandled read by Z80 from address %X through banked memory area (%X)\n", address, context->bank_reg << 15);
+		fprintf(stderr, "Unhandled read by Z80 from address %X through banked memory area (%X)\n", address, gen->z80_bank_reg << 15);
 	}
 	return 0;
 }
@@ -1008,15 +1035,15 @@
 	z80_context * context = vcontext;
 	genesis_context *gen = context->system;
 	if (gen->bus_busy) {
-#ifdef USE_NATIVE
-		context->current_cycle = context->sync_cycle;
+#if defined(USE_NATIVE) || defined(NEW_CORE)
+		context->Z80_CYCLE = gen->m68k->current_cycle;
 #else
 		context->m_icount = 0;
 #endif
 	}
 	//typical delay from bus arbitration
-#ifdef USE_NATIVE
-	context->current_cycle += 3 * MCLKS_PER_Z80;
+#if defined(USE_NATIVE) || defined(NEW_CORE)
+	context->Z80_CYCLE += 3 * MCLKS_PER_Z80;
 #else
 	context->m_icount -= 3;
 #endif
@@ -1026,7 +1053,7 @@
 	gen->m68k->current_cycle += 8 * MCLKS_PER_68K;
 
 	location &= 0x7FFF;
-	uint32_t address = context->bank_reg << 15 | location;
+	uint32_t address = gen->z80_bank_reg << 15 | location;
 	if (address >= 0xE00000) {
 		address &= 0xFFFF;
 		((uint8_t *)gen->work_ram)[address ^ 1] = value;
@@ -1041,8 +1068,9 @@
 static void *z80_write_bank_reg(uint32_t location, void * vcontext, uint8_t value)
 {
 	z80_context * context = vcontext;
+	genesis_context *gen = context->system;
 
-	context->bank_reg = (context->bank_reg >> 1 | value << 8) & 0x1FF;
+	gen->z80_bank_reg = (gen->z80_bank_reg >> 1 | value << 8) & 0x1FF;
 	update_z80_bank_pointer(context->system);
 
 	return context;
@@ -1273,7 +1301,7 @@
 	free(gen->cart);
 	free(gen->m68k);
 	free(gen->work_ram);
-	z80_options_free(gen->z80->options);
+	z80_options_free(gen->z80->Z80_OPTS);
 	free(gen->z80);
 	free(gen->zram);
 	ym_free(gen->ym);
@@ -1401,7 +1429,9 @@
 	z80_options *z_opts = malloc(sizeof(z80_options));
 	init_z80_opts(z_opts, z80_map, 5, NULL, 0, MCLKS_PER_Z80, 0xFFFF);
 	gen->z80 = init_z80_context(z_opts);
+#ifndef NEW_CORE
 	gen->z80->next_int_pulse = z80_next_int_pulse;
+#endif
 	z80_assert_reset(gen->z80, 0);
 #else
 	gen->z80 = calloc(1, sizeof(z80_context));
--- a/genesis.h	Fri Mar 01 08:17:57 2019 -0800
+++ b/genesis.h	Fri Mar 01 14:17:29 2019 -0800
@@ -9,11 +9,15 @@
 #include <stdint.h>
 #include "system.h"
 #include "m68k_core.h"
+#ifdef NEW_CORE
+#include "z80.h"
+#else
 #ifdef USE_NATIVE
 #include "z80_to_x86.h"
 #else
 #include "mame_z80/z80.h"
 #endif
+#endif
 #include "ym2612.h"
 #include "vdp.h"
 #include "psg.h"
@@ -52,6 +56,7 @@
 	uint32_t        int_latency_prev2;
 	uint32_t        reset_cycle;
 	uint8_t         bank_regs[8];
+	uint16_t        z80_bank_reg;
 	uint16_t        mapper_start_index;
 	uint8_t         mapper_type;
 	uint8_t         save_type;
--- a/gst.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/gst.c	Fri Mar 01 14:17:29 2019 -0800
@@ -144,6 +144,7 @@
 	}
 	uint8_t * curpos = regdata;
 	uint8_t f = *(curpos++);
+#ifndef NEW_CORE
 	context->flags[ZF_C] = f & 1;
 	f >>= 1;
 	context->flags[ZF_N] = f & 1;
@@ -200,6 +201,7 @@
 		context->mem_pointers[1] = NULL;
 	}
 	context->bank_reg = bank >> 15;
+#endif
 	uint8_t buffer[Z80_RAM_BYTES];
 	fseek(gstfile, GST_Z80_RAM, SEEK_SET);
 	if(fread(buffer, 1, sizeof(buffer), gstfile) != (8*1024)) {
@@ -210,11 +212,15 @@
 	{
 		if (context->mem_pointers[0][i] != buffer[i]) {
 			context->mem_pointers[0][i] = buffer[i];
+#ifndef NEW_CORE
 			z80_handle_code_write(i, context);
+#endif
 		}
 	}
+#ifndef NEW_CORE
 	context->native_pc = NULL;
 	context->extra_pc = NULL;
+#endif
 	return 1;
 }
 
@@ -296,6 +302,7 @@
 	uint8_t regdata[GST_Z80_REG_SIZE];
 	uint8_t * curpos = regdata;
 	memset(regdata, 0, sizeof(regdata));
+#ifndef NEW_CORE
 	uint8_t f = context->flags[ZF_S];
 	f <<= 1;
 	f |= context->flags[ZF_Z] ;
@@ -348,6 +355,7 @@
 	curpos += 3;
 	uint32_t bank = context->bank_reg << 15;
 	write_le_32(curpos, bank);
+#endif
 	fseek(gstfile, GST_Z80_REGS, SEEK_SET);
 	if (fwrite(regdata, 1, sizeof(regdata), gstfile) != sizeof(regdata)) {
 		return 0;
--- a/sms.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/sms.c	Fri Mar 01 14:17:29 2019 -0800
@@ -9,26 +9,35 @@
 #include "saves.h"
 #include "bindings.h"
 
+#ifdef NEW_CORE
+#define Z80_CYCLE cycles
+#define Z80_OPTS opts
+#define z80_handle_code_write(...)
+#else
+#define Z80_CYCLE current_cycle
+#define Z80_OPTS options
+#endif
+
 static void *memory_io_write(uint32_t location, void *vcontext, uint8_t value)
 {
 	z80_context *z80 = vcontext;
 	sms_context *sms = z80->system;
 	if (location & 1) {
 		uint8_t fuzzy_ctrl_0 = sms->io.ports[0].control, fuzzy_ctrl_1 = sms->io.ports[1].control;
-		io_control_write(sms->io.ports, (~value) << 5 & 0x60, z80->current_cycle);
+		io_control_write(sms->io.ports, (~value) << 5 & 0x60, z80->Z80_CYCLE);
 		fuzzy_ctrl_0 |= sms->io.ports[0].control;
-		io_control_write(sms->io.ports+1, (~value) << 3 & 0x60, z80->current_cycle);
+		io_control_write(sms->io.ports+1, (~value) << 3 & 0x60, z80->Z80_CYCLE);
 		fuzzy_ctrl_1 |= sms->io.ports[1].control;
 		if (
 			(fuzzy_ctrl_0 & 0x40 & (sms->io.ports[0].output ^ (value << 1)) & (value << 1))
 			|| (fuzzy_ctrl_0 & 0x40 & (sms->io.ports[1].output ^ (value >> 1)) & (value >> 1))
 		) {
 			//TH is an output and it went from 0 -> 1
-			vdp_run_context(sms->vdp, z80->current_cycle);
+			vdp_run_context(sms->vdp, z80->Z80_CYCLE);
 			vdp_latch_hv(sms->vdp);
 		}
-		io_data_write(sms->io.ports, value << 1, z80->current_cycle);
-		io_data_write(sms->io.ports + 1, value >> 1, z80->current_cycle);
+		io_data_write(sms->io.ports, value << 1, z80->Z80_CYCLE);
+		io_data_write(sms->io.ports + 1, value >> 1, z80->Z80_CYCLE);
 	} else {
 		//TODO: memory control write
 	}
@@ -39,7 +48,7 @@
 {
 	z80_context *z80 = vcontext;
 	sms_context *sms = z80->system;
-	vdp_run_context(sms->vdp, z80->current_cycle);
+	vdp_run_context(sms->vdp, z80->Z80_CYCLE);
 	uint16_t hv = vdp_hv_counter_read(sms->vdp);
 	if (location & 1) {
 		return hv;
@@ -52,7 +61,7 @@
 {
 	z80_context *z80 = vcontext;
 	sms_context *sms = z80->system;
-	psg_run(sms->psg, z80->current_cycle);
+	psg_run(sms->psg, z80->Z80_CYCLE);
 	psg_write(sms->psg, value);
 	return vcontext;
 }
@@ -61,14 +70,19 @@
 {
 	uint32_t vint = vdp_next_vint(sms->vdp);
 	uint32_t hint = vdp_next_hint(sms->vdp);
+#ifdef NEW_CORE
+	sms->z80->int_cycle = vint < hint ? vint : hint;
+	z80_sync_cycle(sms->z80, sms->z80->sync_cycle);
+#else
 	sms->z80->int_pulse_start = vint < hint ? vint : hint;
+#endif
 }
 
 static uint8_t vdp_read(uint32_t location, void *vcontext)
 {
 	z80_context *z80 = vcontext;
 	sms_context *sms = z80->system;
-	vdp_run_context(sms->vdp, z80->current_cycle);
+	vdp_run_context(sms->vdp, z80->Z80_CYCLE);
 	if (location & 1) {
 		uint8_t ret = vdp_control_port_read(sms->vdp);
 		sms->vdp->flags2 &= ~(FLAG2_VINT_PENDING|FLAG2_HINT_PENDING);
@@ -84,11 +98,11 @@
 	z80_context *z80 = vcontext;
 	sms_context *sms = z80->system;
 	if (location & 1) {
-		vdp_run_context_full(sms->vdp, z80->current_cycle);
+		vdp_run_context_full(sms->vdp, z80->Z80_CYCLE);
 		vdp_control_port_write_pbc(sms->vdp, value);
 		update_interrupts(sms);
 	} else {
-		vdp_run_context(sms->vdp, z80->current_cycle);
+		vdp_run_context(sms->vdp, z80->Z80_CYCLE);
 		vdp_data_port_write_pbc(sms->vdp, value);
 	}
 	return vcontext;
@@ -99,13 +113,13 @@
 	z80_context *z80 = vcontext;
 	sms_context *sms = z80->system;
 	if (location == 0xC0 || location == 0xDC) {
-		uint8_t port_a = io_data_read(sms->io.ports, z80->current_cycle);
-		uint8_t port_b = io_data_read(sms->io.ports+1, z80->current_cycle);
+		uint8_t port_a = io_data_read(sms->io.ports, z80->Z80_CYCLE);
+		uint8_t port_b = io_data_read(sms->io.ports+1, z80->Z80_CYCLE);
 		return (port_a & 0x3F) | (port_b << 6);
 	}
 	if (location == 0xC1 || location == 0xDD) {
-		uint8_t port_a = io_data_read(sms->io.ports, z80->current_cycle);
-		uint8_t port_b = io_data_read(sms->io.ports+1, z80->current_cycle);
+		uint8_t port_a = io_data_read(sms->io.ports, z80->Z80_CYCLE);
+		uint8_t port_b = io_data_read(sms->io.ports+1, z80->Z80_CYCLE);
 		return (port_a & 0x40) | (port_b >> 2 & 0xF) | (port_b << 1 & 0x80) | 0x10;
 	}
 	return 0xFF;
@@ -343,7 +357,7 @@
 	sms_context *sms = (sms_context *)system;
 	char *statepath = get_slot_name(system, slot, "state");
 	uint8_t ret;
-#ifdef USE_NATIVE
+#if !defined(NEW_CORE) && defined(USE_NATIVE)
 	if (!sms->z80->native_pc) {
 		ret = get_modification_time(statepath) != 0;
 		if (ret) {
@@ -362,7 +376,7 @@
 static void run_sms(system_header *system)
 {
 	sms_context *sms = (sms_context *)system;
-	uint32_t target_cycle = sms->z80->current_cycle + 3420*16;
+	uint32_t target_cycle = sms->z80->Z80_CYCLE + 3420*16;
 	//TODO: PAL support
 	render_set_video_standard(VID_NTSC);
 	while (!sms->should_return)
@@ -378,7 +392,11 @@
 			zdebugger(sms->z80, sms->z80->pc);
 		}
 #endif
+#ifdef NEW_CORE
+		if (sms->z80->nmi_cycle == CYCLE_NEVER) {
+#else
 		if (sms->z80->nmi_start == CYCLE_NEVER) {
+#endif
 			uint32_t nmi = vdp_next_nmi(sms->vdp);
 			if (nmi != CYCLE_NEVER) {
 				z80_assert_nmi(sms->z80, nmi);
@@ -386,9 +404,9 @@
 		}
 		z80_run(sms->z80, target_cycle);
 		if (sms->z80->reset) {
-			z80_clear_reset(sms->z80, sms->z80->current_cycle + 128*15);
+			z80_clear_reset(sms->z80, sms->z80->Z80_CYCLE + 128*15);
 		}
-		target_cycle = sms->z80->current_cycle;
+		target_cycle = sms->z80->Z80_CYCLE;
 		vdp_run_context(sms->vdp, target_cycle);
 		psg_run(sms->psg, target_cycle);
 		
@@ -396,7 +414,7 @@
 		if (system->save_state) {
 			while (!sms->z80->pc) {
 				//advance Z80 to an instruction boundary
-				z80_run(sms->z80, sms->z80->current_cycle + 1);
+				z80_run(sms->z80, sms->z80->Z80_CYCLE + 1);
 			}
 			save_state(sms, system->save_state - 1);
 			system->save_state = 0;
@@ -405,9 +423,9 @@
 		
 		target_cycle += 3420*16;
 		if (target_cycle > 0x10000000) {
-			uint32_t adjust = sms->z80->current_cycle - 3420*262*2;
-			io_adjust_cycles(sms->io.ports, sms->z80->current_cycle, adjust);
-			io_adjust_cycles(sms->io.ports+1, sms->z80->current_cycle, adjust);
+			uint32_t adjust = sms->z80->Z80_CYCLE - 3420*262*2;
+			io_adjust_cycles(sms->io.ports, sms->z80->Z80_CYCLE, adjust);
+			io_adjust_cycles(sms->io.ports+1, sms->z80->Z80_CYCLE, adjust);
 			z80_adjust_cycles(sms->z80, adjust);
 			vdp_adjust_cycles(sms->vdp, adjust);
 			sms->psg->cycles -= adjust;
@@ -457,8 +475,8 @@
 static void soft_reset(system_header *system)
 {
 	sms_context *sms = (sms_context *)system;
-	z80_assert_reset(sms->z80, sms->z80->current_cycle);
-#ifdef USE_NATIVE
+	z80_assert_reset(sms->z80, sms->z80->Z80_CYCLE);
+#if !defined(NEW_CORE) && defined(USE_NATIVE)
 	sms->z80->target_cycle = sms->z80->sync_cycle = sms->z80->current_cycle;
 #endif
 }
@@ -467,7 +485,7 @@
 {
 	sms_context *sms = (sms_context *)system;
 	vdp_free(sms->vdp);
-	z80_options_free(sms->z80->options);
+	z80_options_free(sms->z80->Z80_OPTS);
 	free(sms->z80);
 	psg_free(sms->psg);
 	free(sms);
@@ -483,7 +501,9 @@
 	sms_context *sms = (sms_context *)system;
 	sms->should_return = 1;
 #ifdef USE_NATIVE
-	sms->z80->target_cycle = sms->z80->sync_cycle = sms->z80->current_cycle;
+#ifndef NEW_CORE
+	sms->z80->target_cycle = sms->z80->sync_cycle = sms->z80->Z80_CYCLE;
+#endif
 #endif
 }
 
@@ -589,7 +609,7 @@
 	init_z80_opts(zopts, sms->header.info.map, sms->header.info.map_chunks, io_map, 4, 15, 0xFF);
 	sms->z80 = init_z80_context(zopts);
 	sms->z80->system = sms;
-	sms->z80->options->gen.debug_cmd_handler = debug_commands;
+	sms->z80->Z80_OPTS->gen.debug_cmd_handler = debug_commands;
 	
 	sms->rom = media->buffer;
 	sms->rom_size = rom_size;
--- a/sms.h	Fri Mar 01 08:17:57 2019 -0800
+++ b/sms.h	Fri Mar 01 14:17:29 2019 -0800
@@ -4,11 +4,15 @@
 #include "system.h"
 #include "vdp.h"
 #include "psg.h"
+#ifdef NEW_CORE
+#include "z80.h"
+#else
 #ifdef USE_NATIVE
 #include "z80_to_x86.h"
 #else
 #include "mame_z80/z80.h"
 #endif
+#endif
 #include "io.h"
 
 #define SMS_RAM_SIZE (8*1024)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/z80.cpu	Fri Mar 01 14:17:29 2019 -0800
@@ -0,0 +1,2539 @@
+info
+	prefix z80_
+	opcode_size 8
+	extra_tables cb ed dded fded ddcb fdcb dd fd
+	body z80_run_op
+	sync_cycle z80_sync_cycle
+	interrupt z80_interrupt
+	include z80_util.c
+	header z80.h
+	
+declare
+	void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, memmap_chunk const * io_chunks, uint32_t num_io_chunks, uint32_t clock_divider, uint32_t io_address_mask);
+	z80_context * init_z80_context(z80_options *options);
+	void z80_run(z80_context *context, uint32_t target_cycle);
+	void z80_assert_reset(z80_context * context, uint32_t cycle);
+	void z80_clear_reset(z80_context * context, uint32_t cycle);
+	void z80_assert_busreq(z80_context * context, uint32_t cycle);
+	void z80_clear_busreq(z80_context * context, uint32_t cycle);
+	void z80_assert_nmi(z80_context *context, uint32_t cycle);
+	uint8_t z80_get_busack(z80_context * context, uint32_t cycle);
+	void z80_invalidate_code_range(z80_context *context, uint32_t start, uint32_t end);
+	void z80_adjust_cycles(z80_context * context, uint32_t deduction);
+	void z80_serialize(z80_context *context, serialize_buffer *buf);
+	void z80_deserialize(deserialize_buffer *buf, void *vcontext);
+	void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler);
+	void zremove_breakpoint(z80_context * context, uint16_t address);
+	void z80_options_free(z80_options *opts);
+	void z80_sync_cycle(z80_context *context, uint32_t target_cycle);
+
+regs
+	main 8 b c d e h l f a
+	alt 8 b' c' d' e' h' l' f' a'
+	i 8
+	r 8
+	rhigh 8
+	iff1 8
+	iff2 8
+	imode 8
+	sp 16
+	ix 16
+	iy 16
+	pc 16
+	wz 16
+	nflag 8
+	last_flag_result 8
+	pvflag 8
+	chflags 8
+	zflag 8
+	scratch1 16
+	scratch2 16
+	busreq 8
+	busack 8
+	reset 8
+	io_map ptrmemmap_chunk
+	io_chunks 32
+	io_mask 32
+	int_cycle 32
+	int_end_cycle 32
+	int_value 8
+	nmi_cycle 32
+	system ptrvoid
+	fastread ptr8 64
+	fastwrite ptr8 64
+	mem_pointers ptr8 4
+	
+flags
+	register f
+	S 7 sign last_flag_result.7
+	Z 6 zero zflag
+	Y 5 bit-5 last_flag_result.5
+	H 4 half-carry chflags.3
+	P 2 parity pvflag
+	V 2 overflow pvflag
+	X 3 bit-3 last_flag_result.3
+	N 1 none nflag
+	C 0 carry chflags.7
+
+	
+z80_op_fetch
+	cycles 1
+	add 1 r r
+	mov pc scratch1
+	ocall read_8
+	add 1 pc pc
+	
+z80_run_op
+	#printf "Z80: %X @ %d\n" pc cycles
+	#printf "Z80: %X - A: %X, B: %X, C: %X D: %X, E: %X, H: %X, L: %X, SP: %X, IX: %X, IY: %X @ %d\n" pc a b c d e h l sp ix iy cycles
+	z80_op_fetch
+	dispatch scratch1
+
+z80_interrupt
+	cmp int_cycle cycles
+	if >=U
+	
+	mov 0 iff1
+	mov 0 iff2
+	cycles 6
+	update_sync
+	
+	switch imode
+	case 0
+	dispatch int_value
+	
+	case 1
+	dispatch 0xFF
+	
+	case 2
+	lsl i 8 pc
+	or int_value pc pc
+	#CD is call
+	dispatch 0xCD
+	end
+	
+	else
+	
+	cmp nmi_cycle cycles
+	if >=U
+	
+	mov 0xFFFFFFFF nmi_cycle
+	mov 0 iff1
+	local pch 8
+	lsr pc 8 pch
+	meta high pch
+	meta low pc
+	z80_push
+	mov 0x66 pc
+	update_sync
+	
+	end
+	end
+	
+	
+11001011 cb_prefix
+	z80_op_fetch
+	dispatch scratch1 cb
+
+11011101 dd_prefix
+	z80_op_fetch
+	dispatch scratch1 dd
+
+11101101 ed_prefix
+	z80_op_fetch
+	dispatch scratch1 ed
+
+11111101 fd_prefix
+	z80_op_fetch
+	dispatch scratch1 fd
+	
+dd 11001011 ddcb_prefix
+	z80_calc_index ix
+	cycles 2
+	mov pc scratch1
+	ocall read_8
+	add 1 pc pc
+	dispatch scratch1 ddcb
+	
+fd 11001011 fdcb_prefix
+	z80_calc_index iy
+	cycles 2
+	mov pc scratch1
+	ocall read_8
+	add 1 pc pc
+	dispatch scratch1 fdcb
+	
+z80_check_cond
+	arg cond 8
+	local invert 8
+	switch cond
+	case 0
+	meta istrue invert
+	lnot zflag invert
+	
+	case 1
+	meta istrue zflag
+	
+	case 2
+	meta istrue invert
+	not chflags invert
+	and 0x80 invert invert
+	
+	case 3
+	meta istrue invert
+	and 0x80 chflags invert
+	
+	case 4
+	meta istrue invert
+	lnot pvflag invert
+	
+	case 5
+	meta istrue pvflag
+	
+	case 6
+	meta istrue invert
+	not last_flag_result invert
+	and 0x80 invert invert
+	
+	case 7
+	meta istrue invert
+	and 0x80 last_flag_result invert
+	
+	end
+	
+z80_fetch_hl
+	lsl h 8 scratch1
+	or l scratch1 scratch1
+	ocall read_8
+	
+z80_store_hl
+	lsl h 8 scratch2
+	or l scratch2 scratch2
+	ocall write_8
+
+z80_fetch_immed
+	mov pc scratch1
+	ocall read_8
+	add 1 pc pc
+	
+z80_fetch_immed16
+	mov pc scratch1
+	ocall read_8
+	mov scratch1 wz
+	add 1 pc pc
+	mov pc scratch1
+	ocall read_8
+	add 1 pc pc
+	lsl scratch1 8 scratch1
+	or scratch1 wz wz
+
+z80_fetch_immed_reg16
+	mov pc scratch1
+	ocall read_8
+	mov scratch1 low
+	add 1 pc pc
+	mov pc scratch1
+	ocall read_8
+	mov scratch1 high
+	add 1 pc pc
+	
+z80_fetch_immed_to_reg16
+	mov pc scratch1
+	ocall read_8
+	mov scratch1 reg
+	add 1 pc pc
+	mov pc scratch1
+	ocall read_8
+	add 1 pc pc
+	lsl scratch1 8 scratch1
+	or scratch1 reg reg
+
+01RRR110 ld_from_hl
+	z80_fetch_hl
+	mov scratch1 main.R
+
+01DDDSSS ld_from_reg
+	mov main.S main.D
+	
+dd 01DDD100 ld_from_ixh
+	invalid D 6
+	lsr ix 8 main.D
+	
+dd 01100SSS ld_to_ixh
+	invalid S 6
+	local tmp 16
+	and 0xFF ix ix
+	lsl main.S 8 tmp
+	or tmp ix ix
+	
+dd 0110D10S ld_ixb_to_ixb
+
+dd 01DDD101 ld_from_ixl
+	invalid D 6
+	mov ix main.D
+	
+dd 01101SSS ld_to_ixl
+	invalid S 6
+	and 0xFF00 ix ix
+	or main.S ix ix
+	
+dd 01100101 ld_ixl_to_ixh
+	local tmp 16
+	lsl ix 8 tmp
+	and 0xFF ix ix
+	or tmp ix ix
+	
+dd 01101100 ld_ixh_to_ixl
+	local tmp 16
+	lsr ix 8 tmp
+	and 0xFF00 ix ix
+	or tmp ix ix
+	
+fd 01DDD100 ld_from_iyh
+	invalid D 6
+	lsr iy 8 main.D
+	
+fd 01100SSS ld_to_iyh
+	invalid S 6
+	local tmp 16
+	and 0xFF iy iy
+	lsl main.S 8 tmp
+	or tmp iy iy
+	
+fd 0110D10S ld_iyb_to_iyb
+
+fd 01DDD101 ld_from_iyl
+	invalid D 6
+	mov iy main.D
+	
+fd 01101SSS ld_to_iyl
+	invalid S 6
+	and 0xFF00 iy iy
+	or main.S iy iy
+	
+fd 01100101 ld_iyl_to_iyh
+	local tmp 16
+	lsl iy 8 tmp
+	and 0xFF iy iy
+	or tmp iy iy
+	
+fd 01101100 ld_iyh_to_iyl
+	local tmp 16
+	lsr iy 8 tmp
+	and 0xFF00 iy iy
+	or tmp iy iy
+	
+z80_calc_index
+	arg index 16
+	mov index wz
+	z80_fetch_immed
+	sext 16 scratch1 scratch1
+	add scratch1 wz wz
+
+z80_fetch_index
+	arg index 16
+	z80_calc_index index
+	mov wz scratch1
+	cycles 5
+	ocall read_8
+	
+z80_store_index
+	mov wz scratch2
+	ocall write_8
+	
+dd 01RRR110 ld_from_ix
+	z80_fetch_index ix
+	mov scratch1 main.R
+
+fd 01RRR110 ld_from_iy
+	z80_fetch_index iy
+	mov scratch1 main.R
+
+00RRR110 ld_immed
+	z80_fetch_immed
+	mov scratch1 main.R
+	
+dd 00100110 ld_immed_ixh
+	z80_fetch_immed
+	lsl scratch1 8 scratch1
+	and 0xFF ix ix
+	or scratch1 ix ix
+	
+dd 00101110 ld_immed_ixl
+	z80_fetch_immed
+	and 0xFF00 ix ix
+	or scratch1 ix ix
+	
+fd 00100110 ld_immed_iyh
+	z80_fetch_immed
+	lsl scratch1 8 scratch1
+	and 0xFF iy iy
+	or scratch1 iy iy
+	
+fd 00101110 ld_immed_iyl
+	z80_fetch_immed
+	and 0xFF00 iy iy
+	or scratch1 iy iy
+
+01110RRR ld_to_hl
+	mov main.R scratch1
+	z80_store_hl
+
+dd 01110RRR ld_to_ix
+	z80_calc_index ix
+	mov wz scratch2
+	mov main.R scratch1
+	cycles 5
+	ocall write_8
+
+fd 01110RRR ld_to_iy
+	z80_calc_index iy
+	mov wz scratch2
+	mov main.R scratch1
+	cycles 5
+	ocall write_8
+
+00110110 ld_to_hl_immed
+	z80_fetch_immed
+	z80_store_hl
+	
+dd 00110110 ld_to_ixd_immed
+	z80_calc_index ix
+	z80_fetch_immed
+	cycles 2
+	mov wz scratch2
+	ocall write_8
+	
+fd 00110110 ld_to_iyd_immed
+	z80_calc_index iy
+	z80_fetch_immed
+	cycles 2
+	mov wz scratch2
+	ocall write_8
+
+00001010 ld_a_from_bc
+	lsl b 8 wz
+	or c wz wz
+	mov wz scratch1
+	add 1 wz wz
+	ocall read_8
+	mov scratch1 a
+
+00011010 ld_a_from_de
+	lsl d 8 wz
+	or e wz wz
+	mov wz scratch1
+	add 1 wz wz
+	ocall read_8
+	mov scratch1 a
+
+00111010 ld_a_from_immed
+	z80_fetch_immed16
+	mov wz scratch1
+	add 1 wz wz
+	ocall read_8
+	mov scratch1 a
+	
+00000010 ld_a_to_bc
+	local tmp 8
+	lsl b 8 scratch2
+	or c scratch2 scratch2
+	mov a scratch1
+	add c 1 tmp
+	lsl a 8 wz
+	or tmp wz wz
+	ocall write_8
+	
+00010010 ld_a_to_de
+	local tmp 8
+	lsl d 8 scratch2
+	or e scratch2 scratch2
+	mov a scratch1
+	add e 1 tmp
+	lsl a 8 wz
+	or tmp wz wz
+	ocall write_8
+
+00110010 ld_a_to_immed
+	local tmp 16
+	z80_fetch_immed16
+	mov wz scratch2
+	mov a scratch1
+	add 1 wz wz
+	ocall write_8
+	and 0xFF wz wz
+	lsl a 8 tmp
+	or tmp wz wz
+
+ed 01000111 ld_i_a
+	mov a i
+	cycles 1
+
+ed 01001111 ld_r_a
+	mov a r
+	and 0x80 a rhigh
+	cycles 1
+
+ed 01011111 ld_a_r
+	cycles 1
+	and 0x7F r a
+	or rhigh a a
+	update_flags SZYH0XN0
+	mov iff2 pvflag
+	
+ed 01010111 ld_a_i
+	cycles 1
+	mov i a
+	update_flags SZYH0XN0
+	mov iff2 pvflag
+
+00000001 ld_bc_immed
+	meta high b
+	meta low c
+	z80_fetch_immed_reg16
+
+00010001 ld_de_immed
+	meta high d
+	meta low e
+	z80_fetch_immed_reg16
+
+00100001 ld_hl_immed
+	meta high h
+	meta low l
+	z80_fetch_immed_reg16
+
+00110001 ld_sp_immed
+	meta reg sp
+	z80_fetch_immed_to_reg16
+
+dd 00100001 ld_ix_immed
+	meta reg ix
+	z80_fetch_immed_to_reg16
+
+fd 00100001 ld_iy_immed
+	meta reg iy
+	z80_fetch_immed_to_reg16
+	
+z80_fetch16_from_immed
+	z80_fetch_immed16
+	mov wz scratch1
+	ocall read_8
+	mov scratch1 low
+	add 1 wz wz
+	mov wz scratch1
+	ocall read_8
+	mov scratch1 high
+	add 1 wz wz
+
+00101010 ld_hl_from_immed
+	meta low l
+	meta high h
+	z80_fetch16_from_immed
+
+ed 01001011 ld_bc_from_immed
+	meta low c
+	meta high b
+	z80_fetch16_from_immed
+
+ed 01011011 ld_de_from_immed
+	meta low e
+	meta high d
+	z80_fetch16_from_immed
+
+ed 01101011 ld_hl_from_immed_slow
+	meta low l
+	meta high h
+	z80_fetch16_from_immed
+	
+z80_fetch_reg16_from_immed
+	z80_fetch_immed16
+	mov wz scratch1
+	ocall read_8
+	mov scratch1 reg
+	add 1 wz wz
+	mov wz scratch1
+	ocall read_8
+	lsl scratch1 8 scratch1
+	or scratch1 reg reg
+	add 1 wz wz
+
+ed 01111011 ld_sp_from_immed
+	meta reg sp
+	z80_fetch_reg16_from_immed
+
+dd 00101010 ld_ix_from_immed
+	meta reg ix
+	z80_fetch_reg16_from_immed
+
+fd 00101010 ld_iy_from_immed
+	meta reg iy
+	z80_fetch_reg16_from_immed
+
+00100010 ld_hl_to_immed
+	z80_fetch_immed16
+	mov wz scratch2
+	mov l scratch1
+	ocall write_8
+	add 1 wz wz
+	mov wz scratch2
+	mov h scratch1
+	ocall write_8
+	add 1 wz wz
+	
+dd 00100010 ld_ix_to_immed
+	z80_fetch_immed16
+	mov wz scratch2
+	mov ix scratch1
+	ocall write_8
+	add 1 wz wz
+	mov wz scratch2
+	lsr ix 8 scratch1
+	ocall write_8
+	add 1 wz wz
+	
+fd 00100010 ld_iy_to_immed
+	z80_fetch_immed16
+	mov wz scratch2
+	mov iy scratch1
+	ocall write_8
+	add 1 wz wz
+	mov wz scratch2
+	lsr iy 8 scratch1
+	ocall write_8
+	add 1 wz wz
+	
+z80_regpair_to_immed
+	z80_fetch_immed16
+	mov wz scratch2
+	mov low scratch1
+	ocall write_8
+	add 1 wz wz
+	mov high scratch1
+	mov wz scratch2
+	ocall write_8
+	add 1 wz wz
+	
+ed 01000011 ld_bc_to_immed
+	meta low c
+	meta high b
+	z80_regpair_to_immed
+
+ed 01010011 ld_de_to_immed
+	meta low e
+	meta high d
+	z80_regpair_to_immed
+	
+ed 01100011 ld_hl_to_immed_slow
+	meta low l
+	meta high h
+	z80_regpair_to_immed
+	
+ed 01110011 ld_sp_to_immed
+	meta low sp
+	local sph 8
+	lsr sp 8 sph
+	meta high sph
+	z80_regpair_to_immed
+
+11111001 ld_sp_hl
+	cycles 2
+	lsl h 8 sp
+	or l sp sp
+	
+dd 11111001 ld_sp_ix
+	cycles 2
+	mov ix sp
+
+fd 11111001 ld_sp_iy
+	cycles 2
+	mov iy sp
+
+z80_push
+	cycles 1
+	sub 1 sp sp
+	mov sp scratch2
+	mov high scratch1
+	ocall write_8
+	sub 1 sp sp
+	mov sp scratch2
+	mov low scratch1
+	ocall write_8
+
+11000101 push_bc
+	meta high b
+	meta low c
+	z80_push
+
+11010101 push_de
+	meta high d
+	meta low e
+	z80_push
+
+11100101 push_hl
+	meta high h
+	meta low l
+	z80_push
+
+11110101 push_af
+	meta high a
+	meta low f
+	z80_push
+	
+dd 11100101 push_ix
+	local ixh 8
+	lsr ix 8 ixh
+	meta high ixh
+	meta low ix
+	z80_push
+
+fd 11100101 push_iy
+	local iyh 8
+	lsr iy 8 iyh
+	meta high iyh
+	meta low iy
+	z80_push
+
+z80_pop
+	mov sp scratch1
+	ocall read_8
+	add 1 sp sp
+	mov scratch1 low
+	mov sp scratch1
+	ocall read_8
+	add 1 sp sp
+	mov scratch1 high
+
+11000001 pop_bc
+	meta high b
+	meta low c
+	z80_pop
+
+11010001 pop_de
+	meta high d
+	meta low e
+	z80_pop
+
+11100001 pop_hl
+	meta high h
+	meta low l
+	z80_pop
+
+11110001 pop_af
+	meta high a
+	meta low f
+	z80_pop
+
+dd 11100001 pop_ix
+	local ixh 16
+	meta high ixh
+	meta low ix
+	z80_pop
+	lsl ixh 8 ixh
+	or ixh ix ix
+
+fd 11100001 pop_iy
+	local iyh 16
+	meta high iyh
+	meta low iy
+	z80_pop
+	lsl iyh 8 iyh
+	or iyh iy iy
+
+11101011 ex_de_hl
+	xchg e l
+	xchg d h
+
+00001000 ex_af_af
+	xchg a a'
+	xchg f f'
+
+11011001 exx
+	xchg b b'
+	xchg c c'
+	xchg d d'
+	xchg e e'
+	xchg h h'
+	xchg l l'
+
+11100011 ex_sp_hl
+	mov sp scratch1
+	ocall read_8
+	xchg l scratch1
+	cycles 1
+	mov sp scratch2
+	ocall write_8
+	add 1 sp scratch1
+	ocall read_8
+	xchg h scratch1
+	cycles 2
+	add 1 sp scratch2
+	ocall write_8
+	lsl h 8 wz
+	or l wz wz
+	
+dd 11100011 ex_sp_ix
+	mov sp scratch1
+	ocall read_8
+	mov scratch1 wz
+	mov ix scratch1
+	cycles 1
+	mov sp scratch2
+	ocall write_8
+	add 1 sp scratch1
+	ocall read_8
+	lsl scratch1 8 scratch1
+	or scratch1 wz wz
+	lsr ix 8 scratch1
+	cycles 2
+	add 1 sp scratch2
+	ocall write_8
+	mov wz ix
+	
+fd 11100011 ex_sp_iy
+	mov sp scratch1
+	ocall read_8
+	mov scratch1 wz
+	mov iy scratch1
+	cycles 1
+	mov sp scratch2
+	ocall write_8
+	add 1 sp scratch1
+	ocall read_8
+	lsl scratch1 8 scratch1
+	or scratch1 wz wz
+	lsr iy 8 scratch1
+	cycles 2
+	add 1 sp scratch2
+	ocall write_8
+	mov wz iy
+
+10000RRR add_reg
+	add a main.R a
+	update_flags SZYHVXN0C
+	
+dd 10000100 add_ixh
+	lsr ix 8 scratch1
+	add a scratch1 a
+	update_flags SZYHVXN0C
+	
+dd 10000101 add_ixl
+	and ix 0xFF scratch1
+	add a scratch1 a
+	update_flags SZYHVXN0C
+	
+fd 10000100 add_iyh
+	lsr iy 8 scratch1
+	add a scratch1 a
+	update_flags SZYHVXN0C
+	
+fd 10000101 add_iyl
+	and iy 0xFF scratch1
+	add a scratch1 a
+	update_flags SZYHVXN0C
+	
+10000110 add_hl
+	z80_fetch_hl
+	add a scratch1 a
+	update_flags SZYHVXN0C
+	
+dd 10000110 add_ixd
+	z80_fetch_index ix
+	add a scratch1 a
+	update_flags SZYHVXN0C
+	
+fd 10000110 add_iyd
+	z80_fetch_index iy
+	add a scratch1 a
+	update_flags SZYHVXN0C
+
+11000110 add_immed
+	z80_fetch_immed
+	add a scratch1 a
+	update_flags SZYHVXN0C
+	
+z80_add16_hl
+	arg src 16
+	lsl h 8 hlt
+	or l hlt hlt
+	add 1 hlt wz
+	add src hlt hlt
+	update_flags YHXN0C
+	mov hlt l
+	lsr hlt 8 h
+	cycles 7
+	
+00001001 add_hl_bc
+	local hlw 16
+	local bcw 16
+	meta hlt hlw
+	lsl b 8 bcw
+	or c bcw bcw
+	z80_add16_hl bcw
+	
+00011001 add_hl_de
+	local hlw 16
+	local dew 16
+	meta hlt hlw
+	lsl d 8 dew
+	or e dew dew
+	z80_add16_hl dew
+	
+00101001 add_hl_hl
+	local hlw 16
+	meta hlt hlw
+	z80_add16_hl hlw
+	
+00111001 add_hl_sp
+	local hlw 16
+	meta hlt hlw
+	z80_add16_hl sp
+	
+dd 00001001 add_ix_bc
+	lsl b 8 scratch1
+	or c scratch1 scratch1
+	add scratch1 ix ix
+	update_flags YHXN0C
+	cycles 7
+	
+dd 00011001 add_ix_de
+	lsl d 8 scratch1
+	or e scratch1 scratch1
+	add scratch1 ix ix
+	update_flags YHXN0C
+	cycles 7
+	
+dd 00101001 add_ix_ix
+	add ix ix ix
+	update_flags YHXN0C
+	cycles 7
+	
+dd 00111001 add_ix_sp
+	add sp ix ix
+	update_flags YHXN0C
+	cycles 7
+	
+fd 00001001 add_iy_bc
+	lsl b 8 scratch1
+	or c scratch1 scratch1
+	add scratch1 iy iy
+	update_flags YHXN0C
+	cycles 7
+	
+fd 00011001 add_iy_de
+	lsl d 8 scratch1
+	or e scratch1 scratch1
+	add scratch1 iy iy
+	update_flags YHXN0C
+	cycles 7
+	
+fd 00101001 add_iy_iy
+	add iy iy iy
+	update_flags YHXN0C
+	cycles 7
+	
+fd 00111001 add_iy_sp
+	add sp iy iy
+	update_flags YHXN0C
+	cycles 7
+	
+10001RRR adc_reg
+	adc a main.R a
+	update_flags SZYHVXN0C
+	
+dd 10001100 adc_ixh
+	lsr ix 8 scratch1
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+	
+dd 10001101 adc_ixl
+	and ix 0xFF scratch1
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+	
+fd 10001100 adc_iyh
+	lsr iy 8 scratch1
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+	
+fd 10001101 adc_iyl
+	and iy 0xFF scratch1
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+
+10001110 adc_hl
+	z80_fetch_hl
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+	
+dd 10001110 adc_ixd
+	z80_fetch_index ix
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+	
+fd 10001110 adc_iyd
+	z80_fetch_index iy
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+
+11001110 adc_immed
+	z80_fetch_immed
+	adc a scratch1 a
+	update_flags SZYHVXN0C
+	
+z80_adc16_hl
+	arg src 16
+	lsl h 8 hlt
+	or l hlt hlt
+	add 1 hlt wz
+	adc src hlt hlt
+	update_flags SZYHVXN0C
+	mov hlt l
+	lsr hlt 8 h
+	cycles 7
+	
+ed 01001010 adc_hl_bc
+	local hlw 16
+	local bcw 16
+	meta hlt hlw
+	lsl b 8 bcw
+	or c bcw bcw
+	z80_adc16_hl bcw
+	
+ed 01011010 adc_hl_de
+	local hlw 16
+	local dew 16
+	meta hlt hlw
+	lsl d 8 dew
+	or e dew dew
+	z80_adc16_hl dew
+	
+ed 01101010 adc_hl_hl
+	local hlw 16
+	meta hlt hlw
+	z80_adc16_hl hlw
+
+	
+ed 01111010 adc_hl_sp
+	local hlw 16
+	meta hlt hlw
+	z80_adc16_hl sp
+
+10010RRR sub_reg
+	sub main.R a a
+	update_flags SZYHVXN1C
+	
+dd 10010100 sub_ixh
+	lsr ix 8 scratch1
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+	
+dd 10010101 sub_ixl
+	and ix 0xFF scratch1
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+	
+fd 10010100 sub_iyh
+	lsr iy 8 scratch1
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+	
+fd 10010101 sub_iyl
+	and iy 0xFF scratch1
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+	
+10010110 sub_hl
+	z80_fetch_hl
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+	
+dd 10010110 sub_ixd
+	z80_fetch_index ix
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+
+fd 10010110 sub_iyd
+	z80_fetch_index iy
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+
+11010110 sub_immed
+	z80_fetch_immed
+	sub scratch1 a a
+	update_flags SZYHVXN1C
+
+10011RRR sbc_reg
+	sbc main.R a a
+	update_flags SZYHVXN1C
+	
+dd 10011100 sbc_ixh
+	lsr ix 8 scratch1
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+	
+dd 10011101 sbc_ixl
+	and ix 0xFF scratch1
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+	
+fd 10011100 sbc_iyh
+	lsr iy 8 scratch1
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+	
+fd 10011101 sbc_iyl
+	and iy 0xFF scratch1
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+	
+	
+10011110 sbc_hl
+	z80_fetch_hl
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+	
+dd 10011110 sbc_ixd
+	z80_fetch_index ix
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+
+fd 10011110 sbc_iyd
+	z80_fetch_index iy
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+
+11011110 sbc_immed
+	z80_fetch_immed
+	sbc scratch1 a a
+	update_flags SZYHVXN1C
+	
+z80_sbc16_hl
+	arg src 16
+	lsl h 8 hlt
+	or l hlt hlt
+	add 1 hlt wz
+	sbc src hlt hlt
+	update_flags SZYHVXN1C
+	mov hlt l
+	lsr hlt 8 h
+	cycles 7
+	
+ed 01000010 sbc_hl_bc
+	local hlw 16
+	local bcw 16
+	meta hlt hlw
+	lsl b 8 bcw
+	or c bcw bcw
+	z80_sbc16_hl bcw
+	
+ed 01010010 sbc_hl_de
+	local hlw 16
+	local dew 16
+	meta hlt hlw
+	lsl d 8 dew
+	or e dew dew
+	z80_sbc16_hl dew
+	
+ed 01100010 sbc_hl_hl
+	local hlw 16
+	meta hlt hlw
+	z80_sbc16_hl hlw
+
+	
+ed 01110010 sbc_hl_sp
+	local hlw 16
+	meta hlt hlw
+	z80_sbc16_hl sp
+
+10100RRR and_reg
+	and a main.R a
+	update_flags SZYH1PXN0C0
+	
+dd 10100100 and_ixh
+	lsr ix 8 scratch1
+	and scratch1 a a
+	update_flags SZYH1PXN0C0
+	
+dd 10100101 and_ixl
+	and ix a a
+	update_flags SZYH1PXN0C0
+	
+fd 10100100 and_iyh
+	lsr iy 8 scratch1
+	and scratch1 a a
+	update_flags SZYH1PXN0C0
+	
+fd 10100101 and_iyl
+	and iy a a
+	update_flags SZYH1PXN0C0
+	
+10100110 and_hl
+	z80_fetch_hl
+	and a scratch1 a
+	update_flags SZYH1PXN0C0
+	
+dd 10100110 and_ixd
+	z80_fetch_index ix
+	and a scratch1 a
+	update_flags SZYH1PXN0C0
+	
+fd 10100110 and_iyd
+	z80_fetch_index iy
+	and a scratch1 a
+	update_flags SZYH1PXN0C0
+
+11100110 and_immed
+	z80_fetch_immed
+	and a scratch1 a
+	update_flags SZYH1PXN0C0
+	
+10110RRR or_reg
+	or a main.R a
+	update_flags SZYH0PXN0C0
+	
+dd 10110100 or_ixh
+	lsr ix 8 scratch1
+	or scratch1 a a
+	update_flags SZYH0PXN0C0
+	
+dd 10110101 or_ixl
+	or ix a a
+	update_flags SZYH0PXN0C0
+	
+fd 10110100 or_iyh
+	lsr iy 8 scratch1
+	or scratch1 a a
+	update_flags SZYH0PXN0C0
+	
+fd 10110101 or_iyl
+	or iy a a
+	update_flags SZYH0PXN0C0
+	
+10110110 or_hl
+	z80_fetch_hl
+	or a scratch1 a
+	update_flags SZYH0PXN0C0
+	
+dd 10110110 or_ixd
+	z80_fetch_index ix
+	or a scratch1 a
+	update_flags SZYH0PXN0C0
+	
+fd 10110110 or_iyd
+	z80_fetch_index iy
+	or a scratch1 a
+	update_flags SZYH0PXN0C0
+
+11110110 or_immed
+	z80_fetch_immed
+	or a scratch1 a
+	update_flags SZYH0PXN0C0
+	
+10101RRR xor_reg
+	xor a main.R a
+	update_flags SZYH0PXN0C0
+	
+dd 10101100 xor_ixh
+	lsr ix 8 scratch1
+	xor scratch1 a a
+	update_flags SZYH0PXN0C0
+	
+dd 10101101 xor_ixl
+	xor ix a a
+	update_flags SZYH0PXN0C0
+	
+fd 10101100 xor_iyh
+	lsr iy 8 scratch1
+	xor scratch1 a a
+	update_flags SZYH0PXN0C0
+	
+fd 10101101 xor_iyl
+	xor iy a a
+	update_flags SZYH0PXN0C0
+	
+10101110 xor_hl
+	z80_fetch_hl
+	xor a scratch1 a
+	update_flags SZYH0PXN0C0
+	
+dd 10101110 xor_ixd
+	z80_fetch_index ix
+	xor a scratch1 a
+	update_flags SZYH0PXN0C0
+	
+fd 10101110 xor_iyd
+	z80_fetch_index iy
+	xor a scratch1 a
+	update_flags SZYH0PXN0C0
+
+11101110 xor_immed
+	z80_fetch_immed
+	xor a scratch1 a
+	update_flags SZYH0PXN0C0
+
+10111RRR cp_reg
+	mov main.R last_flag_result
+	cmp main.R a
+	update_flags SZHVN1C
+	
+dd 10111100 cp_ixh
+	local tmp 8
+	lsr ix 8 tmp
+	mov tmp last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+	
+dd 10111101 cp_ixl
+	local tmp 8
+	mov ix tmp
+	mov ix last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+	
+fd 10111100 cp_iyh
+	local tmp 8
+	lsr iy 8 tmp
+	mov tmp last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+	
+fd 10111101 cp_iyl
+	local tmp 8
+	mov iy tmp
+	mov iy last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+	
+10111110 cp_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	mov scratch1 last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+	
+dd 10111110 cp_ixd
+	local tmp 8
+	z80_fetch_index ix
+	mov scratch1 tmp
+	mov scratch1 last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+	
+fd 10111110 cp_iyd
+	local tmp 8
+	z80_fetch_index iy
+	mov scratch1 tmp
+	mov scratch1 last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+
+11111110 cp_immed
+	local tmp 8
+	z80_fetch_immed
+	mov scratch1 tmp
+	mov scratch1 last_flag_result
+	cmp tmp a
+	update_flags SZHVN1C
+
+00RRR100 inc_reg
+	add 1 main.R main.R
+	update_flags SZYHVXN0
+	
+dd 00100100 inc_ixh
+	add 0x100 ix ix
+	update_flags SZYHVXN0
+	
+dd 00101100 inc_ixl
+	local tmp 8
+	mov ix tmp
+	add 1 tmp tmp
+	update_flags SZYHVXN0
+	and 0xFF00 ix ix
+	or tmp ix ix
+	
+fd 00100100 inc_iyh
+	add 0x100 iy iy
+	update_flags SZYHVXN0
+	
+fd 00101100 inc_iyl
+	local tmp 8
+	mov iy tmp
+	add 1 tmp tmp
+	update_flags SZYHVXN0
+	and 0xFF00 iy iy
+	or tmp iy iy
+	
+00110100 inc_hl
+	local tmp 8
+	z80_fetch_hl
+	#TODO: Either make DSL compiler smart enough to optimize these unnecessary moves out
+	#or add some syntax to force a certain size on an operation so they are unnecessary
+	mov scratch1 tmp
+	add 1 tmp tmp
+	update_flags SZYHVXN0
+	mov tmp scratch1
+	cycles 1
+	z80_store_hl
+	
+dd 00110100 inc_ixd
+	local tmp 8
+	z80_fetch_index ix
+	#TODO: Either make DSL compiler smart enough to optimize these unnecessary moves out
+	#or add some syntax to force a certain size on an operation so they are unnecessary
+	mov scratch1 tmp
+	add 1 tmp tmp
+	update_flags SZYHVXN0
+	mov tmp scratch1
+	cycles 1
+	z80_store_index
+
+fd 00110100 inc_iyd
+	local tmp 8
+	z80_fetch_index iy
+	#TODO: Either make DSL compiler smart enough to optimize these unnecessary moves out
+	#or add some syntax to force a certain size on an operation so they are unnecessary
+	mov scratch1 tmp
+	add 1 tmp tmp
+	update_flags SZYHVXN0
+	mov tmp scratch1
+	cycles 1
+	z80_store_index
+	
+z80_inc_pair
+	arg high 8
+	arg low 8
+	cycles 2
+	local word 16
+	lsl high 8 word
+	or low word word
+	add 1 word word
+	mov word low
+	lsr word 8 high
+	
+00000011 inc_bc
+	z80_inc_pair b c
+	
+00010011 inc_de
+	z80_inc_pair d e
+	
+00100011 inc16_hl
+	z80_inc_pair h l
+
+00110011 inc_sp
+	add 1 sp sp
+	
+dd 00100011 inc_ix
+	add 1 ix ix
+
+fd 00100011 inc_iy
+	add 1 iy iy
+
+00RRR101 dec_reg
+	sub 1 main.R main.R
+	update_flags SZYHVXN1
+	
+dd 00100101 dec_ixh
+	sub 0x100 ix ix
+	update_flags SZYHVXN1
+	
+dd 00101101 dec_ixl
+	local tmp 8
+	mov ix tmp
+	sub 1 tmp tmp
+	update_flags SZYHVXN1
+	and 0xFF00 ix ix
+	or tmp ix ix
+	
+fd 00100101 dec_iyh
+	sub 0x100 iy iy
+	update_flags SZYHVXN1
+	
+fd 00101101 dec_iyl
+	local tmp 8
+	mov iy tmp
+	sub 1 tmp tmp
+	update_flags SZYHVXN1
+	and 0xFF00 iy iy
+	or tmp iy iy
+	
+00110101 dec_hl
+	local tmp 8
+	z80_fetch_hl
+	#TODO: Either make DSL compiler smart enough to optimize these unnecessary moves out
+	#or add some syntax to force a certain size on an operation so they are unnecessary
+	mov scratch1 tmp
+	sub 1 tmp tmp
+	update_flags SZYHVXN1
+	mov tmp scratch1
+	cycles 1
+	z80_store_hl
+	
+dd 00110101 dec_ixd
+	local tmp 8
+	z80_fetch_index ix
+	#TODO: Either make DSL compiler smart enough to optimize these unnecessary moves out
+	#or add some syntax to force a certain size on an operation so they are unnecessary
+	mov scratch1 tmp
+	sub 1 tmp tmp
+	update_flags SZYHVXN1
+	mov tmp scratch1
+	cycles 1
+	z80_store_index
+
+fd 00110101 dec_iyd
+	local tmp 8
+	z80_fetch_index iy
+	#TODO: Either make DSL compiler smart enough to optimize these unnecessary moves out
+	#or add some syntax to force a certain size on an operation so they are unnecessary
+	mov scratch1 tmp
+	sub 1 tmp tmp
+	update_flags SZYHVXN1
+	mov tmp scratch1
+	cycles 1
+	z80_store_index
+	
+z80_dec_pair
+	arg high 8
+	arg low 8
+	local word 16
+	lsl high 8 word
+	or low word word
+	sub 1 word word
+	mov word low
+	lsr word 8 high
+	cycles 2
+	
+00001011 dec_bc
+	z80_dec_pair b c
+	
+00011011 dec_de
+	z80_dec_pair d e
+	
+00101011 dec16_hl
+	z80_dec_pair h l
+
+00111011 dec_sp
+	sub 1 sp sp
+	
+dd 00101011 dec_ix
+	sub 1 ix ix
+
+fd 00101011 dec_iy
+	sub 1 iy iy
+
+00101111 cpl
+	not a a
+	update_flags YH1XN1
+
+ed 01DDD100 neg
+	neg a a
+	update_flags SZYHVXN1C
+
+00111111 ccf
+	local tmp 8
+	and 0x80 last_flag_result last_flag_result
+	and 0x7F a tmp
+	or tmp last_flag_result last_flag_result
+	and 0x80 chflags chflags
+	lsr chflags 4 tmp
+	or tmp chflags chflags
+	xor 0x80 chflags chflags
+	update_flags N0
+
+00110111 scf
+	local tmp 8
+	and 0x80 last_flag_result last_flag_result
+	and 0x7F a tmp
+	or tmp last_flag_result last_flag_result
+	update_flags H0N0C1
+
+00000000 nop
+
+01110110 halt
+	cmp nmi_cycle cycles
+	if >=U
+	
+	else
+	cmp int_cycle cycles
+	if >=U
+	
+	if iff1
+	else
+	sub 1 pc pc
+	end
+	
+	else
+	sub 1 pc pc
+	end
+	end
+
+11110011 di
+	mov 0 iff1
+	mov 0 iff2
+	update_sync
+
+11111011 ei
+	mov 1 iff1
+	mov 1 iff2
+	update_sync
+	cmp int_cycle cycles
+	if >=U
+	
+	add 1 cycles int_cycle
+	
+	end
+
+ed 01D00110 im0
+	mov 0 imode
+
+ed 01D10110 im1
+	mov 1 imode
+
+ed 01D11110 im2
+	mov 2 imode
+	
+ed 01D01110 im3
+	#some sources call this mode 0/1, but it's unclear
+	#whether the behavior really differs from im 0
+	mov 0 imode
+	
+11000011 jp
+	z80_fetch_immed16
+	mov wz pc
+	
+11101001 jp_hl
+	lsl h 8 pc
+	or l pc pc
+	
+dd 11101001 jp_ix
+	mov ix pc
+	
+fd 11101001 jp_iy
+	mov iy pc
+	
+11CCC010 jp_cc
+	z80_check_cond C
+	z80_fetch_immed16
+	if istrue
+	
+	mov wz pc
+	
+	end
+	
+00011000 jr
+	z80_fetch_immed
+	#TODO: determine if this updates wz
+	sext 16 scratch1 scratch1
+	add scratch1 pc pc
+	cycles 5
+	
+001CC000 jr_cc
+	z80_check_cond C
+	z80_fetch_immed
+	
+	if istrue
+	
+	sext 16 scratch1 scratch1
+	add scratch1 pc pc
+	cycles 5
+	
+	end
+	
+00010000 djnz
+	cycles 1
+	z80_fetch_immed
+	sub 1 b b
+	
+	if b
+	
+	sext 16 scratch1 scratch1
+	add scratch1 pc pc
+	cycles 5
+	
+	end
+	
+
+11001101 call_uncond
+	z80_fetch_immed16
+	local pch 8
+	lsr pc 8 pch
+	meta high pch
+	meta low pc
+	z80_push
+	mov wz pc
+	
+11CCC100 call_cond
+	local pch 8
+	z80_fetch_immed16
+	z80_check_cond C
+	
+	if istrue
+	
+	lsr pc 8 pch
+	meta high pch
+	meta low pc
+	z80_push
+	mov wz pc
+	
+	end
+	
+11TTT111 rst
+	local pch 8
+	lsr pc 8 pch
+	meta high pch
+	meta low pc
+	z80_push
+	lsl T 3 scratch1
+	mov scratch1 pc
+
+11001001 ret
+	local pch 16
+	meta high pch
+	meta low pc
+	z80_pop
+	lsl pch 8 pch
+	or pch pc pc
+	
+ed 01001101 reti
+	local pch 16
+	cycles 1
+	meta high pch
+	meta low pc
+	z80_pop
+	lsl pch 8 pch
+	or pch pc pc
+	
+ed 01NN1101 retn
+	mov iff2 iff1
+	local pch 16
+	cycles 1
+	meta high pch
+	meta low pc
+	z80_pop
+	lsl pch 8 pch
+	or pch pc pc
+	
+11CCC000 ret_cond
+	local pch 16
+	cycles 1
+	z80_check_cond C
+	if istrue
+	
+	meta high pch
+	meta low pc
+	z80_pop
+	lsl pch 8 pch
+	or pch pc pc
+	
+	end
+
+11011011 in_abs
+	z80_fetch_immed
+	ocall io_read8
+	mov scratch1 a
+	
+ed 01RRR000 in_bc
+	lsl b 8 scratch1
+	or c scratch1 scratch1
+	ocall io_read8
+	mov scratch1 main.R
+	
+z80_ini_ind
+	arg change 16
+	local tmp 8
+	cycles 1
+	
+	lsl b 8 wz
+	or c wz wz
+	add change wz wz
+	
+	sub 1 b b
+	update_flags SZYX
+	
+	lsl b 8 scratch1
+	or c scratch1 scratch1
+	ocall io_read8
+	
+	and 0x80 scratch1 nflag
+	
+	mov wz tmp
+	add tmp scratch1 tmp
+	update_flags C
+	
+	z80_store_hl
+	
+	lsl h 8 scratch2
+	or l scratch2 scratch2
+	add change scratch2 scratch2
+	mov scratch2 l
+	lsr scratch2 8 h
+	
+	and 7 tmp tmp
+	xor b tmp tmp
+	update_flags P
+	lsr chflags 4 tmp
+	or tmp chflags chflags
+	
+ed 10100010 ini
+	z80_ini_ind 1
+	
+ed 10110010 inir
+	z80_ini_ind 1
+	if zflag
+	else
+	sub 2 pc pc
+	cycles 5
+	end
+	
+ed 10101010 ind
+	z80_ini_ind -1
+	
+ed 10111010 indr
+	z80_ini_ind -1
+	if zflag
+	else
+	sub 2 pc pc
+	cycles 5
+	end
+	
+11010011 out_abs
+	z80_fetch_immed
+	mov scratch1 scratch2
+	mov a scratch1
+	ocall io_write8
+	
+ed 01RRR001 out_bc
+	lsl b 8 scratch2
+	or c scratch2 scratch2
+	mov main.R scratch1
+	ocall io_write8
+	
+z80_outi_outd
+	arg change 16
+	local tmp 8
+	cycles 1
+	z80_fetch_hl
+	
+	and 0x80 scratch1 nflag
+	
+	lsl h 8 scratch2
+	or l scratch2 scratch2
+	add change scratch2 scratch2
+	mov scratch2 l
+	lsr scratch2 8 h
+	
+	add l scratch1 tmp
+	update_flags C
+	and 7 tmp tmp
+	
+	lsl b 8 scratch2
+	or c scratch2 scratch2
+	ocall io_write8
+	
+	sub 1 b b
+	update_flags SZYX
+	
+	lsl b 8 wz
+	or c wz wz
+	add change wz wz
+	
+	xor b tmp tmp
+	update_flags P
+	lsr chflags 4 tmp
+	or tmp chflags chflags
+	
+ed 10100011 outi
+	z80_outi_outd 1
+
+ed 10110011 otir
+	z80_outi_outd 1
+	if zflag
+	else
+	sub 2 pc pc
+	cycles 5
+	end
+	
+ed 10101011 outd
+	z80_outi_outd -1
+	
+ed 10111011 otdr
+	z80_outi_outd -1
+	if zflag
+	else
+	sub 2 pc pc
+	cycles 5
+	end
+	
+00000111 rlca
+	rol a 1 a
+	update_flags YH0XN0C
+	
+00010111 rla
+	rlc a 1 a
+	update_flags YH0XN0C
+	
+00001111 rrca
+	ror a 1 a
+	update_flags YH0XN0C
+
+00011111 rra
+	rrc a 1 a
+	update_flags YH0XN0C
+	
+cb 00000RRR rlc
+	rol main.R 1 main.R
+	update_flags SZYH0PXN0C
+	
+cb 00000110 rlc_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	rol tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_rlc_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	rol tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00000110 rlc_ixd
+	local tmp 8
+	z80_rlc_index tmp
+	
+ddcb 00000RRR rlc_ixd_reg
+	z80_rlc_index main.R
+	
+fdcb 00000110 rlc_iyd
+	local tmp 8
+	z80_rlc_index tmp
+	
+fdcb 00000RRR rlc_iyd_reg
+	z80_rlc_index main.R
+	
+cb 00010RRR rl
+	rlc main.R 1 main.R
+	update_flags SZYH0PXN0C
+
+cb 00010110 rl_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	rlc tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_rl_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	rlc tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00010110 rl_ixd
+	local tmp 8
+	z80_rl_index tmp
+	
+fdcb 00010110 rl_iyd
+	local tmp 8
+	z80_rl_index tmp
+	
+	
+ddcb 00010RRR rl_ixd_reg
+	z80_rl_index main.R
+	
+fdcb 00010RRR rl_iyd_reg
+	z80_rl_index main.R
+	
+cb 00001RRR rrc
+	ror main.R 1 main.R
+	update_flags SZYH0PXN0C
+	
+cb 00001110 rrc_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	ror tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_rrc_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	ror tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00001110 rrc_ixd
+	local tmp 8
+	z80_rrc_index tmp
+	
+ddcb 00001RRR rrc_ixd_reg
+	z80_rrc_index main.R
+	
+fdcb 00001110 rrc_iyd
+	local tmp 8
+	z80_rrc_index tmp
+	
+fdcb 00001RRR rrc_iyd_reg
+	z80_rrc_index main.R
+	
+cb 00011RRR rr
+	rrc main.R 1 main.R
+	update_flags SZYH0PXN0C
+	
+cb 00011110 rr_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	rrc tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_rr_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	rrc tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00011110 rr_ixd
+	local tmp 8
+	z80_rr_index tmp
+	
+ddcb 00011RRR rr_ixd_reg
+	z80_rr_index main.R
+	
+fdcb 00011110 rr_iyd
+	local tmp 8
+	z80_rr_index tmp
+	
+fdcb 00011RRR rr_iyd_reg
+	z80_rr_index main.R
+	
+cb 00100RRR sla
+	lsl main.R 1 main.R
+	update_flags SZYH0PXN0C
+	
+cb 00100110 sla_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	lsl tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_sla_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	lsl tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00100110 sla_ixd
+	local tmp 8
+	z80_sla_index tmp
+	
+ddcb 00100RRR sla_ixd_reg
+	z80_sla_index main.R
+	
+fdcb 00100110 sla_iyd
+	local tmp 8
+	z80_sla_index tmp
+	
+fdcb 00100RRR sla_iyd_reg
+	z80_sla_index main.R
+	
+cb 00101RRR sra
+	asr main.R 1 main.R
+	update_flags SZYH0PXN0C
+	
+cb 00101110 sra_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	asr tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_sra_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	asr tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00101110 sra_ixd
+	local tmp 8
+	z80_sra_index tmp
+	
+ddcb 00101RRR sra_ixd_reg
+	z80_sra_index main.R
+	
+fdcb 00101110 sra_iyd
+	local tmp 8
+	z80_sra_index tmp
+	
+fdcb 00101RRR sra_iyd_reg
+	z80_sra_index main.R
+	
+cb 00110RRR sll
+	lsl main.R 1 main.R
+	update_flags SZ0YH0XN0C
+	or 1 main.R main.R
+	update_flags P
+	
+cb 00110110 sll_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	lsl tmp 1 tmp
+	update_flags SZ0YH0XN0C
+	or 1 tmp tmp
+	update_flags P
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_sll_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	lsl tmp 1 tmp
+	update_flags SZ0YH0XN0C
+	or 1 tmp tmp
+	update_flags P
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00110110 sll_ixd
+	local tmp 8
+	z80_sll_index tmp
+	
+ddcb 00110RRR sll_ixd_reg
+	z80_sll_index main.R
+	
+fdcb 00110110 sll_iyd
+	local tmp 8
+	z80_sll_index tmp
+	
+fdcb 00110RRR sll_iyd_reg
+	z80_sll_index main.R
+	
+cb 00111RRR srl
+	lsr main.R 1 main.R
+	update_flags SZYH0PXN0C
+	
+cb 00111110 srl_hl
+	local tmp 8
+	z80_fetch_hl
+	mov scratch1 tmp
+	lsr tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_hl
+	
+z80_srl_index
+	arg tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	mov scratch1 tmp
+	lsr tmp 1 tmp
+	update_flags SZYH0PXN0C
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 00111110 srl_ixd
+	local tmp 8
+	z80_srl_index tmp
+	
+ddcb 00111RRR srl_ixd_reg
+	z80_srl_index main.R
+	
+fdcb 00111110 srl_iyd
+	local tmp 8
+	z80_srl_index tmp
+	
+fdcb 00111RRR srl_iyd_reg
+	z80_srl_index main.R
+	
+cb 01BBBRRR bit_reg
+	local tmp 8
+	lsl 1 B tmp
+	mov main.R last_flag_result
+	and main.R tmp tmp
+	update_flags SZH1PN0
+	
+cb 01BBB110 bit_hl
+	local tmp 8
+	z80_fetch_hl
+	cycles 1
+	lsl 1 B tmp
+	lsr wz 8 last_flag_result
+	and scratch1 tmp tmp
+	update_flags SZH1PN0
+	
+	
+ddcb 01BBBRRR bit_ixd
+	local tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	lsl 1 B tmp
+	lsr wz 8 last_flag_result
+	and scratch1 tmp tmp
+	update_flags SZH1PN0
+	
+fdcb 01BBBRRR bit_iyd
+	local tmp 8
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	lsl 1 B tmp
+	lsr wz 8 last_flag_result
+	and scratch1 tmp tmp
+	update_flags SZH1PN0
+	
+cb 10BBBRRR res_reg
+	local tmp 8
+	lsl 1 B tmp
+	not tmp tmp
+	and main.R tmp main.R
+	
+cb 10BBB110 res_hl
+	z80_fetch_hl
+	cycles 1
+	local tmp 8
+	lsl 1 B tmp
+	not tmp tmp
+	and scratch1 tmp scratch1
+	z80_store_hl
+	
+z80_res_index
+	arg bit 8
+	arg tmp 8
+	lsl 1 bit tmp
+	not tmp tmp
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	and scratch1 tmp tmp
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 10BBB110 res_ixd
+	local tmp 8
+	z80_res_index B tmp
+	
+ddcb 10BBBRRR res_ixd_reg
+	z80_res_index B main.R
+	
+fdcb 10BBB110 res_iyd
+	local tmp 8
+	z80_res_index B tmp
+	
+fdcb 10BBBRRR res_iyd_reg
+	z80_res_index B main.R
+	
+cb 11BBBRRR set_reg
+	local tmp 8
+	lsl 1 B tmp
+	or main.R tmp main.R
+	
+cb 11BBB110 set_hl
+	z80_fetch_hl
+	cycles 1
+	local tmp 8
+	lsl 1 B tmp
+	or scratch1 tmp scratch1
+	z80_store_hl
+	
+z80_set_index
+	arg bit 8
+	arg tmp 8
+	lsl 1 bit tmp
+	mov wz scratch1
+	ocall read_8
+	cycles 1
+	or scratch1 tmp tmp
+	mov tmp scratch1
+	z80_store_index
+	
+ddcb 11BBB110 set_ixd
+	local tmp 8
+	z80_set_index B tmp
+	
+ddcb 11BBBRRR set_ixd_reg
+	z80_set_index B main.R
+	
+fdcb 11BBB110 set_iyd
+	local tmp 8
+	z80_set_index B tmp
+	
+fdcb 11BBBRRR set_iyd_reg
+	z80_set_index B main.R
+	
+z80_fetch_mod_hl
+	local tmp 16
+	arg change 16
+	lsl h 8 tmp
+	or l tmp tmp
+	mov tmp scratch1
+	add change tmp tmp
+	mov tmp l
+	lsr tmp 8 h
+	ocall read_8
+	cycles 2
+	
+z80_ldd_ldi
+	arg change 16
+	local tmp 16
+	local tmp8 8
+	z80_fetch_mod_hl change
+	
+	add a scratch1 tmp8
+	update_flags H0XN0
+	
+	and 0x2 tmp8 tmp8
+	lsl tmp8 4 tmp8
+	and 0x88 last_flag_result last_flag_result
+	or tmp8 last_flag_result last_flag_result
+	
+	lsl d 8 tmp
+	or e tmp tmp
+	mov tmp scratch2
+	add change tmp tmp
+	mov tmp e
+	lsr tmp 8 d
+	ocall write_8
+	
+	lsl b 8 tmp
+	or c tmp tmp
+	sub 1 tmp tmp
+	
+	mov tmp c
+	lsr tmp 8 b
+	mov c pvflag
+	or b pvflag pvflag
+	
+
+ed 10100000 ldi
+	z80_ldd_ldi 1
+
+ed 10101000 ldd
+	z80_ldd_ldi -1
+
+ed 10110000 ldir
+	z80_ldd_ldi 1
+	if pvflag
+	
+	add 1 pc wz
+	sub 2 pc pc
+	cycles 5
+	
+	end
+
+ed 10111000 lddr
+	z80_ldd_ldi -1
+	if pvflag
+	
+	add 1 pc wz
+	sub 2 pc pc
+	cycles 5
+	
+	end
+	
+z80_cpd_cpi
+	local tmp 16
+	local tmp8 8
+	local hf 8
+	arg change 16
+	
+	z80_fetch_mod_hl change
+	sub scratch1 a tmp8
+	update_flags SZHN1
+	
+	lsr chflags 3 hf
+	and 1 hf hf
+	
+	sub hf tmp8 tmp8
+	update_flags X
+	
+	and 0x2 tmp8 tmp8
+	lsl tmp8 4 tmp8
+	and 0x88 last_flag_result last_flag_result
+	or tmp8 last_flag_result last_flag_result
+	
+	lsl b 8 tmp
+	or c tmp tmp
+	sub 1 tmp tmp
+	
+	mov tmp c
+	lsr tmp 8 b
+	mov c pvflag
+	or b pvflag pvflag
+	
+	cycles 5
+	
+ed 10100001 cpi
+	z80_cpd_cpi 1
+	
+ed 10101001 cpd
+	z80_cpd_cpi -1
+	
+ed 10110001 cpir
+	z80_cpd_cpi 1
+	if pvflag
+	
+	if zflag
+	
+	else
+	
+	add 1 pc wz
+	sub 2 pc pc
+	cycles 5
+	
+	end
+	end
+	
+ed 10111001 cpdr
+	z80_cpd_cpi -1
+	if pvflag
+	
+	if zflag
+	
+	else
+	
+	add 1 pc wz
+	sub 2 pc pc
+	cycles 5
+	
+	end
+	end
+
+00100111 daa
+	local diff 8
+	local tmp 8
+	local low 8
+	and 0xF a low
+	and 0x8 chflags tmp
+	if tmp
+	
+	mov 6 diff
+	
+	else
+	
+	cmp 0xA low
+	if >=U
+	mov 6 diff
+	else
+	mov 0 diff
+	end
+	
+	end
+	
+	and 0x80 chflags tmp
+	if tmp
+	
+	or 0x60 diff diff
+	update_flags C1
+	
+	else
+	
+	cmp 0x9A a
+	if >=U
+	or 0x60 diff diff
+	update_flags C1
+	else
+	update_flags C0
+	end
+	end
+	
+	if nflag
+	
+	sub diff a a
+	update_flags SZYHPX
+	
+	else
+	
+	add diff a a
+	update_flags SZYHPX
+	
+	end
+	
+dd OOOOOOOO dd_normal
+	dispatch O
+
+fd OOOOOOOO fd_normal
+	dispatch O
+	
+ed 01101111 rld
+	local tmp 8
+	local tmp2 8
+	z80_fetch_hl
+	cycles 4
+	
+	lsr scratch1 4 tmp
+	
+	lsl scratch1 4 scratch1
+	
+	and 0xF a tmp2
+	or tmp2 scratch1 scratch1
+	
+	and 0xF0 a a
+	or tmp a a
+	update_flags SZYH0XPN0
+	z80_store_hl
+	
+ed 01100111 rrd
+	local tmp 8
+	local tmp2 8
+	z80_fetch_hl
+	cycles 4
+	
+	and 0xF scratch1 tmp
+	lsr scratch1 4 scratch1
+	
+	lsl a 4 tmp2
+	or tmp2 scratch1 scratch1
+	
+	and 0xF0 a a
+	or tmp a a
+	update_flags SZYH0XPN0
+	z80_store_hl
+	
\ No newline at end of file
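The rld and rrd handlers at the end of the file rotate BCD nibbles between A and (HL). A hedged C sketch of just the data movement, with the memory byte passed by pointer instead of travelling through scratch1 and z80_store_hl:

#include <stdint.h>

static void mini_rld(uint8_t *a, uint8_t *mem_hl)
{
	uint8_t m = *mem_hl;
	*mem_hl = (uint8_t)(m << 4) | (*a & 0x0F);  /* (HL): old low nibble moves up, low nibble of A fills the bottom */
	*a = (*a & 0xF0) | (m >> 4);                /* A low nibble <- old high nibble of (HL) */
}

static void mini_rrd(uint8_t *a, uint8_t *mem_hl)
{
	uint8_t m = *mem_hl;
	*mem_hl = (uint8_t)(*a << 4) | (m >> 4);    /* (HL): low nibble of A on top, old high nibble of (HL) below */
	*a = (*a & 0xF0) | (m & 0x0F);              /* A low nibble <- old low nibble of (HL) */
}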
--- a/z80_to_x86.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/z80_to_x86.c	Fri Mar 01 14:17:29 2019 -0800
@@ -355,6 +355,8 @@
 			}
 			if (inst->reg == Z80_I || inst->ea_reg == Z80_I || inst->reg == Z80_R || inst->ea_reg == Z80_R) {
 				num_cycles += 1;
+			} else if (inst->reg == Z80_USE_IMMED) {
+				num_cycles += 3;
 			}
 			break;
 		case Z80_IMMED:
@@ -874,7 +876,7 @@
 		} else if(inst->addr_mode == Z80_IMMED) {
 			num_cycles += 3;
 		} else if(z80_size(inst) == SZ_W) {
-			num_cycles += 4;
+			num_cycles += 7;
 		}
 		cycles(&opts->gen, num_cycles);
 		translate_z80_reg(inst, &dst_op, opts);
@@ -942,7 +944,7 @@
 		} else if(inst->addr_mode == Z80_IMMED) {
 			num_cycles += 3;
 		} else if(z80_size(inst) == SZ_W) {
-			num_cycles += 4;
+			num_cycles += 7;
 		}
 		cycles(&opts->gen, num_cycles);
 		translate_z80_reg(inst, &dst_op, opts);
@@ -1073,7 +1075,7 @@
 		} else if(inst->addr_mode == Z80_IMMED) {
 			num_cycles += 3;
 		} else if(z80_size(inst) == SZ_W) {
-			num_cycles += 4;
+			num_cycles += 7;
 		}
 		cycles(&opts->gen, num_cycles);
 		translate_z80_reg(inst, &dst_op, opts);
@@ -1261,6 +1263,10 @@
 	case Z80_DEC:
 		if(z80_size(inst) == SZ_W) {
 			num_cycles += 2;
+		} else if (inst->addr_mode == Z80_REG_INDIRECT) {
+			num_cycles += 1;
+		} else if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
+			num_cycles += 9;
 		}
 		cycles(&opts->gen, num_cycles);
 		translate_z80_reg(inst, &dst_op, opts);
@@ -1853,7 +1859,7 @@
 		break;
 	case Z80_BIT: {
 		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-			num_cycles += 8;
+			num_cycles += 4;
 		}
 		cycles(&opts->gen, num_cycles);
 		uint8_t bit;
@@ -1914,7 +1920,7 @@
 	}
 	case Z80_SET: {
 		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-			num_cycles += 8;
+			num_cycles += 4;
 		}
 		cycles(&opts->gen, num_cycles);
 		uint8_t bit;
@@ -1983,7 +1989,7 @@
 	}
 	case Z80_RES: {
 		if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
-			num_cycles += 8;
+			num_cycles += 4;
 		}
 		cycles(&opts->gen, num_cycles);
 		uint8_t bit;
@@ -2593,7 +2599,7 @@
 		break;
 	}
 	case Z80_OUT:
-		if (inst->reg == Z80_A) {
+		if (inst->addr_mode == Z80_IMMED_INDIRECT) {
 			num_cycles += 3;
 		}
 		cycles(&opts->gen, num_cycles);//T States: 4 3/4
@@ -2658,7 +2664,6 @@
 		setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
 		break;
 	case Z80_OTIR: {
-		code_ptr start = code->cur;
 		cycles(&opts->gen, num_cycles + 1);//T States: 4, 5
 		//read from (HL)
 		zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
@@ -2758,7 +2763,6 @@
 		setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
 		break;
 	case Z80_OTDR: {
-		code_ptr start = code->cur;
 		cycles(&opts->gen, num_cycles + 1);//T States: 4, 5
 		//read from (HL)
 		zreg_to_native(opts, Z80_HL, opts->gen.scratch1);
@@ -3420,14 +3424,15 @@
 	jcc(code, CC_NZ, is_nmi);
 	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
+	cycles(&options->gen, 6); //interrupt ack cycle
 	code_ptr after_int_disable = code->cur + 1;
 	jmp(code, after_int_disable);
 	*is_nmi = code->cur - (is_nmi + 1);
 	mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, iff1), options->gen.scratch2, SZ_B);
 	mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
 	mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
+	cycles(&options->gen, 5); //NMI processing cycles
 	*after_int_disable = code->cur - (after_int_disable + 1);
-	cycles(&options->gen, 7);
 	//save return address (in scratch1) to Z80 stack
 	sub_ir(code, 2, options->regs[Z80_SP], SZ_W);
 	mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
@@ -3453,7 +3458,6 @@
 	cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, int_is_nmi), SZ_B);
 	is_nmi = code->cur + 1;
 	jcc(code, CC_NZ, is_nmi);
-	cycles(&options->gen, 6); //interupt ack cycle
 	//TODO: Support interrupt mode 0, not needed for Genesis since it seems to read $FF during intack
 	//which is conveniently rst $38, i.e. the same thing that im 1 does
 	//check interrupt mode
@@ -3461,6 +3465,7 @@
 	code_ptr im2 = code->cur + 1;
 	jcc(code, CC_Z, im2);
 	mov_ir(code, 0x38, options->gen.scratch1, SZ_W);
+	cycles(&options->gen, 1); //total time for mode 0/1 is 13 t-states
 	code_ptr after_int_dest = code->cur + 1;
 	jmp(code, after_int_dest);
 	*im2 = code->cur - (im2 + 1);
@@ -3501,10 +3506,12 @@
 	//HACK
 	options->gen.address_size = SZ_D;
 	options->gen.address_mask = io_address_mask;
+	options->gen.bus_cycles = 4;
 	options->read_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, READ_8, NULL);
 	options->write_io = gen_mem_fun(&options->gen, io_chunks, num_io_chunks, WRITE_8, NULL);
 	options->gen.address_size = SZ_W;
 	options->gen.address_mask = 0xFFFF;
+	options->gen.bus_cycles = 3;
 
 	options->read_16 = code->cur;
 	cycles(&options->gen, 3);
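The (IX+d)/(IY+d) bit-operation hunks above lower the per-opcode adjustment from 8 to 4 cycles. The documented totals for these DDCB-prefixed forms are 20 t-states for BIT b,(IX+d) and 23 for SET/RES b,(IX+d); how BlastEm splits that between prefix decoding and this adjustment is an internal detail, so the hedged sketch below only checks the totals:

#include <assert.h>

enum {
	TS_BIT_IXD    = 4 + 4 + 3 + 5 + 4,     /* BIT b,(IX+d): DD, CB, d, fetch/compute, read */
	TS_SETRES_IXD = 4 + 4 + 3 + 5 + 4 + 3  /* SET/RES b,(IX+d): same plus the write-back */
};

int main(void)
{
	assert(TS_BIT_IXD == 20);
	assert(TS_SETRES_IXD == 23);
	return 0;
}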
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/z80_util.c	Fri Mar 01 14:17:29 2019 -0800
@@ -0,0 +1,245 @@
+#include <string.h>
+
+void z80_read_8(z80_context *context)
+{
+	context->cycles += 3 * context->opts->gen.clock_divider;
+	uint8_t *fast = context->fastread[context->scratch1 >> 10];
+	if (fast) {
+		context->scratch1 = fast[context->scratch1 & 0x3FF];
+	} else {
+		context->scratch1 = read_byte(context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context);
+	}
+}
+
+void z80_write_8(z80_context *context)
+{
+	context->cycles += 3 * context->opts->gen.clock_divider;
+	uint8_t *fast = context->fastwrite[context->scratch2 >> 10];
+	if (fast) {
+		fast[context->scratch2 & 0x3FF] = context->scratch1;
+	} else {
+		write_byte(context->scratch2, context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context);
+	}
+}
+
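z80_read_8 and z80_write_8 above take a fast path whenever the 1KB page containing the address maps to contiguous host memory; fastread and fastwrite hold one host pointer per page and are rebuilt by z80_invalidate_code_range further down. A hedged illustration of the indexing only, with hypothetical names standing in for the real tables:

#include <stdint.h>

#define Z80_PAGE_SHIFT 10
#define Z80_PAGE_MASK  0x3FF
#define Z80_NUM_PAGES  (0x10000 >> Z80_PAGE_SHIFT)  /* 64 pages of 1KB */

static uint8_t *fast_pages[Z80_NUM_PAGES];

static int fast_read(uint16_t address, uint8_t *out)
{
	uint8_t *page = fast_pages[address >> Z80_PAGE_SHIFT];  /* host base of the page, or NULL */
	if (!page) {
		return 0;  /* caller falls back to the slow memmap-walking read_byte path */
	}
	*out = page[address & Z80_PAGE_MASK];  /* offset within the 1KB page */
	return 1;
}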
+void z80_io_read8(z80_context *context)
+{
+	uint32_t tmp_mask = context->opts->gen.address_mask;
+	memmap_chunk const *tmp_map = context->opts->gen.memmap;
+	uint32_t tmp_chunks = context->opts->gen.memmap_chunks;
+	
+	context->opts->gen.address_mask = context->io_mask;
+	context->opts->gen.memmap = context->io_map;
+	context->opts->gen.memmap_chunks = context->io_chunks;
+	
+	context->cycles += 4 * context->opts->gen.clock_divider;
+	context->scratch1 = read_byte(context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context);
+	
+	context->opts->gen.address_mask = tmp_mask;
+	context->opts->gen.memmap = tmp_map;
+	context->opts->gen.memmap_chunks = tmp_chunks;
+}
+
+void z80_io_write8(z80_context *context)
+{
+	uint32_t tmp_mask = context->opts->gen.address_mask;
+	memmap_chunk const *tmp_map = context->opts->gen.memmap;
+	uint32_t tmp_chunks = context->opts->gen.memmap_chunks;
+	
+	context->opts->gen.address_mask = context->io_mask;
+	context->opts->gen.memmap = context->io_map;
+	context->opts->gen.memmap_chunks = context->io_chunks;
+	
+	context->cycles += 4 * context->opts->gen.clock_divider;
+	write_byte(context->scratch2, context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context);
+	
+	context->opts->gen.address_mask = tmp_mask;
+	context->opts->gen.memmap = tmp_map;
+	context->opts->gen.memmap_chunks = tmp_chunks;
+}
+
+//quick hack until I get a chance to change which init method these get passed to
+static memmap_chunk const * tmp_io_chunks;
+static uint32_t tmp_num_io_chunks, tmp_io_mask;
+void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, memmap_chunk const * io_chunks, uint32_t num_io_chunks, uint32_t clock_divider, uint32_t io_address_mask)
+{
+	memset(options, 0, sizeof(*options));
+	options->gen.memmap = chunks;
+	options->gen.memmap_chunks = num_chunks;
+	options->gen.address_mask = 0xFFFF;
+	options->gen.max_address = 0xFFFF;
+	options->gen.clock_divider = clock_divider;
+	tmp_io_chunks = io_chunks;
+	tmp_num_io_chunks = num_io_chunks;
+	tmp_io_mask = io_address_mask;
+}
+
+void z80_options_free(z80_options *opts)
+{
+	free(opts);
+}
+
+z80_context * init_z80_context(z80_options *options)
+{
+	z80_context *context = calloc(1, sizeof(z80_context));
+	context->opts = options;
+	context->io_map = (memmap_chunk *)tmp_io_chunks;
+	context->io_chunks = tmp_num_io_chunks;
+	context->io_mask = tmp_io_mask;
+	context->int_cycle = context->int_end_cycle = context->nmi_cycle = 0xFFFFFFFFU;
+	z80_invalidate_code_range(context, 0, 0xFFFF);
+	return context;
+}
+
+void z80_sync_cycle(z80_context *context, uint32_t target_cycle)
+{
+	if (context->iff1 && context->int_cycle < target_cycle) {
+		if (context->cycles > context->int_end_cycle) {
+			context->int_cycle = 0xFFFFFFFFU;
+		} else {
+			target_cycle = context->int_cycle;
+		}
+	}
+	if (context->nmi_cycle < target_cycle) {
+		target_cycle = context->nmi_cycle;
+	}
+	context->sync_cycle = target_cycle;
+}
+
+void z80_run(z80_context *context, uint32_t target_cycle)
+{
+	if (context->reset || context->busack) {
+		context->cycles = target_cycle;
+	} else if (target_cycle > context->cycles) {
+		if (context->busreq) {
+			//busreq is sampled at the end of an m-cycle
+			//we can approximate that by running for a single m-cycle after a bus request
+			target_cycle = context->cycles + 4 * context->opts->gen.clock_divider;
+		}
+		z80_execute(context, target_cycle);
+		if (context->busreq) {
+			context->busack = 1;
+		}
+	}
+}
+
+void z80_assert_reset(z80_context * context, uint32_t cycle)
+{
+	z80_run(context, cycle);
+	context->reset = 1;
+}
+
+void z80_clear_reset(z80_context * context, uint32_t cycle)
+{
+	z80_run(context, cycle);
+	if (context->reset) {
+		context->imode = 0;
+		context->iff1 = context->iff2 = 0;
+		context->pc = 0;
+		context->reset = 0;
+		if (context->busreq) {
+			//TODO: Figure out appropriate delay
+			context->busack = 1;
+		}
+	}
+}
+
+#define MAX_MCYCLE_LENGTH 6
+void z80_assert_busreq(z80_context * context, uint32_t cycle)
+{
+	z80_run(context, cycle);
+	context->busreq = 1;
+	//this is an imperfect approximation since most M-cycles take fewer t-states than the max
+	//and a short 3-t-state M-cycle can take an unbounded number of t-states due to wait states
+	if (context->cycles - cycle > MAX_MCYCLE_LENGTH * context->opts->gen.clock_divider) {
+		context->busack = 1;
+	}
+}
+
+void z80_clear_busreq(z80_context * context, uint32_t cycle)
+{
+	z80_run(context, cycle);
+	context->busreq = 0;
+	context->busack = 0;
+	//there appears to be at least a 1 Z80 cycle delay between busreq
+	//being released and resumption of execution
+	context->cycles += context->opts->gen.clock_divider;
+}
+
+void z80_assert_nmi(z80_context *context, uint32_t cycle)
+{
+	context->nmi_cycle = cycle;
+}
+
+uint8_t z80_get_busack(z80_context * context, uint32_t cycle)
+{
+	z80_run(context, cycle);
+	return context->busack;
+}
+
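The reset/busreq/busack entry points above are the host-facing side of the new core. A hedged usage sketch of the bus-request handshake, assuming the DSL-generated z80.h (the header ztestrun.c includes under NEW_CORE) declares these functions:

#include <stdint.h>
#include "z80.h"

/* Pause the Z80 at `cycle`, wait for the grant, touch its memory, release. */
static void host_take_z80_bus(z80_context *z80, uint32_t cycle)
{
	z80_assert_busreq(z80, cycle);
	while (!z80_get_busack(z80, cycle)) {
		cycle += 4;  /* hypothetical: a real host advances its own clock before re-sampling */
	}
	/* ... safe to access Z80 RAM from the host here ... */
	z80_clear_busreq(z80, cycle);
}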
+void z80_invalidate_code_range(z80_context *context, uint32_t startA, uint32_t endA)
+{
+	for(startA &= ~0x3FF; startA < endA; startA += 1024)
+	{
+		uint8_t *start = get_native_pointer(startA, (void**)context->mem_pointers, &context->opts->gen);
+		if (start) {
+			uint8_t *end = get_native_pointer(startA + 1023, (void**)context->mem_pointers, &context->opts->gen);
+			if (!end || end - start != 1023) {
+				start = NULL;
+			}
+		}
+		context->fastread[startA >> 10] = start;
+		start = get_native_write_pointer(startA, (void**)context->mem_pointers, &context->opts->gen);
+		if (start) {
+			uint8_t *end = get_native_write_pointer(startA + 1023, (void**)context->mem_pointers, &context->opts->gen);
+			if (!end || end - start != 1023) {
+				start = NULL;
+			}
+		}
+		context->fastwrite[startA >> 10] = start;
+	}
+}
+
+void z80_adjust_cycles(z80_context * context, uint32_t deduction)
+{
+	context->cycles -= deduction;
+	if (context->int_cycle != 0xFFFFFFFFU) {
+		if (context->int_cycle > deduction) {
+			context->int_cycle -= deduction;
+		} else {
+			context->int_cycle = 0;
+		}
+	}
+	if (context->int_end_cycle != 0xFFFFFFFFU) {
+		if (context->int_end_cycle > deduction) {
+			context->int_end_cycle -= deduction;
+		} else {
+			context->int_end_cycle = 0;
+		}
+	}
+	if (context->nmi_cycle != 0xFFFFFFFFU) {
+		if (context->nmi_cycle > deduction) {
+			context->nmi_cycle -= deduction;
+		} else {
+			context->nmi_cycle = 0;
+		}
+	}
+}
+
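Because the core tracks absolute 32-bit cycle counts, the host is expected to rebase them periodically so they never overflow; z80_adjust_cycles above subtracts the same deduction from the pending interrupt and NMI deadlines. A hedged usage sketch, again assuming the DSL-generated z80.h header:

#include <stdint.h>
#include "z80.h"

static void host_end_frame(z80_context *z80, uint32_t frame_cycles)
{
	z80_run(z80, frame_cycles);            /* finish executing up to the frame boundary */
	z80_adjust_cycles(z80, frame_cycles);  /* rebases cycles, int_cycle, int_end_cycle, nmi_cycle */
}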
+void z80_serialize(z80_context *context, serialize_buffer *buf)
+{
+	//TODO: Implement me
+}
+
+void z80_deserialize(deserialize_buffer *buf, void *vcontext)
+{
+	//TODO: Implement me
+}
+
+void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler)
+{
+}
+
+void zremove_breakpoint(z80_context * context, uint16_t address)
+{
+}
--- a/ztestgen.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/ztestgen.c	Fri Mar 01 14:17:29 2019 -0800
@@ -11,6 +11,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <errno.h>
+#include <time.h>
 
 extern z80inst z80_tbl_a[256];
 extern z80inst z80_tbl_extd[0xC0-0x40];
@@ -530,6 +531,7 @@
 
 int main(int argc, char ** argv)
 {
+	srand(time(NULL));
 	z80_gen_all();
 	return 0;
 }
--- a/ztestrun.c	Fri Mar 01 08:17:57 2019 -0800
+++ b/ztestrun.c	Fri Mar 01 14:17:29 2019 -0800
@@ -4,7 +4,12 @@
  BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
 */
 #include "z80inst.h"
+#ifdef NEW_CORE
+#include "z80.h"
+#include <string.h>
+#else
 #include "z80_to_x86.h"
+#endif
 #include "mem.h"
 #include "vdp.h"
 #include <stdio.h>
@@ -42,10 +47,12 @@
 	{ 0x0000, 0x100, 0xFF, 0, 0, 0,                                  NULL,    NULL, NULL, z80_unmapped_read, z80_unmapped_write}
 };
 
+#ifndef NEW_CORE
 void z80_next_int_pulse(z80_context * context)
 {
 	context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER;
 }
+#endif
 
 int main(int argc, char ** argv)
 {
@@ -91,6 +98,26 @@
 	fclose(f);
 	init_z80_opts(&opts, z80_map, 2, port_map, 1, 1, 0xFF);
 	context = init_z80_context(&opts);
+#ifdef NEW_CORE
+	z80_execute(context, 1000);
+	printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n",
+		context->main[7], context->main[0], context->main[1],
+		context->main[2], context->main[3],
+		(context->main[4] << 8) | context->main[5],
+		context->ix,
+		context->iy,
+		context->sp, context->imode, context->iff1, context->iff2);
+	printf("Flags: SZYHXVNC\n"
+	       "       %d%d%d%d%d%d%d%d\n", 
+			context->last_flag_result >> 7, context->zflag != 0, context->last_flag_result >> 5 & 1, context->chflags >> 3 & 1, 
+			context->last_flag_result >> 3 & 1, context->pvflag != 0, context->nflag != 0, context->chflags >> 7 & 1
+	);
+	puts("--Alternate Regs--");
+	printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\n",
+		context->alt[7], context->alt[0], context->alt[1],
+		context->alt[2], context->alt[3],
+		(context->alt[4] << 8) | context->alt[5]);
+#else
 	//Z80 RAM
 	context->mem_pointers[0] = z80_ram;
 	if (retranslate) {
@@ -122,5 +149,6 @@
 		context->alt_regs[Z80_A], context->alt_regs[Z80_B], context->alt_regs[Z80_C],
 		context->alt_regs[Z80_D], context->alt_regs[Z80_E],
 		(context->alt_regs[Z80_H] << 8) | context->alt_regs[Z80_L]);
+#endif
 	return 0;
 }