Mercurial > repos > blastem
comparison z80_util.c @ 1752:d6d4c006a7b3
Initial attempt at interrupts in new Z80 core and integrating it into main executable
author | Michael Pavone <pavone@retrodev.com> |
---|---|
date | Sun, 10 Feb 2019 11:58:23 -0800 |
parents | 01236179fc71 |
children | 33ec5df77fac |
comparison
equal
deleted
inserted
replaced
1751:c5d4e1d14dac | 1752:d6d4c006a7b3 |
---|---|
5 context->cycles += 3 * context->opts->gen.clock_divider; | 5 context->cycles += 3 * context->opts->gen.clock_divider; |
6 uint8_t *fast = context->fastmem[context->scratch1 >> 10]; | 6 uint8_t *fast = context->fastmem[context->scratch1 >> 10]; |
7 if (fast) { | 7 if (fast) { |
8 context->scratch1 = fast[context->scratch1 & 0x3FF]; | 8 context->scratch1 = fast[context->scratch1 & 0x3FF]; |
9 } else { | 9 } else { |
10 context->scratch1 = read_byte(context->scratch1, NULL, &context->opts->gen, context); | 10 context->scratch1 = read_byte(context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context); |
11 } | 11 } |
12 } | 12 } |
13 | 13 |
14 void z80_write_8(z80_context *context) | 14 void z80_write_8(z80_context *context) |
15 { | 15 { |
16 context->cycles += 3 * context->opts->gen.clock_divider; | 16 context->cycles += 3 * context->opts->gen.clock_divider; |
17 uint8_t *fast = context->fastmem[context->scratch2 >> 10]; | 17 uint8_t *fast = context->fastmem[context->scratch2 >> 10]; |
18 if (fast) { | 18 if (fast) { |
19 fast[context->scratch2 & 0x3FF] = context->scratch1; | 19 fast[context->scratch2 & 0x3FF] = context->scratch1; |
20 } else { | 20 } else { |
21 write_byte(context->scratch2, context->scratch1, NULL, &context->opts->gen, context); | 21 write_byte(context->scratch2, context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context); |
22 } | 22 } |
23 } | 23 } |
24 | 24 |
25 void z80_io_read8(z80_context *context) | 25 void z80_io_read8(z80_context *context) |
26 { | 26 { |
30 | 30 |
31 context->opts->gen.address_mask = context->io_mask; | 31 context->opts->gen.address_mask = context->io_mask; |
32 context->opts->gen.memmap = context->io_map; | 32 context->opts->gen.memmap = context->io_map; |
33 context->opts->gen.memmap_chunks = context->io_chunks; | 33 context->opts->gen.memmap_chunks = context->io_chunks; |
34 | 34 |
35 context->scratch1 = read_byte(context->scratch1, NULL, &context->opts->gen, context); | 35 context->scratch1 = read_byte(context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context); |
36 | 36 |
37 context->opts->gen.address_mask = tmp_mask; | 37 context->opts->gen.address_mask = tmp_mask; |
38 context->opts->gen.memmap = tmp_map; | 38 context->opts->gen.memmap = tmp_map; |
39 context->opts->gen.memmap_chunks = tmp_chunks; | 39 context->opts->gen.memmap_chunks = tmp_chunks; |
40 } | 40 } |
47 | 47 |
48 context->opts->gen.address_mask = context->io_mask; | 48 context->opts->gen.address_mask = context->io_mask; |
49 context->opts->gen.memmap = context->io_map; | 49 context->opts->gen.memmap = context->io_map; |
50 context->opts->gen.memmap_chunks = context->io_chunks; | 50 context->opts->gen.memmap_chunks = context->io_chunks; |
51 | 51 |
52 write_byte(context->scratch2, context->scratch1, NULL, &context->opts->gen, context); | 52 write_byte(context->scratch2, context->scratch1, (void **)context->mem_pointers, &context->opts->gen, context); |
53 | 53 |
54 context->opts->gen.address_mask = tmp_mask; | 54 context->opts->gen.address_mask = tmp_mask; |
55 context->opts->gen.memmap = tmp_map; | 55 context->opts->gen.memmap = tmp_map; |
56 context->opts->gen.memmap_chunks = tmp_chunks; | 56 context->opts->gen.memmap_chunks = tmp_chunks; |
57 } | 57 } |
70 tmp_io_chunks = io_chunks; | 70 tmp_io_chunks = io_chunks; |
71 tmp_num_io_chunks = num_io_chunks; | 71 tmp_num_io_chunks = num_io_chunks; |
72 tmp_io_mask = io_address_mask; | 72 tmp_io_mask = io_address_mask; |
73 } | 73 } |
74 | 74 |
//Releases a z80_options struct.
//NOTE(review): this is a shallow free - if z80_options owns any nested
//allocations they are not released here; confirm against the allocator.
void z80_options_free(z80_options *opts)
{
	//free(NULL) is a safe no-op, so no guard is needed
	free(opts);
}
79 | |
75 z80_context * init_z80_context(z80_options *options) | 80 z80_context * init_z80_context(z80_options *options) |
76 { | 81 { |
77 z80_context *context = calloc(1, sizeof(z80_context)); | 82 z80_context *context = calloc(1, sizeof(z80_context)); |
78 context->opts = options; | 83 context->opts = options; |
79 context->io_map = (memmap_chunk *)tmp_io_chunks; | 84 context->io_map = (memmap_chunk *)tmp_io_chunks; |
80 context->io_chunks = tmp_num_io_chunks; | 85 context->io_chunks = tmp_num_io_chunks; |
81 context->io_mask = tmp_io_mask; | 86 context->io_mask = tmp_io_mask; |
87 context->int_cycle = context->nmi_cycle = 0xFFFFFFFFU; | |
82 for(uint32_t address = 0; address < 0x10000; address+=1024) | 88 for(uint32_t address = 0; address < 0x10000; address+=1024) |
83 { | 89 { |
84 uint8_t *start = get_native_pointer(address, NULL, &options->gen); | 90 uint8_t *start = get_native_pointer(address, (void**)context->mem_pointers, &options->gen); |
85 if (start) { | 91 if (start) { |
86 uint8_t *end = get_native_pointer(address + 1023, NULL, &options->gen); | 92 uint8_t *end = get_native_pointer(address + 1023, (void**)context->mem_pointers, &options->gen); |
87 if (end && end - start == 1023) { | 93 if (end && end - start == 1023) { |
88 context->fastmem[address >> 10] = start; | 94 context->fastmem[address >> 10] = start; |
89 } | 95 } |
90 } | 96 } |
91 } | 97 } |
92 return context; | 98 return context; |
93 } | 99 } |
94 | 100 |
101 uint32_t z80_sync_cycle(z80_context *context, uint32_t target_cycle) | |
102 { | |
103 if (context->iff1 && context->int_cycle < target_cycle) { | |
104 target_cycle = context->int_cycle; | |
105 }; | |
106 if (context->nmi_cycle < target_cycle) { | |
107 target_cycle = context->nmi_cycle; | |
108 } | |
109 return target_cycle; | |
110 } | |
111 | |
112 void z80_run(z80_context *context, uint32_t target_cycle) | |
113 { | |
114 if (context->reset || context->busack) { | |
115 context->cycles = target_cycle; | |
116 } else if (target_cycle > context->cycles) { | |
117 if (context->busreq) { | |
118 //busreq is sampled at the end of an m-cycle | |
119 //we can approximate that by running for a single m-cycle after a bus request | |
120 target_cycle = context->cycles + 4 * context->opts->gen.clock_divider; | |
121 } | |
122 z80_execute(context, target_cycle); | |
123 if (context->busreq) { | |
124 context->busack = 1; | |
125 } | |
126 } | |
127 } | |
128 | |
//Asserts the RESET line: brings the core up to date through the given
//cycle, then flags reset so subsequent z80_run calls only consume time
void z80_assert_reset(z80_context * context, uint32_t cycle)
{
	z80_run(context, cycle);
	context->reset = 1;
}
134 | |
135 void z80_clear_reset(z80_context * context, uint32_t cycle) | |
136 { | |
137 z80_run(context, cycle); | |
138 if (context->reset) { | |
139 context->imode = 0; | |
140 context->iff1 = context->iff2 = 0; | |
141 context->pc = 0; | |
142 context->reset = 0; | |
143 if (context->busreq) { | |
144 //TODO: Figure out appropriate delay | |
145 context->busack = 1; | |
146 } | |
147 } | |
148 } | |
149 | |
150 #define MAX_MCYCLE_LENGTH 6 | |
151 void z80_assert_busreq(z80_context * context, uint32_t cycle) | |
152 { | |
153 z80_run(context, cycle); | |
154 context->busreq = 1; | |
155 //this is an imperfect aproximation since most M-cycles take less tstates than the max | |
156 //and a short 3-tstate m-cycle can take an unbounded number due to wait states | |
157 if (context->cycles - cycle > MAX_MCYCLE_LENGTH * context->opts->gen.clock_divider) { | |
158 context->busack = 1; | |
159 } | |
160 } | |
161 | |
//Releases the BUSREQ line, revoking any bus grant and returning the bus
//to the CPU
void z80_clear_busreq(z80_context * context, uint32_t cycle)
{
	z80_run(context, cycle);
	context->busreq = 0;
	context->busack = 0;
	//there appears to be at least a 1 Z80 cycle delay between busreq
	//being released and resumption of execution
	context->cycles += context->opts->gen.clock_divider;
}
171 | |
//Records the cycle at which the NMI line is asserted; the core takes the
//NMI when execution reaches this cycle (see z80_sync_cycle).
//NOTE(review): unlike the other line-change functions this does not call
//z80_run to sync first - confirm that is intentional
void z80_assert_nmi(z80_context *context, uint32_t cycle)
{
	context->nmi_cycle = cycle;
}
176 | |
//Runs the core up to the given cycle and samples the BUSACK state;
//returns non-zero when the bus has been granted
uint8_t z80_get_busack(z80_context * context, uint32_t cycle)
{
	z80_run(context, cycle);
	return context->busack;
}
182 | |
183 void z80_invalidate_code_range(z80_context *context, uint32_t startA, uint32_t endA) | |
184 { | |
185 for(startA &= ~0x3FF; startA += 1024; startA < endA) | |
186 { | |
187 uint8_t *start = get_native_pointer(startA, (void**)context->mem_pointers, &context->opts->gen); | |
188 if (start) { | |
189 uint8_t *end = get_native_pointer(startA + 1023, (void**)context->mem_pointers, &context->opts->gen); | |
190 if (!end || end - start != 1023) { | |
191 start = NULL; | |
192 } | |
193 } | |
194 context->fastmem[startA >> 10] = start; | |
195 } | |
196 } | |
197 | |
198 void z80_adjust_cycles(z80_context * context, uint32_t deduction) | |
199 { | |
200 context->cycles -= deduction; | |
201 if (context->int_cycle != 0xFFFFFFFFU) { | |
202 if (context->int_cycle > deduction) { | |
203 context->int_cycle -= deduction; | |
204 } else { | |
205 context->int_cycle = 0; | |
206 } | |
207 } | |
208 if (context->nmi_cycle != 0xFFFFFFFFU) { | |
209 if (context->nmi_cycle > deduction) { | |
210 context->nmi_cycle -= deduction; | |
211 } else { | |
212 context->nmi_cycle = 0; | |
213 } | |
214 } | |
215 } | |
216 | |
//Serializes the Z80 core state into buf for savestates; currently a stub
//for this new core
void z80_serialize(z80_context *context, serialize_buffer *buf)
{
	//TODO: Implement me
}
221 | |
//Restores Z80 core state from buf (vcontext is the z80_context); currently
//a stub for this new core
void z80_deserialize(deserialize_buffer *buf, void *vcontext)
{
	//TODO: Implement me
}
226 | |
//Debugger hook for installing a breakpoint at address.
//NOTE(review): a no-op in this core so far - the debugger interface is
//satisfied but breakpoints have no effect; confirm this is intentional
void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler)
{
}
230 | |
//Debugger hook for removing a breakpoint at address.
//NOTE(review): a no-op, matching zinsert_breakpoint above
void zremove_breakpoint(z80_context * context, uint16_t address)
{
}