comparison jag_video.c @ 1090:a68274a25e2f

Initial stab at implementing the Jaguar object processor
author Michael Pavone <pavone@retrodev.com>
date Sun, 16 Oct 2016 18:25:18 -0700
parents 87597a048d38
children faa3a4617f62
comparison
equal deleted inserted replaced
1089:87597a048d38 1090:a68274a25e2f
1 #include <stdint.h> 1 #include <stdint.h>
2 #include <stdlib.h> 2 #include <stdlib.h>
3 #include <stdio.h> 3 #include <stdio.h>
4 #include "jag_video.h" 4 #include "jag_video.h"
5 #include "jaguar.h"
5 #include "render.h" 6 #include "render.h"
6 7
7 enum { 8 enum {
8 VMODE_CRY, 9 VMODE_CRY,
9 VMODE_RGB24, 10 VMODE_RGB24,
165 copy_16(dst, len, linebuffer, table_variable); 166 copy_16(dst, len, linebuffer, table_variable);
166 break; 167 break;
167 } 168 }
168 } 169 }
169 170
//Object processor state machine states
enum {
	OBJ_IDLE,         //not currently walking an object list
	OBJ_FETCH_DESC1,  //fetching the first phrase of an object descriptor
	OBJ_FETCH_DESC2,  //fetching the second phrase (bitmap/scaled objects)
	OBJ_FETCH_DESC3,  //fetching the third phrase (scaled objects only)
	OBJ_PROCESS,      //reading image data and writing pixels to the line buffer
	OBJ_HEIGHT_WB,    //writing updated height/data address back to the descriptor
	OBJ_REMAINDER_WB, //writing updated scale remainder back to the descriptor
	OBJ_GPU_WAIT      //entered for GPU objects; release path not implemented here
};
181
//Object types, encoded in the low 3 bits of the first descriptor phrase
enum {
	OBJ_BITMAP, //unscaled bitmap object
	OBJ_SCALED, //scaled bitmap object
	OBJ_GPU,    //object that hands control to the GPU (OP waits)
	OBJ_BRANCH, //conditional branch within the object list
	OBJ_STOP    //terminates object list processing
};
189
190 void op_run(jag_video *context)
191 {
192 while (context->op.cycles < context->cycles)
193 {
194 switch (context->op.state)
195 {
196 case OBJ_IDLE:
197 case OBJ_GPU_WAIT:
198 context->op.cycles = context->cycles;
199 break;
200 case OBJ_FETCH_DESC1: {
201 uint32_t address = context->regs[VID_OBJLIST1] | context->regs[VID_OBJLIST2] << 16;
202 uint64_t val = jag_read_phrase(context->system, address, &context->op.cycles);
203 address += 8;
204
205 context->regs[VID_OBJ0] = val >> 48;
206 context->regs[VID_OBJ1] = val >> 32;
207 context->regs[VID_OBJ2] = val >> 16;
208 context->regs[VID_OBJ3] = val;
209 context->op.type = val & 7;
210 context->op.has_prefetch = 0;
211 uint16_t ypos = val >> 3 & 0x7FF;
212 switch (context->op.type)
213 {
214 case OBJ_BITMAP:
215 case OBJ_SCALED: {
216 uint16_t height = val >> 14 & 0x7FF;
217 uint32_t link = (address & 0xC00007) | (val >> 21 & 0x3FFFF8);
218 if ((ypos == 0x7FF || context->regs[VID_VCOUNT] >= ypos) && height) {
219 context->op.state = OBJ_FETCH_DESC2;
220 context->op.obj_start = address - 8;
221 context->op.ypos = ypos;
222 context->op.height = height;
223 context->op.link = link;
224 context->op.data_address = val >> 40 & 0xFFFFF8;
225 context->op.cur_address = context->op.data_address;
226 } else {
227 //object is not visible on this line, advance to next object
228 address = link;
229 }
230 break;
231 }
232 case OBJ_GPU:
233 context->op.state = OBJ_GPU_WAIT;
234 break;
235 case OBJ_BRANCH: {
236 uint8_t branch;
237 switch(val >> 14 & 7)
238 {
239 case 0:
240 branch = ypos == context->regs[VID_VCOUNT] || ypos == 0x7FF;
241 break;
242 case 1:
243 branch = ypos > context->regs[VID_VCOUNT];
244 break;
245 case 2:
246 branch = ypos < context->regs[VID_VCOUNT];
247 break;
248 case 3:
249 branch = context->regs[VID_OBJFLAG] & 1;
250 break;
251 case 4:
252 branch = (context->regs[VID_HCOUNT] & 0x400) != 0;
253 break;
254 default:
255 branch = 0;
256 fprintf(stderr, "Invalid branch CC type %d in object at %X\n", (int)(val >> 14 & 7), address-8);
257 break;
258 }
259 if (branch) {
260 address &= 0xC00007;
261 address |= val >> 21 & 0x3FFFF8;
262 }
263 }
264 case OBJ_STOP:
265 //TODO: trigger interrupt
266 context->op.state = OBJ_IDLE;
267 break;
268 }
269 context->regs[VID_OBJLIST1] = address;
270 context->regs[VID_OBJLIST2] = address >> 16;
271 break;
272 }
273 case OBJ_FETCH_DESC2: {
274 uint32_t address = context->regs[VID_OBJLIST1] | context->regs[VID_OBJLIST2] << 16;
275 uint64_t val = jag_read_phrase(context->system, address, &context->op.cycles);
276 address += 8;
277
278 context->op.xpos = val & 0xFFF;
279 if (context->op.xpos & 0x800) {
280 context->op.xpos |= 0xF000;
281 }
282 context->op.increment = (val >> 15 & 0x7) * 8;
283 context->op.bpp = 1 << (val >> 12 & 7);
284 if (context->op.bpp == 32) {
285 context->op.bpp = 24;
286 }
287 context->op.line_pitch = (val >> 18 & 0x3FF) * 8;
288 if (context->op.bpp < 8) {
289 context->op.pal_offset = val >> 37;
290 if (context->op.bpp == 4) {
291 context->op.pal_offset &= 0xF0;
292 } else if(context->op.bpp == 2) {
293 context->op.pal_offset &= 0xFC;
294 } else {
295 context->op.pal_offset &= 0xFE;
296 }
297 } else {
298 context->op.pal_offset = 0;
299 }
300 context->op.line_phrases = val >> 28 & 0x3FF;
301 context->op.hflip = (val & (1UL << 45)) != 0;
302 context->op.addpixels = (val & (1UL << 46)) != 0;
303 context->op.transparent = (val & (1UL << 47)) != 0;
304 //TODO: do something with RELEASE flag
305 context->op.leftclip = val >> 49;
306 if (context->op.type == OBJ_SCALED) {
307 context->op.state = OBJ_FETCH_DESC3;
308 switch (context->op.bpp)
309 {
310 case 1:
311 context->op.leftclip &= 0x3F;
312
313 break;
314 //documentation isn't clear exactly how this works for higher bpp values
315 case 2:
316 context->op.leftclip &= 0x3E;
317 break;
318 case 4:
319 context->op.leftclip &= 0x3C;
320 break;
321 case 8:
322 context->op.leftclip &= 0x38;
323 break;
324 case 16:
325 context->op.leftclip &= 0x30;
326 break;
327 default:
328 context->op.leftclip = 0x20;
329 break;
330 }
331 } else {
332 context->op.state = OBJ_PROCESS;
333 address = context->op.link;
334 switch (context->op.bpp)
335 {
336 case 1:
337 context->op.leftclip &= 0x3E;
338 break;
339 case 2:
340 context->op.leftclip &= 0x3C;
341 break;
342 //values for 4bpp and up are sort of a guess
343 case 4:
344 context->op.leftclip &= 0x38;
345 break;
346 case 8:
347 context->op.leftclip &= 0x30;
348 break;
349 case 16:
350 context->op.leftclip &= 0x20;
351 break;
352 default:
353 context->op.leftclip = 0;
354 break;
355 }
356 }
357 if (context->op.xpos < 0) {
358 int16_t pixels_per_phrase = 64 / context->op.bpp;
359 int16_t clip = -context->op.xpos / pixels_per_phrase;
360 int16_t rem = -context->op.xpos % pixels_per_phrase;
361 if (clip >= context->op.line_phrases) {
362 context->op.line_phrases = 0;
363 } else {
364 context->op.line_phrases -= clip;
365 context->op.leftclip += rem * context->op.bpp;
366 if (context->op.leftclip >= 64) {
367 context->op.line_phrases--;
368 context->op.leftclip -= 64;
369 }
370
371 }
372 } else if (context->op.bpp < 32){
373 context->op.lb_offset = context->op.xpos;
374 } else {
375 context->op.lb_offset = context->op.xpos * 2;
376 }
377 if (context->op.lb_offset >= LINEBUFFER_WORDS || !context->op.line_phrases) {
378 //ignore objects that are completely offscreen
379 //not sure if that's how the hardware does it, but it would make sense
380 context->op.state = OBJ_FETCH_DESC1;
381 address = context->op.link;
382 }
383 context->regs[VID_OBJLIST1] = address;
384 context->regs[VID_OBJLIST2] = address >> 16;
385 break;
386 }
387 case OBJ_FETCH_DESC3: {
388 uint32_t address = context->regs[VID_OBJLIST1] | context->regs[VID_OBJLIST2] << 16;
389 uint64_t val = jag_read_phrase(context->system, address, &context->op.cycles);
390
391 context->op.state = OBJ_PROCESS;
392 context->op.hscale = val & 0xFF;;
393 context->op.hremainder = val & 0xFF;
394 context->op.vscale = val >> 8 & 0xFF;
395 context->op.remainder = val >> 16 & 0xFF;
396
397 context->regs[VID_OBJLIST1] = context->op.link;
398 context->regs[VID_OBJLIST2] = context->op.link >> 16;
399 break;
400 }
401 case OBJ_PROCESS: {
402 uint32_t proc_cycles = 0;
403 if (!context->op.has_prefetch && context->op.line_phrases) {
404 context->op.prefetch = jag_read_phrase(context->system, context->op.cur_address, &proc_cycles);
405 context->op.cur_address += context->op.increment;
406 context->op.has_prefetch = 1;
407 context->op.line_phrases--;
408 }
409 if (!proc_cycles) {
410 //run at least one cycle of writes even if we didn't spend any time reading
411 proc_cycles = 1;
412 }
413 while (proc_cycles)
414 {
415 if (context->op.type == OBJ_SCALED && context->op.hscale) {
416 while (context->op.hremainder <= 0 && context->op.im_bits) {
417 context->op.im_bits -= context->op.bpp;
418 context->op.hremainder += context->op.hscale;
419 }
420 }
421 if (context->op.im_bits) {
422 uint32_t val = context->op.im_data >> (context->op.im_bits - context->op.bpp);
423 val &= (1 << context->op.bpp) - 1;
424 context->op.im_bits -= context->op.bpp;
425 if (context->op.bpp < 16) {
426 val = context->clut[val + context->op.pal_offset];
427 }
428 if (context->op.bpp == 32) {
429 context->write_line_buffer[context->op.lb_offset++] = val >> 16;
430 }
431 context->write_line_buffer[context->op.lb_offset++] = val;
432 if (context->op.type == OBJ_SCALED) {
433 context->op.hremainder -= 0x20;
434 }
435 }
436 if (context->op.im_bits && context->op.bpp < 32 && context->op.type == OBJ_BITMAP && context->op.lb_offset < LINEBUFFER_WORDS) {
437 uint32_t val = context->op.im_data >> (context->op.im_bits - context->op.bpp);
438 val &= (1 << context->op.bpp) - 1;
439 context->op.im_bits -= context->op.bpp;
440 val = context->clut[val + context->op.pal_offset];
441 context->write_line_buffer[context->op.lb_offset++] = val;
442 }
443 context->op_cycles++;
444 proc_cycles--;
445 }
446 if (!context->op.im_bits && context->op.has_prefetch) {
447 context->op.im_data = context->op.prefetch;
448 context->op.has_prefetch = 0;
449 //docs say this is supposed to be a value in pixels
450 //but given the "significant" bits part I'm guessing
451 //this is actually how many bits are pre-shifted off
452 //the first phrase read in a line
453 context->op.im_bits = 64 - context->op.leftclip;
454 context->op.leftclip = 0;
455 }
456 if (context->op.lb_offset == LINEBUFFER_WORDS || (!context->op.im_bits && !context->op.line_phrases)) {
457 context->op.state = OBJ_HEIGHT_WB;
458 }
459 break;
460 }
461 case OBJ_HEIGHT_WB: {
462 if (context->op.type == OBJ_BITMAP) {
463 context->op.height--;
464 context->op.data_address += context->op.line_pitch;
465 context->op.state = OBJ_FETCH_DESC1;
466 } else {
467 context->op.remainder -= 0x20;
468 context->op.state = OBJ_REMAINDER_WB;
469 while (context->op.height && context->op.remainder <= 0) {
470 context->op.height--;
471 context->op.remainder += context->op.vscale;
472 context->op.data_address += context->op.line_pitch;
473 }
474 }
475 uint64_t val = context->op.type | context->op.ypos << 3 | context->op.height << 14
476 | ((uint64_t)context->op.link & 0x3FFFF8) << 21 | ((uint64_t)context->op.data_address) << 40;
477 context->op.cycles += jag_write_phrase(context->system, context->op.obj_start, val);
478 break;
479 }
480 case OBJ_REMAINDER_WB: {
481 uint64_t val = context->op.hscale | context->op.vscale << 8 | context->op.remainder << 16;
482 context->op.cycles += jag_write_phrase(context->system, context->op.obj_start+16, val);
483 context->op.state = OBJ_FETCH_DESC1;
484 break;
485 }
486 }
487 }
488 }
489
170 void jag_video_run(jag_video *context, uint32_t target_cycle) 490 void jag_video_run(jag_video *context, uint32_t target_cycle)
171 { 491 {
172 if (context->regs[VID_VMODE] & BIT_TBGEN) { 492 if (context->regs[VID_VMODE] & BIT_TBGEN) {
173 while (context->cycles < target_cycle) 493 while (context->cycles < target_cycle)
174 { 494 {
193 for (int i = 0; i < LINEBUFFER_WORDS; i++) 513 for (int i = 0; i < LINEBUFFER_WORDS; i++)
194 { 514 {
195 context->write_line_buffer[i] = context->regs[VID_BGCOLOR]; 515 context->write_line_buffer[i] = context->regs[VID_BGCOLOR];
196 } 516 }
197 517
198 //TODO: kick off object processor 518 //kick off object processor
519 context->op.state = OBJ_FETCH_DESC1;
520 } else if (context->regs[VID_HCOUNT] == context->regs[VID_HDISP_END]) {
521 //stop object processor
522 context->op.state = OBJ_IDLE;
199 } 523 }
200 524
525 context->cycles++;
526 op_run(context);
527
528 //advance counters
201 if ( 529 if (
202 !context->output 530 !context->output
203 && context->regs[VID_VCOUNT] == context->regs[VID_VDISP_BEGIN] 531 && context->regs[VID_VCOUNT] == context->regs[VID_VDISP_BEGIN]
204 && context->regs[VID_HCOUNT] == context->regs[VID_HDISP_BEGIN1] 532 && context->regs[VID_HCOUNT] == context->regs[VID_HDISP_BEGIN1]
205 ) { 533 ) {
221 context->regs[VID_VCOUNT]++; 549 context->regs[VID_VCOUNT]++;
222 } 550 }
223 } else { 551 } else {
224 context->regs[VID_HCOUNT]++; 552 context->regs[VID_HCOUNT]++;
225 } 553 }
226 context->cycles++; 554
227 } 555 }
228 } else { 556 } else {
229 context->cycles = target_cycle; 557 context->cycles = target_cycle;
230 } 558 }
231 } 559 }