2 // Alternate GPU core... Testing purposes only!
7 // Random stuff from GPU.CPP
9 /*static uint8 * gpu_ram_8;
11 static uint32 gpu_acc;
12 static uint32 gpu_remain;
13 static uint32 gpu_hidata;
14 static uint32 gpu_flags;
15 static uint32 gpu_matrix_control;
16 static uint32 gpu_pointer_to_matrix;
17 static uint32 gpu_data_organization;
18 static uint32 gpu_control;
19 static uint32 gpu_div_control;
20 static uint8 gpu_flag_z, gpu_flag_n, gpu_flag_c;
21 static uint8 gpu_alternate_flag_z, gpu_alternate_flag_n, gpu_alternate_flag_c;
22 static uint32 * gpu_reg;
23 static uint32 * gpu_alternate_reg;
24 static uint32 * gpu_reg_bank_0;
25 static uint32 * gpu_reg_bank_1;
27 static uint32 gpu_opcode_first_parameter;
28 static uint32 gpu_opcode_second_parameter;*/
// "Quick" immediate decode for ADDQ/SUBQ/shift-count fields: the 5-bit
// instruction field encodes 1..32, with field value 0 standing for 32.
const INT32 qtable[32] =
{
	32,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
// Signed "quick" immediate decode for CMPQ: the 5-bit field is
// sign-extended, so 0..15 map to themselves and 16..31 map to -16..-1.
const INT32 sqtable[32] =
{
	  0,   1,   2,   3,   4,   5,   6,   7,
	  8,   9,  10,  11,  12,  13,  14,  15,
	-16, -15, -14, -13, -12, -11, -10,  -9,
	 -8,  -7,  -6,  -5,  -4,  -3,  -2,  -1
};
// Nominal cycle cost per opcode, indexed by the 6-bit opcode number.
// Most instructions are 3 cycles; notable outliers are the divides
// (entries 21 and 54, at 18 and 20 cycles) and the 1-cycle ALU ops.
const UINT8 gpu_opcode_times[64] =
{
	 3,  3,  3,  3,  3,  3,  3,  3,   3,  3,  3,  3,  3,  3,  3,  3,
	 3,  3,  1,  3,  1, 18,  3,  3,   3,  3,  3,  3,  3,  3,  3,  3,
	 3,  3,  2,  2,  2,  2,  3,  6,   6,  4,  6,  6,  6,  1,  1,  1,
	 1,  2,  2,  2,  1,  1, 20,  3,   3,  1,  6,  6,  2,  2,  3,  3
};
// jump_condition[cc][flags] -> nonzero if the branch is taken: precomputed
// for every 5-bit condition code crossed with every 3-bit packed flags value.
// Flags word layout matches the dw packing in opcode_jump/opcode_jr below:
// bit0 = Z, bit1 = C, bit2 = N.
46 UINT8 jump_condition[32][8];
50 memset(jump_condition, 0, 32 * 8 * sizeof(UINT8));
52 for(int j=0; j<32; j++)
54 for(int i=0; i<8; i++)
// NOTE(review): original lines 55-65 are elided from this dump, so the
// computation of 'r' (presumably the Z-flag part of the condition decode)
// is not visible here — confirm against the full source.
// (j >> 4) selects which flag the 0x2-based mask probes: C (bit1) for
// condition codes 0-15, N (bit2) for codes 16-31.
66 if(i & (0x2 << (j >> 4)))
70 if(!(i & (0x2 << (j >> 4))))
73 jump_condition[j][i] = r;
// Flag-setting fragments of the ADD-family handlers (function headers and
// the arithmetic itself are elided from this dump).
89 gpu_flag_z = d == 0 ? 1 : 0;
// Carry = bit 32 of the widened intermediate result 'r' (presumably UINT64
// — its declaration is elided; TODO confirm).
// NOTE(review): 0x100000000 needs a 64-bit integer constant type; C99
// gives it (unsigned) long long automatically, but an explicit ULL suffix
// would be safer for pre-C99 compilers.
99 gpu_flag_c = r & 0x100000000 ? 1 : 0;
101 gpu_flag_z = RN == 0 ? 1 : 0;
102 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
106 void opcode_addc(void)
112 gpu_flag_c = r & 0x100000000 ? 1 : 0;
114 gpu_flag_z = RN == 0 ? 1 : 0;
115 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
119 void opcode_addq(void)
121 int s = qtable[IMM_1];
124 gpu_flag_c = r & 0x100000000 ? 1 : 0;
126 gpu_flag_z = RN == 0 ? 1 : 0;
127 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
131 void opcode_addqt(void)
137 void opcode_and(void)
140 gpu_flag_z = RN == 0 ? 1 : 0;
141 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
145 void opcode_bclr(void)
148 gpu_flag_z = RN == 0 ? 1 : 0;
149 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
153 void opcode_bset(void)
156 gpu_flag_z = RN == 0 ? 1 : 0;
157 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
161 void opcode_btst(void)
163 gpu_flag_z = RN & (1 << IMM_1) ? 0 : 1;
167 void opcode_cmp(void)
171 gpu_flag_c = (unsigned int)d < (unsigned int)s;
173 gpu_flag_z = d == 0 ? 1 : 0;
174 gpu_flag_n = d & 0x80000000 ? 1 : 0;
178 void opcode_cmpq(void)
180 int s = sqtable[IMM_1];
182 gpu_flag_c = (unsigned int)d < (unsigned int)s;
184 gpu_flag_z = d == 0 ? 1 : 0;
185 gpu_flag_n = d & 0x80000000 ? 1 : 0;
// DIV — unsigned divide of RN by RM; gpu_div_control selects between the
// plain 32-bit and the 16.16 fixed-point mode (dividend pre-shifted by 16).
// Lines 193-199 are elided from this dump, so it is not visible which
// branch of the mode test the shifted computation below belongs to.
189 void opcode_div(void)
192 if(gpu_div_control == 0) {
200 UINT64 q = (UINT64)(RN)<<16;
201 UINT64 d = (UINT64)(RM);
// NOTE(review): no guard for RM == 0 — 'q / d' is undefined behavior on
// the host when the guest divides by zero, whereas the real hardware
// produces a defined (if useless) result. TODO: add a d == 0 check.
202 UINT32 r = (UINT64)(q / d);
// Quotient/remainder pair; the elided lines presumably store r into RN and
// r2 into gpu_remain — confirm against the full source.
203 UINT32 r2 = (UINT64)(q % d);
211 void opcode_imacn(void)
220 void opcode_imult(void)
226 gpu_flag_z = r == 0 ? 1 : 0;
227 gpu_flag_n = r & 0x80000000 ? 1 : 0;
230 // case 18: // IMULTN
231 void opcode_imultn(void)
237 gpu_flag_z = r == 0 ? 1 : 0;
238 gpu_flag_n = r & 0x80000000 ? 1 : 0;
// Tail of the JR handler (function header elided from this dump):
// conditional PC-relative branch with a delay slot.
// Pack the flags into a 3-bit table index — bit0=Z, bit1=C, bit2=N; this
// layout must agree with how jump_condition[][] was built.
244 UINT32 dw = (gpu_flag_z & 0x1) | ((gpu_flag_n & 0x1) << 2) | ((gpu_flag_c & 0x1) << 1);
245 if (jump_condition[IMM_2][dw])
248 fprintf(log_get(), " --> JR: Branch taken. ");
// Sign-extend the 5-bit immediate (bit 4 is the sign), then scale by 2:
// the offset is in words, relative to the instruction after this one.
249 signed int offset = IMM_1 & 0x10 ? (0xFFFFFFF0 | (IMM_1 & 0xF)) : (IMM_1 & 0xF);
250 UINT32 delayed_jump_address = gpu_pc + 2 + (offset * 2);
254 // gpu_pc = delayed_jump_address;
// The -2 presumably cancels the dispatcher's PC post-increment after the
// delay slot runs — TODO confirm against the main execution loop.
255 gpu_pc = delayed_jump_address - 2;
// JUMP (Rm) — conditional indirect jump with a delay slot.
260 void opcode_jump(void)
// Flag packing must match jump_condition[][]: bit0=Z, bit1=C, bit2=N.
262 UINT32 dw = (gpu_flag_z & 0x1) | ((gpu_flag_n & 0x1) << 2) | ((gpu_flag_c & 0x1) << 1);
263 if (jump_condition[IMM_2][dw])
266 fprintf(log_get(), " --> JUMP: Branch taken. ");
// Target taken from RM with bit 0 cleared (even address) and the top byte
// masked off.
267 UINT32 delayed_jump_address = RM & 0xFFFFFE;
271 // gpu_pc = delayed_jump_address;
// -2 presumably offsets the dispatcher's PC post-increment — TODO confirm.
272 gpu_pc = delayed_jump_address - 2;
// LOAD (Rm) — 32-bit load into RN. The address computation (original lines
// 278-279) is elided from this dump; 'address' is presumably RM.
277 void opcode_load(void)
// Fast path: local GPU RAM (0xF03000-0xF03FFF) is read directly.
280 if(address >= 0xF03000 && address < 0xF04000) {
281 // RN = _rotl(*(UINT32*)(&MEM[address]),16);
282 RN = gpu_long_read(address);
// 0xF0B000-0xF0BFFF is handled as a mirror of GPU RAM, remapped down by
// 0x8000 before the read.
283 } else if(address >= 0xF0B000 && address < 0xF0C000) {
284 // RN = _rotl(*(UINT32*)(&MEM[address-0x8000]),16);
285 RN = gpu_long_read(address-0x8000);
// All other addresses go through the generic Jaguar bus read.
287 // RN = ReadMem32(address);
288 RN = jaguar_long_read(address);
292 // case 43: // LOAD (R14+m)
293 void opcode_load_r14_indexed(void)
295 UINT32 address = gpu_reg[14] + (qtable[IMM_1] << 2);
296 if(address >= 0xF03000 && address < 0xF04000) {
297 // RN = _rotl(*(UINT32*)(&MEM[address]),16);
298 RN = gpu_long_read(address);
299 } else if(address >= 0xF0B000 && address < 0xF0C000) {
300 // RN = _rotl(*(UINT32*)(&MEM[address-0x8000]),16);
301 RN = gpu_long_read(address-0x8000);
303 // RN = ReadMem32(address);
304 RN = jaguar_long_read(address);
308 // case 44: // LOAD (R15+m)
309 void opcode_load_r15_indexed(void)
311 UINT32 address = gpu_reg[15] + (qtable[IMM_1] << 2);
312 if(address >= 0xF03000 && address < 0xF04000) {
313 // RN = _rotl(*(UINT32*)(&MEM[address]),16);
314 RN = gpu_long_read(address);
315 } else if(address >= 0xF0B000 && address < 0xF0C000) {
316 // RN = _rotl(*(UINT32*)(&MEM[address-0x8000]),16);
317 RN = gpu_long_read(address-0x8000);
319 // RN = ReadMem32(address);
320 RN = jaguar_long_read(address);
324 // case 58: // LOAD (R14+Rm)
325 void opcode_load_r14_ri(void)
327 UINT32 address = gpu_reg[14] + RM;
328 if(address >= 0xF03000 && address < 0xF04000) {
329 // RN = _rotl(*(UINT32*)(&MEM[address]),16);
330 RN = gpu_long_read(address);
331 } else if(address >= 0xF0B000 && address < 0xF0C000) {
332 // RN = _rotl(*(UINT32*)(&MEM[address-0x8000]),16);
333 RN = gpu_long_read(address-0x8000);
335 // RN = ReadMem32(address);
336 RN = jaguar_long_read(address);
340 // case 59: // LOAD (R15+Rm)
341 void opcode_load_r15_ri(void)
343 UINT32 address = gpu_reg[15] + RM;
344 if(address >= 0xF03000 && address < 0xF04000) {
345 // RN = _rotl(*(UINT32*)(&MEM[address]),16);
346 RN = gpu_long_read(address);
347 } else if(address >= 0xF0B000 && address < 0xF0C000) {
348 // RN = _rotl(*(UINT32*)(&MEM[address-0x8000]),16);
349 RN = gpu_long_read(address-0x8000);
351 // RN = ReadMem32(address);
352 RN = jaguar_long_read(address);
357 void opcode_loadb(void)
359 if(RM >= 0xF03000 && RM < 0xF04000) {
360 // RN = ReadMem32(RM);
361 RN = gpu_long_read(RM);
363 // RN = ReadMem8(RM);
364 RN = jaguar_byte_read(RM);
369 void opcode_loadw(void)
371 if(RM >= 0xF03000 && RM < 0xF04000) {
372 // RN = ReadMem32(RM);
373 RN = gpu_long_read(RM);
375 // RN = ReadMem16(RM);
376 RN = jaguar_word_read(RM);
381 void opcode_loadp(void)
383 if(RM >= 0xF03000 && RM < 0xF04000) {
384 // RN = ReadMem32(RM);
385 RN = gpu_long_read(RM);
387 // RN = ReadMem32(RM);
388 // gpu_hidata = ReadMem32(RM+4);
389 RN = gpu_long_read(RM);
390 gpu_hidata = gpu_long_read(RM + 4);
395 void opcode_move(void)
400 // case 51: // MOVE PC,Rn
401 void opcode_move_pc(void)
406 // case 37: // MOVEFA
407 void opcode_movefa(void)
409 RN = gpu_alternate_reg[IMM_1];
// MOVEI — loads RN with the 32-bit literal that follows the opcode in the
// instruction stream.
413 void opcode_movei(void)
415 // This instruction is followed by 32-bit value in LSW / MSW format...
416 // RN = (uint32)gpu_word_read(gpu_pc) | ((uint32)gpu_word_read(gpu_pc + 2) << 16);
418 // RN = _rotl(*(UINT32*)(&MEM[address]),16);
419 // RN = *(UINT32*)(&MEM[gpu_pc+2]);
// Read the long at gpu_pc+2 and rotate by 16 to swap its halves, turning
// the in-stream LSW/MSW word order into a normal 32-bit value.
// NOTE(review): _rotl is an MSVC intrinsic — a portable fallback is needed
// for other compilers.
420 RN = _rotl(gpu_long_read(gpu_pc + 2), 16);
425 void opcode_moveq(void)
430 // case 36: // MOVETA
431 void opcode_moveta(void)
433 gpu_alternate_reg[IMM_2] = RM;
437 void opcode_mtoi(void)
439 int d = RN & 0x7FFFFF;
440 if(RN & 0x80000000) {
444 gpu_flag_z = d == 0 ? 1 : 0;
445 gpu_flag_n = d & 0x80000000 ? 1 : 0;
449 void opcode_mult(void)
451 unsigned short s = RM;
452 unsigned short d = RN;
455 gpu_flag_z = r == 0 ? 1 : 0;
456 gpu_flag_n = r & 0x80000000 ? 1 : 0;
// MMULT — matrix multiply: multiply-accumulates packed 16-bit matrix
// elements from memory against packed 16-bit halves of alternate-bank
// registers. Several interior lines are elided from this dump
// (466-469, 471-472, 475, 477, 480-491), including the accumulator setup
// and the address stepping.
460 void opcode_mmult(void)
// Low nibble of the matrix control register = element count.
462 int size = gpu_matrix_control & 0xF;
463 int address = gpu_pointer_to_matrix;
// Bit 4 presumably selects row- vs column-major walking of the matrix —
// the branch bodies are elided; TODO confirm.
465 if (gpu_matrix_control & 0x10)
470 for(int i=0; i<size; i++)
473 // m = ReadMem16(address+2);
474 m = gpu_word_read(address + 2);
// Two operands are packed per 32-bit register: i>>1 picks the register,
// and an elided conditional chooses the high half (below) or low half.
476 r = gpu_alternate_reg[IMM_1+(i>>1)] >> 16;
478 r = (gpu_alternate_reg[IMM_1+(i>>1)] & 0xFFFF);
479 result += (int)(r * m);
492 gpu_flag_n = (result < 0) ? 1 : 0;
493 gpu_flag_z = (result == 0) ? 1 : 0;
497 void opcode_neg(void)
501 gpu_flag_c = d - s < d;
504 gpu_flag_z = d == 0 ? 1 : 0;
505 gpu_flag_n = d & 0x80000000 ? 1 : 0;
509 void opcode_nop(void)
514 void opcode_normi(void)
516 /*unsigned int d = RN;
518 while ((d & 0xffc00000) == 0)
523 while ((d & 0xff800000) != 0)
529 gpu_flag_z = r == 0 ? 1 : 0;
530 gpu_flag_n = r & 0x80000000 ? 1 : 0;*/
535 void opcode_not(void)
540 gpu_flag_z = d == 0 ? 1 : 0;
541 gpu_flag_n = d & 0x80000000 ? 1 : 0;
551 gpu_flag_z = d == 0 ? 1 : 0;
552 gpu_flag_n = d & 0x80000000 ? 1 : 0;
555 // case 63: // PACK / UNPACK
556 void opcode_pack(void)
560 int c1 = (RN & 0x3C00000) >> 10;
561 int c2 = (RN & 0x1E000) >> 5;
567 int c1 = (RN & 0xF000) << 10;
568 int c2 = (RN & 0xF00) << 5;
574 // case 19: // RESMAC
575 void opcode_resmac(void)
581 void opcode_ror(void)
585 gpu_flag_c = d & 0x80000000 ? 1 : 0;
588 gpu_flag_z = d == 0 ? 1 : 0;
589 gpu_flag_n = d & 0x80000000 ? 1 : 0;
593 void opcode_rorq(void)
596 int shift = qtable[IMM_1];
597 gpu_flag_c = d & 0x80000000 ? 1 : 0;
600 gpu_flag_z = d == 0 ? 1 : 0;
601 gpu_flag_n = d & 0x80000000 ? 1 : 0;
605 void opcode_sat8(void)
613 gpu_flag_z = d == 0 ? 1 : 0;
618 void opcode_sat16(void)
626 gpu_flag_z = d == 0 ? 1 : 0;
631 void opcode_sat24(void)
639 gpu_flag_z = d == 0 ? 1 : 0;
647 if (shift & 0x80000000)
649 gpu_flag_c = RN & 0x80000000 ? 1 : 0;
653 gpu_flag_z = RN == 0 ? 1 : 0;
654 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
658 gpu_flag_c = RN & 0x1 ? 1 : 0;
662 gpu_flag_z = RN == 0 ? 1 : 0;
663 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
668 void opcode_sha(void)
671 if(shift & 0x80000000) {
672 gpu_flag_c = RN & 0x80000000 ? 1 : 0;
676 gpu_flag_z = RN == 0 ? 1 : 0;
677 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
679 gpu_flag_c = RN & 0x1 ? 1 : 0;
683 gpu_flag_z = RN == 0 ? 1 : 0;
684 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
689 void opcode_sharq(void)
692 int shift = qtable[IMM_1];
693 gpu_flag_c = d & 0x1 ? 1 : 0;
696 gpu_flag_z = d == 0 ? 1 : 0;
697 gpu_flag_n = d & 0x80000000 ? 1 : 0;
701 void opcode_shlq(void)
704 int shift = 32 - IMM_1;
705 gpu_flag_c = d & 0x80000000 ? 1 : 0;
708 gpu_flag_z = d == 0 ? 1 : 0;
709 gpu_flag_n = d & 0x80000000 ? 1 : 0;
713 void opcode_shrq(void)
716 int shift = qtable[IMM_1];
717 gpu_flag_c = d & 0x1 ? 1 : 0;
720 gpu_flag_z = d == 0 ? 1 : 0;
721 gpu_flag_n = d & 0x80000000 ? 1 : 0;
725 void opcode_store(void)
728 if(address >= 0xF03000 && address < 0xF04000) {
729 // *(UINT32*)(&MEM[address]) = _rotl(RN,16);
730 gpu_long_write(address, RN);
731 } else if(address >= 0xF0B000 && address < 0xF0C000) {
732 // *(UINT32*)(&MEM[address-0x8000]) = _rotl(RN,16);
733 gpu_long_write(address-0x8000, RN);
735 // WriteMem32(address,RN);
736 jaguar_long_write(address, RN);
740 // case 49: // STORE (R14+m)
741 void opcode_store_r14_indexed(void)
743 UINT32 address = gpu_reg[14] + (qtable[IMM_1] << 2);
744 if(address >= 0xF03000 && address < 0xF04000) {
745 // *(UINT32*)(&MEM[address]) = _rotl(RN,16);
746 gpu_long_write(address, RN);
747 } else if(address >= 0xF0B000 && address < 0xF0C000) {
748 // *(UINT32*)(&MEM[address-0x8000]) = _rotl(RN,16);
749 gpu_long_write(address-0x8000, RN);
751 // WriteMem32(address,RN);
752 jaguar_long_write(address, RN);
756 // case 50: // STORE (R15+m)
757 void opcode_store_r15_indexed(void)
759 UINT32 address = gpu_reg[15] + (qtable[IMM_1] << 2);
760 if(address >= 0xF03000 && address < 0xF04000) {
761 // *(UINT32*)(&MEM[address]) = _rotl(RN,16);
762 gpu_long_write(address, RN);
763 } else if(address >= 0xF0B000 && address < 0xF0C000) {
764 // *(UINT32*)(&MEM[address-0x8000]) = _rotl(RN,16);
765 gpu_long_write(address-0x8000, RN);
767 // WriteMem32(address,RN);
768 jaguar_long_write(address, RN);
772 // case 60: // STORE (R14+Rm)
773 void opcode_store_r14_ri(void)
775 UINT32 address = gpu_reg[14] + RM;
776 if(address >= 0xF03000 && address < 0xF04000) {
777 // *(UINT32*)(&MEM[address]) = _rotl(RN,16);
778 gpu_long_write(address, RN);
779 } else if(address >= 0xF0B000 && address < 0xF0C000) {
780 // *(UINT32*)(&MEM[address-0x8000]) = _rotl(RN,16);
781 gpu_long_write(address-0x8000, RN);
783 // WriteMem32(address,RN);
784 jaguar_long_write(address, RN);
788 // case 61: // STORE (R15+Rm)
789 void opcode_store_r15_ri(void)
791 UINT32 address = gpu_reg[15] + RM;
792 if(address >= 0xF03000 && address < 0xF04000) {
793 // *(UINT32*)(&MEM[address]) = _rotl(RN,16);
794 gpu_long_write(address, RN);
795 } else if(address >= 0xF0B000 && address < 0xF0C000) {
796 // *(UINT32*)(&MEM[address-0x8000]) = _rotl(RN,16);
797 gpu_long_write(address-0x8000, RN);
799 // WriteMem32(address,RN);
800 jaguar_long_write(address, RN);
804 // case 45: // STOREB
// Store the low byte of RN to [RM]. A store landing in local GPU RAM is
// widened to a full 32-bit write of RN — presumably because GPU RAM is
// only long-accessible; TODO confirm.
805 void opcode_storeb(void)
// NOTE(review): the range test uses '>' while the LOAD handlers use '>=',
// so a store to exactly 0xF03000 falls through to jaguar_byte_write —
// looks like an off-by-one; confirm intent.
807 if(RM>0xF03000 && RM<0xF04000) {
808 // WriteMem32(RM,RN);
809 gpu_long_write(RM, RN);
811 // WriteMem8(RM,(UINT8)RN);
812 jaguar_byte_write(RM, (UINT8)RN);
816 // case 46: // STOREW
// Store the low 16 bits of RN to [RM]; stores into local GPU RAM are
// widened to a full 32-bit write of RN.
817 void opcode_storew(void)
// NOTE(review): '>' excludes address 0xF03000 itself (LOAD handlers use
// '>='), sending a word store to exactly 0xF03000 down the generic bus
// path instead — confirm whether this boundary is intentional.
819 if(RM>0xF03000 && RM<0xF04000) {
820 // WriteMem32(RM,RN);
821 gpu_long_write(RM, RN);
823 // WriteMem16(RM,(WORD)RN);
824 jaguar_word_write(RM, (UINT16)RN);
828 // case 48: // STOREP
// Phrase (64-bit) store: writes RN and the companion high long
// gpu_hidata. In the GPU-RAM branch only the RN write is visible here —
// original lines 835-837 are elided and may contain the hidata write;
// TODO confirm.
829 void opcode_storep(void)
// NOTE(review): same '>' vs '>=' boundary inconsistency at 0xF03000 as
// STOREB/STOREW — confirm intent.
831 if (RM>0xF03000 && RM<0xF04000)
833 // WriteMem32(RM,RN);
834 gpu_long_write(RM, RN);
838 // WriteMem32(RM,RN);
839 // WriteMem32(RM+4,gpu_hidata);
840 jaguar_long_write(RM, RN);
841 jaguar_long_write(RM + 4, gpu_hidata);
846 void opcode_sub(void)
851 gpu_flag_c = r & 0x100000000 ? 1 : 0;
853 gpu_flag_z = RN == 0 ? 1 : 0;
854 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
858 void opcode_subc(void)
864 gpu_flag_c = r & 0x100000000 ? 1 : 0;
866 gpu_flag_z = RN == 0 ? 1 : 0;
867 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
871 void opcode_subq(void)
873 int s = qtable[IMM_1];
876 gpu_flag_c = r & 0x100000000 ? 1 : 0;
878 gpu_flag_z = RN == 0 ? 1 : 0;
879 gpu_flag_n = RN & 0x80000000 ? 1 : 0;
883 void opcode_subqt(void)
889 void opcode_xor(void)
895 gpu_flag_z = d == 0 ? 1 : 0;
896 gpu_flag_n = d & 0x80000000 ? 1 : 0;