trunk/src/emu/cpu/sharc/sharc.c
| r17844 | r17845 | |
| 73 | 73 | UINT32 loop_type; |
| 74 | 74 | } LADDR; |
| 75 | 75 | |
| 76 | typedef struct |
| 77 | { |
| 78 | UINT32 src; |
| 79 | UINT32 dst; |
| 80 | UINT32 chain_ptr; |
| 81 | INT32 src_modifier; |
| 82 | INT32 dst_modifier; |
| 83 | INT32 src_count; |
| 84 | INT32 dst_count; |
| 85 | INT32 pmode; |
| 86 | INT32 chained_direction; |
| 87 | emu_timer *timer; |
| 88 | bool active; |
| 89 | } DMA_OP; |
| 90 | |
| 76 | 91 | typedef struct _SHARC_REGS SHARC_REGS; |
| 77 | 92 | struct _SHARC_REGS |
| 78 | 93 | { |
| r17844 | r17845 | |
| 147 | 162 | |
| 148 | 163 | SHARC_BOOT_MODE boot_mode; |
| 149 | 164 | |
| 150 | | UINT32 dmaop_src; |
| 151 | | UINT32 dmaop_dst; |
| 152 | | UINT32 dmaop_chain_ptr; |
| 153 | | INT32 dmaop_src_modifier; |
| 154 | | INT32 dmaop_dst_modifier; |
| 155 | | INT32 dmaop_src_count; |
| 156 | | INT32 dmaop_dst_count; |
| 157 | | INT32 dmaop_pmode; |
| 158 | | INT32 dmaop_cycles; |
| 159 | | INT32 dmaop_channel; |
| 160 | | INT32 dmaop_chained_direction; |
| 165 | DMA_OP dma_op[12]; |
| 166 | UINT32 dma_status; |
| 161 | 167 | |
| 162 | 168 | INT32 interrupt_active; |
| 163 | 169 | |
| r17844 | r17845 | |
| 260 | 266 | |
| 261 | 267 | case 0x37: // DMA status |
| 262 | 268 | { |
| 263 | | UINT32 r = 0; |
| 264 | | if (cpustate->dmaop_cycles > 0) |
| 265 | | { |
| 266 | | r |= 1 << cpustate->dmaop_channel; |
| 267 | | } |
| 268 | | return r; |
| 269 | return cpustate->dma_status; |
| 269 | 270 | } |
| 270 | 271 | default: fatalerror("sharc_iop_r: Unimplemented IOP reg %02X at %08X\n", address, cpustate->pc); |
| 271 | 272 | } |
| r17844 | r17845 | |
| 292 | 293 | case 0x1c: |
| 293 | 294 | { |
| 294 | 295 | cpustate->dma[6].control = data; |
| 295 | | //add_iop_write_latency_effect(cpustate, 0x1c, data, 1); |
| 296 | 296 | sharc_iop_delayed_w(cpustate, 0x1c, data, 1); |
| 297 | 297 | break; |
| 298 | 298 | } |
| r17844 | r17845 | |
| 312 | 312 | case 0x1d: |
| 313 | 313 | { |
| 314 | 314 | cpustate->dma[7].control = data; |
| 315 | | //add_iop_write_latency_effect(cpustate, 0x1d, data, 30); |
| 316 | 315 | sharc_iop_delayed_w(cpustate, 0x1d, data, 30); |
| 317 | 316 | break; |
| 318 | 317 | } |
| r17844 | r17845 | |
| 438 | 437 | |
| 439 | 438 | cpustate->delayed_iop_timer = device->machine().scheduler().timer_alloc(FUNC(sharc_iop_delayed_write_callback), cpustate); |
| 440 | 439 | |
| 440 | for (int i=0; i < 12; i++) |
| 441 | { |
| 442 | cpustate->dma_op[i].active = false; |
| 443 | cpustate->dma_op[i].timer = device->machine().scheduler().timer_alloc(FUNC(sharc_dma_callback), cpustate); |
| 444 | } |
| 445 | |
| 441 | 446 | device->save_item(NAME(cpustate->pc)); |
| 442 | 447 | device->save_pointer(NAME(&cpustate->r[0].r), ARRAY_LENGTH(cpustate->r)); |
| 443 | 448 | device->save_pointer(NAME(&cpustate->reg_alt[0].r), ARRAY_LENGTH(cpustate->reg_alt)); |
| r17844 | r17845 | |
| 523 | 528 | device->save_item(NAME(cpustate->irq_active)); |
| 524 | 529 | device->save_item(NAME(cpustate->active_irq_num)); |
| 525 | 530 | |
| 526 | | device->save_item(NAME(cpustate->dmaop_src)); |
| 527 | | device->save_item(NAME(cpustate->dmaop_dst)); |
| 528 | | device->save_item(NAME(cpustate->dmaop_chain_ptr)); |
| 529 | | device->save_item(NAME(cpustate->dmaop_src_modifier)); |
| 530 | | device->save_item(NAME(cpustate->dmaop_dst_modifier)); |
| 531 | | device->save_item(NAME(cpustate->dmaop_src_count)); |
| 532 | | device->save_item(NAME(cpustate->dmaop_dst_count)); |
| 533 | | device->save_item(NAME(cpustate->dmaop_pmode)); |
| 534 | | device->save_item(NAME(cpustate->dmaop_cycles)); |
| 535 | | device->save_item(NAME(cpustate->dmaop_channel)); |
| 536 | | device->save_item(NAME(cpustate->dmaop_chained_direction)); |
| 531 | for (saveindex = 0; saveindex < ARRAY_LENGTH(cpustate->dma_op); saveindex++) |
| 532 | { |
| 533 | device->save_item(NAME(cpustate->dma_op[saveindex].src), saveindex); |
| 534 | device->save_item(NAME(cpustate->dma_op[saveindex].dst), saveindex); |
| 535 | device->save_item(NAME(cpustate->dma_op[saveindex].chain_ptr), saveindex); |
| 536 | device->save_item(NAME(cpustate->dma_op[saveindex].src_modifier), saveindex); |
| 537 | device->save_item(NAME(cpustate->dma_op[saveindex].dst_modifier), saveindex); |
| 538 | device->save_item(NAME(cpustate->dma_op[saveindex].src_count), saveindex); |
| 539 | device->save_item(NAME(cpustate->dma_op[saveindex].dst_count), saveindex); |
| 540 | device->save_item(NAME(cpustate->dma_op[saveindex].pmode), saveindex); |
| 541 | device->save_item(NAME(cpustate->dma_op[saveindex].chained_direction), saveindex); |
| 542 | device->save_item(NAME(cpustate->dma_op[saveindex].active), saveindex); |
| 543 | } |
| 537 | 544 | |
| 545 | device->save_item(NAME(cpustate->dma_status)); |
| 546 | |
| 538 | 547 | device->save_item(NAME(cpustate->interrupt_active)); |
| 539 | 548 | |
| 540 | 549 | device->save_item(NAME(cpustate->iop_delayed_reg)); |
| r17844 | r17845 | |
| 571 | 580 | cpustate->dma[6].control = 0x2a1; |
| 572 | 581 | |
| 573 | 582 | sharc_dma_exec(cpustate, 6); |
| 574 | | dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier, |
| 575 | | cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode); |
| 576 | | cpustate->dmaop_cycles = 0; |
| 577 | | |
| 583 | dma_op(cpustate, 6); |
| 584 | |
| 585 | cpustate->dma_op[6].timer->adjust(attotime::never, 0); |
| 578 | 586 | break; |
| 579 | 587 | } |
| 580 | 588 | |
| r17844 | r17845 | |
| 679 | 687 | |
| 680 | 688 | if (cpustate->idle && cpustate->irq_active == 0) |
| 681 | 689 | { |
| 682 | | // handle pending DMA transfers |
| 683 | | if (cpustate->dmaop_cycles > 0) |
| 684 | | { |
| 685 | | cpustate->dmaop_cycles -= cpustate->icount; |
| 686 | | if (cpustate->dmaop_cycles <= 0) |
| 687 | | { |
| 688 | | cpustate->dmaop_cycles = 0; |
| 689 | | dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier, cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode); |
| 690 | | if (cpustate->dmaop_chain_ptr != 0) |
| 691 | | { |
| 692 | | schedule_chained_dma_op(cpustate, cpustate->dmaop_channel, cpustate->dmaop_chain_ptr, cpustate->dmaop_chained_direction); |
| 693 | | } |
| 694 | | } |
| 695 | | } |
| 696 | | |
| 697 | 690 | cpustate->icount = 0; |
| 698 | 691 | debugger_instruction_hook(device, cpustate->daddr); |
| 699 | 692 | } |
| r17844 | r17845 | |
| 790 | 783 | } |
| 791 | 784 | } |
| 792 | 785 | |
| 793 | | // DMA transfer |
| 794 | | if (cpustate->dmaop_cycles > 0) |
| 795 | | { |
| 796 | | --cpustate->dmaop_cycles; |
| 797 | | if (cpustate->dmaop_cycles <= 0) |
| 798 | | { |
| 799 | | cpustate->irptl |= (1 << (cpustate->dmaop_channel+10)); |
| 800 | | |
| 801 | | /* DMA interrupt */ |
| 802 | | if (cpustate->imask & (1 << (cpustate->dmaop_channel+10))) |
| 803 | | { |
| 804 | | cpustate->irq_active |= 1 << (cpustate->dmaop_channel+10); |
| 805 | | } |
| 806 | | |
| 807 | | dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier, cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode); |
| 808 | | if (cpustate->dmaop_chain_ptr != 0) |
| 809 | | { |
| 810 | | schedule_chained_dma_op(cpustate, cpustate->dmaop_channel, cpustate->dmaop_chain_ptr, cpustate->dmaop_chained_direction); |
| 811 | | } |
| 812 | | } |
| 813 | | } |
| 814 | | |
| 815 | 786 | --cpustate->icount; |
| 816 | 787 | }; |
| 817 | 788 | } |
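With that, the sharc.c half of the revision is complete: the scattered dmaop_* globals become a twelve-entry DMA_OP array with one emu_timer per channel, the execute loop no longer counts down DMA cycles by hand, and the reg 0x37 read returns a maintained dma_status word. The following compilable sketch (not MAME code; sharc_state_t, schedule and complete are illustrative names) isolates the busy-bit bookkeeping that makes concurrent per-channel transfers possible:

```c
/*
 * Minimal standalone sketch (not MAME code) of the bookkeeping pattern
 * r17845 introduces in sharc.c: per-channel DMA state in a 12-entry array
 * plus a dma_status word with one busy bit per channel, replacing the
 * single global dmaop_* fields (which allowed only one transfer at a time).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_DMA_CHANNELS 12

typedef struct
{
    uint32_t src, dst, chain_ptr;
    int32_t  src_count;
    bool     active;     /* previously implicit in the lone dmaop_cycles counter */
} dma_op_t;

typedef struct
{
    dma_op_t dma_op[NUM_DMA_CHANNELS];
    uint32_t dma_status; /* what the IOP read of reg 0x37 now returns directly */
} sharc_state_t;

/* schedule a transfer: record per-channel state and set the busy bit */
static void schedule(sharc_state_t *s, int ch, uint32_t src, uint32_t dst, int32_t count)
{
    dma_op_t *op = &s->dma_op[ch];
    op->src = src;
    op->dst = dst;
    op->src_count = count;
    op->active = true;
    s->dma_status |= 1u << ch;
}

/* completion (the timer-callback side): clear busy bit and active flag */
static void complete(sharc_state_t *s, int ch)
{
    s->dma_status &= ~(1u << ch);
    s->dma_op[ch].active = false;
}

int main(void)
{
    sharc_state_t s = { 0 };
    schedule(&s, 6, 0x20000, 0x40000, 0x100);
    schedule(&s, 7, 0x30000, 0x50000, 0x80);        /* concurrent with channel 6 */
    printf("dma_status = %03X\n", (unsigned)s.dma_status); /* 0C0: both busy */
    complete(&s, 6);
    printf("dma_status = %03X\n", (unsigned)s.dma_status); /* 080: channel 7 only */
    return 0;
}
```

The old code could not have expressed the second schedule() call: with a single dmaop_cycles counter, a second scheduling attempt while one was pending was a fatalerror, which is exactly the check both schedule functions now perform against the per-channel active flag instead.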
trunk/src/emu/cpu/sharc/sharcdma.c
| r17844 | r17845 | |
| 19 | 19 | UINT32 ext_modifier = dm_read32(cpustate, op_ptr - 6); |
| 20 | 20 | UINT32 ext_count = dm_read32(cpustate, op_ptr - 7); |
| 21 | 21 | |
| 22 | | if (cpustate->dmaop_cycles > 0) |
| 22 | if (cpustate->dma_op[channel].active) |
| 23 | 23 | { |
| 24 | 24 | fatalerror("schedule_chained_dma_op: DMA operation already scheduled at %08X!\n", cpustate->pc); |
| 25 | 25 | } |
| 26 | 26 | |
| 27 | 27 | if (chained_direction) // Transmit to external |
| 28 | 28 | { |
| 29 | | cpustate->dmaop_dst = ext_index; |
| 30 | | cpustate->dmaop_dst_modifier = ext_modifier; |
| 31 | | cpustate->dmaop_dst_count = ext_count; |
| 32 | | cpustate->dmaop_src = int_index; |
| 33 | | cpustate->dmaop_src_modifier = int_modifier; |
| 34 | | cpustate->dmaop_src_count = int_count; |
| 29 | cpustate->dma_op[channel].dst = ext_index; |
| 30 | cpustate->dma_op[channel].dst_modifier = ext_modifier; |
| 31 | cpustate->dma_op[channel].dst_count = ext_count; |
| 32 | cpustate->dma_op[channel].src = int_index; |
| 33 | cpustate->dma_op[channel].src_modifier = int_modifier; |
| 34 | cpustate->dma_op[channel].src_count = int_count; |
| 35 | 35 | } |
| 36 | | else // Receive from external |
| 36 | else // Receive from external |
| 37 | 37 | { |
| 38 | | cpustate->dmaop_src = ext_index; |
| 39 | | cpustate->dmaop_src_modifier = ext_modifier; |
| 40 | | cpustate->dmaop_src_count = ext_count; |
| 41 | | cpustate->dmaop_dst = int_index; |
| 42 | | cpustate->dmaop_dst_modifier = int_modifier; |
| 43 | | cpustate->dmaop_dst_count = int_count; |
| 38 | cpustate->dma_op[channel].src = ext_index; |
| 39 | cpustate->dma_op[channel].src_modifier = ext_modifier; |
| 40 | cpustate->dma_op[channel].src_count = ext_count; |
| 41 | cpustate->dma_op[channel].dst = int_index; |
| 42 | cpustate->dma_op[channel].dst_modifier = int_modifier; |
| 43 | cpustate->dma_op[channel].dst_count = int_count; |
| 44 | 44 | } |
| 45 | 45 | |
| 46 | | cpustate->dmaop_pmode = 0; |
| 47 | | cpustate->dmaop_channel = channel; |
| 48 | | cpustate->dmaop_cycles = cpustate->dmaop_src_count / 4; |
| 49 | | cpustate->dmaop_chain_ptr = chain_ptr; |
| 50 | | cpustate->dmaop_chained_direction = chained_direction; |
| 46 | cpustate->dma_op[channel].pmode = 0; |
| 47 | cpustate->dma_op[channel].chain_ptr = chain_ptr; |
| 48 | cpustate->dma_op[channel].chained_direction = chained_direction; |
| 49 | |
| 50 | cpustate->dma_op[channel].active = true; |
| 51 | |
| 52 | int cycles = cpustate->dma_op[channel].src_count / 4; |
| 53 | cpustate->dma_op[channel].timer->adjust(cpustate->device->cycles_to_attotime(cycles), channel); |
| 54 | |
| 55 | // enable busy flag |
| 56 | cpustate->dma_status |= (1 << channel); |
| 51 | 57 | } |
| 52 | 58 | |
| 53 | 59 | static void schedule_dma_op(SHARC_REGS *cpustate, int channel, UINT32 src, UINT32 dst, int src_modifier, int dst_modifier, int src_count, int dst_count, int pmode) |
| 54 | 60 | { |
| 55 | | if (cpustate->dmaop_cycles > 0) |
| 61 | if (cpustate->dma_op[channel].active) |
| 56 | 62 | { |
| 57 | 63 | fatalerror("schedule_dma_op: DMA operation already scheduled at %08X!\n", cpustate->pc); |
| 58 | 64 | } |
| 59 | 65 | |
| 60 | | cpustate->dmaop_channel = channel; |
| 61 | | cpustate->dmaop_src = src; |
| 62 | | cpustate->dmaop_dst = dst; |
| 63 | | cpustate->dmaop_src_modifier = src_modifier; |
| 64 | | cpustate->dmaop_dst_modifier = dst_modifier; |
| 65 | | cpustate->dmaop_src_count = src_count; |
| 66 | | cpustate->dmaop_dst_count = dst_count; |
| 67 | | cpustate->dmaop_pmode = pmode; |
| 68 | | cpustate->dmaop_chain_ptr = 0; |
| 69 | | cpustate->dmaop_cycles = src_count / 4; |
| 66 | cpustate->dma_op[channel].src = src; |
| 67 | cpustate->dma_op[channel].dst = dst; |
| 68 | cpustate->dma_op[channel].src_modifier = src_modifier; |
| 69 | cpustate->dma_op[channel].dst_modifier = dst_modifier; |
| 70 | cpustate->dma_op[channel].src_count = src_count; |
| 71 | cpustate->dma_op[channel].dst_count = dst_count; |
| 72 | cpustate->dma_op[channel].pmode = pmode; |
| 73 | cpustate->dma_op[channel].chain_ptr = 0; |
| 74 | |
| 75 | cpustate->dma_op[channel].active = true; |
| 76 | |
| 77 | int cycles = src_count / 4; |
| 78 | cpustate->dma_op[channel].timer->adjust(cpustate->device->cycles_to_attotime(cycles), channel); |
| 79 | |
| 80 | // enable busy flag |
| 81 | cpustate->dma_status |= (1 << channel); |
| 70 | 82 | } |
| 71 | 83 | |
| 72 | | static void dma_op(SHARC_REGS *cpustate, UINT32 src, UINT32 dst, int src_modifier, int dst_modifier, int src_count, int dst_count, int pmode) |
| 84 | static void dma_op(SHARC_REGS *cpustate, int channel) |
| 73 | 85 | { |
| 74 | 86 | int i; |
| 87 | UINT32 src = cpustate->dma_op[channel].src; |
| 88 | UINT32 dst = cpustate->dma_op[channel].dst; |
| 89 | int src_modifier = cpustate->dma_op[channel].src_modifier; |
| 90 | int dst_modifier = cpustate->dma_op[channel].dst_modifier; |
| 91 | int src_count = cpustate->dma_op[channel].src_count; |
| 92 | //int dst_count = cpustate->dma_op[channel].dst_count; |
| 93 | int pmode = cpustate->dma_op[channel].pmode; |
| 94 | |
| 75 | 95 | //printf("dma_op: %08X, %08X, %08X, %08X, %08X, %08X, %d\n", src, dst, src_modifier, dst_modifier, src_count, dst_count, pmode); |
| 76 | 96 | |
| 77 | 97 | switch (pmode) |
| r17844 | r17845 | |
| 124 | 144 | } |
| 125 | 145 | } |
| 126 | 146 | |
| 127 | | if (cpustate->dmaop_channel == 6) |
| 147 | if (channel == 6) |
| 128 | 148 | { |
| 129 | | cpustate->irptl |= (1 << (cpustate->dmaop_channel+10)); |
| 149 | cpustate->irptl |= (1 << (channel+10)); |
| 130 | 150 | |
| 131 | 151 | /* DMA interrupt */ |
| 132 | | if (cpustate->imask & (1 << (cpustate->dmaop_channel+10))) |
| 152 | if (cpustate->imask & (1 << (channel+10))) |
| 133 | 153 | { |
| 134 | | cpustate->irq_active |= 1 << (cpustate->dmaop_channel+10); |
| 154 | cpustate->irq_active |= 1 << (channel+10); |
| 135 | 155 | } |
| 136 | 156 | } |
| 157 | |
| 158 | // clear busy flag |
| 159 | cpustate->dma_status &= ~(1 << channel); |
| 160 | |
| 161 | cpustate->dma_op[channel].active = false; |
| 137 | 162 | } |
| 138 | 163 | |
| 139 | 164 | static void sharc_dma_exec(SHARC_REGS *cpustate, int channel) |
| r17844 | r17845 | |
| 202 | 227 | schedule_dma_op(cpustate, channel, src, dst, src_modifier, dst_modifier, src_count, dst_count, pmode); |
| 203 | 228 | } |
| 204 | 229 | } |
| 230 | |
| 231 | static TIMER_CALLBACK(sharc_dma_callback) |
| 232 | { |
| 233 | SHARC_REGS *cpustate = (SHARC_REGS *)ptr; |
| 234 | int channel = param; |
| 235 | |
| 236 | cpustate->dma_op[channel].timer->adjust(attotime::never, 0); |
| 237 | |
| 238 | cpustate->irptl |= (1 << (channel+10)); |
| 239 | |
| 240 | // DMA interrupt |
| 241 | if (cpustate->imask & (1 << (channel+10))) |
| 242 | { |
| 243 | cpustate->irq_active |= 1 << (channel+10); |
| 244 | } |
| 245 | |
| 246 | dma_op(cpustate, channel); |
| 247 | if (cpustate->dma_op[channel].chain_ptr != 0) |
| 248 | { |
| 249 | schedule_chained_dma_op(cpustate, channel, cpustate->dma_op[channel].chain_ptr, cpustate->dma_op[channel].chained_direction); |
| 250 | } |
| 251 | } |
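Both dma_op() and the new sharc_dma_callback() share the same interrupt arithmetic: DMA channel N latches IRPTL bit N+10, raises the IRQ only if that bit is unmasked in IMASK, and a nonzero chain_ptr hands the channel straight to the next descriptor via schedule_chained_dma_op(). A small compilable sketch of just that completion path (not MAME code; dma_complete is an illustrative stand-in for the callback):

```c
/*
 * Standalone sketch (not MAME code) of the completion path shared by
 * dma_op() and sharc_dma_callback(): channel N maps to IRPTL bit N+10,
 * the IRQ is raised only when unmasked, and a nonzero chain_ptr means
 * the chain continues with the next descriptor.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t irptl;                  /* latched interrupt bits */
static uint32_t imask = 0xFFFFFFFF;     /* all interrupts unmasked for the demo */
static uint32_t irq_active;             /* pending, unmasked interrupts */

static void dma_complete(int channel, uint32_t chain_ptr)
{
    irptl |= 1u << (channel + 10);           /* latch the channel's DMA interrupt */
    if (imask & (1u << (channel + 10)))
        irq_active |= 1u << (channel + 10);  /* raise it only if unmasked */

    /* the real code calls schedule_chained_dma_op() here; the sketch just reports */
    if (chain_ptr != 0)
        printf("channel %d: chain continues at %08X\n", channel, (unsigned)chain_ptr);
}

int main(void)
{
    dma_complete(6, 0);         /* unchained: interrupt only */
    dma_complete(7, 0x20100);   /* chained: next descriptor pending */
    printf("irptl = %08X\n", (unsigned)irptl); /* bits 16 and 17 -> 00030000 */
    return 0;
}
```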