r17845 Wednesday 12th September, 2012 at 18:59:17 UTC by Ville Linde
sharc: Rewrote DMA handling to use timers.
[src/emu/cpu/sharc] sharc.c, sharcdma.c
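
In outline: the old code tracked a single in-flight DMA in global dmaop_* fields and counted it down by hand in the execute loop; the new code gives each of the twelve channels its own DMA_OP record owning an emu_timer, arms that timer for src_count / 4 CPU cycles when a transfer is scheduled, and finishes the transfer (interrupt, copy, busy-flag clear) in the timer callback. A minimal standalone sketch of that pattern follows; "Timer" is a toy stand-in for MAME's emu_timer, and every name in it is illustrative, not MAME API.

    // Toy model of the per-channel, timer-driven DMA scheme in this revision.
    #include <cstdint>
    #include <cstdio>
    #include <functional>

    struct Timer
    {
        std::function<void(int)> callback;
        // A real emu_timer fires after the given delay; this demo fires at once.
        void adjust_cycles(int cycles, int param) { (void)cycles; callback(param); }
    };

    struct DmaOp
    {
        uint32_t src = 0, dst = 0;
        int32_t  src_count = 0;
        bool     active = false;
        Timer    timer;
    };

    struct SharcModel
    {
        DmaOp    dma_op[12];
        uint32_t dma_status = 0;    // busy bit per channel, as read at IOP reg 0x37

        void schedule(int ch, uint32_t src, uint32_t dst, int32_t count)
        {
            DmaOp &op = dma_op[ch];
            op.src = src; op.dst = dst; op.src_count = count;
            op.active = true;
            dma_status |= 1u << ch;                 // set busy flag
            op.timer.adjust_cycles(count / 4, ch);  // fire when the transfer ends
        }

        void on_dma_timer(int ch)                   // plays the role of sharc_dma_callback
        {
            std::printf("channel %d: move %d words, raise DMA interrupt %d\n",
                        ch, (int)dma_op[ch].src_count, ch + 10);
            dma_status &= ~(1u << ch);              // clear busy flag
            dma_op[ch].active = false;
        }
    };

    int main()
    {
        SharcModel s;
        for (int ch = 0; ch < 12; ch++)
            s.dma_op[ch].timer.callback = [&s](int c) { s.on_dma_timer(c); };
        s.schedule(6, 0x20000, 0x8000, 512);
    }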

--- trunk/src/emu/cpu/sharc/sharc.c	(r17844)
+++ trunk/src/emu/cpu/sharc/sharc.c	(r17845)
@@ -73,6 +73,21 @@
    UINT32 loop_type;
 } LADDR;
 
+typedef struct
+{
+   UINT32 src;
+   UINT32 dst;
+   UINT32 chain_ptr;
+   INT32 src_modifier;
+   INT32 dst_modifier;
+   INT32 src_count;
+   INT32 dst_count;
+   INT32 pmode;
+   INT32 chained_direction;
+   emu_timer *timer;
+   bool active;
+} DMA_OP;
+
 typedef struct _SHARC_REGS SHARC_REGS;
 struct _SHARC_REGS
 {
@@ -147,17 +162,8 @@
 
    SHARC_BOOT_MODE boot_mode;
 
-   UINT32 dmaop_src;
-   UINT32 dmaop_dst;
-   UINT32 dmaop_chain_ptr;
-   INT32 dmaop_src_modifier;
-   INT32 dmaop_dst_modifier;
-   INT32 dmaop_src_count;
-   INT32 dmaop_dst_count;
-   INT32 dmaop_pmode;
-   INT32 dmaop_cycles;
-   INT32 dmaop_channel;
-   INT32 dmaop_chained_direction;
+   DMA_OP dma_op[12];
+   UINT32 dma_status;
 
    INT32 interrupt_active;
 
@@ -260,12 +266,7 @@
 
       case 0x37:      // DMA status
       {
-         UINT32 r = 0;
-         if (cpustate->dmaop_cycles > 0)
-         {
-            r |= 1 << cpustate->dmaop_channel;
-         }
-         return r;
+         return cpustate->dma_status;
       }
       default:      fatalerror("sharc_iop_r: Unimplemented IOP reg %02X at %08X\n", address, cpustate->pc);
    }
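
The IOP-register read at 0x37 no longer reconstructs the busy state from the single dmaop_cycles counter; schedule_dma_op and schedule_chained_dma_op set bit `channel` in dma_status, dma_op clears it, and the register read becomes a plain fetch that covers all twelve channels at once. A small illustration of decoding such a mask (standalone code, not MAME API):

    #include <cstdint>
    #include <cstdio>

    // Decode a per-channel busy mask shaped like cpustate->dma_status.
    static void print_busy_channels(uint32_t dma_status)
    {
        for (int ch = 0; ch < 12; ch++)
            if (dma_status & (1u << ch))
                std::printf("DMA channel %d busy\n", ch);
    }

    int main()
    {
        uint32_t dma_status = 0;
        dma_status |= (1u << 6);   // scheduling sets the busy flag, as in schedule_dma_op
        print_busy_channels(dma_status);
        dma_status &= ~(1u << 6);  // completion clears it, as in dma_op
        print_busy_channels(dma_status);
    }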
@@ -292,7 +293,6 @@
       case 0x1c:
       {
          cpustate->dma[6].control = data;
-         //add_iop_write_latency_effect(cpustate, 0x1c, data, 1);
          sharc_iop_delayed_w(cpustate, 0x1c, data, 1);
          break;
       }
@@ -312,7 +312,6 @@
       case 0x1d:
       {
          cpustate->dma[7].control = data;
-         //add_iop_write_latency_effect(cpustate, 0x1d, data, 30);
          sharc_iop_delayed_w(cpustate, 0x1d, data, 30);
          break;
       }
@@ -438,6 +437,12 @@
 
    cpustate->delayed_iop_timer = device->machine().scheduler().timer_alloc(FUNC(sharc_iop_delayed_write_callback), cpustate);
 
+   for (int i=0; i < 12; i++)
+   {
+      cpustate->dma_op[i].active = false;
+      cpustate->dma_op[i].timer = device->machine().scheduler().timer_alloc(FUNC(sharc_dma_callback), cpustate);
+   }
+
    device->save_item(NAME(cpustate->pc));
    device->save_pointer(NAME(&cpustate->r[0].r), ARRAY_LENGTH(cpustate->r));
    device->save_pointer(NAME(&cpustate->reg_alt[0].r), ARRAY_LENGTH(cpustate->reg_alt));
@@ -523,18 +528,22 @@
    device->save_item(NAME(cpustate->irq_active));
    device->save_item(NAME(cpustate->active_irq_num));
 
-   device->save_item(NAME(cpustate->dmaop_src));
-   device->save_item(NAME(cpustate->dmaop_dst));
-   device->save_item(NAME(cpustate->dmaop_chain_ptr));
-   device->save_item(NAME(cpustate->dmaop_src_modifier));
-   device->save_item(NAME(cpustate->dmaop_dst_modifier));
-   device->save_item(NAME(cpustate->dmaop_src_count));
-   device->save_item(NAME(cpustate->dmaop_dst_count));
-   device->save_item(NAME(cpustate->dmaop_pmode));
-   device->save_item(NAME(cpustate->dmaop_cycles));
-   device->save_item(NAME(cpustate->dmaop_channel));
-   device->save_item(NAME(cpustate->dmaop_chained_direction));
+   for (saveindex = 0; saveindex < ARRAY_LENGTH(cpustate->dma_op); saveindex++)
+   {
+      device->save_item(NAME(cpustate->dma_op[saveindex].src), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].dst), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].chain_ptr), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].src_modifier), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].dst_modifier), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].src_count), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].dst_count), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].pmode), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].chained_direction), saveindex);
+      device->save_item(NAME(cpustate->dma_op[saveindex].active), saveindex);
+   }
 
+   device->save_item(NAME(cpustate->dma_status));
+
    device->save_item(NAME(cpustate->interrupt_active));
 
    device->save_item(NAME(cpustate->iop_delayed_reg));
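
The per-channel fields are registered for save states one array element at a time, with saveindex passed as save_item's index argument so every slot gets a distinct save-state tag; the emu_timer pointer itself is not registered, since it is not plain data. A toy sketch of the same indexed-registration idea (ToySaveState and its key scheme are invented for illustration, not MAME's save-state API):

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>

    // Store each field of each array element under a key carrying the index.
    struct ToySaveState
    {
        std::map<std::string, int64_t> slots;
        void save_item(const std::string &name, int64_t value, int index)
        {
            slots[name + "/" + std::to_string(index)] = value;
        }
    };

    struct DmaOpData { uint32_t src; uint32_t dst; bool active; };

    int main()
    {
        DmaOpData dma_op[12] = {};
        dma_op[6] = { 0x20000u, 0x8000u, true };

        ToySaveState state;
        for (int i = 0; i < 12; i++)            // mirrors the saveindex loop above
        {
            state.save_item("dma_op.src",    dma_op[i].src,    i);
            state.save_item("dma_op.dst",    dma_op[i].dst,    i);
            state.save_item("dma_op.active", dma_op[i].active, i);
        }
        std::printf("dma_op.src/6 = %08llx\n",
                    (unsigned long long)state.slots["dma_op.src/6"]);
    }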
@@ -571,10 +580,9 @@
          cpustate->dma[6].control      = 0x2a1;
 
          sharc_dma_exec(cpustate, 6);
-         dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier,
-               cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode);
-         cpustate->dmaop_cycles = 0;
-
+         dma_op(cpustate, 6);
+
+         cpustate->dma_op[6].timer->adjust(attotime::never, 0);
          break;
       }
 
@@ -679,21 +687,6 @@
 
    if (cpustate->idle && cpustate->irq_active == 0)
    {
-      // handle pending DMA transfers
-      if (cpustate->dmaop_cycles > 0)
-      {
-         cpustate->dmaop_cycles -= cpustate->icount;
-         if (cpustate->dmaop_cycles <= 0)
-         {
-            cpustate->dmaop_cycles = 0;
-            dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier, cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode);
-            if (cpustate->dmaop_chain_ptr != 0)
-            {
-               schedule_chained_dma_op(cpustate, cpustate->dmaop_channel, cpustate->dmaop_chain_ptr, cpustate->dmaop_chained_direction);
-            }
-         }
-      }
-
       cpustate->icount = 0;
       debugger_instruction_hook(device, cpustate->daddr);
    }
@@ -790,28 +783,6 @@
          }
       }
 
-      // DMA transfer
-      if (cpustate->dmaop_cycles > 0)
-      {
-         --cpustate->dmaop_cycles;
-         if (cpustate->dmaop_cycles <= 0)
-         {
-            cpustate->irptl |= (1 << (cpustate->dmaop_channel+10));
-
-            /* DMA interrupt */
-            if (cpustate->imask & (1 << (cpustate->dmaop_channel+10)))
-            {
-               cpustate->irq_active |= 1 << (cpustate->dmaop_channel+10);
-            }
-
-            dma_op(cpustate, cpustate->dmaop_src, cpustate->dmaop_dst, cpustate->dmaop_src_modifier, cpustate->dmaop_dst_modifier, cpustate->dmaop_src_count, cpustate->dmaop_dst_count, cpustate->dmaop_pmode);
-            if (cpustate->dmaop_chain_ptr != 0)
-            {
-               schedule_chained_dma_op(cpustate, cpustate->dmaop_channel, cpustate->dmaop_chain_ptr, cpustate->dmaop_chained_direction);
-            }
-         }
-      }
-
       --cpustate->icount;
    };
 }
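
With the per-instruction countdown removed from the execute loop above, completion timing now comes from cycles_to_attotime: the transfer length in cycles (src_count / 4) is converted into emulated wall-clock time at the core's clock rate, and the scheduler delivers the callback at that moment. A worked example of that conversion, assuming a 25 MHz clock purely for illustration (the rate is not taken from this diff):

    #include <cstdio>

    // Illustrative arithmetic only: convert a DMA length to completion time
    // the way cycles_to_attotime would, under an assumed 25 MHz core clock.
    int main()
    {
        const double clock_hz  = 25000000.0;   // assumed SHARC clock
        const int    src_count = 512;          // words to transfer
        const int    cycles    = src_count / 4; // the diff schedules src_count/4 cycles
        const double seconds   = cycles / clock_hz;
        std::printf("%d cycles -> %.3f microseconds until the DMA callback fires\n",
                    cycles, seconds * 1e6);
    }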
--- trunk/src/emu/cpu/sharc/sharcdma.c	(r17844)
+++ trunk/src/emu/cpu/sharc/sharcdma.c	(r17845)
@@ -19,59 +19,79 @@
    UINT32 ext_modifier    = dm_read32(cpustate, op_ptr - 6);
    UINT32 ext_count       = dm_read32(cpustate, op_ptr - 7);
 
-   if (cpustate->dmaop_cycles > 0)
+   if (cpustate->dma_op[channel].active)
    {
       fatalerror("schedule_chained_dma_op: DMA operation already scheduled at %08X!\n", cpustate->pc);
    }
 
    if (chained_direction)      // Transmit to external
    {
-      cpustate->dmaop_dst            = ext_index;
-      cpustate->dmaop_dst_modifier   = ext_modifier;
-      cpustate->dmaop_dst_count      = ext_count;
-      cpustate->dmaop_src            = int_index;
-      cpustate->dmaop_src_modifier   = int_modifier;
-      cpustate->dmaop_src_count      = int_count;
+      cpustate->dma_op[channel].dst            = ext_index;
+      cpustate->dma_op[channel].dst_modifier   = ext_modifier;
+      cpustate->dma_op[channel].dst_count      = ext_count;
+      cpustate->dma_op[channel].src            = int_index;
+      cpustate->dma_op[channel].src_modifier   = int_modifier;
+      cpustate->dma_op[channel].src_count      = int_count;
    }
-   else         // Receive from external
+   else                  // Receive from external
    {
-      cpustate->dmaop_src            = ext_index;
-      cpustate->dmaop_src_modifier   = ext_modifier;
-      cpustate->dmaop_src_count      = ext_count;
-      cpustate->dmaop_dst            = int_index;
-      cpustate->dmaop_dst_modifier   = int_modifier;
-      cpustate->dmaop_dst_count      = int_count;
+      cpustate->dma_op[channel].src            = ext_index;
+      cpustate->dma_op[channel].src_modifier   = ext_modifier;
+      cpustate->dma_op[channel].src_count      = ext_count;
+      cpustate->dma_op[channel].dst            = int_index;
+      cpustate->dma_op[channel].dst_modifier   = int_modifier;
+      cpustate->dma_op[channel].dst_count      = int_count;
    }
 
-   cpustate->dmaop_pmode = 0;
-   cpustate->dmaop_channel = channel;
-   cpustate->dmaop_cycles = cpustate->dmaop_src_count / 4;
-   cpustate->dmaop_chain_ptr = chain_ptr;
-   cpustate->dmaop_chained_direction = chained_direction;
+   cpustate->dma_op[channel].pmode = 0;
+   cpustate->dma_op[channel].chain_ptr = chain_ptr;
+   cpustate->dma_op[channel].chained_direction = chained_direction;
+
+   cpustate->dma_op[channel].active = true;
+
+   int cycles = cpustate->dma_op[channel].src_count / 4;
+   cpustate->dma_op[channel].timer->adjust(cpustate->device->cycles_to_attotime(cycles), channel);
+
+   // enable busy flag
+   cpustate->dma_status |= (1 << channel);
 }
 
 static void schedule_dma_op(SHARC_REGS *cpustate, int channel, UINT32 src, UINT32 dst, int src_modifier, int dst_modifier, int src_count, int dst_count, int pmode)
 {
-   if (cpustate->dmaop_cycles > 0)
+   if (cpustate->dma_op[channel].active)
    {
       fatalerror("schedule_dma_op: DMA operation already scheduled at %08X!\n", cpustate->pc);
    }
 
-   cpustate->dmaop_channel = channel;
-   cpustate->dmaop_src = src;
-   cpustate->dmaop_dst = dst;
-   cpustate->dmaop_src_modifier = src_modifier;
-   cpustate->dmaop_dst_modifier = dst_modifier;
-   cpustate->dmaop_src_count = src_count;
-   cpustate->dmaop_dst_count = dst_count;
-   cpustate->dmaop_pmode = pmode;
-   cpustate->dmaop_chain_ptr = 0;
-   cpustate->dmaop_cycles = src_count / 4;
+   cpustate->dma_op[channel].src = src;
+   cpustate->dma_op[channel].dst = dst;
+   cpustate->dma_op[channel].src_modifier = src_modifier;
+   cpustate->dma_op[channel].dst_modifier = dst_modifier;
+   cpustate->dma_op[channel].src_count = src_count;
+   cpustate->dma_op[channel].dst_count = dst_count;
+   cpustate->dma_op[channel].pmode = pmode;
+   cpustate->dma_op[channel].chain_ptr = 0;
+
+   cpustate->dma_op[channel].active = true;
+
+   int cycles = src_count / 4;
+   cpustate->dma_op[channel].timer->adjust(cpustate->device->cycles_to_attotime(cycles), channel);
+
+   // enable busy flag
+   cpustate->dma_status |= (1 << channel);
 }
 
-static void dma_op(SHARC_REGS *cpustate, UINT32 src, UINT32 dst, int src_modifier, int dst_modifier, int src_count, int dst_count, int pmode)
+static void dma_op(SHARC_REGS *cpustate, int channel)
 {
    int i;
+   UINT32 src         = cpustate->dma_op[channel].src;
+   UINT32 dst         = cpustate->dma_op[channel].dst;
+   int src_modifier   = cpustate->dma_op[channel].src_modifier;
+   int dst_modifier   = cpustate->dma_op[channel].dst_modifier;
+   int src_count      = cpustate->dma_op[channel].src_count;
+   //int dst_count      = cpustate->dma_op[channel].dst_count;
+   int pmode          = cpustate->dma_op[channel].pmode;
+
    //printf("dma_op: %08X, %08X, %08X, %08X, %08X, %08X, %d\n", src, dst, src_modifier, dst_modifier, src_count, dst_count, pmode);
 
    switch (pmode)
@@ -124,16 +144,21 @@
       }
    }
 
-   if (cpustate->dmaop_channel == 6)
+   if (channel == 6)
    {
-      cpustate->irptl |= (1 << (cpustate->dmaop_channel+10));
+      cpustate->irptl |= (1 << (channel+10));
 
       /* DMA interrupt */
-      if (cpustate->imask & (1 << (cpustate->dmaop_channel+10)))
+      if (cpustate->imask & (1 << (channel+10)))
       {
-         cpustate->irq_active |= 1 << (cpustate->dmaop_channel+10);
+         cpustate->irq_active |= 1 << (channel+10);
       }
    }
+
+   // clear busy flag
+   cpustate->dma_status &= ~(1 << channel);
+
+   cpustate->dma_op[channel].active = false;
 }
 
 static void sharc_dma_exec(SHARC_REGS *cpustate, int channel)
@@ -202,3 +227,25 @@
       schedule_dma_op(cpustate, channel, src, dst, src_modifier, dst_modifier, src_count, dst_count, pmode);
    }
 }
+
+static TIMER_CALLBACK(sharc_dma_callback)
+{
+   SHARC_REGS *cpustate = (SHARC_REGS *)ptr;
+   int channel = param;
+
+   cpustate->dma_op[channel].timer->adjust(attotime::never, 0);
+
+   cpustate->irptl |= (1 << (channel+10));
+
+   // DMA interrupt
+   if (cpustate->imask & (1 << (channel+10)))
+   {
+      cpustate->irq_active |= 1 << (channel+10);
+   }
+
+   dma_op(cpustate, channel);
+   if (cpustate->dma_op[channel].chain_ptr != 0)
+   {
+      schedule_chained_dma_op(cpustate, channel, cpustate->dma_op[channel].chain_ptr, cpustate->dma_op[channel].chained_direction);
+   }
+}
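
The new sharc_dma_callback above also closes the loop for chained transfers: when a block finishes and chain_ptr is non-zero, schedule_chained_dma_op reads the next descriptor and re-arms the same channel's timer. A compact standalone sketch of that chain walk (the descriptor layout here is a simplified stand-in, not the SHARC's real transfer-control-block format):

    #include <cstdio>

    // Simplified stand-in for a chained-DMA descriptor list; the real SHARC
    // reads its transfer control blocks from data memory via chain_ptr.
    struct Descriptor
    {
        int count;   // words in this block
        int next;    // index of the next descriptor, -1 ends the chain
    };

    static void run_chain(const Descriptor *descs, int first)
    {
        int cur = first;
        while (cur >= 0)
        {
            // in the emulator this body is split across schedule_chained_dma_op
            // (arm the timer) and sharc_dma_callback (transfer, follow the chain)
            std::printf("transfer %d words (descriptor %d)\n", descs[cur].count, cur);
            cur = descs[cur].next;   // chain_ptr != 0 -> schedule the next block
        }
    }

    int main()
    {
        const Descriptor chain[] = { {256, 1}, {128, 2}, {64, -1} };
        run_chain(chain, 0);
    }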
