trunk/src/emu/cpu/arm7/arm7drc.c
| r28735 | r28736 | |
| 1 | | /***************************************************************************** |
| 2 | | * |
| 3 | | * arm7.c |
| 4 | | * Portable CPU Emulator for 32-bit ARM v3/4/5/6 |
| 5 | | * |
| 6 | | * Copyright Steve Ellenoff, all rights reserved. |
| 7 | | * Thumb, DSP, and MMU support and many bugfixes by R. Belmont and Ryan Holtz. |
| 8 | | * Dyanmic Recompiler (DRC) / Just In Time Compiler (JIT) by Ryan Holtz. |
| 9 | | * |
| 10 | | * - This source code is released as freeware for non-commercial purposes. |
| 11 | | * - You are free to use and redistribute this code in modified or |
| 12 | | * unmodified form, provided you list me in the credits. |
| 13 | | * - If you modify this source code, you must add a notice to each modified |
| 14 | | * source file that it has been changed. If you're a nice person, you |
| 15 | | * will clearly mark each change too. :) |
| 16 | | * - If you wish to use this for commercial purposes, please contact me at |
| 17 | | * sellenoff@hotmail.com |
| 18 | | * - The author of this copywritten work reserves the right to change the |
| 19 | | * terms of its usage and license at any time, including retroactively |
| 20 | | * - This entire notice must remain in the source code. |
| 21 | | * |
| 22 | | * This work is based on: |
| 23 | | * #1) 'Atmel Corporation ARM7TDMI (Thumb) Datasheet - January 1999' |
| 24 | | * #2) Arm 2/3/6 emulator By Bryan McPhail (bmcphail@tendril.co.uk) and Phil Stroffolino (MAME CORE 0.76) |
| 25 | | * |
| 26 | | *****************************************************************************/ |
| 27 | | |
| 28 | | /****************************************************************************** |
| 29 | | * Notes: |
| 30 | | |
| 31 | | ** This is a plain vanilla implementation of an ARM7 cpu which incorporates my ARM7 core. |
| 32 | | It can be used as is, or used to demonstrate how to utilize the arm7 core to create a cpu |
| 33 | | that uses the core, since there are numerous different mcu packages that incorporate an arm7 core. |
| 34 | | |
| 35 | | See the notes in the arm7core.c file itself regarding issues/limitations of the arm7 core. |
| 36 | | ** |
| 37 | | *****************************************************************************/ |
| 38 | | |
| 39 | | |
| 40 | | /*************************************************************************** |
| 41 | | DEBUGGING |
| 42 | | ***************************************************************************/ |
| 43 | | |
| 44 | | #define LOG_UML (0) |
| 45 | | #define LOG_NATIVE (0) |
| 46 | | |
| 47 | | #define SINGLE_INSTRUCTION_MODE (0) |
| 48 | | |
| 49 | | /*************************************************************************** |
| 50 | | CONSTANTS |
| 51 | | ***************************************************************************/ |
| 52 | | |
| 53 | | #include "arm7tdrc.c" |
| 54 | | |
| 55 | | /* map variables */ |
| 56 | | #define MAPVAR_PC uml::M0 |
| 57 | | #define MAPVAR_CYCLES uml::M1 |
| 58 | | |
| 59 | | /* size of the execution code cache */ |
| 60 | | #define CACHE_SIZE (32 * 1024 * 1024) |
| 61 | | |
| 62 | | /* compilation boundaries -- how far back/forward does the analysis extend? */ |
| 63 | | #define COMPILE_BACKWARDS_BYTES 128 |
| 64 | | #define COMPILE_FORWARDS_BYTES 512 |
| 65 | | #define COMPILE_MAX_INSTRUCTIONS ((COMPILE_BACKWARDS_BYTES/4) + (COMPILE_FORWARDS_BYTES/4)) |
| 66 | | #define COMPILE_MAX_SEQUENCE 64 |
| 67 | | |
| 68 | | /* exit codes */ |
| 69 | | #define EXECUTE_OUT_OF_CYCLES 0 |
| 70 | | #define EXECUTE_MISSING_CODE 1 |
| 71 | | #define EXECUTE_UNMAPPED_CODE 2 |
| 72 | | #define EXECUTE_RESET_CACHE 3 |
| 73 | | |
| 74 | | |
| 75 | | /*************************************************************************** |
| 76 | | INLINE FUNCTIONS |
| 77 | | ***************************************************************************/ |
| 78 | | |
| 79 | | /*------------------------------------------------- |
| 80 | | epc - compute the exception PC from a |
| 81 | | descriptor |
| 82 | | -------------------------------------------------*/ |
| 83 | | |
INLINE UINT32 epc(const opcode_desc *desc)
{
	// The exception PC is simply the descriptor's own PC; no pipeline
	// adjustment is applied here.
	return desc->pc;
}
| 88 | | |
| 89 | | |
| 90 | | /*------------------------------------------------- |
| 91 | | alloc_handle - allocate a handle if not |
| 92 | | already allocated |
| 93 | | -------------------------------------------------*/ |
| 94 | | |
| 95 | | INLINE void alloc_handle(drcuml_state *drcuml, uml::code_handle **handleptr, const char *name) |
| 96 | | { |
| 97 | | if (*handleptr == NULL) |
| 98 | | *handleptr = drcuml->handle_alloc(name); |
| 99 | | } |
| 100 | | |
| 101 | | |
| 102 | | /*------------------------------------------------- |
| 103 | | load_fast_iregs - load any fast integer |
| 104 | | registers |
| 105 | | -------------------------------------------------*/ |
| 106 | | |
| 107 | | void arm7_cpu_device::load_fast_iregs(drcuml_block *block) |
| 108 | | { |
| 109 | | int regnum; |
| 110 | | |
| 111 | | for (regnum = 0; regnum < ARRAY_LENGTH(m_impstate.regmap); regnum++) |
| 112 | | if (m_impstate.regmap[regnum].is_int_register()) |
| 113 | | UML_DMOV(block, uml::ireg(m_impstate.regmap[regnum].ireg() - uml::REG_I0), uml::mem(&m_r[regnum])); |
| 114 | | } |
| 115 | | |
| 116 | | |
| 117 | | /*------------------------------------------------- |
| 118 | | save_fast_iregs - save any fast integer |
| 119 | | registers |
| 120 | | -------------------------------------------------*/ |
| 121 | | |
| 122 | | void arm7_cpu_device::save_fast_iregs(drcuml_block *block) |
| 123 | | { |
| 124 | | int regnum; |
| 125 | | |
| 126 | | for (regnum = 0; regnum < ARRAY_LENGTH(m_impstate.regmap); regnum++) |
| 127 | | if (m_impstate.regmap[regnum].is_int_register()) |
| 128 | | UML_DMOV(block, uml::mem(&m_r[regnum]), uml::ireg(m_impstate.regmap[regnum].ireg() - uml::REG_I0)); |
| 129 | | } |
| 130 | | |
| 131 | | |
| 132 | | |
| 133 | | /*************************************************************************** |
| 134 | | CORE CALLBACKS |
| 135 | | ***************************************************************************/ |
| 136 | | |
| 137 | | /*------------------------------------------------- |
| 138 | | arm7_init - initialize the processor |
| 139 | | -------------------------------------------------*/ |
| 140 | | |
| 141 | | void arm7_cpu_device::arm7_drc_init() |
| 142 | | { |
| 143 | | drc_cache *cache; |
| 144 | | drcbe_info beinfo; |
| 145 | | UINT32 flags = 0; |
| 146 | | |
| 147 | | /* allocate enough space for the cache and the core */ |
| 148 | | cache = auto_alloc(machine(), drc_cache(CACHE_SIZE)); |
| 149 | | if (cache == NULL) |
| 150 | | fatalerror("Unable to allocate cache of size %d\n", (UINT32)(CACHE_SIZE)); |
| 151 | | |
| 152 | | /* allocate the implementation-specific state from the full cache */ |
| 153 | | memset(&m_impstate, 0, sizeof(m_impstate)); |
| 154 | | m_impstate.cache = cache; |
| 155 | | |
| 156 | | /* initialize the UML generator */ |
| 157 | | if (LOG_UML) |
| 158 | | flags |= DRCUML_OPTION_LOG_UML; |
| 159 | | if (LOG_NATIVE) |
| 160 | | flags |= DRCUML_OPTION_LOG_NATIVE; |
| 161 | | m_impstate.drcuml = new drcuml_state(*this, *cache, flags, 1, 32, 1); |
| 162 | | |
| 163 | | /* add symbols for our stuff */ |
| 164 | | m_impstate.drcuml->symbol_add(&m_icount, sizeof(m_icount), "icount"); |
| 165 | | for (int regnum = 0; regnum < 37; regnum++) |
| 166 | | { |
| 167 | | char buf[10]; |
| 168 | | sprintf(buf, "r%d", regnum); |
| 169 | | m_impstate.drcuml->symbol_add(&m_r[regnum], sizeof(m_r[regnum]), buf); |
| 170 | | } |
| 171 | | m_impstate.drcuml->symbol_add(&m_impstate.mode, sizeof(m_impstate.mode), "mode"); |
| 172 | | m_impstate.drcuml->symbol_add(&m_impstate.arg0, sizeof(m_impstate.arg0), "arg0"); |
| 173 | | m_impstate.drcuml->symbol_add(&m_impstate.arg1, sizeof(m_impstate.arg1), "arg1"); |
| 174 | | m_impstate.drcuml->symbol_add(&m_impstate.numcycles, sizeof(m_impstate.numcycles), "numcycles"); |
| 175 | | //m_impstate.drcuml->symbol_add(&m_impstate.fpmode, sizeof(m_impstate.fpmode), "fpmode"); // TODO |
| 176 | | |
| 177 | | /* initialize the front-end helper */ |
| 178 | | //m_impstate.drcfe = auto_alloc(machine(), arm7_frontend(this, COMPILE_BACKWARDS_BYTES, COMPILE_FORWARDS_BYTES, SINGLE_INSTRUCTION_MODE ? 1 : COMPILE_MAX_SEQUENCE)); |
| 179 | | |
| 180 | | /* allocate memory for cache-local state and initialize it */ |
| 181 | | //memcpy(&m_impstate.fpmode, fpmode_source, sizeof(fpmode_source)); // TODO |
| 182 | | |
| 183 | | /* compute the register parameters */ |
| 184 | | for (int regnum = 0; regnum < 37; regnum++) |
| 185 | | { |
| 186 | | m_impstate.regmap[regnum] = (regnum == 0) ? uml::parameter(0) : uml::parameter::make_memory(&m_r[regnum]); |
| 187 | | } |
| 188 | | |
| 189 | | /* if we have registers to spare, assign r2, r3, r4 to leftovers */ |
| 190 | | //if (!DISABLE_FAST_REGISTERS) // TODO |
| 191 | | { |
| 192 | | m_impstate.drcuml->get_backend_info(beinfo); |
| 193 | | if (beinfo.direct_iregs > 4) |
| 194 | | { // PC |
| 195 | | m_impstate.regmap[eR15] = uml::I4; |
| 196 | | } |
| 197 | | if (beinfo.direct_iregs > 5) |
| 198 | | { // Status |
| 199 | | m_impstate.regmap[eCPSR] = uml::I5; |
| 200 | | } |
| 201 | | if (beinfo.direct_iregs > 6) |
| 202 | | { // SP |
| 203 | | m_impstate.regmap[eR13] = uml::I6; |
| 204 | | } |
| 205 | | } |
| 206 | | |
| 207 | | /* mark the cache dirty so it is updated on next execute */ |
| 208 | | m_impstate.cache_dirty = TRUE; |
| 209 | | } |
| 210 | | |
| 211 | | |
| 212 | | /*------------------------------------------------- |
| 213 | | arm7_execute - execute the CPU for the |
| 214 | | specified number of cycles |
| 215 | | -------------------------------------------------*/ |
| 216 | | |
| 217 | | void arm7_cpu_device::execute_run_drc() |
| 218 | | { |
| 219 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 220 | | int execute_result; |
| 221 | | |
| 222 | | /* reset the cache if dirty */ |
| 223 | | if (m_impstate.cache_dirty) |
| 224 | | code_flush_cache(); |
| 225 | | m_impstate.cache_dirty = FALSE; |
| 226 | | |
| 227 | | /* execute */ |
| 228 | | do |
| 229 | | { |
| 230 | | /* run as much as we can */ |
| 231 | | execute_result = drcuml->execute(*m_impstate.entry); |
| 232 | | |
| 233 | | /* if we need to recompile, do it */ |
| 234 | | if (execute_result == EXECUTE_MISSING_CODE) |
| 235 | | code_compile_block(m_impstate.mode, m_r[eR15]); |
| 236 | | else if (execute_result == EXECUTE_UNMAPPED_CODE) |
| 237 | | fatalerror("Attempted to execute unmapped code at PC=%08X\n", m_r[eR15]); |
| 238 | | else if (execute_result == EXECUTE_RESET_CACHE) |
| 239 | | code_flush_cache(); |
| 240 | | |
| 241 | | } while (execute_result != EXECUTE_OUT_OF_CYCLES); |
| 242 | | } |
| 243 | | |
| 244 | | /*------------------------------------------------- |
| 245 | | arm7_exit - cleanup from execution |
| 246 | | -------------------------------------------------*/ |
| 247 | | |
void arm7_cpu_device::arm7_drc_exit()
{
	/* clean up the DRC */
	//auto_free(machine(), m_impstate.drcfe);
	// Release the UML state first, then the code cache it lived in.
	delete m_impstate.drcuml;
	auto_free(machine(), m_impstate.cache);
}
| 255 | | |
| 256 | | |
| 257 | | /*------------------------------------------------- |
| 258 | | arm7drc_set_options - configure DRC options |
| 259 | | -------------------------------------------------*/ |
| 260 | | |
void arm7_cpu_device::arm7drc_set_options(UINT32 options)
{
	// Record the caller-supplied DRC option flags; they are consulted
	// during later code generation.
	m_impstate.drcoptions = options;
}
| 265 | | |
| 266 | | |
| 267 | | /*------------------------------------------------- |
| 268 | | arm7drc_add_fastram - add a new fastram |
| 269 | | region |
| 270 | | -------------------------------------------------*/ |
| 271 | | |
| 272 | | void arm7_cpu_device::arm7drc_add_fastram(offs_t start, offs_t end, UINT8 readonly, void *base) |
| 273 | | { |
| 274 | | if (m_impstate.fastram_select < ARRAY_LENGTH(m_impstate.fastram)) |
| 275 | | { |
| 276 | | m_impstate.fastram[m_impstate.fastram_select].start = start; |
| 277 | | m_impstate.fastram[m_impstate.fastram_select].end = end; |
| 278 | | m_impstate.fastram[m_impstate.fastram_select].readonly = readonly; |
| 279 | | m_impstate.fastram[m_impstate.fastram_select].base = base; |
| 280 | | m_impstate.fastram_select++; |
| 281 | | } |
| 282 | | } |
| 283 | | |
| 284 | | |
| 285 | | /*------------------------------------------------- |
| 286 | | arm7drc_add_hotspot - add a new hotspot |
| 287 | | -------------------------------------------------*/ |
| 288 | | |
| 289 | | void arm7_cpu_device::arm7drc_add_hotspot(offs_t pc, UINT32 opcode, UINT32 cycles) |
| 290 | | { |
| 291 | | if (m_impstate.hotspot_select < ARRAY_LENGTH(m_impstate.hotspot)) |
| 292 | | { |
| 293 | | m_impstate.hotspot[m_impstate.hotspot_select].pc = pc; |
| 294 | | m_impstate.hotspot[m_impstate.hotspot_select].opcode = opcode; |
| 295 | | m_impstate.hotspot[m_impstate.hotspot_select].cycles = cycles; |
| 296 | | m_impstate.hotspot_select++; |
| 297 | | } |
| 298 | | } |
| 299 | | |
| 300 | | |
| 301 | | |
| 302 | | /*************************************************************************** |
| 303 | | CACHE MANAGEMENT |
| 304 | | ***************************************************************************/ |
| 305 | | |
| 306 | | /*------------------------------------------------- |
| 307 | | code_flush_cache - flush the cache and |
| 308 | | regenerate static code |
| 309 | | -------------------------------------------------*/ |
| 310 | | |
| 311 | | void arm7_cpu_device::code_flush_cache() |
| 312 | | { |
| 313 | | /* empty the transient cache contents */ |
| 314 | | m_impstate.drcuml->reset(); |
| 315 | | |
| 316 | | try |
| 317 | | { |
| 318 | | /* generate the entry point and out-of-cycles handlers */ |
| 319 | | static_generate_entry_point(); |
| 320 | | static_generate_nocode_handler(); |
| 321 | | static_generate_out_of_cycles(); |
| 322 | | static_generate_tlb_translate(NULL); // TODO FIXME |
| 323 | | static_generate_detect_fault(NULL); // TODO FIXME |
| 324 | | //static_generate_tlb_mismatch(); |
| 325 | | |
| 326 | | /* add subroutines for memory accesses */ |
| 327 | | static_generate_memory_accessor(1, FALSE, FALSE, "read8", &m_impstate.read8); |
| 328 | | static_generate_memory_accessor(1, TRUE, FALSE, "write8", &m_impstate.write8); |
| 329 | | static_generate_memory_accessor(2, FALSE, FALSE, "read16", &m_impstate.read16); |
| 330 | | static_generate_memory_accessor(2, TRUE, FALSE, "write16", &m_impstate.write16); |
| 331 | | static_generate_memory_accessor(4, FALSE, FALSE, "read32", &m_impstate.read32); |
| 332 | | static_generate_memory_accessor(4, TRUE, FALSE, "write32", &m_impstate.write32); |
| 333 | | } |
| 334 | | catch (drcuml_block::abort_compilation &) |
| 335 | | { |
| 336 | | fatalerror("Unrecoverable error generating static code\n"); |
| 337 | | } |
| 338 | | } |
| 339 | | |
| 340 | | |
| 341 | | /*------------------------------------------------- |
| 342 | | code_compile_block - compile a block of the |
| 343 | | given mode at the specified pc |
| 344 | | -------------------------------------------------*/ |
| 345 | | |
void arm7_cpu_device::code_compile_block(UINT8 mode, offs_t pc)
{
	// Compile the code sequence(s) starting at mode/pc into UML, retrying
	// from a freshly flushed cache whenever code generation aborts.
	drcuml_state *drcuml = m_impstate.drcuml;
	compiler_state compiler = { 0 };
	const opcode_desc *seqlast;
	int override = FALSE;

	g_profiler.start(PROFILER_DRC_COMPILE);

	/* get a description of this sequence */
	// TODO FIXME
	// NOTE(review): the frontend is disabled, so desclist is always NULL and
	// the sequence loop below never executes -- the DRC path is stubbed out here
	const opcode_desc *desclist = NULL; //m_impstate.drcfe->describe_code(pc); // TODO
//  if (LOG_UML || LOG_NATIVE)
//      log_opcode_desc(drcuml, desclist, 0);

	/* if we get an error back, flush the cache and try again */
	bool succeeded = false;
	while (!succeeded)
	{
		try
		{
			/* start the block */
			drcuml_block *block = drcuml->begin_block(4096);

			/* loop until we get through all instruction sequences */
			// note: seqlast is set by the inner scan below and is used here to
			// advance to the next sequence after the body has been emitted
			for (const opcode_desc *seqhead = desclist; seqhead != NULL; seqhead = seqlast->next())
			{
				const opcode_desc *curdesc;
				UINT32 nextpc;

				/* add a code log entry */
				if (LOG_UML)
					block->append_comment("-------------------------");                 // comment

				/* determine the last instruction in this sequence */
				for (seqlast = seqhead; seqlast != NULL; seqlast = seqlast->next())
					if (seqlast->flags & OPFLAG_END_SEQUENCE)
						break;
				assert(seqlast != NULL);

				/* if we don't have a hash for this mode/pc, or if we are overriding all, add one */
				if (override || !drcuml->hash_exists(mode, seqhead->pc))
					UML_HASH(block, mode, seqhead->pc);                                 // hash    mode,pc

				/* if we already have a hash, and this is the first sequence, assume that we */
				/* are recompiling due to being out of sync and allow future overrides */
				else if (seqhead == desclist)
				{
					// once we see a stale hash on the first sequence, force-override
					// every later hash in this compilation pass
					override = TRUE;
					UML_HASH(block, mode, seqhead->pc);                                 // hash    mode,pc
				}

				/* otherwise, redispatch to that fixed PC and skip the rest of the processing */
				else
				{
					UML_LABEL(block, seqhead->pc | 0x80000000);                         // label   seqhead->pc | 0x80000000
					UML_HASHJMP(block, 0, seqhead->pc, *m_impstate.nocode);
																						// hashjmp <mode>,seqhead->pc,nocode
					continue;
				}

				/* validate this code block if we're not pointing into ROM */
				if (m_program->get_write_ptr(seqhead->physpc) != NULL)
					generate_checksum_block(block, &compiler, seqhead, seqlast);

				/* label this instruction, if it may be jumped to locally */
				if (seqhead->flags & OPFLAG_IS_BRANCH_TARGET)
					UML_LABEL(block, seqhead->pc | 0x80000000);                         // label   seqhead->pc | 0x80000000

				/* iterate over instructions in the sequence and compile them */
				for (curdesc = seqhead; curdesc != seqlast->next(); curdesc = curdesc->next())
					generate_sequence_instruction(block, &compiler, curdesc);

				/* if we need to return to the start, do it */
				if (seqlast->flags & OPFLAG_RETURN_TO_START)
					nextpc = pc;

				/* otherwise we just go to the next instruction */
				else
					nextpc = seqlast->pc + (seqlast->skipslots + 1) * 4;

				/* count off cycles and go there */
				generate_update_cycles(block, &compiler, nextpc);                       // <subtract cycles>

				/* if the last instruction can change modes, use a variable mode; otherwise, assume the same mode */
				/*if (seqlast->flags & OPFLAG_CAN_CHANGE_MODES)
				    UML_HASHJMP(block, uml::mem(&m_impstate.mode), nextpc, *m_impstate.nocode);
																						// hashjmp <mode>,nextpc,nocode
				else*/ if (seqlast->next() == NULL || seqlast->next()->pc != nextpc)
					UML_HASHJMP(block, m_impstate.mode, nextpc, *m_impstate.nocode);
																						// hashjmp <mode>,nextpc,nocode
			}

			/* end the sequence */
			block->end();
			g_profiler.stop();
			succeeded = true;
		}
		catch (drcuml_block::abort_compilation &)
		{
			// ran out of cache space mid-block: flush everything and recompile
			code_flush_cache();
		}
	}
}
| 450 | | |
| 451 | | |
| 452 | | /*************************************************************************** |
| 453 | | C FUNCTION CALLBACKS |
| 454 | | ***************************************************************************/ |
| 455 | | |
| 456 | | /*------------------------------------------------- |
| 457 | | cfunc_get_cycles - compute the total number |
| 458 | | of cycles executed so far |
| 459 | | -------------------------------------------------*/ |
| 460 | | |
void arm7_cpu_device::cfunc_get_cycles()
{
	// C callback for generated code: latch the device's total elapsed
	// cycle count where the UML code can read it back.
	m_impstate.numcycles = total_cycles();
}
| 465 | | |
| 466 | | |
| 467 | | /*------------------------------------------------- |
| 468 | | cfunc_unimplemented - handler for |
| 469 | | unimplemented opcdes |
| 470 | | -------------------------------------------------*/ |
| 471 | | |
| 472 | | void arm7_cpu_device::cfunc_unimplemented() |
| 473 | | { |
| 474 | | UINT32 opcode = m_impstate.arg0; |
| 475 | | fatalerror("PC=%08X: Unimplemented op %08X\n", m_r[eR15], opcode); |
| 476 | | } |
| 477 | | |
| 478 | | |
| 479 | | /*************************************************************************** |
| 480 | | STATIC CODEGEN |
| 481 | | ***************************************************************************/ |
| 482 | | |
| 483 | | /*------------------------------------------------- |
| 484 | | static_generate_entry_point - generate a |
| 485 | | static entry point |
| 486 | | -------------------------------------------------*/ |
| 487 | | |
| 488 | | void arm7_cpu_device::static_generate_entry_point() |
| 489 | | { |
| 490 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 491 | | uml::code_label nodabt; |
| 492 | | uml::code_label nofiq; |
| 493 | | uml::code_label noirq; |
| 494 | | uml::code_label irq32; |
| 495 | | uml::code_label nopabd; |
| 496 | | uml::code_label nound; |
| 497 | | uml::code_label swi32; |
| 498 | | uml::code_label irqadjust; |
| 499 | | uml::code_label done; |
| 500 | | drcuml_block *block; |
| 501 | | |
| 502 | | block = drcuml->begin_block(110); |
| 503 | | |
| 504 | | /* forward references */ |
| 505 | | //alloc_handle(drcuml, &m_impstate.exception_norecover[EXCEPTION_INTERRUPT], "interrupt_norecover"); |
| 506 | | alloc_handle(drcuml, &m_impstate.nocode, "nocode"); |
| 507 | | alloc_handle(drcuml, &m_impstate.detect_fault, "detect_fault"); |
| 508 | | alloc_handle(drcuml, &m_impstate.tlb_translate, "tlb_translate"); |
| 509 | | |
| 510 | | alloc_handle(drcuml, &m_impstate.entry, "entry"); |
| 511 | | UML_HANDLE(block, *m_impstate.entry); // handle entry |
| 512 | | |
| 513 | | /* load fast integer registers */ |
| 514 | | load_fast_iregs(block); |
| 515 | | |
| 516 | | UML_CALLH(block, *m_impstate.check_irq); |
| 517 | | |
| 518 | | /* generate a hash jump via the current mode and PC */ |
| 519 | | UML_HASHJMP(block, 0, uml::mem(&m_pc), *m_impstate.nocode); // hashjmp 0,<pc>,nocode |
| 520 | | block->end(); |
| 521 | | } |
| 522 | | |
| 523 | | |
| 524 | | /*------------------------------------------------- |
| 525 | | static_generate_check_irq - generate a handler |
| 526 | | to check IRQs |
| 527 | | -------------------------------------------------*/ |
| 528 | | |
| 529 | | void arm7_cpu_device::static_generate_check_irq() |
| 530 | | { |
| 531 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 532 | | drcuml_block *block; |
| 533 | | uml::code_label noirq; |
| 534 | | int nodabt = 0; |
| 535 | | int nopabt = 0; |
| 536 | | int irqadjust = 0; |
| 537 | | int nofiq = 0; |
| 538 | | int irq32 = 0; |
| 539 | | int swi32 = 0; |
| 540 | | int done = 0; |
| 541 | | int label = 1; |
| 542 | | |
| 543 | | /* begin generating */ |
| 544 | | block = drcuml->begin_block(120); |
| 545 | | |
| 546 | | /* generate a hash jump via the current mode and PC */ |
| 547 | | alloc_handle(drcuml, &m_impstate.check_irq, "check_irq"); |
| 548 | | UML_HANDLE(block, *m_impstate.check_irq); // handle check_irq |
| 549 | | /* Exception priorities: |
| 550 | | |
| 551 | | Reset |
| 552 | | Data abort |
| 553 | | FIRQ |
| 554 | | IRQ |
| 555 | | Prefetch abort |
| 556 | | Undefined instruction |
| 557 | | Software Interrupt |
| 558 | | */ |
| 559 | | |
| 560 | | UML_ADD(block, uml::I0, uml::mem(&R15), 4); // add i0, PC, 4 ;insn pc |
| 561 | | |
| 562 | | // Data Abort |
| 563 | | UML_TEST(block, uml::mem(&m_pendingAbtD), 1); // test pendingAbtD, 1 |
| 564 | | UML_JMPc(block, uml::COND_Z, nodabt = label++); // jmpz nodabt |
| 565 | | |
| 566 | | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_ABT, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_ABT, 0, MODE_FLAG |
| 567 | | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 568 | | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 569 | | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 570 | | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 571 | | UML_MOV(block, uml::mem(&R15), 0x00000010); // mov PC, 0x10 (Data Abort vector address) |
| 572 | | UML_MOV(block, uml::mem(&m_pendingAbtD), 0); // mov pendingAbtD, 0 |
| 573 | | UML_JMP(block, irqadjust = label++); // jmp irqadjust |
| 574 | | |
| 575 | | UML_LABEL(block, nodabt); // nodabt: |
| 576 | | |
| 577 | | // FIQ |
| 578 | | UML_TEST(block, uml::mem(&m_pendingFiq), 1); // test pendingFiq, 1 |
| 579 | | UML_JMPc(block, uml::COND_Z, nofiq = label++); // jmpz nofiq |
| 580 | | UML_TEST(block, uml::mem(&GET_CPSR), F_MASK); // test CPSR, F_MASK |
| 581 | | UML_JMPc(block, uml::COND_Z, nofiq); // jmpz nofiq |
| 582 | | |
| 583 | | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 584 | | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 585 | | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK | F_MASK); // or CPSR, CPSR, I_MASK | F_MASK |
| 586 | | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 587 | | UML_MOV(block, uml::mem(&R15), 0x0000001c); // mov PC, 0x1c (FIQ vector address) |
| 588 | | UML_MOV(block, uml::mem(&m_pendingFiq), 0); // mov pendingFiq, 0 |
| 589 | | UML_JMP(block, irqadjust); // jmp irqadjust |
| 590 | | |
| 591 | | UML_LABEL(block, nofiq); // nofiq: |
| 592 | | |
| 593 | | // IRQ |
| 594 | | UML_TEST(block, uml::mem(&m_pendingIrq), 1); // test pendingIrq, 1 |
| 595 | | UML_JMPc(block, uml::COND_Z, noirq = label++); // jmpz noirq |
| 596 | | UML_TEST(block, uml::mem(&GET_CPSR), I_MASK); // test CPSR, I_MASK |
| 597 | | UML_JMPc(block, uml::COND_Z, noirq); // jmpz noirq |
| 598 | | |
| 599 | | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 600 | | UML_TEST(block, uml::mem(&GET_CPSR), SR_MODE32); // test CPSR, MODE32 |
| 601 | | UML_JMPc(block, uml::COND_NZ, irq32 = label++); // jmpnz irq32 |
| 602 | | UML_AND(block, uml::I1, uml::I0, 0xf4000000); // and i1, i0, 0xf4000000 |
| 603 | | UML_OR(block, uml::mem(&R15), uml::I1, 0x0800001a); // or PC, i1, 0x0800001a |
| 604 | | UML_AND(block, uml::I1, uml::mem(&GET_CPSR), 0x0fffff3f); // and i1, CPSR, 0x0fffff3f |
| 605 | | UML_ROLAND(block, uml::I0, uml::mem(&R15), 32-20, 0x0000000c); // roland i0, R15, 32-20, 0x0000000c |
| 606 | | UML_ROLINS(block, uml::I0, uml::mem(&R15), 0, 0xf0000000); // rolins i0, R15, 0, 0xf0000000 |
| 607 | | UML_OR(block, uml::mem(&GET_CPSR), uml::I0, uml::I1); // or CPSR, i0, i1 |
| 608 | | UML_JMP(block, irqadjust); // jmp irqadjust |
| 609 | | |
| 610 | | UML_LABEL(block, irq32); // irq32: |
| 611 | | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 612 | | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 613 | | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 614 | | UML_MOV(block, uml::mem(&R15), 0x00000018); // mov PC, 0x18 (IRQ vector address) |
| 615 | | |
| 616 | | UML_JMP(block, irqadjust); // jmp irqadjust |
| 617 | | |
| 618 | | UML_LABEL(block, noirq); // noirq: |
| 619 | | |
| 620 | | // Prefetch Abort |
| 621 | | UML_TEST(block, uml::mem(&m_pendingAbtP), 1); // test pendingAbtP, 1 |
| 622 | | UML_JMPc(block, uml::COND_Z, nopabt = label++); // jmpz nopabt |
| 623 | | |
| 624 | | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_ABT, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_ABT, 0, MODE_FLAG |
| 625 | | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 626 | | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 627 | | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 628 | | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 629 | | UML_MOV(block, uml::mem(&R15), 0x0000000c); // mov PC, 0x0c (Prefetch Abort vector address) |
| 630 | | UML_MOV(block, uml::mem(&m_pendingAbtP), 0); // mov pendingAbtP, 0 |
| 631 | | UML_JMP(block, irqadjust); // jmp irqadjust |
| 632 | | |
| 633 | | UML_LABEL(block, nopabt); // nopabt: |
| 634 | | |
| 635 | | // Undefined instruction |
| 636 | | UML_TEST(block, uml::mem(&m_pendingUnd), 1); // test pendingUnd, 1 |
| 637 | | UML_JMPc(block, uml::COND_Z, nopabt = label++); // jmpz nound |
| 638 | | |
| 639 | | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_UND, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_UND, 0, MODE_FLAG |
| 640 | | UML_MOV(block, uml::I1, (UINT64)-4); // mov i1, -4 |
| 641 | | UML_TEST(block, uml::mem(&GET_CPSR), T_MASK); // test CPSR, T_MASK |
| 642 | | UML_MOVc(block, uml::COND_NZ, uml::I1, (UINT64)-2); // movnz i1, -2 |
| 643 | | UML_ADD(block, uml::mem(&GET_REGISTER(14)), uml::I0, uml::I1); // add LR, i0, i1 |
| 644 | | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 645 | | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 646 | | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 647 | | UML_MOV(block, uml::mem(&R15), 0x00000004); // mov PC, 0x0c (Undefined Insn vector address) |
| 648 | | UML_MOV(block, uml::mem(&m_pendingUnd), 0); // mov pendingUnd, 0 |
| 649 | | UML_JMP(block, irqadjust); // jmp irqadjust |
| 650 | | |
| 651 | | UML_LABEL(block, nopabt); // nopabt: |
| 652 | | |
| 653 | | // Software Interrupt |
| 654 | | UML_TEST(block, uml::mem(&m_pendingSwi), 1); // test pendingSwi, 1 |
| 655 | | UML_JMPc(block, uml::COND_Z, done = label++); // jmpz done |
| 656 | | |
| 657 | | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_SVC, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_SVC, 0, MODE_FLAG |
| 658 | | UML_MOV(block, uml::I1, (UINT64)-4); // mov i1, -4 |
| 659 | | UML_TEST(block, uml::mem(&GET_CPSR), T_MASK); // test CPSR, T_MASK |
| 660 | | UML_MOVc(block, uml::COND_NZ, uml::I1, (UINT64)-2); // movnz i1, -2 |
| 661 | | UML_ADD(block, uml::mem(&GET_REGISTER(14)), uml::I0, uml::I1); // add LR, i0, i1 |
| 662 | | |
| 663 | | UML_TEST(block, uml::mem(&GET_CPSR), SR_MODE32); // test CPSR, MODE32 |
| 664 | | UML_JMPc(block, uml::COND_NZ, swi32 = label++); // jmpnz swi32 |
| 665 | | UML_AND(block, uml::I1, uml::I0, 0xf4000000); // and i1, i0, 0xf4000000 |
| 666 | | UML_OR(block, uml::mem(&R15), uml::I1, 0x0800001b); // or PC, i1, 0x0800001b |
| 667 | | UML_AND(block, uml::I1, uml::mem(&GET_CPSR), 0x0fffff3f); // and i1, CPSR, 0x0fffff3f |
| 668 | | UML_ROLAND(block, uml::I0, uml::mem(&R15), 32-20, 0x0000000c); // roland i0, R15, 32-20, 0x0000000c |
| 669 | | UML_ROLINS(block, uml::I0, uml::mem(&R15), 0, 0xf0000000); // rolins i0, R15, 0, 0xf0000000 |
| 670 | | UML_OR(block, uml::mem(&GET_CPSR), uml::I0, uml::I1); // or CPSR, i0, i1 |
| 671 | | UML_MOV(block, uml::mem(&m_pendingSwi), 0); // mov pendingSwi, 0 |
| 672 | | UML_JMP(block, irqadjust); // jmp irqadjust |
| 673 | | |
| 674 | | UML_LABEL(block, swi32); // irq32: |
| 675 | | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 676 | | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 677 | | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 678 | | UML_MOV(block, uml::mem(&R15), 0x00000008); // mov PC, 0x08 (SWI vector address) |
| 679 | | UML_MOV(block, uml::mem(&m_pendingSwi), 0); // mov pendingSwi, 0 |
| 680 | | UML_JMP(block, irqadjust); // jmp irqadjust |
| 681 | | |
| 682 | | UML_LABEL(block, irqadjust); // irqadjust: |
| 683 | | UML_MOV(block, uml::I1, 0); // mov i1, 0 |
| 684 | | UML_TEST(block, uml::mem(&COPRO_CTRL), COPRO_CTRL_MMU_EN | COPRO_CTRL_INTVEC_ADJUST); // test COPRO_CTRL, MMU_EN | INTVEC_ADJUST |
| 685 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 0xffff0000); // movnz i1, 0xffff0000 |
| 686 | | UML_OR(block, uml::mem(&R15), uml::mem(&R15), uml::I1); // or PC, i1 |
| 687 | | |
| 688 | | UML_LABEL(block, done); // done: |
| 689 | | |
| 690 | | block->end(); |
| 691 | | }; |
| 692 | | |
| 693 | | /*------------------------------------------------- |
| 694 | | static_generate_nocode_handler - generate an |
| 695 | | exception handler for "out of code" |
| 696 | | -------------------------------------------------*/ |
| 697 | | |
| 698 | | void arm7_cpu_device::static_generate_nocode_handler() |
| 699 | | { |
| 700 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 701 | | drcuml_block *block; |
| 702 | | |
| 703 | | /* begin generating */ |
| 704 | | block = drcuml->begin_block(10); |
| 705 | | |
| 706 | | /* generate a hash jump via the current mode and PC */ |
| 707 | | alloc_handle(drcuml, &m_impstate.nocode, "nocode"); |
| 708 | | UML_HANDLE(block, *m_impstate.nocode); // handle nocode |
| 709 | | UML_GETEXP(block, uml::I0); // getexp i0 |
| 710 | | UML_MOV(block, uml::mem(&R15), uml::I0); // mov [pc],i0 |
| 711 | | save_fast_iregs(block); |
| 712 | | UML_EXIT(block, EXECUTE_MISSING_CODE); // exit EXECUTE_MISSING_CODE |
| 713 | | |
| 714 | | block->end(); |
| 715 | | } |
| 716 | | |
| 717 | | |
| 718 | | /*------------------------------------------------- |
| 719 | | static_generate_out_of_cycles - generate an |
| 720 | | out of cycles exception handler |
| 721 | | -------------------------------------------------*/ |
| 722 | | |
| 723 | | void arm7_cpu_device::static_generate_out_of_cycles() |
| 724 | | { |
| 725 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 726 | | drcuml_block *block; |
| 727 | | |
| 728 | | /* begin generating */ |
| 729 | | block = drcuml->begin_block(10); |
| 730 | | |
| 731 | | /* generate a hash jump via the current mode and PC */ |
| 732 | | alloc_handle(drcuml, &m_impstate.out_of_cycles, "out_of_cycles"); |
| 733 | | UML_HANDLE(block, *m_impstate.out_of_cycles); // handle out_of_cycles |
| 734 | | UML_GETEXP(block, uml::I0); // getexp i0 |
| 735 | | UML_MOV(block, uml::mem(&R15), uml::I0); // mov <pc>,i0 |
| 736 | | save_fast_iregs(block); |
| 737 | | UML_EXIT(block, EXECUTE_OUT_OF_CYCLES); // exit EXECUTE_OUT_OF_CYCLES |
| 738 | | |
| 739 | | block->end(); |
| 740 | | } |
| 741 | | |
| 742 | | |
| 743 | | /*------------------------------------------------------------------ |
| 744 | | static_generate_tlb_translate |
| 745 | | ------------------------------------------------------------------*/ |
| 746 | | |
| 747 | | void arm7_cpu_device::static_generate_detect_fault(uml::code_handle **handleptr) |
| 748 | | { |
| 749 | | /* on entry, flags are in I2, vaddr is in I3, desc_lvl1 is in I4, ap is in R5 */ |
| 750 | | /* on exit, fault result is in I6 */ |
| 751 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 752 | | drcuml_block *block; |
| 753 | | int donefault = 0; |
| 754 | | int checkuser = 0; |
| 755 | | int label = 1; |
| 756 | | |
| 757 | | /* begin generating */ |
| 758 | | block = drcuml->begin_block(1024); |
| 759 | | |
| 760 | | /* add a global entry for this */ |
| 761 | | alloc_handle(drcuml, &m_impstate.detect_fault, "detect_fault"); |
| 762 | | UML_HANDLE(block, *m_impstate.detect_fault); // handle detect_fault |
| 763 | | |
| 764 | | UML_ROLAND(block, uml::I6, uml::I4, 32-4, 0x0f<<1); // roland i6, i4, 32-4, 0xf<<1 |
| 765 | | UML_ROLAND(block, uml::I6, uml::mem(&COPRO_DOMAIN_ACCESS_CONTROL), uml::I6, 3);// roland i6, COPRO_DOMAIN_ACCESS_CONTROL, i6, 3 |
| 766 | | // if permission == 3, FAULT_NONE |
| 767 | | UML_CMP(block, uml::I6, 3); // cmp i6, 3 |
| 768 | | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 769 | | UML_JMPc(block, uml::COND_E, donefault = label++); // jmpe donefault |
| 770 | | // if permission == 0 || permission == 2, FAULT_DOMAIN |
| 771 | | UML_CMP(block, uml::I6, 1); // cmp i6, 1 |
| 772 | | UML_MOVc(block, uml::COND_NE, uml::I6, FAULT_DOMAIN); // movne i6, FAULT_DOMAIN |
| 773 | | UML_JMPc(block, uml::COND_NE, donefault); // jmpne donefault |
| 774 | | |
| 775 | | // if permission == 1 |
| 776 | | UML_CMP(block, uml::I5, 3); // cmp i5, 3 |
| 777 | | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 778 | | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 779 | | UML_CMP(block, uml::I5, 0); // cmp i5, 1 |
| 780 | | UML_JMPc(block, uml::COND_NE, checkuser = label++); // jmpne checkuser |
| 781 | | UML_ROLAND(block, uml::I6, uml::mem(&COPRO_CTRL), // roland i6, COPRO_CTRL, 32 - COPRO_CTRL_SYSTEM_SHIFT, |
| 782 | | 32 - COPRO_CTRL_SYSTEM_SHIFT, // COPRO_CTRL_SYSTEM | COPRO_CTRL_ROM |
| 783 | | COPRO_CTRL_SYSTEM | COPRO_CTRL_ROM); |
| 784 | | // if s == 0 && r == 0, FAULT_PERMISSION |
| 785 | | UML_CMP(block, uml::I6, 0); // cmp i6, 0 |
| 786 | | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 787 | | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 788 | | // if s == 1 && r == 1, FAULT_PERMISSION |
| 789 | | UML_CMP(block, uml::I6, 3); // cmp i6, 3 |
| 790 | | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 791 | | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 792 | | // if flags & TLB_WRITE, FAULT_PERMISSION |
| 793 | | UML_TEST(block, uml::I2, ARM7_TLB_WRITE); // test i2, ARM7_TLB_WRITE |
| 794 | | UML_MOVc(block, uml::COND_NZ, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 795 | | UML_JMPc(block, uml::COND_NZ, donefault); // jmpe donefault |
| 796 | | // if r == 1 && s == 0, FAULT_NONE |
| 797 | | UML_CMP(block, uml::I6, 2); // cmp i6, 2 |
| 798 | | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 799 | | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 800 | | UML_AND(block, uml::I6, uml::mem(&GET_CPSR), MODE_FLAG); // and i6, GET_CPSR, MODE_FLAG |
| 801 | | UML_CMP(block, uml::I6, eARM7_MODE_USER); // cmp i6, eARM7_MODE_USER |
| 802 | | // if r == 0 && s == 1 && usermode, FAULT_PERMISSION |
| 803 | | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 804 | | UML_MOVc(block, uml::COND_NE, uml::I6, FAULT_NONE); // movne i6, FAULT_NONE |
| 805 | | UML_JMP(block, donefault); // jmp donefault |
| 806 | | |
| 807 | | UML_LABEL(block, checkuser); // checkuser: |
| 808 | | // if !write, FAULT_NONE |
| 809 | | UML_TEST(block, uml::I2, ARM7_TLB_WRITE); // test i2, ARM7_TLB_WRITE |
| 810 | | UML_MOVc(block, uml::COND_Z, uml::I6, FAULT_NONE); // movz i6, FAULT_NONE |
| 811 | | UML_JMPc(block, uml::COND_Z, donefault); // jmp donefault |
| 812 | | UML_AND(block, uml::I6, uml::mem(&GET_CPSR), MODE_FLAG); // and i6, GET_CPSR, MODE_FLAG |
| 813 | | UML_CMP(block, uml::I6, eARM7_MODE_USER); // cmp i6, eARM7_MODE_USER |
| 814 | | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 815 | | UML_MOVc(block, uml::COND_NE, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 816 | | |
| 817 | | UML_LABEL(block, donefault); // donefault: |
| 818 | | UML_RET(block); // ret |
| 819 | | } |
| 820 | | |
| 821 | | /*------------------------------------------------------------------ |
| 822 | | static_generate_tlb_translate |
| 823 | | ------------------------------------------------------------------*/ |
| 824 | | |
| 825 | | void arm7_cpu_device::static_generate_tlb_translate(uml::code_handle **handleptr) |
| 826 | | { |
| 827 | | /* on entry, address is in I0 and flags are in I2 */ |
| 828 | | /* on exit, translated address is in I0 and success/failure is in I2 */ |
| 829 | | /* routine trashes I4-I7 */ |
| 830 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 831 | | drcuml_block *block; |
| 832 | | uml::code_label smallfault; |
| 833 | | uml::code_label smallprefetch; |
| 834 | | int nopid = 0; |
| 835 | | int nounmapped = 0; |
| 836 | | int nounmapped2 = 0; |
| 837 | | int nocoarse = 0; |
| 838 | | int nofine = 0; |
| 839 | | int nosection = 0; |
| 840 | | int nolargepage = 0; |
| 841 | | int nosmallpage = 0; |
| 842 | | int notinypage = 0; |
| 843 | | int handlefault = 0; |
| 844 | | int level2 = 0; |
| 845 | | int prefetch = 0; |
| 846 | | int prefetch2 = 0; |
| 847 | | int label = 1; |
| 848 | | |
| 849 | | /* begin generating */ |
| 850 | | block = drcuml->begin_block(170); |
| 851 | | |
| 852 | | alloc_handle(drcuml, &m_impstate.tlb_translate, "tlb_translate"); |
| 853 | | UML_HANDLE(block, *m_impstate.tlb_translate); // handle tlb_translate |
| 854 | | |
| 855 | | // I3: vaddr |
| 856 | | UML_CMP(block, uml::I0, 32 * 1024 * 1024); // cmp i0, 32*1024*1024 |
| 857 | | UML_JMPc(block, uml::COND_GE, nopid = label++); // jmpge nopid |
| 858 | | UML_AND(block, uml::I3, uml::mem(&COPRO_FCSE_PID), 0xfe000000); // and i3, COPRO_FCSE_PID, 0xfe000000 |
| 859 | | UML_ADD(block, uml::I3, uml::I3, uml::I0); // add i3, i3, i0 |
| 860 | | |
| 861 | | // I4: desc_lvl1 |
| 862 | | UML_AND(block, uml::I4, uml::mem(&COPRO_TLB_BASE), COPRO_TLB_BASE_MASK); // and i4, COPRO_TLB_BASE, COPRO_TLB_BASE_MASK |
| 863 | | UML_ROLINS(block, uml::I4, uml::I3, 32 - COPRO_TLB_VADDR_FLTI_MASK_SHIFT, // rolins i4, i3, 32-COPRO_TLB_VADDR_FLTI_MASK_SHIFT, |
| 864 | | COPRO_TLB_VADDR_FLTI_MASK); // COPRO_TLB_VADDR_FLTI_MASK |
| 865 | | UML_READ(block, uml::I4, uml::I4, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // read32 i4, i4, PROGRAM |
| 866 | | |
| 867 | | // I7: desc_lvl1 & 3 |
| 868 | | UML_AND(block, uml::I7, uml::I4, 3); // and i7, i4, 3 |
| 869 | | |
| 870 | | UML_CMP(block, uml::I7, COPRO_TLB_UNMAPPED); // cmp i7, COPRO_TLB_UNMAPPED |
| 871 | | UML_JMPc(block, uml::COND_NE, nounmapped = label++); // jmpne nounmapped |
| 872 | | |
| 873 | | // TLB Unmapped |
| 874 | | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 875 | | UML_MOVc(block, uml::COND_E, uml::mem(&COPRO_FAULT_STATUS_D), (5 << 0)); // move COPRO_FAULT_STATUS_D, (5 << 0) |
| 876 | | UML_MOVc(block, uml::COND_E, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // move COPRO_FAULT_ADDRESS, i3 |
| 877 | | UML_MOVc(block, uml::COND_E, uml::mem(&m_pendingAbtD), 1); // move pendingAbtD, 1 |
| 878 | | UML_MOVc(block, uml::COND_E, uml::I2, 0); // move i2, 0 |
| 879 | | UML_RETc(block, uml::COND_E); // rete |
| 880 | | |
| 881 | | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_P); // test i2, ARM7_TLB_ABORT_P |
| 882 | | UML_MOVc(block, uml::COND_E, uml::mem(&m_pendingAbtP), 1); // move pendingAbtP, 1 |
| 883 | | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 884 | | UML_RET(block); // ret |
| 885 | | |
| 886 | | UML_LABEL(block, nounmapped); // nounmapped: |
| 887 | | UML_CMP(block, uml::I7, COPRO_TLB_COARSE_TABLE); // cmp i7, COPRO_TLB_COARSE_TABLE |
| 888 | | UML_JMPc(block, uml::COND_NE, nocoarse = label++); // jmpne nocoarse |
| 889 | | |
| 890 | | UML_ROLAND(block, uml::I5, uml::I4, 32-4, 0x0f<<1); // roland i5, i4, 32-4, 0xf<<1 |
| 891 | | UML_ROLAND(block, uml::I5, uml::mem(&COPRO_DOMAIN_ACCESS_CONTROL), uml::I5, 3);// roland i5, COPRO_DOMAIN_ACCESS_CONTROL, i5, 3 |
| 892 | | UML_CMP(block, uml::I5, 1); // cmp i5, 1 |
| 893 | | UML_JMPc(block, uml::COND_E, level2 = label++); // jmpe level2 |
| 894 | | UML_CMP(block, uml::I5, 3); // cmp i5, 3 |
| 895 | | UML_JMPc(block, uml::COND_NE, nofine = label++); // jmpne nofine |
| 896 | | UML_LABEL(block, level2); // level2: |
| 897 | | |
| 898 | | // I7: desc_level2 |
| 899 | | UML_AND(block, uml::I7, uml::I4, COPRO_TLB_CFLD_ADDR_MASK); // and i7, i4, COPRO_TLB_CFLD_ADDR_MASK |
| 900 | | UML_ROLINS(block, uml::I7, uml::I3, 32 - COPRO_TLB_VADDR_CSLTI_MASK_SHIFT,// rolins i7, i3, 32 - COPRO_TLB_VADDR_CSLTI_MASK_SHIFT |
| 901 | | COPRO_TLB_VADDR_CSLTI_MASK); // COPRO_TLB_VADDR_CSLTI_MASK |
| 902 | | UML_READ(block, uml::I7, uml::I7, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // read32 i7, i7, PROGRAM |
| 903 | | UML_JMP(block, nofine); // jmp nofine |
| 904 | | |
| 905 | | UML_LABEL(block, nocoarse); // nocoarse: |
| 906 | | UML_CMP(block, uml::I7, COPRO_TLB_SECTION_TABLE); // cmp i7, COPRO_TLB_SECTION_TABLE |
| 907 | | UML_JMPc(block, uml::COND_NE, nosection = label++); // jmpne nosection |
| 908 | | |
| 909 | | UML_ROLAND(block, uml::I5, uml::I4, 32-10, 3); // roland i7, i4, 32-10, 3 |
| 910 | | // result in I6 |
| 911 | | UML_CALLH(block, *m_impstate.detect_fault); // callh detect_fault |
| 912 | | UML_CMP(block, uml::I6, FAULT_NONE); // cmp i6, FAULT_NONE |
| 913 | | UML_JMPc(block, uml::COND_NE, handlefault = label++); // jmpne handlefault |
| 914 | | |
| 915 | | // no fault, return translated address |
| 916 | | UML_AND(block, uml::I0, uml::I3, ~COPRO_TLB_SECTION_PAGE_MASK); // and i0, i3, ~COPRO_TLB_SECTION_PAGE_MASK |
| 917 | | UML_ROLINS(block, uml::I0, uml::I4, 0, COPRO_TLB_SECTION_PAGE_MASK); // rolins i0, i4, COPRO_TLB_SECTION_PAGE_MASK |
| 918 | | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 919 | | UML_RET(block); // ret |
| 920 | | |
| 921 | | UML_LABEL(block, handlefault); // handlefault: |
| 922 | | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 923 | | UML_JMPc(block, uml::COND_Z, prefetch = label++); // jmpz prefetch |
| 924 | | UML_MOV(block, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // mov COPRO_FAULT_ADDRESS, i3 |
| 925 | | UML_MOV(block, uml::mem(&m_pendingAbtD), 1); // mov m_pendingAbtD, 1 |
| 926 | | UML_ROLAND(block, uml::I5, uml::I4, 31, 0xf0); // roland i5, i4, 31, 0xf0 |
| 927 | | UML_CMP(block, uml::I6, FAULT_DOMAIN); // cmp i6, FAULT_DOMAIN |
| 928 | | UML_MOVc(block, uml::COND_E, uml::I6, 9 << 0); // move i6, 9 << 0 |
| 929 | | UML_MOVc(block, uml::COND_NE, uml::I6, 13 << 0); // movne i6, 13 << 0 |
| 930 | | UML_OR(block, uml::mem(&COPRO_FAULT_STATUS_D), uml::I5, uml::I6); // or COPRO_FAULT_STATUS_D, i5, i6 |
| 931 | | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 932 | | UML_RET(block); // ret |
| 933 | | |
| 934 | | UML_LABEL(block, prefetch); // prefetch: |
| 935 | | UML_MOV(block, uml::mem(&m_pendingAbtP), 1); // mov m_pendingAbtP, 1 |
| 936 | | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 937 | | UML_RET(block); // ret |
| 938 | | |
| 939 | | UML_LABEL(block, nosection); // nosection: |
| 940 | | UML_CMP(block, uml::I7, COPRO_TLB_FINE_TABLE); // cmp i7, COPRO_TLB_FINE_TABLE |
| 941 | | UML_JMPc(block, uml::COND_NE, nofine); // jmpne nofine |
| 942 | | |
| 943 | | // Not yet implemented |
| 944 | | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 945 | | UML_RET(block); // ret |
| 946 | | |
| 947 | | UML_LABEL(block, nofine); // nofine: |
| 948 | | |
| 949 | | // I7: desc_lvl2 |
| 950 | | UML_AND(block, uml::I6, uml::I7, 3); // and i6, i7, 3 |
| 951 | | UML_CMP(block, uml::I6, COPRO_TLB_UNMAPPED); // cmp i6, COPRO_TLB_UNMAPPED |
| 952 | | UML_JMPc(block, uml::COND_NE, nounmapped2 = label++); // jmpne nounmapped2 |
| 953 | | |
| 954 | | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 955 | | UML_JMPc(block, uml::COND_Z, prefetch2 = label++); // jmpz prefetch2 |
| 956 | | UML_MOV(block, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // mov COPRO_FAULT_ADDRESS, i3 |
| 957 | | UML_MOV(block, uml::mem(&m_pendingAbtD), 1); // mov m_pendingAbtD, 1 |
| 958 | | UML_ROLAND(block, uml::I5, uml::I4, 31, 0xf0); // roland i5, i4, 31, 0xf0 |
| 959 | | UML_OR(block, uml::I5, uml::I5, 7 << 0); // or i5, i5, 7 << 0 |
| 960 | | UML_OR(block, uml::mem(&COPRO_FAULT_STATUS_D), uml::I5, uml::I6); // or COPRO_FAULT_STATUS_D, i5, i6 |
| 961 | | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 962 | | UML_RET(block); // ret |
| 963 | | |
| 964 | | UML_LABEL(block, prefetch2); // prefetch2: |
| 965 | | UML_MOV(block, uml::mem(&m_pendingAbtP), 1); // mov m_pendingAbtP, 1 |
| 966 | | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 967 | | UML_RET(block); // ret |
| 968 | | |
| 969 | | UML_LABEL(block, nounmapped2); // nounmapped2: |
| 970 | | UML_CMP(block, uml::I6, COPRO_TLB_LARGE_PAGE); // cmp i6, COPRO_TLB_LARGE_PAGE |
| 971 | | UML_JMPc(block, uml::COND_NE, nolargepage = label++); // jmpne nolargepage |
| 972 | | |
| 973 | | UML_AND(block, uml::I0, uml::I3, ~COPRO_TLB_LARGE_PAGE_MASK); // and i0, i3, ~COPRO_TLB_LARGE_PAGE_MASK |
| 974 | | UML_ROLINS(block, uml::I0, uml::I7, 0, COPRO_TLB_LARGE_PAGE_MASK); // rolins i0, i7, 0, COPRO_TLB_LARGE_PAGE_MASK |
| 975 | | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 976 | | UML_RET(block); // ret |
| 977 | | |
| 978 | | UML_LABEL(block, nolargepage); // nolargepage: |
| 979 | | UML_CMP(block, uml::I6, COPRO_TLB_SMALL_PAGE); // cmp i6, COPRO_TLB_SMALL_PAGE |
| 980 | | UML_JMPc(block, uml::COND_NE, nosmallpage = label++); // jmpne nosmallpage |
| 981 | | |
| 982 | | UML_ROLAND(block, uml::I5, uml::I3, 32-9, 3<<1); // roland i5, i3, 32-9, 3<<1 |
| 983 | | UML_ROLAND(block, uml::I6, uml::I7, 32-4, 0xff); // roland i6, i7, 32-4, 0xff |
| 984 | | UML_SHR(block, uml::I5, uml::I7, uml::I5); // shr i5, i7, i5 |
| 985 | | UML_AND(block, uml::I5, uml::I5, 3); // and i5, i5, 3 |
| 986 | | // result in I6 |
| 987 | | UML_CALLH(block, *m_impstate.detect_fault); // callh detect_fault |
| 988 | | |
| 989 | | UML_CMP(block, uml::I6, FAULT_NONE); // cmp i6, FAULT_NONE |
| 990 | | UML_JMPc(block, uml::COND_NE, smallfault = label++); // jmpne smallfault |
| 991 | | UML_AND(block, uml::I0, uml::I7, COPRO_TLB_SMALL_PAGE_MASK); // and i0, i7, COPRO_TLB_SMALL_PAGE_MASK |
| 992 | | UML_ROLINS(block, uml::I0, uml::I3, 0, ~COPRO_TLB_SMALL_PAGE_MASK); // rolins i0, i3, 0, ~COPRO_TLB_SMALL_PAGE_MASK |
| 993 | | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 994 | | UML_RET(block); // ret |
| 995 | | |
| 996 | | UML_LABEL(block, smallfault); // smallfault: |
| 997 | | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 998 | | UML_JMPc(block, uml::COND_NZ, smallprefetch = label++); // jmpnz smallprefetch |
| 999 | | UML_MOV(block, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // mov COPRO_FAULT_ADDRESS, i3 |
| 1000 | | UML_MOV(block, uml::mem(&m_pendingAbtD), 1); // mov pendingAbtD, 1 |
| 1001 | | UML_CMP(block, uml::I6, FAULT_DOMAIN); // cmp i6, FAULT_DOMAIN |
| 1002 | | UML_MOVc(block, uml::COND_E, uml::I5, 11 << 0); // move i5, 11 << 0 |
| 1003 | | UML_MOVc(block, uml::COND_NE, uml::I5, 15 << 0); // movne i5, 15 << 0 |
| 1004 | | UML_ROLINS(block, uml::I5, uml::I4, 31, 0xf0); // rolins i5, i4, 31, 0xf0 |
| 1005 | | UML_MOV(block, uml::mem(&COPRO_FAULT_STATUS_D), uml::I5); // mov COPRO_FAULT_STATUS_D, i5 |
| 1006 | | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 1007 | | UML_RET(block); // ret |
| 1008 | | |
| 1009 | | UML_LABEL(block, smallprefetch); // smallprefetch: |
| 1010 | | UML_MOV(block, uml::mem(&m_pendingAbtP), 1); // mov pendingAbtP, 1 |
| 1011 | | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 1012 | | UML_RET(block); // ret |
| 1013 | | |
| 1014 | | UML_LABEL(block, nosmallpage); // nosmallpage: |
| 1015 | | UML_CMP(block, uml::I6, COPRO_TLB_TINY_PAGE); // cmp i6, COPRO_TLB_TINY_PAGE |
| 1016 | | UML_JMPc(block, uml::COND_NE, notinypage = label++); // jmpne notinypage |
| 1017 | | |
| 1018 | | UML_AND(block, uml::I0, uml::I3, ~COPRO_TLB_TINY_PAGE_MASK); // and i0, i3, ~COPRO_TLB_TINY_PAGE_MASK |
| 1019 | | UML_ROLINS(block, uml::I0, uml::I7, 0, COPRO_TLB_TINY_PAGE_MASK); // rolins i0, i7, 0, COPRO_TLB_TINY_PAGE_MASK |
| 1020 | | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 1021 | | UML_RET(block); // ret |
| 1022 | | |
| 1023 | | UML_LABEL(block, notinypage); // notinypage: |
| 1024 | | UML_MOV(block, uml::I0, uml::I3); // mov i0, i3 |
| 1025 | | UML_RET(block); // ret |
| 1026 | | |
| 1027 | | block->end(); |
| 1028 | | } |
| 1029 | | |
| 1030 | | /*------------------------------------------------------------------ |
| 1031 | | static_generate_memory_accessor |
| 1032 | | ------------------------------------------------------------------*/ |
| 1033 | | |
| 1034 | | void arm7_cpu_device::static_generate_memory_accessor(int size, bool istlb, bool iswrite, const char *name, uml::code_handle **handleptr) |
| 1035 | | { |
| 1036 | | /* on entry, address is in I0; data for writes is in I1, fetch type in I2 */ |
| 1037 | | /* on exit, read result is in I0 */ |
| 1038 | | /* routine trashes I0-I3 */ |
| 1039 | | drcuml_state *drcuml = m_impstate.drcuml; |
| 1040 | | drcuml_block *block; |
| 1041 | | //int tlbmiss = 0; |
| 1042 | | int label = 1; |
| 1043 | | |
| 1044 | | /* begin generating */ |
| 1045 | | block = drcuml->begin_block(1024); |
| 1046 | | |
| 1047 | | /* add a global entry for this */ |
| 1048 | | alloc_handle(drcuml, handleptr, name); |
| 1049 | | UML_HANDLE(block, **handleptr); // handle *handleptr |
| 1050 | | |
| 1051 | | if (istlb) |
| 1052 | | { |
| 1053 | | UML_TEST(block, uml::mem(&COPRO_CTRL), COPRO_CTRL_MMU_EN); // test COPRO_CTRL, COPRO_CTRL_MMU_EN |
| 1054 | | if (iswrite) |
| 1055 | | { |
| 1056 | | UML_MOVc(block, uml::COND_NZ, uml::I3, ARM7_TLB_WRITE); // movnz i3, ARM7_TLB_WRITE |
| 1057 | | } |
| 1058 | | else |
| 1059 | | { |
| 1060 | | UML_MOVc(block, uml::COND_NZ, uml::I3, ARM7_TLB_READ); // movnz i3, ARM7_TLB_READ |
| 1061 | | } |
| 1062 | | UML_OR(block, uml::I2, uml::I2, uml::I3); // or i2, i2, i3 |
| 1063 | | UML_CALLHc(block, uml::COND_NZ, *m_impstate.tlb_translate); // callhnz tlb_translate |
| 1064 | | } |
| 1065 | | |
| 1066 | | /* general case: assume paging and perform a translation */ |
| 1067 | | if ((machine().debug_flags & DEBUG_FLAG_ENABLED) == 0) |
| 1068 | | { |
| 1069 | | for (int ramnum = 0; ramnum < ARM7_MAX_FASTRAM; ramnum++) |
| 1070 | | { |
| 1071 | | if (m_impstate.fastram[ramnum].base != NULL && (!iswrite || !m_impstate.fastram[ramnum].readonly)) |
| 1072 | | { |
| 1073 | | void *fastbase = (UINT8 *)m_impstate.fastram[ramnum].base - m_impstate.fastram[ramnum].start; |
| 1074 | | UINT32 skip = label++; |
| 1075 | | if (m_impstate.fastram[ramnum].end != 0xffffffff) |
| 1076 | | { |
| 1077 | | UML_CMP(block, uml::I0, m_impstate.fastram[ramnum].end); // cmp i0, end |
| 1078 | | UML_JMPc(block, uml::COND_A, skip); // ja skip |
| 1079 | | } |
| 1080 | | if (m_impstate.fastram[ramnum].start != 0x00000000) |
| 1081 | | { |
| 1082 | | UML_CMP(block, uml::I0, m_impstate.fastram[ramnum].start); // cmp i0, fastram_start |
| 1083 | | UML_JMPc(block, uml::COND_B, skip); // jb skip |
| 1084 | | } |
| 1085 | | |
| 1086 | | if (!iswrite) |
| 1087 | | { |
| 1088 | | if (size == 1) |
| 1089 | | { |
| 1090 | | UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? BYTE4_XOR_BE(0) : BYTE4_XOR_LE(0)); |
| 1091 | | // xor i0, i0, bytexor |
| 1092 | | UML_LOAD(block, uml::I0, fastbase, uml::I0, uml::SIZE_BYTE, uml::SCALE_x1); // load i0, fastbase, i0, byte |
| 1093 | | } |
| 1094 | | else if (size == 2) |
| 1095 | | { |
| 1096 | | UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? WORD_XOR_BE(0) : WORD_XOR_LE(0)); |
| 1097 | | // xor i0, i0, wordxor |
| 1098 | | UML_LOAD(block, uml::I0, fastbase, uml::I0, uml::SIZE_WORD, uml::SCALE_x1); // load i0, fastbase, i0, word_x1 |
| 1099 | | } |
| 1100 | | else if (size == 4) |
| 1101 | | { |
| 1102 | | UML_LOAD(block, uml::I0, fastbase, uml::I0, uml::SIZE_DWORD, uml::SCALE_x1); // load i0, fastbase, i0, dword_x1 |
| 1103 | | } |
| 1104 | | UML_RET(block); // ret |
| 1105 | | } |
| 1106 | | else |
| 1107 | | { |
| 1108 | | if (size == 1) |
| 1109 | | { |
| 1110 | | UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? BYTE4_XOR_BE(0) : BYTE4_XOR_LE(0)); |
| 1111 | | // xor i0, i0, bytexor |
| 1112 | | UML_STORE(block, fastbase, uml::I0, uml::I1, uml::SIZE_BYTE, uml::SCALE_x1); // store fastbase, i0, i1, byte |
| 1113 | | } |
| 1114 | | else if (size == 2) |
| 1115 | | { |
| 1116 | | UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? WORD_XOR_BE(0) : WORD_XOR_LE(0)); |
| 1117 | | // xor i0, i0, wordxor |
| 1118 | | UML_STORE(block, fastbase, uml::I0, uml::I1, uml::SIZE_WORD, uml::SCALE_x1); // store fastbase, i0, i1, word_x1 |
| 1119 | | } |
| 1120 | | else if (size == 4) |
| 1121 | | { |
| 1122 | | UML_STORE(block, fastbase, uml::I0, uml::I1, uml::SIZE_DWORD, uml::SCALE_x1); // store fastbase,i0,i1,dword_x1 |
| 1123 | | } |
| 1124 | | UML_RET(block); // ret |
| 1125 | | } |
| 1126 | | |
| 1127 | | UML_LABEL(block, skip); // skip: |
| 1128 | | } |
| 1129 | | } |
| 1130 | | } |
| 1131 | | |
| 1132 | | switch (size) |
| 1133 | | { |
| 1134 | | case 1: |
| 1135 | | if (iswrite) |
| 1136 | | { |
| 1137 | | UML_WRITE(block, uml::I0, uml::I1, uml::SIZE_BYTE, uml::SPACE_PROGRAM); // write i0, i1, program_byte |
| 1138 | | } |
| 1139 | | else |
| 1140 | | { |
| 1141 | | UML_READ(block, uml::I0, uml::I0, uml::SIZE_BYTE, uml::SPACE_PROGRAM); // read i0, i0, program_byte |
| 1142 | | } |
| 1143 | | break; |
| 1144 | | |
| 1145 | | case 2: |
| 1146 | | if (iswrite) |
| 1147 | | { |
| 1148 | | UML_WRITE(block, uml::I0, uml::I1, uml::SIZE_WORD, uml::SPACE_PROGRAM); // write i0,i1,program_word |
| 1149 | | } |
| 1150 | | else |
| 1151 | | { |
| 1152 | | UML_READ(block, uml::I0, uml::I0, uml::SIZE_WORD, uml::SPACE_PROGRAM); // read i0,i0,program_word |
| 1153 | | } |
| 1154 | | break; |
| 1155 | | |
| 1156 | | case 4: |
| 1157 | | if (iswrite) |
| 1158 | | { |
| 1159 | | UML_WRITE(block, uml::I0, uml::I1, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // write i0,i1,program_dword |
| 1160 | | } |
| 1161 | | else |
| 1162 | | { |
| 1163 | | UML_READ(block, uml::I0, uml::I0, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // read i0,i0,program_dword |
| 1164 | | } |
| 1165 | | break; |
| 1166 | | } |
| 1167 | | UML_RET(block); // ret |
| 1168 | | |
| 1169 | | block->end(); |
| 1170 | | } |
| 1171 | | |
| 1172 | | /*************************************************************************** |
| 1173 | | CODE GENERATION |
| 1174 | | ***************************************************************************/ |
| 1175 | | |
| 1176 | | /*------------------------------------------------- |
| 1177 | | generate_update_cycles - generate code to |
| 1178 | | subtract cycles from the icount and generate |
| 1179 | | an exception if out |
| 1180 | | -------------------------------------------------*/ |
| 1181 | | |
| 1182 | | void arm7_cpu_device::generate_update_cycles(drcuml_block *block, compiler_state *compiler, uml::parameter param) |
| 1183 | | { |
| 1184 | | /* check full interrupts if pending */ |
| 1185 | | if (compiler->checkints) |
| 1186 | | { |
| 1187 | | uml::code_label skip; |
| 1188 | | |
| 1189 | | compiler->checkints = FALSE; |
| 1190 | | UML_CALLH(block, *m_impstate.check_irq); |
| 1191 | | } |
| 1192 | | |
| 1193 | | /* account for cycles */ |
| 1194 | | if (compiler->cycles > 0) |
| 1195 | | { |
| 1196 | | UML_SUB(block, uml::mem(&m_icount), uml::mem(&m_icount), MAPVAR_CYCLES); // sub icount,icount,cycles |
| 1197 | | UML_MAPVAR(block, MAPVAR_CYCLES, 0); // mapvar cycles,0 |
| 1198 | | UML_EXHc(block, uml::COND_S, *m_impstate.out_of_cycles, param); // exh out_of_cycles,nextpc |
| 1199 | | } |
| 1200 | | compiler->cycles = 0; |
| 1201 | | } |
| 1202 | | |
| 1203 | | |
| 1204 | | /*------------------------------------------------- |
| 1205 | | generate_checksum_block - generate code to |
| 1206 | | validate a sequence of opcodes |
| 1207 | | -------------------------------------------------*/ |
| 1208 | | |
| 1209 | | void arm7_cpu_device::generate_checksum_block(drcuml_block *block, compiler_state *compiler, const opcode_desc *seqhead, const opcode_desc *seqlast) |
| 1210 | | { |
| 1211 | | const opcode_desc *curdesc; |
| 1212 | | if (LOG_UML) |
| 1213 | | { |
| 1214 | | block->append_comment("[Validation for %08X]", seqhead->pc); // comment |
| 1215 | | } |
| 1216 | | |
| 1217 | | /* loose verify or single instruction: just compare and fail */ |
| 1218 | | if (!(m_impstate.drcoptions & ARM7DRC_STRICT_VERIFY) || seqhead->next() == NULL) |
| 1219 | | { |
| 1220 | | if (!(seqhead->flags & OPFLAG_VIRTUAL_NOOP)) |
| 1221 | | { |
| 1222 | | UINT32 sum = seqhead->opptr.l[0]; |
| 1223 | | void *base = m_direct->read_decrypted_ptr(seqhead->physpc); |
| 1224 | | UML_LOAD(block, uml::I0, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i0,base,0,dword |
| 1225 | | |
| 1226 | | if (seqhead->delay.first() != NULL && seqhead->physpc != seqhead->delay.first()->physpc) |
| 1227 | | { |
| 1228 | | base = m_direct->read_decrypted_ptr(seqhead->delay.first()->physpc); |
| 1229 | | UML_LOAD(block, uml::I1, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i1,base,dword |
| 1230 | | UML_ADD(block, uml::I0, uml::I0, uml::I1); // add i0,i0,i1 |
| 1231 | | |
| 1232 | | sum += seqhead->delay.first()->opptr.l[0]; |
| 1233 | | } |
| 1234 | | |
| 1235 | | UML_CMP(block, uml::I0, sum); // cmp i0,opptr[0] |
| 1236 | | UML_EXHc(block, uml::COND_NE, *m_impstate.nocode, epc(seqhead)); // exne nocode,seqhead->pc |
| 1237 | | } |
| 1238 | | } |
| 1239 | | |
| 1240 | | /* full verification; sum up everything */ |
| 1241 | | else |
| 1242 | | { |
| 1243 | | UINT32 sum = 0; |
| 1244 | | void *base = m_direct->read_decrypted_ptr(seqhead->physpc); |
| 1245 | | UML_LOAD(block, uml::I0, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i0,base,0,dword |
| 1246 | | sum += seqhead->opptr.l[0]; |
| 1247 | | for (curdesc = seqhead->next(); curdesc != seqlast->next(); curdesc = curdesc->next()) |
| 1248 | | if (!(curdesc->flags & OPFLAG_VIRTUAL_NOOP)) |
| 1249 | | { |
| 1250 | | base = m_direct->read_decrypted_ptr(curdesc->physpc); |
| 1251 | | UML_LOAD(block, uml::I1, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i1,base,dword |
| 1252 | | UML_ADD(block, uml::I0, uml::I0, uml::I1); // add i0,i0,i1 |
| 1253 | | sum += curdesc->opptr.l[0]; |
| 1254 | | |
| 1255 | | if (curdesc->delay.first() != NULL && (curdesc == seqlast || (curdesc->next() != NULL && curdesc->next()->physpc != curdesc->delay.first()->physpc))) |
| 1256 | | { |
| 1257 | | base = m_direct->read_decrypted_ptr(curdesc->delay.first()->physpc); |
| 1258 | | UML_LOAD(block, uml::I1, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i1,base,dword |
| 1259 | | UML_ADD(block, uml::I0, uml::I0, uml::I1); // add i0,i0,i1 |
| 1260 | | sum += curdesc->delay.first()->opptr.l[0]; |
| 1261 | | } |
| 1262 | | } |
| 1263 | | UML_CMP(block, uml::I0, sum); // cmp i0,sum |
| 1264 | | UML_EXHc(block, uml::COND_NE, *m_impstate.nocode, epc(seqhead)); // exne nocode,seqhead->pc |
| 1265 | | } |
| 1266 | | } |
| 1267 | | |
| 1268 | | |
| 1269 | | /*------------------------------------------------- |
| 1270 | | generate_sequence_instruction - generate code |
| 1271 | | for a single instruction in a sequence |
| 1272 | | -------------------------------------------------*/ |
| 1273 | | |
| 1274 | | void arm7_cpu_device::generate_sequence_instruction(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1275 | | { |
| 1276 | | //offs_t expc; |
| 1277 | | int hotnum; |
| 1278 | | |
| 1279 | | /* add an entry for the log */ |
| 1280 | | // TODO FIXME |
| 1281 | | // if (LOG_UML && !(desc->flags & OPFLAG_VIRTUAL_NOOP)) |
| 1282 | | // log_add_disasm_comment(block, desc->pc, desc->opptr.l[0]); |
| 1283 | | |
| 1284 | | /* set the PC map variable */ |
| 1285 | | //expc = (desc->flags & OPFLAG_IN_DELAY_SLOT) ? desc->pc - 3 : desc->pc; |
| 1286 | | UML_MAPVAR(block, MAPVAR_PC, desc->pc); // mapvar PC,pc |
| 1287 | | |
| 1288 | | /* accumulate total cycles */ |
| 1289 | | compiler->cycles += desc->cycles; |
| 1290 | | |
| 1291 | | /* update the icount map variable */ |
| 1292 | | UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles |
| 1293 | | |
| 1294 | | /* is this a hotspot? */ |
| 1295 | | for (hotnum = 0; hotnum < ARM7_MAX_HOTSPOTS; hotnum++) |
| 1296 | | { |
| 1297 | | if (m_impstate.hotspot[hotnum].pc != 0 && desc->pc == m_impstate.hotspot[hotnum].pc && desc->opptr.l[0] == m_impstate.hotspot[hotnum].opcode) |
| 1298 | | { |
| 1299 | | compiler->cycles += m_impstate.hotspot[hotnum].cycles; |
| 1300 | | break; |
| 1301 | | } |
| 1302 | | } |
| 1303 | | |
| 1304 | | /* update the icount map variable */ |
| 1305 | | UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles |
| 1306 | | |
| 1307 | | /* if we are debugging, call the debugger */ |
| 1308 | | if ((machine().debug_flags & DEBUG_FLAG_ENABLED) != 0) |
| 1309 | | { |
| 1310 | | UML_MOV(block, uml::mem(&R15), desc->pc); // mov [pc],desc->pc |
| 1311 | | save_fast_iregs(block); |
| 1312 | | UML_DEBUG(block, desc->pc); // debug desc->pc |
| 1313 | | } |
| 1314 | | |
| 1315 | | /* if we hit an unmapped address, fatal error */ |
| 1316 | | if (desc->flags & OPFLAG_COMPILER_UNMAPPED) |
| 1317 | | { |
| 1318 | | UML_MOV(block, uml::mem(&R15), desc->pc); // mov R15,desc->pc |
| 1319 | | save_fast_iregs(block); |
| 1320 | | UML_EXIT(block, EXECUTE_UNMAPPED_CODE); // exit EXECUTE_UNMAPPED_CODE |
| 1321 | | } |
| 1322 | | |
| 1323 | | /* otherwise, unless this is a virtual no-op, it's a regular instruction */ |
| 1324 | | else if (!(desc->flags & OPFLAG_VIRTUAL_NOOP)) |
| 1325 | | { |
| 1326 | | /* compile the instruction */ |
| 1327 | | if (!generate_opcode(block, compiler, desc)) |
| 1328 | | { |
| 1329 | | UML_MOV(block, uml::mem(&R15), desc->pc); // mov R15,desc->pc |
| 1330 | | UML_MOV(block, uml::mem(&m_impstate.arg0), desc->opptr.l[0]); // mov [arg0],desc->opptr.l |
| 1331 | | //UML_CALLC(block, cfunc_unimplemented, arm); // callc cfunc_unimplemented // TODO FIXME |
| 1332 | | } |
| 1333 | | } |
| 1334 | | } |
| 1335 | | |
| 1336 | | |
| 1337 | | /*------------------------------------------------------------------ |
| 1338 | | generate_delay_slot_and_branch |
| 1339 | | ------------------------------------------------------------------*/ |
| 1340 | | |
| 1341 | | void arm7_cpu_device::generate_delay_slot_and_branch(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT8 linkreg) |
| 1342 | | { |
| 1343 | | compiler_state compiler_temp = *compiler; |
| 1344 | | |
| 1345 | | /* update the cycles and jump through the hash table to the target */ |
| 1346 | | if (desc->targetpc != BRANCH_TARGET_DYNAMIC) |
| 1347 | | { |
| 1348 | | generate_update_cycles(block, &compiler_temp, desc->targetpc); // <subtract cycles> |
| 1349 | | UML_HASHJMP(block, 0, desc->targetpc, *m_impstate.nocode); |
| 1350 | | // hashjmp 0,desc->targetpc,nocode |
| 1351 | | } |
| 1352 | | else |
| 1353 | | { |
| 1354 | | generate_update_cycles(block, &compiler_temp, uml::mem(&m_impstate.jmpdest)); |
| 1355 | | // <subtract cycles> |
| 1356 | | UML_HASHJMP(block, 0, uml::mem(&m_impstate.jmpdest), *m_impstate.nocode);// hashjmp 0,<rsreg>,nocode |
| 1357 | | } |
| 1358 | | |
| 1359 | | /* update the label */ |
| 1360 | | compiler->labelnum = compiler_temp.labelnum; |
| 1361 | | |
| 1362 | | /* reset the mapvar to the current cycles and account for skipped slots */ |
| 1363 | | compiler->cycles += desc->skipslots; |
| 1364 | | UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles |
| 1365 | | } |
| 1366 | | |
| 1367 | | |
// Top-level ARM opcode dispatch table for the DRC, indexed by instruction
// bits 27-24 (see generate_opcode: (op & 0xF000000) >> 24).  Sixteen entries
// grouped into the same opcode families the handler names suggest; most
// handlers below are still unimplemented stubs returning false.
const arm7_cpu_device::drcarm7ops_ophandler arm7_cpu_device::drcops_handler[0x10] =
{
	&arm7_cpu_device::drcarm7ops_0123, &arm7_cpu_device::drcarm7ops_0123, &arm7_cpu_device::drcarm7ops_0123, &arm7_cpu_device::drcarm7ops_0123,
	&arm7_cpu_device::drcarm7ops_4567, &arm7_cpu_device::drcarm7ops_4567, &arm7_cpu_device::drcarm7ops_4567, &arm7_cpu_device::drcarm7ops_4567,
	&arm7_cpu_device::drcarm7ops_89,   &arm7_cpu_device::drcarm7ops_89,   &arm7_cpu_device::drcarm7ops_ab,   &arm7_cpu_device::drcarm7ops_ab,
	&arm7_cpu_device::drcarm7ops_cd,   &arm7_cpu_device::drcarm7ops_cd,   &arm7_cpu_device::drcarm7ops_e,    &arm7_cpu_device::drcarm7ops_f,
};
| 1375 | | |
| 1376 | | void arm7_cpu_device::saturate_qbit_overflow(drcuml_block *block) |
| 1377 | | { |
| 1378 | | UML_MOV(block, uml::I1, 0); |
| 1379 | | UML_DCMP(block, uml::I0, 0x000000007fffffffL); |
| 1380 | | UML_MOVc(block, uml::COND_G, uml::I1, Q_MASK); |
| 1381 | | UML_MOVc(block, uml::COND_G, uml::I0, 0x7fffffff); |
| 1382 | | UML_DCMP(block, uml::I0, U64(0xffffffff80000000)); |
| 1383 | | UML_MOVc(block, uml::COND_L, uml::I1, Q_MASK); |
| 1384 | | UML_MOVc(block, uml::COND_L, uml::I0, 0x80000000); |
| 1385 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1); |
| 1386 | | } |
| 1387 | | |
| 1388 | | bool arm7_cpu_device::drcarm7ops_0123(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 insn) |
| 1389 | | { |
| 1390 | | uml::code_label done; |
| 1391 | | /* Branch and Exchange (BX) */ |
| 1392 | | if ((insn & 0x0ffffff0) == 0x012fff10) // bits 27-4 == 000100101111111111110001 |
| 1393 | | { |
| 1394 | | UML_MOV(block, DRC_PC, DRC_REG(insn & 0x0f)); |
| 1395 | | UML_TEST(block, DRC_PC, 1); |
| 1396 | | UML_JMPc(block, uml::COND_Z, done = compiler->labelnum++); |
| 1397 | | UML_OR(block, DRC_CPSR, DRC_CPSR, T_MASK); |
| 1398 | | UML_AND(block, DRC_PC, DRC_PC, ~1); |
| 1399 | | } |
| 1400 | | else if ((insn & 0x0ff000f0) == 0x01600010) // CLZ - v5 |
| 1401 | | { |
| 1402 | | UINT32 rm = insn&0xf; |
| 1403 | | UINT32 rd = (insn>>12)&0xf; |
| 1404 | | |
| 1405 | | UML_LZCNT(block, DRC_REG(rd), DRC_REG(rm)); |
| 1406 | | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1407 | | } |
| 1408 | | else if ((insn & 0x0ff000f0) == 0x01000050) // QADD - v5 |
| 1409 | | { |
| 1410 | | UINT32 rm = insn&0xf; |
| 1411 | | UINT32 rn = (insn>>16)&0xf; |
| 1412 | | UINT32 rd = (insn>>12)&0xf; |
| 1413 | | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1414 | | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1415 | | UML_DADD(block, uml::I0, uml::I0, uml::I1); |
| 1416 | | saturate_qbit_overflow(block); |
| 1417 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1418 | | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1419 | | } |
| 1420 | | else if ((insn & 0x0ff000f0) == 0x01400050) // QDADD - v5 |
| 1421 | | { |
| 1422 | | UINT32 rm = insn&0xf; |
| 1423 | | UINT32 rn = (insn>>16)&0xf; |
| 1424 | | UINT32 rd = (insn>>12)&0xf; |
| 1425 | | |
| 1426 | | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1427 | | UML_DADD(block, uml::I0, uml::I1, uml::I1); |
| 1428 | | saturate_qbit_overflow(block); |
| 1429 | | |
| 1430 | | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1431 | | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1432 | | UML_DADD(block, uml::I1, uml::I1, uml::I1); |
| 1433 | | UML_DADD(block, uml::I0, uml::I0, uml::I1); |
| 1434 | | saturate_qbit_overflow(block); |
| 1435 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1436 | | |
| 1437 | | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1438 | | } |
| 1439 | | else if ((insn & 0x0ff000f0) == 0x01200050) // QSUB - v5 |
| 1440 | | { |
| 1441 | | UINT32 rm = insn&0xf; |
| 1442 | | UINT32 rn = (insn>>16)&0xf; |
| 1443 | | UINT32 rd = (insn>>12)&0xf; |
| 1444 | | |
| 1445 | | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1446 | | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1447 | | UML_DSUB(block, uml::I0, uml::I0, uml::I1); |
| 1448 | | saturate_qbit_overflow(block); |
| 1449 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1450 | | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1451 | | } |
| 1452 | | else if ((insn & 0x0ff000f0) == 0x01600050) // QDSUB - v5 |
| 1453 | | { |
| 1454 | | UINT32 rm = insn&0xf; |
| 1455 | | UINT32 rn = (insn>>16)&0xf; |
| 1456 | | UINT32 rd = (insn>>12)&0xf; |
| 1457 | | |
| 1458 | | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1459 | | UML_DADD(block, uml::I0, uml::I1, uml::I1); |
| 1460 | | saturate_qbit_overflow(block); |
| 1461 | | |
| 1462 | | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1463 | | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1464 | | UML_DADD(block, uml::I1, uml::I1, uml::I1); |
| 1465 | | UML_DSUB(block, uml::I0, uml::I0, uml::I1); |
| 1466 | | saturate_qbit_overflow(block); |
| 1467 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1468 | | |
| 1469 | | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1470 | | } |
| 1471 | | else if ((insn & 0x0ff00090) == 0x01000080) // SMLAxy - v5 |
| 1472 | | { |
| 1473 | | UINT32 rm = insn&0xf; |
| 1474 | | UINT32 rn = (insn>>8)&0xf; |
| 1475 | | UINT32 rd = (insn>>16)&0xf; |
| 1476 | | UINT32 ra = (insn>>12)&0xf; |
| 1477 | | |
| 1478 | | UML_MOV(block, uml::I0, DRC_REG(rm)); |
| 1479 | | UML_MOV(block, uml::I1, DRC_REG(rn)); |
| 1480 | | |
| 1481 | | // select top and bottom halves of src1/src2 and sign extend if necessary |
| 1482 | | if (insn & 0x20) |
| 1483 | | { |
| 1484 | | UML_SHR(block, uml::I0, uml::I0, 16); |
| 1485 | | } |
| 1486 | | UML_SEXT(block, uml::I0, uml::I0, uml::SIZE_WORD); |
| 1487 | | |
| 1488 | | if (insn & 0x40) |
| 1489 | | { |
| 1490 | | UML_SHR(block, uml::I1, uml::I1, 16); |
| 1491 | | } |
| 1492 | | UML_SEXT(block, uml::I0, uml::I0, uml::SIZE_WORD); |
| 1493 | | |
| 1494 | | // do the signed multiply |
| 1495 | | UML_MULS(block, uml::I0, uml::I1, uml::I0, uml::I1); |
| 1496 | | UML_DSHL(block, uml::I0, uml::I0, 32); |
| 1497 | | UML_DOR(block, uml::I0, uml::I0, uml::I1); |
| 1498 | | UML_MOV(block, uml::I1, DRC_REG(ra)); |
| 1499 | | UML_DADD(block, uml::I0, uml::I0, uml::I1); |
| 1500 | | // and the accumulate. NOTE: only the accumulate can cause an overflow, which is why we do it this way. |
| 1501 | | saturate_qbit_overflow(block); |
| 1502 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1503 | | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1504 | | } |
| 1505 | | else if ((insn & 0x0ff00090) == 0x01400080) // SMLALxy - v5 |
| 1506 | | { |
| 1507 | | UINT32 rm = insn&0xf; |
| 1508 | | UINT32 rn = (insn>>8)&0xf; |
| 1509 | | UINT32 rdh = (insn>>16)&0xf; |
| 1510 | | UINT32 rdl = (insn>>12)&0xf; |
| 1511 | | |
| 1512 | | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1513 | | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1514 | | // do the signed multiply |
| 1515 | | UML_DMULS(block, uml::I2, uml::I3, uml::I0, uml::I1); |
| 1516 | | |
| 1517 | | UML_MOV(block, uml::I0, DRC_REG(rdh)); |
| 1518 | | UML_MOV(block, uml::I1, DRC_REG(rdl)); |
| 1519 | | UML_DSHL(block, uml::I0, uml::I0, 32); |
| 1520 | | UML_DOR(block, uml::I0, uml::I0, uml::I1); |
| 1521 | | UML_DADD(block, uml::I0, uml::I0, uml::I2); |
| 1522 | | UML_MOV(block, DRC_REG(rdl), uml::I0); |
| 1523 | | UML_DSHR(block, uml::I0, uml::I0, 32); |
| 1524 | | UML_MOV(block, DRC_REG(rdh), uml::I0); |
| 1525 | | } |
| 1526 | | else if ((insn & 0x0ff00090) == 0x01600080) // SMULxy - v5 |
| 1527 | | { |
| 1528 | | INT32 src1 = GET_REGISTER(insn&0xf); |
| 1529 | | INT32 src2 = GET_REGISTER((insn>>8)&0xf); |
| 1530 | | INT32 res; |
| 1531 | | |
| 1532 | | // select top and bottom halves of src1/src2 and sign extend if necessary |
| 1533 | | if (insn & 0x20) |
| 1534 | | { |
| 1535 | | src1 >>= 16; |
| 1536 | | } |
| 1537 | | |
| 1538 | | src1 &= 0xffff; |
| 1539 | | if (src1 & 0x8000) |
| 1540 | | { |
| 1541 | | src1 |= 0xffff0000; |
| 1542 | | } |
| 1543 | | |
| 1544 | | if (insn & 0x40) |
| 1545 | | { |
| 1546 | | src2 >>= 16; |
| 1547 | | } |
| 1548 | | |
| 1549 | | src2 &= 0xffff; |
| 1550 | | if (src2 & 0x8000) |
| 1551 | | { |
| 1552 | | src2 |= 0xffff0000; |
| 1553 | | } |
| 1554 | | |
| 1555 | | res = src1 * src2; |
| 1556 | | SET_REGISTER((insn>>16)&0xf, res); |
| 1557 | | R15 += 4; |
| 1558 | | } |
| 1559 | | else if ((insn & 0x0ff000b0) == 0x012000a0) // SMULWy - v5 |
| 1560 | | { |
| 1561 | | INT32 src1 = GET_REGISTER(insn&0xf); |
| 1562 | | INT32 src2 = GET_REGISTER((insn>>8)&0xf); |
| 1563 | | INT64 res; |
| 1564 | | |
| 1565 | | if (insn & 0x40) |
| 1566 | | { |
| 1567 | | src2 >>= 16; |
| 1568 | | } |
| 1569 | | else |
| 1570 | | { |
| 1571 | | src2 &= 0xffff; |
| 1572 | | if (src2 & 0x8000) |
| 1573 | | { |
| 1574 | | src2 |= 0xffff; |
| 1575 | | } |
| 1576 | | } |
| 1577 | | |
| 1578 | | res = (INT64)src1 * (INT64)src2; |
| 1579 | | res >>= 16; |
| 1580 | | SET_REGISTER((insn>>16)&0xf, (UINT32)res); |
| 1581 | | } |
| 1582 | | else if ((insn & 0x0ff000b0) == 0x01200080) // SMLAWy - v5 |
| 1583 | | { |
| 1584 | | INT32 src1 = GET_REGISTER(insn&0xf); |
| 1585 | | INT32 src2 = GET_REGISTER((insn>>8)&0xf); |
| 1586 | | INT32 src3 = GET_REGISTER((insn>>12)&0xf); |
| 1587 | | INT64 res; |
| 1588 | | |
| 1589 | | if (insn & 0x40) |
| 1590 | | { |
| 1591 | | src2 >>= 16; |
| 1592 | | } |
| 1593 | | else |
| 1594 | | { |
| 1595 | | src2 &= 0xffff; |
| 1596 | | if (src2 & 0x8000) |
| 1597 | | { |
| 1598 | | src2 |= 0xffff; |
| 1599 | | } |
| 1600 | | } |
| 1601 | | |
| 1602 | | res = (INT64)src1 * (INT64)src2; |
| 1603 | | res >>= 16; |
| 1604 | | |
| 1605 | | // check for overflow and set the Q bit |
| 1606 | | saturate_qbit_overflow((INT64)src3 + res); |
| 1607 | | |
| 1608 | | // do the real accumulate |
| 1609 | | src3 += (INT32)res; |
| 1610 | | |
| 1611 | | // write the result back |
| 1612 | | SET_REGISTER((insn>>16)&0xf, (UINT32)res); |
| 1613 | | } |
| 1614 | | else |
| 1615 | | /* Multiply OR Swap OR Half Word Data Transfer */ |
| 1616 | | if ((insn & 0x0e000000) == 0 && (insn & 0x80) && (insn & 0x10)) // bits 27-25=000 bit 7=1 bit 4=1 |
| 1617 | | { |
| 1618 | | /* Half Word Data Transfer */ |
| 1619 | | if (insn & 0x60) // bits = 6-5 != 00 |
| 1620 | | { |
| 1621 | | HandleHalfWordDT(insn); |
| 1622 | | } |
| 1623 | | else |
| 1624 | | /* Swap */ |
| 1625 | | if (insn & 0x01000000) // bit 24 = 1 |
| 1626 | | { |
| 1627 | | HandleSwap(insn); |
| 1628 | | } |
| 1629 | | /* Multiply Or Multiply Long */ |
| 1630 | | else |
| 1631 | | { |
| 1632 | | /* multiply long */ |
| 1633 | | if (insn & 0x800000) // Bit 23 = 1 for Multiply Long |
| 1634 | | { |
| 1635 | | /* Signed? */ |
| 1636 | | if (insn & 0x00400000) |
| 1637 | | HandleSMulLong(insn); |
| 1638 | | else |
| 1639 | | HandleUMulLong(insn); |
| 1640 | | } |
| 1641 | | /* multiply */ |
| 1642 | | else |
| 1643 | | { |
| 1644 | | HandleMul(insn); |
| 1645 | | } |
| 1646 | | R15 += 4; |
| 1647 | | } |
| 1648 | | } |
| 1649 | | /* Data Processing OR PSR Transfer */ |
| 1650 | | else if ((insn & 0x0c000000) == 0) // bits 27-26 == 00 - This check can only exist properly after Multiplication check above |
| 1651 | | { |
| 1652 | | /* PSR Transfer (MRS & MSR) */ |
| 1653 | | if (((insn & 0x00100000) == 0) && ((insn & 0x01800000) == 0x01000000)) // S bit must be clear, and bit 24,23 = 10 |
| 1654 | | { |
| 1655 | | HandlePSRTransfer(insn); |
| 1656 | | ARM7_ICOUNT += 2; // PSR only takes 1 - S Cycle, so we add + 2, since at end, we -3.. |
| 1657 | | R15 += 4; |
| 1658 | | } |
| 1659 | | /* Data Processing */ |
| 1660 | | else |
| 1661 | | { |
| 1662 | | HandleALU(insn); |
| 1663 | | } |
| 1664 | | } |
| 1665 | | |
| 1666 | | UML_LABEL(block, done); |
| 1667 | | return true; |
| 1668 | | } |
| 1669 | | |
// Placeholder for the opcode group with bits 27-24 = 0x4-0x7.  Not yet
// translated; returning false presumably signals "not handled" to
// generate_opcode — TODO confirm, since the caller currently ignores it.
bool arm7_cpu_device::drcarm7ops_4567(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;
}
| 1674 | | |
// Placeholder for the opcode group with bits 27-24 = 0x8-0x9.  Not yet
// translated; returns false (see note on drcarm7ops_4567).
bool arm7_cpu_device::drcarm7ops_89(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;
}
| 1679 | | |
// Placeholder for the opcode group with bits 27-24 = 0xa-0xb.  Not yet
// translated; returns false (see note on drcarm7ops_4567).
bool arm7_cpu_device::drcarm7ops_ab(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;
}
| 1684 | | |
// Placeholder for the opcode group with bits 27-24 = 0xc-0xd.  Not yet
// translated; returns false (see note on drcarm7ops_4567).
bool arm7_cpu_device::drcarm7ops_cd(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;
}
| 1689 | | |
// Placeholder for the opcode group with bits 27-24 = 0xe.  Not yet
// translated; returns false (see note on drcarm7ops_4567).
bool arm7_cpu_device::drcarm7ops_e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;
}
| 1694 | | |
// Placeholder for the opcode group with bits 27-24 = 0xf.  Not yet
// translated; returns false (see note on drcarm7ops_4567).
bool arm7_cpu_device::drcarm7ops_f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;
}
| 1699 | | |
| 1700 | | /*------------------------------------------------- |
| 1701 | | generate_opcode - generate code for a specific |
| 1702 | | opcode |
| 1703 | | -------------------------------------------------*/ |
| 1704 | | |
| 1705 | | int arm7_cpu_device::generate_opcode(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1706 | | { |
| 1707 | | //int in_delay_slot = ((desc->flags & OPFLAG_IN_DELAY_SLOT) != 0); |
| 1708 | | UINT32 op = desc->opptr.l[0]; |
| 1709 | | UINT8 opswitch = op >> 26; |
| 1710 | | uml::code_label skip; |
| 1711 | | uml::code_label contdecode; |
| 1712 | | uml::code_label unexecuted; |
| 1713 | | |
| 1714 | | if (T_IS_SET(GET_CPSR)) |
| 1715 | | { |
| 1716 | | // "In Thumb state, bit [0] is undefined and must be ignored. Bits [31:1] contain the PC." |
| 1717 | | UML_AND(block, uml::I0, DRC_PC, ~1); |
| 1718 | | } |
| 1719 | | else |
| 1720 | | { |
| 1721 | | UML_AND(block, uml::I0, DRC_PC, ~3); |
| 1722 | | } |
| 1723 | | |
| 1724 | | UML_TEST(block, uml::mem(&COPRO_CTRL), COPRO_CTRL_MMU_EN); // test COPRO_CTRL, COPRO_CTRL_MMU_EN |
| 1725 | | UML_MOVc(block, uml::COND_NZ, uml::I2, ARM7_TLB_ABORT_P | ARM7_TLB_READ); // movnz i0, ARM7_TLB_ABORT_P | ARM7_TLB_READ |
| 1726 | | UML_CALLHc(block, uml::COND_NZ, *m_impstate.tlb_translate); // callhnz tlb_translate); |
| 1727 | | |
| 1728 | | if (T_IS_SET(GET_CPSR)) |
| 1729 | | { |
| 1730 | | //UML_CALLH(block, *m_impstate.drcthumb[(op & 0xffc0) >> 6]); // callh drcthumb[op] // TODO FIXME |
| 1731 | | return TRUE; |
| 1732 | | } |
| 1733 | | |
| 1734 | | switch (op >> INSN_COND_SHIFT) |
| 1735 | | { |
| 1736 | | case COND_EQ: |
| 1737 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1738 | | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1739 | | break; |
| 1740 | | case COND_NE: |
| 1741 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1742 | | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1743 | | break; |
| 1744 | | case COND_CS: |
| 1745 | | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1746 | | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1747 | | break; |
| 1748 | | case COND_CC: |
| 1749 | | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1750 | | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1751 | | break; |
| 1752 | | case COND_MI: |
| 1753 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1754 | | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1755 | | break; |
| 1756 | | case COND_PL: |
| 1757 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1758 | | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1759 | | break; |
| 1760 | | case COND_VS: |
| 1761 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1762 | | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1763 | | break; |
| 1764 | | case COND_VC: |
| 1765 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1766 | | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1767 | | break; |
| 1768 | | case COND_HI: |
| 1769 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1770 | | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1771 | | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1772 | | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1773 | | break; |
| 1774 | | case COND_LS: |
| 1775 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1776 | | UML_JMPc(block, uml::COND_NZ, contdecode = compiler->labelnum++); |
| 1777 | | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1778 | | UML_JMPc(block, uml::COND_Z, contdecode); |
| 1779 | | UML_JMP(block, unexecuted); |
| 1780 | | break; |
| 1781 | | case COND_GE: |
| 1782 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1783 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1784 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1785 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1786 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1787 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1788 | | UML_CMP(block, uml::I0, uml::I1); |
| 1789 | | UML_JMPc(block, uml::COND_NE, unexecuted); |
| 1790 | | break; |
| 1791 | | case COND_LT: |
| 1792 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1793 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1794 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1795 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1796 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1797 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1798 | | UML_CMP(block, uml::I0, uml::I1); |
| 1799 | | UML_JMPc(block, uml::COND_E, unexecuted); |
| 1800 | | break; |
| 1801 | | case COND_GT: |
| 1802 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1803 | | UML_JMPc(block, uml::COND_NZ, unexecuted); |
| 1804 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1805 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1806 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1807 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1808 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1809 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1810 | | UML_CMP(block, uml::I0, uml::I1); |
| 1811 | | UML_JMPc(block, uml::COND_NE, unexecuted); |
| 1812 | | break; |
| 1813 | | case COND_LE: |
| 1814 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1815 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1816 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1817 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1818 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1819 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1820 | | UML_CMP(block, uml::I0, uml::I1); |
| 1821 | | UML_JMPc(block, uml::COND_NE, contdecode); |
| 1822 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1823 | | UML_JMPc(block, uml::COND_Z, unexecuted); |
| 1824 | | break; |
| 1825 | | case COND_NV: |
| 1826 | | UML_JMP(block, unexecuted); |
| 1827 | | break; |
| 1828 | | } |
| 1829 | | |
| 1830 | | UML_LABEL(block, contdecode); |
| 1831 | | |
| 1832 | | (this->*drcops_handler[(op & 0xF000000) >> 24])(block, compiler, desc, op); |
| 1833 | | |
| 1834 | | UML_LABEL(block, unexecuted); |
| 1835 | | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1836 | | UML_ADD(block, MAPVAR_CYCLES, MAPVAR_CYCLES, 2); // add cycles, cycles, 2 |
| 1837 | | |
| 1838 | | UML_LABEL(block, skip); |
| 1839 | | |
| 1840 | | switch (opswitch) |
| 1841 | | { |
| 1842 | | /* ----- sub-groups ----- */ |
| 1843 | | |
| 1844 | | case 0x00: /* SPECIAL - MIPS I */ |
| 1845 | | return TRUE; |
| 1846 | | |
| 1847 | | // TODO: FINISH ME |
| 1848 | | } |
| 1849 | | |
| 1850 | | return FALSE; |
| 1851 | | } |
trunk/src/emu/cpu/arm7/arm7tdrc.c
| r28735 | r28736 | |
| 1 | | #include "emu.h" |
| 2 | | #include "arm7core.h" |
| 3 | | #include "arm7help.h" |
| 4 | | |
| 5 | | |
| 6 | | const arm7_cpu_device::arm7thumb_drcophandler arm7_cpu_device::drcthumb_handler[0x40*0x10] = |
| 7 | | { |
| 8 | | // #define THUMB_SHIFT_R ((UINT16)0x0800) |
| 9 | | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 10 | | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 11 | | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 12 | | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 13 | | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 14 | | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 15 | | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 16 | | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 17 | | // #define THUMB_INSN_ADDSUB ((UINT16)0x0800) // #define THUMB_ADDSUB_TYPE ((UINT16)0x0600) |
| 18 | | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 19 | | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 20 | | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 21 | | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 22 | | &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, |
| 23 | | &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, |
| 24 | | &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, |
| 25 | | &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, |
| 26 | | // #define THUMB_INSN_CMP ((UINT16)0x0800) |
| 27 | | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 28 | | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 29 | | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 30 | | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 31 | | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 32 | | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 33 | | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 34 | | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 35 | | // #define THUMB_INSN_SUB ((UINT16)0x0800) |
| 36 | | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 37 | | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 38 | | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 39 | | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 40 | | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 41 | | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 42 | | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 43 | | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 44 | | //#define THUMB_GROUP4_TYPE ((UINT16)0x0c00) //#define THUMB_ALUOP_TYPE ((UINT16)0x03c0) // #define THUMB_HIREG_OP ((UINT16)0x0300) // #define THUMB_HIREG_H ((UINT16)0x00c0) |
| 45 | | &arm7_cpu_device::drctg04_00_00, &arm7_cpu_device::drctg04_00_01, &arm7_cpu_device::drctg04_00_02, &arm7_cpu_device::drctg04_00_03, &arm7_cpu_device::drctg04_00_04, &arm7_cpu_device::drctg04_00_05, &arm7_cpu_device::drctg04_00_06, &arm7_cpu_device::drctg04_00_07, |
| 46 | | &arm7_cpu_device::drctg04_00_08, &arm7_cpu_device::drctg04_00_09, &arm7_cpu_device::drctg04_00_0a, &arm7_cpu_device::drctg04_00_0b, &arm7_cpu_device::drctg04_00_0c, &arm7_cpu_device::drctg04_00_0d, &arm7_cpu_device::drctg04_00_0e, &arm7_cpu_device::drctg04_00_0f, |
| 47 | | &arm7_cpu_device::drctg04_01_00, &arm7_cpu_device::drctg04_01_01, &arm7_cpu_device::drctg04_01_02, &arm7_cpu_device::drctg04_01_03, &arm7_cpu_device::drctg04_01_10, &arm7_cpu_device::drctg04_01_11, &arm7_cpu_device::drctg04_01_12, &arm7_cpu_device::drctg04_01_13, |
| 48 | | &arm7_cpu_device::drctg04_01_20, &arm7_cpu_device::drctg04_01_21, &arm7_cpu_device::drctg04_01_22, &arm7_cpu_device::drctg04_01_23, &arm7_cpu_device::drctg04_01_30, &arm7_cpu_device::drctg04_01_31, &arm7_cpu_device::drctg04_01_32, &arm7_cpu_device::drctg04_01_33, |
| 49 | | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 50 | | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 51 | | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 52 | | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 53 | | //#define THUMB_GROUP5_TYPE ((UINT16)0x0e00) |
| 54 | | &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, |
| 55 | | &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, |
| 56 | | &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, |
| 57 | | &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, |
| 58 | | &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, |
| 59 | | &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, |
| 60 | | &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, |
| 61 | | &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, |
| 62 | | //#define THUMB_LSOP_L ((UINT16)0x0800) |
| 63 | | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 64 | | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 65 | | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 66 | | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 67 | | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 68 | | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 69 | | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 70 | | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 71 | | //#define THUMB_LSOP_L ((UINT16)0x0800) |
| 72 | | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 73 | | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 74 | | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 75 | | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 76 | | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 77 | | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 78 | | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 79 | | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 80 | | // #define THUMB_HALFOP_L ((UINT16)0x0800) |
| 81 | | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 82 | | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 83 | | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 84 | | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 85 | | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 86 | | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 87 | | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 88 | | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 89 | | // #define THUMB_STACKOP_L ((UINT16)0x0800) |
| 90 | | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 91 | | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 92 | | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 93 | | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 94 | | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 95 | | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 96 | | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 97 | | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 98 | | // #define THUMB_RELADDR_SP ((UINT16)0x0800) |
| 99 | | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 100 | | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 101 | | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 102 | | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 103 | | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 104 | | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 105 | | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 106 | | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 107 | | // #define THUMB_STACKOP_TYPE ((UINT16)0x0f00) |
| 108 | | &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_1, &arm7_cpu_device::drctg0b_1, &arm7_cpu_device::drctg0b_1, &arm7_cpu_device::drctg0b_1, |
| 109 | | &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_3, &arm7_cpu_device::drctg0b_3, &arm7_cpu_device::drctg0b_3, &arm7_cpu_device::drctg0b_3, |
| 110 | | &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_5, &arm7_cpu_device::drctg0b_5, &arm7_cpu_device::drctg0b_5, &arm7_cpu_device::drctg0b_5, |
| 111 | | &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_7, &arm7_cpu_device::drctg0b_7, &arm7_cpu_device::drctg0b_7, &arm7_cpu_device::drctg0b_7, |
| 112 | | &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_9, &arm7_cpu_device::drctg0b_9, &arm7_cpu_device::drctg0b_9, &arm7_cpu_device::drctg0b_9, |
| 113 | | &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_b, &arm7_cpu_device::drctg0b_b, &arm7_cpu_device::drctg0b_b, &arm7_cpu_device::drctg0b_b, |
| 114 | | &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_d, &arm7_cpu_device::drctg0b_d, &arm7_cpu_device::drctg0b_d, &arm7_cpu_device::drctg0b_d, |
| 115 | | &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_f, &arm7_cpu_device::drctg0b_f, &arm7_cpu_device::drctg0b_f, &arm7_cpu_device::drctg0b_f, |
| 116 | | // #define THUMB_MULTLS ((UINT16)0x0800) |
| 117 | | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 118 | | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 119 | | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 120 | | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 121 | | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 122 | | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 123 | | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 124 | | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 125 | | // #define THUMB_COND_TYPE ((UINT16)0x0f00) |
| 126 | | &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_1, &arm7_cpu_device::drctg0d_1, &arm7_cpu_device::drctg0d_1, &arm7_cpu_device::drctg0d_1, |
| 127 | | &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_3, &arm7_cpu_device::drctg0d_3, &arm7_cpu_device::drctg0d_3, &arm7_cpu_device::drctg0d_3, |
| 128 | | &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_5, &arm7_cpu_device::drctg0d_5, &arm7_cpu_device::drctg0d_5, &arm7_cpu_device::drctg0d_5, |
| 129 | | &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_7, &arm7_cpu_device::drctg0d_7, &arm7_cpu_device::drctg0d_7, &arm7_cpu_device::drctg0d_7, |
| 130 | | &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_9, &arm7_cpu_device::drctg0d_9, &arm7_cpu_device::drctg0d_9, &arm7_cpu_device::drctg0d_9, |
| 131 | | &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_b, &arm7_cpu_device::drctg0d_b, &arm7_cpu_device::drctg0d_b, &arm7_cpu_device::drctg0d_b, |
| 132 | | &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_d, &arm7_cpu_device::drctg0d_d, &arm7_cpu_device::drctg0d_d, &arm7_cpu_device::drctg0d_d, |
| 133 | | &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_f, &arm7_cpu_device::drctg0d_f, &arm7_cpu_device::drctg0d_f, &arm7_cpu_device::drctg0d_f, |
| 134 | | // #define THUMB_BLOP_LO ((UINT16)0x0800) |
| 135 | | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 136 | | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 137 | | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 138 | | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 139 | | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 140 | | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 141 | | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 142 | | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 143 | | // #define THUMB_BLOP_LO ((UINT16)0x0800) |
| 144 | | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 145 | | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 146 | | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 147 | | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 148 | | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 149 | | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 150 | | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 151 | | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 152 | | }; |
| 153 | | |
| 154 | | /* Shift operations */ |
| 155 | | |
void arm7_cpu_device::drctg00_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Shift left */
{
	// Thumb LSL Rd, Rs, #Offset5: emit UML that shifts Rs left by the 5-bit
	// immediate, writes the result to Rd, and updates C (carry-out) plus N/Z
	// in the CPSR.  Only N/Z/C are touched here.
	UINT32 op = desc->opptr.l[0];
	UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;   // source register index (referenced by the DRC_RS macro, otherwise unused here)
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;   // destination register index (referenced by the DRC_RD macro)
	INT32 offs = (op & THUMB_SHIFT_AMT) >> THUMB_SHIFT_AMT_SHIFT;  // shift amount, 0..31

	UML_MOV(block, uml::I0, DRC_RS); // rrs: snapshot the source before Rd is written (Rd may alias Rs)
	if (offs != 0)
	{
		UML_SHL(block, DRC_RD, DRC_RS, offs);
		// Carry out of LSL #n is the last bit shifted off the top,
		// i.e. bit (32 - n) of the source, tested here as 31 - (n - 1).
		UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK);
		UML_TEST(block, uml::I0, 1 << (31 - (offs - 1)));
		UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
		UML_MOVc(block, uml::COND_Z, uml::I1, 0);
		UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
	}
	else
	{
		// LSL #0 is a plain register move; C is deliberately left untouched.
		UML_MOV(block, DRC_RD, DRC_RS);
	}
	// Recompute N/Z from the result (the macro leaves the new flag bits in I0).
	UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK));
	DRCHandleALUNZFlags(DRC_RD);
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
	UML_ADD(block, DRC_PC, DRC_PC, 2);   // advance past this 16-bit Thumb opcode
}
| 182 | | |
| 183 | | void arm7_cpu_device::drctg00_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Shift right */ |
| 184 | | { |
| 185 | | UINT32 op = desc->opptr.l[0]; |
| 186 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 187 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 188 | | INT32 offs = (op & THUMB_SHIFT_AMT) >> THUMB_SHIFT_AMT_SHIFT; |
| 189 | | |
| 190 | | UML_MOV(block, uml::I0, DRC_RS); // rrs |
| 191 | | if (offs != 0) |
| 192 | | { |
| 193 | | UML_SHR(block, DRC_RD, DRC_RS, offs); |
| 194 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK); |
| 195 | | UML_TEST(block, uml::I0, 1 << (31 - (offs - 1))); |
| 196 | | UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK); |
| 197 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 198 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1); |
| 199 | | } |
| 200 | | else |
| 201 | | { |
| 202 | | UML_MOV(block, DRC_RD, 0); |
| 203 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK); |
| 204 | | UML_TEST(block, uml::I0, 0x80000000); |
| 205 | | UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK); |
| 206 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 207 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1); |
| 208 | | } |
| 209 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 210 | | DRCHandleALUNZFlags(DRC_RD); |
| 211 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 212 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 213 | | } |
| 214 | | |
| 215 | | /* Arithmetic */ |
| 216 | | |
void arm7_cpu_device::drctg01_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc)
{
	// Thumb ASR Rd, Rs, #Offset5: arithmetic shift right with sign fill,
	// updating C from the last bit shifted out and N/Z from the result.
	// An encoded shift count of 0 denotes ASR #32.
	UINT32 op = desc->opptr.l[0];
	UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;   // source register index (referenced by the DRC_RS macro)
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;   // destination register index (referenced by the DRC_RD macro)
	INT32 offs = (op & THUMB_SHIFT_AMT) >> THUMB_SHIFT_AMT_SHIFT;  // shift amount; 0 is remapped to 32 below

	/* ASR.. */
	UML_MOV(block, uml::I0, DRC_RS);   // snapshot the source (Rd may alias Rs)
	if (offs == 0)
	{
		// Encoding 0 means a full 32-bit arithmetic shift.
		offs = 32;
	}
	if (offs >= 32)
	{
		// ASR #32: C gets the sign bit; Rd becomes all-ones or all-zeros
		// depending on the sign of the source.
		UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK);
		UML_SHR(block, uml::I1, uml::I0, 31);   // isolate the sign bit
		UML_TEST(block, uml::I1, ~0);
		UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
		UML_MOVc(block, uml::COND_Z, uml::I1, 0);
		UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
		UML_TEST(block, uml::I0, 0x80000000);
		UML_MOVc(block, uml::COND_NZ, DRC_RD, ~0);
		UML_MOVc(block, uml::COND_Z, DRC_RD, 0);
	}
	else
	{
		// 1..31: carry is the last bit shifted out (bit offs-1).  The result
		// is built as a logical shift ORed with a sign-extension mask.
		UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK);
		UML_TEST(block, uml::I0, 1 << (offs - 1));
		UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
		UML_MOVc(block, uml::COND_Z, uml::I1, 0);
		UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
		UML_SHR(block, uml::I1, uml::I0, offs);        // logical part of the shift
		UML_SHL(block, uml::I2, ~0, 32 - offs);        // mask covering the offs vacated top bits
		UML_TEST(block, uml::I0, 0x80000000);
		UML_MOVc(block, uml::COND_Z, uml::I2, 0);      // non-negative source: no sign fill
		UML_OR(block, DRC_RD, uml::I1, uml::I2);
	}
	// Recompute N/Z from the result (the macro leaves the new flag bits in I0).
	UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK));
	DRCHandleALUNZFlags(DRC_RD);
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
	UML_ADD(block, DRC_PC, DRC_PC, 2);   // advance past this 16-bit Thumb opcode
}
| 260 | | |
| 261 | | void arm7_cpu_device::drctg01_10(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 262 | | { |
| 263 | | UINT32 op = desc->opptr.l[0]; |
| 264 | | UINT32 rn = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 265 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 266 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 267 | | UML_ADD(block, DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 268 | | DRCHandleThumbALUAddFlags(DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 269 | | } |
| 270 | | |
| 271 | | void arm7_cpu_device::drctg01_11(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SUB Rd, Rs, Rn */ |
| 272 | | { |
| 273 | | UINT32 op = desc->opptr.l[0]; |
| 274 | | UINT32 rn = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 275 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 276 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 277 | | UML_SUB(block, DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 278 | | DRCHandleThumbALUSubFlags(DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 279 | | } |
| 280 | | |
| 281 | | void arm7_cpu_device::drctg01_12(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, Rs, #imm */ |
| 282 | | { |
| 283 | | UINT32 op = desc->opptr.l[0]; |
| 284 | | UINT32 imm = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 285 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 286 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 287 | | UML_ADD(block, DRC_REG(rd), DRC_REG(rs), imm); |
| 288 | | DRCHandleThumbALUAddFlags(DRC_REG(rd), DRC_REG(rs), imm); |
| 289 | | } |
| 290 | | |
| 291 | | void arm7_cpu_device::drctg01_13(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SUB Rd, Rs, #imm */ |
| 292 | | { |
| 293 | | UINT32 op = desc->opptr.l[0]; |
| 294 | | UINT32 imm = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 295 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 296 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 297 | | UML_SUB(block, DRC_REG(rd), DRC_REG(rs), imm); |
| 298 | | DRCHandleThumbALUSubFlags(DRC_REG(rd), DRC_REG(rs), imm); |
| 299 | | } |
| 300 | | |
| 301 | | /* CMP / MOV */ |
| 302 | | |
| 303 | | void arm7_cpu_device::drctg02_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 304 | | { |
| 305 | | UINT32 op = desc->opptr.l[0]; |
| 306 | | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 307 | | UINT32 op2 = (op & THUMB_INSN_IMM); |
| 308 | | UML_MOV(block, DRC_REG(rd), op2); |
| 309 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 310 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 311 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 312 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 313 | | } |
| 314 | | |
| 315 | | void arm7_cpu_device::drctg02_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 316 | | { |
| 317 | | UINT32 op = desc->opptr.l[0]; |
| 318 | | UINT32 rn = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 319 | | UINT32 op2 = op & THUMB_INSN_IMM; |
| 320 | | |
| 321 | | UML_SUB(block, uml::I3, DRC_REG(rn), op2); |
| 322 | | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rn), op2); |
| 323 | | } |
| 324 | | |
| 325 | | /* ADD/SUB immediate */ |
| 326 | | |
| 327 | | void arm7_cpu_device::drctg03_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, #Offset8 */ |
| 328 | | { |
| 329 | | UINT32 op = desc->opptr.l[0]; |
| 330 | | UINT32 rn = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 331 | | UINT32 op2 = op & THUMB_INSN_IMM; |
| 332 | | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 333 | | UML_ADD(block, DRC_REG(rd), DRC_REG(rn), op2); |
| 334 | | DRCHandleThumbALUAddFlags(DRC_REG(rd), DRC_REG(rn), op2); |
| 335 | | } |
| 336 | | |
| 337 | | void arm7_cpu_device::drctg03_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SUB Rd, #Offset8 */ |
| 338 | | { |
| 339 | | UINT32 op = desc->opptr.l[0]; |
| 340 | | UINT32 rn = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 341 | | UINT32 op2 = op & THUMB_INSN_IMM; |
| 342 | | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 343 | | UML_SUB(block, DRC_REG(rd), DRC_REG(rn), op2); |
| 344 | | DRCHandleThumbALUSubFlags(DRC_REG(rd), DRC_REG(rn), op2); |
| 345 | | } |
| 346 | | |
| 347 | | /* Rd & Rm instructions */ |
| 348 | | |
| 349 | | void arm7_cpu_device::drctg04_00_00(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* AND Rd, Rs */ |
| 350 | | { |
| 351 | | UINT32 op = desc->opptr.l[0]; |
| 352 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 353 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 354 | | UML_AND(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs)); |
| 355 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 356 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 357 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 358 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 359 | | } |
| 360 | | |
| 361 | | void arm7_cpu_device::drctg04_00_01(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* EOR Rd, Rs */ |
| 362 | | { |
| 363 | | UINT32 op = desc->opptr.l[0]; |
| 364 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 365 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 366 | | UML_XOR(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs)); |
| 367 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 368 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 369 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 370 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 371 | | } |
| 372 | | |
| 373 | | void arm7_cpu_device::drctg04_00_02(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LSL Rd, Rs */ |
| 374 | | { |
| 375 | | UINT32 op = desc->opptr.l[0]; |
| 376 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 377 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 378 | | uml::code_label skip; |
| 379 | | uml::code_label offsg32; |
| 380 | | uml::code_label offs32; |
| 381 | | |
| 382 | | UML_AND(block, uml::I1, DRC_REG(rs), 0xff); |
| 383 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK)); |
| 384 | | |
| 385 | | UML_CMP(block, uml::I1, 0); |
| 386 | | UML_JMPc(block, uml::COND_E, skip = compiler->labelnum++); |
| 387 | | |
| 388 | | UML_CMP(block, uml::I1, 32); |
| 389 | | UML_JMPc(block, uml::COND_A, offsg32 = compiler->labelnum++); |
| 390 | | UML_JMPc(block, uml::COND_E, offs32 = compiler->labelnum++); |
| 391 | | |
| 392 | | UML_SHL(block, DRC_REG(rd), DRC_REG(rd), uml::I1); |
| 393 | | UML_SUB(block, uml::I1, uml::I1, 1); |
| 394 | | UML_SUB(block, uml::I1, 31, uml::I1); |
| 395 | | UML_SHL(block, uml::I1, 1, uml::I1); |
| 396 | | UML_TEST(block, DRC_REG(rd), uml::I1); |
| 397 | | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 398 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 399 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 400 | | UML_JMP(block, skip); |
| 401 | | |
| 402 | | UML_LABEL(block, offs32); |
| 403 | | UML_TEST(block, DRC_REG(rd), 1); |
| 404 | | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 405 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 406 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 407 | | UML_MOV(block, DRC_REG(rd), 0); |
| 408 | | UML_JMP(block, skip); |
| 409 | | |
| 410 | | UML_LABEL(block, offsg32); |
| 411 | | UML_MOV(block, DRC_REG(rd), 0); |
| 412 | | |
| 413 | | UML_LABEL(block, skip); |
| 414 | | |
| 415 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 416 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 417 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 418 | | } |
| 419 | | |
| 420 | | void arm7_cpu_device::drctg04_00_03(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LSR Rd, Rs */ |
| 421 | | { |
| 422 | | UINT32 op = desc->opptr.l[0]; |
| 423 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 424 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 425 | | uml::code_label skip; |
| 426 | | uml::code_label offsg32; |
| 427 | | uml::code_label offs32; |
| 428 | | |
| 429 | | UML_AND(block, uml::I1, DRC_REG(rs), 0xff); |
| 430 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK)); |
| 431 | | UML_CMP(block, uml::I1, 0); |
| 432 | | UML_JMPc(block, uml::COND_E, skip = compiler->labelnum++); |
| 433 | | |
| 434 | | UML_CMP(block, uml::I1, 32); |
| 435 | | UML_JMPc(block, uml::COND_A, offsg32 = compiler->labelnum++); |
| 436 | | UML_JMPc(block, uml::COND_E, offs32 = compiler->labelnum++); |
| 437 | | |
| 438 | | UML_SHR(block, DRC_REG(rd), DRC_REG(rd), uml::I1); |
| 439 | | UML_SUB(block, uml::I1, uml::I1, 1); // WP: TODO, Check this used to be "block, I1, 1" |
| 440 | | UML_SHL(block, uml::I1, 1, uml::I1); |
| 441 | | UML_TEST(block, DRC_REG(rd), uml::I1); |
| 442 | | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 443 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 444 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 445 | | UML_JMP(block, skip); |
| 446 | | |
| 447 | | UML_LABEL(block, offs32); |
| 448 | | UML_TEST(block, DRC_REG(rd), 0x80000000); |
| 449 | | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 450 | | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 451 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 452 | | UML_MOV(block, DRC_REG(rd), 0); |
| 453 | | UML_JMP(block, skip); |
| 454 | | |
| 455 | | UML_LABEL(block, offsg32); |
| 456 | | UML_MOV(block, DRC_REG(rd), 0); |
| 457 | | |
| 458 | | UML_LABEL(block, skip); |
| 459 | | |
| 460 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 461 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 462 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 463 | | } |
| 464 | | |
void arm7_cpu_device::drctg04_00_04(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ASR Rd, Rs */
{
    // ASR Rd, Rs (register shift): arithmetic shift right by the low byte of
    // Rs, updating N/Z/C.  I0 holds the pre-shift value of Rd throughout.
    UINT32 op = desc->opptr.l[0];
    UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;
    UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;
    uml::code_label skip;
    uml::code_label offsg32;   // NOTE(review): declared but never assigned or referenced
    uml::code_label offs32;

    UML_MOV(block, uml::I0, DRC_REG(rd));                      // save original Rd for sign/carry tests
    UML_AND(block, uml::I1, DRC_REG(rs), 0xff);                // shift count = low byte of Rs
    UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK));
    UML_CMP(block, uml::I1, 0);
    UML_JMPc(block, uml::COND_E, skip = compiler->labelnum++); // count 0: fall through to N/Z refresh only

    // Build the sign-extended result: logical shift right, then OR in a mask
    // of the vacated high bits if the original value was negative.
    UML_SHR(block, uml::I2, uml::I0, uml::I1);
    UML_SUB(block, uml::I1, 32, uml::I1);
    UML_SHL(block, uml::I1, ~0, uml::I1);                      // I1 = mask of the top `count` bits
    UML_TEST(block, uml::I0, 0x80000000);
    UML_MOVc(block, uml::COND_NZ, DRC_REG(rd), uml::I1);
    UML_MOVc(block, uml::COND_Z, DRC_REG(rd), 0);
    UML_OR(block, DRC_REG(rd), DRC_REG(rd), uml::I2);
    // NOTE(review): this COND_B branch presumably intends "count < 32" vs
    // ">= 32", but no CMP against 32 precedes it — confirm which emitted
    // instruction last set the flags consumed here.
    UML_JMPc(block, uml::COND_B, offs32 = compiler->labelnum++);

    // count >= 32 path: result and carry both become copies of the sign bit.
    UML_TEST(block, uml::I0, 0x80000000);
    UML_MOVc(block, uml::COND_NZ, DRC_REG(rd), ~0);
    UML_MOVc(block, uml::COND_Z, DRC_REG(rd), 0);
    UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
    UML_MOVc(block, uml::COND_Z, uml::I1, 0);
    UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
    UML_JMP(block, skip);

    // count 1..31 path: carry = last bit shifted out of the original value.
    // NOTE(review): I1 was overwritten above with the vacated-bits mask, so
    // the SUB/SHL below no longer operate on the raw shift count — verify
    // against the interpreter implementation.
    UML_LABEL(block, offs32);
    UML_SUB(block, uml::I1, uml::I1, 1);
    UML_SHL(block, uml::I1, 1, uml::I1);
    UML_TEST(block, uml::I0, uml::I1);
    UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
    UML_MOVc(block, uml::COND_Z, uml::I1, 0);
    UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
    UML_JMP(block, skip);

    UML_LABEL(block, skip);
    DRCHandleALUNZFlags(DRC_REG(rd));
    // NOTE(review): I0 held the saved original Rd; this OR assumes the NZ
    // helper macro left its flag bits in I0 — confirm the macro's contract.
    UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
    UML_ADD(block, DRC_PC, DRC_PC, 2);

}
| 512 | | |
| 513 | | void arm7_cpu_device::drctg04_00_05(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADC Rd, Rs */ |
| 514 | | { |
| 515 | | UINT32 op = desc->opptr.l[0]; |
| 516 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 517 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 518 | | UML_TEST(block, DRC_CPSR, C_MASK); |
| 519 | | UML_MOVc(block, uml::COND_NZ, uml::I3, 1); |
| 520 | | UML_MOVc(block, uml::COND_Z, uml::I3, 0); |
| 521 | | UML_ADD(block, uml::I3, uml::I3, DRC_REG(rd)); |
| 522 | | UML_ADD(block, uml::I3, uml::I3, DRC_REG(rs)); |
| 523 | | DRCHandleThumbALUAddFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 524 | | UML_MOV(block, DRC_REG(rd), uml::I3); |
| 525 | | } |
| 526 | | |
| 527 | | void arm7_cpu_device::drctg04_00_06(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SBC Rd, Rs */ |
| 528 | | { |
| 529 | | UINT32 op = desc->opptr.l[0]; |
| 530 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 531 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 532 | | UML_TEST(block, DRC_CPSR, C_MASK); |
| 533 | | UML_MOVc(block, uml::COND_NZ, uml::I3, 0); |
| 534 | | UML_MOVc(block, uml::COND_Z, uml::I3, 1); |
| 535 | | UML_SUB(block, uml::I3, DRC_REG(rs), uml::I3); |
| 536 | | UML_ADD(block, uml::I3, DRC_REG(rd), uml::I3); |
| 537 | | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 538 | | UML_MOV(block, DRC_REG(rd), uml::I3); |
| 539 | | } |
| 540 | | |
void arm7_cpu_device::drctg04_00_07(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ROR Rd, Rs */
{
    // ROR Rd, Rs: rotate right by the low 5 bits of Rs, composed as
    // (Rd >> n) | (Rd << (32 - n)), then update N/Z/C.
    // NOTE(review): masking with 0x1f means a count of 0 (or a multiple of
    // 32) still runs the carry computation below with I1 == 0, where
    // SUB I1,I1,1 yields ~0 — confirm the intended flag behavior for a zero
    // rotate against the interpreter.
    UINT32 op = desc->opptr.l[0];
    UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;
    UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;
    UML_MOV(block, uml::I0, DRC_REG(rd));          // save original Rd for the carry test
    UML_AND(block, uml::I1, DRC_REG(rs), 0x1f);    // rotate count
    UML_SHR(block, DRC_REG(rd), uml::I0, uml::I1);
    UML_SUB(block, uml::I2, 32, uml::I1);
    UML_SHL(block, uml::I2, uml::I0, uml::I2);
    UML_OR(block, DRC_REG(rd), DRC_REG(rd), uml::I2);
    // Carry = bit (count - 1) of the original value (last bit rotated out).
    UML_SUB(block, uml::I1, uml::I1, 1);
    UML_SHL(block, uml::I1, 1, uml::I1);
    UML_TEST(block, uml::I0, uml::I1);
    UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK);
    UML_MOVc(block, uml::COND_Z, uml::I0, 0);
    UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK));
    // NOTE(review): I0 holds the carry bit here; this assumes the NZ helper
    // accumulates into I0 rather than overwriting it — confirm the macro.
    DRCHandleALUNZFlags(DRC_REG(rd));
    UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
    UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 562 | | |
| 563 | | void arm7_cpu_device::drctg04_00_08(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* TST Rd, Rs */ |
| 564 | | { |
| 565 | | UINT32 op = desc->opptr.l[0]; |
| 566 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 567 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 568 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 569 | | UML_AND(block, uml::I2, DRC_REG(rd), DRC_REG(rs)); |
| 570 | | DRCHandleALUNZFlags(uml::I2); |
| 571 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 572 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 573 | | } |
| 574 | | |
| 575 | | void arm7_cpu_device::drctg04_00_09(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* NEG Rd, Rs */ |
| 576 | | { |
| 577 | | UINT32 op = desc->opptr.l[0]; |
| 578 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 579 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 580 | | UML_MOV(block, uml::I3, DRC_REG(rs)); |
| 581 | | UML_SUB(block, DRC_REG(rd), 0, uml::I3); |
| 582 | | DRCHandleThumbALUSubFlags(DRC_REG(rd), 0, uml::I3); |
| 583 | | } |
| 584 | | |
| 585 | | void arm7_cpu_device::drctg04_00_0a(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Rd, Rs */ |
| 586 | | { |
| 587 | | UINT32 op = desc->opptr.l[0]; |
| 588 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 589 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 590 | | UML_SUB(block, uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 591 | | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 592 | | } |
| 593 | | |
| 594 | | void arm7_cpu_device::drctg04_00_0b(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMN Rd, Rs - check flags, add dasm */ |
| 595 | | { |
| 596 | | UINT32 op = desc->opptr.l[0]; |
| 597 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 598 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 599 | | UML_ADD(block, uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 600 | | DRCHandleThumbALUAddFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 601 | | } |
| 602 | | |
| 603 | | void arm7_cpu_device::drctg04_00_0c(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ORR Rd, Rs */ |
| 604 | | { |
| 605 | | UINT32 op = desc->opptr.l[0]; |
| 606 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 607 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 608 | | UML_OR(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs)); |
| 609 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 610 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 611 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 612 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 613 | | } |
| 614 | | |
| 615 | | void arm7_cpu_device::drctg04_00_0d(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MUL Rd, Rs */ |
| 616 | | { |
| 617 | | UINT32 op = desc->opptr.l[0]; |
| 618 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 619 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 620 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 621 | | UML_MULU(block, DRC_REG(rd), uml::I1, DRC_REG(rd), DRC_REG(rs)); |
| 622 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 623 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 624 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 625 | | } |
| 626 | | |
| 627 | | void arm7_cpu_device::drctg04_00_0e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* BIC Rd, Rs */ |
| 628 | | { |
| 629 | | UINT32 op = desc->opptr.l[0]; |
| 630 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 631 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 632 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 633 | | UML_XOR(block, uml::I0, DRC_REG(rs), ~0); |
| 634 | | UML_AND(block, DRC_REG(rd), DRC_REG(rd), uml::I0); |
| 635 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 636 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 637 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 638 | | } |
| 639 | | |
| 640 | | void arm7_cpu_device::drctg04_00_0f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MVN Rd, Rs */ |
| 641 | | { |
| 642 | | UINT32 op = desc->opptr.l[0]; |
| 643 | | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 644 | | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 645 | | UML_XOR(block, uml::I0, DRC_REG(rs), ~0); |
| 646 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 647 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 648 | | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 649 | | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 650 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 651 | | } |
| 652 | | |
| 653 | | /* ADD Rd, Rs group */ |
| 654 | | |
| 655 | | void arm7_cpu_device::drctg04_01_00(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 656 | | { |
| 657 | | UINT32 op = desc->opptr.l[0]; |
| 658 | | UINT32 pc = desc->pc; |
| 659 | | fatalerror("%08x: G4-1-0 Undefined Thumb instruction: %04x %x\n", pc, op, (op & THUMB_HIREG_H) >> THUMB_HIREG_H_SHIFT); |
| 660 | | } |
| 661 | | |
| 662 | | void arm7_cpu_device::drctg04_01_01(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, HRs */ |
| 663 | | { |
| 664 | | UINT32 op = desc->opptr.l[0]; |
| 665 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 666 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 667 | | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs+8)); |
| 668 | | if (rs == 7) |
| 669 | | { |
| 670 | | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), 4); |
| 671 | | } |
| 672 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 673 | | } |
| 674 | | |
| 675 | | void arm7_cpu_device::drctg04_01_02(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD HRd, Rs */ |
| 676 | | { |
| 677 | | UINT32 op = desc->opptr.l[0]; |
| 678 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 679 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 680 | | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), DRC_REG(rs)); |
| 681 | | if (rd == 7) |
| 682 | | { |
| 683 | | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), 4); |
| 684 | | } |
| 685 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 686 | | } |
| 687 | | |
| 688 | | void arm7_cpu_device::drctg04_01_03(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Add HRd, HRs */ |
| 689 | | { |
| 690 | | UINT32 op = desc->opptr.l[0]; |
| 691 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 692 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 693 | | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), DRC_REG(rs+8)); |
| 694 | | // emulate the effects of pre-fetch |
| 695 | | if (rs == 7) |
| 696 | | { |
| 697 | | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), 4); |
| 698 | | } |
| 699 | | if (rd == 7) |
| 700 | | { |
| 701 | | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), 2); |
| 702 | | } |
| 703 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 704 | | } |
| 705 | | |
| 706 | | void arm7_cpu_device::drctg04_01_10(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Rd, Rs */ |
| 707 | | { |
| 708 | | UINT32 op = desc->opptr.l[0]; |
| 709 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 710 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 711 | | UML_SUB(block, uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 712 | | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 713 | | } |
| 714 | | |
| 715 | | void arm7_cpu_device::drctg04_01_11(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Rd, Hs */ |
| 716 | | { |
| 717 | | UINT32 op = desc->opptr.l[0]; |
| 718 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 719 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 720 | | UML_SUB(block, uml::I3, DRC_REG(rd), DRC_REG(rs+8)); |
| 721 | | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs+8)); |
| 722 | | } |
| 723 | | |
| 724 | | void arm7_cpu_device::drctg04_01_12(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Hd, Rs */ |
| 725 | | { |
| 726 | | UINT32 op = desc->opptr.l[0]; |
| 727 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 728 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 729 | | UML_SUB(block, uml::I3, DRC_REG(rd+8), DRC_REG(rs)); |
| 730 | | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd+8), DRC_REG(rs)); |
| 731 | | } |
| 732 | | |
| 733 | | void arm7_cpu_device::drctg04_01_13(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Hd, Hs */ |
| 734 | | { |
| 735 | | UINT32 op = desc->opptr.l[0]; |
| 736 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 737 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 738 | | UML_SUB(block, uml::I3, DRC_REG(rd+8), DRC_REG(rs+8)); |
| 739 | | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd+8), DRC_REG(rs+8)); |
| 740 | | } |
| 741 | | |
| 742 | | /* MOV group */ |
| 743 | | |
| 744 | | void arm7_cpu_device::drctg04_01_20(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Rd, Rs (undefined) */ |
| 745 | | { |
| 746 | | UINT32 op = desc->opptr.l[0]; |
| 747 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 748 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 749 | | UML_MOV(block, DRC_REG(rd), DRC_REG(rs)); |
| 750 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 751 | | } |
| 752 | | |
| 753 | | void arm7_cpu_device::drctg04_01_21(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Rd, Hs */ |
| 754 | | { |
| 755 | | UINT32 op = desc->opptr.l[0]; |
| 756 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 757 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 758 | | UML_MOV(block, DRC_REG(rd), DRC_REG(rs+8)); |
| 759 | | if (rs == 7) |
| 760 | | { |
| 761 | | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), 4); |
| 762 | | } |
| 763 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 764 | | } |
| 765 | | |
| 766 | | void arm7_cpu_device::drctg04_01_22(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Hd, Rs */ |
| 767 | | { |
| 768 | | UINT32 op = desc->opptr.l[0]; |
| 769 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 770 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 771 | | UML_MOV(block, DRC_REG(rd+8), DRC_REG(rs)); |
| 772 | | // CHECKME |
| 773 | | if (rd != 7) |
| 774 | | { |
| 775 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 776 | | } |
| 777 | | else |
| 778 | | { |
| 779 | | UML_AND(block, DRC_PC, DRC_PC, ~1); |
| 780 | | } |
| 781 | | } |
| 782 | | |
| 783 | | void arm7_cpu_device::drctg04_01_23(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Hd, Hs */ |
| 784 | | { |
| 785 | | UINT32 op = desc->opptr.l[0]; |
| 786 | | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 787 | | UINT32 rd = op & THUMB_HIREG_RD; |
| 788 | | UML_MOV(block, DRC_REG(rd+8), DRC_REG(rs+8)); |
| 789 | | if (rs == 7) |
| 790 | | { |
| 791 | | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), 4); |
| 792 | | } |
| 793 | | if (rd != 7) |
| 794 | | { |
| 795 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 796 | | } |
| 797 | | else |
| 798 | | { |
| 799 | | UML_AND(block, DRC_PC, DRC_PC, ~1); |
| 800 | | } |
| 801 | | |
| 802 | | } |
| 803 | | |
void arm7_cpu_device::drctg04_01_30(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc)
{
    // BX Rs: branch-exchange through a low register.  Bit 0 of the target
    // selects the instruction set: set -> stay in Thumb, clear -> switch to
    // ARM (T bit cleared).
    UINT32 op = desc->opptr.l[0];
    uml::code_label switch_state;
    uml::code_label done;
    UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT;
    UML_MOV(block, uml::I0, DRC_REG(rs));
    UML_TEST(block, uml::I0, 1);
    UML_JMPc(block, uml::COND_Z, switch_state = compiler->labelnum++); // bit 0 clear: leave Thumb
    UML_AND(block, uml::I0, uml::I0, ~1);          // stay in Thumb: drop the mode bit
    UML_JMP(block, done = compiler->labelnum++);

    UML_LABEL(block, switch_state);
    UML_AND(block, DRC_CPSR, DRC_CPSR, ~T_MASK);   // switch to ARM state
    // NOTE(review): adding 2 when bit 1 is set rounds the target up to the
    // next word boundary; ARM-state BX normally forces word alignment —
    // confirm round-up (vs. masking bit 1 off) against the interpreter.
    UML_TEST(block, uml::I0, 2);
    UML_MOVc(block, uml::COND_NZ, uml::I1, 2);
    UML_MOVc(block, uml::COND_Z, uml::I1, 0);
    UML_ADD(block, uml::I0, uml::I0, uml::I1);

    UML_LABEL(block, done);
    UML_MOV(block, DRC_PC, uml::I0);
}
| 826 | | |
void arm7_cpu_device::drctg04_01_31(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc)
{
    // BX Hs: branch-exchange through a hi register (Rs + 8).  Same mode
    // selection as drctg04_01_30, with an extra +2 pre-fetch fixup when the
    // source is R15.
    UINT32 op = desc->opptr.l[0];
    uml::code_label switch_state;
    uml::code_label done;
    UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT;
    UML_MOV(block, uml::I0, DRC_REG(rs+8));
    if(rs == 7)
    {
        UML_ADD(block, uml::I0, uml::I0, 2);       // source is R15: pre-fetch fixup
    }
    UML_TEST(block, uml::I0, 1);
    UML_JMPc(block, uml::COND_Z, switch_state = compiler->labelnum++); // bit 0 clear: leave Thumb
    UML_AND(block, uml::I0, uml::I0, ~1);          // stay in Thumb: drop the mode bit
    UML_JMP(block, done = compiler->labelnum++);

    UML_LABEL(block, switch_state);
    UML_AND(block, DRC_CPSR, DRC_CPSR, ~T_MASK);   // switch to ARM state
    // NOTE(review): adding 2 when bit 1 is set rounds the target up to the
    // next word boundary — confirm against the interpreter (see drctg04_01_30).
    UML_TEST(block, uml::I0, 2);
    UML_MOVc(block, uml::COND_NZ, uml::I1, 2);
    UML_MOVc(block, uml::COND_Z, uml::I1, 0);
    UML_ADD(block, uml::I0, uml::I0, uml::I1);

    UML_LABEL(block, done);
    UML_MOV(block, DRC_PC, uml::I0);
}
| 853 | | |
| 854 | | void arm7_cpu_device::drctg04_01_32(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 855 | | { |
| 856 | | UINT32 op = desc->opptr.l[0]; |
| 857 | | UINT32 pc = desc->pc; |
| 858 | | fatalerror("%08x: G4-3 Undefined Thumb instruction: %04x\n", pc, op); |
| 859 | | } |
| 860 | | |
| 861 | | void arm7_cpu_device::drctg04_01_33(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 862 | | { |
| 863 | | UINT32 op = desc->opptr.l[0]; |
| 864 | | UINT32 pc = desc->pc; |
| 865 | | fatalerror("%08x: G4-3 Undefined Thumb instruction: %04x\n", pc, op); |
| 866 | | } |
| 867 | | |
| 868 | | void arm7_cpu_device::drctg04_0203(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 869 | | { |
| 870 | | UINT32 op = desc->opptr.l[0]; |
| 871 | | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 872 | | UINT32 imm = 4 + ((op & THUMB_INSN_IMM) << 2); |
| 873 | | UML_AND(block, uml::I0, DRC_PC, ~2); |
| 874 | | UML_ADD(block, uml::I0, uml::I0, imm); |
| 875 | | UML_CALLH(block, *m_impstate.read32); |
| 876 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 877 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 878 | | } |
| 879 | | |
| 880 | | /* LDR* STR* group */ |
| 881 | | |
| 882 | | void arm7_cpu_device::drctg05_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* STR Rd, [Rn, Rm] */ |
| 883 | | { |
| 884 | | UINT32 op = desc->opptr.l[0]; |
| 885 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 886 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 887 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 888 | | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 889 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 890 | | UML_CALLH(block, *m_impstate.write32); |
| 891 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 892 | | } |
| 893 | | |
| 894 | | void arm7_cpu_device::drctg05_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* STRH Rd, [Rn, Rm] */ |
| 895 | | { |
| 896 | | UINT32 op = desc->opptr.l[0]; |
| 897 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 898 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 899 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 900 | | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 901 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 902 | | UML_CALLH(block, *m_impstate.write16); |
| 903 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 904 | | } |
| 905 | | |
| 906 | | void arm7_cpu_device::drctg05_2(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* STRB Rd, [Rn, Rm] */ |
| 907 | | { |
| 908 | | UINT32 op = desc->opptr.l[0]; |
| 909 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 910 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 911 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 912 | | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 913 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 914 | | UML_CALLH(block, *m_impstate.write16); |
| 915 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 916 | | } |
| 917 | | |
| 918 | | void arm7_cpu_device::drctg05_3(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDSB Rd, [Rn, Rm] todo, add dasm */ |
| 919 | | { |
| 920 | | UINT32 op = desc->opptr.l[0]; |
| 921 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 922 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 923 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 924 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 925 | | UML_CALLH(block, *m_impstate.read8); |
| 926 | | UML_SEXT(block, DRC_REG(rd), uml::I0, uml::SIZE_BYTE); |
| 927 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 928 | | } |
| 929 | | |
| 930 | | void arm7_cpu_device::drctg05_4(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDR Rd, [Rn, Rm] */ |
| 931 | | { |
| 932 | | UINT32 op = desc->opptr.l[0]; |
| 933 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 934 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 935 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 936 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 937 | | UML_CALLH(block, *m_impstate.read32); |
| 938 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 939 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 940 | | } |
| 941 | | |
| 942 | | void arm7_cpu_device::drctg05_5(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDRH Rd, [Rn, Rm] */ |
| 943 | | { |
| 944 | | UINT32 op = desc->opptr.l[0]; |
| 945 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 946 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 947 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 948 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 949 | | UML_CALLH(block, *m_impstate.read16); |
| 950 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 951 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 952 | | } |
| 953 | | |
| 954 | | void arm7_cpu_device::drctg05_6(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDRB Rd, [Rn, Rm] */ |
| 955 | | { |
| 956 | | UINT32 op = desc->opptr.l[0]; |
| 957 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 958 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 959 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 960 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 961 | | UML_CALLH(block, *m_impstate.read8); |
| 962 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 963 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 964 | | } |
| 965 | | |
| 966 | | void arm7_cpu_device::drctg05_7(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDSH Rd, [Rn, Rm] */ |
| 967 | | { |
| 968 | | UINT32 op = desc->opptr.l[0]; |
| 969 | | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 970 | | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 971 | | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 972 | | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 973 | | UML_CALLH(block, *m_impstate.read16); |
| 974 | | UML_SEXT(block, DRC_REG(rd), uml::I0, uml::SIZE_WORD); |
| 975 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 976 | | } |
| 977 | | |
| 978 | | /* Word Store w/ Immediate Offset */ |
| 979 | | |
| 980 | | void arm7_cpu_device::drctg06_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */ |
| 981 | | { |
| 982 | | UINT32 op = desc->opptr.l[0]; |
| 983 | | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 984 | | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 985 | | INT32 offs = ((op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT) << 2; |
| 986 | | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 987 | | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 988 | | UML_CALLH(block, *m_impstate.write32); |
| 989 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 990 | | } |
| 991 | | |
| 992 | | void arm7_cpu_device::drctg06_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 993 | | { |
| 994 | | UINT32 op = desc->opptr.l[0]; |
| 995 | | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 996 | | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 997 | | INT32 offs = ((op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT) << 2; |
| 998 | | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 999 | | UML_CALLH(block, *m_impstate.read32); |
| 1000 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1001 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1002 | | } |
| 1003 | | |
/* Byte Store w/ Immediate Offset */
| 1005 | | |
| 1006 | | void arm7_cpu_device::drctg07_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */ |
| 1007 | | { |
| 1008 | | UINT32 op = desc->opptr.l[0]; |
| 1009 | | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 1010 | | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 1011 | | INT32 offs = (op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT; |
| 1012 | | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 1013 | | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 1014 | | UML_CALLH(block, *m_impstate.write8); |
| 1015 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1016 | | } |
| 1017 | | |
| 1018 | | void arm7_cpu_device::drctg07_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 1019 | | { |
| 1020 | | UINT32 op = desc->opptr.l[0]; |
| 1021 | | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 1022 | | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 1023 | | INT32 offs = (op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT; |
| 1024 | | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 1025 | | UML_CALLH(block, *m_impstate.read8); |
| 1026 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1027 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1028 | | } |
| 1029 | | |
| 1030 | | /* Load/Store Halfword */ |
| 1031 | | |
void arm7_cpu_device::drctg08_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */
{
	// STRH Rd, [Rn, #imm]: halfword store at Rn plus the 5-bit immediate scaled by 2.
	UINT32 op = desc->opptr.l[0];
	UINT32 offs = (op & THUMB_HALFOP_OFFS) >> THUMB_HALFOP_OFFS_SHIFT;
	UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;   // NOTE(review): reuses the ADDSUB field masks for Rn/Rd -- assumed to line up with this encoding; verify
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;
	UML_ADD(block, uml::I0, DRC_REG(rn), offs << 1);   // I0 = Rn + (imm << 1)
	UML_MOV(block, uml::I1, DRC_REG(rd));              // I1 = value to store
	UML_CALLH(block, *m_impstate.write16);
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 1043 | | |
void arm7_cpu_device::drctg08_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */
{
	// LDRH Rd, [Rn, #imm]: halfword load from Rn plus the 5-bit immediate scaled by 2.
	UINT32 op = desc->opptr.l[0];
	UINT32 offs = (op & THUMB_HALFOP_OFFS) >> THUMB_HALFOP_OFFS_SHIFT;
	UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;   // NOTE(review): reuses the ADDSUB field masks for Rn/Rd -- assumed to line up with this encoding; verify
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;
	UML_ADD(block, uml::I0, DRC_REG(rn), offs << 1);   // I0 = Rn + (imm << 1)
	UML_CALLH(block, *m_impstate.read16);              // halfword read returns in I0
	UML_MOV(block, DRC_REG(rd), uml::I0);              // stored exactly as returned by the read handler
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 1055 | | |
| 1056 | | /* Stack-Relative Load/Store */ |
| 1057 | | |
| 1058 | | void arm7_cpu_device::drctg09_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */ |
| 1059 | | { |
| 1060 | | UINT32 op = desc->opptr.l[0]; |
| 1061 | | UINT32 rd = (op & THUMB_STACKOP_RD) >> THUMB_STACKOP_RD_SHIFT; |
| 1062 | | INT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2; |
| 1063 | | UML_ADD(block, uml::I0, DRC_REG(13), offs); |
| 1064 | | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 1065 | | UML_CALLH(block, *m_impstate.write32); |
| 1066 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1067 | | } |
| 1068 | | |
| 1069 | | void arm7_cpu_device::drctg09_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 1070 | | { |
| 1071 | | UINT32 op = desc->opptr.l[0]; |
| 1072 | | UINT32 rd = (op & THUMB_STACKOP_RD) >> THUMB_STACKOP_RD_SHIFT; |
| 1073 | | UINT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2; |
| 1074 | | UML_ADD(block, uml::I0, DRC_REG(13), offs); |
| 1075 | | UML_CALLH(block, *m_impstate.read32); |
| 1076 | | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1077 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1078 | | } |
| 1079 | | |
| 1080 | | /* Get relative address */ |
| 1081 | | |
void arm7_cpu_device::drctg0a_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, PC, #nn */
{
	// Rd = Align(PC + 4) + imm8*4: the pipelined PC is forced word-aligned before the add.
	UINT32 op = desc->opptr.l[0];
	UINT32 rd = (op & THUMB_RELADDR_RD) >> THUMB_RELADDR_RD_SHIFT;
	INT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2;   // zero-extended 8-bit immediate, word-scaled
	UML_ADD(block, uml::I0, DRC_PC, 4);               // pipelined PC value
	UML_AND(block, uml::I0, uml::I0, ~2);             // clear bit 1 (bit 0 is already 0 in Thumb state)
	UML_ADD(block, DRC_REG(rd), uml::I0, offs);
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 1092 | | |
| 1093 | | void arm7_cpu_device::drctg0a_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, SP, #nn */ |
| 1094 | | { |
| 1095 | | UINT32 op = desc->opptr.l[0]; |
| 1096 | | UINT32 rd = (op & THUMB_RELADDR_RD) >> THUMB_RELADDR_RD_SHIFT; |
| 1097 | | INT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2; |
| 1098 | | UML_ADD(block, DRC_REG(rd), DRC_REG(13), offs); |
| 1099 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1100 | | } |
| 1101 | | |
| 1102 | | /* Stack-Related Opcodes */ |
| 1103 | | |
void arm7_cpu_device::drctg0b_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD SP, #imm */
{
	// SP +/- imm7*4: THUMB_INSN_IMM_S selects add versus subtract.
	UINT32 op = desc->opptr.l[0];
	INT32 addr = (op & THUMB_INSN_IMM);
	addr &= ~THUMB_INSN_IMM_S;                                       // strip the direction bit from the magnitude
	addr = ((op & THUMB_INSN_IMM_S) ? -(addr << 2) : (addr << 2));   // scale by 4, apply the direction
	UML_ADD(block, DRC_REG(13), DRC_REG(13), addr);
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 1113 | | |
| 1114 | | void arm7_cpu_device::drctg0b_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1115 | | { |
| 1116 | | UINT32 op = desc->opptr.l[0]; |
| 1117 | | UINT32 pc = desc->pc; |
| 1118 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1119 | | } |
| 1120 | | |
| 1121 | | void arm7_cpu_device::drctg0b_2(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1122 | | { |
| 1123 | | UINT32 op = desc->opptr.l[0]; |
| 1124 | | UINT32 pc = desc->pc; |
| 1125 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1126 | | } |
| 1127 | | |
| 1128 | | void arm7_cpu_device::drctg0b_3(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1129 | | { |
| 1130 | | UINT32 op = desc->opptr.l[0]; |
| 1131 | | UINT32 pc = desc->pc; |
| 1132 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1133 | | } |
| 1134 | | |
| 1135 | | void arm7_cpu_device::drctg0b_4(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* PUSH {Rlist} */ |
| 1136 | | { |
| 1137 | | UINT32 op = desc->opptr.l[0]; |
| 1138 | | for (INT32 offs = 7; offs >= 0; offs--) |
| 1139 | | { |
| 1140 | | if (op & (1 << offs)) |
| 1141 | | { |
| 1142 | | UML_SUB(block, DRC_REG(13), DRC_REG(13), 4); |
| 1143 | | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1144 | | UML_MOV(block, uml::I1, DRC_REG(offs)); |
| 1145 | | UML_CALLH(block, *m_impstate.write32); |
| 1146 | | } |
| 1147 | | } |
| 1148 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1149 | | } |
| 1150 | | |
| 1151 | | void arm7_cpu_device::drctg0b_5(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* PUSH {Rlist}{LR} */ |
| 1152 | | { |
| 1153 | | UINT32 op = desc->opptr.l[0]; |
| 1154 | | UML_SUB(block, DRC_REG(13), DRC_REG(13), 4); |
| 1155 | | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1156 | | UML_MOV(block, uml::I1, DRC_REG(14)); |
| 1157 | | UML_CALLH(block, *m_impstate.write32); |
| 1158 | | for (INT32 offs = 7; offs >= 0; offs--) |
| 1159 | | { |
| 1160 | | if (op & (1 << offs)) |
| 1161 | | { |
| 1162 | | UML_SUB(block, DRC_REG(13), DRC_REG(13), 4); |
| 1163 | | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1164 | | UML_MOV(block, uml::I1, DRC_REG(offs)); |
| 1165 | | UML_CALLH(block, *m_impstate.write32); |
| 1166 | | } |
| 1167 | | } |
| 1168 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1169 | | } |
| 1170 | | |
| 1171 | | void arm7_cpu_device::drctg0b_6(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1172 | | { |
| 1173 | | UINT32 op = desc->opptr.l[0]; |
| 1174 | | UINT32 pc = desc->pc; |
| 1175 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1176 | | } |
| 1177 | | |
| 1178 | | void arm7_cpu_device::drctg0b_7(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1179 | | { |
| 1180 | | UINT32 op = desc->opptr.l[0]; |
| 1181 | | UINT32 pc = desc->pc; |
| 1182 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1183 | | } |
| 1184 | | |
| 1185 | | void arm7_cpu_device::drctg0b_8(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1186 | | { |
| 1187 | | UINT32 op = desc->opptr.l[0]; |
| 1188 | | UINT32 pc = desc->pc; |
| 1189 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1190 | | } |
| 1191 | | |
| 1192 | | void arm7_cpu_device::drctg0b_9(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1193 | | { |
| 1194 | | UINT32 op = desc->opptr.l[0]; |
| 1195 | | UINT32 pc = desc->pc; |
| 1196 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1197 | | } |
| 1198 | | |
| 1199 | | void arm7_cpu_device::drctg0b_a(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1200 | | { |
| 1201 | | UINT32 op = desc->opptr.l[0]; |
| 1202 | | UINT32 pc = desc->pc; |
| 1203 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1204 | | } |
| 1205 | | |
| 1206 | | void arm7_cpu_device::drctg0b_b(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1207 | | { |
| 1208 | | UINT32 op = desc->opptr.l[0]; |
| 1209 | | UINT32 pc = desc->pc; |
| 1210 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1211 | | } |
| 1212 | | |
| 1213 | | void arm7_cpu_device::drctg0b_c(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* POP {Rlist} */ |
| 1214 | | { |
| 1215 | | UINT32 op = desc->opptr.l[0]; |
| 1216 | | for (INT32 offs = 0; offs < 8; offs++) |
| 1217 | | { |
| 1218 | | if (op & (1 << offs)) |
| 1219 | | { |
| 1220 | | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1221 | | UML_CALLH(block, *m_impstate.read32); |
| 1222 | | UML_MOV(block, DRC_REG(offs), uml::I0); |
| 1223 | | UML_ADD(block, DRC_REG(13), DRC_REG(13), 4); |
| 1224 | | } |
| 1225 | | } |
| 1226 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1227 | | } |
| 1228 | | |
| 1229 | | void arm7_cpu_device::drctg0b_d(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* POP {Rlist}{PC} */ |
| 1230 | | { |
| 1231 | | UINT32 op = desc->opptr.l[0]; |
| 1232 | | uml::code_label arch5up; |
| 1233 | | uml::code_label done; |
| 1234 | | uml::code_label switch_mode; |
| 1235 | | for (INT32 offs = 0; offs < 8; offs++) |
| 1236 | | { |
| 1237 | | if (op & (1 << offs)) |
| 1238 | | { |
| 1239 | | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1240 | | UML_CALLH(block, *m_impstate.read32); |
| 1241 | | UML_MOV(block, DRC_REG(offs), uml::I0); |
| 1242 | | UML_ADD(block, DRC_REG(13), DRC_REG(13), 4); |
| 1243 | | } |
| 1244 | | } |
| 1245 | | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1246 | | UML_CALLH(block, *m_impstate.read32); |
| 1247 | | UML_CMP(block, uml::mem(&m_archRev), 4); |
| 1248 | | UML_JMPc(block, uml::COND_A, arch5up = compiler->labelnum++); |
| 1249 | | UML_AND(block, DRC_PC, uml::I0, ~1); |
| 1250 | | |
| 1251 | | UML_LABEL(block, arch5up); |
| 1252 | | |
| 1253 | | UML_TEST(block, uml::I0, 1); |
| 1254 | | UML_JMPc(block, uml::COND_Z, switch_mode = compiler->labelnum++); |
| 1255 | | |
| 1256 | | UML_AND(block, uml::I0, uml::I0, ~1); |
| 1257 | | UML_MOV(block, DRC_PC, uml::I0); |
| 1258 | | UML_JMP(block, done); |
| 1259 | | |
| 1260 | | UML_LABEL(block, switch_mode); |
| 1261 | | UML_AND(block, DRC_CPSR, DRC_CPSR, ~T_MASK); |
| 1262 | | UML_TEST(block, uml::I0, 2); |
| 1263 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 2); |
| 1264 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1265 | | UML_ADD(block, uml::I0, uml::I0, uml::I1); |
| 1266 | | UML_MOV(block, DRC_PC, uml::I0); |
| 1267 | | |
| 1268 | | UML_LABEL(block, done); |
| 1269 | | UML_ADD(block, DRC_REG(13), DRC_REG(13), 4); |
| 1270 | | } |
| 1271 | | |
| 1272 | | void arm7_cpu_device::drctg0b_e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1273 | | { |
| 1274 | | UINT32 op = desc->opptr.l[0]; |
| 1275 | | UINT32 pc = desc->pc; |
| 1276 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1277 | | } |
| 1278 | | |
| 1279 | | void arm7_cpu_device::drctg0b_f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1280 | | { |
| 1281 | | UINT32 op = desc->opptr.l[0]; |
| 1282 | | UINT32 pc = desc->pc; |
| 1283 | | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1284 | | } |
| 1285 | | |
| 1286 | | /* Multiple Load/Store */ |
| 1287 | | |
| 1288 | | // "The address should normally be a word aligned quantity and non-word aligned addresses do not affect the instruction." |
| 1289 | | // "However, the bottom 2 bits of the address will appear on A[1:0] and might be interpreted by the memory system." |
| 1290 | | |
| 1291 | | // GBA "BB Ball" performs an unaligned read with A[1:0] = 2 and expects A[1] not to be ignored [BP 800B90A,(R4&3)!=0] |
| 1292 | | // GBA "Gadget Racers" performs an unaligned read with A[1:0] = 1 and expects A[0] to be ignored [BP B72,(R0&3)!=0] |
| 1293 | | |
void arm7_cpu_device::drctg0c_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */
{
	// STMIA Rb!, {Rlist}: store the listed low registers ascending from Rb, then write the
	// incremented address back into the base register.
	UINT32 op = desc->opptr.l[0];
	UINT32 rd = (op & THUMB_MULTLS_BASE) >> THUMB_MULTLS_BASE_SHIFT;
	UML_MOV(block, uml::I2, DRC_REG(rd));   // I2 = running store address
	for (INT32 offs = 0; offs < 8; offs++)
	{
		if (op & (1 << offs))
		{
			UML_AND(block, uml::I0, uml::I2, ~3);   // stores force word alignment (see the A[1:0] notes above)
			UML_MOV(block, uml::I1, DRC_REG(offs));
			UML_CALLH(block, *m_impstate.write32);
			UML_ADD(block, uml::I2, uml::I2, 4);
		}
	}
	UML_MOV(block, DRC_REG(rd), uml::I2);   // base register writeback
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 1312 | | |
| 1313 | | void arm7_cpu_device::drctg0c_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 1314 | | { |
| 1315 | | UINT32 op = desc->opptr.l[0]; |
| 1316 | | UINT32 rd = (op & THUMB_MULTLS_BASE) >> THUMB_MULTLS_BASE_SHIFT; |
| 1317 | | int rd_in_list = op & (1 << rd); |
| 1318 | | UML_MOV(block, uml::I2, DRC_REG(rd)); |
| 1319 | | for (INT32 offs = 0; offs < 8; offs++) |
| 1320 | | { |
| 1321 | | if (op & (1 << offs)) |
| 1322 | | { |
| 1323 | | UML_AND(block, uml::I0, uml::I2, ~1); |
| 1324 | | UML_CALLH(block, *m_impstate.read32); |
| 1325 | | UML_ADD(block, uml::I2, uml::I2, 4); |
| 1326 | | } |
| 1327 | | } |
| 1328 | | if (!rd_in_list) |
| 1329 | | { |
| 1330 | | UML_MOV(block, DRC_REG(rd), uml::I2); |
| 1331 | | } |
| 1332 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1333 | | } |
| 1334 | | |
| 1335 | | /* Conditional Branch */ |
| 1336 | | |
void arm7_cpu_device::drctg0d_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_EQ:
{
	// BEQ: branch when Z is set, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, Z_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // Z set -> take the branch
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // Z clear -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1346 | | |
void arm7_cpu_device::drctg0d_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_NE:
{
	// BNE: branch when Z is clear, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, Z_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // Z clear -> take the branch
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // Z set -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1356 | | |
void arm7_cpu_device::drctg0d_2(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_CS:
{
	// BCS: branch when C is set, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, C_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // C set -> take the branch
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // C clear -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1366 | | |
void arm7_cpu_device::drctg0d_3(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_CC:
{
	// BCC: branch when C is clear, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, C_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // C clear -> take the branch
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // C set -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1376 | | |
void arm7_cpu_device::drctg0d_4(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_MI:
{
	// BMI: branch when N is set, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, N_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // N set -> take the branch
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // N clear -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1386 | | |
void arm7_cpu_device::drctg0d_5(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_PL:
{
	// BPL: branch when N is clear, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, N_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // N clear -> take the branch
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // N set -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1396 | | |
void arm7_cpu_device::drctg0d_6(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_VS:
{
	// BVS: branch when V is set, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, V_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // V set -> take the branch
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // V clear -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1406 | | |
void arm7_cpu_device::drctg0d_7(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_VC:
{
	// BVC: branch when V is clear, otherwise step to the next opcode.
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, V_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // V clear -> take the branch
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // V set -> fall through
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1416 | | |
void arm7_cpu_device::drctg0d_8(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_HI:
{
	// BHI: branch when C is set AND Z is clear (unsigned higher).
	UINT32 op = desc->opptr.l[0];
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;   // sign-extended halfword offset, relative to PC+4
	UML_TEST(block, DRC_CPSR, C_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, 1);      // I0 = (C set)
	UML_MOVc(block, uml::COND_Z, uml::I0, 0);
	UML_TEST(block, DRC_CPSR, Z_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I1, 0);      // I1 = (Z clear)
	UML_MOVc(block, uml::COND_Z, uml::I1, 1);
	UML_AND(block, uml::I0, uml::I0, uml::I1);      // HI = C && !Z
	UML_TEST(block, uml::I0, 1);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1433 | | |
| 1434 | | void arm7_cpu_device::drctg0d_9(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_LS: |
| 1435 | | { |
| 1436 | | UINT32 op = desc->opptr.l[0]; |
| 1437 | | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1438 | | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1439 | | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1440 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1441 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1442 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1443 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1444 | | UML_AND(block, uml::I0, uml::I0, uml::I1); |
| 1445 | | UML_TEST(block, uml::I0, 1); |
| 1446 | | UML_MOVc(block, uml::COND_NZ, uml::I0, offs); |
| 1447 | | UML_MOVc(block, uml::COND_Z, uml::I0, 2); |
| 1448 | | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1449 | | } |
| 1450 | | |
| 1451 | | void arm7_cpu_device::drctg0d_a(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_GE: |
| 1452 | | { |
| 1453 | | UINT32 op = desc->opptr.l[0]; |
| 1454 | | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1455 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1456 | | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1457 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1458 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1459 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1460 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1461 | | UML_CMP(block, uml::I0, uml::I1); |
| 1462 | | UML_MOVc(block, uml::COND_E, uml::I0, offs); |
| 1463 | | UML_MOVc(block, uml::COND_NE, uml::I0, 2); |
| 1464 | | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1465 | | } |
| 1466 | | |
| 1467 | | void arm7_cpu_device::drctg0d_b(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_LT: |
| 1468 | | { |
| 1469 | | UINT32 op = desc->opptr.l[0]; |
| 1470 | | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1471 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1472 | | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1473 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1474 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1475 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1476 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1477 | | UML_CMP(block, uml::I0, uml::I1); |
| 1478 | | UML_MOVc(block, uml::COND_NE, uml::I0, offs); |
| 1479 | | UML_MOVc(block, uml::COND_E, uml::I0, 2); |
| 1480 | | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1481 | | } |
| 1482 | | |
| 1483 | | void arm7_cpu_device::drctg0d_c(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_GT: |
| 1484 | | { |
| 1485 | | UINT32 op = desc->opptr.l[0]; |
| 1486 | | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1487 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1488 | | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1489 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1490 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1491 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1492 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1493 | | UML_CMP(block, uml::I0, uml::I1); |
| 1494 | | UML_MOVc(block, uml::COND_E, uml::I0, 1); |
| 1495 | | UML_MOVc(block, uml::COND_NE, uml::I0, 0); |
| 1496 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1497 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1498 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1499 | | UML_AND(block, uml::I0, uml::I0, uml::I1); |
| 1500 | | UML_TEST(block, uml::I0, 1); |
| 1501 | | UML_MOVc(block, uml::COND_NZ, uml::I0, offs); |
| 1502 | | UML_MOVc(block, uml::COND_Z, uml::I0, 2); |
| 1503 | | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1504 | | } |
| 1505 | | |
| 1506 | | void arm7_cpu_device::drctg0d_d(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_LE: |
| 1507 | | { |
| 1508 | | UINT32 op = desc->opptr.l[0]; |
| 1509 | | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1510 | | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1511 | | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1512 | | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1513 | | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1514 | | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1515 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1516 | | UML_CMP(block, uml::I0, uml::I1); |
| 1517 | | UML_MOVc(block, uml::COND_NE, uml::I0, 1); |
| 1518 | | UML_MOVc(block, uml::COND_E, uml::I0, 0); |
| 1519 | | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1520 | | UML_MOVc(block, uml::COND_NZ, uml::I1, 0); |
| 1521 | | UML_MOVc(block, uml::COND_Z, uml::I1, 1); |
| 1522 | | UML_AND(block, uml::I0, uml::I0, uml::I1); |
| 1523 | | UML_TEST(block, uml::I0, 1); |
| 1524 | | UML_MOVc(block, uml::COND_NZ, uml::I0, offs); |
| 1525 | | UML_MOVc(block, uml::COND_Z, uml::I0, 2); |
| 1526 | | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1527 | | } |
| 1528 | | |
| 1529 | | void arm7_cpu_device::drctg0d_e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_AL: |
| 1530 | | { |
| 1531 | | UINT32 op = desc->opptr.l[0]; |
| 1532 | | UINT32 pc = desc->pc; |
| 1533 | | fatalerror("%08x: Undefined Thumb instruction: %04x (ARM9 reserved)\n", pc, op); |
| 1534 | | } |
| 1535 | | |
void arm7_cpu_device::drctg0d_f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // SWI (this is sort of a "hole" in the opcode encoding)
{
	// Software interrupt: flag the pending SWI and run the IRQ-check handler to take the exception.
	UML_MOV(block, uml::mem(&m_pendingSwi), 1);
	UML_CALLH(block, *m_impstate.check_irq);
}
| 1541 | | |
| 1542 | | /* B #offs */ |
| 1543 | | |
| 1544 | | void arm7_cpu_device::drctg0e_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1545 | | { |
| 1546 | | UINT32 op = desc->opptr.l[0]; |
| 1547 | | INT32 offs = (op & THUMB_BRANCH_OFFS) << 1; |
| 1548 | | if (offs & 0x00000800) |
| 1549 | | { |
| 1550 | | offs |= 0xfffff800; |
| 1551 | | } |
| 1552 | | UML_ADD(block, DRC_PC, DRC_PC, offs + 4); |
| 1553 | | } |
| 1554 | | |
void arm7_cpu_device::drctg0e_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc)
{
	// BLX suffix: target = (LR from the prefix + offset*2) & ~3; LR gets the return address
	// with bit 0 set to mark Thumb state.
	UINT32 op = desc->opptr.l[0];
	UINT32 offs = (op & THUMB_BLOP_OFFS) << 1;
	UML_MOV(block, uml::I0, DRC_REG(14));            // I0 = LR built by the prefix opcode
	UML_ADD(block, uml::I0, uml::I0, offs);
	UML_AND(block, uml::I0, uml::I0, ~3);            // word-align the branch target
	UML_ADD(block, DRC_REG(14), DRC_PC, 4);          // NOTE(review): +4 here vs. +2 in drctg0f_1 -- confirm against the interpreter core
	UML_OR(block, DRC_REG(14), DRC_REG(14), 1);      // flag Thumb state in the return address
	UML_MOV(block, DRC_PC, uml::I0);
}
| 1566 | | |
| 1567 | | /* BL */ |
| 1568 | | |
| 1569 | | void arm7_cpu_device::drctg0f_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1570 | | { |
| 1571 | | UINT32 op = desc->opptr.l[0]; |
| 1572 | | UINT32 addr = (op & THUMB_BLOP_OFFS) << 12; |
| 1573 | | if (addr & (1 << 22)) |
| 1574 | | { |
| 1575 | | addr |= 0xff800000; |
| 1576 | | } |
| 1577 | | addr += 4; |
| 1578 | | UML_ADD(block, DRC_REG(14), DRC_PC, addr); |
| 1579 | | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1580 | | } |
| 1581 | | |
| 1582 | | void arm7_cpu_device::drctg0f_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* BL */ |
| 1583 | | { |
| 1584 | | UINT32 op = desc->opptr.l[0]; |
| 1585 | | UINT32 addr = (op & THUMB_BLOP_OFFS) << 1; |
| 1586 | | UML_AND(block, uml::I0, DRC_REG(14), ~1); |
| 1587 | | UML_ADD(block, uml::I0, uml::I0, addr); |
| 1588 | | UML_ADD(block, DRC_REG(14), DRC_PC, 2); |
| 1589 | | UML_OR(block, DRC_REG(14), DRC_REG(14), 1); |
| 1590 | | UML_MOV(block, DRC_PC, uml::I0); |
| 1591 | | } |
trunk/src/emu/cpu/arm7/arm7drc.inc
| r0 | r28736 | |
| 1 | /***************************************************************************** |
| 2 | * |
| 3 | * arm7drc.inc |
| 4 | * Portable CPU Emulator for 32-bit ARM v3/4/5/6 |
| 5 | * |
| 6 | * Copyright Steve Ellenoff, all rights reserved. |
| 7 | * Thumb, DSP, and MMU support and many bugfixes by R. Belmont and Ryan Holtz. |
 *  Dynamic Recompiler (DRC) / Just In Time Compiler (JIT) by Ryan Holtz.
| 9 | * |
| 10 | * - This source code is released as freeware for non-commercial purposes. |
| 11 | * - You are free to use and redistribute this code in modified or |
| 12 | * unmodified form, provided you list me in the credits. |
| 13 | * - If you modify this source code, you must add a notice to each modified |
| 14 | * source file that it has been changed. If you're a nice person, you |
| 15 | * will clearly mark each change too. :) |
| 16 | * - If you wish to use this for commercial purposes, please contact me at |
| 17 | * sellenoff@hotmail.com |
| 18 | * - The author of this copywritten work reserves the right to change the |
| 19 | * terms of its usage and license at any time, including retroactively |
| 20 | * - This entire notice must remain in the source code. |
| 21 | * |
| 22 | * This work is based on: |
| 23 | * #1) 'Atmel Corporation ARM7TDMI (Thumb) Datasheet - January 1999' |
| 24 | * #2) Arm 2/3/6 emulator By Bryan McPhail (bmcphail@tendril.co.uk) and Phil Stroffolino (MAME CORE 0.76) |
| 25 | * |
| 26 | *****************************************************************************/ |
| 27 | |
| 28 | /****************************************************************************** |
| 29 | * Notes: |
| 30 | |
| 31 | ** This is a plain vanilla implementation of an ARM7 cpu which incorporates my ARM7 core. |
| 32 | It can be used as is, or used to demonstrate how to utilize the arm7 core to create a cpu |
| 33 | that uses the core, since there are numerous different mcu packages that incorporate an arm7 core. |
| 34 | |
| 35 | See the notes in the arm7core.c file itself regarding issues/limitations of the arm7 core. |
| 36 | ** |
| 37 | *****************************************************************************/ |
| 38 | |
| 39 | |
| 40 | /*************************************************************************** |
| 41 | DEBUGGING |
| 42 | ***************************************************************************/ |
| 43 | |
| 44 | #define LOG_UML (0) |
| 45 | #define LOG_NATIVE (0) |
| 46 | |
| 47 | #define SINGLE_INSTRUCTION_MODE (0) |
| 48 | |
| 49 | /*************************************************************************** |
| 50 | CONSTANTS |
| 51 | ***************************************************************************/ |
| 52 | |
| 53 | #include "arm7tdrc.inc" |
| 54 | |
/* map variables */
#define MAPVAR_PC uml::M0           // UML map variable carrying the PC
#define MAPVAR_CYCLES uml::M1       // UML map variable carrying the cycle count

/* size of the execution code cache */
#define CACHE_SIZE (32 * 1024 * 1024)

/* compilation boundaries -- how far back/forward does the analysis extend? */
#define COMPILE_BACKWARDS_BYTES 128
#define COMPILE_FORWARDS_BYTES 512
#define COMPILE_MAX_INSTRUCTIONS ((COMPILE_BACKWARDS_BYTES/4) + (COMPILE_FORWARDS_BYTES/4))   // NOTE(review): assumes 4-byte opcodes; Thumb opcodes are 2 bytes -- confirm this bound is intentional
#define COMPILE_MAX_SEQUENCE 64

/* exit codes */
#define EXECUTE_OUT_OF_CYCLES 0
#define EXECUTE_MISSING_CODE 1
#define EXECUTE_UNMAPPED_CODE 2
#define EXECUTE_RESET_CACHE 3
| 74 | |
| 75 | /*************************************************************************** |
| 76 | INLINE FUNCTIONS |
| 77 | ***************************************************************************/ |
| 78 | |
| 79 | /*------------------------------------------------- |
| 80 | epc - compute the exception PC from a |
| 81 | descriptor |
| 82 | -------------------------------------------------*/ |
| 83 | |
/* Compute the exception PC for an instruction descriptor.
   On ARM7 this is simply the opcode's own PC; no adjustment is needed. */
INLINE UINT32 epc(const opcode_desc *desc)
{
	return desc->pc;
}
| 88 | |
| 89 | |
| 90 | /*------------------------------------------------- |
| 91 | alloc_handle - allocate a handle if not |
| 92 | already allocated |
| 93 | -------------------------------------------------*/ |
| 94 | |
| 95 | INLINE void alloc_handle(drcuml_state *drcuml, uml::code_handle **handleptr, const char *name) |
| 96 | { |
| 97 | if (*handleptr == NULL) |
| 98 | *handleptr = drcuml->handle_alloc(name); |
| 99 | } |
| 100 | |
| 101 | |
| 102 | /*------------------------------------------------- |
| 103 | load_fast_iregs - load any fast integer |
| 104 | registers |
| 105 | -------------------------------------------------*/ |
| 106 | |
| 107 | void arm7_cpu_device::load_fast_iregs(drcuml_block *block) |
| 108 | { |
| 109 | int regnum; |
| 110 | |
| 111 | for (regnum = 0; regnum < ARRAY_LENGTH(m_impstate.regmap); regnum++) |
| 112 | if (m_impstate.regmap[regnum].is_int_register()) |
| 113 | UML_DMOV(block, uml::ireg(m_impstate.regmap[regnum].ireg() - uml::REG_I0), uml::mem(&m_r[regnum])); |
| 114 | } |
| 115 | |
| 116 | |
| 117 | /*------------------------------------------------- |
| 118 | save_fast_iregs - save any fast integer |
| 119 | registers |
| 120 | -------------------------------------------------*/ |
| 121 | |
| 122 | void arm7_cpu_device::save_fast_iregs(drcuml_block *block) |
| 123 | { |
| 124 | int regnum; |
| 125 | |
| 126 | for (regnum = 0; regnum < ARRAY_LENGTH(m_impstate.regmap); regnum++) |
| 127 | if (m_impstate.regmap[regnum].is_int_register()) |
| 128 | UML_DMOV(block, uml::mem(&m_r[regnum]), uml::ireg(m_impstate.regmap[regnum].ireg() - uml::REG_I0)); |
| 129 | } |
| 130 | |
| 131 | |
| 132 | |
| 133 | /*************************************************************************** |
| 134 | CORE CALLBACKS |
| 135 | ***************************************************************************/ |
| 136 | |
| 137 | /*------------------------------------------------- |
| 138 | arm7_init - initialize the processor |
| 139 | -------------------------------------------------*/ |
| 140 | |
/* One-time DRC setup: allocates the translation cache, builds the UML
   generator, registers debugger symbols, and computes the register map. */
void arm7_cpu_device::arm7_drc_init()
{
	drc_cache *cache;
	drcbe_info beinfo;
	UINT32 flags = 0;

	/* allocate enough space for the cache and the core */
	cache = auto_alloc(machine(), drc_cache(CACHE_SIZE));
	if (cache == NULL)
		fatalerror("Unable to allocate cache of size %d\n", (UINT32)(CACHE_SIZE));

	/* allocate the implementation-specific state from the full cache */
	memset(&m_impstate, 0, sizeof(m_impstate));
	m_impstate.cache = cache;

	/* initialize the UML generator */
	if (LOG_UML)
		flags |= DRCUML_OPTION_LOG_UML;
	if (LOG_NATIVE)
		flags |= DRCUML_OPTION_LOG_NATIVE;
	m_impstate.drcuml = new drcuml_state(*this, *cache, flags, 1, 32, 1);

	/* add symbols for our stuff */
	m_impstate.drcuml->symbol_add(&m_icount, sizeof(m_icount), "icount");
	/* 37 register slots (presumably the 16 visible GPRs plus banked
	   registers and status registers - confirm against m_r's declaration) */
	for (int regnum = 0; regnum < 37; regnum++)
	{
		char buf[10];
		sprintf(buf, "r%d", regnum);
		m_impstate.drcuml->symbol_add(&m_r[regnum], sizeof(m_r[regnum]), buf);
	}
	m_impstate.drcuml->symbol_add(&m_impstate.mode, sizeof(m_impstate.mode), "mode");
	m_impstate.drcuml->symbol_add(&m_impstate.arg0, sizeof(m_impstate.arg0), "arg0");
	m_impstate.drcuml->symbol_add(&m_impstate.arg1, sizeof(m_impstate.arg1), "arg1");
	m_impstate.drcuml->symbol_add(&m_impstate.numcycles, sizeof(m_impstate.numcycles), "numcycles");
	//m_impstate.drcuml->symbol_add(&m_impstate.fpmode, sizeof(m_impstate.fpmode), "fpmode"); // TODO

	/* initialize the front-end helper */
	//m_impstate.drcfe = auto_alloc(machine(), arm7_frontend(this, COMPILE_BACKWARDS_BYTES, COMPILE_FORWARDS_BYTES, SINGLE_INSTRUCTION_MODE ? 1 : COMPILE_MAX_SEQUENCE));

	/* allocate memory for cache-local state and initialize it */
	//memcpy(&m_impstate.fpmode, fpmode_source, sizeof(fpmode_source)); // TODO

	/* compute the register parameters: slot 0 gets a null parameter,
	   every other slot defaults to a memory-backed parameter */
	for (int regnum = 0; regnum < 37; regnum++)
	{
		m_impstate.regmap[regnum] = (regnum == 0) ? uml::parameter(0) : uml::parameter::make_memory(&m_r[regnum]);
	}

	/* if we have registers to spare, assign r2, r3, r4 to leftovers */
	//if (!DISABLE_FAST_REGISTERS) // TODO
	{
		/* remap the hottest ARM registers (PC, CPSR, SP) onto spare
		   backend integer registers I4-I6 when the backend has them */
		m_impstate.drcuml->get_backend_info(beinfo);
		if (beinfo.direct_iregs > 4)
		{ // PC
			m_impstate.regmap[eR15] = uml::I4;
		}
		if (beinfo.direct_iregs > 5)
		{ // Status
			m_impstate.regmap[eCPSR] = uml::I5;
		}
		if (beinfo.direct_iregs > 6)
		{ // SP
			m_impstate.regmap[eR13] = uml::I6;
		}
	}

	/* mark the cache dirty so it is updated on next execute */
	m_impstate.cache_dirty = TRUE;
}
| 210 | |
| 211 | |
| 212 | /*------------------------------------------------- |
| 213 | arm7_execute - execute the CPU for the |
| 214 | specified number of cycles |
| 215 | -------------------------------------------------*/ |
| 216 | |
| 217 | void arm7_cpu_device::execute_run_drc() |
| 218 | { |
| 219 | drcuml_state *drcuml = m_impstate.drcuml; |
| 220 | int execute_result; |
| 221 | |
| 222 | /* reset the cache if dirty */ |
| 223 | if (m_impstate.cache_dirty) |
| 224 | code_flush_cache(); |
| 225 | m_impstate.cache_dirty = FALSE; |
| 226 | |
| 227 | /* execute */ |
| 228 | do |
| 229 | { |
| 230 | /* run as much as we can */ |
| 231 | execute_result = drcuml->execute(*m_impstate.entry); |
| 232 | |
| 233 | /* if we need to recompile, do it */ |
| 234 | if (execute_result == EXECUTE_MISSING_CODE) |
| 235 | code_compile_block(m_impstate.mode, m_r[eR15]); |
| 236 | else if (execute_result == EXECUTE_UNMAPPED_CODE) |
| 237 | fatalerror("Attempted to execute unmapped code at PC=%08X\n", m_r[eR15]); |
| 238 | else if (execute_result == EXECUTE_RESET_CACHE) |
| 239 | code_flush_cache(); |
| 240 | |
| 241 | } while (execute_result != EXECUTE_OUT_OF_CYCLES); |
| 242 | } |
| 243 | |
| 244 | /*------------------------------------------------- |
| 245 | arm7_exit - cleanup from execution |
| 246 | -------------------------------------------------*/ |
| 247 | |
/* Tear down the DRC: releases the objects created in arm7_drc_init. */
void arm7_cpu_device::arm7_drc_exit()
{
	/* clean up the DRC */
	//auto_free(machine(), m_impstate.drcfe);
	delete m_impstate.drcuml;                   // new'ed in arm7_drc_init
	auto_free(machine(), m_impstate.cache);     // auto_alloc'ed in arm7_drc_init
}
| 255 | |
| 256 | |
| 257 | /*------------------------------------------------- |
| 258 | arm7drc_set_options - configure DRC options |
| 259 | -------------------------------------------------*/ |
| 260 | |
/* Record the caller-supplied DRC option flags for later use. */
void arm7_cpu_device::arm7drc_set_options(UINT32 options)
{
	m_impstate.drcoptions = options;
}
| 265 | |
| 266 | |
| 267 | /*------------------------------------------------- |
| 268 | arm7drc_add_fastram - add a new fastram |
| 269 | region |
| 270 | -------------------------------------------------*/ |
| 271 | |
| 272 | void arm7_cpu_device::arm7drc_add_fastram(offs_t start, offs_t end, UINT8 readonly, void *base) |
| 273 | { |
| 274 | if (m_impstate.fastram_select < ARRAY_LENGTH(m_impstate.fastram)) |
| 275 | { |
| 276 | m_impstate.fastram[m_impstate.fastram_select].start = start; |
| 277 | m_impstate.fastram[m_impstate.fastram_select].end = end; |
| 278 | m_impstate.fastram[m_impstate.fastram_select].readonly = readonly; |
| 279 | m_impstate.fastram[m_impstate.fastram_select].base = base; |
| 280 | m_impstate.fastram_select++; |
| 281 | } |
| 282 | } |
| 283 | |
| 284 | |
| 285 | /*------------------------------------------------- |
| 286 | arm7drc_add_hotspot - add a new hotspot |
| 287 | -------------------------------------------------*/ |
| 288 | |
| 289 | void arm7_cpu_device::arm7drc_add_hotspot(offs_t pc, UINT32 opcode, UINT32 cycles) |
| 290 | { |
| 291 | if (m_impstate.hotspot_select < ARRAY_LENGTH(m_impstate.hotspot)) |
| 292 | { |
| 293 | m_impstate.hotspot[m_impstate.hotspot_select].pc = pc; |
| 294 | m_impstate.hotspot[m_impstate.hotspot_select].opcode = opcode; |
| 295 | m_impstate.hotspot[m_impstate.hotspot_select].cycles = cycles; |
| 296 | m_impstate.hotspot_select++; |
| 297 | } |
| 298 | } |
| 299 | |
| 300 | |
| 301 | |
| 302 | /*************************************************************************** |
| 303 | CACHE MANAGEMENT |
| 304 | ***************************************************************************/ |
| 305 | |
| 306 | /*------------------------------------------------- |
| 307 | code_flush_cache - flush the cache and |
| 308 | regenerate static code |
| 309 | -------------------------------------------------*/ |
| 310 | |
/* Reset the translation cache and regenerate all static code: entry point,
   common handlers, and the memory accessor subroutines. */
void arm7_cpu_device::code_flush_cache()
{
	/* empty the transient cache contents */
	m_impstate.drcuml->reset();

	try
	{
		/* generate the entry point and out-of-cycles handlers */
		static_generate_entry_point();
		static_generate_nocode_handler();
		static_generate_out_of_cycles();
		static_generate_tlb_translate(NULL); // TODO FIXME
		static_generate_detect_fault(NULL); // TODO FIXME
		//static_generate_tlb_mismatch();

		/* add subroutines for memory accesses */
		/* args: (size, TRUE for the write accessors, <third flag - meaning
		   not visible here, confirm>, name, handle to fill in) */
		static_generate_memory_accessor(1, FALSE, FALSE, "read8", &m_impstate.read8);
		static_generate_memory_accessor(1, TRUE, FALSE, "write8", &m_impstate.write8);
		static_generate_memory_accessor(2, FALSE, FALSE, "read16", &m_impstate.read16);
		static_generate_memory_accessor(2, TRUE, FALSE, "write16", &m_impstate.write16);
		static_generate_memory_accessor(4, FALSE, FALSE, "read32", &m_impstate.read32);
		static_generate_memory_accessor(4, TRUE, FALSE, "write32", &m_impstate.write32);
	}
	catch (drcuml_block::abort_compilation &)
	{
		/* if static codegen cannot fit even in a freshly-reset cache, give up */
		fatalerror("Unrecoverable error generating static code\n");
	}
}
| 339 | |
| 340 | |
| 341 | /*------------------------------------------------- |
| 342 | code_compile_block - compile a block of the |
| 343 | given mode at the specified pc |
| 344 | -------------------------------------------------*/ |
| 345 | |
/* Compile a block of code starting at the given mode/pc: walks the front-end
   description, emits a hashed UML block per instruction sequence, and retries
   after a cache flush if the block does not fit. */
void arm7_cpu_device::code_compile_block(UINT8 mode, offs_t pc)
{
	drcuml_state *drcuml = m_impstate.drcuml;
	compiler_state compiler = { 0 };
	const opcode_desc *seqlast;
	int override = FALSE;

	g_profiler.start(PROFILER_DRC_COMPILE);

	/* get a description of this sequence */
	/* NOTE(review): the front-end is not hooked up (see the commented-out
	   drcfe allocation in arm7_drc_init), so desclist is always NULL here
	   and the sequence loop below is currently a no-op */
	// TODO FIXME
	const opcode_desc *desclist = NULL; //m_impstate.drcfe->describe_code(pc); // TODO
//  if (LOG_UML || LOG_NATIVE)
//      log_opcode_desc(drcuml, desclist, 0);

	/* if we get an error back, flush the cache and try again */
	bool succeeded = false;
	while (!succeeded)
	{
		try
		{
			/* start the block */
			drcuml_block *block = drcuml->begin_block(4096);

			/* loop until we get through all instruction sequences */
			for (const opcode_desc *seqhead = desclist; seqhead != NULL; seqhead = seqlast->next())
			{
				const opcode_desc *curdesc;
				UINT32 nextpc;

				/* add a code log entry */
				if (LOG_UML)
					block->append_comment("-------------------------");                 // comment

				/* determine the last instruction in this sequence */
				for (seqlast = seqhead; seqlast != NULL; seqlast = seqlast->next())
					if (seqlast->flags & OPFLAG_END_SEQUENCE)
						break;
				assert(seqlast != NULL);

				/* if we don't have a hash for this mode/pc, or if we are overriding all, add one */
				if (override || !drcuml->hash_exists(mode, seqhead->pc))
					UML_HASH(block, mode, seqhead->pc);                                 // hash    mode,pc

				/* if we already have a hash, and this is the first sequence, assume that we */
				/* are recompiling due to being out of sync and allow future overrides */
				else if (seqhead == desclist)
				{
					override = TRUE;
					UML_HASH(block, mode, seqhead->pc);                                 // hash    mode,pc
				}

				/* otherwise, redispatch to that fixed PC and skip the rest of the processing */
				else
				{
					/* 0x80000000 marks local (intra-block) label space */
					UML_LABEL(block, seqhead->pc | 0x80000000);                         // label   seqhead->pc | 0x80000000
					UML_HASHJMP(block, 0, seqhead->pc, *m_impstate.nocode);
																						// hashjmp <mode>,seqhead->pc,nocode
					continue;
				}

				/* validate this code block if we're not pointing into ROM */
				if (m_program->get_write_ptr(seqhead->physpc) != NULL)
					generate_checksum_block(block, &compiler, seqhead, seqlast);

				/* label this instruction, if it may be jumped to locally */
				if (seqhead->flags & OPFLAG_IS_BRANCH_TARGET)
					UML_LABEL(block, seqhead->pc | 0x80000000);                         // label   seqhead->pc | 0x80000000

				/* iterate over instructions in the sequence and compile them */
				for (curdesc = seqhead; curdesc != seqlast->next(); curdesc = curdesc->next())
					generate_sequence_instruction(block, &compiler, curdesc);

				/* if we need to return to the start, do it */
				if (seqlast->flags & OPFLAG_RETURN_TO_START)
					nextpc = pc;

				/* otherwise we just go to the next instruction */
				else
					nextpc = seqlast->pc + (seqlast->skipslots + 1) * 4;

				/* count off cycles and go there */
				generate_update_cycles(block, &compiler, nextpc);                   // <subtract cycles>

				/* if the last instruction can change modes, use a variable mode; otherwise, assume the same mode */
				/*if (seqlast->flags & OPFLAG_CAN_CHANGE_MODES)
				    UML_HASHJMP(block, uml::mem(&m_impstate.mode), nextpc, *m_impstate.nocode);
																						// hashjmp <mode>,nextpc,nocode
				else*/ if (seqlast->next() == NULL || seqlast->next()->pc != nextpc)
					UML_HASHJMP(block, m_impstate.mode, nextpc, *m_impstate.nocode);
																						// hashjmp <mode>,nextpc,nocode
			}

			/* end the sequence */
			block->end();
			g_profiler.stop();
			succeeded = true;
		}
		catch (drcuml_block::abort_compilation &)
		{
			/* out of cache space mid-block: flush everything and recompile */
			code_flush_cache();
		}
	}
}
| 450 | |
| 451 | |
| 452 | /*************************************************************************** |
| 453 | C FUNCTION CALLBACKS |
| 454 | ***************************************************************************/ |
| 455 | |
| 456 | /*------------------------------------------------- |
| 457 | cfunc_get_cycles - compute the total number |
| 458 | of cycles executed so far |
| 459 | -------------------------------------------------*/ |
| 460 | |
/* Callback for generated code: publishes the device's total executed cycle
   count where UML code can read it (exposed as the "numcycles" symbol). */
void arm7_cpu_device::cfunc_get_cycles()
{
	m_impstate.numcycles = total_cycles();
}
| 465 | |
| 466 | |
| 467 | /*------------------------------------------------- |
| 468 | cfunc_unimplemented - handler for |
| 469 | unimplemented opcdes |
| 470 | -------------------------------------------------*/ |
| 471 | |
| 472 | void arm7_cpu_device::cfunc_unimplemented() |
| 473 | { |
| 474 | UINT32 opcode = m_impstate.arg0; |
| 475 | fatalerror("PC=%08X: Unimplemented op %08X\n", m_r[eR15], opcode); |
| 476 | } |
| 477 | |
| 478 | |
| 479 | /*************************************************************************** |
| 480 | STATIC CODEGEN |
| 481 | ***************************************************************************/ |
| 482 | |
| 483 | /*------------------------------------------------- |
| 484 | static_generate_entry_point - generate a |
| 485 | static entry point |
| 486 | -------------------------------------------------*/ |
| 487 | |
| 488 | void arm7_cpu_device::static_generate_entry_point() |
| 489 | { |
| 490 | drcuml_state *drcuml = m_impstate.drcuml; |
| 491 | uml::code_label nodabt; |
| 492 | uml::code_label nofiq; |
| 493 | uml::code_label noirq; |
| 494 | uml::code_label irq32; |
| 495 | uml::code_label nopabd; |
| 496 | uml::code_label nound; |
| 497 | uml::code_label swi32; |
| 498 | uml::code_label irqadjust; |
| 499 | uml::code_label done; |
| 500 | drcuml_block *block; |
| 501 | |
| 502 | block = drcuml->begin_block(110); |
| 503 | |
| 504 | /* forward references */ |
| 505 | //alloc_handle(drcuml, &m_impstate.exception_norecover[EXCEPTION_INTERRUPT], "interrupt_norecover"); |
| 506 | alloc_handle(drcuml, &m_impstate.nocode, "nocode"); |
| 507 | alloc_handle(drcuml, &m_impstate.detect_fault, "detect_fault"); |
| 508 | alloc_handle(drcuml, &m_impstate.tlb_translate, "tlb_translate"); |
| 509 | |
| 510 | alloc_handle(drcuml, &m_impstate.entry, "entry"); |
| 511 | UML_HANDLE(block, *m_impstate.entry); // handle entry |
| 512 | |
| 513 | /* load fast integer registers */ |
| 514 | load_fast_iregs(block); |
| 515 | |
| 516 | UML_CALLH(block, *m_impstate.check_irq); |
| 517 | |
| 518 | /* generate a hash jump via the current mode and PC */ |
| 519 | UML_HASHJMP(block, 0, uml::mem(&m_pc), *m_impstate.nocode); // hashjmp 0,<pc>,nocode |
| 520 | block->end(); |
| 521 | } |
| 522 | |
| 523 | |
| 524 | /*------------------------------------------------- |
| 525 | static_generate_check_irq - generate a handler |
| 526 | to check IRQs |
| 527 | -------------------------------------------------*/ |
| 528 | |
| 529 | void arm7_cpu_device::static_generate_check_irq() |
| 530 | { |
| 531 | drcuml_state *drcuml = m_impstate.drcuml; |
| 532 | drcuml_block *block; |
| 533 | uml::code_label noirq; |
| 534 | int nodabt = 0; |
| 535 | int nopabt = 0; |
| 536 | int irqadjust = 0; |
| 537 | int nofiq = 0; |
| 538 | int irq32 = 0; |
| 539 | int swi32 = 0; |
| 540 | int done = 0; |
| 541 | int label = 1; |
| 542 | |
| 543 | /* begin generating */ |
| 544 | block = drcuml->begin_block(120); |
| 545 | |
| 546 | /* generate a hash jump via the current mode and PC */ |
| 547 | alloc_handle(drcuml, &m_impstate.check_irq, "check_irq"); |
| 548 | UML_HANDLE(block, *m_impstate.check_irq); // handle check_irq |
| 549 | /* Exception priorities: |
| 550 | |
| 551 | Reset |
| 552 | Data abort |
| 553 | FIRQ |
| 554 | IRQ |
| 555 | Prefetch abort |
| 556 | Undefined instruction |
| 557 | Software Interrupt |
| 558 | */ |
| 559 | |
| 560 | UML_ADD(block, uml::I0, uml::mem(&R15), 4); // add i0, PC, 4 ;insn pc |
| 561 | |
| 562 | // Data Abort |
| 563 | UML_TEST(block, uml::mem(&m_pendingAbtD), 1); // test pendingAbtD, 1 |
| 564 | UML_JMPc(block, uml::COND_Z, nodabt = label++); // jmpz nodabt |
| 565 | |
| 566 | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_ABT, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_ABT, 0, MODE_FLAG |
| 567 | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 568 | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 569 | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 570 | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 571 | UML_MOV(block, uml::mem(&R15), 0x00000010); // mov PC, 0x10 (Data Abort vector address) |
| 572 | UML_MOV(block, uml::mem(&m_pendingAbtD), 0); // mov pendingAbtD, 0 |
| 573 | UML_JMP(block, irqadjust = label++); // jmp irqadjust |
| 574 | |
| 575 | UML_LABEL(block, nodabt); // nodabt: |
| 576 | |
| 577 | // FIQ |
| 578 | UML_TEST(block, uml::mem(&m_pendingFiq), 1); // test pendingFiq, 1 |
| 579 | UML_JMPc(block, uml::COND_Z, nofiq = label++); // jmpz nofiq |
| 580 | UML_TEST(block, uml::mem(&GET_CPSR), F_MASK); // test CPSR, F_MASK |
| 581 | UML_JMPc(block, uml::COND_Z, nofiq); // jmpz nofiq |
| 582 | |
| 583 | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 584 | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 585 | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK | F_MASK); // or CPSR, CPSR, I_MASK | F_MASK |
| 586 | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 587 | UML_MOV(block, uml::mem(&R15), 0x0000001c); // mov PC, 0x1c (FIQ vector address) |
| 588 | UML_MOV(block, uml::mem(&m_pendingFiq), 0); // mov pendingFiq, 0 |
| 589 | UML_JMP(block, irqadjust); // jmp irqadjust |
| 590 | |
| 591 | UML_LABEL(block, nofiq); // nofiq: |
| 592 | |
| 593 | // IRQ |
| 594 | UML_TEST(block, uml::mem(&m_pendingIrq), 1); // test pendingIrq, 1 |
| 595 | UML_JMPc(block, uml::COND_Z, noirq = label++); // jmpz noirq |
| 596 | UML_TEST(block, uml::mem(&GET_CPSR), I_MASK); // test CPSR, I_MASK |
| 597 | UML_JMPc(block, uml::COND_Z, noirq); // jmpz noirq |
| 598 | |
| 599 | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 600 | UML_TEST(block, uml::mem(&GET_CPSR), SR_MODE32); // test CPSR, MODE32 |
| 601 | UML_JMPc(block, uml::COND_NZ, irq32 = label++); // jmpnz irq32 |
| 602 | UML_AND(block, uml::I1, uml::I0, 0xf4000000); // and i1, i0, 0xf4000000 |
| 603 | UML_OR(block, uml::mem(&R15), uml::I1, 0x0800001a); // or PC, i1, 0x0800001a |
| 604 | UML_AND(block, uml::I1, uml::mem(&GET_CPSR), 0x0fffff3f); // and i1, CPSR, 0x0fffff3f |
| 605 | UML_ROLAND(block, uml::I0, uml::mem(&R15), 32-20, 0x0000000c); // roland i0, R15, 32-20, 0x0000000c |
| 606 | UML_ROLINS(block, uml::I0, uml::mem(&R15), 0, 0xf0000000); // rolins i0, R15, 0, 0xf0000000 |
| 607 | UML_OR(block, uml::mem(&GET_CPSR), uml::I0, uml::I1); // or CPSR, i0, i1 |
| 608 | UML_JMP(block, irqadjust); // jmp irqadjust |
| 609 | |
| 610 | UML_LABEL(block, irq32); // irq32: |
| 611 | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 612 | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 613 | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 614 | UML_MOV(block, uml::mem(&R15), 0x00000018); // mov PC, 0x18 (IRQ vector address) |
| 615 | |
| 616 | UML_JMP(block, irqadjust); // jmp irqadjust |
| 617 | |
| 618 | UML_LABEL(block, noirq); // noirq: |
| 619 | |
| 620 | // Prefetch Abort |
| 621 | UML_TEST(block, uml::mem(&m_pendingAbtP), 1); // test pendingAbtP, 1 |
| 622 | UML_JMPc(block, uml::COND_Z, nopabt = label++); // jmpz nopabt |
| 623 | |
| 624 | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_ABT, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_ABT, 0, MODE_FLAG |
| 625 | UML_MOV(block, uml::mem(&GET_REGISTER(14)), uml::I0); // mov LR, i0 |
| 626 | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 627 | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 628 | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 629 | UML_MOV(block, uml::mem(&R15), 0x0000000c); // mov PC, 0x0c (Prefetch Abort vector address) |
| 630 | UML_MOV(block, uml::mem(&m_pendingAbtP), 0); // mov pendingAbtP, 0 |
| 631 | UML_JMP(block, irqadjust); // jmp irqadjust |
| 632 | |
| 633 | UML_LABEL(block, nopabt); // nopabt: |
| 634 | |
| 635 | // Undefined instruction |
| 636 | UML_TEST(block, uml::mem(&m_pendingUnd), 1); // test pendingUnd, 1 |
| 637 | UML_JMPc(block, uml::COND_Z, nopabt = label++); // jmpz nound |
| 638 | |
| 639 | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_UND, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_UND, 0, MODE_FLAG |
| 640 | UML_MOV(block, uml::I1, (UINT64)-4); // mov i1, -4 |
| 641 | UML_TEST(block, uml::mem(&GET_CPSR), T_MASK); // test CPSR, T_MASK |
| 642 | UML_MOVc(block, uml::COND_NZ, uml::I1, (UINT64)-2); // movnz i1, -2 |
| 643 | UML_ADD(block, uml::mem(&GET_REGISTER(14)), uml::I0, uml::I1); // add LR, i0, i1 |
| 644 | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 645 | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 646 | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 647 | UML_MOV(block, uml::mem(&R15), 0x00000004); // mov PC, 0x0c (Undefined Insn vector address) |
| 648 | UML_MOV(block, uml::mem(&m_pendingUnd), 0); // mov pendingUnd, 0 |
| 649 | UML_JMP(block, irqadjust); // jmp irqadjust |
| 650 | |
| 651 | UML_LABEL(block, nopabt); // nopabt: |
| 652 | |
| 653 | // Software Interrupt |
| 654 | UML_TEST(block, uml::mem(&m_pendingSwi), 1); // test pendingSwi, 1 |
| 655 | UML_JMPc(block, uml::COND_Z, done = label++); // jmpz done |
| 656 | |
| 657 | UML_ROLINS(block, uml::mem(&GET_CPSR), eARM7_MODE_SVC, 0, MODE_FLAG); // rolins CPSR, eARM7_MODE_SVC, 0, MODE_FLAG |
| 658 | UML_MOV(block, uml::I1, (UINT64)-4); // mov i1, -4 |
| 659 | UML_TEST(block, uml::mem(&GET_CPSR), T_MASK); // test CPSR, T_MASK |
| 660 | UML_MOVc(block, uml::COND_NZ, uml::I1, (UINT64)-2); // movnz i1, -2 |
| 661 | UML_ADD(block, uml::mem(&GET_REGISTER(14)), uml::I0, uml::I1); // add LR, i0, i1 |
| 662 | |
| 663 | UML_TEST(block, uml::mem(&GET_CPSR), SR_MODE32); // test CPSR, MODE32 |
| 664 | UML_JMPc(block, uml::COND_NZ, swi32 = label++); // jmpnz swi32 |
| 665 | UML_AND(block, uml::I1, uml::I0, 0xf4000000); // and i1, i0, 0xf4000000 |
| 666 | UML_OR(block, uml::mem(&R15), uml::I1, 0x0800001b); // or PC, i1, 0x0800001b |
| 667 | UML_AND(block, uml::I1, uml::mem(&GET_CPSR), 0x0fffff3f); // and i1, CPSR, 0x0fffff3f |
| 668 | UML_ROLAND(block, uml::I0, uml::mem(&R15), 32-20, 0x0000000c); // roland i0, R15, 32-20, 0x0000000c |
| 669 | UML_ROLINS(block, uml::I0, uml::mem(&R15), 0, 0xf0000000); // rolins i0, R15, 0, 0xf0000000 |
| 670 | UML_OR(block, uml::mem(&GET_CPSR), uml::I0, uml::I1); // or CPSR, i0, i1 |
| 671 | UML_MOV(block, uml::mem(&m_pendingSwi), 0); // mov pendingSwi, 0 |
| 672 | UML_JMP(block, irqadjust); // jmp irqadjust |
| 673 | |
| 674 | UML_LABEL(block, swi32); // irq32: |
| 675 | UML_MOV(block, uml::mem(&GET_REGISTER(SPSR)), uml::mem(&GET_CPSR)); // mov SPSR, CPSR |
| 676 | UML_OR(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), I_MASK); // or CPSR, CPSR, I_MASK |
| 677 | UML_ROLAND(block, uml::mem(&GET_CPSR), uml::mem(&GET_CPSR), 0, ~T_MASK); // roland CPSR, CPSR, 0, ~T_MASK |
| 678 | UML_MOV(block, uml::mem(&R15), 0x00000008); // mov PC, 0x08 (SWI vector address) |
| 679 | UML_MOV(block, uml::mem(&m_pendingSwi), 0); // mov pendingSwi, 0 |
| 680 | UML_JMP(block, irqadjust); // jmp irqadjust |
| 681 | |
| 682 | UML_LABEL(block, irqadjust); // irqadjust: |
| 683 | UML_MOV(block, uml::I1, 0); // mov i1, 0 |
| 684 | UML_TEST(block, uml::mem(&COPRO_CTRL), COPRO_CTRL_MMU_EN | COPRO_CTRL_INTVEC_ADJUST); // test COPRO_CTRL, MMU_EN | INTVEC_ADJUST |
| 685 | UML_MOVc(block, uml::COND_NZ, uml::I1, 0xffff0000); // movnz i1, 0xffff0000 |
| 686 | UML_OR(block, uml::mem(&R15), uml::mem(&R15), uml::I1); // or PC, i1 |
| 687 | |
| 688 | UML_LABEL(block, done); // done: |
| 689 | |
| 690 | block->end(); |
| 691 | }; |
| 692 | |
| 693 | /*------------------------------------------------- |
| 694 | static_generate_nocode_handler - generate an |
| 695 | exception handler for "out of code" |
| 696 | -------------------------------------------------*/ |
| 697 | |
/* Generate the "nocode" handler, hit when a hash jump finds no translation:
   records the target PC, saves fast registers, and exits the backend with
   EXECUTE_MISSING_CODE so execute_run_drc() can compile the block. */
void arm7_cpu_device::static_generate_nocode_handler()
{
	drcuml_state *drcuml = m_impstate.drcuml;
	drcuml_block *block;

	/* begin generating */
	block = drcuml->begin_block(10);

	/* generate a hash jump via the current mode and PC */
	alloc_handle(drcuml, &m_impstate.nocode, "nocode");
	UML_HANDLE(block, *m_impstate.nocode);                                  // handle  nocode
	UML_GETEXP(block, uml::I0);                                             // getexp  i0 (the PC that missed)
	UML_MOV(block, uml::mem(&R15), uml::I0);                                // mov     [pc],i0
	save_fast_iregs(block);
	UML_EXIT(block, EXECUTE_MISSING_CODE);                                  // exit    EXECUTE_MISSING_CODE

	block->end();
}
| 716 | |
| 717 | |
| 718 | /*------------------------------------------------- |
| 719 | static_generate_out_of_cycles - generate an |
| 720 | out of cycles exception handler |
| 721 | -------------------------------------------------*/ |
| 722 | |
/* Generate the "out of cycles" handler: records the resume PC, saves fast
   registers, and exits the backend with EXECUTE_OUT_OF_CYCLES, which ends
   the execute_run_drc() loop for this timeslice. */
void arm7_cpu_device::static_generate_out_of_cycles()
{
	drcuml_state *drcuml = m_impstate.drcuml;
	drcuml_block *block;

	/* begin generating */
	block = drcuml->begin_block(10);

	/* generate a hash jump via the current mode and PC */
	alloc_handle(drcuml, &m_impstate.out_of_cycles, "out_of_cycles");
	UML_HANDLE(block, *m_impstate.out_of_cycles);                           // handle  out_of_cycles
	UML_GETEXP(block, uml::I0);                                             // getexp  i0 (the PC to resume at)
	UML_MOV(block, uml::mem(&R15), uml::I0);                                // mov     <pc>,i0
	save_fast_iregs(block);
	UML_EXIT(block, EXECUTE_OUT_OF_CYCLES);                                 // exit    EXECUTE_OUT_OF_CYCLES

	block->end();
}
| 741 | |
| 742 | |
| 743 | /*------------------------------------------------------------------ |
| 744 | static_generate_tlb_translate |
| 745 | ------------------------------------------------------------------*/ |
| 746 | |
| 747 | void arm7_cpu_device::static_generate_detect_fault(uml::code_handle **handleptr) |
| 748 | { |
| 749 | /* on entry, flags are in I2, vaddr is in I3, desc_lvl1 is in I4, ap is in R5 */ |
| 750 | /* on exit, fault result is in I6 */ |
| 751 | drcuml_state *drcuml = m_impstate.drcuml; |
| 752 | drcuml_block *block; |
| 753 | int donefault = 0; |
| 754 | int checkuser = 0; |
| 755 | int label = 1; |
| 756 | |
| 757 | /* begin generating */ |
| 758 | block = drcuml->begin_block(1024); |
| 759 | |
| 760 | /* add a global entry for this */ |
| 761 | alloc_handle(drcuml, &m_impstate.detect_fault, "detect_fault"); |
| 762 | UML_HANDLE(block, *m_impstate.detect_fault); // handle detect_fault |
| 763 | |
| 764 | UML_ROLAND(block, uml::I6, uml::I4, 32-4, 0x0f<<1); // roland i6, i4, 32-4, 0xf<<1 |
| 765 | UML_ROLAND(block, uml::I6, uml::mem(&COPRO_DOMAIN_ACCESS_CONTROL), uml::I6, 3);// roland i6, COPRO_DOMAIN_ACCESS_CONTROL, i6, 3 |
| 766 | // if permission == 3, FAULT_NONE |
| 767 | UML_CMP(block, uml::I6, 3); // cmp i6, 3 |
| 768 | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 769 | UML_JMPc(block, uml::COND_E, donefault = label++); // jmpe donefault |
| 770 | // if permission == 0 || permission == 2, FAULT_DOMAIN |
| 771 | UML_CMP(block, uml::I6, 1); // cmp i6, 1 |
| 772 | UML_MOVc(block, uml::COND_NE, uml::I6, FAULT_DOMAIN); // movne i6, FAULT_DOMAIN |
| 773 | UML_JMPc(block, uml::COND_NE, donefault); // jmpne donefault |
| 774 | |
| 775 | // if permission == 1 |
| 776 | UML_CMP(block, uml::I5, 3); // cmp i5, 3 |
| 777 | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 778 | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 779 | UML_CMP(block, uml::I5, 0); // cmp i5, 1 |
| 780 | UML_JMPc(block, uml::COND_NE, checkuser = label++); // jmpne checkuser |
| 781 | UML_ROLAND(block, uml::I6, uml::mem(&COPRO_CTRL), // roland i6, COPRO_CTRL, 32 - COPRO_CTRL_SYSTEM_SHIFT, |
| 782 | 32 - COPRO_CTRL_SYSTEM_SHIFT, // COPRO_CTRL_SYSTEM | COPRO_CTRL_ROM |
| 783 | COPRO_CTRL_SYSTEM | COPRO_CTRL_ROM); |
| 784 | // if s == 0 && r == 0, FAULT_PERMISSION |
| 785 | UML_CMP(block, uml::I6, 0); // cmp i6, 0 |
| 786 | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 787 | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 788 | // if s == 1 && r == 1, FAULT_PERMISSION |
| 789 | UML_CMP(block, uml::I6, 3); // cmp i6, 3 |
| 790 | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 791 | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 792 | // if flags & TLB_WRITE, FAULT_PERMISSION |
| 793 | UML_TEST(block, uml::I2, ARM7_TLB_WRITE); // test i2, ARM7_TLB_WRITE |
| 794 | UML_MOVc(block, uml::COND_NZ, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 795 | UML_JMPc(block, uml::COND_NZ, donefault); // jmpe donefault |
| 796 | // if r == 1 && s == 0, FAULT_NONE |
| 797 | UML_CMP(block, uml::I6, 2); // cmp i6, 2 |
| 798 | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 799 | UML_JMPc(block, uml::COND_E, donefault); // jmpe donefault |
| 800 | UML_AND(block, uml::I6, uml::mem(&GET_CPSR), MODE_FLAG); // and i6, GET_CPSR, MODE_FLAG |
| 801 | UML_CMP(block, uml::I6, eARM7_MODE_USER); // cmp i6, eARM7_MODE_USER |
| 802 | // if r == 0 && s == 1 && usermode, FAULT_PERMISSION |
| 803 | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 804 | UML_MOVc(block, uml::COND_NE, uml::I6, FAULT_NONE); // movne i6, FAULT_NONE |
| 805 | UML_JMP(block, donefault); // jmp donefault |
| 806 | |
| 807 | UML_LABEL(block, checkuser); // checkuser: |
| 808 | // if !write, FAULT_NONE |
| 809 | UML_TEST(block, uml::I2, ARM7_TLB_WRITE); // test i2, ARM7_TLB_WRITE |
| 810 | UML_MOVc(block, uml::COND_Z, uml::I6, FAULT_NONE); // movz i6, FAULT_NONE |
| 811 | UML_JMPc(block, uml::COND_Z, donefault); // jmp donefault |
| 812 | UML_AND(block, uml::I6, uml::mem(&GET_CPSR), MODE_FLAG); // and i6, GET_CPSR, MODE_FLAG |
| 813 | UML_CMP(block, uml::I6, eARM7_MODE_USER); // cmp i6, eARM7_MODE_USER |
| 814 | UML_MOVc(block, uml::COND_E, uml::I6, FAULT_PERMISSION); // move i6, FAULT_PERMISSION |
| 815 | UML_MOVc(block, uml::COND_NE, uml::I6, FAULT_NONE); // move i6, FAULT_NONE |
| 816 | |
| 817 | UML_LABEL(block, donefault); // donefault: |
| 818 | UML_RET(block); // ret |
| 819 | } |
| 820 | |
| 821 | /*------------------------------------------------------------------ |
| 822 | static_generate_tlb_translate |
| 823 | ------------------------------------------------------------------*/ |
| 824 | |
| 825 | void arm7_cpu_device::static_generate_tlb_translate(uml::code_handle **handleptr) |
| 826 | { |
| 827 | /* on entry, address is in I0 and flags are in I2 */ |
| 828 | /* on exit, translated address is in I0 and success/failure is in I2 */ |
| 829 | /* routine trashes I4-I7 */ |
| 830 | drcuml_state *drcuml = m_impstate.drcuml; |
| 831 | drcuml_block *block; |
| 832 | uml::code_label smallfault; |
| 833 | uml::code_label smallprefetch; |
| 834 | int nopid = 0; |
| 835 | int nounmapped = 0; |
| 836 | int nounmapped2 = 0; |
| 837 | int nocoarse = 0; |
| 838 | int nofine = 0; |
| 839 | int nosection = 0; |
| 840 | int nolargepage = 0; |
| 841 | int nosmallpage = 0; |
| 842 | int notinypage = 0; |
| 843 | int handlefault = 0; |
| 844 | int level2 = 0; |
| 845 | int prefetch = 0; |
| 846 | int prefetch2 = 0; |
| 847 | int label = 1; |
| 848 | |
| 849 | /* begin generating */ |
| 850 | block = drcuml->begin_block(170); |
| 851 | |
| 852 | alloc_handle(drcuml, &m_impstate.tlb_translate, "tlb_translate"); |
| 853 | UML_HANDLE(block, *m_impstate.tlb_translate); // handle tlb_translate |
| 854 | |
| 855 | // I3: vaddr |
| 856 | UML_CMP(block, uml::I0, 32 * 1024 * 1024); // cmp i0, 32*1024*1024 |
| 857 | UML_JMPc(block, uml::COND_GE, nopid = label++); // jmpge nopid |
| 858 | UML_AND(block, uml::I3, uml::mem(&COPRO_FCSE_PID), 0xfe000000); // and i3, COPRO_FCSE_PID, 0xfe000000 |
| 859 | UML_ADD(block, uml::I3, uml::I3, uml::I0); // add i3, i3, i0 |
| 860 | |
| 861 | // I4: desc_lvl1 |
| 862 | UML_AND(block, uml::I4, uml::mem(&COPRO_TLB_BASE), COPRO_TLB_BASE_MASK); // and i4, COPRO_TLB_BASE, COPRO_TLB_BASE_MASK |
| 863 | UML_ROLINS(block, uml::I4, uml::I3, 32 - COPRO_TLB_VADDR_FLTI_MASK_SHIFT, // rolins i4, i3, 32-COPRO_TLB_VADDR_FLTI_MASK_SHIFT, |
| 864 | COPRO_TLB_VADDR_FLTI_MASK); // COPRO_TLB_VADDR_FLTI_MASK |
| 865 | UML_READ(block, uml::I4, uml::I4, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // read32 i4, i4, PROGRAM |
| 866 | |
| 867 | // I7: desc_lvl1 & 3 |
| 868 | UML_AND(block, uml::I7, uml::I4, 3); // and i7, i4, 3 |
| 869 | |
| 870 | UML_CMP(block, uml::I7, COPRO_TLB_UNMAPPED); // cmp i7, COPRO_TLB_UNMAPPED |
| 871 | UML_JMPc(block, uml::COND_NE, nounmapped = label++); // jmpne nounmapped |
| 872 | |
| 873 | // TLB Unmapped |
| 874 | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 875 | UML_MOVc(block, uml::COND_E, uml::mem(&COPRO_FAULT_STATUS_D), (5 << 0)); // move COPRO_FAULT_STATUS_D, (5 << 0) |
| 876 | UML_MOVc(block, uml::COND_E, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // move COPRO_FAULT_ADDRESS, i3 |
| 877 | UML_MOVc(block, uml::COND_E, uml::mem(&m_pendingAbtD), 1); // move pendingAbtD, 1 |
| 878 | UML_MOVc(block, uml::COND_E, uml::I2, 0); // move i2, 0 |
| 879 | UML_RETc(block, uml::COND_E); // rete |
| 880 | |
| 881 | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_P); // test i2, ARM7_TLB_ABORT_P |
| 882 | UML_MOVc(block, uml::COND_E, uml::mem(&m_pendingAbtP), 1); // move pendingAbtP, 1 |
| 883 | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 884 | UML_RET(block); // ret |
| 885 | |
| 886 | UML_LABEL(block, nounmapped); // nounmapped: |
| 887 | UML_CMP(block, uml::I7, COPRO_TLB_COARSE_TABLE); // cmp i7, COPRO_TLB_COARSE_TABLE |
| 888 | UML_JMPc(block, uml::COND_NE, nocoarse = label++); // jmpne nocoarse |
| 889 | |
| 890 | UML_ROLAND(block, uml::I5, uml::I4, 32-4, 0x0f<<1); // roland i5, i4, 32-4, 0xf<<1 |
| 891 | UML_ROLAND(block, uml::I5, uml::mem(&COPRO_DOMAIN_ACCESS_CONTROL), uml::I5, 3);// roland i5, COPRO_DOMAIN_ACCESS_CONTROL, i5, 3 |
| 892 | UML_CMP(block, uml::I5, 1); // cmp i5, 1 |
| 893 | UML_JMPc(block, uml::COND_E, level2 = label++); // jmpe level2 |
| 894 | UML_CMP(block, uml::I5, 3); // cmp i5, 3 |
| 895 | UML_JMPc(block, uml::COND_NE, nofine = label++); // jmpne nofine |
| 896 | UML_LABEL(block, level2); // level2: |
| 897 | |
| 898 | // I7: desc_level2 |
| 899 | UML_AND(block, uml::I7, uml::I4, COPRO_TLB_CFLD_ADDR_MASK); // and i7, i4, COPRO_TLB_CFLD_ADDR_MASK |
| 900 | UML_ROLINS(block, uml::I7, uml::I3, 32 - COPRO_TLB_VADDR_CSLTI_MASK_SHIFT,// rolins i7, i3, 32 - COPRO_TLB_VADDR_CSLTI_MASK_SHIFT |
| 901 | COPRO_TLB_VADDR_CSLTI_MASK); // COPRO_TLB_VADDR_CSLTI_MASK |
| 902 | UML_READ(block, uml::I7, uml::I7, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // read32 i7, i7, PROGRAM |
| 903 | UML_JMP(block, nofine); // jmp nofine |
| 904 | |
| 905 | UML_LABEL(block, nocoarse); // nocoarse: |
| 906 | UML_CMP(block, uml::I7, COPRO_TLB_SECTION_TABLE); // cmp i7, COPRO_TLB_SECTION_TABLE |
| 907 | UML_JMPc(block, uml::COND_NE, nosection = label++); // jmpne nosection |
| 908 | |
| 909 | UML_ROLAND(block, uml::I5, uml::I4, 32-10, 3); // roland i7, i4, 32-10, 3 |
| 910 | // result in I6 |
| 911 | UML_CALLH(block, *m_impstate.detect_fault); // callh detect_fault |
| 912 | UML_CMP(block, uml::I6, FAULT_NONE); // cmp i6, FAULT_NONE |
| 913 | UML_JMPc(block, uml::COND_NE, handlefault = label++); // jmpne handlefault |
| 914 | |
| 915 | // no fault, return translated address |
| 916 | UML_AND(block, uml::I0, uml::I3, ~COPRO_TLB_SECTION_PAGE_MASK); // and i0, i3, ~COPRO_TLB_SECTION_PAGE_MASK |
| 917 | UML_ROLINS(block, uml::I0, uml::I4, 0, COPRO_TLB_SECTION_PAGE_MASK); // rolins i0, i4, COPRO_TLB_SECTION_PAGE_MASK |
| 918 | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 919 | UML_RET(block); // ret |
| 920 | |
| 921 | UML_LABEL(block, handlefault); // handlefault: |
| 922 | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 923 | UML_JMPc(block, uml::COND_Z, prefetch = label++); // jmpz prefetch |
| 924 | UML_MOV(block, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // mov COPRO_FAULT_ADDRESS, i3 |
| 925 | UML_MOV(block, uml::mem(&m_pendingAbtD), 1); // mov m_pendingAbtD, 1 |
| 926 | UML_ROLAND(block, uml::I5, uml::I4, 31, 0xf0); // roland i5, i4, 31, 0xf0 |
| 927 | UML_CMP(block, uml::I6, FAULT_DOMAIN); // cmp i6, FAULT_DOMAIN |
| 928 | UML_MOVc(block, uml::COND_E, uml::I6, 9 << 0); // move i6, 9 << 0 |
| 929 | UML_MOVc(block, uml::COND_NE, uml::I6, 13 << 0); // movne i6, 13 << 0 |
| 930 | UML_OR(block, uml::mem(&COPRO_FAULT_STATUS_D), uml::I5, uml::I6); // or COPRO_FAULT_STATUS_D, i5, i6 |
| 931 | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 932 | UML_RET(block); // ret |
| 933 | |
| 934 | UML_LABEL(block, prefetch); // prefetch: |
| 935 | UML_MOV(block, uml::mem(&m_pendingAbtP), 1); // mov m_pendingAbtP, 1 |
| 936 | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 937 | UML_RET(block); // ret |
| 938 | |
| 939 | UML_LABEL(block, nosection); // nosection: |
| 940 | UML_CMP(block, uml::I7, COPRO_TLB_FINE_TABLE); // cmp i7, COPRO_TLB_FINE_TABLE |
| 941 | UML_JMPc(block, uml::COND_NE, nofine); // jmpne nofine |
| 942 | |
| 943 | // Not yet implemented |
| 944 | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 945 | UML_RET(block); // ret |
| 946 | |
| 947 | UML_LABEL(block, nofine); // nofine: |
| 948 | |
| 949 | // I7: desc_lvl2 |
| 950 | UML_AND(block, uml::I6, uml::I7, 3); // and i6, i7, 3 |
| 951 | UML_CMP(block, uml::I6, COPRO_TLB_UNMAPPED); // cmp i6, COPRO_TLB_UNMAPPED |
| 952 | UML_JMPc(block, uml::COND_NE, nounmapped2 = label++); // jmpne nounmapped2 |
| 953 | |
| 954 | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 955 | UML_JMPc(block, uml::COND_Z, prefetch2 = label++); // jmpz prefetch2 |
| 956 | UML_MOV(block, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // mov COPRO_FAULT_ADDRESS, i3 |
| 957 | UML_MOV(block, uml::mem(&m_pendingAbtD), 1); // mov m_pendingAbtD, 1 |
| 958 | UML_ROLAND(block, uml::I5, uml::I4, 31, 0xf0); // roland i5, i4, 31, 0xf0 |
| 959 | UML_OR(block, uml::I5, uml::I5, 7 << 0); // or i5, i5, 7 << 0 |
| 960 | UML_OR(block, uml::mem(&COPRO_FAULT_STATUS_D), uml::I5, uml::I6); // or COPRO_FAULT_STATUS_D, i5, i6 |
| 961 | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 962 | UML_RET(block); // ret |
| 963 | |
| 964 | UML_LABEL(block, prefetch2); // prefetch2: |
| 965 | UML_MOV(block, uml::mem(&m_pendingAbtP), 1); // mov m_pendingAbtP, 1 |
| 966 | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 967 | UML_RET(block); // ret |
| 968 | |
| 969 | UML_LABEL(block, nounmapped2); // nounmapped2: |
| 970 | UML_CMP(block, uml::I6, COPRO_TLB_LARGE_PAGE); // cmp i6, COPRO_TLB_LARGE_PAGE |
| 971 | UML_JMPc(block, uml::COND_NE, nolargepage = label++); // jmpne nolargepage |
| 972 | |
| 973 | UML_AND(block, uml::I0, uml::I3, ~COPRO_TLB_LARGE_PAGE_MASK); // and i0, i3, ~COPRO_TLB_LARGE_PAGE_MASK |
| 974 | UML_ROLINS(block, uml::I0, uml::I7, 0, COPRO_TLB_LARGE_PAGE_MASK); // rolins i0, i7, 0, COPRO_TLB_LARGE_PAGE_MASK |
| 975 | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 976 | UML_RET(block); // ret |
| 977 | |
| 978 | UML_LABEL(block, nolargepage); // nolargepage: |
| 979 | UML_CMP(block, uml::I6, COPRO_TLB_SMALL_PAGE); // cmp i6, COPRO_TLB_SMALL_PAGE |
| 980 | UML_JMPc(block, uml::COND_NE, nosmallpage = label++); // jmpne nosmallpage |
| 981 | |
| 982 | UML_ROLAND(block, uml::I5, uml::I3, 32-9, 3<<1); // roland i5, i3, 32-9, 3<<1 |
| 983 | UML_ROLAND(block, uml::I6, uml::I7, 32-4, 0xff); // roland i6, i7, 32-4, 0xff |
| 984 | UML_SHR(block, uml::I5, uml::I7, uml::I5); // shr i5, i7, i5 |
| 985 | UML_AND(block, uml::I5, uml::I5, 3); // and i5, i5, 3 |
| 986 | // result in I6 |
| 987 | UML_CALLH(block, *m_impstate.detect_fault); // callh detect_fault |
| 988 | |
| 989 | UML_CMP(block, uml::I6, FAULT_NONE); // cmp i6, FAULT_NONE |
| 990 | UML_JMPc(block, uml::COND_NE, smallfault = label++); // jmpne smallfault |
| 991 | UML_AND(block, uml::I0, uml::I7, COPRO_TLB_SMALL_PAGE_MASK); // and i0, i7, COPRO_TLB_SMALL_PAGE_MASK |
| 992 | UML_ROLINS(block, uml::I0, uml::I3, 0, ~COPRO_TLB_SMALL_PAGE_MASK); // rolins i0, i3, 0, ~COPRO_TLB_SMALL_PAGE_MASK |
| 993 | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 994 | UML_RET(block); // ret |
| 995 | |
| 996 | UML_LABEL(block, smallfault); // smallfault: |
| 997 | UML_TEST(block, uml::I2, ARM7_TLB_ABORT_D); // test i2, ARM7_TLB_ABORT_D |
| 998 | UML_JMPc(block, uml::COND_NZ, smallprefetch = label++); // jmpnz smallprefetch |
| 999 | UML_MOV(block, uml::mem(&COPRO_FAULT_ADDRESS), uml::I3); // mov COPRO_FAULT_ADDRESS, i3 |
| 1000 | UML_MOV(block, uml::mem(&m_pendingAbtD), 1); // mov pendingAbtD, 1 |
| 1001 | UML_CMP(block, uml::I6, FAULT_DOMAIN); // cmp i6, FAULT_DOMAIN |
| 1002 | UML_MOVc(block, uml::COND_E, uml::I5, 11 << 0); // move i5, 11 << 0 |
| 1003 | UML_MOVc(block, uml::COND_NE, uml::I5, 15 << 0); // movne i5, 15 << 0 |
| 1004 | UML_ROLINS(block, uml::I5, uml::I4, 31, 0xf0); // rolins i5, i4, 31, 0xf0 |
| 1005 | UML_MOV(block, uml::mem(&COPRO_FAULT_STATUS_D), uml::I5); // mov COPRO_FAULT_STATUS_D, i5 |
| 1006 | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 1007 | UML_RET(block); // ret |
| 1008 | |
| 1009 | UML_LABEL(block, smallprefetch); // smallprefetch: |
| 1010 | UML_MOV(block, uml::mem(&m_pendingAbtP), 1); // mov pendingAbtP, 1 |
| 1011 | UML_MOV(block, uml::I2, 0); // mov i2, 0 |
| 1012 | UML_RET(block); // ret |
| 1013 | |
| 1014 | UML_LABEL(block, nosmallpage); // nosmallpage: |
| 1015 | UML_CMP(block, uml::I6, COPRO_TLB_TINY_PAGE); // cmp i6, COPRO_TLB_TINY_PAGE |
| 1016 | UML_JMPc(block, uml::COND_NE, notinypage = label++); // jmpne notinypage |
| 1017 | |
| 1018 | UML_AND(block, uml::I0, uml::I3, ~COPRO_TLB_TINY_PAGE_MASK); // and i0, i3, ~COPRO_TLB_TINY_PAGE_MASK |
| 1019 | UML_ROLINS(block, uml::I0, uml::I7, 0, COPRO_TLB_TINY_PAGE_MASK); // rolins i0, i7, 0, COPRO_TLB_TINY_PAGE_MASK |
| 1020 | UML_MOV(block, uml::I2, 1); // mov i2, 1 |
| 1021 | UML_RET(block); // ret |
| 1022 | |
| 1023 | UML_LABEL(block, notinypage); // notinypage: |
| 1024 | UML_MOV(block, uml::I0, uml::I3); // mov i0, i3 |
| 1025 | UML_RET(block); // ret |
| 1026 | |
| 1027 | block->end(); |
| 1028 | } |
| 1029 | |
| 1030 | /*------------------------------------------------------------------ |
| 1031 | static_generate_memory_accessor |
| 1032 | ------------------------------------------------------------------*/ |
| 1033 | |
void arm7_cpu_device::static_generate_memory_accessor(int size, bool istlb, bool iswrite, const char *name, uml::code_handle **handleptr)
{
	/* Generates one memory accessor subroutine for the given byte size      */
	/* (1/2/4), direction (read/write) and TLB usage, storing the new        */
	/* handle through handleptr.                                             */
	/* on entry, address is in I0; data for writes is in I1, fetch type in I2 */
	/* on exit, read result is in I0 */
	/* routine trashes I0-I3 */
	drcuml_state *drcuml = m_impstate.drcuml;
	drcuml_block *block;
	//int tlbmiss = 0;
	int label = 1;

	/* begin generating */
	block = drcuml->begin_block(1024);

	/* add a global entry for this */
	alloc_handle(drcuml, handleptr, name);
	UML_HANDLE(block, **handleptr);                                             // handle *handleptr

	if (istlb)
	{
		/* route the address through tlb_translate when the MMU is enabled */
		UML_TEST(block, uml::mem(&COPRO_CTRL), COPRO_CTRL_MMU_EN);              // test COPRO_CTRL, COPRO_CTRL_MMU_EN
		if (iswrite)
		{
			UML_MOVc(block, uml::COND_NZ, uml::I3, ARM7_TLB_WRITE);             // movnz i3, ARM7_TLB_WRITE
		}
		else
		{
			UML_MOVc(block, uml::COND_NZ, uml::I3, ARM7_TLB_READ);              // movnz i3, ARM7_TLB_READ
		}
		// NOTE(review): I3 is only written when the MMU is enabled; with the
		// MMU off, stale I3 bits are OR'd into I2 here.  This appears benign
		// because I2 is documented as trashed and the translate call below is
		// skipped in that case -- confirm against drcuml semantics.
		UML_OR(block, uml::I2, uml::I2, uml::I3);                               // or i2, i2, i3
		UML_CALLHc(block, uml::COND_NZ, *m_impstate.tlb_translate);             // callhnz tlb_translate
	}

	/* general case: assume paging and perform a translation */
	/* fastram shortcut paths are only compiled when the debugger is disabled */
	if ((machine().debug_flags & DEBUG_FLAG_ENABLED) == 0)
	{
		for (int ramnum = 0; ramnum < ARM7_MAX_FASTRAM; ramnum++)
		{
			if (m_impstate.fastram[ramnum].base != NULL && (!iswrite || !m_impstate.fastram[ramnum].readonly))
			{
				/* fastbase is pre-biased so (fastbase + address) hits the host pointer directly */
				void *fastbase = (UINT8 *)m_impstate.fastram[ramnum].base - m_impstate.fastram[ramnum].start;
				UINT32 skip = label++;
				/* range-check the address against this fastram region */
				if (m_impstate.fastram[ramnum].end != 0xffffffff)
				{
					UML_CMP(block, uml::I0, m_impstate.fastram[ramnum].end);    // cmp i0, end
					UML_JMPc(block, uml::COND_A, skip);                         // ja skip
				}
				if (m_impstate.fastram[ramnum].start != 0x00000000)
				{
					UML_CMP(block, uml::I0, m_impstate.fastram[ramnum].start);  // cmp i0, fastram_start
					UML_JMPc(block, uml::COND_B, skip);                         // jb skip
				}

				if (!iswrite)
				{
					/* sub-dword reads xor the address to honor host byte order */
					if (size == 1)
					{
						UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? BYTE4_XOR_BE(0) : BYTE4_XOR_LE(0));
																				// xor i0, i0, bytexor
						UML_LOAD(block, uml::I0, fastbase, uml::I0, uml::SIZE_BYTE, uml::SCALE_x1); // load i0, fastbase, i0, byte
					}
					else if (size == 2)
					{
						UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? WORD_XOR_BE(0) : WORD_XOR_LE(0));
																				// xor i0, i0, wordxor
						UML_LOAD(block, uml::I0, fastbase, uml::I0, uml::SIZE_WORD, uml::SCALE_x1); // load i0, fastbase, i0, word_x1
					}
					else if (size == 4)
					{
						UML_LOAD(block, uml::I0, fastbase, uml::I0, uml::SIZE_DWORD, uml::SCALE_x1); // load i0, fastbase, i0, dword_x1
					}
					UML_RET(block);                                             // ret
				}
				else
				{
					/* sub-dword writes xor the address to honor host byte order */
					if (size == 1)
					{
						UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? BYTE4_XOR_BE(0) : BYTE4_XOR_LE(0));
																				// xor i0, i0, bytexor
						UML_STORE(block, fastbase, uml::I0, uml::I1, uml::SIZE_BYTE, uml::SCALE_x1); // store fastbase, i0, i1, byte
					}
					else if (size == 2)
					{
						UML_XOR(block, uml::I0, uml::I0, (m_endian == ENDIANNESS_BIG) ? WORD_XOR_BE(0) : WORD_XOR_LE(0));
																				// xor i0, i0, wordxor
						UML_STORE(block, fastbase, uml::I0, uml::I1, uml::SIZE_WORD, uml::SCALE_x1); // store fastbase, i0, i1, word_x1
					}
					else if (size == 4)
					{
						UML_STORE(block, fastbase, uml::I0, uml::I1, uml::SIZE_DWORD, uml::SCALE_x1); // store fastbase,i0,i1,dword_x1
					}
					UML_RET(block);                                             // ret
				}

				UML_LABEL(block, skip);                                         // skip:
			}
		}
	}

	/* fall back to the full memory system for anything not matched above */
	switch (size)
	{
		case 1:
			if (iswrite)
			{
				UML_WRITE(block, uml::I0, uml::I1, uml::SIZE_BYTE, uml::SPACE_PROGRAM); // write i0, i1, program_byte
			}
			else
			{
				UML_READ(block, uml::I0, uml::I0, uml::SIZE_BYTE, uml::SPACE_PROGRAM);  // read i0, i0, program_byte
			}
			break;

		case 2:
			if (iswrite)
			{
				UML_WRITE(block, uml::I0, uml::I1, uml::SIZE_WORD, uml::SPACE_PROGRAM); // write i0,i1,program_word
			}
			else
			{
				UML_READ(block, uml::I0, uml::I0, uml::SIZE_WORD, uml::SPACE_PROGRAM);  // read i0,i0,program_word
			}
			break;

		case 4:
			if (iswrite)
			{
				UML_WRITE(block, uml::I0, uml::I1, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // write i0,i1,program_dword
			}
			else
			{
				UML_READ(block, uml::I0, uml::I0, uml::SIZE_DWORD, uml::SPACE_PROGRAM); // read i0,i0,program_dword
			}
			break;
	}
	UML_RET(block);                                                             // ret

	block->end();
}
| 1171 | |
| 1172 | /*************************************************************************** |
| 1173 | CODE GENERATION |
| 1174 | ***************************************************************************/ |
| 1175 | |
| 1176 | /*------------------------------------------------- |
| 1177 | generate_update_cycles - generate code to |
| 1178 | subtract cycles from the icount and generate |
| 1179 | an exception if out |
| 1180 | -------------------------------------------------*/ |
| 1181 | |
| 1182 | void arm7_cpu_device::generate_update_cycles(drcuml_block *block, compiler_state *compiler, uml::parameter param) |
| 1183 | { |
| 1184 | /* check full interrupts if pending */ |
| 1185 | if (compiler->checkints) |
| 1186 | { |
| 1187 | uml::code_label skip; |
| 1188 | |
| 1189 | compiler->checkints = FALSE; |
| 1190 | UML_CALLH(block, *m_impstate.check_irq); |
| 1191 | } |
| 1192 | |
| 1193 | /* account for cycles */ |
| 1194 | if (compiler->cycles > 0) |
| 1195 | { |
| 1196 | UML_SUB(block, uml::mem(&m_icount), uml::mem(&m_icount), MAPVAR_CYCLES); // sub icount,icount,cycles |
| 1197 | UML_MAPVAR(block, MAPVAR_CYCLES, 0); // mapvar cycles,0 |
| 1198 | UML_EXHc(block, uml::COND_S, *m_impstate.out_of_cycles, param); // exh out_of_cycles,nextpc |
| 1199 | } |
| 1200 | compiler->cycles = 0; |
| 1201 | } |
| 1202 | |
| 1203 | |
| 1204 | /*------------------------------------------------- |
| 1205 | generate_checksum_block - generate code to |
| 1206 | validate a sequence of opcodes |
| 1207 | -------------------------------------------------*/ |
| 1208 | |
void arm7_cpu_device::generate_checksum_block(drcuml_block *block, compiler_state *compiler, const opcode_desc *seqhead, const opcode_desc *seqlast)
{
	/* Emits UML that re-reads the opcodes of the compiled sequence          */
	/* [seqhead..seqlast] and raises the nocode exception when their sum no  */
	/* longer matches what was captured at compile time (invalidates         */
	/* modified or swapped-out code).                                        */
	const opcode_desc *curdesc;
	if (LOG_UML)
	{
		block->append_comment("[Validation for %08X]", seqhead->pc);            // comment
	}

	/* loose verify or single instruction: just compare and fail */
	if (!(m_impstate.drcoptions & ARM7DRC_STRICT_VERIFY) || seqhead->next() == NULL)
	{
		if (!(seqhead->flags & OPFLAG_VIRTUAL_NOOP))
		{
			/* only the first opcode (plus its delay slot, if at a distinct
			   physical PC) is checked */
			UINT32 sum = seqhead->opptr.l[0];
			void *base = m_direct->read_decrypted_ptr(seqhead->physpc);
			UML_LOAD(block, uml::I0, base, 0, uml::SIZE_DWORD, uml::SCALE_x4);  // load i0,base,0,dword

			if (seqhead->delay.first() != NULL && seqhead->physpc != seqhead->delay.first()->physpc)
			{
				base = m_direct->read_decrypted_ptr(seqhead->delay.first()->physpc);
				UML_LOAD(block, uml::I1, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i1,base,dword
				UML_ADD(block, uml::I0, uml::I0, uml::I1);                      // add i0,i0,i1

				sum += seqhead->delay.first()->opptr.l[0];
			}

			UML_CMP(block, uml::I0, sum);                                       // cmp i0,opptr[0]
			UML_EXHc(block, uml::COND_NE, *m_impstate.nocode, epc(seqhead));    // exne nocode,seqhead->pc
		}
	}

	/* full verification; sum up everything */
	else
	{
		UINT32 sum = 0;
		void *base = m_direct->read_decrypted_ptr(seqhead->physpc);
		UML_LOAD(block, uml::I0, base, 0, uml::SIZE_DWORD, uml::SCALE_x4);      // load i0,base,0,dword
		sum += seqhead->opptr.l[0];
		for (curdesc = seqhead->next(); curdesc != seqlast->next(); curdesc = curdesc->next())
			if (!(curdesc->flags & OPFLAG_VIRTUAL_NOOP))
			{
				base = m_direct->read_decrypted_ptr(curdesc->physpc);
				UML_LOAD(block, uml::I1, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i1,base,dword
				UML_ADD(block, uml::I0, uml::I0, uml::I1);                      // add i0,i0,i1
				sum += curdesc->opptr.l[0];

				/* include the delay-slot opcode when it lives at a different
				   physical PC than the following instruction */
				if (curdesc->delay.first() != NULL && (curdesc == seqlast || (curdesc->next() != NULL && curdesc->next()->physpc != curdesc->delay.first()->physpc)))
				{
					base = m_direct->read_decrypted_ptr(curdesc->delay.first()->physpc);
					UML_LOAD(block, uml::I1, base, 0, uml::SIZE_DWORD, uml::SCALE_x4); // load i1,base,dword
					UML_ADD(block, uml::I0, uml::I0, uml::I1);                  // add i0,i0,i1
					sum += curdesc->delay.first()->opptr.l[0];
				}
			}
		UML_CMP(block, uml::I0, sum);                                           // cmp i0,sum
		UML_EXHc(block, uml::COND_NE, *m_impstate.nocode, epc(seqhead));        // exne nocode,seqhead->pc
	}
}
| 1267 | |
| 1268 | |
| 1269 | /*------------------------------------------------- |
| 1270 | generate_sequence_instruction - generate code |
| 1271 | for a single instruction in a sequence |
| 1272 | -------------------------------------------------*/ |
| 1273 | |
| 1274 | void arm7_cpu_device::generate_sequence_instruction(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1275 | { |
| 1276 | //offs_t expc; |
| 1277 | int hotnum; |
| 1278 | |
| 1279 | /* add an entry for the log */ |
| 1280 | // TODO FIXME |
| 1281 | // if (LOG_UML && !(desc->flags & OPFLAG_VIRTUAL_NOOP)) |
| 1282 | // log_add_disasm_comment(block, desc->pc, desc->opptr.l[0]); |
| 1283 | |
| 1284 | /* set the PC map variable */ |
| 1285 | //expc = (desc->flags & OPFLAG_IN_DELAY_SLOT) ? desc->pc - 3 : desc->pc; |
| 1286 | UML_MAPVAR(block, MAPVAR_PC, desc->pc); // mapvar PC,pc |
| 1287 | |
| 1288 | /* accumulate total cycles */ |
| 1289 | compiler->cycles += desc->cycles; |
| 1290 | |
| 1291 | /* update the icount map variable */ |
| 1292 | UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles |
| 1293 | |
| 1294 | /* is this a hotspot? */ |
| 1295 | for (hotnum = 0; hotnum < ARM7_MAX_HOTSPOTS; hotnum++) |
| 1296 | { |
| 1297 | if (m_impstate.hotspot[hotnum].pc != 0 && desc->pc == m_impstate.hotspot[hotnum].pc && desc->opptr.l[0] == m_impstate.hotspot[hotnum].opcode) |
| 1298 | { |
| 1299 | compiler->cycles += m_impstate.hotspot[hotnum].cycles; |
| 1300 | break; |
| 1301 | } |
| 1302 | } |
| 1303 | |
| 1304 | /* update the icount map variable */ |
| 1305 | UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles |
| 1306 | |
| 1307 | /* if we are debugging, call the debugger */ |
| 1308 | if ((machine().debug_flags & DEBUG_FLAG_ENABLED) != 0) |
| 1309 | { |
| 1310 | UML_MOV(block, uml::mem(&R15), desc->pc); // mov [pc],desc->pc |
| 1311 | save_fast_iregs(block); |
| 1312 | UML_DEBUG(block, desc->pc); // debug desc->pc |
| 1313 | } |
| 1314 | |
| 1315 | /* if we hit an unmapped address, fatal error */ |
| 1316 | if (desc->flags & OPFLAG_COMPILER_UNMAPPED) |
| 1317 | { |
| 1318 | UML_MOV(block, uml::mem(&R15), desc->pc); // mov R15,desc->pc |
| 1319 | save_fast_iregs(block); |
| 1320 | UML_EXIT(block, EXECUTE_UNMAPPED_CODE); // exit EXECUTE_UNMAPPED_CODE |
| 1321 | } |
| 1322 | |
| 1323 | /* otherwise, unless this is a virtual no-op, it's a regular instruction */ |
| 1324 | else if (!(desc->flags & OPFLAG_VIRTUAL_NOOP)) |
| 1325 | { |
| 1326 | /* compile the instruction */ |
| 1327 | if (!generate_opcode(block, compiler, desc)) |
| 1328 | { |
| 1329 | UML_MOV(block, uml::mem(&R15), desc->pc); // mov R15,desc->pc |
| 1330 | UML_MOV(block, uml::mem(&m_impstate.arg0), desc->opptr.l[0]); // mov [arg0],desc->opptr.l |
| 1331 | //UML_CALLC(block, cfunc_unimplemented, arm); // callc cfunc_unimplemented // TODO FIXME |
| 1332 | } |
| 1333 | } |
| 1334 | } |
| 1335 | |
| 1336 | |
| 1337 | /*------------------------------------------------------------------ |
| 1338 | generate_delay_slot_and_branch |
| 1339 | ------------------------------------------------------------------*/ |
| 1340 | |
| 1341 | void arm7_cpu_device::generate_delay_slot_and_branch(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT8 linkreg) |
| 1342 | { |
| 1343 | compiler_state compiler_temp = *compiler; |
| 1344 | |
| 1345 | /* update the cycles and jump through the hash table to the target */ |
| 1346 | if (desc->targetpc != BRANCH_TARGET_DYNAMIC) |
| 1347 | { |
| 1348 | generate_update_cycles(block, &compiler_temp, desc->targetpc); // <subtract cycles> |
| 1349 | UML_HASHJMP(block, 0, desc->targetpc, *m_impstate.nocode); |
| 1350 | // hashjmp 0,desc->targetpc,nocode |
| 1351 | } |
| 1352 | else |
| 1353 | { |
| 1354 | generate_update_cycles(block, &compiler_temp, uml::mem(&m_impstate.jmpdest)); |
| 1355 | // <subtract cycles> |
| 1356 | UML_HASHJMP(block, 0, uml::mem(&m_impstate.jmpdest), *m_impstate.nocode);// hashjmp 0,<rsreg>,nocode |
| 1357 | } |
| 1358 | |
| 1359 | /* update the label */ |
| 1360 | compiler->labelnum = compiler_temp.labelnum; |
| 1361 | |
| 1362 | /* reset the mapvar to the current cycles and account for skipped slots */ |
| 1363 | compiler->cycles += desc->skipslots; |
| 1364 | UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles |
| 1365 | } |
| 1366 | |
| 1367 | |
| 1368 | const arm7_cpu_device::drcarm7ops_ophandler arm7_cpu_device::drcops_handler[0x10] = |
| 1369 | { |
| 1370 | &arm7_cpu_device::drcarm7ops_0123, &arm7_cpu_device::drcarm7ops_0123, &arm7_cpu_device::drcarm7ops_0123, &arm7_cpu_device::drcarm7ops_0123, |
| 1371 | &arm7_cpu_device::drcarm7ops_4567, &arm7_cpu_device::drcarm7ops_4567, &arm7_cpu_device::drcarm7ops_4567, &arm7_cpu_device::drcarm7ops_4567, |
| 1372 | &arm7_cpu_device::drcarm7ops_89, &arm7_cpu_device::drcarm7ops_89, &arm7_cpu_device::drcarm7ops_ab, &arm7_cpu_device::drcarm7ops_ab, |
| 1373 | &arm7_cpu_device::drcarm7ops_cd, &arm7_cpu_device::drcarm7ops_cd, &arm7_cpu_device::drcarm7ops_e, &arm7_cpu_device::drcarm7ops_f, |
| 1374 | }; |
| 1375 | |
void arm7_cpu_device::saturate_qbit_overflow(drcuml_block *block)
{
	/* Emits UML that clamps the 64-bit value in I0 to the signed 32-bit     */
	/* range and ORs Q_MASK into CPSR if either clamp fired; trashes I1.     */
	UML_MOV(block, uml::I1, 0);
	// clamp values above INT32_MAX
	UML_DCMP(block, uml::I0, 0x000000007fffffffL);
	UML_MOVc(block, uml::COND_G, uml::I1, Q_MASK);
	// NOTE(review): this is a 32-bit conditional move into I0 ahead of the
	// 64-bit DCMP below -- the state of I0's upper half at that compare is
	// not obvious from this code; confirm against drcuml register-width
	// semantics that the low-saturation path cannot misfire after clamping.
	UML_MOVc(block, uml::COND_G, uml::I0, 0x7fffffff);
	// clamp values below INT32_MIN
	UML_DCMP(block, uml::I0, U64(0xffffffff80000000));
	UML_MOVc(block, uml::COND_L, uml::I1, Q_MASK);
	UML_MOVc(block, uml::COND_L, uml::I0, 0x80000000);
	// set the sticky Q (saturation) flag in CPSR if either clamp happened
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
}
| 1387 | |
| 1388 | bool arm7_cpu_device::drcarm7ops_0123(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 insn) |
| 1389 | { |
| 1390 | uml::code_label done; |
| 1391 | /* Branch and Exchange (BX) */ |
| 1392 | if ((insn & 0x0ffffff0) == 0x012fff10) // bits 27-4 == 000100101111111111110001 |
| 1393 | { |
| 1394 | UML_MOV(block, DRC_PC, DRC_REG(insn & 0x0f)); |
| 1395 | UML_TEST(block, DRC_PC, 1); |
| 1396 | UML_JMPc(block, uml::COND_Z, done = compiler->labelnum++); |
| 1397 | UML_OR(block, DRC_CPSR, DRC_CPSR, T_MASK); |
| 1398 | UML_AND(block, DRC_PC, DRC_PC, ~1); |
| 1399 | } |
| 1400 | else if ((insn & 0x0ff000f0) == 0x01600010) // CLZ - v5 |
| 1401 | { |
| 1402 | UINT32 rm = insn&0xf; |
| 1403 | UINT32 rd = (insn>>12)&0xf; |
| 1404 | |
| 1405 | UML_LZCNT(block, DRC_REG(rd), DRC_REG(rm)); |
| 1406 | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1407 | } |
| 1408 | else if ((insn & 0x0ff000f0) == 0x01000050) // QADD - v5 |
| 1409 | { |
| 1410 | UINT32 rm = insn&0xf; |
| 1411 | UINT32 rn = (insn>>16)&0xf; |
| 1412 | UINT32 rd = (insn>>12)&0xf; |
| 1413 | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1414 | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1415 | UML_DADD(block, uml::I0, uml::I0, uml::I1); |
| 1416 | saturate_qbit_overflow(block); |
| 1417 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1418 | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1419 | } |
| 1420 | else if ((insn & 0x0ff000f0) == 0x01400050) // QDADD - v5 |
| 1421 | { |
| 1422 | UINT32 rm = insn&0xf; |
| 1423 | UINT32 rn = (insn>>16)&0xf; |
| 1424 | UINT32 rd = (insn>>12)&0xf; |
| 1425 | |
| 1426 | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1427 | UML_DADD(block, uml::I0, uml::I1, uml::I1); |
| 1428 | saturate_qbit_overflow(block); |
| 1429 | |
| 1430 | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1431 | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1432 | UML_DADD(block, uml::I1, uml::I1, uml::I1); |
| 1433 | UML_DADD(block, uml::I0, uml::I0, uml::I1); |
| 1434 | saturate_qbit_overflow(block); |
| 1435 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1436 | |
| 1437 | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1438 | } |
| 1439 | else if ((insn & 0x0ff000f0) == 0x01200050) // QSUB - v5 |
| 1440 | { |
| 1441 | UINT32 rm = insn&0xf; |
| 1442 | UINT32 rn = (insn>>16)&0xf; |
| 1443 | UINT32 rd = (insn>>12)&0xf; |
| 1444 | |
| 1445 | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1446 | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1447 | UML_DSUB(block, uml::I0, uml::I0, uml::I1); |
| 1448 | saturate_qbit_overflow(block); |
| 1449 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1450 | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1451 | } |
| 1452 | else if ((insn & 0x0ff000f0) == 0x01600050) // QDSUB - v5 |
| 1453 | { |
| 1454 | UINT32 rm = insn&0xf; |
| 1455 | UINT32 rn = (insn>>16)&0xf; |
| 1456 | UINT32 rd = (insn>>12)&0xf; |
| 1457 | |
| 1458 | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1459 | UML_DADD(block, uml::I0, uml::I1, uml::I1); |
| 1460 | saturate_qbit_overflow(block); |
| 1461 | |
| 1462 | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1463 | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1464 | UML_DADD(block, uml::I1, uml::I1, uml::I1); |
| 1465 | UML_DSUB(block, uml::I0, uml::I0, uml::I1); |
| 1466 | saturate_qbit_overflow(block); |
| 1467 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1468 | |
| 1469 | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1470 | } |
| 1471 | else if ((insn & 0x0ff00090) == 0x01000080) // SMLAxy - v5 |
| 1472 | { |
| 1473 | UINT32 rm = insn&0xf; |
| 1474 | UINT32 rn = (insn>>8)&0xf; |
| 1475 | UINT32 rd = (insn>>16)&0xf; |
| 1476 | UINT32 ra = (insn>>12)&0xf; |
| 1477 | |
| 1478 | UML_MOV(block, uml::I0, DRC_REG(rm)); |
| 1479 | UML_MOV(block, uml::I1, DRC_REG(rn)); |
| 1480 | |
| 1481 | // select top and bottom halves of src1/src2 and sign extend if necessary |
| 1482 | if (insn & 0x20) |
| 1483 | { |
| 1484 | UML_SHR(block, uml::I0, uml::I0, 16); |
| 1485 | } |
| 1486 | UML_SEXT(block, uml::I0, uml::I0, uml::SIZE_WORD); |
| 1487 | |
| 1488 | if (insn & 0x40) |
| 1489 | { |
| 1490 | UML_SHR(block, uml::I1, uml::I1, 16); |
| 1491 | } |
| 1492 | UML_SEXT(block, uml::I0, uml::I0, uml::SIZE_WORD); |
| 1493 | |
| 1494 | // do the signed multiply |
| 1495 | UML_MULS(block, uml::I0, uml::I1, uml::I0, uml::I1); |
| 1496 | UML_DSHL(block, uml::I0, uml::I0, 32); |
| 1497 | UML_DOR(block, uml::I0, uml::I0, uml::I1); |
| 1498 | UML_MOV(block, uml::I1, DRC_REG(ra)); |
| 1499 | UML_DADD(block, uml::I0, uml::I0, uml::I1); |
| 1500 | // and the accumulate. NOTE: only the accumulate can cause an overflow, which is why we do it this way. |
| 1501 | saturate_qbit_overflow(block); |
| 1502 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1503 | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1504 | } |
| 1505 | else if ((insn & 0x0ff00090) == 0x01400080) // SMLALxy - v5 |
| 1506 | { |
| 1507 | UINT32 rm = insn&0xf; |
| 1508 | UINT32 rn = (insn>>8)&0xf; |
| 1509 | UINT32 rdh = (insn>>16)&0xf; |
| 1510 | UINT32 rdl = (insn>>12)&0xf; |
| 1511 | |
| 1512 | UML_DSEXT(block, uml::I0, DRC_REG(rm), uml::SIZE_DWORD); |
| 1513 | UML_DSEXT(block, uml::I1, DRC_REG(rn), uml::SIZE_DWORD); |
| 1514 | // do the signed multiply |
| 1515 | UML_DMULS(block, uml::I2, uml::I3, uml::I0, uml::I1); |
| 1516 | |
| 1517 | UML_MOV(block, uml::I0, DRC_REG(rdh)); |
| 1518 | UML_MOV(block, uml::I1, DRC_REG(rdl)); |
| 1519 | UML_DSHL(block, uml::I0, uml::I0, 32); |
| 1520 | UML_DOR(block, uml::I0, uml::I0, uml::I1); |
| 1521 | UML_DADD(block, uml::I0, uml::I0, uml::I2); |
| 1522 | UML_MOV(block, DRC_REG(rdl), uml::I0); |
| 1523 | UML_DSHR(block, uml::I0, uml::I0, 32); |
| 1524 | UML_MOV(block, DRC_REG(rdh), uml::I0); |
| 1525 | } |
| 1526 | else if ((insn & 0x0ff00090) == 0x01600080) // SMULxy - v5 |
| 1527 | { |
| 1528 | INT32 src1 = GET_REGISTER(insn&0xf); |
| 1529 | INT32 src2 = GET_REGISTER((insn>>8)&0xf); |
| 1530 | INT32 res; |
| 1531 | |
| 1532 | // select top and bottom halves of src1/src2 and sign extend if necessary |
| 1533 | if (insn & 0x20) |
| 1534 | { |
| 1535 | src1 >>= 16; |
| 1536 | } |
| 1537 | |
| 1538 | src1 &= 0xffff; |
| 1539 | if (src1 & 0x8000) |
| 1540 | { |
| 1541 | src1 |= 0xffff0000; |
| 1542 | } |
| 1543 | |
| 1544 | if (insn & 0x40) |
| 1545 | { |
| 1546 | src2 >>= 16; |
| 1547 | } |
| 1548 | |
| 1549 | src2 &= 0xffff; |
| 1550 | if (src2 & 0x8000) |
| 1551 | { |
| 1552 | src2 |= 0xffff0000; |
| 1553 | } |
| 1554 | |
| 1555 | res = src1 * src2; |
| 1556 | SET_REGISTER((insn>>16)&0xf, res); |
| 1557 | R15 += 4; |
| 1558 | } |
| 1559 | else if ((insn & 0x0ff000b0) == 0x012000a0) // SMULWy - v5 |
| 1560 | { |
| 1561 | INT32 src1 = GET_REGISTER(insn&0xf); |
| 1562 | INT32 src2 = GET_REGISTER((insn>>8)&0xf); |
| 1563 | INT64 res; |
| 1564 | |
| 1565 | if (insn & 0x40) |
| 1566 | { |
| 1567 | src2 >>= 16; |
| 1568 | } |
| 1569 | else |
| 1570 | { |
| 1571 | src2 &= 0xffff; |
| 1572 | if (src2 & 0x8000) |
| 1573 | { |
| 1574 | src2 |= 0xffff; |
| 1575 | } |
| 1576 | } |
| 1577 | |
| 1578 | res = (INT64)src1 * (INT64)src2; |
| 1579 | res >>= 16; |
| 1580 | SET_REGISTER((insn>>16)&0xf, (UINT32)res); |
| 1581 | } |
| 1582 | else if ((insn & 0x0ff000b0) == 0x01200080) // SMLAWy - v5 |
| 1583 | { |
| 1584 | INT32 src1 = GET_REGISTER(insn&0xf); |
| 1585 | INT32 src2 = GET_REGISTER((insn>>8)&0xf); |
| 1586 | INT32 src3 = GET_REGISTER((insn>>12)&0xf); |
| 1587 | INT64 res; |
| 1588 | |
| 1589 | if (insn & 0x40) |
| 1590 | { |
| 1591 | src2 >>= 16; |
| 1592 | } |
| 1593 | else |
| 1594 | { |
| 1595 | src2 &= 0xffff; |
| 1596 | if (src2 & 0x8000) |
| 1597 | { |
| 1598 | src2 |= 0xffff; |
| 1599 | } |
| 1600 | } |
| 1601 | |
| 1602 | res = (INT64)src1 * (INT64)src2; |
| 1603 | res >>= 16; |
| 1604 | |
| 1605 | // check for overflow and set the Q bit |
| 1606 | saturate_qbit_overflow((INT64)src3 + res); |
| 1607 | |
| 1608 | // do the real accumulate |
| 1609 | src3 += (INT32)res; |
| 1610 | |
| 1611 | // write the result back |
| 1612 | SET_REGISTER((insn>>16)&0xf, (UINT32)res); |
| 1613 | } |
| 1614 | else |
| 1615 | /* Multiply OR Swap OR Half Word Data Transfer */ |
| 1616 | if ((insn & 0x0e000000) == 0 && (insn & 0x80) && (insn & 0x10)) // bits 27-25=000 bit 7=1 bit 4=1 |
| 1617 | { |
| 1618 | /* Half Word Data Transfer */ |
| 1619 | if (insn & 0x60) // bits = 6-5 != 00 |
| 1620 | { |
| 1621 | HandleHalfWordDT(insn); |
| 1622 | } |
| 1623 | else |
| 1624 | /* Swap */ |
| 1625 | if (insn & 0x01000000) // bit 24 = 1 |
| 1626 | { |
| 1627 | HandleSwap(insn); |
| 1628 | } |
| 1629 | /* Multiply Or Multiply Long */ |
| 1630 | else |
| 1631 | { |
| 1632 | /* multiply long */ |
| 1633 | if (insn & 0x800000) // Bit 23 = 1 for Multiply Long |
| 1634 | { |
| 1635 | /* Signed? */ |
| 1636 | if (insn & 0x00400000) |
| 1637 | HandleSMulLong(insn); |
| 1638 | else |
| 1639 | HandleUMulLong(insn); |
| 1640 | } |
| 1641 | /* multiply */ |
| 1642 | else |
| 1643 | { |
| 1644 | HandleMul(insn); |
| 1645 | } |
| 1646 | R15 += 4; |
| 1647 | } |
| 1648 | } |
| 1649 | /* Data Processing OR PSR Transfer */ |
| 1650 | else if ((insn & 0x0c000000) == 0) // bits 27-26 == 00 - This check can only exist properly after Multiplication check above |
| 1651 | { |
| 1652 | /* PSR Transfer (MRS & MSR) */ |
| 1653 | if (((insn & 0x00100000) == 0) && ((insn & 0x01800000) == 0x01000000)) // S bit must be clear, and bit 24,23 = 10 |
| 1654 | { |
| 1655 | HandlePSRTransfer(insn); |
| 1656 | ARM7_ICOUNT += 2; // PSR only takes 1 - S Cycle, so we add + 2, since at end, we -3.. |
| 1657 | R15 += 4; |
| 1658 | } |
| 1659 | /* Data Processing */ |
| 1660 | else |
| 1661 | { |
| 1662 | HandleALU(insn); |
| 1663 | } |
| 1664 | } |
| 1665 | |
| 1666 | UML_LABEL(block, done); |
| 1667 | return true; |
| 1668 | } |
| 1669 | |
// Recompiler dispatch for ARM opcodes with dispatch nibble (bits 27-24) 4-7.
// Not yet implemented: returning false tells the caller no UML was emitted.
bool arm7_cpu_device::drcarm7ops_4567(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;   // stub
}
| 1674 | |
// Recompiler dispatch for ARM opcodes with dispatch nibble (bits 27-24) 8-9.
// Not yet implemented: returning false tells the caller no UML was emitted.
bool arm7_cpu_device::drcarm7ops_89(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;   // stub
}
| 1679 | |
// Recompiler dispatch for ARM opcodes with dispatch nibble (bits 27-24) a-b.
// Not yet implemented: returning false tells the caller no UML was emitted.
bool arm7_cpu_device::drcarm7ops_ab(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;   // stub
}
| 1684 | |
// Recompiler dispatch for ARM opcodes with dispatch nibble (bits 27-24) c-d.
// Not yet implemented: returning false tells the caller no UML was emitted.
bool arm7_cpu_device::drcarm7ops_cd(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;   // stub
}
| 1689 | |
// Recompiler dispatch for ARM opcodes with dispatch nibble (bits 27-24) e.
// Not yet implemented: returning false tells the caller no UML was emitted.
bool arm7_cpu_device::drcarm7ops_e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;   // stub
}
| 1694 | |
// Recompiler dispatch for ARM opcodes with dispatch nibble (bits 27-24) f.
// Not yet implemented: returning false tells the caller no UML was emitted.
bool arm7_cpu_device::drcarm7ops_f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc, UINT32 op)
{
	return false;   // stub
}
| 1699 | |
| 1700 | /*------------------------------------------------- |
| 1701 | generate_opcode - generate code for a specific |
| 1702 | opcode |
| 1703 | -------------------------------------------------*/ |
| 1704 | |
| 1705 | int arm7_cpu_device::generate_opcode(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1706 | { |
| 1707 | //int in_delay_slot = ((desc->flags & OPFLAG_IN_DELAY_SLOT) != 0); |
| 1708 | UINT32 op = desc->opptr.l[0]; |
| 1709 | UINT8 opswitch = op >> 26; |
| 1710 | uml::code_label skip; |
| 1711 | uml::code_label contdecode; |
| 1712 | uml::code_label unexecuted; |
| 1713 | |
| 1714 | if (T_IS_SET(GET_CPSR)) |
| 1715 | { |
| 1716 | // "In Thumb state, bit [0] is undefined and must be ignored. Bits [31:1] contain the PC." |
| 1717 | UML_AND(block, uml::I0, DRC_PC, ~1); |
| 1718 | } |
| 1719 | else |
| 1720 | { |
| 1721 | UML_AND(block, uml::I0, DRC_PC, ~3); |
| 1722 | } |
| 1723 | |
| 1724 | UML_TEST(block, uml::mem(&COPRO_CTRL), COPRO_CTRL_MMU_EN); // test COPRO_CTRL, COPRO_CTRL_MMU_EN |
| 1725 | UML_MOVc(block, uml::COND_NZ, uml::I2, ARM7_TLB_ABORT_P | ARM7_TLB_READ); // movnz i0, ARM7_TLB_ABORT_P | ARM7_TLB_READ |
| 1726 | UML_CALLHc(block, uml::COND_NZ, *m_impstate.tlb_translate); // callhnz tlb_translate); |
| 1727 | |
| 1728 | if (T_IS_SET(GET_CPSR)) |
| 1729 | { |
| 1730 | //UML_CALLH(block, *m_impstate.drcthumb[(op & 0xffc0) >> 6]); // callh drcthumb[op] // TODO FIXME |
| 1731 | return TRUE; |
| 1732 | } |
| 1733 | |
| 1734 | switch (op >> INSN_COND_SHIFT) |
| 1735 | { |
| 1736 | case COND_EQ: |
| 1737 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1738 | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1739 | break; |
| 1740 | case COND_NE: |
| 1741 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1742 | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1743 | break; |
| 1744 | case COND_CS: |
| 1745 | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1746 | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1747 | break; |
| 1748 | case COND_CC: |
| 1749 | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1750 | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1751 | break; |
| 1752 | case COND_MI: |
| 1753 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1754 | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1755 | break; |
| 1756 | case COND_PL: |
| 1757 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1758 | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1759 | break; |
| 1760 | case COND_VS: |
| 1761 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1762 | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1763 | break; |
| 1764 | case COND_VC: |
| 1765 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1766 | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1767 | break; |
| 1768 | case COND_HI: |
| 1769 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1770 | UML_JMPc(block, uml::COND_NZ, unexecuted = compiler->labelnum++); |
| 1771 | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1772 | UML_JMPc(block, uml::COND_Z, unexecuted = compiler->labelnum++); |
| 1773 | break; |
| 1774 | case COND_LS: |
| 1775 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1776 | UML_JMPc(block, uml::COND_NZ, contdecode = compiler->labelnum++); |
| 1777 | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1778 | UML_JMPc(block, uml::COND_Z, contdecode); |
| 1779 | UML_JMP(block, unexecuted); |
| 1780 | break; |
| 1781 | case COND_GE: |
| 1782 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1783 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1784 | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1785 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1786 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1787 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1788 | UML_CMP(block, uml::I0, uml::I1); |
| 1789 | UML_JMPc(block, uml::COND_NE, unexecuted); |
| 1790 | break; |
| 1791 | case COND_LT: |
| 1792 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1793 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1794 | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1795 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1796 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1797 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1798 | UML_CMP(block, uml::I0, uml::I1); |
| 1799 | UML_JMPc(block, uml::COND_E, unexecuted); |
| 1800 | break; |
| 1801 | case COND_GT: |
| 1802 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1803 | UML_JMPc(block, uml::COND_NZ, unexecuted); |
| 1804 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1805 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1806 | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1807 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1808 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1809 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1810 | UML_CMP(block, uml::I0, uml::I1); |
| 1811 | UML_JMPc(block, uml::COND_NE, unexecuted); |
| 1812 | break; |
| 1813 | case COND_LE: |
| 1814 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1815 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 1816 | UML_MOVc(block, uml::COND_NZ, uml::I0, 1); |
| 1817 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1818 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1819 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1820 | UML_CMP(block, uml::I0, uml::I1); |
| 1821 | UML_JMPc(block, uml::COND_NE, contdecode); |
| 1822 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1823 | UML_JMPc(block, uml::COND_Z, unexecuted); |
| 1824 | break; |
| 1825 | case COND_NV: |
| 1826 | UML_JMP(block, unexecuted); |
| 1827 | break; |
| 1828 | } |
| 1829 | |
| 1830 | UML_LABEL(block, contdecode); |
| 1831 | |
| 1832 | (this->*drcops_handler[(op & 0xF000000) >> 24])(block, compiler, desc, op); |
| 1833 | |
| 1834 | UML_LABEL(block, unexecuted); |
| 1835 | UML_ADD(block, DRC_PC, DRC_PC, 4); |
| 1836 | UML_ADD(block, MAPVAR_CYCLES, MAPVAR_CYCLES, 2); // add cycles, cycles, 2 |
| 1837 | |
| 1838 | UML_LABEL(block, skip); |
| 1839 | |
| 1840 | switch (opswitch) |
| 1841 | { |
| 1842 | /* ----- sub-groups ----- */ |
| 1843 | |
| 1844 | case 0x00: /* SPECIAL - MIPS I */ |
| 1845 | return TRUE; |
| 1846 | |
| 1847 | // TODO: FINISH ME |
| 1848 | } |
| 1849 | |
| 1850 | return FALSE; |
| 1851 | } |
trunk/src/emu/cpu/arm7/arm7tdrc.inc
| r0 | r28736 | |
| 1 | #include "emu.h" |
| 2 | #include "arm7core.h" |
| 3 | #include "arm7help.h" |
| 4 | |
| 5 | |
| 6 | const arm7_cpu_device::arm7thumb_drcophandler arm7_cpu_device::drcthumb_handler[0x40*0x10] = |
| 7 | { |
| 8 | // #define THUMB_SHIFT_R ((UINT16)0x0800) |
| 9 | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 10 | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 11 | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 12 | &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, &arm7_cpu_device::drctg00_0, |
| 13 | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 14 | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 15 | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 16 | &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, &arm7_cpu_device::drctg00_1, |
| 17 | // #define THUMB_INSN_ADDSUB ((UINT16)0x0800) // #define THUMB_ADDSUB_TYPE ((UINT16)0x0600) |
| 18 | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 19 | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 20 | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 21 | &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, &arm7_cpu_device::drctg01_0, |
| 22 | &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, &arm7_cpu_device::drctg01_10, |
| 23 | &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, &arm7_cpu_device::drctg01_11, |
| 24 | &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, &arm7_cpu_device::drctg01_12, |
| 25 | &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, &arm7_cpu_device::drctg01_13, |
| 26 | // #define THUMB_INSN_CMP ((UINT16)0x0800) |
| 27 | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 28 | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 29 | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 30 | &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, &arm7_cpu_device::drctg02_0, |
| 31 | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 32 | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 33 | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 34 | &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, &arm7_cpu_device::drctg02_1, |
| 35 | // #define THUMB_INSN_SUB ((UINT16)0x0800) |
| 36 | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 37 | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 38 | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 39 | &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, &arm7_cpu_device::drctg03_0, |
| 40 | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 41 | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 42 | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 43 | &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, &arm7_cpu_device::drctg03_1, |
| 44 | //#define THUMB_GROUP4_TYPE ((UINT16)0x0c00) //#define THUMB_ALUOP_TYPE ((UINT16)0x03c0) // #define THUMB_HIREG_OP ((UINT16)0x0300) // #define THUMB_HIREG_H ((UINT16)0x00c0) |
| 45 | &arm7_cpu_device::drctg04_00_00, &arm7_cpu_device::drctg04_00_01, &arm7_cpu_device::drctg04_00_02, &arm7_cpu_device::drctg04_00_03, &arm7_cpu_device::drctg04_00_04, &arm7_cpu_device::drctg04_00_05, &arm7_cpu_device::drctg04_00_06, &arm7_cpu_device::drctg04_00_07, |
| 46 | &arm7_cpu_device::drctg04_00_08, &arm7_cpu_device::drctg04_00_09, &arm7_cpu_device::drctg04_00_0a, &arm7_cpu_device::drctg04_00_0b, &arm7_cpu_device::drctg04_00_0c, &arm7_cpu_device::drctg04_00_0d, &arm7_cpu_device::drctg04_00_0e, &arm7_cpu_device::drctg04_00_0f, |
| 47 | &arm7_cpu_device::drctg04_01_00, &arm7_cpu_device::drctg04_01_01, &arm7_cpu_device::drctg04_01_02, &arm7_cpu_device::drctg04_01_03, &arm7_cpu_device::drctg04_01_10, &arm7_cpu_device::drctg04_01_11, &arm7_cpu_device::drctg04_01_12, &arm7_cpu_device::drctg04_01_13, |
| 48 | &arm7_cpu_device::drctg04_01_20, &arm7_cpu_device::drctg04_01_21, &arm7_cpu_device::drctg04_01_22, &arm7_cpu_device::drctg04_01_23, &arm7_cpu_device::drctg04_01_30, &arm7_cpu_device::drctg04_01_31, &arm7_cpu_device::drctg04_01_32, &arm7_cpu_device::drctg04_01_33, |
| 49 | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 50 | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 51 | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 52 | &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, &arm7_cpu_device::drctg04_0203, |
| 53 | //#define THUMB_GROUP5_TYPE ((UINT16)0x0e00) |
| 54 | &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, &arm7_cpu_device::drctg05_0, |
| 55 | &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, &arm7_cpu_device::drctg05_1, |
| 56 | &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, &arm7_cpu_device::drctg05_2, |
| 57 | &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, &arm7_cpu_device::drctg05_3, |
| 58 | &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, &arm7_cpu_device::drctg05_4, |
| 59 | &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, &arm7_cpu_device::drctg05_5, |
| 60 | &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, &arm7_cpu_device::drctg05_6, |
| 61 | &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, &arm7_cpu_device::drctg05_7, |
| 62 | //#define THUMB_LSOP_L ((UINT16)0x0800) |
| 63 | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 64 | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 65 | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 66 | &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, &arm7_cpu_device::drctg06_0, |
| 67 | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 68 | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 69 | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 70 | &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, &arm7_cpu_device::drctg06_1, |
| 71 | //#define THUMB_LSOP_L ((UINT16)0x0800) |
| 72 | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 73 | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 74 | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 75 | &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, &arm7_cpu_device::drctg07_0, |
| 76 | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 77 | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 78 | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 79 | &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, &arm7_cpu_device::drctg07_1, |
| 80 | // #define THUMB_HALFOP_L ((UINT16)0x0800) |
| 81 | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 82 | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 83 | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 84 | &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, &arm7_cpu_device::drctg08_0, |
| 85 | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 86 | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 87 | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 88 | &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, &arm7_cpu_device::drctg08_1, |
| 89 | // #define THUMB_STACKOP_L ((UINT16)0x0800) |
| 90 | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 91 | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 92 | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 93 | &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, &arm7_cpu_device::drctg09_0, |
| 94 | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 95 | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 96 | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 97 | &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, &arm7_cpu_device::drctg09_1, |
| 98 | // #define THUMB_RELADDR_SP ((UINT16)0x0800) |
| 99 | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 100 | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 101 | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 102 | &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, &arm7_cpu_device::drctg0a_0, |
| 103 | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 104 | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 105 | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 106 | &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, &arm7_cpu_device::drctg0a_1, |
| 107 | // #define THUMB_STACKOP_TYPE ((UINT16)0x0f00) |
| 108 | &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_0, &arm7_cpu_device::drctg0b_1, &arm7_cpu_device::drctg0b_1, &arm7_cpu_device::drctg0b_1, &arm7_cpu_device::drctg0b_1, |
| 109 | &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_2, &arm7_cpu_device::drctg0b_3, &arm7_cpu_device::drctg0b_3, &arm7_cpu_device::drctg0b_3, &arm7_cpu_device::drctg0b_3, |
| 110 | &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_4, &arm7_cpu_device::drctg0b_5, &arm7_cpu_device::drctg0b_5, &arm7_cpu_device::drctg0b_5, &arm7_cpu_device::drctg0b_5, |
| 111 | &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_6, &arm7_cpu_device::drctg0b_7, &arm7_cpu_device::drctg0b_7, &arm7_cpu_device::drctg0b_7, &arm7_cpu_device::drctg0b_7, |
| 112 | &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_8, &arm7_cpu_device::drctg0b_9, &arm7_cpu_device::drctg0b_9, &arm7_cpu_device::drctg0b_9, &arm7_cpu_device::drctg0b_9, |
| 113 | &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_a, &arm7_cpu_device::drctg0b_b, &arm7_cpu_device::drctg0b_b, &arm7_cpu_device::drctg0b_b, &arm7_cpu_device::drctg0b_b, |
| 114 | &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_c, &arm7_cpu_device::drctg0b_d, &arm7_cpu_device::drctg0b_d, &arm7_cpu_device::drctg0b_d, &arm7_cpu_device::drctg0b_d, |
| 115 | &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_e, &arm7_cpu_device::drctg0b_f, &arm7_cpu_device::drctg0b_f, &arm7_cpu_device::drctg0b_f, &arm7_cpu_device::drctg0b_f, |
| 116 | // #define THUMB_MULTLS ((UINT16)0x0800) |
| 117 | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 118 | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 119 | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 120 | &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, &arm7_cpu_device::drctg0c_0, |
| 121 | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 122 | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 123 | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 124 | &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, &arm7_cpu_device::drctg0c_1, |
| 125 | // #define THUMB_COND_TYPE ((UINT16)0x0f00) |
| 126 | &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_0, &arm7_cpu_device::drctg0d_1, &arm7_cpu_device::drctg0d_1, &arm7_cpu_device::drctg0d_1, &arm7_cpu_device::drctg0d_1, |
| 127 | &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_2, &arm7_cpu_device::drctg0d_3, &arm7_cpu_device::drctg0d_3, &arm7_cpu_device::drctg0d_3, &arm7_cpu_device::drctg0d_3, |
| 128 | &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_4, &arm7_cpu_device::drctg0d_5, &arm7_cpu_device::drctg0d_5, &arm7_cpu_device::drctg0d_5, &arm7_cpu_device::drctg0d_5, |
| 129 | &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_6, &arm7_cpu_device::drctg0d_7, &arm7_cpu_device::drctg0d_7, &arm7_cpu_device::drctg0d_7, &arm7_cpu_device::drctg0d_7, |
| 130 | &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_8, &arm7_cpu_device::drctg0d_9, &arm7_cpu_device::drctg0d_9, &arm7_cpu_device::drctg0d_9, &arm7_cpu_device::drctg0d_9, |
| 131 | &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_a, &arm7_cpu_device::drctg0d_b, &arm7_cpu_device::drctg0d_b, &arm7_cpu_device::drctg0d_b, &arm7_cpu_device::drctg0d_b, |
| 132 | &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_c, &arm7_cpu_device::drctg0d_d, &arm7_cpu_device::drctg0d_d, &arm7_cpu_device::drctg0d_d, &arm7_cpu_device::drctg0d_d, |
| 133 | &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_e, &arm7_cpu_device::drctg0d_f, &arm7_cpu_device::drctg0d_f, &arm7_cpu_device::drctg0d_f, &arm7_cpu_device::drctg0d_f, |
| 134 | // #define THUMB_BLOP_LO ((UINT16)0x0800) |
| 135 | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 136 | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 137 | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 138 | &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, &arm7_cpu_device::drctg0e_0, |
| 139 | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 140 | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 141 | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 142 | &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, &arm7_cpu_device::drctg0e_1, |
| 143 | // #define THUMB_BLOP_LO ((UINT16)0x0800) |
| 144 | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 145 | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 146 | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 147 | &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, &arm7_cpu_device::drctg0f_0, |
| 148 | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 149 | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 150 | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 151 | &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, &arm7_cpu_device::drctg0f_1, |
| 152 | }; |
| 153 | |
| 154 | /* Shift operations */ |
| 155 | |
void arm7_cpu_device::drctg00_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Shift left */
{
	// Thumb LSL Rd, Rs, #imm: emit UML ops computing Rd = Rs << imm, the
	// carry-out (last bit shifted out), and the N/Z flags, then advance PC by 2.
	UINT32 op = desc->opptr.l[0];
	UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;   // source register index (referenced by the DRC_RS macro)
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;   // destination register index (referenced by the DRC_RD macro)
	INT32 offs = (op & THUMB_SHIFT_AMT) >> THUMB_SHIFT_AMT_SHIFT;  // immediate shift amount, 0..31

	UML_MOV(block, uml::I0, DRC_RS); // rrs — keep the unshifted source for the carry test
	if (offs != 0)
	{
		UML_SHL(block, DRC_RD, DRC_RS, offs);
		UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK);
		// LSL carry-out is bit (32 - offs) of the original source, i.e. 31 - (offs - 1)
		UML_TEST(block, uml::I0, 1 << (31 - (offs - 1)));
		UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
		UML_MOVc(block, uml::COND_Z, uml::I1, 0);
		UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
	}
	else
	{
		// LSL #0: plain move, carry untouched
		UML_MOV(block, DRC_RD, DRC_RS);
	}
	UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK));
	DRCHandleALUNZFlags(DRC_RD);                       // presumably leaves the computed N/Z bits in I0 — see macro definition
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
	UML_ADD(block, DRC_PC, DRC_PC, 2);                 // Thumb instructions are 2 bytes
}
| 182 | |
| 183 | void arm7_cpu_device::drctg00_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Shift right */ |
| 184 | { |
| 185 | UINT32 op = desc->opptr.l[0]; |
| 186 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 187 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 188 | INT32 offs = (op & THUMB_SHIFT_AMT) >> THUMB_SHIFT_AMT_SHIFT; |
| 189 | |
| 190 | UML_MOV(block, uml::I0, DRC_RS); // rrs |
| 191 | if (offs != 0) |
| 192 | { |
| 193 | UML_SHR(block, DRC_RD, DRC_RS, offs); |
| 194 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK); |
| 195 | UML_TEST(block, uml::I0, 1 << (31 - (offs - 1))); |
| 196 | UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK); |
| 197 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 198 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1); |
| 199 | } |
| 200 | else |
| 201 | { |
| 202 | UML_MOV(block, DRC_RD, 0); |
| 203 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK); |
| 204 | UML_TEST(block, uml::I0, 0x80000000); |
| 205 | UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK); |
| 206 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 207 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1); |
| 208 | } |
| 209 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 210 | DRCHandleALUNZFlags(DRC_RD); |
| 211 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 212 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 213 | } |
| 214 | |
| 215 | /* Arithmetic */ |
| 216 | |
void arm7_cpu_device::drctg01_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc)
{
	// Thumb ASR Rd, Rs, #imm: arithmetic shift right by an immediate amount.
	// An encoded amount of 0 means ASR #32.  Emits UML ops computing the result,
	// the carry-out, and the N/Z flags, then advances PC by 2.
	UINT32 op = desc->opptr.l[0];
	UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;   // source register index (referenced by the DRC_RS macro)
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;   // destination register index (referenced by the DRC_RD macro)
	INT32 offs = (op & THUMB_SHIFT_AMT) >> THUMB_SHIFT_AMT_SHIFT;

	/* ASR.. */
	UML_MOV(block, uml::I0, DRC_RS);   // keep the unshifted source for sign/carry tests
	if (offs == 0)
	{
		// encoded amount 0 means shift by 32
		offs = 32;
	}
	if (offs >= 32)
	{
		// ASR #32: result is all copies of the sign bit; carry = bit 31 of the source
		UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK);
		UML_SHR(block, uml::I1, uml::I0, 31);          // I1 = sign bit (0 or 1)
		UML_TEST(block, uml::I1, ~0);
		UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
		UML_MOVc(block, uml::COND_Z, uml::I1, 0);
		UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
		UML_TEST(block, uml::I0, 0x80000000);
		UML_MOVc(block, uml::COND_NZ, DRC_RD, ~0);     // negative source -> 0xffffffff
		UML_MOVc(block, uml::COND_Z, DRC_RD, 0);       // non-negative source -> 0
	}
	else
	{
		// 1 <= offs <= 31: carry = bit (offs - 1) of the source
		UML_AND(block, DRC_CPSR, DRC_CPSR, ~C_MASK);
		UML_TEST(block, uml::I0, 1 << (offs - 1));
		UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
		UML_MOVc(block, uml::COND_Z, uml::I1, 0);
		UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
		// build the arithmetic shift from a logical shift plus a sign-extension mask
		UML_SHR(block, uml::I1, uml::I0, offs);        // logical part
		UML_SHL(block, uml::I2, ~0, 32 - offs);        // mask of the sign-extension bits
		UML_TEST(block, uml::I0, 0x80000000);
		UML_MOVc(block, uml::COND_Z, uml::I2, 0);      // drop the mask for non-negative sources
		UML_OR(block, DRC_RD, uml::I1, uml::I2);
	}
	UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK));
	DRCHandleALUNZFlags(DRC_RD);                       // leaves the computed N/Z bits in I0
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 260 | |
| 261 | void arm7_cpu_device::drctg01_10(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 262 | { |
| 263 | UINT32 op = desc->opptr.l[0]; |
| 264 | UINT32 rn = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 265 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 266 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 267 | UML_ADD(block, DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 268 | DRCHandleThumbALUAddFlags(DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 269 | } |
| 270 | |
| 271 | void arm7_cpu_device::drctg01_11(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SUB Rd, Rs, Rn */ |
| 272 | { |
| 273 | UINT32 op = desc->opptr.l[0]; |
| 274 | UINT32 rn = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 275 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 276 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 277 | UML_SUB(block, DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 278 | DRCHandleThumbALUSubFlags(DRC_REG(rd), DRC_REG(rs), DRC_REG(rn)); |
| 279 | } |
| 280 | |
| 281 | void arm7_cpu_device::drctg01_12(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, Rs, #imm */ |
| 282 | { |
| 283 | UINT32 op = desc->opptr.l[0]; |
| 284 | UINT32 imm = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 285 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 286 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 287 | UML_ADD(block, DRC_REG(rd), DRC_REG(rs), imm); |
| 288 | DRCHandleThumbALUAddFlags(DRC_REG(rd), DRC_REG(rs), imm); |
| 289 | } |
| 290 | |
| 291 | void arm7_cpu_device::drctg01_13(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SUB Rd, Rs, #imm */ |
| 292 | { |
| 293 | UINT32 op = desc->opptr.l[0]; |
| 294 | UINT32 imm = (op & THUMB_ADDSUB_RNIMM) >> THUMB_ADDSUB_RNIMM_SHIFT; |
| 295 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 296 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 297 | UML_SUB(block, DRC_REG(rd), DRC_REG(rs), imm); |
| 298 | DRCHandleThumbALUSubFlags(DRC_REG(rd), DRC_REG(rs), imm); |
| 299 | } |
| 300 | |
| 301 | /* CMP / MOV */ |
| 302 | |
| 303 | void arm7_cpu_device::drctg02_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 304 | { |
| 305 | UINT32 op = desc->opptr.l[0]; |
| 306 | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 307 | UINT32 op2 = (op & THUMB_INSN_IMM); |
| 308 | UML_MOV(block, DRC_REG(rd), op2); |
| 309 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 310 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 311 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 312 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 313 | } |
| 314 | |
| 315 | void arm7_cpu_device::drctg02_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 316 | { |
| 317 | UINT32 op = desc->opptr.l[0]; |
| 318 | UINT32 rn = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 319 | UINT32 op2 = op & THUMB_INSN_IMM; |
| 320 | |
| 321 | UML_SUB(block, uml::I3, DRC_REG(rn), op2); |
| 322 | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rn), op2); |
| 323 | } |
| 324 | |
| 325 | /* ADD/SUB immediate */ |
| 326 | |
| 327 | void arm7_cpu_device::drctg03_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, #Offset8 */ |
| 328 | { |
| 329 | UINT32 op = desc->opptr.l[0]; |
| 330 | UINT32 rn = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 331 | UINT32 op2 = op & THUMB_INSN_IMM; |
| 332 | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 333 | UML_ADD(block, DRC_REG(rd), DRC_REG(rn), op2); |
| 334 | DRCHandleThumbALUAddFlags(DRC_REG(rd), DRC_REG(rn), op2); |
| 335 | } |
| 336 | |
| 337 | void arm7_cpu_device::drctg03_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SUB Rd, #Offset8 */ |
| 338 | { |
| 339 | UINT32 op = desc->opptr.l[0]; |
| 340 | UINT32 rn = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 341 | UINT32 op2 = op & THUMB_INSN_IMM; |
| 342 | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 343 | UML_SUB(block, DRC_REG(rd), DRC_REG(rn), op2); |
| 344 | DRCHandleThumbALUSubFlags(DRC_REG(rd), DRC_REG(rn), op2); |
| 345 | } |
| 346 | |
| 347 | /* Rd & Rm instructions */ |
| 348 | |
| 349 | void arm7_cpu_device::drctg04_00_00(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* AND Rd, Rs */ |
| 350 | { |
| 351 | UINT32 op = desc->opptr.l[0]; |
| 352 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 353 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 354 | UML_AND(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs)); |
| 355 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 356 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 357 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 358 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 359 | } |
| 360 | |
| 361 | void arm7_cpu_device::drctg04_00_01(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* EOR Rd, Rs */ |
| 362 | { |
| 363 | UINT32 op = desc->opptr.l[0]; |
| 364 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 365 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 366 | UML_XOR(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs)); |
| 367 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 368 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 369 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 370 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 371 | } |
| 372 | |
| 373 | void arm7_cpu_device::drctg04_00_02(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LSL Rd, Rs */ |
| 374 | { |
| 375 | UINT32 op = desc->opptr.l[0]; |
| 376 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 377 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 378 | uml::code_label skip; |
| 379 | uml::code_label offsg32; |
| 380 | uml::code_label offs32; |
| 381 | |
| 382 | UML_AND(block, uml::I1, DRC_REG(rs), 0xff); |
| 383 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK)); |
| 384 | |
| 385 | UML_CMP(block, uml::I1, 0); |
| 386 | UML_JMPc(block, uml::COND_E, skip = compiler->labelnum++); |
| 387 | |
| 388 | UML_CMP(block, uml::I1, 32); |
| 389 | UML_JMPc(block, uml::COND_A, offsg32 = compiler->labelnum++); |
| 390 | UML_JMPc(block, uml::COND_E, offs32 = compiler->labelnum++); |
| 391 | |
| 392 | UML_SHL(block, DRC_REG(rd), DRC_REG(rd), uml::I1); |
| 393 | UML_SUB(block, uml::I1, uml::I1, 1); |
| 394 | UML_SUB(block, uml::I1, 31, uml::I1); |
| 395 | UML_SHL(block, uml::I1, 1, uml::I1); |
| 396 | UML_TEST(block, DRC_REG(rd), uml::I1); |
| 397 | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 398 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 399 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 400 | UML_JMP(block, skip); |
| 401 | |
| 402 | UML_LABEL(block, offs32); |
| 403 | UML_TEST(block, DRC_REG(rd), 1); |
| 404 | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 405 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 406 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 407 | UML_MOV(block, DRC_REG(rd), 0); |
| 408 | UML_JMP(block, skip); |
| 409 | |
| 410 | UML_LABEL(block, offsg32); |
| 411 | UML_MOV(block, DRC_REG(rd), 0); |
| 412 | |
| 413 | UML_LABEL(block, skip); |
| 414 | |
| 415 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 416 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 417 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 418 | } |
| 419 | |
| 420 | void arm7_cpu_device::drctg04_00_03(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LSR Rd, Rs */ |
| 421 | { |
| 422 | UINT32 op = desc->opptr.l[0]; |
| 423 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 424 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 425 | uml::code_label skip; |
| 426 | uml::code_label offsg32; |
| 427 | uml::code_label offs32; |
| 428 | |
| 429 | UML_AND(block, uml::I1, DRC_REG(rs), 0xff); |
| 430 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK)); |
| 431 | UML_CMP(block, uml::I1, 0); |
| 432 | UML_JMPc(block, uml::COND_E, skip = compiler->labelnum++); |
| 433 | |
| 434 | UML_CMP(block, uml::I1, 32); |
| 435 | UML_JMPc(block, uml::COND_A, offsg32 = compiler->labelnum++); |
| 436 | UML_JMPc(block, uml::COND_E, offs32 = compiler->labelnum++); |
| 437 | |
| 438 | UML_SHR(block, DRC_REG(rd), DRC_REG(rd), uml::I1); |
| 439 | UML_SUB(block, uml::I1, uml::I1, 1); // WP: TODO, Check this used to be "block, I1, 1" |
| 440 | UML_SHL(block, uml::I1, 1, uml::I1); |
| 441 | UML_TEST(block, DRC_REG(rd), uml::I1); |
| 442 | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 443 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 444 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 445 | UML_JMP(block, skip); |
| 446 | |
| 447 | UML_LABEL(block, offs32); |
| 448 | UML_TEST(block, DRC_REG(rd), 0x80000000); |
| 449 | UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK); |
| 450 | UML_MOVc(block, uml::COND_Z, uml::I0, 0); |
| 451 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 452 | UML_MOV(block, DRC_REG(rd), 0); |
| 453 | UML_JMP(block, skip); |
| 454 | |
| 455 | UML_LABEL(block, offsg32); |
| 456 | UML_MOV(block, DRC_REG(rd), 0); |
| 457 | |
| 458 | UML_LABEL(block, skip); |
| 459 | |
| 460 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 461 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 462 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 463 | } |
| 464 | |
void arm7_cpu_device::drctg04_00_04(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ASR Rd, Rs */
{
	// Thumb ASR Rd, Rs (register-count shift): arithmetic shift of Rd by the low
	// 8 bits of Rs, updating N/Z and the carry-out.
	UINT32 op = desc->opptr.l[0];
	UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;
	uml::code_label skip;
	uml::code_label offsg32;   // NOTE(review): declared but never assigned/used in this handler
	uml::code_label offs32;

	UML_MOV(block, uml::I0, DRC_REG(rd));          // I0 = original Rd, for sign/carry tests
	UML_AND(block, uml::I1, DRC_REG(rs), 0xff);    // I1 = shift count n
	UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK));
	// n == 0: skip straight to the N/Z recomputation.
	// NOTE(review): C was already cleared above, but a zero-count ASR should
	// leave C unchanged — confirm against the interpreter core.
	UML_CMP(block, uml::I1, 0);
	UML_JMPc(block, uml::COND_E, skip = compiler->labelnum++);

	UML_SHR(block, uml::I2, uml::I0, uml::I1);     // logical part of the shift
	UML_SUB(block, uml::I1, 32, uml::I1);          // I1 = 32 - n (count is clobbered here)
	UML_SHL(block, uml::I1, ~0, uml::I1);          // I1 = sign-extension mask ~0 << (32 - n)
	UML_TEST(block, uml::I0, 0x80000000);
	UML_MOVc(block, uml::COND_NZ, DRC_REG(rd), uml::I1);   // negative source: start from the mask
	UML_MOVc(block, uml::COND_Z, DRC_REG(rd), 0);
	UML_OR(block, DRC_REG(rd), DRC_REG(rd), uml::I2);
	// NOTE(review): this branch appears intended to separate n < 32 from
	// n >= 32, but unlike the LSL/LSR handlers there is no preceding
	// UML_CMP(I1, 32) — COND_B consumes flags from some earlier op; verify
	// which flags are actually live here.
	UML_JMPc(block, uml::COND_B, offs32 = compiler->labelnum++);

	// (intended) n >= 32 path: result is all sign bits, C = sign bit
	UML_TEST(block, uml::I0, 0x80000000);
	UML_MOVc(block, uml::COND_NZ, DRC_REG(rd), ~0);
	UML_MOVc(block, uml::COND_Z, DRC_REG(rd), 0);
	UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I1, 0);
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
	UML_JMP(block, skip);

	// (intended) n < 32 path: C = bit (n - 1) of the original value
	UML_LABEL(block, offs32);
	// NOTE(review): I1 no longer holds the shift count at this point — it was
	// overwritten with the sign-extension mask above — so this carry
	// computation looks stale; verify against the interpreter core.
	UML_SUB(block, uml::I1, uml::I1, 1);
	UML_SHL(block, uml::I1, 1, uml::I1);
	UML_TEST(block, uml::I0, uml::I1);
	UML_MOVc(block, uml::COND_NZ, uml::I1, C_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I1, 0);
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I1);
	UML_JMP(block, skip);

	UML_LABEL(block, skip);
	DRCHandleALUNZFlags(DRC_REG(rd));              // leaves the computed N/Z bits in I0
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
	UML_ADD(block, DRC_PC, DRC_PC, 2);

}
| 512 | |
| 513 | void arm7_cpu_device::drctg04_00_05(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADC Rd, Rs */ |
| 514 | { |
| 515 | UINT32 op = desc->opptr.l[0]; |
| 516 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 517 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 518 | UML_TEST(block, DRC_CPSR, C_MASK); |
| 519 | UML_MOVc(block, uml::COND_NZ, uml::I3, 1); |
| 520 | UML_MOVc(block, uml::COND_Z, uml::I3, 0); |
| 521 | UML_ADD(block, uml::I3, uml::I3, DRC_REG(rd)); |
| 522 | UML_ADD(block, uml::I3, uml::I3, DRC_REG(rs)); |
| 523 | DRCHandleThumbALUAddFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 524 | UML_MOV(block, DRC_REG(rd), uml::I3); |
| 525 | } |
| 526 | |
| 527 | void arm7_cpu_device::drctg04_00_06(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* SBC Rd, Rs */ |
| 528 | { |
| 529 | UINT32 op = desc->opptr.l[0]; |
| 530 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 531 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 532 | UML_TEST(block, DRC_CPSR, C_MASK); |
| 533 | UML_MOVc(block, uml::COND_NZ, uml::I3, 0); |
| 534 | UML_MOVc(block, uml::COND_Z, uml::I3, 1); |
| 535 | UML_SUB(block, uml::I3, DRC_REG(rs), uml::I3); |
| 536 | UML_ADD(block, uml::I3, DRC_REG(rd), uml::I3); |
| 537 | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 538 | UML_MOV(block, DRC_REG(rd), uml::I3); |
| 539 | } |
| 540 | |
void arm7_cpu_device::drctg04_00_07(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ROR Rd, Rs */
{
	// Thumb format 4 ALU op: rotate Rd right by the count in Rs; writes the
	// rotated value to Rd, updates N/Z/C and steps PC by 2.
	UINT32 op = desc->opptr.l[0];
	UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT;
	UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT;
	UML_MOV(block, uml::I0, DRC_REG(rd));        // I0 = original Rd value
	// NOTE(review): masks the count to 0..31; the architectural count is the
	// low byte of Rs (0..255), with a zero count leaving Rd and C untouched.
	// The sequence below always rotates and always recomputes C, so counts of
	// 0 (and multiples of 32) may not behave as hardware does -- confirm.
	UML_AND(block, uml::I1, DRC_REG(rs), 0x1f);
	// Rotate: Rd = (Rd >> count) | (Rd << (32 - count)).
	UML_SHR(block, DRC_REG(rd), uml::I0, uml::I1);
	UML_SUB(block, uml::I2, 32, uml::I1);
	UML_SHL(block, uml::I2, uml::I0, uml::I2);
	UML_OR(block, DRC_REG(rd), DRC_REG(rd), uml::I2);
	// Carry = bit (count - 1) of the original value: build the bit mask
	// 1 << (count - 1) in I1 and test it against the saved Rd.
	UML_SUB(block, uml::I1, uml::I1, 1);
	UML_SHL(block, uml::I1, 1, uml::I1);
	UML_TEST(block, uml::I0, uml::I1);
	UML_MOVc(block, uml::COND_NZ, uml::I0, C_MASK);  // I0 now holds the C flag bit
	UML_MOVc(block, uml::COND_Z, uml::I0, 0);
	// Clear N/Z/C, recompute N/Z from the rotated Rd and merge everything
	// (DRCHandleALUNZFlags's output is ORed in via I0 below) into CPSR.
	UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK | C_MASK));
	DRCHandleALUNZFlags(DRC_REG(rd));
	UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0);
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 562 | |
| 563 | void arm7_cpu_device::drctg04_00_08(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* TST Rd, Rs */ |
| 564 | { |
| 565 | UINT32 op = desc->opptr.l[0]; |
| 566 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 567 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 568 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 569 | UML_AND(block, uml::I2, DRC_REG(rd), DRC_REG(rs)); |
| 570 | DRCHandleALUNZFlags(uml::I2); |
| 571 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 572 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 573 | } |
| 574 | |
| 575 | void arm7_cpu_device::drctg04_00_09(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* NEG Rd, Rs */ |
| 576 | { |
| 577 | UINT32 op = desc->opptr.l[0]; |
| 578 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 579 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 580 | UML_MOV(block, uml::I3, DRC_REG(rs)); |
| 581 | UML_SUB(block, DRC_REG(rd), 0, uml::I3); |
| 582 | DRCHandleThumbALUSubFlags(DRC_REG(rd), 0, uml::I3); |
| 583 | } |
| 584 | |
| 585 | void arm7_cpu_device::drctg04_00_0a(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Rd, Rs */ |
| 586 | { |
| 587 | UINT32 op = desc->opptr.l[0]; |
| 588 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 589 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 590 | UML_SUB(block, uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 591 | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 592 | } |
| 593 | |
| 594 | void arm7_cpu_device::drctg04_00_0b(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMN Rd, Rs - check flags, add dasm */ |
| 595 | { |
| 596 | UINT32 op = desc->opptr.l[0]; |
| 597 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 598 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 599 | UML_ADD(block, uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 600 | DRCHandleThumbALUAddFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 601 | } |
| 602 | |
| 603 | void arm7_cpu_device::drctg04_00_0c(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ORR Rd, Rs */ |
| 604 | { |
| 605 | UINT32 op = desc->opptr.l[0]; |
| 606 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 607 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 608 | UML_OR(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs)); |
| 609 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 610 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 611 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 612 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 613 | } |
| 614 | |
| 615 | void arm7_cpu_device::drctg04_00_0d(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MUL Rd, Rs */ |
| 616 | { |
| 617 | UINT32 op = desc->opptr.l[0]; |
| 618 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 619 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 620 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 621 | UML_MULU(block, DRC_REG(rd), uml::I1, DRC_REG(rd), DRC_REG(rs)); |
| 622 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 623 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 624 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 625 | } |
| 626 | |
| 627 | void arm7_cpu_device::drctg04_00_0e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* BIC Rd, Rs */ |
| 628 | { |
| 629 | UINT32 op = desc->opptr.l[0]; |
| 630 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 631 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 632 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 633 | UML_XOR(block, uml::I0, DRC_REG(rs), ~0); |
| 634 | UML_AND(block, DRC_REG(rd), DRC_REG(rd), uml::I0); |
| 635 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 636 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 637 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 638 | } |
| 639 | |
| 640 | void arm7_cpu_device::drctg04_00_0f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MVN Rd, Rs */ |
| 641 | { |
| 642 | UINT32 op = desc->opptr.l[0]; |
| 643 | UINT32 rs = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 644 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 645 | UML_XOR(block, uml::I0, DRC_REG(rs), ~0); |
| 646 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 647 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~(Z_MASK | N_MASK)); |
| 648 | DRCHandleALUNZFlags(DRC_REG(rd)); |
| 649 | UML_OR(block, DRC_CPSR, DRC_CPSR, uml::I0); |
| 650 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 651 | } |
| 652 | |
| 653 | /* ADD Rd, Rs group */ |
| 654 | |
| 655 | void arm7_cpu_device::drctg04_01_00(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 656 | { |
| 657 | UINT32 op = desc->opptr.l[0]; |
| 658 | UINT32 pc = desc->pc; |
| 659 | fatalerror("%08x: G4-1-0 Undefined Thumb instruction: %04x %x\n", pc, op, (op & THUMB_HIREG_H) >> THUMB_HIREG_H_SHIFT); |
| 660 | } |
| 661 | |
| 662 | void arm7_cpu_device::drctg04_01_01(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, HRs */ |
| 663 | { |
| 664 | UINT32 op = desc->opptr.l[0]; |
| 665 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 666 | UINT32 rd = op & THUMB_HIREG_RD; |
| 667 | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), DRC_REG(rs+8)); |
| 668 | if (rs == 7) |
| 669 | { |
| 670 | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), 4); |
| 671 | } |
| 672 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 673 | } |
| 674 | |
| 675 | void arm7_cpu_device::drctg04_01_02(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD HRd, Rs */ |
| 676 | { |
| 677 | UINT32 op = desc->opptr.l[0]; |
| 678 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 679 | UINT32 rd = op & THUMB_HIREG_RD; |
| 680 | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), DRC_REG(rs)); |
| 681 | if (rd == 7) |
| 682 | { |
| 683 | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), 4); |
| 684 | } |
| 685 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 686 | } |
| 687 | |
| 688 | void arm7_cpu_device::drctg04_01_03(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Add HRd, HRs */ |
| 689 | { |
| 690 | UINT32 op = desc->opptr.l[0]; |
| 691 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 692 | UINT32 rd = op & THUMB_HIREG_RD; |
| 693 | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), DRC_REG(rs+8)); |
| 694 | // emulate the effects of pre-fetch |
| 695 | if (rs == 7) |
| 696 | { |
| 697 | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), 4); |
| 698 | } |
| 699 | if (rd == 7) |
| 700 | { |
| 701 | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), 2); |
| 702 | } |
| 703 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 704 | } |
| 705 | |
| 706 | void arm7_cpu_device::drctg04_01_10(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Rd, Rs */ |
| 707 | { |
| 708 | UINT32 op = desc->opptr.l[0]; |
| 709 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 710 | UINT32 rd = op & THUMB_HIREG_RD; |
| 711 | UML_SUB(block, uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 712 | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs)); |
| 713 | } |
| 714 | |
| 715 | void arm7_cpu_device::drctg04_01_11(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Rd, Hs */ |
| 716 | { |
| 717 | UINT32 op = desc->opptr.l[0]; |
| 718 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 719 | UINT32 rd = op & THUMB_HIREG_RD; |
| 720 | UML_SUB(block, uml::I3, DRC_REG(rd), DRC_REG(rs+8)); |
| 721 | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd), DRC_REG(rs+8)); |
| 722 | } |
| 723 | |
| 724 | void arm7_cpu_device::drctg04_01_12(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Hd, Rs */ |
| 725 | { |
| 726 | UINT32 op = desc->opptr.l[0]; |
| 727 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 728 | UINT32 rd = op & THUMB_HIREG_RD; |
| 729 | UML_SUB(block, uml::I3, DRC_REG(rd+8), DRC_REG(rs)); |
| 730 | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd+8), DRC_REG(rs)); |
| 731 | } |
| 732 | |
| 733 | void arm7_cpu_device::drctg04_01_13(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* CMP Hd, Hs */ |
| 734 | { |
| 735 | UINT32 op = desc->opptr.l[0]; |
| 736 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 737 | UINT32 rd = op & THUMB_HIREG_RD; |
| 738 | UML_SUB(block, uml::I3, DRC_REG(rd+8), DRC_REG(rs+8)); |
| 739 | DRCHandleThumbALUSubFlags(uml::I3, DRC_REG(rd+8), DRC_REG(rs+8)); |
| 740 | } |
| 741 | |
| 742 | /* MOV group */ |
| 743 | |
| 744 | void arm7_cpu_device::drctg04_01_20(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Rd, Rs (undefined) */ |
| 745 | { |
| 746 | UINT32 op = desc->opptr.l[0]; |
| 747 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 748 | UINT32 rd = op & THUMB_HIREG_RD; |
| 749 | UML_MOV(block, DRC_REG(rd), DRC_REG(rs)); |
| 750 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 751 | } |
| 752 | |
| 753 | void arm7_cpu_device::drctg04_01_21(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Rd, Hs */ |
| 754 | { |
| 755 | UINT32 op = desc->opptr.l[0]; |
| 756 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 757 | UINT32 rd = op & THUMB_HIREG_RD; |
| 758 | UML_MOV(block, DRC_REG(rd), DRC_REG(rs+8)); |
| 759 | if (rs == 7) |
| 760 | { |
| 761 | UML_ADD(block, DRC_REG(rd), DRC_REG(rd), 4); |
| 762 | } |
| 763 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 764 | } |
| 765 | |
| 766 | void arm7_cpu_device::drctg04_01_22(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Hd, Rs */ |
| 767 | { |
| 768 | UINT32 op = desc->opptr.l[0]; |
| 769 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 770 | UINT32 rd = op & THUMB_HIREG_RD; |
| 771 | UML_MOV(block, DRC_REG(rd+8), DRC_REG(rs)); |
| 772 | // CHECKME |
| 773 | if (rd != 7) |
| 774 | { |
| 775 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 776 | } |
| 777 | else |
| 778 | { |
| 779 | UML_AND(block, DRC_PC, DRC_PC, ~1); |
| 780 | } |
| 781 | } |
| 782 | |
| 783 | void arm7_cpu_device::drctg04_01_23(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* MOV Hd, Hs */ |
| 784 | { |
| 785 | UINT32 op = desc->opptr.l[0]; |
| 786 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 787 | UINT32 rd = op & THUMB_HIREG_RD; |
| 788 | UML_MOV(block, DRC_REG(rd+8), DRC_REG(rs+8)); |
| 789 | if (rs == 7) |
| 790 | { |
| 791 | UML_ADD(block, DRC_REG(rd+8), DRC_REG(rd+8), 4); |
| 792 | } |
| 793 | if (rd != 7) |
| 794 | { |
| 795 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 796 | } |
| 797 | else |
| 798 | { |
| 799 | UML_AND(block, DRC_PC, DRC_PC, ~1); |
| 800 | } |
| 801 | |
| 802 | } |
| 803 | |
| 804 | void arm7_cpu_device::drctg04_01_30(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 805 | { |
| 806 | UINT32 op = desc->opptr.l[0]; |
| 807 | uml::code_label switch_state; |
| 808 | uml::code_label done; |
| 809 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 810 | UML_MOV(block, uml::I0, DRC_REG(rs)); |
| 811 | UML_TEST(block, uml::I0, 1); |
| 812 | UML_JMPc(block, uml::COND_Z, switch_state = compiler->labelnum++); |
| 813 | UML_AND(block, uml::I0, uml::I0, ~1); |
| 814 | UML_JMP(block, done = compiler->labelnum++); |
| 815 | |
| 816 | UML_LABEL(block, switch_state); |
| 817 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~T_MASK); |
| 818 | UML_TEST(block, uml::I0, 2); |
| 819 | UML_MOVc(block, uml::COND_NZ, uml::I1, 2); |
| 820 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 821 | UML_ADD(block, uml::I0, uml::I0, uml::I1); |
| 822 | |
| 823 | UML_LABEL(block, done); |
| 824 | UML_MOV(block, DRC_PC, uml::I0); |
| 825 | } |
| 826 | |
| 827 | void arm7_cpu_device::drctg04_01_31(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 828 | { |
| 829 | UINT32 op = desc->opptr.l[0]; |
| 830 | uml::code_label switch_state; |
| 831 | uml::code_label done; |
| 832 | UINT32 rs = (op & THUMB_HIREG_RS) >> THUMB_HIREG_RS_SHIFT; |
| 833 | UML_MOV(block, uml::I0, DRC_REG(rs+8)); |
| 834 | if(rs == 7) |
| 835 | { |
| 836 | UML_ADD(block, uml::I0, uml::I0, 2); |
| 837 | } |
| 838 | UML_TEST(block, uml::I0, 1); |
| 839 | UML_JMPc(block, uml::COND_Z, switch_state = compiler->labelnum++); |
| 840 | UML_AND(block, uml::I0, uml::I0, ~1); |
| 841 | UML_JMP(block, done = compiler->labelnum++); |
| 842 | |
| 843 | UML_LABEL(block, switch_state); |
| 844 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~T_MASK); |
| 845 | UML_TEST(block, uml::I0, 2); |
| 846 | UML_MOVc(block, uml::COND_NZ, uml::I1, 2); |
| 847 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 848 | UML_ADD(block, uml::I0, uml::I0, uml::I1); |
| 849 | |
| 850 | UML_LABEL(block, done); |
| 851 | UML_MOV(block, DRC_PC, uml::I0); |
| 852 | } |
| 853 | |
| 854 | void arm7_cpu_device::drctg04_01_32(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 855 | { |
| 856 | UINT32 op = desc->opptr.l[0]; |
| 857 | UINT32 pc = desc->pc; |
| 858 | fatalerror("%08x: G4-3 Undefined Thumb instruction: %04x\n", pc, op); |
| 859 | } |
| 860 | |
| 861 | void arm7_cpu_device::drctg04_01_33(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 862 | { |
| 863 | UINT32 op = desc->opptr.l[0]; |
| 864 | UINT32 pc = desc->pc; |
| 865 | fatalerror("%08x: G4-3 Undefined Thumb instruction: %04x\n", pc, op); |
| 866 | } |
| 867 | |
| 868 | void arm7_cpu_device::drctg04_0203(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 869 | { |
| 870 | UINT32 op = desc->opptr.l[0]; |
| 871 | UINT32 rd = (op & THUMB_INSN_IMM_RD) >> THUMB_INSN_IMM_RD_SHIFT; |
| 872 | UINT32 imm = 4 + ((op & THUMB_INSN_IMM) << 2); |
| 873 | UML_AND(block, uml::I0, DRC_PC, ~2); |
| 874 | UML_ADD(block, uml::I0, uml::I0, imm); |
| 875 | UML_CALLH(block, *m_impstate.read32); |
| 876 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 877 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 878 | } |
| 879 | |
| 880 | /* LDR* STR* group */ |
| 881 | |
| 882 | void arm7_cpu_device::drctg05_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* STR Rd, [Rn, Rm] */ |
| 883 | { |
| 884 | UINT32 op = desc->opptr.l[0]; |
| 885 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 886 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 887 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 888 | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 889 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 890 | UML_CALLH(block, *m_impstate.write32); |
| 891 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 892 | } |
| 893 | |
| 894 | void arm7_cpu_device::drctg05_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* STRH Rd, [Rn, Rm] */ |
| 895 | { |
| 896 | UINT32 op = desc->opptr.l[0]; |
| 897 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 898 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 899 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 900 | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 901 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 902 | UML_CALLH(block, *m_impstate.write16); |
| 903 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 904 | } |
| 905 | |
| 906 | void arm7_cpu_device::drctg05_2(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* STRB Rd, [Rn, Rm] */ |
| 907 | { |
| 908 | UINT32 op = desc->opptr.l[0]; |
| 909 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 910 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 911 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 912 | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 913 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 914 | UML_CALLH(block, *m_impstate.write16); |
| 915 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 916 | } |
| 917 | |
| 918 | void arm7_cpu_device::drctg05_3(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDSB Rd, [Rn, Rm] todo, add dasm */ |
| 919 | { |
| 920 | UINT32 op = desc->opptr.l[0]; |
| 921 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 922 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 923 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 924 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 925 | UML_CALLH(block, *m_impstate.read8); |
| 926 | UML_SEXT(block, DRC_REG(rd), uml::I0, uml::SIZE_BYTE); |
| 927 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 928 | } |
| 929 | |
| 930 | void arm7_cpu_device::drctg05_4(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDR Rd, [Rn, Rm] */ |
| 931 | { |
| 932 | UINT32 op = desc->opptr.l[0]; |
| 933 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 934 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 935 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 936 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 937 | UML_CALLH(block, *m_impstate.read32); |
| 938 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 939 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 940 | } |
| 941 | |
| 942 | void arm7_cpu_device::drctg05_5(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDRH Rd, [Rn, Rm] */ |
| 943 | { |
| 944 | UINT32 op = desc->opptr.l[0]; |
| 945 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 946 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 947 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 948 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 949 | UML_CALLH(block, *m_impstate.read16); |
| 950 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 951 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 952 | } |
| 953 | |
| 954 | void arm7_cpu_device::drctg05_6(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDRB Rd, [Rn, Rm] */ |
| 955 | { |
| 956 | UINT32 op = desc->opptr.l[0]; |
| 957 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 958 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 959 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 960 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 961 | UML_CALLH(block, *m_impstate.read8); |
| 962 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 963 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 964 | } |
| 965 | |
| 966 | void arm7_cpu_device::drctg05_7(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* LDSH Rd, [Rn, Rm] */ |
| 967 | { |
| 968 | UINT32 op = desc->opptr.l[0]; |
| 969 | UINT32 rm = (op & THUMB_GROUP5_RM) >> THUMB_GROUP5_RM_SHIFT; |
| 970 | UINT32 rn = (op & THUMB_GROUP5_RN) >> THUMB_GROUP5_RN_SHIFT; |
| 971 | UINT32 rd = (op & THUMB_GROUP5_RD) >> THUMB_GROUP5_RD_SHIFT; |
| 972 | UML_ADD(block, uml::I0, DRC_REG(rn), DRC_REG(rm)); |
| 973 | UML_CALLH(block, *m_impstate.read16); |
| 974 | UML_SEXT(block, DRC_REG(rd), uml::I0, uml::SIZE_WORD); |
| 975 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 976 | } |
| 977 | |
| 978 | /* Word Store w/ Immediate Offset */ |
| 979 | |
| 980 | void arm7_cpu_device::drctg06_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */ |
| 981 | { |
| 982 | UINT32 op = desc->opptr.l[0]; |
| 983 | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 984 | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 985 | INT32 offs = ((op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT) << 2; |
| 986 | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 987 | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 988 | UML_CALLH(block, *m_impstate.write32); |
| 989 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 990 | } |
| 991 | |
| 992 | void arm7_cpu_device::drctg06_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 993 | { |
| 994 | UINT32 op = desc->opptr.l[0]; |
| 995 | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 996 | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 997 | INT32 offs = ((op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT) << 2; |
| 998 | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 999 | UML_CALLH(block, *m_impstate.read32); |
| 1000 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1001 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1002 | } |
| 1003 | |
| 1004 | /* Byte Store w/ Immeidate Offset */ |
| 1005 | |
| 1006 | void arm7_cpu_device::drctg07_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */ |
| 1007 | { |
| 1008 | UINT32 op = desc->opptr.l[0]; |
| 1009 | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 1010 | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 1011 | INT32 offs = (op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT; |
| 1012 | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 1013 | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 1014 | UML_CALLH(block, *m_impstate.write8); |
| 1015 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1016 | } |
| 1017 | |
| 1018 | void arm7_cpu_device::drctg07_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 1019 | { |
| 1020 | UINT32 op = desc->opptr.l[0]; |
| 1021 | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 1022 | UINT32 rd = op & THUMB_ADDSUB_RD; |
| 1023 | INT32 offs = (op & THUMB_LSOP_OFFS) >> THUMB_LSOP_OFFS_SHIFT; |
| 1024 | UML_ADD(block, uml::I0, DRC_REG(rn), offs); |
| 1025 | UML_CALLH(block, *m_impstate.read8); |
| 1026 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1027 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1028 | } |
| 1029 | |
| 1030 | /* Load/Store Halfword */ |
| 1031 | |
| 1032 | void arm7_cpu_device::drctg08_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */ |
| 1033 | { |
| 1034 | UINT32 op = desc->opptr.l[0]; |
| 1035 | UINT32 offs = (op & THUMB_HALFOP_OFFS) >> THUMB_HALFOP_OFFS_SHIFT; |
| 1036 | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 1037 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 1038 | UML_ADD(block, uml::I0, DRC_REG(rn), offs << 1); |
| 1039 | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 1040 | UML_CALLH(block, *m_impstate.write16); |
| 1041 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1042 | } |
| 1043 | |
| 1044 | void arm7_cpu_device::drctg08_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 1045 | { |
| 1046 | UINT32 op = desc->opptr.l[0]; |
| 1047 | UINT32 offs = (op & THUMB_HALFOP_OFFS) >> THUMB_HALFOP_OFFS_SHIFT; |
| 1048 | UINT32 rn = (op & THUMB_ADDSUB_RS) >> THUMB_ADDSUB_RS_SHIFT; |
| 1049 | UINT32 rd = (op & THUMB_ADDSUB_RD) >> THUMB_ADDSUB_RD_SHIFT; |
| 1050 | UML_ADD(block, uml::I0, DRC_REG(rn), offs << 1); |
| 1051 | UML_CALLH(block, *m_impstate.read16); |
| 1052 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1053 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1054 | } |
| 1055 | |
| 1056 | /* Stack-Relative Load/Store */ |
| 1057 | |
| 1058 | void arm7_cpu_device::drctg09_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */ |
| 1059 | { |
| 1060 | UINT32 op = desc->opptr.l[0]; |
| 1061 | UINT32 rd = (op & THUMB_STACKOP_RD) >> THUMB_STACKOP_RD_SHIFT; |
| 1062 | INT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2; |
| 1063 | UML_ADD(block, uml::I0, DRC_REG(13), offs); |
| 1064 | UML_MOV(block, uml::I1, DRC_REG(rd)); |
| 1065 | UML_CALLH(block, *m_impstate.write32); |
| 1066 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1067 | } |
| 1068 | |
| 1069 | void arm7_cpu_device::drctg09_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 1070 | { |
| 1071 | UINT32 op = desc->opptr.l[0]; |
| 1072 | UINT32 rd = (op & THUMB_STACKOP_RD) >> THUMB_STACKOP_RD_SHIFT; |
| 1073 | UINT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2; |
| 1074 | UML_ADD(block, uml::I0, DRC_REG(13), offs); |
| 1075 | UML_CALLH(block, *m_impstate.read32); |
| 1076 | UML_MOV(block, DRC_REG(rd), uml::I0); |
| 1077 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1078 | } |
| 1079 | |
| 1080 | /* Get relative address */ |
| 1081 | |
| 1082 | void arm7_cpu_device::drctg0a_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, PC, #nn */ |
| 1083 | { |
| 1084 | UINT32 op = desc->opptr.l[0]; |
| 1085 | UINT32 rd = (op & THUMB_RELADDR_RD) >> THUMB_RELADDR_RD_SHIFT; |
| 1086 | INT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2; |
| 1087 | UML_ADD(block, uml::I0, DRC_PC, 4); |
| 1088 | UML_AND(block, uml::I0, uml::I0, ~2); |
| 1089 | UML_ADD(block, DRC_REG(rd), uml::I0, offs); |
| 1090 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1091 | } |
| 1092 | |
| 1093 | void arm7_cpu_device::drctg0a_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD Rd, SP, #nn */ |
| 1094 | { |
| 1095 | UINT32 op = desc->opptr.l[0]; |
| 1096 | UINT32 rd = (op & THUMB_RELADDR_RD) >> THUMB_RELADDR_RD_SHIFT; |
| 1097 | INT32 offs = (UINT8)(op & THUMB_INSN_IMM) << 2; |
| 1098 | UML_ADD(block, DRC_REG(rd), DRC_REG(13), offs); |
| 1099 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1100 | } |
| 1101 | |
| 1102 | /* Stack-Related Opcodes */ |
| 1103 | |
| 1104 | void arm7_cpu_device::drctg0b_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* ADD SP, #imm */ |
| 1105 | { |
| 1106 | UINT32 op = desc->opptr.l[0]; |
| 1107 | INT32 addr = (op & THUMB_INSN_IMM); |
| 1108 | addr &= ~THUMB_INSN_IMM_S; |
| 1109 | addr = ((op & THUMB_INSN_IMM_S) ? -(addr << 2) : (addr << 2)); |
| 1110 | UML_ADD(block, DRC_REG(13), DRC_REG(13), addr); |
| 1111 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1112 | } |
| 1113 | |
| 1114 | void arm7_cpu_device::drctg0b_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1115 | { |
| 1116 | UINT32 op = desc->opptr.l[0]; |
| 1117 | UINT32 pc = desc->pc; |
| 1118 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1119 | } |
| 1120 | |
| 1121 | void arm7_cpu_device::drctg0b_2(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1122 | { |
| 1123 | UINT32 op = desc->opptr.l[0]; |
| 1124 | UINT32 pc = desc->pc; |
| 1125 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1126 | } |
| 1127 | |
| 1128 | void arm7_cpu_device::drctg0b_3(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1129 | { |
| 1130 | UINT32 op = desc->opptr.l[0]; |
| 1131 | UINT32 pc = desc->pc; |
| 1132 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1133 | } |
| 1134 | |
| 1135 | void arm7_cpu_device::drctg0b_4(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* PUSH {Rlist} */ |
| 1136 | { |
| 1137 | UINT32 op = desc->opptr.l[0]; |
| 1138 | for (INT32 offs = 7; offs >= 0; offs--) |
| 1139 | { |
| 1140 | if (op & (1 << offs)) |
| 1141 | { |
| 1142 | UML_SUB(block, DRC_REG(13), DRC_REG(13), 4); |
| 1143 | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1144 | UML_MOV(block, uml::I1, DRC_REG(offs)); |
| 1145 | UML_CALLH(block, *m_impstate.write32); |
| 1146 | } |
| 1147 | } |
| 1148 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1149 | } |
| 1150 | |
| 1151 | void arm7_cpu_device::drctg0b_5(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* PUSH {Rlist}{LR} */ |
| 1152 | { |
| 1153 | UINT32 op = desc->opptr.l[0]; |
| 1154 | UML_SUB(block, DRC_REG(13), DRC_REG(13), 4); |
| 1155 | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1156 | UML_MOV(block, uml::I1, DRC_REG(14)); |
| 1157 | UML_CALLH(block, *m_impstate.write32); |
| 1158 | for (INT32 offs = 7; offs >= 0; offs--) |
| 1159 | { |
| 1160 | if (op & (1 << offs)) |
| 1161 | { |
| 1162 | UML_SUB(block, DRC_REG(13), DRC_REG(13), 4); |
| 1163 | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1164 | UML_MOV(block, uml::I1, DRC_REG(offs)); |
| 1165 | UML_CALLH(block, *m_impstate.write32); |
| 1166 | } |
| 1167 | } |
| 1168 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1169 | } |
| 1170 | |
| 1171 | void arm7_cpu_device::drctg0b_6(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1172 | { |
| 1173 | UINT32 op = desc->opptr.l[0]; |
| 1174 | UINT32 pc = desc->pc; |
| 1175 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1176 | } |
| 1177 | |
| 1178 | void arm7_cpu_device::drctg0b_7(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1179 | { |
| 1180 | UINT32 op = desc->opptr.l[0]; |
| 1181 | UINT32 pc = desc->pc; |
| 1182 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1183 | } |
| 1184 | |
| 1185 | void arm7_cpu_device::drctg0b_8(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1186 | { |
| 1187 | UINT32 op = desc->opptr.l[0]; |
| 1188 | UINT32 pc = desc->pc; |
| 1189 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1190 | } |
| 1191 | |
| 1192 | void arm7_cpu_device::drctg0b_9(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1193 | { |
| 1194 | UINT32 op = desc->opptr.l[0]; |
| 1195 | UINT32 pc = desc->pc; |
| 1196 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1197 | } |
| 1198 | |
| 1199 | void arm7_cpu_device::drctg0b_a(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1200 | { |
| 1201 | UINT32 op = desc->opptr.l[0]; |
| 1202 | UINT32 pc = desc->pc; |
| 1203 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1204 | } |
| 1205 | |
| 1206 | void arm7_cpu_device::drctg0b_b(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1207 | { |
| 1208 | UINT32 op = desc->opptr.l[0]; |
| 1209 | UINT32 pc = desc->pc; |
| 1210 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1211 | } |
| 1212 | |
/* POP {Rlist} - pop the listed low registers (ascending) from a full-descending stack */
void arm7_cpu_device::drctg0b_c(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* POP {Rlist} */
{
	UINT32 op = desc->opptr.l[0];
	for (INT32 offs = 0; offs < 8; offs++)
	{
		if (op & (1 << offs))
		{
			UML_MOV(block, uml::I0, DRC_REG(13));   // I0 = address for the read handler
			UML_CALLH(block, *m_impstate.read32);   // read result comes back in I0
			UML_MOV(block, DRC_REG(offs), uml::I0);
			UML_ADD(block, DRC_REG(13), DRC_REG(13), 4);
		}
	}
	UML_ADD(block, DRC_PC, DRC_PC, 2);              // advance past this 16-bit opcode
}
| 1228 | |
| 1229 | void arm7_cpu_device::drctg0b_d(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* POP {Rlist}{PC} */ |
| 1230 | { |
| 1231 | UINT32 op = desc->opptr.l[0]; |
| 1232 | uml::code_label arch5up; |
| 1233 | uml::code_label done; |
| 1234 | uml::code_label switch_mode; |
| 1235 | for (INT32 offs = 0; offs < 8; offs++) |
| 1236 | { |
| 1237 | if (op & (1 << offs)) |
| 1238 | { |
| 1239 | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1240 | UML_CALLH(block, *m_impstate.read32); |
| 1241 | UML_MOV(block, DRC_REG(offs), uml::I0); |
| 1242 | UML_ADD(block, DRC_REG(13), DRC_REG(13), 4); |
| 1243 | } |
| 1244 | } |
| 1245 | UML_MOV(block, uml::I0, DRC_REG(13)); |
| 1246 | UML_CALLH(block, *m_impstate.read32); |
| 1247 | UML_CMP(block, uml::mem(&m_archRev), 4); |
| 1248 | UML_JMPc(block, uml::COND_A, arch5up = compiler->labelnum++); |
| 1249 | UML_AND(block, DRC_PC, uml::I0, ~1); |
| 1250 | |
| 1251 | UML_LABEL(block, arch5up); |
| 1252 | |
| 1253 | UML_TEST(block, uml::I0, 1); |
| 1254 | UML_JMPc(block, uml::COND_Z, switch_mode = compiler->labelnum++); |
| 1255 | |
| 1256 | UML_AND(block, uml::I0, uml::I0, ~1); |
| 1257 | UML_MOV(block, DRC_PC, uml::I0); |
| 1258 | UML_JMP(block, done); |
| 1259 | |
| 1260 | UML_LABEL(block, switch_mode); |
| 1261 | UML_AND(block, DRC_CPSR, DRC_CPSR, ~T_MASK); |
| 1262 | UML_TEST(block, uml::I0, 2); |
| 1263 | UML_MOVc(block, uml::COND_NZ, uml::I1, 2); |
| 1264 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1265 | UML_ADD(block, uml::I0, uml::I0, uml::I1); |
| 1266 | UML_MOV(block, DRC_PC, uml::I0); |
| 1267 | |
| 1268 | UML_LABEL(block, done); |
| 1269 | UML_ADD(block, DRC_REG(13), DRC_REG(13), 4); |
| 1270 | } |
| 1271 | |
| 1272 | void arm7_cpu_device::drctg0b_e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1273 | { |
| 1274 | UINT32 op = desc->opptr.l[0]; |
| 1275 | UINT32 pc = desc->pc; |
| 1276 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1277 | } |
| 1278 | |
| 1279 | void arm7_cpu_device::drctg0b_f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1280 | { |
| 1281 | UINT32 op = desc->opptr.l[0]; |
| 1282 | UINT32 pc = desc->pc; |
| 1283 | fatalerror("%08x: Gb Undefined Thumb instruction: %04x\n", pc, op); |
| 1284 | } |
| 1285 | |
| 1286 | /* Multiple Load/Store */ |
| 1287 | |
| 1288 | // "The address should normally be a word aligned quantity and non-word aligned addresses do not affect the instruction." |
| 1289 | // "However, the bottom 2 bits of the address will appear on A[1:0] and might be interpreted by the memory system." |
| 1290 | |
| 1291 | // GBA "BB Ball" performs an unaligned read with A[1:0] = 2 and expects A[1] not to be ignored [BP 800B90A,(R4&3)!=0] |
| 1292 | // GBA "Gadget Racers" performs an unaligned read with A[1:0] = 1 and expects A[0] to be ignored [BP B72,(R0&3)!=0] |
| 1293 | |
/* STMIA Rd!, {Rlist} - store the listed registers ascending, write the final address back to Rd */
void arm7_cpu_device::drctg0c_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Store */
{
	UINT32 op = desc->opptr.l[0];
	UINT32 rd = (op & THUMB_MULTLS_BASE) >> THUMB_MULTLS_BASE_SHIFT;
	UML_MOV(block, uml::I2, DRC_REG(rd));           // I2 = running store address
	for (INT32 offs = 0; offs < 8; offs++)
	{
		if (op & (1 << offs))
		{
			UML_AND(block, uml::I0, uml::I2, ~3);   // stores are forced to word alignment
			UML_MOV(block, uml::I1, DRC_REG(offs));
			UML_CALLH(block, *m_impstate.write32);
			UML_ADD(block, uml::I2, uml::I2, 4);
		}
	}
	UML_MOV(block, DRC_REG(rd), uml::I2);           // base register writeback
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 1312 | |
| 1313 | void arm7_cpu_device::drctg0c_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* Load */ |
| 1314 | { |
| 1315 | UINT32 op = desc->opptr.l[0]; |
| 1316 | UINT32 rd = (op & THUMB_MULTLS_BASE) >> THUMB_MULTLS_BASE_SHIFT; |
| 1317 | int rd_in_list = op & (1 << rd); |
| 1318 | UML_MOV(block, uml::I2, DRC_REG(rd)); |
| 1319 | for (INT32 offs = 0; offs < 8; offs++) |
| 1320 | { |
| 1321 | if (op & (1 << offs)) |
| 1322 | { |
| 1323 | UML_AND(block, uml::I0, uml::I2, ~1); |
| 1324 | UML_CALLH(block, *m_impstate.read32); |
| 1325 | UML_ADD(block, uml::I2, uml::I2, 4); |
| 1326 | } |
| 1327 | } |
| 1328 | if (!rd_in_list) |
| 1329 | { |
| 1330 | UML_MOV(block, DRC_REG(rd), uml::I2); |
| 1331 | } |
| 1332 | UML_ADD(block, DRC_PC, DRC_PC, 2); |
| 1333 | } |
| 1334 | |
| 1335 | /* Conditional Branch */ |
| 1336 | |
// BEQ: branch when the Z flag is set
void arm7_cpu_device::drctg0d_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_EQ:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, Z_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // taken: branch displacement
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1346 | |
// BNE: branch when the Z flag is clear
void arm7_cpu_device::drctg0d_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_NE:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, Z_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // taken: branch displacement
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1356 | |
// BCS: branch when the C flag is set
void arm7_cpu_device::drctg0d_2(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_CS:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, C_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // taken: branch displacement
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1366 | |
// BCC: branch when the C flag is clear
void arm7_cpu_device::drctg0d_3(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_CC:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, C_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // taken: branch displacement
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1376 | |
// BMI: branch when the N flag is set
void arm7_cpu_device::drctg0d_4(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_MI:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, N_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // taken: branch displacement
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1386 | |
// BPL: branch when the N flag is clear
void arm7_cpu_device::drctg0d_5(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_PL:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, N_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // taken: branch displacement
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1396 | |
// BVS: branch when the V flag is set
void arm7_cpu_device::drctg0d_6(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_VS:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, V_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // taken: branch displacement
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1406 | |
// BVC: branch when the V flag is clear
void arm7_cpu_device::drctg0d_7(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_VC:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, V_MASK);
	UML_MOVc(block, uml::COND_Z, uml::I0, offs);    // taken: branch displacement
	UML_MOVc(block, uml::COND_NZ, uml::I0, 2);      // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1416 | |
// BHI: branch when C set AND Z clear (unsigned higher)
void arm7_cpu_device::drctg0d_8(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_HI:
{
	UINT32 op = desc->opptr.l[0];
	// sign-extended 8-bit offset in halfwords, +4 for the Thumb prefetch
	INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4;
	UML_TEST(block, DRC_CPSR, C_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I0, 1);      // I0 = (C set)
	UML_MOVc(block, uml::COND_Z, uml::I0, 0);
	UML_TEST(block, DRC_CPSR, Z_MASK);
	UML_MOVc(block, uml::COND_NZ, uml::I1, 0);      // I1 = (Z clear)
	UML_MOVc(block, uml::COND_Z, uml::I1, 1);
	UML_AND(block, uml::I0, uml::I0, uml::I1);      // HI = C && !Z
	UML_TEST(block, uml::I0, 1);
	UML_MOVc(block, uml::COND_NZ, uml::I0, offs);   // taken: branch displacement
	UML_MOVc(block, uml::COND_Z, uml::I0, 2);       // not taken: next instruction
	UML_ADD(block, DRC_PC, DRC_PC, uml::I0);
}
| 1433 | |
| 1434 | void arm7_cpu_device::drctg0d_9(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_LS: |
| 1435 | { |
| 1436 | UINT32 op = desc->opptr.l[0]; |
| 1437 | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1438 | UML_TEST(block, DRC_CPSR, C_MASK); |
| 1439 | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1440 | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1441 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1442 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1443 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1444 | UML_AND(block, uml::I0, uml::I0, uml::I1); |
| 1445 | UML_TEST(block, uml::I0, 1); |
| 1446 | UML_MOVc(block, uml::COND_NZ, uml::I0, offs); |
| 1447 | UML_MOVc(block, uml::COND_Z, uml::I0, 2); |
| 1448 | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1449 | } |
| 1450 | |
| 1451 | void arm7_cpu_device::drctg0d_a(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_GE: |
| 1452 | { |
| 1453 | UINT32 op = desc->opptr.l[0]; |
| 1454 | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1455 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1456 | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1457 | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1458 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1459 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1460 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1461 | UML_CMP(block, uml::I0, uml::I1); |
| 1462 | UML_MOVc(block, uml::COND_E, uml::I0, offs); |
| 1463 | UML_MOVc(block, uml::COND_NE, uml::I0, 2); |
| 1464 | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1465 | } |
| 1466 | |
| 1467 | void arm7_cpu_device::drctg0d_b(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_LT: |
| 1468 | { |
| 1469 | UINT32 op = desc->opptr.l[0]; |
| 1470 | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1471 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1472 | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1473 | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1474 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1475 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1476 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1477 | UML_CMP(block, uml::I0, uml::I1); |
| 1478 | UML_MOVc(block, uml::COND_NE, uml::I0, offs); |
| 1479 | UML_MOVc(block, uml::COND_E, uml::I0, 2); |
| 1480 | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1481 | } |
| 1482 | |
| 1483 | void arm7_cpu_device::drctg0d_c(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_GT: |
| 1484 | { |
| 1485 | UINT32 op = desc->opptr.l[0]; |
| 1486 | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1487 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1488 | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1489 | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1490 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1491 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1492 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1493 | UML_CMP(block, uml::I0, uml::I1); |
| 1494 | UML_MOVc(block, uml::COND_E, uml::I0, 1); |
| 1495 | UML_MOVc(block, uml::COND_NE, uml::I0, 0); |
| 1496 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1497 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1498 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1499 | UML_AND(block, uml::I0, uml::I0, uml::I1); |
| 1500 | UML_TEST(block, uml::I0, 1); |
| 1501 | UML_MOVc(block, uml::COND_NZ, uml::I0, offs); |
| 1502 | UML_MOVc(block, uml::COND_Z, uml::I0, 2); |
| 1503 | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1504 | } |
| 1505 | |
| 1506 | void arm7_cpu_device::drctg0d_d(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_LE: |
| 1507 | { |
| 1508 | UINT32 op = desc->opptr.l[0]; |
| 1509 | INT32 offs = ((INT8)(op & THUMB_INSN_IMM) << 1) + 4; |
| 1510 | UML_TEST(block, DRC_CPSR, N_MASK); |
| 1511 | UML_MOVc(block, uml::COND_Z, uml::I0, 1); |
| 1512 | UML_MOVc(block, uml::COND_NZ, uml::I0, 0); |
| 1513 | UML_TEST(block, DRC_CPSR, V_MASK); |
| 1514 | UML_MOVc(block, uml::COND_Z, uml::I1, 0); |
| 1515 | UML_MOVc(block, uml::COND_NZ, uml::I1, 1); |
| 1516 | UML_CMP(block, uml::I0, uml::I1); |
| 1517 | UML_MOVc(block, uml::COND_NE, uml::I0, 1); |
| 1518 | UML_MOVc(block, uml::COND_E, uml::I0, 0); |
| 1519 | UML_TEST(block, DRC_CPSR, Z_MASK); |
| 1520 | UML_MOVc(block, uml::COND_NZ, uml::I1, 0); |
| 1521 | UML_MOVc(block, uml::COND_Z, uml::I1, 1); |
| 1522 | UML_AND(block, uml::I0, uml::I0, uml::I1); |
| 1523 | UML_TEST(block, uml::I0, 1); |
| 1524 | UML_MOVc(block, uml::COND_NZ, uml::I0, offs); |
| 1525 | UML_MOVc(block, uml::COND_Z, uml::I0, 2); |
| 1526 | UML_ADD(block, DRC_PC, DRC_PC, uml::I0); |
| 1527 | } |
| 1528 | |
| 1529 | void arm7_cpu_device::drctg0d_e(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // COND_AL: |
| 1530 | { |
| 1531 | UINT32 op = desc->opptr.l[0]; |
| 1532 | UINT32 pc = desc->pc; |
| 1533 | fatalerror("%08x: Undefined Thumb instruction: %04x (ARM9 reserved)\n", pc, op); |
| 1534 | } |
| 1535 | |
void arm7_cpu_device::drctg0d_f(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) // SWI (this is sort of a "hole" in the opcode encoding)
{
	// flag the software interrupt, then let the shared IRQ-check subroutine take it
	UML_MOV(block, uml::mem(&m_pendingSwi), 1);
	UML_CALLH(block, *m_impstate.check_irq);
}
| 1541 | |
| 1542 | /* B #offs */ |
| 1543 | |
| 1544 | void arm7_cpu_device::drctg0e_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) |
| 1545 | { |
| 1546 | UINT32 op = desc->opptr.l[0]; |
| 1547 | INT32 offs = (op & THUMB_BRANCH_OFFS) << 1; |
| 1548 | if (offs & 0x00000800) |
| 1549 | { |
| 1550 | offs |= 0xfffff800; |
| 1551 | } |
| 1552 | UML_ADD(block, DRC_PC, DRC_PC, offs + 4); |
| 1553 | } |
| 1554 | |
/* BLX suffix: combines the offset staged in LR by the prefix half with the
   low offset bits, and branches to the word-aligned result */
void arm7_cpu_device::drctg0e_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc)
{
	UINT32 op = desc->opptr.l[0];
	UINT32 offs = (op & THUMB_BLOP_OFFS) << 1;
	UML_MOV(block, uml::I0, DRC_REG(14));           // I0 = LR (holds the prefix's partial target)
	UML_ADD(block, uml::I0, uml::I0, offs);         // add the low offset bits
	UML_AND(block, uml::I0, uml::I0, ~3);           // BLX targets are word-aligned
	UML_ADD(block, DRC_REG(14), DRC_PC, 4);
	UML_OR(block, DRC_REG(14), DRC_REG(14), 1);     // LR bit 0 set = return to Thumb
	UML_MOV(block, DRC_PC, uml::I0);
	// NOTE(review): BLX should also switch the core to ARM state (clear CPSR T),
	// but no T_MASK update is emitted here - confirm against the interpreter path.
}
| 1566 | |
| 1567 | /* BL */ |
| 1568 | |
/* BL prefix: LR = PC + 4 + (sign-extended upper 11-bit offset << 12) */
void arm7_cpu_device::drctg0f_0(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc)
{
	UINT32 op = desc->opptr.l[0];
	UINT32 addr = (op & THUMB_BLOP_OFFS) << 12;
	if (addr & (1 << 22))
	{
		addr |= 0xff800000;     // sign-extend the upper offset from bit 22
	}
	addr += 4;                  // prefetch adjustment (target is relative to PC + 4)
	UML_ADD(block, DRC_REG(14), DRC_PC, addr);
	UML_ADD(block, DRC_PC, DRC_PC, 2);
}
| 1581 | |
/* BL suffix: target = (LR & ~1) + (lower 11-bit offset << 1); LR = return address | 1 */
void arm7_cpu_device::drctg0f_1(drcuml_block *block, compiler_state *compiler, const opcode_desc *desc) /* BL */
{
	UINT32 op = desc->opptr.l[0];
	UINT32 addr = (op & THUMB_BLOP_OFFS) << 1;
	UML_AND(block, uml::I0, DRC_REG(14), ~1);       // I0 = partial target staged by the prefix
	UML_ADD(block, uml::I0, uml::I0, addr);
	UML_ADD(block, DRC_REG(14), DRC_PC, 2);         // return address...
	UML_OR(block, DRC_REG(14), DRC_REG(14), 1);     // ...with bit 0 set (return in Thumb state)
	UML_MOV(block, DRC_PC, uml::I0);
}
trunk/src/emu/cpu/adsp2100/2100ops.c
| r28735 | r28736 | |
/*===========================================================================
    ASTAT -- ALU/MAC status register
===========================================================================*/

/* extracts flags */
#define GET_SS      (m_astat & SSFLAG)
#define GET_MV      (m_astat & MVFLAG)
#define GET_Q       (m_astat & QFLAG)
#define GET_S       (m_astat & SFLAG)
#define GET_C       (m_astat & CFLAG)
#define GET_V       (m_astat & VFLAG)
#define GET_N       (m_astat & NFLAG)
#define GET_Z       (m_astat & ZFLAG)

/* clears flags */
#define CLR_SS      (m_astat &= ~SSFLAG)
#define CLR_MV      (m_astat &= ~MVFLAG)
#define CLR_Q       (m_astat &= ~QFLAG)
#define CLR_S       (m_astat &= ~SFLAG)
#define CLR_C       (m_astat &= ~CFLAG)
#define CLR_V       (m_astat &= ~VFLAG)
#define CLR_N       (m_astat &= ~NFLAG)
#define CLR_Z       (m_astat &= ~ZFLAG)

/* sets flags */
#define SET_SS      (m_astat |= SSFLAG)
#define SET_MV      (m_astat |= MVFLAG)
#define SET_Q       (m_astat |= QFLAG)
#define SET_S       (m_astat |= SFLAG)
#define SET_C       (m_astat |= CFLAG)
#define SET_V       (m_astat |= VFLAG)
#define SET_Z       (m_astat |= ZFLAG)
#define SET_N       (m_astat |= NFLAG)

/* flag clearing; must be done before setting */
/* (m_astat_clear is maintained by update_mstat(): in sticky-V mode it
   preserves the V flag, otherwise it clears C/V/N/Z together) */
#define CLR_FLAGS   (m_astat &= m_astat_clear)

/* compute flags from a 17-bit ALU result r and operands s,d */
/* each macro shifts the relevant result bit down onto its ASTAT bit position */
#define CALC_Z(r)       (m_astat |= ((r & 0xffff) == 0))
#define CALC_N(r)       (m_astat |= (r >> 14) & 0x02)
/* V = carry into bit 15 XOR carry out of bit 15, via the s^d^r^(r>>1) trick */
#define CALC_V(s,d,r)   (m_astat |= ((s ^ d ^ r ^ (r >> 1)) >> 13) & 0x04)
#define CALC_C(r)       (m_astat |= (r >> 13) & 0x08)
/* subtraction borrows use the inverted carry convention */
#define CALC_C_SUB(r)   (m_astat |= (~r >> 13) & 0x08)
#define CALC_NZ(r) CLR_FLAGS; CALC_N(r); CALC_Z(r)
#define CALC_NZV(s,d,r) CLR_FLAGS; CALC_N(r); CALC_Z(r); CALC_V(s,d,r)
#define CALC_NZVC(s,d,r) CLR_FLAGS; CALC_N(r); CALC_Z(r); CALC_V(s,d,r); CALC_C(r)
#define CALC_NZVC_SUB(s,d,r) CLR_FLAGS; CALC_N(r); CALC_Z(r); CALC_V(s,d,r); CALC_C_SUB(r)

/* ADSP-218x constants */
/* interleaved pairs of (1 << n) and ~(1 << n) for n = 0..15, selected by the
   instruction's constant field */
static const INT32 constants[] =
{
	0x0001, 0xfffe, 0x0002, 0xfffd, 0x0004, 0xfffb, 0x0008, 0xfff7,
	0x0010, 0xffef, 0x0020, 0xffdf, 0x0040, 0xffbf, 0x0080, 0xff7f,
	0x0100, 0xfeff, 0x0200, 0xfdff, 0x0400, 0xfbff, 0x0800, 0xf7ff,
	0x1000, 0xefff, 0x2000, 0xdfff, 0x4000, 0xbfff, 0x8000, 0x7fff
};
| 57 | | |
| 58 | | |
| 59 | | |
| 60 | | /*=========================================================================== |
| 61 | | MSTAT -- ALU/MAC control register |
| 62 | | ===========================================================================*/ |
| 63 | | |
/* flag definitions (see update_mstat() for the side effects of changing them) */
#define MSTAT_BANK      0x01            /* register bank select */
#define MSTAT_REVERSE   0x02            /* bit-reverse addressing enable (DAG1) */
#define MSTAT_STICKYV   0x04            /* sticky ALU overflow enable */
#define MSTAT_SATURATE  0x08            /* AR saturation mode enable */
#define MSTAT_INTEGER   0x10            /* MAC result placement; 0=fractional, 1=integer */
#define MSTAT_TIMER     0x20            /* timer enable */
#define MSTAT_GOMODE    0x40            /* go mode enable */
| 72 | | |
/* you must call this in order to change MSTAT */
inline void adsp21xx_device::update_mstat()
{
	// swap in the alternate register bank when the BANK bit toggles
	if ((m_mstat ^ m_mstat_prev) & MSTAT_BANK)
	{
		adsp_core temp = m_core;
		m_core = m_alt;
		m_alt = temp;
	}
	// notify the driver callback when the timer enable bit toggles
	if ((m_mstat ^ m_mstat_prev) & MSTAT_TIMER)
		if (m_timer_fired != NULL)
			(*m_timer_fired)(*this, (m_mstat & MSTAT_TIMER) != 0);
	// in sticky-V mode the CLR_FLAGS mask must leave the V flag untouched
	if (m_mstat & MSTAT_STICKYV)
		m_astat_clear = ~(CFLAG | NFLAG | ZFLAG);
	else
		m_astat_clear = ~(CFLAG | VFLAG | NFLAG | ZFLAG);
	m_mstat_prev = m_mstat;
}
| 91 | | |
| 92 | | |
| 93 | | /*=========================================================================== |
| 94 | | SSTAT -- stack status register |
| 95 | | ===========================================================================*/ |
| 96 | | |
/* flag definitions - SSTAT is read-only status; empty/overflow bits are
   maintained by the stack push/pop helpers below */
#define PC_EMPTY        0x01            /* PC stack empty */
#define PC_OVER         0x02            /* PC stack overflow */
#define COUNT_EMPTY     0x04            /* count stack empty */
#define COUNT_OVER      0x08            /* count stack overflow */
#define STATUS_EMPTY    0x10            /* status stack empty */
#define STATUS_OVER     0x20            /* status stack overflow */
#define LOOP_EMPTY      0x40            /* loop stack empty */
#define LOOP_OVER       0x80            /* loop stack overflow */
| 106 | | |
| 107 | | |
| 108 | | |
| 109 | | /*=========================================================================== |
| 110 | | PC stack handlers |
| 111 | | ===========================================================================*/ |
| 112 | | |
| 113 | | inline UINT32 adsp21xx_device::pc_stack_top() |
| 114 | | { |
| 115 | | if (m_pc_sp > 0) |
| 116 | | return m_pc_stack[m_pc_sp - 1]; |
| 117 | | else |
| 118 | | return m_pc_stack[0]; |
| 119 | | } |
| 120 | | |
| 121 | | inline void adsp21xx_device::set_pc_stack_top(UINT32 top) |
| 122 | | { |
| 123 | | if (m_pc_sp > 0) |
| 124 | | m_pc_stack[m_pc_sp - 1] = top; |
| 125 | | else |
| 126 | | m_pc_stack[0] = top; |
| 127 | | } |
| 128 | | |
| 129 | | inline void adsp21xx_device::pc_stack_push() |
| 130 | | { |
| 131 | | if (m_pc_sp < PC_STACK_DEPTH) |
| 132 | | { |
| 133 | | m_pc_stack[m_pc_sp] = m_pc; |
| 134 | | m_pc_sp++; |
| 135 | | m_sstat &= ~PC_EMPTY; |
| 136 | | } |
| 137 | | else |
| 138 | | m_sstat |= PC_OVER; |
| 139 | | } |
| 140 | | |
| 141 | | inline void adsp21xx_device::pc_stack_push_val(UINT32 val) |
| 142 | | { |
| 143 | | if (m_pc_sp < PC_STACK_DEPTH) |
| 144 | | { |
| 145 | | m_pc_stack[m_pc_sp] = val; |
| 146 | | m_pc_sp++; |
| 147 | | m_sstat &= ~PC_EMPTY; |
| 148 | | } |
| 149 | | else |
| 150 | | m_sstat |= PC_OVER; |
| 151 | | } |
| 152 | | |
| 153 | | inline void adsp21xx_device::pc_stack_pop() |
| 154 | | { |
| 155 | | if (m_pc_sp > 0) |
| 156 | | { |
| 157 | | m_pc_sp--; |
| 158 | | if (m_pc_sp == 0) |
| 159 | | m_sstat |= PC_EMPTY; |
| 160 | | } |
| 161 | | m_pc = m_pc_stack[m_pc_sp]; |
| 162 | | } |
| 163 | | |
| 164 | | inline UINT32 adsp21xx_device::pc_stack_pop_val() |
| 165 | | { |
| 166 | | if (m_pc_sp > 0) |
| 167 | | { |
| 168 | | m_pc_sp--; |
| 169 | | if (m_pc_sp == 0) |
| 170 | | m_sstat |= PC_EMPTY; |
| 171 | | } |
| 172 | | return m_pc_stack[m_pc_sp]; |
| 173 | | } |
| 174 | | |
| 175 | | |
| 176 | | /*=========================================================================== |
| 177 | | CNTR stack handlers |
| 178 | | ===========================================================================*/ |
| 179 | | |
| 180 | | inline UINT32 adsp21xx_device::cntr_stack_top() |
| 181 | | { |
| 182 | | if (m_cntr_sp > 0) |
| 183 | | return m_cntr_stack[m_cntr_sp - 1]; |
| 184 | | else |
| 185 | | return m_cntr_stack[0]; |
| 186 | | } |
| 187 | | |
| 188 | | inline void adsp21xx_device::cntr_stack_push() |
| 189 | | { |
| 190 | | if (m_cntr_sp < CNTR_STACK_DEPTH) |
| 191 | | { |
| 192 | | m_cntr_stack[m_cntr_sp] = m_cntr; |
| 193 | | m_cntr_sp++; |
| 194 | | m_sstat &= ~COUNT_EMPTY; |
| 195 | | } |
| 196 | | else |
| 197 | | m_sstat |= COUNT_OVER; |
| 198 | | } |
| 199 | | |
| 200 | | inline void adsp21xx_device::cntr_stack_pop() |
| 201 | | { |
| 202 | | if (m_cntr_sp > 0) |
| 203 | | { |
| 204 | | m_cntr_sp--; |
| 205 | | if (m_cntr_sp == 0) |
| 206 | | m_sstat |= COUNT_EMPTY; |
| 207 | | } |
| 208 | | m_cntr = m_cntr_stack[m_cntr_sp]; |
| 209 | | } |
| 210 | | |
| 211 | | |
| 212 | | /*=========================================================================== |
| 213 | | LOOP stack handlers |
| 214 | | ===========================================================================*/ |
| 215 | | |
| 216 | | inline UINT32 adsp21xx_device::loop_stack_top() |
| 217 | | { |
| 218 | | if (m_loop_sp > 0) |
| 219 | | return m_loop_stack[m_loop_sp - 1]; |
| 220 | | else |
| 221 | | return m_loop_stack[0]; |
| 222 | | } |
| 223 | | |
| 224 | | inline void adsp21xx_device::loop_stack_push(UINT32 value) |
| 225 | | { |
| 226 | | if (m_loop_sp < LOOP_STACK_DEPTH) |
| 227 | | { |
| 228 | | m_loop_stack[m_loop_sp] = value; |
| 229 | | m_loop_sp++; |
| 230 | | m_loop = value >> 4; |
| 231 | | m_loop_condition = value & 15; |
| 232 | | m_sstat &= ~LOOP_EMPTY; |
| 233 | | } |
| 234 | | else |
| 235 | | m_sstat |= LOOP_OVER; |
| 236 | | } |
| 237 | | |
| 238 | | inline void adsp21xx_device::loop_stack_pop() |
| 239 | | { |
| 240 | | if (m_loop_sp > 0) |
| 241 | | { |
| 242 | | m_loop_sp--; |
| 243 | | if (m_loop_sp == 0) |
| 244 | | { |
| 245 | | m_loop = 0xffff; |
| 246 | | m_loop_condition = 0; |
| 247 | | m_sstat |= LOOP_EMPTY; |
| 248 | | } |
| 249 | | else |
| 250 | | { |
| 251 | | m_loop = m_loop_stack[m_loop_sp -1] >> 4; |
| 252 | | m_loop_condition = m_loop_stack[m_loop_sp - 1] & 15; |
| 253 | | } |
| 254 | | } |
| 255 | | } |
| 256 | | |
| 257 | | |
| 258 | | /*=========================================================================== |
| 259 | | STAT stack handlers |
| 260 | | ===========================================================================*/ |
| 261 | | |
| 262 | | inline void adsp21xx_device::stat_stack_push() |
| 263 | | { |
| 264 | | if (m_stat_sp < STAT_STACK_DEPTH) |
| 265 | | { |
| 266 | | m_stat_stack[m_stat_sp][0] = m_mstat; |
| 267 | | m_stat_stack[m_stat_sp][1] = m_imask; |
| 268 | | m_stat_stack[m_stat_sp][2] = m_astat; |
| 269 | | m_stat_sp++; |
| 270 | | m_sstat &= ~STATUS_EMPTY; |
| 271 | | } |
| 272 | | else |
| 273 | | m_sstat |= STATUS_OVER; |
| 274 | | } |
| 275 | | |
inline void adsp21xx_device::stat_stack_pop()
{
	// Pop one MSTAT/IMASK/ASTAT frame; popping an empty stack re-reads frame 0.
	if (m_stat_sp > 0)
	{
		m_stat_sp--;
		if (m_stat_sp == 0)
			m_sstat |= STATUS_EMPTY;
	}
	m_mstat = m_stat_stack[m_stat_sp][0];
	update_mstat();     // apply MSTAT side effects (bank swap, timer callback, clear mask)
	m_imask = m_stat_stack[m_stat_sp][1];
	m_astat = m_stat_stack[m_stat_sp][2];
	check_irqs();       // the restored IMASK may unmask a pending interrupt
}
| 290 | | |
| 291 | | |
| 292 | | |
| 293 | | /*=========================================================================== |
| 294 | | condition code checking |
| 295 | | ===========================================================================*/ |
| 296 | | |
// gcc doesn't want to inline this, so we use a macro
// conditions other than 14 come straight from the precomputed ASTAT lookup
// table; condition 14 (the CNTR-based loop condition) has side effects and is
// handled out of line by slow_condition()
#define condition(c) (((c) != 14) ? (m_condition_table[((c) << 8) | m_astat]) : slow_condition())

/*
inline int adsp21xx_device::condition(int c)
{
	if (c != 14)
		return m_condition_table[((c) << 8) | m_astat];
	else
		return slow_condition(c);
}
*/
| 309 | | |
| 310 | | int adsp21xx_device::slow_condition() |
| 311 | | { |
| 312 | | if ((INT32)--m_cntr > 0) |
| 313 | | return 1; |
| 314 | | else |
| 315 | | { |
| 316 | | cntr_stack_pop(); |
| 317 | | return 0; |
| 318 | | } |
| 319 | | } |
| 320 | | |
| 321 | | |
| 322 | | |
| 323 | | /*=========================================================================== |
| 324 | | register writing |
| 325 | | ===========================================================================*/ |
| 326 | | |
// Recompute the circular-buffer base address after an I register write.
inline void adsp21xx_device::update_i(int which)
{
	m_base[which] = m_i[which] & m_lmask[which];
}
| 331 | | |
// Recompute the wrap mask and base address after an L (length) register write.
inline void adsp21xx_device::update_l(int which)
{
	m_lmask[which] = m_mask_table[m_l[which] & 0x3fff];   // 14-bit length -> precomputed mask
	m_base[which] = m_i[which] & m_lmask[which];
}
| 337 | | |
// Write to register group 0: the ALU/MAC/shifter operand and result registers.
void adsp21xx_device::write_reg0(int regnum, INT32 val)
{
	switch (regnum)
	{
		case 0x00:  m_core.ax0.s = val;                 break;
		case 0x01:  m_core.ax1.s = val;                 break;
		case 0x02:  m_core.mx0.s = val;                 break;
		case 0x03:  m_core.mx1.s = val;                 break;
		case 0x04:  m_core.ay0.s = val;                 break;
		case 0x05:  m_core.ay1.s = val;                 break;
		case 0x06:  m_core.my0.s = val;                 break;
		case 0x07:  m_core.my1.s = val;                 break;
		case 0x08:  m_core.si.s = val;                  break;
		case 0x09:  m_core.se.s = (INT8)val;            break;  // SE is 8 bits, sign-extended
		case 0x0a:  m_core.ar.s = val;                  break;
		// writing MR1 sign-extends bit 15 into all of MR2
		case 0x0c:  m_core.mr.mrx.mr1.s = val; m_core.mr.mrx.mr2.s = (INT16)val >> 15; break;
		case 0x0b:  m_core.mr.mrx.mr0.s = val;          break;
		case 0x0d:  m_core.mr.mrx.mr2.s = (INT8)val;    break;  // MR2 is 8 bits, sign-extended
		case 0x0e:  m_core.sr.srx.sr0.s = val;          break;
		case 0x0f:  m_core.sr.srx.sr1.s = val;          break;
	}
}
| 360 | | |
// Write to register group 1: DAG1 address-generator registers I0-I3/M0-M3/L0-L3.
void adsp21xx_device::write_reg1(int regnum, INT32 val)
{
	int index = regnum & 3;     // which of the four registers in the sub-group
	switch (regnum >> 2)
	{
		case 0:
			// index registers are 14 bits wide
			m_i[index] = val & 0x3fff;
			update_i(index);
			break;

		case 1:
			// modify registers are 14-bit signed (sign-extend via shift pair)
			m_m[index] = (INT32)(val << 18) >> 18;
			break;

		case 2:
			// length registers are 14 bits; also refreshes the wrap mask
			m_l[index] = val & 0x3fff;
			update_l(index);
			break;

		case 3:
			logerror("ADSP %04x: Writing to an invalid register!\n", m_ppc);
			break;
	}
}
| 385 | | |
// Write to register group 2: DAG2 address-generator registers I4-I7/M4-M7/L4-L7.
void adsp21xx_device::write_reg2(int regnum, INT32 val)
{
	int index = 4 + (regnum & 3);   // DAG2 registers occupy slots 4-7
	switch (regnum >> 2)
	{
		case 0:
			// index registers are 14 bits wide
			m_i[index] = val & 0x3fff;
			update_i(index);
			break;

		case 1:
			// modify registers are 14-bit signed (sign-extend via shift pair)
			m_m[index] = (INT32)(val << 18) >> 18;
			break;

		case 2:
			// length registers are 14 bits; also refreshes the wrap mask
			m_l[index] = val & 0x3fff;
			update_l(index);
			break;

		case 3:
			logerror("ADSP %04x: Writing to an invalid register!\n", m_ppc);
			break;
	}
}
| 410 | | |
/**
 * Write one of the group 3 (control/status) registers.
 * @param regnum  register index 0-15 within the group
 * @param val     value to store, masked per-register as appropriate
 *
 * Side effects: IMASK/ICNTL/IFC writes re-evaluate pending interrupts via
 * check_irqs(); writing CNTR through 0x05 first pushes the old counter on
 * the count stack; 0x0f pushes a value onto the PC stack.  SSTAT (0x02)
 * and the reserved indices fall through to the invalid-register log.
 */
void adsp21xx_device::write_reg3(int regnum, INT32 val)
{
	switch (regnum)
	{
		case 0x00: m_astat = val & 0x00ff; break;
		case 0x01: m_mstat = val & m_mstat_mask; update_mstat(); break;
		case 0x03: m_imask = val & m_imask_mask; check_irqs(); break;
		case 0x04: m_icntl = val & 0x001f; check_irqs(); break;
		case 0x05: cntr_stack_push(); m_cntr = val & 0x3fff; break;
		/* SB is a 5-bit signed value; sign-extend from bit 4 */
		case 0x06: m_core.sb.s = (INT32)(val << 27) >> 27; break;
		case 0x07: m_px = val; break;
		/* TX register writes are forwarded to the serial port callback, if installed */
		case 0x09: if (m_sport_tx_callback != NULL) (*m_sport_tx_callback)(*this, 0, val); break;
		case 0x0b: if (m_sport_tx_callback != NULL) (*m_sport_tx_callback)(*this, 1, val); break;
		case 0x0c:
			/* IFC: interrupt force/clear register; the bit layout differs
			   between the ADSP-2181 and the earlier family members.
			   NOTE(review): bit assignments below look plausible but should
			   be verified against the respective data sheets. */
			m_ifc = val;
			if (m_chip_type >= CHIP_TYPE_ADSP2181)
			{
				/* clear timer */
				if (val & 0x0002) m_irq_latch[ADSP2181_IRQ0] = 0;
				if (val & 0x0004) m_irq_latch[ADSP2181_IRQ1] = 0;
				/* clear BDMA */
				if (val & 0x0010) m_irq_latch[ADSP2181_IRQE] = 0;
				if (val & 0x0020) m_irq_latch[ADSP2181_SPORT0_RX] = 0;
				if (val & 0x0040) m_irq_latch[ADSP2181_SPORT0_TX] = 0;
				if (val & 0x0080) m_irq_latch[ADSP2181_IRQ2] = 0;
				/* force timer */
				if (val & 0x0200) m_irq_latch[ADSP2181_IRQ0] = 1;
				if (val & 0x0400) m_irq_latch[ADSP2181_IRQ1] = 1;
				/* force BDMA */
				if (val & 0x1000) m_irq_latch[ADSP2181_IRQE] = 1;
				if (val & 0x2000) m_irq_latch[ADSP2181_SPORT0_RX] = 1;
				if (val & 0x4000) m_irq_latch[ADSP2181_SPORT0_TX] = 1;
				if (val & 0x8000) m_irq_latch[ADSP2181_IRQ2] = 1;
			}
			else
			{
				/* clear timer */
				if (val & 0x002) m_irq_latch[ADSP2101_IRQ0] = 0;
				if (val & 0x004) m_irq_latch[ADSP2101_IRQ1] = 0;
				if (val & 0x008) m_irq_latch[ADSP2101_SPORT0_RX] = 0;
				if (val & 0x010) m_irq_latch[ADSP2101_SPORT0_TX] = 0;
				if (val & 0x020) m_irq_latch[ADSP2101_IRQ2] = 0;
				/* set timer */
				if (val & 0x080) m_irq_latch[ADSP2101_IRQ0] = 1;
				if (val & 0x100) m_irq_latch[ADSP2101_IRQ1] = 1;
				if (val & 0x200) m_irq_latch[ADSP2101_SPORT0_RX] = 1;
				if (val & 0x400) m_irq_latch[ADSP2101_SPORT0_TX] = 1;
				if (val & 0x800) m_irq_latch[ADSP2101_IRQ2] = 1;
			}
			check_irqs();
			break;
		/* direct CNTR write: no count-stack push, unlike 0x05 */
		case 0x0d: m_cntr = val & 0x3fff; break;
		case 0x0f: pc_stack_push_val(val & 0x3fff); break;
		default: logerror("ADSP %04x: Writing to an invalid register!\n", m_ppc); break;
	}
}
| 467 | | |
/* dispatch a register write through the per-group member-function table wr_reg */
#define WRITE_REG(adsp,grp,reg,val) ((this->*wr_reg[grp][reg])(val))
| 469 | | |
| 470 | | |
| 471 | | |
| 472 | | /*=========================================================================== |
| 473 | | register reading |
| 474 | | ===========================================================================*/ |
| 475 | | |
/* Read a group 0 register through the pre-resolved pointer table m_read0_ptr. */
INT32 adsp21xx_device::read_reg0(int regnum)
{
	return *m_read0_ptr[regnum];
}
| 480 | | |
/* Read a group 1 register through the pre-resolved pointer table m_read1_ptr. */
INT32 adsp21xx_device::read_reg1(int regnum)
{
	return *m_read1_ptr[regnum];
}
| 485 | | |
/* Read a group 2 register through the pre-resolved pointer table m_read2_ptr. */
INT32 adsp21xx_device::read_reg2(int regnum)
{
	return *m_read2_ptr[regnum];
}
| 490 | | |
/**
 * Read one of the group 3 (control/status) registers.
 * @param regnum  register index 0-15 within the group
 * @return the register value; serial receive reads (0x08/0x0a) come from
 *         the installed RX callback (0 when none), and 0x0f pops the PC
 *         stack as a side effect.  Unmapped indices log and return 0.
 */
INT32 adsp21xx_device::read_reg3(int regnum)
{
	switch (regnum)
	{
		case 0x00: return m_astat;
		case 0x01: return m_mstat;
		case 0x02: return m_sstat;
		case 0x03: return m_imask;
		case 0x04: return m_icntl;
		case 0x05: return m_cntr;
		case 0x06: return m_core.sb.s;
		case 0x07: return m_px;
		case 0x08: if (m_sport_rx_callback) return (*m_sport_rx_callback)(*this, 0); else return 0;
		case 0x0a: if (m_sport_rx_callback) return (*m_sport_rx_callback)(*this, 1); else return 0;
		case 0x0f: return pc_stack_pop_val();
		default: logerror("ADSP %04x: Reading from an invalid register!\n", m_ppc); return 0;
	}
}
| 509 | | |
| 510 | | |
| 511 | | |
| 512 | | /*=========================================================================== |
| 513 | | Modulus addressing logic |
| 514 | | ===========================================================================*/ |
| 515 | | |
| 516 | | inline void adsp21xx_device::modify_address(UINT32 ireg, UINT32 mreg) |
| 517 | | { |
| 518 | | UINT32 base = m_base[ireg]; |
| 519 | | UINT32 i = m_i[ireg]; |
| 520 | | UINT32 l = m_l[ireg]; |
| 521 | | |
| 522 | | i += m_m[mreg]; |
| 523 | | if (i < base) i += l; |
| 524 | | else if (i >= base + l) i -= l; |
| 525 | | m_i[ireg] = i; |
| 526 | | } |
| 527 | | |
| 528 | | |
| 529 | | |
| 530 | | /*=========================================================================== |
| 531 | | Data memory accessors |
| 532 | | ===========================================================================*/ |
| 533 | | |
| 534 | | inline void adsp21xx_device::data_write_dag1(UINT32 op, INT32 val) |
| 535 | | { |
| 536 | | UINT32 ireg = (op >> 2) & 3; |
| 537 | | UINT32 mreg = op & 3; |
| 538 | | UINT32 base = m_base[ireg]; |
| 539 | | UINT32 i = m_i[ireg]; |
| 540 | | UINT32 l = m_l[ireg]; |
| 541 | | |
| 542 | | if ( m_mstat & MSTAT_REVERSE ) |
| 543 | | { |
| 544 | | UINT32 ir = m_reverse_table[ i & 0x3fff ]; |
| 545 | | data_write(ir, val); |
| 546 | | } |
| 547 | | else |
| 548 | | data_write(i, val); |
| 549 | | |
| 550 | | i += m_m[mreg]; |
| 551 | | if (i < base) i += l; |
| 552 | | else if (i >= base + l) i -= l; |
| 553 | | m_i[ireg] = i; |
| 554 | | } |
| 555 | | |
| 556 | | |
| 557 | | inline UINT32 adsp21xx_device::data_read_dag1(UINT32 op) |
| 558 | | { |
| 559 | | UINT32 ireg = (op >> 2) & 3; |
| 560 | | UINT32 mreg = op & 3; |
| 561 | | UINT32 base = m_base[ireg]; |
| 562 | | UINT32 i = m_i[ireg]; |
| 563 | | UINT32 l = m_l[ireg]; |
| 564 | | UINT32 res; |
| 565 | | |
| 566 | | if (m_mstat & MSTAT_REVERSE) |
| 567 | | { |
| 568 | | UINT32 ir = m_reverse_table[i & 0x3fff]; |
| 569 | | res = data_read(ir); |
| 570 | | } |
| 571 | | else |
| 572 | | res = data_read(i); |
| 573 | | |
| 574 | | i += m_m[mreg]; |
| 575 | | if (i < base) i += l; |
| 576 | | else if (i >= base + l) i -= l; |
| 577 | | m_i[ireg] = i; |
| 578 | | |
| 579 | | return res; |
| 580 | | } |
| 581 | | |
| 582 | | inline void adsp21xx_device::data_write_dag2(UINT32 op, INT32 val) |
| 583 | | { |
| 584 | | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 585 | | UINT32 mreg = 4 + (op & 3); |
| 586 | | UINT32 base = m_base[ireg]; |
| 587 | | UINT32 i = m_i[ireg]; |
| 588 | | UINT32 l = m_l[ireg]; |
| 589 | | |
| 590 | | data_write(i, val); |
| 591 | | |
| 592 | | i += m_m[mreg]; |
| 593 | | if (i < base) i += l; |
| 594 | | else if (i >= base + l) i -= l; |
| 595 | | m_i[ireg] = i; |
| 596 | | } |
| 597 | | |
| 598 | | |
| 599 | | inline UINT32 adsp21xx_device::data_read_dag2(UINT32 op) |
| 600 | | { |
| 601 | | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 602 | | UINT32 mreg = 4 + (op & 3); |
| 603 | | UINT32 base = m_base[ireg]; |
| 604 | | UINT32 i = m_i[ireg]; |
| 605 | | UINT32 l = m_l[ireg]; |
| 606 | | |
| 607 | | UINT32 res = data_read(i); |
| 608 | | |
| 609 | | i += m_m[mreg]; |
| 610 | | if (i < base) i += l; |
| 611 | | else if (i >= base + l) i -= l; |
| 612 | | m_i[ireg] = i; |
| 613 | | |
| 614 | | return res; |
| 615 | | } |
| 616 | | |
| 617 | | /*=========================================================================== |
| 618 | | Program memory accessors |
| 619 | | ===========================================================================*/ |
| 620 | | |
| 621 | | inline void adsp21xx_device::pgm_write_dag2(UINT32 op, INT32 val) |
| 622 | | { |
| 623 | | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 624 | | UINT32 mreg = 4 + (op & 3); |
| 625 | | UINT32 base = m_base[ireg]; |
| 626 | | UINT32 i = m_i[ireg]; |
| 627 | | UINT32 l = m_l[ireg]; |
| 628 | | |
| 629 | | program_write(i, (val << 8) | m_px); |
| 630 | | |
| 631 | | i += m_m[mreg]; |
| 632 | | if (i < base) i += l; |
| 633 | | else if (i >= base + l) i -= l; |
| 634 | | m_i[ireg] = i; |
| 635 | | } |
| 636 | | |
| 637 | | |
| 638 | | inline UINT32 adsp21xx_device::pgm_read_dag2(UINT32 op) |
| 639 | | { |
| 640 | | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 641 | | UINT32 mreg = 4 + (op & 3); |
| 642 | | UINT32 base = m_base[ireg]; |
| 643 | | UINT32 i = m_i[ireg]; |
| 644 | | UINT32 l = m_l[ireg]; |
| 645 | | UINT32 res; |
| 646 | | |
| 647 | | res = program_read(i); |
| 648 | | m_px = res; |
| 649 | | res >>= 8; |
| 650 | | |
| 651 | | i += m_m[mreg]; |
| 652 | | if (i < base) i += l; |
| 653 | | else if (i >= base + l) i -= l; |
| 654 | | m_i[ireg] = i; |
| 655 | | |
| 656 | | return res; |
| 657 | | } |
| 658 | | |
| 659 | | |
| 660 | | |
| 661 | | /*=========================================================================== |
| 662 | | register reading |
| 663 | | ===========================================================================*/ |
| 664 | | |
/* Operand fetch helpers: each table (m_alu_xregs etc.) holds pointers to
   the 16-bit storage of the registers selectable as X/Y operands; the cast
   chooses whether the value is read as unsigned or sign-extended. */
#define ALU_GETXREG_UNSIGNED(x) (*(UINT16 *)m_alu_xregs[x])
#define ALU_GETYREG_UNSIGNED(y) (*(UINT16 *)m_alu_yregs[y])

#define MAC_GETXREG_UNSIGNED(x) (*(UINT16 *)m_mac_xregs[x])
#define MAC_GETXREG_SIGNED(x)   (*( INT16 *)m_mac_xregs[x])
#define MAC_GETYREG_UNSIGNED(y) (*(UINT16 *)m_mac_yregs[y])
#define MAC_GETYREG_SIGNED(y)   (*( INT16 *)m_mac_yregs[y])

#define SHIFT_GETXREG_UNSIGNED(x) (*(UINT16 *)m_shift_xregs[x])
#define SHIFT_GETXREG_SIGNED(x)   (*( INT16 *)m_shift_xregs[x])
| 675 | | |
| 676 | | |
| 677 | | |
| 678 | | /*=========================================================================== |
| 679 | | ALU operations (result in AR) |
| 680 | | ===========================================================================*/ |
| 681 | | |
/**
 * Execute an ALU operation and store the result in AR.
 * op bit layout: bits 13-16 select the operation, bits 8-10 the X operand
 * register and bits 11-12 the Y operand register.
 * Updates the ASTAT condition flags through the CALC_xx / SET_xx macros;
 * when MSTAT saturation mode is active and overflow is flagged, the result
 * is clamped to the signed 16-bit limits before being written to AR.
 */
void adsp21xx_device::alu_op_ar(int op)
{
	INT32 xop = (op >> 8) & 7;
	INT32 yop = (op >> 11) & 3;
	INT32 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = ALU_GETYREG_UNSIGNED(yop);
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop + 1;
			CALC_NZ(res);
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			/* GET_C >> 3 extracts the carry flag (bit 3 of ASTAT) as 0/1 */
			yop += GET_C >> 3;
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = ALU_GETYREG_UNSIGNED(yop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = -yop;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - 1;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* saturate: clamp to signed 16-bit range on overflow, using C to pick the sign */
	if ((m_mstat & MSTAT_SATURATE) && GET_V) res = GET_C ? -32768 : 32767;

	/* set the final value */
	m_core.ar.u = res;
}
| 813 | | |
| 814 | | |
| 815 | | |
| 816 | | /*=========================================================================== |
| 817 | | ALU operations (result in AR, constant yop) |
| 818 | | ===========================================================================*/ |
| 819 | | |
/**
 * Execute an ALU operation with a constant Y operand, result to AR.
 * Identical to alu_op_ar() except that yop is drawn from the constants[]
 * table (selected by bits 5-7 and 11-12 of op) rather than a register.
 * Flag updates and MSTAT saturation behave exactly as in alu_op_ar().
 */
void adsp21xx_device::alu_op_ar_const(int op)
{
	INT32 xop = (op >> 8) & 7;
	INT32 yop = constants[((op >> 5) & 0x07) | ((op >> 8) & 0x18)];
	INT32 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = yop;
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			res = yop + 1;
			CALC_NZ(res);
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			/* GET_C >> 3 extracts the carry flag (bit 3 of ASTAT) as 0/1 */
			yop += GET_C >> 3;
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = yop ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			res = -yop;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			res = yop - 1;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* saturate: clamp to signed 16-bit range on overflow, using C to pick the sign */
	if ((m_mstat & MSTAT_SATURATE) && GET_V) res = GET_C ? -32768 : 32767;

	/* set the final value */
	m_core.ar.u = res;
}
| 939 | | |
| 940 | | |
| 941 | | |
| 942 | | /*=========================================================================== |
| 943 | | ALU operations (result in AF) |
| 944 | | ===========================================================================*/ |
| 945 | | |
/**
 * Execute an ALU operation and store the result in AF (ALU feedback).
 * Same operand decode and flag behavior as alu_op_ar(), but the result is
 * written to AF and no MSTAT saturation is applied (saturation only
 * affects AR on this architecture).
 */
void adsp21xx_device::alu_op_af(int op)
{
	INT32 xop = (op >> 8) & 7;
	INT32 yop = (op >> 11) & 3;
	INT32 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = ALU_GETYREG_UNSIGNED(yop);
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop + 1;
			CALC_NZ(res);
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			/* GET_C >> 3 extracts the carry flag (bit 3 of ASTAT) as 0/1 */
			yop += GET_C >> 3;
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = ALU_GETYREG_UNSIGNED(yop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = -yop;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - 1;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	m_core.af.u = res;
}
| 1074 | | |
| 1075 | | |
| 1076 | | |
| 1077 | | /*=========================================================================== |
| 1078 | | ALU operations (result in AF, constant yop) |
| 1079 | | ===========================================================================*/ |
| 1080 | | |
/**
 * Execute an ALU operation with a constant Y operand, result to AF.
 * Identical to alu_op_af() except that yop is drawn from the constants[]
 * table (selected by bits 5-7 and 11-12 of op) rather than a register.
 * No MSTAT saturation is applied (saturation only affects AR).
 */
void adsp21xx_device::alu_op_af_const(int op)
{
	INT32 xop = (op >> 8) & 7;
	INT32 yop = constants[((op >> 5) & 0x07) | ((op >> 8) & 0x18)];
	INT32 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = yop;
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			res = yop + 1;
			CALC_NZ(res);
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			/* GET_C >> 3 extracts the carry flag (bit 3 of ASTAT) as 0/1 */
			yop += GET_C >> 3;
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = yop ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			res = -yop;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			res = yop - 1;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	m_core.af.u = res;
}
| 1197 | | |
| 1198 | | |
| 1199 | | |
| 1200 | | /*=========================================================================== |
| 1201 | | ALU operations (no result) |
| 1202 | | ===========================================================================*/ |
| 1203 | | |
/**
 * Execute an ALU operation for its flag effects only; the numeric result
 * is discarded (neither AR nor AF is written).  Same operand decode and
 * flag behavior as alu_op_ar().
 */
void adsp21xx_device::alu_op_none(int op)
{
	INT32 xop = (op >> 8) & 7;
	INT32 yop = (op >> 11) & 3;
	/* res is computed only so the CALC_xx macros can derive the flags */
	INT32 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = ALU_GETYREG_UNSIGNED(yop);
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop + 1;
			CALC_NZ(res);
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			/* GET_C >> 3 extracts the carry flag (bit 3 of ASTAT) as 0/1 */
			yop += GET_C >> 3;
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = ALU_GETYREG_UNSIGNED(yop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = -yop;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - 1;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
	}
}
| 1326 | | |
| 1327 | | |
| 1328 | | |
| 1329 | | /*=========================================================================== |
| 1330 | | MAC operations (result in MR) |
| 1331 | | ===========================================================================*/ |
| 1332 | | |
/* mac_op_mr - execute one MAC instruction and write the full result to the
   40-bit MR accumulator, updating the MV (MAC overflow) flag.
   Opcode fields as decoded here: bits 13-16 = MAC function, bits 11-12 = Y
   operand select, bits 8-10 = X operand select.  The (SS)/(SU)/(US)/(UU)
   suffixes in the case comments give the signedness of X and Y respectively. */
void adsp21xx_device::mac_op_mr(int op)
{
	/* product shift count: 1 in fractional (1.15) mode, 0 in integer mode;
	   MSTAT_INTEGER is taken from bit 4 of MSTAT (hence the >> 4) */
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;
	INT32 xop = (op >> 8) & 7;      /* X operand register select */
	INT32 yop = (op >> 11) & 3;     /* Y operand register select */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			/* unbiased rounding: add 0x8000, then if the discarded low half
			   of the raw product was exactly 0x8000, force bit 16 to zero */
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			/* same unbiased rounding as above, applied to the accumulated sum */
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		/* plain multiply (no accumulate), one case per signedness combination */
		case 0x04<<13:
			/* X * Y (SS) Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		/* multiply/accumulate: MR + product */
		case 0x08<<13:
			/* MR + X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		/* multiply/subtract: MR - product */
		case 0x0c<<13:
			/* MR - X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			/* unreachable: all 16 function codes are covered above */
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MV (MAC overflow) is set when bits 39-31 of the result are not all
	   equal, i.e. the value overflows the signed 32-bit MR1:MR0 range */
	temp = (res >> 31) & 0x1ff;
	CLR_MV;
	if (temp != 0x000 && temp != 0x1ff) SET_MV;
	m_core.mr.mr = res;
}
| 1489 | | |
| 1490 | | |
| 1491 | | |
| 1492 | | /*=========================================================================== |
| 1493 | | MAC operations (result in MR, yop == xop) |
| 1494 | | ===========================================================================*/ |
| 1495 | | |
/* mac_op_mr_xop - same as mac_op_mr, but for the encoding where the Y operand
   is the same register as X: every product here is X * X.  Result goes to the
   40-bit MR accumulator and MV is updated. */
void adsp21xx_device::mac_op_mr_xop(int op)
{
	/* product shift count: 1 in fractional (1.15) mode, 0 in integer mode */
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;
	INT32 xop = (op >> 8) & 7;      /* X operand register select (also used as Y) */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * X (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			/* unbiased rounding: add 0x8000, then if the discarded low half
			   of the raw product was exactly 0x8000, force bit 16 to zero */
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * X (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * X (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x04<<13:
			/* X * X (SS) Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * X (SU) -- X is fetched signed here */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * X (US) -- X is fetched unsigned here */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * X (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x08<<13:
			/* MR + X * X (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * X (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * X (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * X (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0c<<13:
			/* MR - X * X (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * X (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * X (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * X (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			/* unreachable: all 16 function codes are covered above */
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MV set when bits 39-31 of the result disagree (signed 32-bit overflow) */
	temp = (res >> 31) & 0x1ff;
	CLR_MV;
	if (temp != 0x000 && temp != 0x1ff) SET_MV;
	m_core.mr.mr = res;
}
| 1636 | | |
| 1637 | | |
| 1638 | | |
| 1639 | | /*=========================================================================== |
| 1640 | | MAC operations (result in MF) |
| 1641 | | ===========================================================================*/ |
| 1642 | | |
/* mac_op_mf - execute one MAC instruction with the result written to the
   16-bit MF feedback register instead of MR.  MF receives bits 31-16 of the
   (truncated 32-bit) result; the MV flag is NOT updated by this variant.
   Operand decoding and rounding are identical to mac_op_mr. */
void adsp21xx_device::mac_op_mf(int op)
{
	/* product shift count: 1 in fractional (1.15) mode, 0 in integer mode */
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;
	INT32 xop = (op >> 8) & 7;      /* X operand register select */
	INT32 yop = (op >> 11) & 3;     /* Y operand register select */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			/* unbiased rounding: add 0x8000, then if the discarded low half
			   of the raw product was exactly 0x8000, force bit 16 to zero */
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		/* plain multiply (no accumulate), per signedness combination */
		case 0x04<<13:
			/* X * Y (SS) Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		/* multiply/accumulate: MR + product */
		case 0x08<<13:
			/* MR + X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		/* multiply/subtract: MR - product */
		case 0x0c<<13:
			/* MR - X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			/* unreachable: all 16 function codes are covered above */
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MF gets the upper half of the truncated 32-bit result; no flags touched */
	m_core.mf.u = (UINT32)res >> 16;
}
| 1796 | | |
| 1797 | | |
| 1798 | | |
| 1799 | | /*=========================================================================== |
| 1800 | | MAC operations (result in MF, yop == xop) |
| 1801 | | ===========================================================================*/ |
| 1802 | | |
/* mac_op_mf_xop - same as mac_op_mf, but for the encoding where the Y operand
   is the same register as X: every product here is X * X.  MF receives bits
   31-16 of the (truncated 32-bit) result; MV is NOT updated. */
void adsp21xx_device::mac_op_mf_xop(int op)
{
	/* product shift count: 1 in fractional (1.15) mode, 0 in integer mode */
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;
	INT32 xop = (op >> 8) & 7;      /* X operand register select (also used as Y) */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))  /*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * X (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			/* unbiased rounding: add 0x8000, then if the discarded low half
			   of the raw product was exactly 0x8000, force bit 16 to zero */
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * X (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * X (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x04<<13:
			/* X * X (SS) Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * X (SU) -- X is fetched signed here */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * X (US) -- X is fetched unsigned here */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * X (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x08<<13:
			/* MR + X * X (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * X (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * X (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * X (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0c<<13:
			/* MR - X * X (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * X (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * X (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * X (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			/* unreachable: all 16 function codes are covered above */
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MF gets the upper half of the truncated 32-bit result; no flags touched */
	m_core.mf.u = (UINT32)res >> 16;
}
| 1940 | | |
| 1941 | | |
| 1942 | | |
| 1943 | | /*=========================================================================== |
| 1944 | | SHIFT operations (result in SR/SE/SB) |
| 1945 | | ===========================================================================*/ |
| 1946 | | |
/* shift_op - execute one shifter instruction whose shift count comes from the
   SE register.  Results land in SR (shift result), SE (exponent) or SB (block
   exponent) depending on the function.  Function code is in opcode bits 11-14;
   the X operand select is in bits 8-10.  (HI) functions place the 16-bit
   operand in the upper half of a 32-bit word, (LO) in the lower half; the
   ",OR" variants OR the result into SR instead of overwriting it. */
void adsp21xx_device::shift_op(int op)
{
	INT8 sc = m_core.se.s;          /* shift count from SE (signed) */
	INT32 xop = (op >> 8) & 7;      /* X operand register select */
	UINT32 res;

	switch (op & (15<<11))  /*JB*/
	{
		case 0x00<<11:
			/* LSHIFT (HI) */
			/* logical shift; positive counts shift left, negative right;
			   a magnitude of 32 or more always produces zero */
			xop = SHIFT_GETXREG_UNSIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0;
			m_core.sr.sr = res;
			break;
		case 0x01<<11:
			/* LSHIFT (HI, OR) */
			xop = SHIFT_GETXREG_UNSIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x02<<11:
			/* LSHIFT (LO) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : 0;
			m_core.sr.sr = res;
			break;
		case 0x03<<11:
			/* LSHIFT (LO, OR) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x04<<11:
			/* ASHIFT (HI) */
			/* arithmetic shift: right shifts sign-extend, and a right shift
			   of 32 or more leaves only replicated sign bits (xop >> 31) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr = res;
			break;
		case 0x05<<11:
			/* ASHIFT (HI, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr |= res;
			break;
		case 0x06<<11:
			/* ASHIFT (LO) */
			xop = SHIFT_GETXREG_SIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr = res;
			break;
		case 0x07<<11:
			/* ASHIFT (LO, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr |= res;
			break;
		case 0x08<<11:
			/* NORM (HI) */
			/* normalize: shifts right by SE (note the directions are the
			   reverse of LSHIFT); for positive counts the carry flag
			   (CFLAG = 0x08, hence << 28) is first inserted at bit 31 */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0)
			{
				xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28);
				res = xop >> (sc - 1);
			}
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr = res;
			break;
		case 0x09<<11:
			/* NORM (HI, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0)
			{
				xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28);
				res = xop >> (sc - 1);
			}
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x0a<<11:
			/* NORM (LO) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0;
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr = res;
			break;
		case 0x0b<<11:
			/* NORM (LO, OR) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0;
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x0c<<11:
			/* EXP (HI) */
			/* derive exponent: count redundant sign bits of the operand and
			   store the negated count in SE; SS records the input sign */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			res = 0;
			if (xop < 0)
			{
				SET_SS;
				while ((xop & 0x40000000) != 0) res++, xop <<= 1;
			}
			else
			{
				CLR_SS;
				/* seeding bit 15 guarantees the scan terminates for xop == 0 */
				xop |= 0x8000;
				while ((xop & 0x40000000) == 0) res++, xop <<= 1;
			}
			m_core.se.s = -res;
			break;
		case 0x0d<<11:
			/* EXP (HIX) */
			/* as EXP (HI), but if the ALU overflow flag is set the value is
			   treated as having overflowed once: SE = +1 and SS is the
			   inverse of the apparent sign */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (GET_V)
			{
				m_core.se.s = 1;
				if (xop < 0) CLR_SS;
				else SET_SS;
			}
			else
			{
				res = 0;
				if (xop < 0)
				{
					SET_SS;
					while ((xop & 0x40000000) != 0) res++, xop <<= 1;
				}
				else
				{
					CLR_SS;
					xop |= 0x8000;
					while ((xop & 0x40000000) == 0) res++, xop <<= 1;
				}
				m_core.se.s = -res;
			}
			break;
		case 0x0e<<11:
			/* EXP (LO) */
			/* continue the exponent scan into the low word; only meaningful
			   when the HI pass found the whole upper word redundant (SE == -15) */
			if (m_core.se.s == -15)
			{
				xop = SHIFT_GETXREG_SIGNED(xop);
				res = 15;
				if (GET_SS)
					while ((xop & 0x8000) != 0) res++, xop <<= 1;
				else
				{
					/* seed a 1 so the scan terminates for xop == 0 */
					xop = (xop << 1) | 1;
					while ((xop & 0x10000) == 0) res++, xop <<= 1;
				}
				m_core.se.s = -res;
			}
			break;
		case 0x0f<<11:
			/* EXPADJ */
			/* block exponent: fold this value's exponent into SB, keeping the
			   smallest (most negative) one seen.  NOTE(review): res is
			   unsigned, so -m_core.sb.s is promoted to unsigned in the
			   comparison -- matches the original code, verify intent. */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			res = 0;
			if (xop < 0)
				while ((xop & 0x40000000) != 0) res++, xop <<= 1;
			else
			{
				xop |= 0x8000;
				while ((xop & 0x40000000) == 0) res++, xop <<= 1;
			}
			if (res < -m_core.sb.s)
				m_core.sb.s = -res;
			break;
	}
}
| 2122 | | |
| 2123 | | |
| 2124 | | |
| 2125 | | /*=========================================================================== |
| 2126 | | Immediate SHIFT operations (result in SR/SE/SB) |
| 2127 | | ===========================================================================*/ |
| 2128 | | |
| 2129 | | void adsp21xx_device::shift_op_imm(int op) |
| 2130 | | { |
| 2131 | | INT8 sc = (INT8)op; |
| 2132 | | INT32 xop = (op >> 8) & 7; |
| 2133 | | UINT32 res; |
| 2134 | | |
| 2135 | | switch (op & (15<<11)) /*JB*/ |
| 2136 | | { |
| 2137 | | case 0x00<<11: |
| 2138 | | /* LSHIFT (HI) */ |
| 2139 | | xop = SHIFT_GETXREG_UNSIGNED(xop) << 16; |
| 2140 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2141 | | else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0; |
| 2142 | | m_core.sr.sr = res; |
| 2143 | | break; |
| 2144 | | case 0x01<<11: |
| 2145 | | /* LSHIFT (HI, OR) */ |
| 2146 | | xop = SHIFT_GETXREG_UNSIGNED(xop) << 16; |
| 2147 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2148 | | else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0; |
| 2149 | | m_core.sr.sr |= res; |
| 2150 | | break; |
| 2151 | | case 0x02<<11: |
| 2152 | | /* LSHIFT (LO) */ |
| 2153 | | xop = SHIFT_GETXREG_UNSIGNED(xop); |
| 2154 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2155 | | else res = (sc > -32) ? (xop >> -sc) : 0; |
| 2156 | | m_core.sr.sr = res; |
| 2157 | | break; |
| 2158 | | case 0x03<<11: |
| 2159 | | /* LSHIFT (LO, OR) */ |
| 2160 | | xop = SHIFT_GETXREG_UNSIGNED(xop); |
| 2161 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2162 | | else res = (sc > -32) ? (xop >> -sc) : 0; |
| 2163 | | m_core.sr.sr |= res; |
| 2164 | | break; |
| 2165 | | case 0x04<<11: |
| 2166 | | /* ASHIFT (HI) */ |
| 2167 | | xop = SHIFT_GETXREG_SIGNED(xop) << 16; |
| 2168 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2169 | | else res = (sc > -32) ? (xop >> -sc) : (xop >> 31); |
| 2170 | | m_core.sr.sr = res; |
| 2171 | | break; |
| 2172 | | case 0x05<<11: |
| 2173 | | /* ASHIFT (HI, OR) */ |
| 2174 | | xop = SHIFT_GETXREG_SIGNED(xop) << 16; |
| 2175 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2176 | | else res = (sc > -32) ? (xop >> -sc) : (xop >> 31); |
| 2177 | | m_core.sr.sr |= res; |
| 2178 | | break; |
| 2179 | | case 0x06<<11: |
| 2180 | | /* ASHIFT (LO) */ |
| 2181 | | xop = SHIFT_GETXREG_SIGNED(xop); |
| 2182 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2183 | | else res = (sc > -32) ? (xop >> -sc) : (xop >> 31); |
| 2184 | | m_core.sr.sr = res; |
| 2185 | | break; |
| 2186 | | case 0x07<<11: |
| 2187 | | /* ASHIFT (LO, OR) */ |
| 2188 | | xop = SHIFT_GETXREG_SIGNED(xop); |
| 2189 | | if (sc > 0) res = (sc < 32) ? (xop << sc) : 0; |
| 2190 | | else res = (sc > -32) ? (xop >> -sc) : (xop >> 31); |
| 2191 | | m_core.sr.sr |= res; |
| 2192 | | break; |
| 2193 | | case 0x08<<11: |
| 2194 | | /* NORM (HI) */ |
| 2195 | | xop = SHIFT_GETXREG_SIGNED(xop) << 16; |
| 2196 | | if (sc > 0) |
| 2197 | | { |
| 2198 | | xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28); |
| 2199 | | res = xop >> (sc - 1); |
| 2200 | | } |
| 2201 | | else res = (sc > -32) ? (xop << -sc) : 0; |
| 2202 | | m_core.sr.sr = res; |
| 2203 | | break; |
| 2204 | | case 0x09<<11: |
| 2205 | | /* NORM (HI, OR) */ |
| 2206 | | xop = SHIFT_GETXREG_SIGNED(xop) << 16; |
| 2207 | | if (sc > 0) |
| 2208 | | { |
| 2209 | | xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28); |
| 2210 | | res = xop >> (sc - 1); |
| 2211 | | } |
| 2212 | | else res = (sc > -32) ? (xop << -sc) : 0; |
| 2213 | | m_core.sr.sr |= res; |
| 2214 | | break; |
| 2215 | | case 0x0a<<11: |
| 2216 | | /* NORM (LO) */ |
| 2217 | | xop = SHIFT_GETXREG_UNSIGNED(xop); |
| 2218 | | if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0; |
| 2219 | | else res = (sc > -32) ? (xop << -sc) : 0; |
| 2220 | | m_core.sr.sr = res; |
| 2221 | | break; |
| 2222 | | case 0x0b<<11: |
| 2223 | | /* NORM (LO, OR) */ |
| 2224 | | xop = SHIFT_GETXREG_UNSIGNED(xop); |
| 2225 | | if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0; |
| 2226 | | else res = (sc > -32) ? (xop << -sc) : 0; |
| 2227 | | m_core.sr.sr |= res; |
| 2228 | | break; |
| 2229 | | } |
| 2230 | | } |
trunk/src/emu/cpu/adsp2100/2100ops.inc
| r0 | r28736 | |
/*===========================================================================
    ASTAT -- ALU/MAC status register
===========================================================================*/

/* flag bit positions implied by the CALC_* macros below:
   ZFLAG = bit 0, NFLAG = bit 1, VFLAG = bit 2, CFLAG = bit 3.
   The remaining masks (SFLAG, QFLAG, MVFLAG, SSFLAG) are defined elsewhere. */

/* extracts flags */
#define GET_SS          (m_astat & SSFLAG)
#define GET_MV          (m_astat & MVFLAG)
#define GET_Q           (m_astat & QFLAG)
#define GET_S           (m_astat & SFLAG)
#define GET_C           (m_astat & CFLAG)
#define GET_V           (m_astat & VFLAG)
#define GET_N           (m_astat & NFLAG)
#define GET_Z           (m_astat & ZFLAG)

/* clears flags */
#define CLR_SS          (m_astat &= ~SSFLAG)
#define CLR_MV          (m_astat &= ~MVFLAG)
#define CLR_Q           (m_astat &= ~QFLAG)
#define CLR_S           (m_astat &= ~SFLAG)
#define CLR_C           (m_astat &= ~CFLAG)
#define CLR_V           (m_astat &= ~VFLAG)
#define CLR_N           (m_astat &= ~NFLAG)
#define CLR_Z           (m_astat &= ~ZFLAG)

/* sets flags */
#define SET_SS          (m_astat |= SSFLAG)
#define SET_MV          (m_astat |= MVFLAG)
#define SET_Q           (m_astat |= QFLAG)
#define SET_S           (m_astat |= SFLAG)
#define SET_C           (m_astat |= CFLAG)
#define SET_V           (m_astat |= VFLAG)
#define SET_Z           (m_astat |= ZFLAG)
#define SET_N           (m_astat |= NFLAG)

/* flag clearing; must be done before setting */
/* m_astat_clear masks off the flags recomputed by the CALC_* macros
   (set up elsewhere), while preserving the others */
#define CLR_FLAGS       (m_astat &= m_astat_clear)

/* compute flags from a 16-bit result r (carry lives in result bit 16) */
#define CALC_Z(r)       (m_astat |= ((r & 0xffff) == 0))                    /* Z (bit 0): result is zero */
#define CALC_N(r)       (m_astat |= (r >> 14) & 0x02)                       /* N (bit 1): copy of result bit 15 */
#define CALC_V(s,d,r)   (m_astat |= ((s ^ d ^ r ^ (r >> 1)) >> 13) & 0x04)  /* V (bit 2): signed overflow from operand/result sign bits */
#define CALC_C(r)       (m_astat |= (r >> 13) & 0x08)                       /* C (bit 3): copy of result bit 16 */
#define CALC_C_SUB(r)   (m_astat |= (~r >> 13) & 0x08)                      /* C (bit 3): inverted borrow for subtractions */
#define CALC_NZ(r)      CLR_FLAGS; CALC_N(r); CALC_Z(r)
#define CALC_NZV(s,d,r) CLR_FLAGS; CALC_N(r); CALC_Z(r); CALC_V(s,d,r)
#define CALC_NZVC(s,d,r) CLR_FLAGS; CALC_N(r); CALC_Z(r); CALC_V(s,d,r); CALC_C(r)
#define CALC_NZVC_SUB(s,d,r) CLR_FLAGS; CALC_N(r); CALC_Z(r); CALC_V(s,d,r); CALC_C_SUB(r)

/* ADSP-218x constants */
/* interleaved 16-bit single-bit masks (1 << n) and their one's
   complements (~(1 << n)), for n = 0..15 */
static const INT32 constants[] =
{
	0x0001, 0xfffe, 0x0002, 0xfffd, 0x0004, 0xfffb, 0x0008, 0xfff7,
	0x0010, 0xffef, 0x0020, 0xffdf, 0x0040, 0xffbf, 0x0080, 0xff7f,
	0x0100, 0xfeff, 0x0200, 0xfdff, 0x0400, 0xfbff, 0x0800, 0xf7ff,
	0x1000, 0xefff, 0x2000, 0xdfff, 0x4000, 0xbfff, 0x8000, 0x7fff
};
| 57 | |
| 58 | |
| 59 | |
| 60 | /*=========================================================================== |
| 61 | MSTAT -- ALU/MAC control register |
| 62 | ===========================================================================*/ |
| 63 | |
| 64 | /* flag definitions */ |
| 65 | #define MSTAT_BANK 0x01 /* register bank select */ |
| 66 | #define MSTAT_REVERSE 0x02 /* bit-reverse addressing enable (DAG1) */ |
| 67 | #define MSTAT_STICKYV 0x04 /* sticky ALU overflow enable */ |
| 68 | #define MSTAT_SATURATE 0x08 /* AR saturation mode enable */ |
| 69 | #define MSTAT_INTEGER 0x10 /* MAC result placement; 0=fractional, 1=integer */ |
| 70 | #define MSTAT_TIMER 0x20 /* timer enable */ |
| 71 | #define MSTAT_GOMODE 0x40 /* go mode enable */ |
| 72 | |
| 73 | /* you must call this in order to change MSTAT */ |
| 74 | inline void adsp21xx_device::update_mstat() |
| 75 | { |
| 76 | if ((m_mstat ^ m_mstat_prev) & MSTAT_BANK) |
| 77 | { |
| 78 | adsp_core temp = m_core; |
| 79 | m_core = m_alt; |
| 80 | m_alt = temp; |
| 81 | } |
| 82 | if ((m_mstat ^ m_mstat_prev) & MSTAT_TIMER) |
| 83 | if (m_timer_fired != NULL) |
| 84 | (*m_timer_fired)(*this, (m_mstat & MSTAT_TIMER) != 0); |
| 85 | if (m_mstat & MSTAT_STICKYV) |
| 86 | m_astat_clear = ~(CFLAG | NFLAG | ZFLAG); |
| 87 | else |
| 88 | m_astat_clear = ~(CFLAG | VFLAG | NFLAG | ZFLAG); |
| 89 | m_mstat_prev = m_mstat; |
| 90 | } |
| 91 | |
| 92 | |
| 93 | /*=========================================================================== |
| 94 | SSTAT -- stack status register |
| 95 | ===========================================================================*/ |
| 96 | |
| 97 | /* flag definitions */ |
| 98 | #define PC_EMPTY 0x01 /* PC stack empty */ |
| 99 | #define PC_OVER 0x02 /* PC stack overflow */ |
| 100 | #define COUNT_EMPTY 0x04 /* count stack empty */ |
| 101 | #define COUNT_OVER 0x08 /* count stack overflow */ |
| 102 | #define STATUS_EMPTY 0x10 /* status stack empty */ |
| 103 | #define STATUS_OVER 0x20 /* status stack overflow */ |
| 104 | #define LOOP_EMPTY 0x40 /* loop stack empty */ |
| 105 | #define LOOP_OVER 0x80 /* loop stack overflow */ |
| 106 | |
| 107 | |
| 108 | |
| 109 | /*=========================================================================== |
| 110 | PC stack handlers |
| 111 | ===========================================================================*/ |
| 112 | |
| 113 | inline UINT32 adsp21xx_device::pc_stack_top() |
| 114 | { |
| 115 | if (m_pc_sp > 0) |
| 116 | return m_pc_stack[m_pc_sp - 1]; |
| 117 | else |
| 118 | return m_pc_stack[0]; |
| 119 | } |
| 120 | |
| 121 | inline void adsp21xx_device::set_pc_stack_top(UINT32 top) |
| 122 | { |
| 123 | if (m_pc_sp > 0) |
| 124 | m_pc_stack[m_pc_sp - 1] = top; |
| 125 | else |
| 126 | m_pc_stack[0] = top; |
| 127 | } |
| 128 | |
| 129 | inline void adsp21xx_device::pc_stack_push() |
| 130 | { |
| 131 | if (m_pc_sp < PC_STACK_DEPTH) |
| 132 | { |
| 133 | m_pc_stack[m_pc_sp] = m_pc; |
| 134 | m_pc_sp++; |
| 135 | m_sstat &= ~PC_EMPTY; |
| 136 | } |
| 137 | else |
| 138 | m_sstat |= PC_OVER; |
| 139 | } |
| 140 | |
| 141 | inline void adsp21xx_device::pc_stack_push_val(UINT32 val) |
| 142 | { |
| 143 | if (m_pc_sp < PC_STACK_DEPTH) |
| 144 | { |
| 145 | m_pc_stack[m_pc_sp] = val; |
| 146 | m_pc_sp++; |
| 147 | m_sstat &= ~PC_EMPTY; |
| 148 | } |
| 149 | else |
| 150 | m_sstat |= PC_OVER; |
| 151 | } |
| 152 | |
| 153 | inline void adsp21xx_device::pc_stack_pop() |
| 154 | { |
| 155 | if (m_pc_sp > 0) |
| 156 | { |
| 157 | m_pc_sp--; |
| 158 | if (m_pc_sp == 0) |
| 159 | m_sstat |= PC_EMPTY; |
| 160 | } |
| 161 | m_pc = m_pc_stack[m_pc_sp]; |
| 162 | } |
| 163 | |
| 164 | inline UINT32 adsp21xx_device::pc_stack_pop_val() |
| 165 | { |
| 166 | if (m_pc_sp > 0) |
| 167 | { |
| 168 | m_pc_sp--; |
| 169 | if (m_pc_sp == 0) |
| 170 | m_sstat |= PC_EMPTY; |
| 171 | } |
| 172 | return m_pc_stack[m_pc_sp]; |
| 173 | } |
| 174 | |
| 175 | |
| 176 | /*=========================================================================== |
| 177 | CNTR stack handlers |
| 178 | ===========================================================================*/ |
| 179 | |
| 180 | inline UINT32 adsp21xx_device::cntr_stack_top() |
| 181 | { |
| 182 | if (m_cntr_sp > 0) |
| 183 | return m_cntr_stack[m_cntr_sp - 1]; |
| 184 | else |
| 185 | return m_cntr_stack[0]; |
| 186 | } |
| 187 | |
| 188 | inline void adsp21xx_device::cntr_stack_push() |
| 189 | { |
| 190 | if (m_cntr_sp < CNTR_STACK_DEPTH) |
| 191 | { |
| 192 | m_cntr_stack[m_cntr_sp] = m_cntr; |
| 193 | m_cntr_sp++; |
| 194 | m_sstat &= ~COUNT_EMPTY; |
| 195 | } |
| 196 | else |
| 197 | m_sstat |= COUNT_OVER; |
| 198 | } |
| 199 | |
| 200 | inline void adsp21xx_device::cntr_stack_pop() |
| 201 | { |
| 202 | if (m_cntr_sp > 0) |
| 203 | { |
| 204 | m_cntr_sp--; |
| 205 | if (m_cntr_sp == 0) |
| 206 | m_sstat |= COUNT_EMPTY; |
| 207 | } |
| 208 | m_cntr = m_cntr_stack[m_cntr_sp]; |
| 209 | } |
| 210 | |
| 211 | |
| 212 | /*=========================================================================== |
| 213 | LOOP stack handlers |
| 214 | ===========================================================================*/ |
| 215 | |
| 216 | inline UINT32 adsp21xx_device::loop_stack_top() |
| 217 | { |
| 218 | if (m_loop_sp > 0) |
| 219 | return m_loop_stack[m_loop_sp - 1]; |
| 220 | else |
| 221 | return m_loop_stack[0]; |
| 222 | } |
| 223 | |
| 224 | inline void adsp21xx_device::loop_stack_push(UINT32 value) |
| 225 | { |
| 226 | if (m_loop_sp < LOOP_STACK_DEPTH) |
| 227 | { |
| 228 | m_loop_stack[m_loop_sp] = value; |
| 229 | m_loop_sp++; |
| 230 | m_loop = value >> 4; |
| 231 | m_loop_condition = value & 15; |
| 232 | m_sstat &= ~LOOP_EMPTY; |
| 233 | } |
| 234 | else |
| 235 | m_sstat |= LOOP_OVER; |
| 236 | } |
| 237 | |
| 238 | inline void adsp21xx_device::loop_stack_pop() |
| 239 | { |
| 240 | if (m_loop_sp > 0) |
| 241 | { |
| 242 | m_loop_sp--; |
| 243 | if (m_loop_sp == 0) |
| 244 | { |
| 245 | m_loop = 0xffff; |
| 246 | m_loop_condition = 0; |
| 247 | m_sstat |= LOOP_EMPTY; |
| 248 | } |
| 249 | else |
| 250 | { |
| 251 | m_loop = m_loop_stack[m_loop_sp -1] >> 4; |
| 252 | m_loop_condition = m_loop_stack[m_loop_sp - 1] & 15; |
| 253 | } |
| 254 | } |
| 255 | } |
| 256 | |
| 257 | |
| 258 | /*=========================================================================== |
| 259 | STAT stack handlers |
| 260 | ===========================================================================*/ |
| 261 | |
| 262 | inline void adsp21xx_device::stat_stack_push() |
| 263 | { |
| 264 | if (m_stat_sp < STAT_STACK_DEPTH) |
| 265 | { |
| 266 | m_stat_stack[m_stat_sp][0] = m_mstat; |
| 267 | m_stat_stack[m_stat_sp][1] = m_imask; |
| 268 | m_stat_stack[m_stat_sp][2] = m_astat; |
| 269 | m_stat_sp++; |
| 270 | m_sstat &= ~STATUS_EMPTY; |
| 271 | } |
| 272 | else |
| 273 | m_sstat |= STATUS_OVER; |
| 274 | } |
| 275 | |
inline void adsp21xx_device::stat_stack_pop()
{
	// restore MSTAT/IMASK/ASTAT from the status stack; popping the last
	// entry raises the empty flag, and popping an empty stack re-reads slot 0
	if (m_stat_sp > 0)
	{
		m_stat_sp--;
		if (m_stat_sp == 0)
			m_sstat |= STATUS_EMPTY;
	}
	m_mstat = m_stat_stack[m_stat_sp][0];
	update_mstat();     // apply MSTAT side effects (bank swap, sticky-V mask) before continuing
	m_imask = m_stat_stack[m_stat_sp][1];
	m_astat = m_stat_stack[m_stat_sp][2];
	check_irqs();       // the restored IMASK may unmask a pending interrupt
}
| 290 | |
| 291 | |
| 292 | |
| 293 | /*=========================================================================== |
| 294 | condition code checking |
| 295 | ===========================================================================*/ |
| 296 | |
// gcc doesn't want to inline this, so we use a macro
// conditions other than 14 are a pure table lookup keyed on (condition, ASTAT);
// condition 14 has side effects (decrements CNTR) and goes through slow_condition()
#define condition(c) (((c) != 14) ? (m_condition_table[((c) << 8) | m_astat]) : slow_condition())

/*
inline int adsp21xx_device::condition(int c)
{
    if (c != 14)
        return m_condition_table[((c) << 8) | m_astat];
    else
        return slow_condition(c);   // NOTE(review): stale -- slow_condition() now takes no argument
}
*/
| 309 | |
| 310 | int adsp21xx_device::slow_condition() |
| 311 | { |
| 312 | if ((INT32)--m_cntr > 0) |
| 313 | return 1; |
| 314 | else |
| 315 | { |
| 316 | cntr_stack_pop(); |
| 317 | return 0; |
| 318 | } |
| 319 | } |
| 320 | |
| 321 | |
| 322 | |
| 323 | /*=========================================================================== |
| 324 | register writing |
| 325 | ===========================================================================*/ |
| 326 | |
inline void adsp21xx_device::update_i(int which)
{
	// recompute the circular-buffer base address after a write to I[which]
	m_base[which] = m_i[which] & m_lmask[which];
}
| 331 | |
inline void adsp21xx_device::update_l(int which)
{
	// recompute the length mask (from the precomputed table, L is 14 bits)
	// and the circular-buffer base after a write to L[which]
	m_lmask[which] = m_mask_table[m_l[which] & 0x3fff];
	m_base[which] = m_i[which] & m_lmask[which];
}
| 337 | |
void adsp21xx_device::write_reg0(int regnum, INT32 val)
{
	// writes to the group-0 (computational) registers of the active core set
	switch (regnum)
	{
		case 0x00: m_core.ax0.s = val; break;
		case 0x01: m_core.ax1.s = val; break;
		case 0x02: m_core.mx0.s = val; break;
		case 0x03: m_core.mx1.s = val; break;
		case 0x04: m_core.ay0.s = val; break;
		case 0x05: m_core.ay1.s = val; break;
		case 0x06: m_core.my0.s = val; break;
		case 0x07: m_core.my1.s = val; break;
		case 0x08: m_core.si.s = val; break;
		case 0x09: m_core.se.s = (INT8)val; break;      // SE: sign-extended from 8 bits
		case 0x0a: m_core.ar.s = val; break;
		case 0x0b: m_core.mr.mrx.mr0.s = val; break;
		case 0x0c: m_core.mr.mrx.mr1.s = val; m_core.mr.mrx.mr2.s = (INT16)val >> 15; break;    // writing MR1 sign-extends into MR2
		case 0x0d: m_core.mr.mrx.mr2.s = (INT8)val; break;      // MR2: sign-extended from 8 bits
		case 0x0e: m_core.sr.srx.sr0.s = val; break;
		case 0x0f: m_core.sr.srx.sr1.s = val; break;
	}
}
| 360 | |
| 361 | void adsp21xx_device::write_reg1(int regnum, INT32 val) |
| 362 | { |
| 363 | int index = regnum & 3; |
| 364 | switch (regnum >> 2) |
| 365 | { |
| 366 | case 0: |
| 367 | m_i[index] = val & 0x3fff; |
| 368 | update_i(index); |
| 369 | break; |
| 370 | |
| 371 | case 1: |
| 372 | m_m[index] = (INT32)(val << 18) >> 18; |
| 373 | break; |
| 374 | |
| 375 | case 2: |
| 376 | m_l[index] = val & 0x3fff; |
| 377 | update_l(index); |
| 378 | break; |
| 379 | |
| 380 | case 3: |
| 381 | logerror("ADSP %04x: Writing to an invalid register!\n", m_ppc); |
| 382 | break; |
| 383 | } |
| 384 | } |
| 385 | |
| 386 | void adsp21xx_device::write_reg2(int regnum, INT32 val) |
| 387 | { |
| 388 | int index = 4 + (regnum & 3); |
| 389 | switch (regnum >> 2) |
| 390 | { |
| 391 | case 0: |
| 392 | m_i[index] = val & 0x3fff; |
| 393 | update_i(index); |
| 394 | break; |
| 395 | |
| 396 | case 1: |
| 397 | m_m[index] = (INT32)(val << 18) >> 18; |
| 398 | break; |
| 399 | |
| 400 | case 2: |
| 401 | m_l[index] = val & 0x3fff; |
| 402 | update_l(index); |
| 403 | break; |
| 404 | |
| 405 | case 3: |
| 406 | logerror("ADSP %04x: Writing to an invalid register!\n", m_ppc); |
| 407 | break; |
| 408 | } |
| 409 | } |
| 410 | |
void adsp21xx_device::write_reg3(int regnum, INT32 val)
{
	// writes to the group-3 (status/special) registers; several writes have
	// side effects: IRQ re-evaluation, counter-stack push, PC-stack push
	switch (regnum)
	{
		case 0x00: m_astat = val & 0x00ff; break;                       // ASTAT: 8 writable flag bits
		case 0x01: m_mstat = val & m_mstat_mask; update_mstat(); break; // MSTAT: side effects applied immediately
		case 0x03: m_imask = val & m_imask_mask; check_irqs(); break;   // IMASK: may unmask a pending IRQ
		case 0x04: m_icntl = val & 0x001f; check_irqs(); break;         // ICNTL: 5 writable bits
		case 0x05: cntr_stack_push(); m_cntr = val & 0x3fff; break;     // CNTR: previous value pushed first
		case 0x06: m_core.sb.s = (INT32)(val << 27) >> 27; break;       // SB: signed 5-bit (shift pair sign-extends)
		case 0x07: m_px = val; break;
		case 0x09: if (m_sport_tx_callback != NULL) (*m_sport_tx_callback)(*this, 0, val); break;   // serial TX, port 0
		case 0x0b: if (m_sport_tx_callback != NULL) (*m_sport_tx_callback)(*this, 1, val); break;   // serial TX, port 1
		case 0x0c:
			// IFC: interrupt force/clear; the latch bit layout differs
			// between the 2181 and the earlier family members
			m_ifc = val;
			if (m_chip_type >= CHIP_TYPE_ADSP2181)
			{
				/* clear timer */
				if (val & 0x0002) m_irq_latch[ADSP2181_IRQ0] = 0;
				if (val & 0x0004) m_irq_latch[ADSP2181_IRQ1] = 0;
				/* clear BDMA */
				if (val & 0x0010) m_irq_latch[ADSP2181_IRQE] = 0;
				if (val & 0x0020) m_irq_latch[ADSP2181_SPORT0_RX] = 0;
				if (val & 0x0040) m_irq_latch[ADSP2181_SPORT0_TX] = 0;
				if (val & 0x0080) m_irq_latch[ADSP2181_IRQ2] = 0;
				/* force timer */
				if (val & 0x0200) m_irq_latch[ADSP2181_IRQ0] = 1;
				if (val & 0x0400) m_irq_latch[ADSP2181_IRQ1] = 1;
				/* force BDMA */
				if (val & 0x1000) m_irq_latch[ADSP2181_IRQE] = 1;
				if (val & 0x2000) m_irq_latch[ADSP2181_SPORT0_RX] = 1;
				if (val & 0x4000) m_irq_latch[ADSP2181_SPORT0_TX] = 1;
				if (val & 0x8000) m_irq_latch[ADSP2181_IRQ2] = 1;
			}
			else
			{
				/* clear timer */
				if (val & 0x002) m_irq_latch[ADSP2101_IRQ0] = 0;
				if (val & 0x004) m_irq_latch[ADSP2101_IRQ1] = 0;
				if (val & 0x008) m_irq_latch[ADSP2101_SPORT0_RX] = 0;
				if (val & 0x010) m_irq_latch[ADSP2101_SPORT0_TX] = 0;
				if (val & 0x020) m_irq_latch[ADSP2101_IRQ2] = 0;
				/* set timer */
				if (val & 0x080) m_irq_latch[ADSP2101_IRQ0] = 1;
				if (val & 0x100) m_irq_latch[ADSP2101_IRQ1] = 1;
				if (val & 0x200) m_irq_latch[ADSP2101_SPORT0_RX] = 1;
				if (val & 0x400) m_irq_latch[ADSP2101_SPORT0_TX] = 1;
				if (val & 0x800) m_irq_latch[ADSP2101_IRQ2] = 1;
			}
			check_irqs();
			break;
		case 0x0d: m_cntr = val & 0x3fff; break;                        // CNTR without a stack push
		case 0x0f: pc_stack_push_val(val & 0x3fff); break;              // direct push onto the PC stack
		default: logerror("ADSP %04x: Writing to an invalid register!\n", m_ppc); break;
	}
}
| 467 | |
| 468 | #define WRITE_REG(adsp,grp,reg,val) ((this->*wr_reg[grp][reg])(val)) |
| 469 | |
| 470 | |
| 471 | |
| 472 | /*=========================================================================== |
| 473 | register reading |
| 474 | ===========================================================================*/ |
| 475 | |
INT32 adsp21xx_device::read_reg0(int regnum)
{
	// group-0 reads go through a precomputed pointer table
	return *m_read0_ptr[regnum];
}
| 480 | |
INT32 adsp21xx_device::read_reg1(int regnum)
{
	// group-1 (DAG1) reads go through a precomputed pointer table
	return *m_read1_ptr[regnum];
}
| 485 | |
INT32 adsp21xx_device::read_reg2(int regnum)
{
	// group-2 (DAG2) reads go through a precomputed pointer table
	return *m_read2_ptr[regnum];
}
| 490 | |
| 491 | INT32 adsp21xx_device::read_reg3(int regnum) |
| 492 | { |
| 493 | switch (regnum) |
| 494 | { |
| 495 | case 0x00: return m_astat; |
| 496 | case 0x01: return m_mstat; |
| 497 | case 0x02: return m_sstat; |
| 498 | case 0x03: return m_imask; |
| 499 | case 0x04: return m_icntl; |
| 500 | case 0x05: return m_cntr; |
| 501 | case 0x06: return m_core.sb.s; |
| 502 | case 0x07: return m_px; |
| 503 | case 0x08: if (m_sport_rx_callback) return (*m_sport_rx_callback)(*this, 0); else return 0; |
| 504 | case 0x0a: if (m_sport_rx_callback) return (*m_sport_rx_callback)(*this, 1); else return 0; |
| 505 | case 0x0f: return pc_stack_pop_val(); |
| 506 | default: logerror("ADSP %04x: Reading from an invalid register!\n", m_ppc); return 0; |
| 507 | } |
| 508 | } |
| 509 | |
| 510 | |
| 511 | |
| 512 | /*=========================================================================== |
| 513 | Modulus addressing logic |
| 514 | ===========================================================================*/ |
| 515 | |
| 516 | inline void adsp21xx_device::modify_address(UINT32 ireg, UINT32 mreg) |
| 517 | { |
| 518 | UINT32 base = m_base[ireg]; |
| 519 | UINT32 i = m_i[ireg]; |
| 520 | UINT32 l = m_l[ireg]; |
| 521 | |
| 522 | i += m_m[mreg]; |
| 523 | if (i < base) i += l; |
| 524 | else if (i >= base + l) i -= l; |
| 525 | m_i[ireg] = i; |
| 526 | } |
| 527 | |
| 528 | |
| 529 | |
| 530 | /*=========================================================================== |
| 531 | Data memory accessors |
| 532 | ===========================================================================*/ |
| 533 | |
| 534 | inline void adsp21xx_device::data_write_dag1(UINT32 op, INT32 val) |
| 535 | { |
| 536 | UINT32 ireg = (op >> 2) & 3; |
| 537 | UINT32 mreg = op & 3; |
| 538 | UINT32 base = m_base[ireg]; |
| 539 | UINT32 i = m_i[ireg]; |
| 540 | UINT32 l = m_l[ireg]; |
| 541 | |
| 542 | if ( m_mstat & MSTAT_REVERSE ) |
| 543 | { |
| 544 | UINT32 ir = m_reverse_table[ i & 0x3fff ]; |
| 545 | data_write(ir, val); |
| 546 | } |
| 547 | else |
| 548 | data_write(i, val); |
| 549 | |
| 550 | i += m_m[mreg]; |
| 551 | if (i < base) i += l; |
| 552 | else if (i >= base + l) i -= l; |
| 553 | m_i[ireg] = i; |
| 554 | } |
| 555 | |
| 556 | |
| 557 | inline UINT32 adsp21xx_device::data_read_dag1(UINT32 op) |
| 558 | { |
| 559 | UINT32 ireg = (op >> 2) & 3; |
| 560 | UINT32 mreg = op & 3; |
| 561 | UINT32 base = m_base[ireg]; |
| 562 | UINT32 i = m_i[ireg]; |
| 563 | UINT32 l = m_l[ireg]; |
| 564 | UINT32 res; |
| 565 | |
| 566 | if (m_mstat & MSTAT_REVERSE) |
| 567 | { |
| 568 | UINT32 ir = m_reverse_table[i & 0x3fff]; |
| 569 | res = data_read(ir); |
| 570 | } |
| 571 | else |
| 572 | res = data_read(i); |
| 573 | |
| 574 | i += m_m[mreg]; |
| 575 | if (i < base) i += l; |
| 576 | else if (i >= base + l) i -= l; |
| 577 | m_i[ireg] = i; |
| 578 | |
| 579 | return res; |
| 580 | } |
| 581 | |
| 582 | inline void adsp21xx_device::data_write_dag2(UINT32 op, INT32 val) |
| 583 | { |
| 584 | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 585 | UINT32 mreg = 4 + (op & 3); |
| 586 | UINT32 base = m_base[ireg]; |
| 587 | UINT32 i = m_i[ireg]; |
| 588 | UINT32 l = m_l[ireg]; |
| 589 | |
| 590 | data_write(i, val); |
| 591 | |
| 592 | i += m_m[mreg]; |
| 593 | if (i < base) i += l; |
| 594 | else if (i >= base + l) i -= l; |
| 595 | m_i[ireg] = i; |
| 596 | } |
| 597 | |
| 598 | |
| 599 | inline UINT32 adsp21xx_device::data_read_dag2(UINT32 op) |
| 600 | { |
| 601 | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 602 | UINT32 mreg = 4 + (op & 3); |
| 603 | UINT32 base = m_base[ireg]; |
| 604 | UINT32 i = m_i[ireg]; |
| 605 | UINT32 l = m_l[ireg]; |
| 606 | |
| 607 | UINT32 res = data_read(i); |
| 608 | |
| 609 | i += m_m[mreg]; |
| 610 | if (i < base) i += l; |
| 611 | else if (i >= base + l) i -= l; |
| 612 | m_i[ireg] = i; |
| 613 | |
| 614 | return res; |
| 615 | } |
| 616 | |
| 617 | /*=========================================================================== |
| 618 | Program memory accessors |
| 619 | ===========================================================================*/ |
| 620 | |
| 621 | inline void adsp21xx_device::pgm_write_dag2(UINT32 op, INT32 val) |
| 622 | { |
| 623 | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 624 | UINT32 mreg = 4 + (op & 3); |
| 625 | UINT32 base = m_base[ireg]; |
| 626 | UINT32 i = m_i[ireg]; |
| 627 | UINT32 l = m_l[ireg]; |
| 628 | |
| 629 | program_write(i, (val << 8) | m_px); |
| 630 | |
| 631 | i += m_m[mreg]; |
| 632 | if (i < base) i += l; |
| 633 | else if (i >= base + l) i -= l; |
| 634 | m_i[ireg] = i; |
| 635 | } |
| 636 | |
| 637 | |
| 638 | inline UINT32 adsp21xx_device::pgm_read_dag2(UINT32 op) |
| 639 | { |
| 640 | UINT32 ireg = 4 + ((op >> 2) & 3); |
| 641 | UINT32 mreg = 4 + (op & 3); |
| 642 | UINT32 base = m_base[ireg]; |
| 643 | UINT32 i = m_i[ireg]; |
| 644 | UINT32 l = m_l[ireg]; |
| 645 | UINT32 res; |
| 646 | |
| 647 | res = program_read(i); |
| 648 | m_px = res; |
| 649 | res >>= 8; |
| 650 | |
| 651 | i += m_m[mreg]; |
| 652 | if (i < base) i += l; |
| 653 | else if (i >= base + l) i -= l; |
| 654 | m_i[ireg] = i; |
| 655 | |
| 656 | return res; |
| 657 | } |
| 658 | |
| 659 | |
| 660 | |
| 661 | /*=========================================================================== |
| 662 | register reading |
| 663 | ===========================================================================*/ |
| 664 | |
| 665 | #define ALU_GETXREG_UNSIGNED(x) (*(UINT16 *)m_alu_xregs[x]) |
| 666 | #define ALU_GETYREG_UNSIGNED(y) (*(UINT16 *)m_alu_yregs[y]) |
| 667 | |
| 668 | #define MAC_GETXREG_UNSIGNED(x) (*(UINT16 *)m_mac_xregs[x]) |
| 669 | #define MAC_GETXREG_SIGNED(x) (*( INT16 *)m_mac_xregs[x]) |
| 670 | #define MAC_GETYREG_UNSIGNED(y) (*(UINT16 *)m_mac_yregs[y]) |
| 671 | #define MAC_GETYREG_SIGNED(y) (*( INT16 *)m_mac_yregs[y]) |
| 672 | |
| 673 | #define SHIFT_GETXREG_UNSIGNED(x) (*(UINT16 *)m_shift_xregs[x]) |
| 674 | #define SHIFT_GETXREG_SIGNED(x) (*( INT16 *)m_shift_xregs[x]) |
| 675 | |
| 676 | |
| 677 | |
| 678 | /*=========================================================================== |
| 679 | ALU operations (result in AR) |
| 680 | ===========================================================================*/ |
| 681 | |
void adsp21xx_device::alu_op_ar(int op)
{
	/* execute one ALU operation with register operands; the result goes
	   to AR, with optional saturation when MSTAT_SATURATE is enabled */
	INT32 xop = (op >> 8) & 7;      /* X operand selector, replaced by its value below */
	INT32 yop = (op >> 11) & 3;     /* Y operand selector, replaced by its value below */
	INT32 res;

	switch (op & (15<<13)) /*JB*/
	{
		case 0x00<<13:
			/* Y                 Clear when y = 0 */
			res = ALU_GETYREG_UNSIGNED(yop);
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1             PASS 1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop + 1;
			CALC_NZ(res);
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			yop += GET_C >> 3;      /* carry flag lives in bit 3 of ASTAT */
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y             X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = ALU_GETYREG_UNSIGNED(yop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = -yop;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1     X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1             PASS -1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - 1;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X             -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1     -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			if (xop == 0x8000) SET_N, SET_V;    /* 0x8000 has no positive counterpart */
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0; /* just to keep the compiler happy */
			break;
	}

	/* saturate: on overflow clamp AR based on the carry direction */
	if ((m_mstat & MSTAT_SATURATE) && GET_V) res = GET_C ? -32768 : 32767;

	/* set the final value */
	m_core.ar.u = res;
}
| 813 | |
| 814 | |
| 815 | |
| 816 | /*=========================================================================== |
| 817 | ALU operations (result in AR, constant yop) |
| 818 | ===========================================================================*/ |
| 819 | |
void adsp21xx_device::alu_op_ar_const(int op)
{
	/* execute one ALU operation where Y is an immediate from the
	   ADSP-218x constants table; the result goes to AR, with optional
	   saturation when MSTAT_SATURATE is enabled */
	INT32 xop = (op >> 8) & 7;      /* X operand selector, replaced by its value below */
	INT32 yop = constants[((op >> 5) & 0x07) | ((op >> 8) & 0x18)];     /* immediate Y value */
	INT32 res;

	switch (op & (15<<13)) /*JB*/
	{
		case 0x00<<13:
			/* Y                 Clear when y = 0 */
			res = yop;
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1             PASS 1 when y = 0 */
			res = yop + 1;
			CALC_NZ(res);
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop += GET_C >> 3;      /* carry flag lives in bit 3 of ASTAT */
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y             X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = yop ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			res = -yop;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1     X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1             PASS -1 when y = 0 */
			res = yop - 1;
			CALC_NZ(res);
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X             -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1     -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			if (xop == 0x8000) SET_N, SET_V;    /* 0x8000 has no positive counterpart */
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0; /* just to keep the compiler happy */
			break;
	}

	/* saturate: on overflow clamp AR based on the carry direction */
	if ((m_mstat & MSTAT_SATURATE) && GET_V) res = GET_C ? -32768 : 32767;

	/* set the final value */
	m_core.ar.u = res;
}
| 939 | |
| 940 | |
| 941 | |
| 942 | /*=========================================================================== |
| 943 | ALU operations (result in AF) |
| 944 | ===========================================================================*/ |
| 945 | |
/*
    Execute one ALU operation and write the 16-bit result to the AF
    (ALU feedback) register.  'op' is the raw opcode word: bits 13-16
    select the ALU function, bits 8-10 the X operand register index and
    bits 11-12 the Y operand register index.

    N/Z flags are derived from the result via CALC_NZ; the add/subtract
    forms use CALC_NZVC / CALC_NZVC_SUB to derive V and C from the
    operands as well.  NOTE(review): unlike the AR variant of this
    routine, no ALU-saturation handling is applied to the AF result.
*/
void adsp21xx_device::alu_op_af(int op)
{
	INT32 xop = (op >> 8) & 7;      /* X operand register index */
	INT32 yop = (op >> 11) & 3;     /* Y operand register index */
	INT32 res;

	switch (op & (15<<13))	/*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = ALU_GETYREG_UNSIGNED(yop);
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop + 1;
			CALC_NZ(res);
			/* V: incrementing max positive 16-bit value; C: 0xffff wraps to 0 */
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			yop += GET_C >> 3;      /* GET_C >> 3 extracts the carry flag as 0/1 */
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = ALU_GETYREG_UNSIGNED(yop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = -yop;
			CALC_NZ(res);
			/* V: -(-32768) not representable in 16 bits; C: negating zero */
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - 1;
			CALC_NZ(res);
			/* V: decrementing min negative 16-bit value; C: borrowing from zero */
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			/* ABS(0x8000) is not representable: flag both negative and overflow */
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	m_core.af.u = res;
}
| 1074 | |
| 1075 | |
| 1076 | |
| 1077 | /*=========================================================================== |
| 1078 | ALU operations (result in AF, constant yop) |
| 1079 | ===========================================================================*/ |
| 1080 | |
/*
    Execute one ALU operation with an immediate constant as the Y
    operand, writing the 16-bit result to the AF (ALU feedback)
    register.  The constant is fetched from the 'constants' table via a
    5-bit index assembled from opcode bits 5-7 (low three bits) and bits
    11-12 (upper two bits).  Otherwise identical in structure and flag
    behavior to alu_op_af.
*/
void adsp21xx_device::alu_op_af_const(int op)
{
	INT32 xop = (op >> 8) & 7;      /* X operand register index */
	INT32 yop = constants[((op >> 5) & 0x07) | ((op >> 8) & 0x18)];     /* immediate Y constant */
	INT32 res;

	switch (op & (15<<13))	/*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = yop;
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			res = yop + 1;
			CALC_NZ(res);
			/* V: incrementing max positive 16-bit value; C: 0xffff wraps to 0 */
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop += GET_C >> 3;      /* GET_C >> 3 extracts the carry flag as 0/1 */
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = yop ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			res = -yop;
			CALC_NZ(res);
			/* V: -(-32768) not representable in 16 bits; C: negating zero */
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			res = yop - 1;
			CALC_NZ(res);
			/* V: decrementing min negative 16-bit value; C: borrowing from zero */
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			/* ABS(0x8000) is not representable: flag both negative and overflow */
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	m_core.af.u = res;
}
| 1197 | |
| 1198 | |
| 1199 | |
| 1200 | /*=========================================================================== |
| 1201 | ALU operations (no result) |
| 1202 | ===========================================================================*/ |
| 1203 | |
/*
    Execute one ALU operation for its flag effects only: the result is
    computed into 'res' to drive the CALC_NZ / CALC_NZVC flag macros but
    is never written back to AR or AF.  Operand decoding and per-case
    flag behavior match alu_op_af exactly.

    Note: the switch has no default case; all sixteen values of the
    4-bit function field (opcode bits 13-16) are covered above, so every
    path assigns 'res' before the flag macros use it.
*/
void adsp21xx_device::alu_op_none(int op)
{
	INT32 xop = (op >> 8) & 7;      /* X operand register index */
	INT32 yop = (op >> 11) & 3;     /* Y operand register index */
	INT32 res;

	switch (op & (15<<13))	/*JB*/
	{
		case 0x00<<13:
			/* Y                Clear when y = 0 */
			res = ALU_GETYREG_UNSIGNED(yop);
			CALC_NZ(res);
			break;
		case 0x01<<13:
			/* Y + 1            PASS 1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop + 1;
			CALC_NZ(res);
			/* V: incrementing max positive 16-bit value; C: 0xffff wraps to 0 */
			if (yop == 0x7fff) SET_V;
			else if (yop == 0xffff) SET_C;
			break;
		case 0x02<<13:
			/* X + Y + C */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			yop += GET_C >> 3;      /* GET_C >> 3 extracts the carry flag as 0/1 */
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x03<<13:
			/* X + Y            X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop + yop;
			CALC_NZVC(xop, yop, res);
			break;
		case 0x04<<13:
			/* NOT Y */
			res = ALU_GETYREG_UNSIGNED(yop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x05<<13:
			/* -Y */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = -yop;
			CALC_NZ(res);
			/* V: -(-32768) not representable in 16 bits; C: negating zero */
			if (yop == 0x8000) SET_V;
			if (yop == 0x0000) SET_C;
			break;
		case 0x06<<13:
			/* X - Y + C - 1    X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x07<<13:
			/* X - Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop - yop;
			CALC_NZVC_SUB(xop, yop, res);
			break;
		case 0x08<<13:
			/* Y - 1            PASS -1 when y = 0 */
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - 1;
			CALC_NZ(res);
			/* V: decrementing min negative 16-bit value; C: borrowing from zero */
			if (yop == 0x8000) SET_V;
			else if (yop == 0x0000) SET_C;
			break;
		case 0x09<<13:
			/* Y - X            -X when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0a<<13:
			/* Y - X + C - 1    -X + C - 1 when y = 0 */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = yop - xop + (GET_C >> 3) - 1;
			CALC_NZVC_SUB(yop, xop, res);
			break;
		case 0x0b<<13:
			/* NOT X */
			res = ALU_GETXREG_UNSIGNED(xop) ^ 0xffff;
			CALC_NZ(res);
			break;
		case 0x0c<<13:
			/* X AND Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop & yop;
			CALC_NZ(res);
			break;
		case 0x0d<<13:
			/* X OR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop | yop;
			CALC_NZ(res);
			break;
		case 0x0e<<13:
			/* X XOR Y */
			xop = ALU_GETXREG_UNSIGNED(xop);
			yop = ALU_GETYREG_UNSIGNED(yop);
			res = xop ^ yop;
			CALC_NZ(res);
			break;
		case 0x0f<<13:
			/* ABS X */
			xop = ALU_GETXREG_UNSIGNED(xop);
			res = (xop & 0x8000) ? -xop : xop;
			CLR_FLAGS;
			if (xop == 0) SET_Z;
			/* ABS(0x8000) is not representable: flag both negative and overflow */
			if (xop == 0x8000) SET_N, SET_V;
			if (xop & 0x8000) SET_S;
			break;
	}
}
| 1326 | |
| 1327 | |
| 1328 | |
| 1329 | /*=========================================================================== |
| 1330 | MAC operations (result in MR) |
| 1331 | ===========================================================================*/ |
| 1332 | |
/*
    Execute one MAC operation, writing the result into the 40-bit MR
    accumulator.  'op' is the raw opcode word: bits 13-16 select the MAC
    function, bits 8-10 the X operand register, bits 11-12 the Y operand
    register.

    shift is 1 in fractional mode / 0 in integer mode: the 32-bit
    product is shifted left one bit so that a 1.15 x 1.15 multiply
    yields a 1.31 result (assumes MSTAT_INTEGER occupies bit 4 of
    MSTAT -- see the '>> 4' -- TODO confirm against the macro header).

    The (RND) variants round the result to 16 bits by adding 0x8000;
    when the low 16 bits of the product were exactly 0x8000, bit 16 is
    then cleared, giving unbiased round-to-even on the tie case.

    MV (MAC overflow) is recomputed at the end: set when the signed
    result does not fit in 32 bits, i.e. bits 31-39 are not all equal.
*/
void adsp21xx_device::mac_op_mr(int op)
{
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;      /* 1 = fractional, 0 = integer mode */
	INT32 xop = (op >> 8) & 7;      /* X operand register index */
	INT32 yop = (op >> 11) & 3;     /* Y operand register index */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))	/*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x04<<13:
			/* X * Y (SS)      Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x08<<13:
			/* MR + X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0c<<13:
			/* MR - X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MV: result overflows 32 bits unless bits 31-39 are all 0 or all 1 */
	temp = (res >> 31) & 0x1ff;
	CLR_MV;
	if (temp != 0x000 && temp != 0x1ff) SET_MV;
	m_core.mr.mr = res;
}
| 1489 | |
| 1490 | |
| 1491 | |
| 1492 | /*=========================================================================== |
| 1493 | MAC operations (result in MR, yop == xop) |
| 1494 | ===========================================================================*/ |
| 1495 | |
/*
    Execute one MAC operation with yop == xop (the Y operand is the
    same register as X, so each product is the square X * X), writing
    the result into the 40-bit MR accumulator.

    Since both operands come from the same fetch, the mixed-sign (SU)
    form squares the signed value and the (US) form squares the
    unsigned value -- mirrors the per-case fetch used here; flag and
    rounding behavior otherwise match mac_op_mr:

    - shift is 1 in fractional mode / 0 in integer mode (product
      shifted left one bit so 1.15 x 1.15 yields 1.31).
    - (RND) variants round to 16 bits by adding 0x8000, clearing bit 16
      on the exact-0x8000 tie (unbiased round-to-even).
    - MV is recomputed at the end: set when bits 31-39 of the result
      are not all equal (result does not fit in 32 bits).
*/
void adsp21xx_device::mac_op_mr_xop(int op)
{
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;      /* 1 = fractional, 0 = integer mode */
	INT32 xop = (op >> 8) & 7;      /* X operand register index (also used as Y) */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))	/*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x04<<13:
			/* X * Y (SS)      Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x08<<13:
			/* MR + X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0c<<13:
			/* MR - X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MV: result overflows 32 bits unless bits 31-39 are all 0 or all 1 */
	temp = (res >> 31) & 0x1ff;
	CLR_MV;
	if (temp != 0x000 && temp != 0x1ff) SET_MV;
	m_core.mr.mr = res;
}
| 1636 | |
| 1637 | |
| 1638 | |
| 1639 | /*=========================================================================== |
| 1640 | MAC operations (result in MF) |
| 1641 | ===========================================================================*/ |
| 1642 | |
/*
    Execute one MAC operation, writing the result into the 16-bit MF
    (MAC feedback) register rather than the MR accumulator.  Operand
    decoding, fractional-mode shifting and (RND) rounding match
    mac_op_mr; the differences are in the write-back:

    - MF receives bits 16-31 of the 40-bit result ((UINT32)res >> 16).
    - The MR accumulator is read by the "MR +/- ..." forms but is NOT
      modified, and the MV flag is NOT updated here.
*/
void adsp21xx_device::mac_op_mf(int op)
{
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;      /* 1 = fractional, 0 = integer mode */
	INT32 xop = (op >> 8) & 7;      /* X operand register index */
	INT32 yop = (op >> 11) & 3;     /* Y operand register index */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))	/*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x04<<13:
			/* X * Y (SS)      Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = (INT64)temp;
			break;
		case 0x08<<13:
			/* MR + X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0c<<13:
			/* MR - X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_SIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			yop = MAC_GETYREG_UNSIGNED(yop);
			temp = (xop * yop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MF takes bits 16-31 of the result; MR and MV are left untouched */
	m_core.mf.u = (UINT32)res >> 16;
}
| 1796 | |
| 1797 | |
| 1798 | |
| 1799 | /*=========================================================================== |
| 1800 | MAC operations (result in MF, yop == xop) |
| 1801 | ===========================================================================*/ |
| 1802 | |
/*
    Execute one MAC operation with yop == xop (each product is the
    square X * X), writing the result into the 16-bit MF (MAC feedback)
    register.  Combines the xop-squaring decode of mac_op_mr_xop with
    the MF write-back of mac_op_mf:

    - MF receives bits 16-31 of the 40-bit result ((UINT32)res >> 16).
    - MR is read by the "MR +/- ..." forms but NOT modified; MV is NOT
      updated here.
    - shift is 1 in fractional mode / 0 in integer mode; (RND) variants
      round to 16 bits with the unbiased round-to-even tie handling.
*/
void adsp21xx_device::mac_op_mf_xop(int op)
{
	INT8 shift = ((m_mstat & MSTAT_INTEGER) >> 4) ^ 1;      /* 1 = fractional, 0 = integer mode */
	INT32 xop = (op >> 8) & 7;      /* X operand register index (also used as Y) */
	INT32 temp;
	INT64 res;

	switch (op & (15<<13))	/*JB*/
	{
		case 0x00<<13:
			/* no-op */
			return;
		case 0x01<<13:
			/* X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x02<<13:
			/* MR + X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x03<<13:
			/* MR - X * Y (RND) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			/* round to 16 bits; clear bit 16 on the 0x8000 tie (round-to-even) */
#if 0
			if ((res & 0xffff) == 0x8000) res &= ~((UINT64)0x10000);
			else res += (res & 0x8000) << 1;
#else
			temp &= 0xffff;
			res += 0x8000;
			if ( temp == 0x8000 )
				res &= ~((UINT64)0x10000);
#endif
			break;
		case 0x04<<13:
			/* X * Y (SS)      Clear when y = 0 */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x05<<13:
			/* X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x06<<13:
			/* X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x07<<13:
			/* X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = (INT64)temp;
			break;
		case 0x08<<13:
			/* MR + X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x09<<13:
			/* MR + X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0a<<13:
			/* MR + X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0b<<13:
			/* MR + X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr + (INT64)temp;
			break;
		case 0x0c<<13:
			/* MR - X * Y (SS) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0d<<13:
			/* MR - X * Y (SU) */
			xop = MAC_GETXREG_SIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0e<<13:
			/* MR - X * Y (US) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		case 0x0f<<13:
			/* MR - X * Y (UU) */
			xop = MAC_GETXREG_UNSIGNED(xop);
			temp = (xop * xop) << shift;
			res = m_core.mr.mr - (INT64)temp;
			break;
		default:
			res = 0;    /* just to keep the compiler happy */
			break;
	}

	/* set the final value */
	/* MF takes bits 16-31 of the result; MR and MV are left untouched */
	m_core.mf.u = (UINT32)res >> 16;
}
| 1940 | |
| 1941 | |
| 1942 | |
| 1943 | /*=========================================================================== |
| 1944 | SHIFT operations (result in SR/SE/SB) |
| 1945 | ===========================================================================*/ |
| 1946 | |
/*
    Shifter operation with the shift count taken from the SE register.

    'op' bits 11-14 select the shifter function; bits 8-10 select the 16-bit
    X operand register.  "HI" variants place the operand in the upper half of
    the 32-bit shifter field (<< 16), "LO" variants in the lower half.

    Results:  LSHIFT/ASHIFT/NORM write SR (the "OR" forms OR into SR instead);
    EXP writes SE (negated exponent); EXPADJ updates SB.

    NOTE(review): several cases left-shift a signed, possibly-negative INT32
    (e.g. 'xop << sc', 'xop <<= 1') — formally undefined in ISO C++; this code
    relies on two's-complement wraparound as compilers for MAME provide.
*/
void adsp21xx_device::shift_op(int op)
{
	INT8 sc = m_core.se.s;          /* signed shift count from SE: positive = left, negative = right */
	INT32 xop = (op >> 8) & 7;      /* X operand register select; replaced by the fetched value per case */
	UINT32 res;

	switch (op & (15<<11))  /*JB*/
	{
		case 0x00<<11:
			/* LSHIFT (HI) -- logical shift, operand in upper 16 bits */
			xop = SHIFT_GETXREG_UNSIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0;   /* logical (zero-fill) right shift; |sc| >= 32 flushes to 0 */
			m_core.sr.sr = res;
			break;
		case 0x01<<11:
			/* LSHIFT (HI, OR) -- as above, but result ORed into SR */
			xop = SHIFT_GETXREG_UNSIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x02<<11:
			/* LSHIFT (LO) -- logical shift, operand in lower 16 bits */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : 0;   /* xop is zero-extended, so >> is effectively logical */
			m_core.sr.sr = res;
			break;
		case 0x03<<11:
			/* LSHIFT (LO, OR) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x04<<11:
			/* ASHIFT (HI) -- arithmetic shift, operand in upper 16 bits */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);   /* sign-fill; |sc| >= 32 leaves all sign bits */
			m_core.sr.sr = res;
			break;
		case 0x05<<11:
			/* ASHIFT (HI, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr |= res;
			break;
		case 0x06<<11:
			/* ASHIFT (LO) -- arithmetic shift, sign-extended operand in lower 16 bits */
			xop = SHIFT_GETXREG_SIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr = res;
			break;
		case 0x07<<11:
			/* ASHIFT (LO, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr |= res;
			break;
		case 0x08<<11:
			/* NORM (HI) -- normalize: shifts run OPPOSITE to the sign of sc */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0)
			{
				/* right-normalize: rotate the carry flag (AC) into bit 31, then shift the rest */
				xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28);
				res = xop >> (sc - 1);
			}
			else res = (sc > -32) ? (xop << -sc) : 0;   /* negative sc = left shift */
			m_core.sr.sr = res;
			break;
		case 0x09<<11:
			/* NORM (HI, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0)
			{
				xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28);
				res = xop >> (sc - 1);
			}
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x0a<<11:
			/* NORM (LO) -- normalize lower half; no carry injection here */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0;
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr = res;
			break;
		case 0x0b<<11:
			/* NORM (LO, OR) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0;
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x0c<<11:
			/* EXP (HI) -- derive exponent: count redundant sign bits, store negated in SE */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			res = 0;
			if (xop < 0)
			{
				SET_SS;   /* latch the operand's sign in the SS flag */
				while ((xop & 0x40000000) != 0) res++, xop <<= 1;
			}
			else
			{
				CLR_SS;
				xop |= 0x8000;   /* plant a stop bit so xop == 0 yields res == 15, not an endless loop */
				while ((xop & 0x40000000) == 0) res++, xop <<= 1;
			}
			m_core.se.s = -res;
			break;
		case 0x0d<<11:
			/* EXP (HIX) -- as EXP (HI) but honoring ALU overflow */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (GET_V)   /* presumably the ALU overflow flag (V) -- verify against flag macros */
			{
				/* overflowed value: exponent is +1 and the true sign is the inverse of the stored sign bit */
				m_core.se.s = 1;
				if (xop < 0) CLR_SS;
				else SET_SS;
			}
			else
			{
				/* no overflow: identical to EXP (HI) */
				res = 0;
				if (xop < 0)
				{
					SET_SS;
					while ((xop & 0x40000000) != 0) res++, xop <<= 1;
				}
				else
				{
					CLR_SS;
					xop |= 0x8000;
					while ((xop & 0x40000000) == 0) res++, xop <<= 1;
				}
				m_core.se.s = -res;
			}
			break;
		case 0x0e<<11:
			/* EXP (LO) -- second half of a 32-bit exponent detect: only acts when the
			   HI pass found all 15 upper bits redundant (SE == -15); continues the
			   sign-bit count into the low word using the sign latched in SS */
			if (m_core.se.s == -15)
			{
				xop = SHIFT_GETXREG_SIGNED(xop);
				res = 15;
				if (GET_SS)
					while ((xop & 0x8000) != 0) res++, xop <<= 1;
				else
				{
					xop = (xop << 1) | 1;   /* stop bit, so an all-zero low word terminates at res == 31 */
					while ((xop & 0x10000) == 0) res++, xop <<= 1;
				}
				m_core.se.s = -res;
			}
			break;
		case 0x0f<<11:
			/* EXPADJ -- block-exponent adjust: fold this operand's exponent into SB,
			   keeping the smallest redundant-sign-bit count seen (SB holds it negated) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			res = 0;
			if (xop < 0)
				while ((xop & 0x40000000) != 0) res++, xop <<= 1;
			else
			{
				xop |= 0x8000;
				while ((xop & 0x40000000) == 0) res++, xop <<= 1;
			}
			if (res < -m_core.sb.s)
				m_core.sb.s = -res;
			break;
	}
}
| 2122 | |
| 2123 | |
| 2124 | |
| 2125 | /*=========================================================================== |
| 2126 | Immediate SHIFT operations (result in SR/SE/SB) |
| 2127 | ===========================================================================*/ |
| 2128 | |
/*
    Shifter operation with an immediate shift count.

    Identical to shift_op() except the shift count comes from the low 8 bits
    of the opcode (sign-extended via the INT8 cast) instead of SE.  Only the
    LSHIFT/ASHIFT/NORM functions (cases 0x00-0x0b) exist in immediate form;
    EXP/EXPADJ have no immediate variant, so there is no default case and
    'res' is never read when no case matches.

    NOTE(review): as in shift_op(), signed left shifts of negative values
    rely on two's-complement behavior (formally undefined in ISO C++).
*/
void adsp21xx_device::shift_op_imm(int op)
{
	INT8 sc = (INT8)op;             /* signed immediate shift count: positive = left, negative = right */
	INT32 xop = (op >> 8) & 7;      /* X operand register select; replaced by the fetched value per case */
	UINT32 res;

	switch (op & (15<<11))  /*JB*/
	{
		case 0x00<<11:
			/* LSHIFT (HI) -- logical shift, operand in upper 16 bits */
			xop = SHIFT_GETXREG_UNSIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0;   /* logical (zero-fill) right shift */
			m_core.sr.sr = res;
			break;
		case 0x01<<11:
			/* LSHIFT (HI, OR) -- as above, but result ORed into SR */
			xop = SHIFT_GETXREG_UNSIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? ((UINT32)xop >> -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x02<<11:
			/* LSHIFT (LO) -- logical shift, operand in lower 16 bits */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : 0;   /* xop is zero-extended, so >> is effectively logical */
			m_core.sr.sr = res;
			break;
		case 0x03<<11:
			/* LSHIFT (LO, OR) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x04<<11:
			/* ASHIFT (HI) -- arithmetic shift, operand in upper 16 bits */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);   /* sign-fill; |sc| >= 32 leaves all sign bits */
			m_core.sr.sr = res;
			break;
		case 0x05<<11:
			/* ASHIFT (HI, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr |= res;
			break;
		case 0x06<<11:
			/* ASHIFT (LO) -- arithmetic shift, sign-extended operand in lower 16 bits */
			xop = SHIFT_GETXREG_SIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr = res;
			break;
		case 0x07<<11:
			/* ASHIFT (LO, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop << sc) : 0;
			else res = (sc > -32) ? (xop >> -sc) : (xop >> 31);
			m_core.sr.sr |= res;
			break;
		case 0x08<<11:
			/* NORM (HI) -- normalize: shifts run OPPOSITE to the sign of sc */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0)
			{
				/* right-normalize: rotate the carry flag (AC) into bit 31, then shift the rest */
				xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28);
				res = xop >> (sc - 1);
			}
			else res = (sc > -32) ? (xop << -sc) : 0;   /* negative sc = left shift */
			m_core.sr.sr = res;
			break;
		case 0x09<<11:
			/* NORM (HI, OR) */
			xop = SHIFT_GETXREG_SIGNED(xop) << 16;
			if (sc > 0)
			{
				xop = ((UINT32)xop >> 1) | ((m_astat & CFLAG) << 28);
				res = xop >> (sc - 1);
			}
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr |= res;
			break;
		case 0x0a<<11:
			/* NORM (LO) -- normalize lower half; no carry injection here */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0;
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr = res;
			break;
		case 0x0b<<11:
			/* NORM (LO, OR) */
			xop = SHIFT_GETXREG_UNSIGNED(xop);
			if (sc > 0) res = (sc < 32) ? (xop >> sc) : 0;
			else res = (sc > -32) ? (xop << -sc) : 0;
			m_core.sr.sr |= res;
			break;
	}
}