trunk/src/emu/memory.c
r23728 | r23729 | |
64 | 64 | |
65 | 65 | 0 .. STATIC_COUNT - 1 = fixed handlers |
66 | 66 | STATIC_COUNT .. SUBTABLE_BASE - 1 = driver-specific handlers |
67 | | SUBTABLE_BASE .. 255 = need to look up lower bits in subtable |
| 67 | SUBTABLE_BASE .. TOTAL_MEMORY_BANKS - 1 = need to look up lower bits in subtable |
68 | 68 | |
69 | 69 | Caveats: |
70 | 70 | |
r23728 | r23729 | |
565 | 565 | static const int LEVEL1_BITS = 18; // number of address bits in the level 1 table |
566 | 566 | static const int LEVEL2_BITS = 32 - LEVEL1_BITS; // number of address bits in the level 2 table |
567 | 567 | static const int SUBTABLE_COUNT = 64; // number of slots reserved for subtables |
568 | | static const int SUBTABLE_BASE = 256 - SUBTABLE_COUNT; // first index of a subtable |
| 568 | static const int SUBTABLE_BASE = TOTAL_MEMORY_BANKS - SUBTABLE_COUNT; // first index of a subtable |
569 | 569 | static const int ENTRY_COUNT = SUBTABLE_BASE; // number of legitimate (non-subtable) entries |
570 | 570 | static const int SUBTABLE_ALLOC = 8; // number of subtables to allocate at a time |
571 | 571 | |
r23728 | r23729 | |
604 | 604 | void enable_watchpoints(bool enable = true) { m_live_lookup = enable ? s_watchpoint_table : m_table; } |
605 | 605 | |
606 | 606 | // table mapping helpers |
607 | | void map_range(offs_t bytestart, offs_t byteend, offs_t bytemask, offs_t bytemirror, UINT8 staticentry); |
| 607 | void map_range(offs_t bytestart, offs_t byteend, offs_t bytemask, offs_t bytemirror, UINT16 staticentry); |
608 | 608 | void setup_range(offs_t bytestart, offs_t byteend, offs_t bytemask, offs_t bytemirror, UINT64 mask, std::list<UINT32> &entries); |
609 | | UINT8 derive_range(offs_t byteaddress, offs_t &bytestart, offs_t &byteend) const; |
| 609 | UINT16 derive_range(offs_t byteaddress, offs_t &bytestart, offs_t &byteend) const; |
610 | 610 | |
611 | 611 | // misc helpers |
612 | 612 | void mask_all_handlers(offs_t mask); |
613 | | const char *handler_name(UINT8 entry) const; |
| 613 | const char *handler_name(UINT16 entry) const; |
614 | 614 | |
615 | 615 | protected: |
616 | 616 | // determine table indexes based on the address |
617 | 617 | UINT32 level1_index_large(offs_t address) const { return address >> LEVEL2_BITS; } |
618 | | UINT32 level2_index_large(UINT8 l1entry, offs_t address) const { return (1 << LEVEL1_BITS) + ((l1entry - SUBTABLE_BASE) << LEVEL2_BITS) + (address & ((1 << LEVEL2_BITS) - 1)); } |
| 618 | UINT32 level2_index_large(UINT16 l1entry, offs_t address) const { return (1 << LEVEL1_BITS) + ((l1entry - SUBTABLE_BASE) << LEVEL2_BITS) + (address & ((1 << LEVEL2_BITS) - 1)); } |
619 | 619 | UINT32 level1_index(offs_t address) const { return m_large ? level1_index_large(address) : address; } |
620 | | UINT32 level2_index(UINT8 l1entry, offs_t address) const { return m_large ? level2_index_large(l1entry, address) : 0; } |
| 620 | UINT32 level2_index(UINT16 l1entry, offs_t address) const { return m_large ? level2_index_large(l1entry, address) : 0; } |
621 | 621 | |
622 | 622 | // table population/depopulation |
623 | | void populate_range_mirrored(offs_t bytestart, offs_t byteend, offs_t bytemirror, UINT8 handler); |
624 | | void populate_range(offs_t bytestart, offs_t byteend, UINT8 handler); |
| 623 | void populate_range_mirrored(offs_t bytestart, offs_t byteend, offs_t bytemirror, UINT16 handler); |
| 624 | void populate_range(offs_t bytestart, offs_t byteend, UINT16 handler); |
625 | 625 | |
626 | 626 | // subtable management |
627 | | UINT8 subtable_alloc(); |
628 | | void subtable_realloc(UINT8 subentry); |
| 627 | UINT16 subtable_alloc(); |
| 628 | void subtable_realloc(UINT16 subentry); |
629 | 629 | int subtable_merge(); |
630 | | void subtable_release(UINT8 subentry); |
631 | | UINT8 *subtable_open(offs_t l1index); |
| 630 | void subtable_release(UINT16 subentry); |
| 631 | UINT16 *subtable_open(offs_t l1index); |
632 | 632 | void subtable_close(offs_t l1index); |
633 | | UINT8 *subtable_ptr(UINT8 entry) { return &m_table[level2_index(entry, 0)]; } |
| 633 | UINT16 *subtable_ptr(UINT16 entry) { return &m_table[level2_index(entry, 0)]; } |
634 | 634 | |
635 | 635 | // internal state |
636 | | UINT8 * m_table; // pointer to base of table |
637 | | UINT8 * m_live_lookup; // current lookup |
| 636 | UINT16 * m_table; // pointer to base of table |
| 637 | UINT16 * m_live_lookup; // current lookup |
638 | 638 | address_space & m_space; // pointer back to the space |
639 | 639 | bool m_large; // large memory model? |
640 | 640 | |
r23728 | r23729 | |
652 | 652 | UINT32 m_usecount; // number of times this has been used |
653 | 653 | }; |
654 | 654 | subtable_data * m_subtable; // info about each subtable |
655 | | UINT8 m_subtable_alloc; // number of subtables allocated |
| 655 | UINT16 m_subtable_alloc; // number of subtables allocated |
656 | 656 | |
657 | 657 | // static global read-only watchpoint table |
658 | | static UINT8 s_watchpoint_table[1 << LEVEL1_BITS]; |
| 658 | static UINT16 s_watchpoint_table[1 << LEVEL1_BITS]; |
659 | 659 | |
660 | 660 | private: |
661 | 661 | int handler_refcount[SUBTABLE_BASE-STATIC_COUNT]; |
662 | | UINT8 handler_next_free[SUBTABLE_BASE-STATIC_COUNT]; |
663 | | UINT8 handler_free; |
664 | | UINT8 get_free_handler(); |
| 662 | UINT16 handler_next_free[SUBTABLE_BASE-STATIC_COUNT]; |
| 663 | UINT16 handler_free; |
| 664 | UINT16 get_free_handler(); |
665 | 665 | void verify_reference_counts(); |
666 | 666 | void setup_range_solid(offs_t addrstart, offs_t addrend, offs_t addrmask, offs_t addrmirror, std::list<UINT32> &entries); |
667 | 667 | void setup_range_masked(offs_t addrstart, offs_t addrend, offs_t addrmask, offs_t addrmirror, UINT64 mask, std::list<UINT32> &entries); |
668 | 668 | |
669 | | void handler_ref(UINT8 entry, int count) |
| 669 | void handler_ref(UINT16 entry, int count) |
670 | 670 | { |
671 | 671 | assert(entry < SUBTABLE_BASE); |
672 | 672 | if (entry >= STATIC_COUNT) |
673 | 673 | handler_refcount[entry - STATIC_COUNT] += count; |
674 | 674 | } |
675 | 675 | |
676 | | void handler_unref(UINT8 entry) |
| 676 | void handler_unref(UINT16 entry) |
677 | 677 | { |
678 | 678 | assert(entry < SUBTABLE_BASE); |
679 | 679 | if (entry >= STATIC_COUNT) |
r23728 | r23729 | |
742 | 742 | { |
743 | 743 | m_space.device().debug()->memory_read_hook(m_space, offset * sizeof(_UintType), mask); |
744 | 744 | |
745 | | UINT8 *oldtable = m_live_lookup; |
| 745 | UINT16 *oldtable = m_live_lookup; |
746 | 746 | m_live_lookup = m_table; |
747 | 747 | _UintType result; |
748 | 748 | if (sizeof(_UintType) == 1) result = m_space.read_byte(offset); |
r23728 | r23729 | |
754 | 754 | } |
755 | 755 | |
756 | 756 | // internal state |
757 | | handler_entry_read * m_handlers[256]; // array of user-installed handlers |
| 757 | handler_entry_read * m_handlers[TOTAL_MEMORY_BANKS]; // array of user-installed handlers |
758 | 758 | }; |
759 | 759 | |
760 | 760 | |
r23728 | r23729 | |
810 | 810 | { |
811 | 811 | m_space.device().debug()->memory_write_hook(m_space, offset * sizeof(_UintType), data, mask); |
812 | 812 | |
813 | | UINT8 *oldtable = m_live_lookup; |
| 813 | UINT16 *oldtable = m_live_lookup; |
814 | 814 | m_live_lookup = m_table; |
815 | 815 | if (sizeof(_UintType) == 1) m_space.write_byte(offset, data); |
816 | 816 | if (sizeof(_UintType) == 2) m_space.write_word(offset << 1, data, mask); |
r23728 | r23729 | |
820 | 820 | } |
821 | 821 | |
822 | 822 | // internal state |
823 | | handler_entry_write * m_handlers[256]; // array of user-installed handlers |
| 823 | handler_entry_write * m_handlers[TOTAL_MEMORY_BANKS]; // array of user-installed handlers |
824 | 824 | }; |
825 | 825 | |
826 | 826 | |
r23728 | r23729 | |
1459 | 1459 | //************************************************************************** |
1460 | 1460 | |
1461 | 1461 | // global watchpoint table |
1462 | | UINT8 address_table::s_watchpoint_table[1 << LEVEL1_BITS]; |
| 1462 | UINT16 address_table::s_watchpoint_table[1 << LEVEL1_BITS]; |
1463 | 1463 | |
1464 | 1464 | |
1465 | 1465 | |
r23728 | r23729 | |
2208 | 2208 | offs_t bytestart, byteend; |
2209 | 2209 | for (offs_t byteaddress = 0; byteaddress <= m_bytemask; byteaddress = byteend) |
2210 | 2210 | { |
2211 | | UINT8 entry = table.derive_range(byteaddress, bytestart, byteend); |
| 2211 | UINT16 entry = table.derive_range(byteaddress, bytestart, byteend); |
2212 | 2212 | fprintf(file, "%08X-%08X = %02X: %s [offset=%08X]\n", |
2213 | 2213 | bytestart, byteend, entry, table.handler_name(entry), table.handler(entry).bytestart()); |
2214 | 2214 | if (++byteend == 0) |
r23728 | r23729 | |
2787 | 2787 | //------------------------------------------------- |
2788 | 2788 | |
2789 | 2789 | address_table::address_table(address_space &space, bool large) |
2790 | | : m_table(auto_alloc_array(space.machine(), UINT8, 1 << LEVEL1_BITS)), |
| 2790 | : m_table(auto_alloc_array(space.machine(), UINT16, 1 << LEVEL1_BITS)), |
2791 | 2791 | m_live_lookup(m_table), |
2792 | 2792 | m_space(space), |
2793 | 2793 | m_large(large), |
r23728 | r23729 | |
2796 | 2796 | { |
2797 | 2797 | // make our static table all watchpoints |
2798 | 2798 | if (s_watchpoint_table[0] != STATIC_WATCHPOINT) |
2799 | | memset(s_watchpoint_table, STATIC_WATCHPOINT, sizeof(s_watchpoint_table)); |
| 2799 | for (unsigned int i=0; i != sizeof(s_watchpoint_table)/sizeof(s_watchpoint_table[0]); i++) |
| 2800 | s_watchpoint_table[i] = STATIC_WATCHPOINT; |
2800 | 2801 | |
2801 | 2802 | // initialize everything to unmapped |
2802 | | memset(m_table, STATIC_UNMAP, 1 << LEVEL1_BITS); |
| 2803 | for (unsigned int i=0; i != 1 << LEVEL1_BITS; i++) |
| 2804 | m_table[i] = STATIC_UNMAP; |
2803 | 2805 | |
2804 | 2806 | // initialize the handlers freelist |
2805 | 2807 | for (int i=0; i != SUBTABLE_BASE-STATIC_COUNT-1; i++) |
r23728 | r23729 | |
2828 | 2830 | // map |
2829 | 2831 | //------------------------------------------------- |
2830 | 2832 | |
2831 | | void address_table::map_range(offs_t addrstart, offs_t addrend, offs_t addrmask, offs_t addrmirror, UINT8 entry) |
| 2833 | void address_table::map_range(offs_t addrstart, offs_t addrend, offs_t addrmask, offs_t addrmirror, UINT16 entry) |
2832 | 2834 | { |
2833 | 2835 | // convert addresses to bytes |
2834 | 2836 | offs_t bytestart = addrstart; |
r23728 | r23729 | |
2856 | 2858 | // verify_reference_counts(); |
2857 | 2859 | } |
2858 | 2860 | |
2859 | | UINT8 address_table::get_free_handler() |
| 2861 | UINT16 address_table::get_free_handler() |
2860 | 2862 | { |
2861 | 2863 | if (handler_free == STATIC_INVALID) |
2862 | 2864 | throw emu_fatalerror("Out of handler entries in address table"); |
2863 | 2865 | |
2864 | | UINT8 handler = handler_free; |
| 2866 | UINT16 handler = handler_free; |
2865 | 2867 | handler_free = handler_next_free[handler - STATIC_COUNT]; |
2866 | 2868 | return handler; |
2867 | 2869 | } |
r23728 | r23729 | |
2893 | 2895 | void address_table::setup_range_solid(offs_t addrstart, offs_t addrend, offs_t addrmask, offs_t addrmirror, std::list<UINT32> &entries) |
2894 | 2896 | { |
2895 | 2897 | // Grab a free entry |
2896 | | UINT8 entry = get_free_handler(); |
| 2898 | UINT16 entry = get_free_handler(); |
2897 | 2899 | |
2898 | 2900 | // Add it in the "to be setup" list |
2899 | 2901 | entries.push_back(entry); |
r23728 | r23729 | |
2931 | 2933 | |
2932 | 2934 | // Scan the memory to see what has to be done |
2933 | 2935 | std::list<subrange> range_override; |
2934 | | std::map<UINT8, std::list<subrange> > range_partial; |
| 2936 | std::map<UINT16, std::list<subrange> > range_partial; |
2935 | 2937 | |
2936 | 2938 | offs_t base_mirror = 0; |
2937 | 2939 | do |
r23728 | r23729 | |
2942 | 2944 | do |
2943 | 2945 | { |
2944 | 2946 | offs_t range_start, range_end; |
2945 | | UINT8 entry = derive_range(base_address, range_start, range_end); |
| 2947 | UINT16 entry = derive_range(base_address, range_start, range_end); |
2946 | 2948 | UINT32 stop_address = range_end > end_address ? end_address : range_end; |
2947 | 2949 | |
2948 | 2950 | if (entry < STATIC_COUNT || handler(entry).overriden_by_mask(mask)) |
r23728 | r23729 | |
2963 | 2965 | if (!range_override.empty()) |
2964 | 2966 | { |
2965 | 2967 | // Grab a free entry |
2966 | | UINT8 entry = get_free_handler(); |
| 2968 | UINT16 entry = get_free_handler(); |
2967 | 2969 | |
2968 | 2970 | // configure the entry to our parameters |
2969 | 2971 | handler_entry &curentry = handler(entry); |
r23728 | r23729 | |
2983 | 2985 | // Ranges in range_partial must be duplicated, then partially changed |
2984 | 2986 | if (!range_partial.empty()) |
2985 | 2987 | { |
2986 | | for (std::map<UINT8, std::list<subrange> >::const_iterator i = range_partial.begin(); i != range_partial.end(); i++) |
| 2988 | for (std::map<UINT16, std::list<subrange> >::const_iterator i = range_partial.begin(); i != range_partial.end(); i++) |
2987 | 2989 | { |
2988 | 2990 | // Theoretically, if the handler to change matches the |
2989 | 2991 | // characteristics of ours, we can directly change it. In |
r23728 | r23729 | |
3002 | 3004 | throw emu_fatalerror("Handlers on different subunits of the same address with different address masks are not supported."); |
3003 | 3005 | |
3004 | 3006 | // Grab a new handler and copy it there |
3005 | | UINT8 entry = get_free_handler(); |
| 3007 | UINT16 entry = get_free_handler(); |
3006 | 3008 | handler_entry &curentry = handler(entry); |
3007 | 3009 | curentry.copy(base_entry); |
3008 | 3010 | |
r23728 | r23729 | |
3038 | 3040 | int actual_refcounts[SUBTABLE_BASE-STATIC_COUNT]; |
3039 | 3041 | memset(actual_refcounts, 0, sizeof(actual_refcounts)); |
3040 | 3042 | |
3041 | | bool subtable_seen[256 - SUBTABLE_BASE]; |
| 3043 | bool subtable_seen[TOTAL_MEMORY_BANKS - SUBTABLE_BASE]; |
3042 | 3044 | memset(subtable_seen, 0, sizeof(subtable_seen)); |
3043 | 3045 | |
3044 | 3046 | for (int level1 = 0; level1 != 1 << LEVEL1_BITS; level1++) |
3045 | 3047 | { |
3046 | | UINT8 l1_entry = m_table[level1]; |
| 3048 | UINT16 l1_entry = m_table[level1]; |
3047 | 3049 | if (l1_entry >= SUBTABLE_BASE) |
3048 | 3050 | { |
3049 | 3051 | assert(m_large); |
r23728 | r23729 | |
3051 | 3053 | continue; |
3052 | 3054 | |
3053 | 3055 | subtable_seen[l1_entry - SUBTABLE_BASE] = true; |
3054 | | const UINT8 *subtable = subtable_ptr(l1_entry); |
| 3056 | const UINT16 *subtable = subtable_ptr(l1_entry); |
3055 | 3057 | for (int level2 = 0; level2 != 1 << LEVEL2_BITS; level2++) |
3056 | 3058 | { |
3057 | | UINT8 l2_entry = subtable[level2]; |
| 3059 | UINT16 l2_entry = subtable[level2]; |
3058 | 3060 | assert(l2_entry < SUBTABLE_BASE); |
3059 | 3061 | if (l2_entry >= STATIC_COUNT) |
3060 | 3062 | actual_refcounts[l2_entry - STATIC_COUNT]++; |
r23728 | r23729 | |
3079 | 3081 | // range of addresses |
3080 | 3082 | //------------------------------------------------- |
3081 | 3083 | |
3082 | | void address_table::populate_range(offs_t bytestart, offs_t byteend, UINT8 handlerindex) |
| 3084 | void address_table::populate_range(offs_t bytestart, offs_t byteend, UINT16 handlerindex) |
3083 | 3085 | { |
3084 | 3086 | offs_t l2mask = (1 << level2_bits()) - 1; |
3085 | 3087 | offs_t l1start = bytestart >> level2_bits(); |
r23728 | r23729 | |
3094 | 3096 | // handle the starting edge if it's not on a block boundary |
3095 | 3097 | if (l2start != 0) |
3096 | 3098 | { |
3097 | | UINT8 *subtable = subtable_open(l1start); |
| 3099 | UINT16 *subtable = subtable_open(l1start); |
3098 | 3100 | |
3099 | 3101 | // if the start and stop end within the same block, handle that |
3100 | 3102 | if (l1start == l1stop) |
r23728 | r23729 | |
3124 | 3126 | // handle the trailing edge if it's not on a block boundary |
3125 | 3127 | if (l2stop != l2mask) |
3126 | 3128 | { |
3127 | | UINT8 *subtable = subtable_open(l1stop); |
| 3129 | UINT16 *subtable = subtable_open(l1stop); |
3128 | 3130 | |
3129 | 3131 | // fill from the beginning |
3130 | 3132 | handler_ref(handlerindex, l2stop+1); |
r23728 | r23729 | |
3146 | 3148 | handler_ref(handlerindex, l1stop - l1start + 1); |
3147 | 3149 | for (offs_t l1index = l1start; l1index <= l1stop; l1index++) |
3148 | 3150 | { |
3149 | | UINT8 subindex = m_table[l1index]; |
| 3151 | UINT16 subindex = m_table[l1index]; |
3150 | 3152 | |
3151 | 3153 | // if we have a subtable here, release it |
3152 | 3154 | if (subindex >= SUBTABLE_BASE) |
r23728 | r23729 | |
3164 | 3166 | // mirrors |
3165 | 3167 | //------------------------------------------------- |
3166 | 3168 | |
3167 | | void address_table::populate_range_mirrored(offs_t bytestart, offs_t byteend, offs_t bytemirror, UINT8 handlerindex) |
| 3169 | void address_table::populate_range_mirrored(offs_t bytestart, offs_t byteend, offs_t bytemirror, UINT16 handlerindex) |
3168 | 3170 | { |
3169 | 3171 | // determine the mirror bits |
3170 | 3172 | offs_t lmirrorbits = 0; |
r23728 | r23729 | |
3180 | 3182 | hmirrorbit[hmirrorbits++] = 1 << bit; |
3181 | 3183 | |
3182 | 3184 | // loop over mirrors in the level 2 table |
3183 | | UINT8 prev_entry = STATIC_INVALID; |
| 3185 | UINT16 prev_entry = STATIC_INVALID; |
3184 | 3186 | int prev_index = 0; |
3185 | 3187 | for (offs_t hmirrorcount = 0; hmirrorcount < (1 << hmirrorbits); hmirrorcount++) |
3186 | 3188 | { |
r23728 | r23729 | |
3253 | 3255 | // range based on the lookup tables |
3254 | 3256 | //------------------------------------------------- |
3255 | 3257 | |
3256 | | UINT8 address_table::derive_range(offs_t byteaddress, offs_t &bytestart, offs_t &byteend) const |
| 3258 | UINT16 address_table::derive_range(offs_t byteaddress, offs_t &bytestart, offs_t &byteend) const |
3257 | 3259 | { |
3258 | 3260 | // look up the initial address to get the entry we care about |
3259 | | UINT8 l1entry; |
3260 | | UINT8 entry = l1entry = m_table[level1_index(byteaddress)]; |
| 3261 | UINT16 l1entry; |
| 3262 | UINT16 entry = l1entry = m_table[level1_index(byteaddress)]; |
3261 | 3263 | if (l1entry >= SUBTABLE_BASE) |
3262 | 3264 | entry = m_table[level2_index(l1entry, byteaddress)]; |
3263 | 3265 | |
r23728 | r23729 | |
3266 | 3268 | handler(entry).mirrored_start_end(byteaddress, minscan, maxscan); |
3267 | 3269 | |
3268 | 3270 | // first scan backwards to find the start address |
3269 | | UINT8 curl1entry = l1entry; |
3270 | | UINT8 curentry = entry; |
| 3271 | UINT16 curl1entry = l1entry; |
| 3272 | UINT16 curentry = entry; |
3271 | 3273 | bytestart = byteaddress; |
3272 | 3274 | while (1) |
3273 | 3275 | { |
r23728 | r23729 | |
3368 | 3370 | // and set its usecount to 1 |
3369 | 3371 | //------------------------------------------------- |
3370 | 3372 | |
3371 | | UINT8 address_table::subtable_alloc() |
| 3373 | UINT16 address_table::subtable_alloc() |
3372 | 3374 | { |
3373 | 3375 | // loop |
3374 | 3376 | while (1) |
3375 | 3377 | { |
3376 | 3378 | // find a subtable with a usecount of 0 |
3377 | | for (UINT8 subindex = 0; subindex < SUBTABLE_COUNT; subindex++) |
| 3379 | for (UINT16 subindex = 0; subindex < SUBTABLE_COUNT; subindex++) |
3378 | 3380 | if (m_subtable[subindex].m_usecount == 0) |
3379 | 3381 | { |
3380 | 3382 | // if this is past our allocation budget, allocate some more |
r23728 | r23729 | |
3384 | 3386 | m_subtable_alloc += SUBTABLE_ALLOC; |
3385 | 3387 | UINT32 newsize = (1 << LEVEL1_BITS) + (m_subtable_alloc << level2_bits()); |
3386 | 3388 | |
3387 | | UINT8 *newtable = auto_alloc_array_clear(m_space.machine(), UINT8, newsize); |
3388 | | memcpy(newtable, m_table, oldsize); |
| 3389 | UINT16 *newtable = auto_alloc_array_clear(m_space.machine(), UINT16, newsize); |
| 3390 | memcpy(newtable, m_table, 2*oldsize); |
3389 | 3391 | if (m_live_lookup == m_table) |
3390 | 3392 | m_live_lookup = newtable; |
3391 | 3393 | auto_free(m_space.machine(), m_table); |
3392 | 3394 | m_table = newtable; |
3393 | 3395 | } |
3394 | | |
3395 | 3396 | // bump the usecount and return |
3396 | 3397 | m_subtable[subindex].m_usecount++; |
3397 | 3398 | return subindex + SUBTABLE_BASE; |
r23728 | r23729 | |
3409 | 3410 | // a subtable |
3410 | 3411 | //------------------------------------------------- |
3411 | 3412 | |
3412 | | void address_table::subtable_realloc(UINT8 subentry) |
| 3413 | void address_table::subtable_realloc(UINT16 subentry) |
3413 | 3414 | { |
3414 | | UINT8 subindex = subentry - SUBTABLE_BASE; |
| 3415 | UINT16 subindex = subentry - SUBTABLE_BASE; |
3415 | 3416 | |
3416 | 3417 | // sanity check |
3417 | 3418 | if (m_subtable[subindex].m_usecount <= 0) |
r23728 | r23729 | |
3430 | 3431 | int address_table::subtable_merge() |
3431 | 3432 | { |
3432 | 3433 | int merged = 0; |
3433 | | UINT8 subindex; |
| 3434 | UINT16 subindex; |
3434 | 3435 | |
3435 | 3436 | VPRINTF(("Merging subtables....\n")); |
3436 | 3437 | |
r23728 | r23729 | |
3452 | 3453 | for (subindex = 0; subindex < SUBTABLE_COUNT; subindex++) |
3453 | 3454 | if (m_subtable[subindex].m_usecount != 0) |
3454 | 3455 | { |
3455 | | UINT8 *subtable = subtable_ptr(subindex + SUBTABLE_BASE); |
| 3456 | UINT16 *subtable = subtable_ptr(subindex + SUBTABLE_BASE); |
3456 | 3457 | UINT32 checksum = m_subtable[subindex].m_checksum; |
3457 | | UINT8 sumindex; |
| 3458 | UINT16 sumindex; |
3458 | 3459 | |
3459 | 3460 | for (sumindex = subindex + 1; sumindex < SUBTABLE_COUNT; sumindex++) |
3460 | 3461 | if (m_subtable[sumindex].m_usecount != 0 && |
3461 | 3462 | m_subtable[sumindex].m_checksum == checksum && |
3462 | | !memcmp(subtable, subtable_ptr(sumindex + SUBTABLE_BASE), 1 << level2_bits())) |
| 3463 | !memcmp(subtable, subtable_ptr(sumindex + SUBTABLE_BASE), 2*(1 << level2_bits()))) |
3463 | 3464 | { |
3464 | 3465 | int l1index; |
3465 | 3466 | |
r23728 | r23729 | |
3486 | 3487 | // a subtable and free it if we're done |
3487 | 3488 | //------------------------------------------------- |
3488 | 3489 | |
3489 | | void address_table::subtable_release(UINT8 subentry) |
| 3490 | void address_table::subtable_release(UINT16 subentry) |
3490 | 3491 | { |
3491 | | UINT8 subindex = subentry - SUBTABLE_BASE; |
3492 | | |
| 3492 | UINT16 subindex = subentry - SUBTABLE_BASE; |
3493 | 3493 | // sanity check |
3494 | 3494 | if (m_subtable[subindex].m_usecount <= 0) |
3495 | 3495 | fatalerror("Called subtable_release on a table with a usecount of 0\n"); |
r23728 | r23729 | |
3500 | 3500 | if (m_subtable[subindex].m_usecount == 0) |
3501 | 3501 | { |
3502 | 3502 | m_subtable[subindex].m_checksum = 0; |
3503 | | UINT8 *subtable = subtable_ptr(subentry); |
| 3503 | UINT16 *subtable = subtable_ptr(subentry); |
3504 | 3504 | for (int i = 0; i < (1 << LEVEL2_BITS); i++) |
3505 | 3505 | handler_unref(subtable[i]); |
3506 | 3506 | } |
r23728 | r23729 | |
3512 | 3512 | // modification |
3513 | 3513 | //------------------------------------------------- |
3514 | 3514 | |
3515 | | UINT8 *address_table::subtable_open(offs_t l1index) |
| 3515 | UINT16 *address_table::subtable_open(offs_t l1index) |
3516 | 3516 | { |
3517 | | UINT8 subentry = m_table[l1index]; |
| 3517 | UINT16 subentry = m_table[l1index]; |
3518 | 3518 | |
3519 | 3519 | // if we don't have a subtable yet, allocate a new one |
3520 | 3520 | if (subentry < SUBTABLE_BASE) |
3521 | 3521 | { |
3522 | 3522 | int size = 1 << level2_bits(); |
3523 | | UINT8 newentry = subtable_alloc(); |
| 3523 | UINT16 newentry = subtable_alloc(); |
3524 | 3524 | handler_ref(subentry, size-1); |
3525 | | memset(subtable_ptr(newentry), subentry, size); |
| 3525 | UINT16 *subptr = subtable_ptr(newentry); |
| 3526 | for (int i=0; i<size; i++) |
| 3527 | subptr[i] = subentry; |
3526 | 3528 | m_table[l1index] = newentry; |
3527 | 3529 | m_subtable[newentry - SUBTABLE_BASE].m_checksum = (subentry + (subentry << 8) + (subentry << 16) + (subentry << 24)) * ((1 << level2_bits())/4); |
3528 | 3530 | subentry = newentry; |
r23728 | r23729 | |
3531 | 3533 | // if we're sharing this subtable, we also need to allocate a fresh copy |
3532 | 3534 | else if (m_subtable[subentry - SUBTABLE_BASE].m_usecount > 1) |
3533 | 3535 | { |
3534 | | UINT8 newentry = subtable_alloc(); |
| 3536 | UINT16 newentry = subtable_alloc(); |
3535 | 3537 | |
3536 | 3538 | // allocate may cause some additional merging -- look up the subentry again |
3537 | 3539 | // when we're done; it should still require a split |
r23728 | r23729 | |
3540 | 3542 | assert(m_subtable[subentry - SUBTABLE_BASE].m_usecount > 1); |
3541 | 3543 | |
3542 | 3544 | int size = 1 << level2_bits(); |
3543 | | UINT8 *src = subtable_ptr(subentry); |
| 3545 | UINT16 *src = subtable_ptr(subentry); |
3544 | 3546 | for(int i=0; i != size; i++) |
3545 | 3547 | handler_ref(src[i], 1); |
3546 | 3548 | |
3547 | | memcpy(subtable_ptr(newentry), src, size); |
| 3549 | memcpy(subtable_ptr(newentry), src, 2*size); |
3548 | 3550 | subtable_release(subentry); |
3549 | 3551 | m_table[l1index] = newentry; |
3550 | 3552 | m_subtable[newentry - SUBTABLE_BASE].m_checksum = m_subtable[subentry - SUBTABLE_BASE].m_checksum; |
r23728 | r23729 | |
3574 | 3576 | // description of a handler |
3575 | 3577 | //------------------------------------------------- |
3576 | 3578 | |
3577 | | const char *address_table::handler_name(UINT8 entry) const |
| 3579 | const char *address_table::handler_name(UINT16 entry) const |
3578 | 3580 | { |
3579 | 3581 | static const char *const strings[] = |
3580 | 3582 | { |
r23728 | r23729 | |
3868 | 3870 | // find_range - find a byte address in a range |
3869 | 3871 | //------------------------------------------------- |
3870 | 3872 | |
3871 | | direct_read_data::direct_range *direct_read_data::find_range(offs_t byteaddress, UINT8 &entry) |
| 3873 | direct_read_data::direct_range *direct_read_data::find_range(offs_t byteaddress, UINT16 &entry) |
3872 | 3874 | { |
3873 | 3875 | // determine which entry |
3874 | 3876 | byteaddress &= m_space.m_bytemask; |