trunk/src/lib/util/chd.c
| r242636 | r242637 | |
| 19 | 19 | #include <stddef.h> |
| 20 | 20 | #include <stdlib.h> |
| 21 | 21 | #include <new> |
| 22 | #include "eminline.h" |
| 22 | 23 | |
| 23 | 24 | |
| 24 | 25 | //************************************************************************** |
| r242636 | r242637 | |
| 2418 | 2419 | |
| 2419 | 2420 | // queue the next read |
| 2420 | 2421 | for (curitem = startitem; curitem < enditem; curitem++) |
| 2421 | | m_work_item[curitem % WORK_BUFFER_HUNKS].m_status = WS_READING; |
| 2422 | atomic_exchange32(&m_work_item[curitem % WORK_BUFFER_HUNKS].m_status, WS_READING); |
| 2422 | 2423 | osd_work_item_queue(m_read_queue, async_read_static, this, WORK_ITEM_FLAG_AUTO_RELEASE); |
| 2423 | 2424 | m_read_queue_offset += WORK_BUFFER_HUNKS * hunk_bytes() / 2; |
| 2424 | 2425 | } |
| r242636 | r242637 | |
| 2489 | 2490 | } while (0); |
| 2490 | 2491 | |
| 2491 | 2492 | // reset the item and advance |
| 2492 | | item.m_status = WS_READY; |
| 2493 | atomic_exchange32(&item.m_status, WS_READY); |
| 2493 | 2494 | m_write_hunk++; |
| 2494 | 2495 | |
| 2495 | 2496 | // if we hit the end, finalize |
| r242636 | r242637 | |
| 2502 | 2503 | m_read_queue_offset = m_read_done_offset = 0; |
| 2503 | 2504 | m_write_hunk = 0; |
| 2504 | 2505 | for (int itemnum = 0; itemnum < WORK_BUFFER_HUNKS; itemnum++) |
| 2505 | | m_work_item[itemnum].m_status = WS_READY; |
| 2506 | atomic_exchange32(&m_work_item[itemnum].m_status, WS_READY); |
| 2506 | 2507 | } |
| 2507 | 2508 | |
| 2508 | 2509 | // wait for all reads to finish and if we're compressed, write the final SHA1 and map |
| r242636 | r242637 | |
| 2555 | 2556 | item.m_hash[unit].m_crc16 = crc16_creator::simple(item.m_data + unit * unit_bytes(), hunk_bytes()); |
| 2556 | 2557 | item.m_hash[unit].m_sha1 = sha1_creator::simple(item.m_data + unit * unit_bytes(), hunk_bytes()); |
| 2557 | 2558 | } |
| 2558 | | item.m_status = WS_COMPLETE; |
| 2559 | atomic_exchange32(&item.m_status, WS_COMPLETE); |
| 2559 | 2560 | } |
| 2560 | 2561 | |
| 2561 | 2562 | |
| r242636 | r242637 | |
| 2583 | 2584 | |
| 2584 | 2585 | // find the best compression scheme, unless we already have a self or parent match |
| 2585 | 2586 | // (note we may miss a self match from blocks not yet added, but this just results in extra work) |
| 2587 | // TODO: data race -- these map lookups are unsynchronized; per the note above, a missed match only costs extra compression work |
| 2586 | 2588 | if (m_current_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND && |
| 2587 | 2589 | m_parent_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND) |
| 2588 | 2590 | item.m_compression = item.m_codecs->find_best_compressor(item.m_data, item.m_compressed, item.m_complen); |
| 2589 | 2591 | |
| 2590 | 2592 | // mark us complete |
| 2591 | | item.m_status = WS_COMPLETE; |
| 2593 | atomic_exchange32(&item.m_status, WS_COMPLETE); |
| 2592 | 2594 | } |
| 2593 | 2595 | |
| 2594 | 2596 | |
| r242636 | r242637 | |
| 2644 | 2646 | UINT32 hunknum = curoffs / hunk_bytes(); |
| 2645 | 2647 | work_item &item = m_work_item[hunknum % WORK_BUFFER_HUNKS]; |
| 2646 | 2648 | assert(item.m_status == WS_READING); |
| 2647 | | item.m_status = WS_QUEUED; |
| 2649 | atomic_exchange32(&item.m_status, WS_QUEUED); |
| 2648 | 2650 | item.m_hunknum = hunknum; |
| 2649 | 2651 | item.m_osd = osd_work_item_queue(m_work_queue, m_walking_parent ? async_walk_parent_static : async_compress_hunk_static, &item, 0); |
| 2650 | 2652 | } |
trunk/src/lib/util/chd.h
| r242636 | r242637 | |
| 530 | 530 | |
| 531 | 531 | osd_work_item * m_osd; // OSD work item running on this block |
| 532 | 532 | chd_file_compressor *m_compressor; // pointer back to the compressor |
| 533 | | volatile work_status m_status; // current status of this item |
| 533 | // TODO: changed from work_status to INT32 so atomic_exchange32() can operate on it; restore the enum type if a typed atomic becomes available |
| 534 | //volatile work_status m_status; // current status of this item |
| 535 | volatile INT32 m_status; // current status of this item |
| 534 | 536 | UINT32 m_hunknum; // number of the hunk we're working on |
| 535 | 537 | UINT8 * m_data; // pointer to the data we are working on |
| 536 | 538 | UINT8 * m_compressed; // pointer to the compressed data |
trunk/src/osd/sdl/sdlwork.c
| r242636 | r242637 | |
| 436 | 436 | item->param = parambase; |
| 437 | 437 | item->result = NULL; |
| 438 | 438 | item->flags = flags; |
| 439 | | item->done = FALSE; |
| 439 | atomic_exchange32(&item->done, FALSE); |
| 440 | 440 | |
| 441 | 441 | // advance to the next |
| 442 | 442 | lastitem = item; |
| r242636 | r242637 | |
| 502 | 502 | |
| 503 | 503 | // if we don't have an event, create one |
| 504 | 504 | if (item->event == NULL) |
| 505 | { |
| 506 | INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock); |
| 505 | 507 | item->event = osd_event_alloc(TRUE, FALSE); // manual reset, not signalled |
| 508 | osd_scalable_lock_release(item->queue->lock, lockslot); |
| 509 | } |
| 506 | 510 | else |
| 507 | 511 | osd_event_reset(item->event); |
| 508 | 512 | |
| r242636 | r242637 | |
| 719 | 723 | osd_work_item_release(item); |
| 720 | 724 | |
| 721 | 725 | // set the result and signal the event |
| 722 | | else if (item->event != NULL) |
| 726 | else |
| 723 | 727 | { |
| 724 | | osd_event_set(item->event); |
| 725 | | add_to_stat(&item->queue->setevents, 1); |
| 728 | INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock); |
| 729 | if (item->event != NULL) |
| 730 | { |
| 731 | osd_event_set(item->event); |
| 732 | add_to_stat(&item->queue->setevents, 1); |
| 733 | } |
| 734 | osd_scalable_lock_release(item->queue->lock, lockslot); |
| 726 | 735 | } |
| 727 | 736 | |
| 728 | 737 | // if we removed an item and there's still work to do, bump the stats |
| 738 | // TODO: data race -- queue->list is read without holding the queue lock; worst case the extraitems stat is slightly off |
| 729 | 739 | if (queue->list != NULL) |
| 730 | 740 | add_to_stat(&queue->extraitems, 1); |
| 731 | 741 | } |
trunk/src/osd/windows/winwork.c
| r242636 | r242637 | |
| 446 | 446 | item->param = parambase; |
| 447 | 447 | item->result = NULL; |
| 448 | 448 | item->flags = flags; |
| 449 | | item->done = FALSE; |
| 449 | atomic_exchange32(&item->done, FALSE); |
| 450 | 450 | |
| 451 | 451 | // advance to the next |
| 452 | 452 | lastitem = item; |
| r242636 | r242637 | |
| 509 | 509 | |
| 510 | 510 | // if we don't have an event, create one |
| 511 | 511 | if (item->event == NULL) |
| 512 | { |
| 513 | INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock); |
| 512 | 514 | item->event = osd_event_alloc(TRUE, FALSE); // manual reset, not signalled |
| 515 | osd_scalable_lock_release(item->queue->lock, lockslot); |
| 516 | } |
| 513 | 517 | else |
| 514 | | osd_event_reset(item->event); |
| 518 | osd_event_reset(item->event); |
| 515 | 519 | |
| 516 | 520 | // if we don't have an event, we need to spin (shouldn't ever really happen) |
| 517 | 521 | if (item->event == NULL) |
| r242636 | r242637 | |
| 710 | 714 | osd_work_item_release(item); |
| 711 | 715 | |
| 712 | 716 | // set the result and signal the event |
| 713 | | else if (item->event != NULL) |
| 717 | else |
| 714 | 718 | { |
| 715 | | osd_event_set(item->event); |
| 716 | | add_to_stat(&item->queue->setevents, 1); |
| 719 | INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock); |
| 720 | if (item->event != NULL) |
| 721 | { |
| 722 | osd_event_set(item->event); |
| 723 | add_to_stat(&item->queue->setevents, 1); |
| 724 | } |
| 725 | osd_scalable_lock_release(item->queue->lock, lockslot); |
| 717 | 726 | } |
| 718 | 727 | |
| 719 | 728 | // if we removed an item and there's still work to do, bump the stats |
| 729 | // TODO: data race -- queue->list is read without holding the queue lock; worst case the extraitems stat is slightly off |
| 720 | 730 | if (queue->list != NULL) |
| 721 | 731 | add_to_stat(&queue->extraitems, 1); |
| 722 | 732 | } |