//------------------------------------------------------------------------------------
// DSt_cube<T>::dataCache - get
//
// Description - Performs the file cache bookkeeping needed to read location z,y,x
// Note: should never be called when CACHE_DISABLED
//------------------------------------------------------------------------------------
template <class T>
T DSt_cube<T>::dataCache( unsigned short z, unsigned short y, unsigned short x )
{
    // Check what kind of caching is used
    switch( cache_state )
    {
    case CACHE_FULL_SLICE:
        // See if desired slice is in cache
        if ( cache_validity_table[z][0][0] != NULL )
        {
#if CACHING_USE_LINKED_LIST
            // Move to head of linked list
            oldest_seg_ptr = cache_validity_table[z][0][0]->queue_node_ptr;
            cache_age_queue->unlinkNode( oldest_seg_ptr );
            cache_age_queue->insertAtStart( oldest_seg_ptr );
            return oldest_seg_ptr->item.data[y][x];
#else
            // It is a cache hit, return data
            return cache_validity_table[z][0][0]->data[y][x];
#endif
        }
        break;

    case CACHE_FULL_ROW:
        // See if desired row is in cache
        if ( cache_validity_table[z][y][0] != NULL )
        {
#if CACHING_USE_LINKED_LIST
            // Move to head of linked list
            oldest_seg_ptr = cache_validity_table[z][y][0]->queue_node_ptr;
            cache_age_queue->unlinkNode( oldest_seg_ptr );
            cache_age_queue->insertAtStart( oldest_seg_ptr );
            return oldest_seg_ptr->item.data[0][x];
#else
            // It is a cache hit, return data
            return cache_validity_table[z][y][0]->data[0][x];
#endif
        }
        break;

    case CACHE_SUB_ROW:
        // See if the sub-row segment holding x is in cache (segment index is x >> cache_sub_row_shift_val)
        if ( cache_validity_table[z][y][x>>cache_sub_row_shift_val] != NULL )
        {
#if CACHING_USE_LINKED_LIST
            // Move to head of linked list
            oldest_seg_ptr = cache_validity_table[z][y][x>>cache_sub_row_shift_val]->queue_node_ptr;
            cache_age_queue->unlinkNode( oldest_seg_ptr );
            cache_age_queue->insertAtStart( oldest_seg_ptr );
            return oldest_seg_ptr->item.data[0][(~(0xFFFF<<cache_sub_row_shift_val)) & x];
#else
            // It is a cache hit, return data
            return cache_validity_table[z][y][x>>cache_sub_row_shift_val]->data[0][(~(0xFFFF<<cache_sub_row_shift_val)) & x];
#endif
        }
        break;

    default:
        // Invalid, should never happen... log error, and crash :)
        write_to_log("DSt_cube::Cached Get - Fatal Error, invalid cache state %d", cache_state);
        exit( 1 );
    }

    // We have had a cache miss.
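    // (The miss path below faults the missing slice / row / sub-row segment into the RAM
    //  cache and then retries; the recursion is bounded to one level because the retry is
    //  guaranteed to hit.)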
    // Need to read in correct data range into cache RAM
    readIntoCache( z, y, x );
    // Now we are guaranteed a hit, call again and return valid data
    return dataCache(z, y, x);
}

//------------------------------------------------------------------------------------
// DSt_cube<T>::dataCache - set
//
// Description - Performs the file cache bookkeeping needed to write to location z,y,x
// Note: should never be called when CACHE_DISABLED
//------------------------------------------------------------------------------------
template <class T>
void DSt_cube<T>::dataCache( unsigned short z, unsigned short y, unsigned short x, T new_value )
{
    // Check what kind of caching is used
    switch( cache_state )
    {
    case CACHE_FULL_SLICE:
        // See if desired slice is in cache
        if ( cache_validity_table[z][0][0] != NULL )
        {
#if CACHING_USE_LINKED_LIST
            // Move to head of linked list
            oldest_seg_ptr = cache_validity_table[z][0][0]->queue_node_ptr;
            cache_age_queue->unlinkNode( oldest_seg_ptr );
            cache_age_queue->insertAtStart( oldest_seg_ptr );
            oldest_seg_ptr->item.data[y][x] = new_value;
            oldest_seg_ptr->item.needs_sync = true;
#else
            // It is a cache hit, set data and mark the entry as needing a sync
            cache_validity_table[z][0][0]->data[y][x] = new_value;
            cache_validity_table[z][0][0]->needs_sync = true;
#endif
            return;
        }
        break;

    case CACHE_FULL_ROW:
        // See if desired row is in cache
        if ( cache_validity_table[z][y][0] != NULL )
        {
#if CACHING_USE_LINKED_LIST
            // Move to head of linked list
            oldest_seg_ptr = cache_validity_table[z][y][0]->queue_node_ptr;
            cache_age_queue->unlinkNode( oldest_seg_ptr );
            cache_age_queue->insertAtStart( oldest_seg_ptr );
            oldest_seg_ptr->item.data[0][x] = new_value;
            oldest_seg_ptr->item.needs_sync = true;
#else
            // It is a cache hit, set data and mark the entry as needing a sync
            cache_validity_table[z][y][0]->data[0][x] = new_value;
            cache_validity_table[z][y][0]->needs_sync = true;
#endif
            return;
        }
        break;

    case CACHE_SUB_ROW:
        // See if the sub-row segment holding x is in cache
        if ( cache_validity_table[z][y][x>>cache_sub_row_shift_val] != NULL )
        {
#if CACHING_USE_LINKED_LIST
            // Move to head of linked list
            oldest_seg_ptr = cache_validity_table[z][y][x>>cache_sub_row_shift_val]->queue_node_ptr;
            cache_age_queue->unlinkNode( oldest_seg_ptr );
            cache_age_queue->insertAtStart( oldest_seg_ptr );
            oldest_seg_ptr->item.data[0][(~(0xFFFF<<cache_sub_row_shift_val)) & x] = new_value;
            oldest_seg_ptr->item.needs_sync = true;
#else
            // It is a cache hit, set data and mark the entry as needing a sync
            cache_validity_table[z][y][x>>cache_sub_row_shift_val]->data[0][(~(0xFFFF<<cache_sub_row_shift_val)) & x] = new_value;
            cache_validity_table[z][y][x>>cache_sub_row_shift_val]->needs_sync = true;
#endif
            return;
        }
        break;

    default:
        // Invalid, should never happen... log error, and crash :)
        write_to_log("DSt_cube::Cached Set - Fatal Error, invalid cache state %d", cache_state);
        exit( 1 );
    }

    // We have had a cache miss.
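    // (Writes are cached in RAM only: the hit path above sets needs_sync, and the data is
    //  flushed to the cache file later, on eviction in obtainFreeCache() or in forceCacheSync().)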
    // Need to read in correct data range into cache RAM
    readIntoCache( z, y, x );
    // Now we are guaranteed a hit, call again and set to desired data
    dataCache(z, y, x, new_value);
}

//------------------------------------------------------------------------------------
// DSt_cube<T>::invalidateOldestInValidityTable
//
// Description - Invalidates the OLDEST cache location in the validity table
//------------------------------------------------------------------------------------
#if CACHING_USE_LINKED_LIST
template <class T>
void DSt_cube<T>::invalidateOldestInValidityTable( )
{
    switch( cache_state )
    {
    case CACHE_FULL_SLICE:
        // Invalidate the cache info in table
        cache_validity_table[ oldest_seg_ptr->item.z ][ 0 ][ 0 ] = NULL;
        break;
    case CACHE_FULL_ROW:
        // Invalidate the cache info in table
        cache_validity_table[ oldest_seg_ptr->item.z ][ oldest_seg_ptr->item.y ][ 0 ] = NULL;
        break;
    case CACHE_SUB_ROW:
        // Invalidate the cache info in table
        cache_validity_table[ oldest_seg_ptr->item.z ]
                            [ oldest_seg_ptr->item.y ]
                            [ oldest_seg_ptr->item.x >> cache_sub_row_shift_val ] = NULL;
        break;
    default:
        // Invalid, should never happen... log error, and crash :)
        write_to_log("DSt_cube::invalidateValidityTable - Fatal Error, invalid cache state %d", cache_state);
        exit( 1 );
    }
}
#else
template <class T>
void DSt_cube<T>::invalidateOldestInValidityTable( )
{
    switch( cache_state )
    {
    case CACHE_FULL_SLICE:
        // Invalidate the cache info in table
        cache_validity_table[ cache_age_list[cache_oldest_indx].z ][ 0 ][ 0 ] = NULL;
        break;
    case CACHE_FULL_ROW:
        // Invalidate the cache info in table
        cache_validity_table[ cache_age_list[cache_oldest_indx].z ][ cache_age_list[cache_oldest_indx].y ][ 0 ] = NULL;
        break;
    case CACHE_SUB_ROW:
        // Invalidate the cache info in table
        cache_validity_table[ cache_age_list[cache_oldest_indx].z ]
                            [ cache_age_list[cache_oldest_indx].y ]
                            [ cache_age_list[cache_oldest_indx].x >> cache_sub_row_shift_val ] = NULL;
        break;
    default:
        // Invalid, should never happen... log error, and crash :)
        write_to_log("DSt_cube::invalidateValidityTable - Fatal Error, invalid cache state %d", cache_state);
        exit( 1 );
    }
}
#endif

//------------------------------------------------------------------------------------
// DSt_cube<T>::obtainFreeCache
// Description - Makes available a new cache entry in an unknown state.
//               Writes out current RAM cache contents to the file cache if needed.
//
// In linked list mode: oldest_seg_ptr points to the new segment after the call
// In array mode:       cache_available_indx is the new segment after the call
//------------------------------------------------------------------------------------
template <class T>
void DSt_cube<T>::obtainFreeCache( )
{
    // See if we can get away with obtaining a cache entry without disc access.
#if CACHING_USE_LINKED_LIST
    oldest_seg_ptr = cache_age_queue->getAndUnlinkTailNode();
    // Insert the segment at the head of the queue (making it 'newest')
    cache_age_queue->insertAtStart( oldest_seg_ptr );
    // Only need to write out to disc if the following is true:
    // 1) Oldest segment needs sync
    if ( oldest_seg_ptr->item.needs_sync == false )
    {
        // Do not invalidate if we have not filled up the cache yet
        if ( cache_available_indx == cache_number_of_entries )
        {
            invalidateOldestInValidityTable();
        }
        else
        {
            ++ cache_available_indx;
        }
        return;
    }
#else
    // Only need disc access if both of the following are true:
    // 1) Cache is full. This happens when (cache_available_indx+1) % cache_number_of_entries == cache_oldest_indx.
    // 2) Oldest row in cache was written to and needs to be synced
    if ( !( (((cache_available_indx+1)%cache_number_of_entries) == cache_oldest_indx )
            && ( cache_age_list[cache_oldest_indx].needs_sync == true ) ) )
    {
        // We do not need disc access, update cache available index to free one
        cache_available_indx = (cache_available_indx+1) % cache_number_of_entries;

        // If we are replacing an old used cache entry that didn't need a sync, invalidate its cache info in the table
        if ( cache_is_full )
        {
            invalidateOldestInValidityTable();
        }

        // Once the cache is full, we need to increment the oldest index by 1 each time as well
        if ( cache_available_indx == cache_oldest_indx )
        {
            cache_oldest_indx = (cache_oldest_indx+1) % cache_number_of_entries;
            cache_is_full = true;
        }
        return;
    }
#endif

    // Looks like we need to free up a cache entry; first write the oldest one out to disc.
    LARGE_INTEGER start_offset_bytes;
    DWORD bytes_written, bytes_to_write = sizeof(T) * cache_entry_size;

    // This is the disc seek offset to the desired slice in the file
#if CACHING_USE_LINKED_LIST
    start_offset_bytes.QuadPart = (LONGLONG) sizeof(T) * wX * wY * oldest_seg_ptr->item.z;
    unsigned short x = oldest_seg_ptr->item.x;
    unsigned short y = oldest_seg_ptr->item.y;
#else
    start_offset_bytes.QuadPart = (LONGLONG) sizeof(T) * wX * wY * cache_age_list[cache_oldest_indx].z;
    unsigned short x = cache_age_list[cache_oldest_indx].x;
    unsigned short y = cache_age_list[cache_oldest_indx].y;
#endif

    // Set cache type specific details
    switch( cache_state )
    {
    case CACHE_FULL_SLICE:
        // Nothing to do
        break;
    case CACHE_FULL_ROW:
        // Need to offset by the row number as well
        start_offset_bytes.QuadPart = start_offset_bytes.QuadPart + sizeof(T) * y * wX;
        break;
    case CACHE_SUB_ROW:
        if ( cache_sub_row_last_seg_indx == (x>>cache_sub_row_shift_val) )
        {
            // The last segment of a row might be a different size than normal
            bytes_to_write = sizeof(T) * cache_sub_row_last_entry_size;
        }
        // Need to offset by the row and sub-row number as well
        start_offset_bytes.QuadPart = start_offset_bytes.QuadPart + sizeof(T) * ( y * wX + (x>>cache_sub_row_shift_val) * cache_entry_size );
        break;
    default:
        // Invalid, should never happen... log error, and crash :)
        write_to_log("DSt_cube::obtainFreeCache - Fatal Error, invalid cache state %d", cache_state);
        exit( 1 );
    }

    // Write out the oldest segment to the cache file
    SetFilePointerEx( cache_file, start_offset_bytes, NULL, FILE_BEGIN );
#if CACHING_USE_LINKED_LIST
    WriteFile( cache_file, oldest_seg_ptr->item.data[0], bytes_to_write, &bytes_written, NULL );
    // Invalidate the cache info in table
    invalidateOldestInValidityTable();
    // Mark the newly available entry as not yet needing a sync back to disc
    oldest_seg_ptr->item.needs_sync = false;
#else
    WriteFile( cache_file, cache_age_list[cache_oldest_indx].data[0], bytes_to_write, &bytes_written, NULL );
    // Invalidate the cache info in table
    invalidateOldestInValidityTable();
    // Mark the newly available entry as not yet needing a sync back to disc
    cache_age_list[cache_oldest_indx].needs_sync = false;
    // Update cache info array indexes
    cache_oldest_indx = (cache_oldest_indx+1) % cache_number_of_entries;
    cache_available_indx = (cache_available_indx+1) % cache_number_of_entries;
#endif
}

//------------------------------------------------------------------------------------
// DSt_cube<T>::readIntoCache
// Description - Transfers the desired location from the file cache into the RAM cache.
//               Should only be called on a cache miss.
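//               On return the requested segment is resident in RAM, registered in
//               cache_validity_table, and its z (and, where applicable, y/x) bookkeeping
//               fields are up to date.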
//------------------------------------------------------------------------------------
template <class T>
void DSt_cube<T>::readIntoCache( unsigned short z, unsigned short y, unsigned short x )
{
    // First we need to get a free cache memory block
    obtainFreeCache();

    // Update cache info (z is universal, the others are cache type specific)
#if CACHING_USE_LINKED_LIST
    oldest_seg_ptr->item.z = z;
#else
    cache_age_list[cache_available_indx].z = z;
#endif

    // Shared variables for file access functions
    LARGE_INTEGER start_offset_bytes;
    DWORD bytes_read, bytes_to_read = sizeof(T) * cache_entry_size;

    // This is the disc seek offset to the desired slice in the file
    start_offset_bytes.QuadPart = (LONGLONG) sizeof(T) * wX * wY * z;

    // Set cache type specific details
    switch( cache_state )
    {
    case CACHE_FULL_SLICE:
        // Make the validity table point to the correct slice
#if CACHING_USE_LINKED_LIST
        cache_validity_table[z][0][0] = &( oldest_seg_ptr->item );
#else
        cache_validity_table[z][0][0] = &( cache_age_list[cache_available_indx] );
#endif
        break;

    case CACHE_FULL_ROW:
        // Need to offset by the row number as well
        start_offset_bytes.QuadPart = start_offset_bytes.QuadPart + sizeof(T)*y*wX;
        // Make the validity table point to the correct row
#if CACHING_USE_LINKED_LIST
        cache_validity_table[z][y][0] = &( oldest_seg_ptr->item );
        // Update cache y info
        oldest_seg_ptr->item.y = y;
#else
        cache_validity_table[z][y][0] = &( cache_age_list[cache_available_indx] );
        // Update cache y info
        cache_age_list[cache_available_indx].y = y;
#endif
        break;

    case CACHE_SUB_ROW:
        if ( cache_sub_row_last_seg_indx == (x>>cache_sub_row_shift_val) )
        {
            // The last segment of a row might be a different size than normal
            bytes_to_read = sizeof(T) * cache_sub_row_last_entry_size;
        }
        // Need to offset by the row and sub-row number as well
        start_offset_bytes.QuadPart = start_offset_bytes.QuadPart + sizeof(T)*( y*wX + (x>>cache_sub_row_shift_val)*cache_entry_size );
        // Make the validity table point to the correct sub-row segment
#if CACHING_USE_LINKED_LIST
        cache_validity_table[z][y][x>>cache_sub_row_shift_val] = &( oldest_seg_ptr->item );
        // Update cache y and x info
        oldest_seg_ptr->item.y = y;
        oldest_seg_ptr->item.x = x;
#else
        cache_validity_table[z][y][x>>cache_sub_row_shift_val] = &( cache_age_list[cache_available_indx] );
        // Update cache y and x info
        cache_age_list[cache_available_indx].y = y;
        cache_age_list[cache_available_indx].x = x;
#endif
        break;

    default:
        // Invalid, should never happen... log error, and crash :)
        write_to_log("DSt_cube::readIntoCache - Fatal Error, invalid cache state %d", cache_state);
        exit( 1 );
    }

    // Read data into the available cache RAM from the cache file
    SetFilePointerEx( cache_file, start_offset_bytes, NULL, FILE_BEGIN );
#if CACHING_USE_LINKED_LIST
    ReadFile( cache_file, oldest_seg_ptr->item.data[0], bytes_to_read, &bytes_read, NULL );
#else
    ReadFile( cache_file, cache_age_list[cache_available_indx].data[0], bytes_to_read, &bytes_read, NULL );
#endif
}

//------------------------------------------------------------------------------------
// DSt_cube<T>::forceCacheSync
// Description - If the RAM cache has been written to, forces it to sync with the cache file.
//               Preserves cache state.
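//               Walks every RAM cache entry, writes those flagged needs_sync back to the
//               cache file, and finishes with FlushFileBuffers().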
//------------------------------------------------------------------------------------
template <class T>
void DSt_cube<T>::forceCacheSync( )
{
    LARGE_INTEGER start_offset_bytes;
    DWORD bytes_written;
    DWORD bytes_to_write = sizeof(T) * cache_entry_size;
    unsigned short z, y, x;

    start_offset_bytes.QuadPart = 0;
#if CACHING_USE_LINKED_LIST
    oldest_seg_ptr = cache_age_queue->getHeadPtr();
#endif

    // For every cache item, see if a sync is needed, and if so write it to the file.
    // Note that we look at cache items that might have no data, but those should not need a sync.
    for (int i = 0; i < cache_number_of_entries; i++)
    {
#if CACHING_USE_LINKED_LIST
        if ( oldest_seg_ptr->item.needs_sync )
#else
        if ( cache_age_list[i].needs_sync )
#endif
        {
            switch( cache_state )
            {
            case CACHE_FULL_SLICE:
#if CACHING_USE_LINKED_LIST
                z = oldest_seg_ptr->item.z;
#else
                z = cache_age_list[i].z;
#endif
                // Find offset
                start_offset_bytes.QuadPart = (LONGLONG) sizeof(T) * z*wY*wX;
                break;

            case CACHE_FULL_ROW:
#if CACHING_USE_LINKED_LIST
                z = oldest_seg_ptr->item.z;
                y = oldest_seg_ptr->item.y;
#else
                z = cache_age_list[i].z;
                y = cache_age_list[i].y;
#endif
                // Find offset
                start_offset_bytes.QuadPart = (LONGLONG) sizeof(T)*(z*wY*wX + y*wX);
                break;

            case CACHE_SUB_ROW:
#if CACHING_USE_LINKED_LIST
                z = oldest_seg_ptr->item.z;
                y = oldest_seg_ptr->item.y;
                x = oldest_seg_ptr->item.x;
#else
                z = cache_age_list[i].z;
                y = cache_age_list[i].y;
                x = cache_age_list[i].x;
#endif
                bytes_to_write = (cache_sub_row_last_seg_indx == (x>>cache_sub_row_shift_val))
                                     ? (sizeof(T) * cache_sub_row_last_entry_size)
                                     : (sizeof(T) * cache_entry_size);
                // Find offset
                start_offset_bytes.QuadPart = (LONGLONG) sizeof(T)*( z*wY*wX + y*wX + (x>>cache_sub_row_shift_val)*cache_entry_size );
                break;

            default:
                // Invalid, should never happen... log error, and crash :)
                write_to_log("DSt_cube::forceCacheSync - Fatal Error, invalid cache state %d", cache_state);
                exit( 1 );
            }

            // Write out the cached segment to the cache file
            SetFilePointerEx( cache_file, start_offset_bytes, NULL, FILE_BEGIN );
#if CACHING_USE_LINKED_LIST
            WriteFile( cache_file, oldest_seg_ptr->item.data[0], bytes_to_write, &bytes_written, NULL );
            // Mark as no longer needing a sync
            oldest_seg_ptr->item.needs_sync = false;
#else
            WriteFile( cache_file, cache_age_list[i].data[0], bytes_to_write, &bytes_written, NULL );
            // Mark as no longer needing a sync
            cache_age_list[i].needs_sync = false;
#endif
        } // End IF needs sync

#if CACHING_USE_LINKED_LIST
        oldest_seg_ptr = oldest_seg_ptr->next;
#endif
    }

    FlushFileBuffers( cache_file );
}

//------------------------------------------------------------------------------------
// DSt_cube<T>::writeBufferToEntireCacheFile
// Description - Writes the buffer contents over the entire cache file and invalidates
//               the validity table
//------------------------------------------------------------------------------------
template <class T>
void DSt_cube<T>::writeBufferToEntireCacheFile(T *buffer, unsigned int bytes_in_buffer)
{
    // Write out the buffer over the full cache file
    if ( cache_state == CACHE_DISABLED )
    {
        return;
    }

    LARGE_INTEGER file_length;
    LARGE_INTEGER cur_pos;
    DWORD bytes_written;

    file_length.QuadPart = (LONGLONG) wX * wY * wZ * sizeof(T);

    // Seek to start of file
    cur_pos.QuadPart = 0;
    SetFilePointerEx( cache_file, cur_pos, NULL, FILE_BEGIN );

    // Repeatedly write whole buffers until less than one buffer's worth remains
    for ( cur_pos.QuadPart = bytes_in_buffer;
          cur_pos.QuadPart < file_length.QuadPart;
          cur_pos.QuadPart = cur_pos.QuadPart + bytes_in_buffer )
    {
        WriteFile( cache_file, buffer, bytes_in_buffer, &bytes_written, NULL );
    }

    // Write out what is left
    WriteFile( cache_file, buffer,
               (DWORD)(file_length.QuadPart - (cur_pos.QuadPart - bytes_in_buffer)),
               &bytes_written, NULL );

    // Invalidate the cache validity table
    memset( cache_validity_table_mem, 0,
            sizeof(cache_entry_info*) * cache_validity_table_dim.z * cache_validity_table_dim.y * cache_validity_table_dim.x );
}

//------------------------------------------------------------------------------------
// DSt_cube<T>::resizeCacheBuffer
// Description - Syncs the RAM cache with the cache file, frees the cache buffer memory,
//               then reallocates it at the new size (does not change cache type)
//------------------------------------------------------------------------------------
template <class T>
void DSt_cube<T>::resizeCacheBuffer( UINT64 new_cache_size_bytes )
{
    // Make sure caching is enabled
    if ( cache_state == CACHE_DISABLED )
    {
        return;
    }

    // First make sure the current buffer is in sync with the cache file
    forceCacheSync();

    // Free up buffer related memory
#if CACHING_USE_LINKED_LIST
    delete cache_age_queue;
#else
    delete[] cache_age_list;
#endif
    delete[] rows;
    delete[] memory_block_ptrs[0];
    delete[] memory_block_ptrs;

    // Allocate the new number of desired segments
    allocateBufferMemory( new_cache_size_bytes );
}
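//------------------------------------------------------------------------------------
// Usage sketch (illustrative only - assumes an already constructed DSt_cube<float>
// named "cube" with file caching enabled, and that the members below are reachable
// from the calling context):
//
//     float v = cube.dataCache( z, y, x );           // cached read of voxel (z,y,x)
//     cube.dataCache( z, y, x, v * 0.5f );           // cached write, marks the segment dirty
//     cube.forceCacheSync();                         // flush dirty segments to the cache file
//     cube.resizeCacheBuffer( 64 * 1024 * 1024 );    // rebuild the RAM cache at 64 MB
//------------------------------------------------------------------------------------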