// Panda3D
// Filename: vertexDataPage.cxx
// Created by:  drose (04Jun07)
//
////////////////////////////////////////////////////////////////////
//
// PANDA 3D SOFTWARE
// Copyright (c) Carnegie Mellon University. All rights reserved.
//
// All use of this software is subject to the terms of the revised BSD
// license. You should have received a copy of this license along
// with this source code in a file named "LICENSE."
//
////////////////////////////////////////////////////////////////////

#include "vertexDataPage.h"
#include "configVariableInt.h"
#include "vertexDataSaveFile.h"
#include "vertexDataBook.h"
#include "pStatTimer.h"
#include "memoryHook.h"

#ifdef HAVE_ZLIB
#include <zlib.h>
#endif

// Config variables controlling the three-tier vertex-data paging
// system (resident RAM -> compressed RAM -> disk).

ConfigVariableInt max_resident_vertex_data
("max-resident-vertex-data", -1,
 PRC_DESC("Specifies the maximum number of bytes of all vertex data "
          "that is allowed to remain resident in system RAM at one time. "
          "If more than this number of bytes of vertices are created, "
          "the least-recently-used ones will be temporarily compressed in "
          "system RAM until they are needed. Set it to -1 for no limit."));

ConfigVariableInt max_compressed_vertex_data
("max-compressed-vertex-data", 0,
 PRC_DESC("Specifies the maximum number of bytes of all vertex data "
          "that is allowed to remain compressed in system RAM at one time. "
          "If more than this number of bytes of vertices are created, "
          "the least-recently-used ones will be temporarily flushed to "
          "disk until they are needed. Set it to -1 for no limit."));

ConfigVariableInt vertex_data_compression_level
("vertex-data-compression-level", 1,
 PRC_DESC("Specifies the zlib compression level to use when compressing "
          "vertex data. The number should be in the range 1 to 9, where "
          "larger values are slower but give better compression."));

ConfigVariableInt max_disk_vertex_data
("max-disk-vertex-data", -1,
 PRC_DESC("Specifies the maximum number of bytes of vertex data "
          "that is allowed to be written to disk. Set it to -1 for no "
          "limit."));

// The singleton manager for the background paging threads; NULL until
// the first page transition is requested with threading enabled.
PT(VertexDataPage::PageThreadManager) VertexDataPage::_thread_mgr;

// This is a reference to an allocated Mutex, instead of just a static
// Mutex, to protect against ordering issues when the application
// shuts down.
Mutex &VertexDataPage::_tlock = *(new Mutex("VertexDataPage::_tlock"));

// One LRU per ram class, plus a "pending" LRU for pages queued on the
// thread manager.  The disk and pending LRUs have no size limit (0).
SimpleLru VertexDataPage::_resident_lru("resident", max_resident_vertex_data);
SimpleLru VertexDataPage::_compressed_lru("compressed", max_compressed_vertex_data);
SimpleLru VertexDataPage::_disk_lru("disk", 0);
SimpleLru VertexDataPage::_pending_lru("pending", 0);

// Indexed by RamClass, so a page can find its proper LRU from its
// current ram class.
SimpleLru *VertexDataPage::_global_lru[RC_end_of_list] = {
  &VertexDataPage::_resident_lru,
  &VertexDataPage::_compressed_lru,
  &VertexDataPage::_disk_lru,
};

VertexDataSaveFile *VertexDataPage::_save_file;

// This mutex is (mostly) unused. We just need a Mutex to pass
// to the Book Constructor, below.
Mutex VertexDataPage::_unused_mutex;

PStatCollector VertexDataPage::_vdata_compress_pcollector("*:Vertex Data:Compress");
PStatCollector VertexDataPage::_vdata_decompress_pcollector("*:Vertex Data:Decompress");
PStatCollector VertexDataPage::_vdata_save_pcollector("*:Vertex Data:Save");
PStatCollector VertexDataPage::_vdata_restore_pcollector("*:Vertex Data:Restore");
PStatCollector VertexDataPage::_thread_wait_pcollector("Wait:Idle");
PStatCollector VertexDataPage::_alloc_pages_pcollector("System memory:MMap:Vertex data");

TypeHandle VertexDataPage::_type_handle;
TypeHandle VertexDataPage::DeflatePage::_type_handle;

#if defined(HAVE_ZLIB) && !defined(USE_MEMORY_NOWRAPPERS)
// Define functions that hook zlib into panda's memory allocation system.
static void *
do_zlib_alloc(voidpf opaque, uInt items, uInt size) {
  return PANDA_MALLOC_ARRAY(items * size);
}
static void
do_zlib_free(voidpf opaque, voidpf address) {
  PANDA_FREE_ARRAY(address);
}
#endif  // HAVE_ZLIB && !USE_MEMORY_NOWRAPPERS
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::Book Constructor
//       Access: Private
//  Description: This constructor is used only by VertexDataBook, to
//               create a mostly-empty object that can be used to
//               search for a particular page size in the set.
////////////////////////////////////////////////////////////////////
VertexDataPage::
VertexDataPage(size_t book_size) :
  SimpleAllocator(book_size, _unused_mutex),
  SimpleLruPage(book_size),
  _book_size(book_size),
  _block_size(0),
  _book(NULL)
{
  // No page data is allocated for a search-key page.
  _page_data = NULL;
  _size = 0;
  _uncompressed_size = 0;
  _ram_class = RC_resident;
  _pending_ram_class = RC_resident;
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::Constructor
//       Access: Private
//  Description: Creates a real page of the indicated size, allocates
//               its data buffer, and marks it resident.
////////////////////////////////////////////////////////////////////
VertexDataPage::
VertexDataPage(VertexDataBook *book, size_t page_size, size_t block_size) :
  SimpleAllocator(page_size, book->_lock),
  SimpleLruPage(page_size),
  _book_size(page_size),
  _block_size(block_size),
  _book(book)
{
  // The actual allocation is rounded up (presumably to a multiple of
  // the system page size -- see round_up()).
  _allocated_size = round_up(page_size);
  _page_data = alloc_page_data(_allocated_size);
  _size = page_size;

  _uncompressed_size = _size;
  _pending_ram_class = RC_resident;
  set_ram_class(RC_resident);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::Destructor
//       Access: Private, Virtual
//  Description: Dequeues any pending thread request for this page and
//               releases the page data.
////////////////////////////////////////////////////////////////////
VertexDataPage::
~VertexDataPage() {

  // Since the only way to delete a page is via the
  // changed_contiguous() method, the lock will already be held.
  // MutexHolder holder(_lock);

  {
    MutexHolder holder2(_tlock);
    if (_pending_ram_class != _ram_class) {
      // A transition is still queued on the thread manager; pull it
      // out before the page goes away.
      nassertv(_thread_mgr != (PageThreadManager *)NULL);
      _thread_mgr->remove_page(this);
    }
  }

  if (_page_data != NULL) {
    free_page_data(_page_data, _allocated_size);
    _size = 0;
  }

  nassertv(_book == NULL);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::stop_threads
//       Access: Published, Static
//  Description: Call this to stop the paging threads, if they were
//               started. This may block until all of the pending
//               tasks have been completed.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
stop_threads() {
  // Take ownership of the manager under _tlock, then stop it with the
  // lock released (stop_threads() assumes _tlock is not held).
  PT(PageThreadManager) thread_mgr;
  {
    MutexHolder holder(_tlock);
    thread_mgr = _thread_mgr;
    _thread_mgr.clear();
  }

  if (thread_mgr != (PageThreadManager *)NULL) {
    gobj_cat.info()
      << "Stopping vertex paging threads.\n";
    thread_mgr->stop_threads();
  }
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::flush_threads
//       Access: Published, Static
//  Description: Waits for all of the pending thread tasks to finish
//               before returning.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
flush_threads() {
  int num_threads = vertex_data_page_threads;
  if (num_threads == 0) {
    // Threading is configured off; just make sure any previously
    // started threads are fully stopped.
    stop_threads();
    return;
  }

  PT(PageThreadManager) thread_mgr;
  {
    MutexHolder holder(_tlock);
    thread_mgr = _thread_mgr;
  }

  if (thread_mgr != (PageThreadManager *)NULL) {
    // Stopping the threads drains the pending queues; then restart
    // them so paging continues.  NOTE(review): _tlock is released
    // between the copy above and stop_threads() below, so a
    // concurrently-installed new manager would not be flushed --
    // presumably acceptable for this best-effort flush; confirm.
    thread_mgr->stop_threads();
    MutexHolder holder(_tlock);
    thread_mgr->start_threads(num_threads);
  }
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::output
//       Access: Published, Virtual
//  Description: Writes a one-line description of the page (delegates
//               to the SimpleAllocator base).
////////////////////////////////////////////////////////////////////
void VertexDataPage::
output(ostream &out) const {
  SimpleAllocator::output(out);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::write
//       Access: Published, Virtual
//  Description: Writes a multi-line description of the page.  Note
//               that indent_level is not forwarded; the base class
//               write() shown here takes only the stream.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
write(ostream &out, int indent_level) const {
  SimpleAllocator::write(out);
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::make_block
//       Access: Protected, Virtual
//  Description: Creates a new SimpleAllocatorBlock object. Override
//               this function to specialize the block type returned.
////////////////////////////////////////////////////////////////////
SimpleAllocatorBlock *VertexDataPage::
make_block(size_t start, size_t size) {
  return new VertexDataBlock(this, start, size);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::changed_contiguous
//       Access: Protected, Virtual
//  Description: This callback function is made whenever the estimate
//               of contiguous available space changes, either through
//               an alloc or free. The lock will be held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
changed_contiguous() {
  if (do_is_empty()) {
    // If the page is now empty, delete it.  Note: this removes the
    // page from its book and then does "delete this" -- nothing may
    // touch the object after this branch.
    VertexDataBook::Pages::iterator pi = _book->_pages.find(this);
    nassertv(pi != _book->_pages.end());
    _book->_pages.erase(pi);
    _book = NULL;
    delete this;
    return;
  }

  adjust_book_size();
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::evict_lru
//       Access: Public, Virtual
//  Description: Evicts the page from the LRU. Called internally when
//               the LRU determines that it is full. May also be
//               called externally when necessary to explicitly evict
//               the page.
//
//               It is legal for this method to either evict the page
//               as requested, do nothing (in which case the eviction
//               will be requested again at the next epoch), or
//               requeue itself on the tail of the queue (in which
//               case the eviction will be requested again much
//               later).
////////////////////////////////////////////////////////////////////
void VertexDataPage::
evict_lru() {
  MutexHolder holder(_lock);

  switch (_ram_class) {
  case RC_resident:
    // Demote one tier: to compressed RAM, or straight to disk if the
    // compressed LRU is configured with no capacity.
    if (_compressed_lru.get_max_size() == 0) {
      request_ram_class(RC_disk);
    } else {
      request_ram_class(RC_compressed);
    }
    break;

  case RC_compressed:
    request_ram_class(RC_disk);
    break;

  case RC_disk:
  case RC_end_of_list:
    gobj_cat.warning()
      << "Internal error: attempt to evict array data " << this
      << " in inappropriate state " << _ram_class << ".\n";
    break;
  }
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::do_alloc
//       Access: Private
//  Description: Allocates a new block. Returns NULL if a block of the
//               requested size cannot be allocated.
//
//               To free the allocated block, call block->free(), or
//               simply delete the block pointer.
//
//               Assumes the lock is already held.
////////////////////////////////////////////////////////////////////
VertexDataBlock *VertexDataPage::
do_alloc(size_t size) {
  VertexDataBlock *block = (VertexDataBlock *)SimpleAllocator::do_alloc(size);

  if (block != (VertexDataBlock *)NULL && _ram_class != RC_disk) {
    // When we allocate a new block within a resident page, we have to
    // clear the disk cache (since we have just invalidated it).
    _saved_block.clear();
  }

  return block;
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::make_resident_now
//       Access: Private
//  Description: Short-circuits the thread and forces the page into
//               resident status immediately.
//
//               Intended to be called from the main thread. Assumes
//               the lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_resident_now() {
  MutexHolder holder(_tlock);
  if (_pending_ram_class != _ram_class) {
    // The page is queued on the thread manager; dequeue it so we can
    // do the work synchronously instead.
    nassertv(_thread_mgr != (PageThreadManager *)NULL);
    _thread_mgr->remove_page(this);
  }

  make_resident();
  _pending_ram_class = RC_resident;
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::make_resident
//       Access: Private
//  Description: Moves the page to fully resident status by
//               expanding it or reading it from disk as necessary.
//
//               Intended to be called from the sub-thread. Assumes
//               the lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_resident() {
  if (_ram_class == RC_resident) {
    mark_used_lru();
    return;
  }

  if (_ram_class == RC_disk) {
    // Restoring from disk yields either RC_compressed or RC_resident,
    // depending on how the page was saved.
    do_restore_from_disk();
  }

  if (_ram_class == RC_compressed) {
#ifdef HAVE_ZLIB
    PStatTimer timer(_vdata_decompress_pcollector);

    if (gobj_cat.is_debug()) {
      gobj_cat.debug()
        << "Expanding page from " << _size
        << " to " << _uncompressed_size << "\n";
    }
    size_t new_allocated_size = round_up(_uncompressed_size);
    unsigned char *new_data = alloc_page_data(new_allocated_size);
    unsigned char *end_data = new_data + new_allocated_size;
    // NOTE(review): dest_len appears unused below -- presumably a
    // leftover from an earlier uncompress()-style implementation.
    uLongf dest_len = _uncompressed_size;

    z_stream z_source;
#ifdef USE_MEMORY_NOWRAPPERS
    z_source.zalloc = Z_NULL;
    z_source.zfree = Z_NULL;
#else
    z_source.zalloc = (alloc_func)&do_zlib_alloc;
    z_source.zfree = (free_func)&do_zlib_free;
#endif

    z_source.opaque = Z_NULL;
    z_source.msg = (char *) "no error message";

    z_source.next_in = (Bytef *)(char *)_page_data;
    z_source.avail_in = _size;
    z_source.next_out = (Bytef *)new_data;
    z_source.avail_out = new_allocated_size;

    int result = inflateInit(&z_source);
    if (result < 0) {
      nassert_raise("zlib error");
      return;
    }
    Thread::consider_yield();

    size_t output_size = 0;

    // Inflate in inflate_page_size chunks, yielding between chunks so
    // this long operation doesn't starve other threads.
    int flush = 0;
    result = 0;
    while (result != Z_STREAM_END) {
      unsigned char *start_out = (unsigned char *)z_source.next_out;
      nassertv(start_out < end_data);
      z_source.avail_out = min((size_t)(end_data - start_out), (size_t)inflate_page_size);
      nassertv(z_source.avail_out != 0);
      result = inflate(&z_source, flush);
      if (result < 0 && result != Z_BUF_ERROR) {
        nassert_raise("zlib error");
        return;
      }
      size_t bytes_produced = (size_t)((unsigned char *)z_source.next_out - start_out);
      output_size += bytes_produced;
      if (bytes_produced == 0) {
        // If we ever produce no bytes, then start flushing the output.
        flush = Z_FINISH;
      }

      Thread::consider_yield();
    }
    nassertv(z_source.avail_in == 0);
    nassertv(output_size == _uncompressed_size);

    result = inflateEnd(&z_source);
    nassertv(result == Z_OK);

    free_page_data(_page_data, _allocated_size);
    _page_data = new_data;
    _size = _uncompressed_size;
    _allocated_size = new_allocated_size;
#endif

    set_lru_size(_size);
    set_ram_class(RC_resident);
  }
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::make_compressed
//       Access: Private
//  Description: Moves the page to compressed status by
//               compressing it or reading it from disk as necessary.
//
//               Assumes the lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_compressed() {
  if (_ram_class == RC_compressed) {
    // If we're already compressed, just mark the page recently used.
    mark_used_lru();
    return;
  }

  if (_ram_class == RC_disk) {
    do_restore_from_disk();
  }

  if (_ram_class == RC_resident) {
    nassertv(_size == _uncompressed_size);

#ifdef HAVE_ZLIB
    PStatTimer timer(_vdata_compress_pcollector);

    // Output is collected in a singly-linked chain of DeflatePage
    // buffers; "head" keeps the start of the chain for the copy-out
    // pass below.
    DeflatePage *page = new DeflatePage;
    DeflatePage *head = page;

    z_stream z_dest;
#ifdef USE_MEMORY_NOWRAPPERS
    z_dest.zalloc = Z_NULL;
    z_dest.zfree = Z_NULL;
#else
    z_dest.zalloc = (alloc_func)&do_zlib_alloc;
    z_dest.zfree = (free_func)&do_zlib_free;
#endif

    z_dest.opaque = Z_NULL;
    z_dest.msg = (char *) "no error message";

    int result = deflateInit(&z_dest, vertex_data_compression_level);
    if (result < 0) {
      nassert_raise("zlib error");
      return;
    }
    Thread::consider_yield();

    z_dest.next_in = (Bytef *)(char *)_page_data;
    z_dest.avail_in = _uncompressed_size;
    size_t output_size = 0;

    // Compress the data into one or more individual pages. We have
    // to compress it page-at-a-time, since we're not really sure how
    // big the result will be (so we can't easily pre-allocate a
    // buffer).
    int flush = 0;
    result = 0;
    while (result != Z_STREAM_END) {
      unsigned char *start_out = (page->_buffer + page->_used_size);
      z_dest.next_out = (Bytef *)start_out;
      z_dest.avail_out = (size_t)deflate_page_size - page->_used_size;
      if (z_dest.avail_out == 0) {
        // The current page is full; chain on a fresh one.
        DeflatePage *new_page = new DeflatePage;
        page->_next = new_page;
        page = new_page;
        start_out = page->_buffer;
        z_dest.next_out = (Bytef *)start_out;
        z_dest.avail_out = deflate_page_size;
      }

      result = deflate(&z_dest, flush);
      if (result < 0 && result != Z_BUF_ERROR) {
        nassert_raise("zlib error");
        return;
      }
      size_t bytes_produced = (size_t)((unsigned char *)z_dest.next_out - start_out);
      page->_used_size += bytes_produced;
      nassertv(page->_used_size <= deflate_page_size);
      output_size += bytes_produced;
      if (bytes_produced == 0) {
        // If we ever produce no bytes, then start flushing the output.
        flush = Z_FINISH;
      }

      Thread::consider_yield();
    }
    nassertv(z_dest.avail_in == 0);

    result = deflateEnd(&z_dest);
    nassertv(result == Z_OK);

    // Now we know how big the result will be. Allocate a buffer, and
    // copy the data from the various pages.

    size_t new_allocated_size = round_up(output_size);
    unsigned char *new_data = alloc_page_data(new_allocated_size);

    size_t copied_size = 0;
    unsigned char *p = new_data;
    page = head;
    while (page != NULL) {
      memcpy(p, page->_buffer, page->_used_size);
      copied_size += page->_used_size;
      p += page->_used_size;
      DeflatePage *next = page->_next;
      delete page;
      page = next;
    }
    nassertv(copied_size == output_size);

    // Now free the original, uncompressed data, and put this new
    // compressed buffer in its place.
    free_page_data(_page_data, _allocated_size);
    _page_data = new_data;
    _size = output_size;
    _allocated_size = new_allocated_size;

    if (gobj_cat.is_debug()) {
      gobj_cat.debug()
        << "Compressed " << *this << " from " << _uncompressed_size
        << " to " << _size << "\n";
    }
#endif
    set_lru_size(_size);
    set_ram_class(RC_compressed);
  }
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::make_disk
//       Access: Private
//  Description: Moves the page to disk status by writing it to disk
//               as necessary.
//
//               Assumes the lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_disk() {
  if (_ram_class == RC_disk) {
    // If we're already on disk, just mark the page recently used.
    mark_used_lru();
    return;
  }

  if (_ram_class == RC_resident || _ram_class == RC_compressed) {
    if (!do_save_to_disk()) {
      // Can't save it to disk for some reason.
      gobj_cat.warning()
        << "Couldn't save page " << this << " to disk.\n";
      mark_used_lru();
      return;
    }

    // The in-RAM copy is now redundant; drop it.
    free_page_data(_page_data, _allocated_size);
    _page_data = NULL;
    _size = 0;

    set_ram_class(RC_disk);
  }
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::do_save_to_disk
//       Access: Private
//  Description: Writes the page to disk, but does not evict it from
//               memory or affect its LRU status. If it gets evicted
//               later without having been modified, it will not need
//               to write itself to disk again.
//
//               Returns true on success, false on failure. Assumes
//               the lock is already held.
////////////////////////////////////////////////////////////////////
bool VertexDataPage::
do_save_to_disk() {
  if (_ram_class == RC_resident || _ram_class == RC_compressed) {
    PStatTimer timer(_vdata_save_pcollector);

    if (_saved_block == (VertexDataSaveBlock *)NULL) {
      if (gobj_cat.is_debug()) {
        gobj_cat.debug()
          << "Storing page, " << _size << " bytes, to disk\n";
      }

      // Remember whether the saved image is compressed, so
      // do_restore_from_disk() can restore the right ram class.
      bool compressed = (_ram_class == RC_compressed);

      _saved_block = get_save_file()->write_data(_page_data, _allocated_size, compressed);
      if (_saved_block == (VertexDataSaveBlock *)NULL) {
        // Can't write it to disk. Too bad.
        return false;
      }
    } else {
      // A valid _saved_block means the on-disk copy is still current;
      // nothing to do.
      if (gobj_cat.is_debug()) {
        gobj_cat.debug()
          << "Page already stored: " << _size << " bytes\n";
      }
    }
  }

  return true;
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::do_restore_from_disk
//       Access: Private
//  Description: Restores the page from disk and makes it
//               either compressed or resident (according to whether
//               it was stored compressed on disk).
//
//               Assumes the lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
do_restore_from_disk() {
  if (_ram_class == RC_disk) {
    nassertv(_saved_block != (VertexDataSaveBlock *)NULL);
    nassertv(_page_data == (unsigned char *)NULL && _size == 0);

    PStatTimer timer(_vdata_restore_pcollector);

    size_t buffer_size = _saved_block->get_size();
    if (gobj_cat.is_debug()) {
      gobj_cat.debug()
        << "Restoring page, " << buffer_size << " bytes, from disk\n";
    }

    size_t new_allocated_size = round_up(buffer_size);
    unsigned char *new_data = alloc_page_data(new_allocated_size);
    if (!get_save_file()->read_data(new_data, new_allocated_size, _saved_block)) {
      nassert_raise("read error");
    }

    nassertv(_page_data == (unsigned char *)NULL);
    _page_data = new_data;
    _size = buffer_size;
    _allocated_size = new_allocated_size;

    // Note that _saved_block is deliberately kept: the on-disk copy
    // remains valid until the page data is next modified.
    set_lru_size(_size);
    if (_saved_block->get_compressed()) {
      set_ram_class(RC_compressed);
    } else {
      set_ram_class(RC_resident);
    }
  }
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::adjust_book_size
//       Access: Private
//  Description: Called when the "book size"--the size of the page as
//               recorded in its book's table--has changed for some
//               reason. Assumes the lock is held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
adjust_book_size() {
  size_t new_size = _contiguous;
  if (_ram_class != RC_resident) {
    // Let's not attempt to allocate new buffers from non-resident
    // pages.
    new_size = 0;
  }

  if (_book != (VertexDataBook *)NULL && new_size != _book_size) {
    // The book's set is ordered by _book_size, so the page must be
    // removed and re-inserted to change its key.
    VertexDataBook::Pages::iterator pi = _book->_pages.find(this);
    nassertv(pi != _book->_pages.end());
    _book->_pages.erase(pi);

    _book_size = new_size;
    bool inserted = _book->_pages.insert(this).second;
    nassertv(inserted);
  }
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::request_ram_class
//       Access: Private
//  Description: Requests the thread set the page to the indicated ram
//               class (if we are using threading). The page will be
//               enqueued in the thread, which will eventually be
//               responsible for setting the requested ram class.
//
//               Assumes the page's lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
request_ram_class(RamClass ram_class) {
  int num_threads = vertex_data_page_threads;
  if (num_threads == 0 || !Thread::is_threading_supported()) {
    // No threads. Do it immediately.
    switch (ram_class) {
    case RC_resident:
      make_resident();
      break;

    case RC_compressed:
      make_compressed();
      break;

    case RC_disk:
      make_disk();
      break;

    case RC_end_of_list:
      break;
    }
    _pending_ram_class = ram_class;
    return;
  }

  MutexHolder holder(_tlock);
  if (_thread_mgr == (PageThreadManager *)NULL) {
    // Create the thread manager.
    gobj_cat.info()
      << "Spawning " << num_threads << " vertex paging threads.\n";
    _thread_mgr = new PageThreadManager(num_threads);
  }

  _thread_mgr->add_page(this, ram_class);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::make_save_file
//       Access: Private, Static
//  Description: Creates the global VertexDataSaveFile that will be
//               used to save vertex data buffers to disk when
//               necessary.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_save_file() {
  size_t max_size = (size_t)max_disk_vertex_data;

  _save_file = new VertexDataSaveFile(vertex_save_file_directory,
                                      vertex_save_file_prefix, max_size);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::alloc_page_data
//       Access: Private
//  Description: Allocates and returns a freshly-allocated buffer of
//               at least the indicated size for holding vertex data.
////////////////////////////////////////////////////////////////////
unsigned char *VertexDataPage::
alloc_page_data(size_t page_size) const {
  _alloc_pages_pcollector.add_level_now(page_size);
  return (unsigned char *)memory_hook->mmap_alloc(page_size, false);
}
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::free_page_data
//       Access: Private
//  Description: Releases a buffer allocated via alloc_page_data().
////////////////////////////////////////////////////////////////////
void VertexDataPage::
free_page_data(unsigned char *page_data, size_t page_size) const {
  _alloc_pages_pcollector.sub_level_now(page_size);
  memory_hook->mmap_free(page_data, page_size);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThreadManager::Constructor
//       Access: Public
//  Description: Assumes _tlock is held.
////////////////////////////////////////////////////////////////////
VertexDataPage::PageThreadManager::
PageThreadManager(int num_threads) :
  _shutdown(false),
  _pending_cvar(_tlock)
{
  start_threads(num_threads);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThreadManager::add_page
//       Access: Public
//  Description: Enqueues the indicated page on the thread queue to
//               convert it to the specified ram class.
//
//               It is assumed the page's lock is already held, and
//               that _tlock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThreadManager::
add_page(VertexDataPage *page, RamClass ram_class) {
  nassertv(!_shutdown);

  if (page->_pending_ram_class == ram_class) {
    // It's already queued.
    nassertv(page->get_lru() == &_pending_lru);
    return;
  }

  if (page->_pending_ram_class != page->_ram_class) {
    // It's already queued, but for a different ram class. Dequeue it
    // so we can requeue it.
    remove_page(page);
  }

  if (page->_pending_ram_class != ram_class) {
    // First, move the page to the "pending" LRU. When it eventually
    // gets its requested ram class set, it will be requeued on the
    // appropriate live LRU.
    page->mark_used_lru(&_pending_lru);

    page->_pending_ram_class = ram_class;
    // Reads (back toward resident) and writes (toward compressed or
    // disk) go on separate queues.
    if (ram_class == RC_resident) {
      _pending_reads.push_back(page);
    } else {
      _pending_writes.push_back(page);
    }
    _pending_cvar.notify();
  }
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThreadManager::remove_page
//       Access: Public
//  Description: Dequeues the indicated page and removes it from the
//               pending task list.
//
//               It is assumed the page's lock is already held, and
//               that _tlock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThreadManager::
remove_page(VertexDataPage *page) {
  nassertv(page != (VertexDataPage *)NULL);

  PageThreads::iterator ti;
  for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
    PageThread *thread = (*ti);
    if (page == thread->_working_page) {
      // Oops, this thread is currently working on this one. We'll have
      // to wait for the thread to finish.  The page's own lock must be
      // released while we wait, or the worker could deadlock on it.
      page->_lock.release();
      while (page == thread->_working_page) {
        thread->_working_cvar.wait();
      }
      page->_lock.acquire();
      return;
    }
  }

  if (page->_pending_ram_class == RC_resident) {
    PendingPages::iterator pi =
      find(_pending_reads.begin(), _pending_reads.end(), page);
    nassertv(pi != _pending_reads.end());
    _pending_reads.erase(pi);
  } else {
    PendingPages::iterator pi =
      find(_pending_writes.begin(), _pending_writes.end(), page);
    nassertv(pi != _pending_writes.end());
    _pending_writes.erase(pi);
  }

  page->_pending_ram_class = page->_ram_class;

  // Put the page back on its proper LRU.
  page->mark_used_lru(_global_lru[page->_ram_class]);
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThreadManager::get_num_threads
//       Access: Public
//  Description: Returns the number of threads active on the thread
//               manager. Assumes _tlock is held.
////////////////////////////////////////////////////////////////////
int VertexDataPage::PageThreadManager::
get_num_threads() const {
  return (int)_threads.size();
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThreadManager::get_num_pending_reads
//       Access: Public
//  Description: Returns the number of read requests waiting on the
//               queue. Assumes _tlock is held.
////////////////////////////////////////////////////////////////////
int VertexDataPage::PageThreadManager::
get_num_pending_reads() const {
  return (int)_pending_reads.size();
}

////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThreadManager::get_num_pending_writes
//       Access: Public
//  Description: Returns the number of write requests waiting on the
//               queue. Assumes _tlock is held.
////////////////////////////////////////////////////////////////////
int VertexDataPage::PageThreadManager::
get_num_pending_writes() const {
  return (int)_pending_writes.size();
}
00954 //////////////////////////////////////////////////////////////////// 00955 void VertexDataPage::PageThreadManager:: 00956 start_threads(int num_threads) { 00957 _shutdown = false; 00958 00959 _threads.reserve(num_threads); 00960 for (int i = 0; i < num_threads; ++i) { 00961 ostringstream name_strm; 00962 name_strm << "VertexDataPage" << _threads.size(); 00963 PT(PageThread) thread = new PageThread(this, name_strm.str()); 00964 thread->start(TP_low, true); 00965 _threads.push_back(thread); 00966 } 00967 } 00968 00969 //////////////////////////////////////////////////////////////////// 00970 // Function: VertexDataPage::PageThreadManager::stop_threads 00971 // Access: Public 00972 // Description: Signals all the threads to stop and waits for them. 00973 // Does not return until the threads have finished. 00974 // Assumes _tlock is *not* held. 00975 //////////////////////////////////////////////////////////////////// 00976 void VertexDataPage::PageThreadManager:: 00977 stop_threads() { 00978 PageThreads threads; 00979 { 00980 MutexHolder holder(_tlock); 00981 _shutdown = true; 00982 _pending_cvar.notify_all(); 00983 threads.swap(_threads); 00984 } 00985 00986 PageThreads::iterator ti; 00987 for (ti = threads.begin(); ti != threads.end(); ++ti) { 00988 PageThread *thread = (*ti); 00989 thread->join(); 00990 } 00991 00992 nassertv(_pending_reads.empty() && _pending_writes.empty()); 00993 } 00994 00995 //////////////////////////////////////////////////////////////////// 00996 // Function: VertexDataPage::PageThread::Constructor 00997 // Access: Public 00998 // Description: 00999 //////////////////////////////////////////////////////////////////// 01000 VertexDataPage::PageThread:: 01001 PageThread(PageThreadManager *manager, const string &name) : 01002 Thread(name, name), 01003 _manager(manager), 01004 _working_cvar(_tlock) 01005 { 01006 } 01007 01008 //////////////////////////////////////////////////////////////////// 01009 // Function: 
VertexDataPage::PageThread::thread_main 01010 // Access: Protected, Virtual 01011 // Description: The main processing loop for each sub-thread. 01012 //////////////////////////////////////////////////////////////////// 01013 void VertexDataPage::PageThread:: 01014 thread_main() { 01015 _tlock.acquire(); 01016 01017 while (true) { 01018 PStatClient::thread_tick(get_sync_name()); 01019 01020 while (_manager->_pending_reads.empty() && 01021 _manager->_pending_writes.empty()) { 01022 if (_manager->_shutdown) { 01023 _tlock.release(); 01024 return; 01025 } 01026 PStatTimer timer(_thread_wait_pcollector); 01027 _manager->_pending_cvar.wait(); 01028 } 01029 01030 // Reads always have priority. 01031 if (!_manager->_pending_reads.empty()) { 01032 _working_page = _manager->_pending_reads.front(); 01033 _manager->_pending_reads.pop_front(); 01034 } else { 01035 _working_page = _manager->_pending_writes.front(); 01036 _manager->_pending_writes.pop_front(); 01037 } 01038 01039 RamClass ram_class = _working_page->_pending_ram_class; 01040 _tlock.release(); 01041 01042 { 01043 MutexHolder holder(_working_page->_lock); 01044 switch (ram_class) { 01045 case RC_resident: 01046 _working_page->make_resident(); 01047 break; 01048 01049 case RC_compressed: 01050 _working_page->make_compressed(); 01051 break; 01052 01053 case RC_disk: 01054 _working_page->make_disk(); 01055 break; 01056 01057 case RC_end_of_list: 01058 break; 01059 } 01060 } 01061 01062 _tlock.acquire(); 01063 01064 _working_page = NULL; 01065 _working_cvar.notify(); 01066 01067 Thread::consider_yield(); 01068 } 01069 }