// Config variable (fragment): byte cap on vertex data held uncompressed in
// system RAM; -1 = unlimited.  LRU overflow is compressed in place.
29 (
"max-resident-vertex-data", -1,
30 PRC_DESC(
"Specifies the maximum number of bytes of all vertex data "
31 "that is allowed to remain resident in system RAM at one time. "
32 "If more than this number of bytes of vertices are created, "
33 "the least-recently-used ones will be temporarily compressed in "
34 "system RAM until they are needed. Set it to -1 for no limit."));
// Config variable (fragment): byte cap on *compressed* vertex data in RAM;
// overflow spills to disk.  Default 0 -- presumably "compress stage
// effectively disabled by default"; confirm against SimpleLru semantics.
37 (
"max-compressed-vertex-data", 0,
38 PRC_DESC(
"Specifies the maximum number of bytes of all vertex data "
39 "that is allowed to remain compressed in system RAM at one time. "
40 "If more than this number of bytes of vertices are created, "
41 "the least-recently-used ones will be temporarily flushed to "
42 "disk until they are needed. Set it to -1 for no limit."));
// Config variable (fragment): zlib level (1..9) used by make_compressed().
45 (
"vertex-data-compression-level", 1,
46 PRC_DESC(
"Specifies the zlib compression level to use when compressing "
47 "vertex data. The number should be in the range 1 to 9, where "
48 "larger values are slower but give better compression."));
// Config variable (fragment): byte cap on vertex data written to the disk
// save file; -1 = unlimited.  Description text continues past this view.
51 (
"max-disk-vertex-data", -1,
52 PRC_DESC(
"Specifies the maximum number of bytes of vertex data "
53 "that is allowed to be written to disk. Set it to -1 for no "
// Static state shared by all VertexDataPage instances.

// Background paging-thread manager; created lazily (see request_ram_class).
56 PT(VertexDataPage::PageThreadManager) VertexDataPage::_thread_mgr;

// Global lock guarding the thread manager and its pending-page queues.
// Heap-allocated and never freed -- presumably deliberate so it survives
// static-destruction order at shutdown; confirm.
60 Mutex &VertexDataPage::_tlock = *(
new Mutex(
"VertexDataPage::_tlock"));

// One LRU per RAM class, each capped by the matching config variable above.
// The disk and pending LRUs use limit 0 -- apparently size-tracking only,
// with no eviction pressure of their own; verify against SimpleLru.
62 SimpleLru VertexDataPage::_resident_lru(
"resident", max_resident_vertex_data);
63 SimpleLru VertexDataPage::_compressed_lru(
"compressed", max_compressed_vertex_data);
64 SimpleLru VertexDataPage::_disk_lru(
"disk", 0);
65 SimpleLru VertexDataPage::_pending_lru(
"pending", 0);

// Table mapping RamClass -> its LRU (indexable by RC_* enum values).
// NOTE(review): only three entries are visible here for RC_end_of_list
// slots; the remaining initializer lines are outside this view.
67 SimpleLru *VertexDataPage::_global_lru[RC_end_of_list] = {
68 &VertexDataPage::_resident_lru,
69 &VertexDataPage::_compressed_lru,
70 &VertexDataPage::_disk_lru,
77 Mutex VertexDataPage::_unused_mutex;

// PStats collectors: compression/decompression and disk save/restore
// timing, paging-thread idle time, and mmap'ed page memory level.
79 PStatCollector VertexDataPage::_vdata_compress_pcollector(
"*:Vertex Data:Compress");
80 PStatCollector VertexDataPage::_vdata_decompress_pcollector(
"*:Vertex Data:Decompress");
81 PStatCollector VertexDataPage::_vdata_save_pcollector(
"*:Vertex Data:Save");
82 PStatCollector VertexDataPage::_vdata_restore_pcollector(
"*:Vertex Data:Restore");
83 PStatCollector VertexDataPage::_thread_wait_pcollector(
"Wait:Idle");
84 PStatCollector VertexDataPage::_alloc_pages_pcollector(
"System memory:MMap:Vertex data");
87 TypeHandle VertexDataPage::DeflatePage::_type_handle;
89 #if defined(HAVE_ZLIB) && !defined(USE_MEMORY_NOWRAPPERS)
// zlib allocator hooks routed through Panda's memory wrappers so zlib's
// internal buffers appear in Panda's memory accounting.
// NOTE(review): items * size is computed in uInt arithmetic -- this matches
// zlib's own contract, but could wrap for pathological requests; confirm.
92 do_zlib_alloc(voidpf opaque, uInt items, uInt size) {
93 return PANDA_MALLOC_ARRAY(items * size);
// Matching free hook for buffers obtained via do_zlib_alloc().
96 do_zlib_free(voidpf opaque, voidpf address) {
97 PANDA_FREE_ARRAY(address);
99 #endif // HAVE_ZLIB && !USE_MEMORY_NOWRAPPERS
// Constructor (fragment): a page with a recorded book size but no backing
// data allocated yet (_page_data left null, sizes zeroed, marked resident).
107 VertexDataPage(
size_t book_size) :
110 _book_size(book_size),
114 _page_data =
nullptr;
116 _uncompressed_size = 0;
117 _ram_class = RC_resident;
118 _pending_ram_class = RC_resident;
// Constructor (fragment): a page belonging to a VertexDataBook.  Allocates
// the backing store (page_size rounded up to an allocation boundary) and
// starts life in the resident RAM class.
125 VertexDataPage(
VertexDataBook *book,
size_t page_size,
size_t block_size) :
128 _book_size(page_size),
129 _block_size(block_size),
132 _allocated_size = round_up(page_size);
133 _page_data = alloc_page_data(_allocated_size);
136 _uncompressed_size = _size;
137 _pending_ram_class = RC_resident;
138 set_ram_class(RC_resident);
// Destructor (fragment): cancel any queued background ram-class change so a
// paging thread never touches a dead page, then release the page data.  The
// final assert indicates the book must have disowned the page already.
152 if (_pending_ram_class != _ram_class) {
153 nassertv(_thread_mgr !=
nullptr);
154 _thread_mgr->remove_page(
this);
158 if (_page_data !=
nullptr) {
159 free_page_data(_page_data, _allocated_size);
163 nassertv(_book ==
nullptr);
// stop_threads (fragment): snapshot the global thread manager into a local
// reference-counted pointer (presumably under _tlock -- the lock lines are
// outside this view), then shut its worker threads down.
172 PT(PageThreadManager) thread_mgr;
175 thread_mgr = _thread_mgr;
179 if (thread_mgr !=
nullptr) {
181 <<
"Stopping vertex paging threads.\n";
182 thread_mgr->stop_threads();
// flush_threads (fragment): with the configured thread count, stop the
// current paging threads (draining pending work) and restart them.
// When vertex-data-page-threads is 0 the early branch (body elided here)
// presumably bypasses the thread machinery entirely -- confirm.
191 int num_threads = vertex_data_page_threads;
192 if (num_threads == 0) {
197 PT(PageThreadManager) thread_mgr;
200 thread_mgr = _thread_mgr;
203 if (thread_mgr !=
nullptr) {
204 thread_mgr->stop_threads();
206 thread_mgr->start_threads(num_threads);
// Brief description of the page: delegates to the base allocator's output.
213 void VertexDataPage::
214 output(std::ostream &out)
const {
215 SimpleAllocator::output(out);
// Verbose description of the page.
// NOTE(review): indent_level is accepted but visibly not forwarded to
// SimpleAllocator::write -- confirm this is intentional.
221 void VertexDataPage::
222 write(std::ostream &out,
int indent_level)
const {
223 SimpleAllocator::write(out);
// make_block (fragment): factory override — creates the allocator block
// type used for this page (body elided from this view).
231 make_block(
size_t start,
size_t size) {
// changed_contiguous (fragment): called when the page's contiguous-free
// region changes; removes the page from the book's sorted page set (it is
// presumably re-inserted with its new key in the elided lines).
240 void VertexDataPage::
241 changed_contiguous() {
244 VertexDataBook::Pages::iterator pi = _book->_pages.find(
this);
245 nassertv(pi != _book->_pages.end());
246 _book->_pages.erase(pi);
// LRU eviction callback (fragment): demote the page one ram-class step
// (resident -> compressed, compressed -> disk; the per-case labels are
// elided here).  Any other state is an internal error.
265 void VertexDataPage::
269 switch (_ram_class) {
272 request_ram_class(RC_disk);
274 request_ram_class(RC_compressed);
279 request_ram_class(RC_disk);
285 <<
"Internal error: attempt to evict array data " <<
this
286 <<
" in inappropriate state " << _ram_class <<
".\n";
// do_alloc (fragment): allocate a block within the page; on success (and
// when the page is not on disk) the stale disk copy is discarded, since the
// in-memory contents are about to diverge from it.
301 do_alloc(
size_t size) {
304 if (block !=
nullptr && _ram_class != RC_disk) {
307 _saved_block.clear();
// make_resident_now (fragment): synchronously force the page resident.
// First cancels any queued background transition, then performs the
// transition inline and records RC_resident as the pending class.
320 void VertexDataPage::
321 make_resident_now() {
323 if (_pending_ram_class != _ram_class) {
324 nassertv(_thread_mgr !=
nullptr);
325 _thread_mgr->remove_page(
this);
329 _pending_ram_class = RC_resident;
// make_resident (fragment): move the page into full RAM.  No-op if already
// resident; a page on disk is first pulled back (possibly arriving
// compressed), then a compressed page is inflated with zlib into a freshly
// mmap'ed buffer of the original uncompressed size.
339 void VertexDataPage::
341 if (_ram_class == RC_resident) {
346 if (_ram_class == RC_disk) {
347 do_restore_from_disk();
350 if (_ram_class == RC_compressed) {
352 PStatTimer timer(_vdata_decompress_pcollector);
354 if (gobj_cat.is_debug()) {
356 <<
"Expanding page from " << _size
357 <<
" to " << _uncompressed_size <<
"\n";
// Destination buffer sized for the uncompressed data, rounded up to the
// allocator granularity.
359 size_t new_allocated_size = round_up(_uncompressed_size);
360 unsigned char *new_data = alloc_page_data(new_allocated_size);
361 unsigned char *end_data = new_data + new_allocated_size;
// Wire zlib either to the raw allocator or to Panda's wrapped allocators,
// mirroring do_zlib_alloc/do_zlib_free above.
364 #ifdef USE_MEMORY_NOWRAPPERS
365 z_source.zalloc = Z_NULL;
366 z_source.zfree = Z_NULL;
368 z_source.zalloc = (alloc_func)&do_zlib_alloc;
369 z_source.zfree = (free_func)&do_zlib_free;
372 z_source.opaque = Z_NULL;
373 z_source.msg = (
char *)
"no error message";
375 z_source.next_in = (Bytef *)(
char *)_page_data;
376 z_source.avail_in = _size;
377 z_source.next_out = (Bytef *)new_data;
378 z_source.avail_out = new_allocated_size;
380 int result = inflateInit(&z_source);
382 nassert_raise(
"zlib error");
387 size_t output_size = 0;
// Inflate in bounded chunks (inflate_page_size at a time) until zlib
// reports end-of-stream; Z_BUF_ERROR is tolerated as "call again".
391 while (result != Z_STREAM_END) {
392 unsigned char *start_out = (
unsigned char *)z_source.next_out;
393 nassertv(start_out < end_data);
394 z_source.avail_out = std::min((
size_t)(end_data - start_out), (
size_t)inflate_page_size);
395 nassertv(z_source.avail_out != 0);
396 result = inflate(&z_source, flush);
397 if (result < 0 && result != Z_BUF_ERROR) {
398 nassert_raise(
"zlib error");
401 size_t bytes_produced = (size_t)((
unsigned char *)z_source.next_out - start_out);
402 output_size += bytes_produced;
403 if (bytes_produced == 0) {
// Sanity: all input consumed and the output matches the recorded
// uncompressed size exactly.
410 nassertv(z_source.avail_in == 0);
411 nassertv(output_size == _uncompressed_size);
413 result = inflateEnd(&z_source);
414 nassertv(result == Z_OK);
// Swap the inflated buffer in for the compressed one.
416 free_page_data(_page_data, _allocated_size);
417 _page_data = new_data;
418 _size = _uncompressed_size;
419 _allocated_size = new_allocated_size;
423 set_ram_class(RC_resident);
// make_compressed (fragment): compress the page in RAM with zlib.  No-op if
// already compressed; a page on disk is restored first; a resident page is
// deflated into a linked chain of fixed-size DeflatePage buffers (the final
// size is unknown in advance), then the chain is copied into one buffer.
433 void VertexDataPage::
435 if (_ram_class == RC_compressed) {
441 if (_ram_class == RC_disk) {
442 do_restore_from_disk();
445 if (_ram_class == RC_resident) {
446 nassertv(_size == _uncompressed_size);
// Head of the output chain; `page` tracks the chunk currently written.
// NOTE(review): DeflatePage nodes are raw new'ed; they appear to be freed
// in the copy loop below (deletion line elided from this view) -- confirm.
451 DeflatePage *page =
new DeflatePage;
452 DeflatePage *head = page;
// zlib allocator wiring, as in make_resident().
455 #ifdef USE_MEMORY_NOWRAPPERS
456 z_dest.zalloc = Z_NULL;
457 z_dest.zfree = Z_NULL;
459 z_dest.zalloc = (alloc_func)&do_zlib_alloc;
460 z_dest.zfree = (free_func)&do_zlib_free;
463 z_dest.opaque = Z_NULL;
464 z_dest.msg = (
char *)
"no error message";
466 int result = deflateInit(&z_dest, vertex_data_compression_level);
468 nassert_raise(
"zlib error");
473 z_dest.next_in = (Bytef *)(
char *)_page_data;
474 z_dest.avail_in = _uncompressed_size;
475 size_t output_size = 0;
// Deflate until end-of-stream, growing the chain one DeflatePage at a
// time whenever the current chunk fills up.
482 while (result != Z_STREAM_END) {
483 unsigned char *start_out = (page->_buffer + page->_used_size);
484 z_dest.next_out = (Bytef *)start_out;
485 z_dest.avail_out = (size_t)deflate_page_size - page->_used_size;
486 if (z_dest.avail_out == 0) {
487 DeflatePage *new_page =
new DeflatePage;
488 page->_next = new_page;
490 start_out = page->_buffer;
491 z_dest.next_out = (Bytef *)start_out;
492 z_dest.avail_out = deflate_page_size;
495 result = deflate(&z_dest, flush);
496 if (result < 0 && result != Z_BUF_ERROR) {
497 nassert_raise(
"zlib error");
500 size_t bytes_produced = (size_t)((
unsigned char *)z_dest.next_out - start_out);
501 page->_used_size += bytes_produced;
502 nassertv(page->_used_size <= deflate_page_size);
503 output_size += bytes_produced;
504 if (bytes_produced == 0) {
511 nassertv(z_dest.avail_in == 0);
513 result = deflateEnd(&z_dest);
514 nassertv(result == Z_OK);
// Now that the compressed size is known, allocate the final buffer and
// concatenate the chain into it.
519 size_t new_allocated_size = round_up(output_size);
520 unsigned char *new_data = alloc_page_data(new_allocated_size);
522 size_t copied_size = 0;
523 unsigned char *p = new_data;
525 while (page !=
nullptr) {
526 memcpy(p, page->_buffer, page->_used_size);
527 copied_size += page->_used_size;
528 p += page->_used_size;
529 DeflatePage *next = page->_next;
533 nassertv(copied_size == output_size);
// Replace the uncompressed buffer with the compressed one.
537 free_page_data(_page_data, _allocated_size);
538 _page_data = new_data;
540 _allocated_size = new_allocated_size;
542 if (gobj_cat.is_debug()) {
544 <<
"Compressed " << *
this <<
" from " << _uncompressed_size
545 <<
" to " << _size <<
"\n";
549 set_ram_class(RC_compressed);
// make_disk (fragment): push the page out to the save file.  No-op if
// already on disk; otherwise write the data out (failure is logged -- the
// recovery path is elided here), then free the RAM copy and mark RC_disk.
558 void VertexDataPage::
560 if (_ram_class == RC_disk) {
566 if (_ram_class == RC_resident || _ram_class == RC_compressed) {
567 if (!do_save_to_disk()) {
570 <<
"Couldn't save page " <<
this <<
" to disk.\n";
575 free_page_data(_page_data, _allocated_size);
576 _page_data =
nullptr;
579 set_ram_class(RC_disk);
// do_save_to_disk (fragment): write the page's current buffer to the save
// file unless a still-valid saved copy exists (_saved_block non-null).
// Records whether the written data was compressed so the restore path can
// re-enter the matching ram class.  Does not free the RAM copy itself.
591 bool VertexDataPage::
593 if (_ram_class == RC_resident || _ram_class == RC_compressed) {
596 if (_saved_block ==
nullptr) {
597 if (gobj_cat.is_debug()) {
599 <<
"Storing page, " << _size <<
" bytes, to disk\n";
602 bool compressed = (_ram_class == RC_compressed);
604 _saved_block =
get_save_file()->write_data(_page_data, _allocated_size, compressed);
605 if (_saved_block ==
nullptr) {
// A saved copy already exists on disk; nothing to write.
610 if (gobj_cat.is_debug()) {
612 <<
"Page already stored: " << _size <<
" bytes\n";
// do_restore_from_disk (fragment): read the page back from the save file
// into a newly allocated buffer.  The restored page re-enters RC_compressed
// or RC_resident depending on how it was written (see do_save_to_disk).
626 void VertexDataPage::
627 do_restore_from_disk() {
628 if (_ram_class == RC_disk) {
629 nassertv(_saved_block !=
nullptr);
630 nassertv(_page_data ==
nullptr && _size == 0);
634 size_t buffer_size = _saved_block->get_size();
635 if (gobj_cat.is_debug()) {
637 <<
"Restoring page, " << buffer_size <<
" bytes, from disk\n";
640 size_t new_allocated_size = round_up(buffer_size);
641 unsigned char *new_data = alloc_page_data(new_allocated_size);
642 if (!
get_save_file()->read_data(new_data, new_allocated_size, _saved_block)) {
643 nassert_raise(
"read error");
646 nassertv(_page_data ==
nullptr);
647 _page_data = new_data;
649 _allocated_size = new_allocated_size;
652 if (_saved_block->get_compressed()) {
653 set_ram_class(RC_compressed);
655 set_ram_class(RC_resident);
// adjust_book_size (fragment): keep the book's page set sorted by the
// page's effective contiguous size.  Only a resident page advertises its
// real contiguous space (the non-resident adjustment is elided here); when
// the key changes, the page is removed and re-inserted under the new key.
664 void VertexDataPage::
666 size_t new_size = _contiguous;
667 if (_ram_class != RC_resident) {
672 if (_book !=
nullptr && new_size != _book_size) {
673 VertexDataBook::Pages::iterator pi = _book->_pages.find(
this);
674 nassertv(pi != _book->_pages.end());
675 _book->_pages.erase(pi);
677 _book_size = new_size;
678 bool inserted = _book->_pages.insert(
this).second;
// request_ram_class (fragment): queue an asynchronous ram-class transition
// for this page.  Lazily spawns the PageThreadManager (and its worker
// threads) on first use, then hands the page to it.  The synchronous
// fallback for num_threads == 0 is elided from this view.
690 void VertexDataPage::
691 request_ram_class(RamClass ram_class) {
692 int num_threads = vertex_data_page_threads;
711 _pending_ram_class = ram_class;
716 if (_thread_mgr ==
nullptr) {
719 <<
"Spawning " << num_threads <<
" vertex paging threads.\n";
720 _thread_mgr =
new PageThreadManager(num_threads);
723 _thread_mgr->add_page(
this, ram_class);
// make_save_file (fragment): create the on-disk save file, capped at
// max_disk_vertex_data bytes, under the configured filename prefix.
// NOTE(review): max_disk_vertex_data of -1 cast to size_t yields SIZE_MAX,
// i.e. effectively unlimited -- presumably intended; confirm.
730 void VertexDataPage::
732 size_t max_size = (size_t)max_disk_vertex_data;
735 vertex_save_file_prefix, max_size);
// Allocates page memory via the memory hook's mmap-style allocator and
// records the allocation level with PStats.
742 unsigned char *VertexDataPage::
743 alloc_page_data(
size_t page_size)
const {
744 _alloc_pages_pcollector.add_level_now(page_size);
745 return (
unsigned char *)memory_hook->
mmap_alloc(page_size,
false);
// Releases memory obtained from alloc_page_data() and decrements the
// PStats allocation level.  page_size must match the original allocation.
751 void VertexDataPage::
752 free_page_data(
unsigned char *page_data,
size_t page_size)
const {
753 _alloc_pages_pcollector.sub_level_now(page_size);
754 memory_hook->
mmap_free(page_data, page_size);
// PageThreadManager constructor (fragment): the pending-work condition
// variable shares the global _tlock; worker threads start immediately.
760 VertexDataPage::PageThreadManager::
761 PageThreadManager(
int num_threads) :
763 _pending_cvar(_tlock)
765 start_threads(num_threads);
// add_page (fragment): enqueue a page for an asynchronous transition to
// ram_class.  Already-queued duplicates are detected (early-return bodies
// elided); reads go to _pending_reads, everything else to _pending_writes,
// and one waiting worker is woken.  Caller presumably holds _tlock.
775 void VertexDataPage::PageThreadManager::
777 nassertv(!_shutdown);
779 if (page->_pending_ram_class == ram_class) {
781 nassertv(page->
get_lru() == &_pending_lru);
785 if (page->_pending_ram_class != page->_ram_class) {
791 if (page->_pending_ram_class != ram_class) {
797 page->_pending_ram_class = ram_class;
798 if (ram_class == RC_resident) {
799 _pending_reads.push_back(page);
801 _pending_writes.push_back(page);
803 _pending_cvar.notify();
// remove_page (fragment): withdraw a queued page.  If a worker is actively
// processing it, the page's own lock is dropped while waiting on the
// worker's condition variable (avoiding a lock-order deadlock), then
// re-acquired.  Otherwise the page is removed from whichever pending queue
// holds it, and its pending class is reset to its current class.
813 void VertexDataPage::PageThreadManager::
815 nassertv(page !=
nullptr);
817 PageThreads::iterator ti;
818 for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
819 PageThread *thread = (*ti);
820 if (page == thread->_working_page) {
823 page->_lock.release();
824 while (page == thread->_working_page) {
825 thread->_working_cvar.wait();
827 page->_lock.acquire();
832 if (page->_pending_ram_class == RC_resident) {
833 PendingPages::iterator pi =
834 find(_pending_reads.begin(), _pending_reads.end(), page);
835 nassertv(pi != _pending_reads.end());
836 _pending_reads.erase(pi);
838 PendingPages::iterator pi =
839 find(_pending_writes.begin(), _pending_writes.end(), page);
840 nassertv(pi != _pending_writes.end());
841 _pending_writes.erase(pi);
844 page->_pending_ram_class = page->_ram_class;
// Returns the number of active paging threads.
854 int VertexDataPage::PageThreadManager::
855 get_num_threads()
const {
856 return (
int)_threads.size();
// Returns the number of pages queued to be made resident (reads).
863 int VertexDataPage::PageThreadManager::
864 get_num_pending_reads()
const {
865 return (
int)_pending_reads.size();
// Returns the number of pages queued for compression or disk write-out.
872 int VertexDataPage::PageThreadManager::
873 get_num_pending_writes()
const {
874 return (
int)_pending_writes.size();
// start_threads (fragment): spawn num_threads low-priority worker threads,
// each named "VertexDataPage<N>" with N continuing from any threads already
// present.  TP_low keeps paging work from starving the app.
881 void VertexDataPage::PageThreadManager::
882 start_threads(
int num_threads) {
885 _threads.reserve(num_threads);
886 for (
int i = 0; i < num_threads; ++i) {
887 std::ostringstream name_strm;
888 name_strm <<
"VertexDataPage" << _threads.size();
889 PT(PageThread) thread =
new PageThread(
this, name_strm.str());
890 thread->start(TP_low,
true);
891 _threads.push_back(thread);
// stop_threads (fragment): wake every worker (which will observe the
// shutdown flag -- the flag-setting line is elided here), move the thread
// list aside, and join each thread.  On return no work may remain queued.
899 void VertexDataPage::PageThreadManager::
905 _pending_cvar.notify_all();
906 threads.swap(_threads);
909 PageThreads::iterator ti;
910 for (ti = threads.begin(); ti != threads.end(); ++ti) {
911 PageThread *thread = (*ti);
915 nassertv(_pending_reads.empty() && _pending_writes.empty());
// PageThread constructor (fragment): the per-thread "working" condition
// variable also shares the global _tlock (see remove_page's wait on it).
921 VertexDataPage::PageThread::
922 PageThread(PageThreadManager *manager,
const std::string &name) :
925 _working_cvar(_tlock)
// PageThread main loop (fragment; continues past this view): wait for
// pending work (or shutdown), prefer reads over writes, perform the page's
// requested transition, then clear _working_page and signal anyone blocked
// in remove_page() waiting for this page.
932 void VertexDataPage::PageThread::
937 PStatClient::thread_tick(get_sync_name());
939 while (_manager->_pending_reads.empty() &&
940 _manager->_pending_writes.empty()) {
941 if (_manager->_shutdown) {
946 _manager->_pending_cvar.wait();
// Reads (make-resident requests) are serviced before writes.
950 if (!_manager->_pending_reads.empty()) {
951 _working_page = _manager->_pending_reads.front();
952 _manager->_pending_reads.pop_front();
954 _working_page = _manager->_pending_writes.front();
955 _manager->_pending_writes.pop_front();
958 RamClass ram_class = _working_page->_pending_ram_class;
// Dispatch on the requested class (switch/case labels elided here).
965 _working_page->make_resident();
969 _working_page->make_compressed();
973 _working_page->make_disk();
983 _working_page =
nullptr;
984 _working_cvar.notify();