15 #include "vertexDataPage.h"
16 #include "configVariableInt.h"
17 #include "vertexDataSaveFile.h"
18 #include "vertexDataBook.h"
19 #include "vertexDataBlock.h"
20 #include "pStatTimer.h"
21 #include "memoryHook.h"
22 #include "config_gobj.h"
30 (
"max-resident-vertex-data", -1,
31 PRC_DESC(
"Specifies the maximum number of bytes of all vertex data "
32 "that is allowed to remain resident in system RAM at one time. "
33 "If more than this number of bytes of vertices are created, "
34 "the least-recently-used ones will be temporarily compressed in "
35 "system RAM until they are needed. Set it to -1 for no limit."));
38 (
"max-compressed-vertex-data", 0,
39 PRC_DESC(
"Specifies the maximum number of bytes of all vertex data "
40 "that is allowed to remain compressed in system RAM at one time. "
41 "If more than this number of bytes of vertices are created, "
42 "the least-recently-used ones will be temporarily flushed to "
43 "disk until they are needed. Set it to -1 for no limit."));
46 (
"vertex-data-compression-level", 1,
47 PRC_DESC(
"Specifies the zlib compression level to use when compressing "
48 "vertex data. The number should be in the range 1 to 9, where "
49 "larger values are slower but give better compression."));
52 (
"max-disk-vertex-data", -1,
53 PRC_DESC(
"Specifies the maximum number of bytes of vertex data "
54 "that is allowed to be written to disk. Set it to -1 for no "
70 &VertexDataPage::_resident_lru,
71 &VertexDataPage::_compressed_lru,
72 &VertexDataPage::_disk_lru,
// NOTE(review): the leading numerals on these lines ("79", "81", ...) look like
// line-number residue from an extraction pass; they are preserved byte-for-byte
// here, but the file cannot compile until they are stripped — TODO confirm and clean.

// Static mutex; its exact protection scope is not visible in this excerpt —
// presumably guards shared paging-thread state. TODO confirm against full source.
79 Mutex VertexDataPage::_unused_mutex;

// PStat collectors: the first four time the vertex-data paging operations
// (compress / decompress to RAM, save / restore to disk), named per their
// "*:Vertex Data:..." category strings below.
81 PStatCollector VertexDataPage::_vdata_compress_pcollector(
"*:Vertex Data:Compress");
82 PStatCollector VertexDataPage::_vdata_decompress_pcollector(
"*:Vertex Data:Decompress");
83 PStatCollector VertexDataPage::_vdata_save_pcollector(
"*:Vertex Data:Save");
84 PStatCollector VertexDataPage::_vdata_restore_pcollector(
"*:Vertex Data:Restore");
// Accounts paging-thread idle time under the generic "Wait:Idle" category.
85 PStatCollector VertexDataPage::_thread_wait_pcollector(
"Wait:Idle");
// Level collector tracking bytes of mmap'ed vertex-data pages; adjusted in
// alloc_page_data()/free_page_data() via add_level_now()/sub_level_now().
86 PStatCollector VertexDataPage::_alloc_pages_pcollector(
"System memory:MMap:Vertex data");

// Run-time type identifier for the nested DeflatePage class (TypeHandle
// differentiates C++ class types at runtime).
89 TypeHandle VertexDataPage::DeflatePage::_type_handle;
91 #if defined(HAVE_ZLIB) && !defined(USE_MEMORY_NOWRAPPERS)
94 do_zlib_alloc(voidpf opaque, uInt items, uInt size) {
95 return PANDA_MALLOC_ARRAY(items * size);
98 do_zlib_free(voidpf opaque, voidpf address) {
99 PANDA_FREE_ARRAY(address);
101 #endif // HAVE_ZLIB && !USE_MEMORY_NOWRAPPERS
112 VertexDataPage(
size_t book_size) :
115 _book_size(book_size),
121 _uncompressed_size = 0;
122 _ram_class = RC_resident;
123 _pending_ram_class = RC_resident;
132 VertexDataPage(
VertexDataBook *book,
size_t page_size,
size_t block_size) :
135 _book_size(page_size),
136 _block_size(block_size),
139 _allocated_size = round_up(page_size);
140 _page_data = alloc_page_data(_allocated_size);
143 _uncompressed_size = _size;
144 _pending_ram_class = RC_resident;
145 set_ram_class(RC_resident);
162 if (_pending_ram_class != _ram_class) {
163 nassertv(_thread_mgr != (PageThreadManager *)NULL);
164 _thread_mgr->remove_page(
this);
168 if (_page_data != NULL) {
169 free_page_data(_page_data, _allocated_size);
173 nassertv(_book == NULL);
185 PT(PageThreadManager) thread_mgr;
188 thread_mgr = _thread_mgr;
192 if (thread_mgr != (PageThreadManager *)NULL) {
194 <<
"Stopping vertex paging threads.\n";
195 thread_mgr->stop_threads();
207 int num_threads = vertex_data_page_threads;
208 if (num_threads == 0) {
213 PT(PageThreadManager) thread_mgr;
216 thread_mgr = _thread_mgr;
219 if (thread_mgr != (PageThreadManager *)NULL) {
220 thread_mgr->stop_threads();
222 thread_mgr->start_threads(num_threads);
231 void VertexDataPage::
232 output(ostream &out)
const {
233 SimpleAllocator::output(out);
241 void VertexDataPage::
242 write(ostream &out,
int indent_level)
const {
243 SimpleAllocator::write(out);
253 make_block(
size_t start,
size_t size) {
264 void VertexDataPage::
265 changed_contiguous() {
268 VertexDataBook::Pages::iterator pi = _book->_pages.find(
this);
269 nassertv(pi != _book->_pages.end());
270 _book->_pages.erase(pi);
294 void VertexDataPage::
298 switch (_ram_class) {
301 request_ram_class(RC_disk);
303 request_ram_class(RC_compressed);
308 request_ram_class(RC_disk);
314 <<
"Internal error: attempt to evict array data " <<
this
315 <<
" in inappropriate state " << _ram_class <<
".\n";
332 do_alloc(
size_t size) {
338 _saved_block.clear();
353 void VertexDataPage::
354 make_resident_now() {
356 if (_pending_ram_class != _ram_class) {
357 nassertv(_thread_mgr != (PageThreadManager *)NULL);
358 _thread_mgr->remove_page(
this);
362 _pending_ram_class = RC_resident;
374 void VertexDataPage::
376 if (_ram_class == RC_resident) {
381 if (_ram_class == RC_disk) {
382 do_restore_from_disk();
385 if (_ram_class == RC_compressed) {
387 PStatTimer timer(_vdata_decompress_pcollector);
389 if (gobj_cat.is_debug()) {
391 <<
"Expanding page from " << _size
392 <<
" to " << _uncompressed_size <<
"\n";
394 size_t new_allocated_size = round_up(_uncompressed_size);
395 unsigned char *new_data = alloc_page_data(new_allocated_size);
396 unsigned char *end_data = new_data + new_allocated_size;
399 #ifdef USE_MEMORY_NOWRAPPERS
400 z_source.zalloc = Z_NULL;
401 z_source.zfree = Z_NULL;
403 z_source.zalloc = (alloc_func)&do_zlib_alloc;
404 z_source.zfree = (free_func)&do_zlib_free;
407 z_source.opaque = Z_NULL;
408 z_source.msg = (
char *)
"no error message";
410 z_source.next_in = (Bytef *)(
char *)_page_data;
411 z_source.avail_in = _size;
412 z_source.next_out = (Bytef *)new_data;
413 z_source.avail_out = new_allocated_size;
415 int result = inflateInit(&z_source);
417 nassert_raise(
"zlib error");
422 size_t output_size = 0;
426 while (result != Z_STREAM_END) {
427 unsigned char *start_out = (
unsigned char *)z_source.next_out;
428 nassertv(start_out < end_data);
429 z_source.avail_out = min((
size_t)(end_data - start_out), (
size_t)inflate_page_size);
430 nassertv(z_source.avail_out != 0);
431 result = inflate(&z_source, flush);
432 if (result < 0 && result != Z_BUF_ERROR) {
433 nassert_raise(
"zlib error");
436 size_t bytes_produced = (size_t)((
unsigned char *)z_source.next_out - start_out);
437 output_size += bytes_produced;
438 if (bytes_produced == 0) {
445 nassertv(z_source.avail_in == 0);
446 nassertv(output_size == _uncompressed_size);
448 result = inflateEnd(&z_source);
449 nassertv(result == Z_OK);
451 free_page_data(_page_data, _allocated_size);
452 _page_data = new_data;
453 _size = _uncompressed_size;
454 _allocated_size = new_allocated_size;
458 set_ram_class(RC_resident);
470 void VertexDataPage::
472 if (_ram_class == RC_compressed) {
478 if (_ram_class == RC_disk) {
479 do_restore_from_disk();
482 if (_ram_class == RC_resident) {
483 nassertv(_size == _uncompressed_size);
488 DeflatePage *page =
new DeflatePage;
489 DeflatePage *head = page;
492 #ifdef USE_MEMORY_NOWRAPPERS
493 z_dest.zalloc = Z_NULL;
494 z_dest.zfree = Z_NULL;
496 z_dest.zalloc = (alloc_func)&do_zlib_alloc;
497 z_dest.zfree = (free_func)&do_zlib_free;
500 z_dest.opaque = Z_NULL;
501 z_dest.msg = (
char *)
"no error message";
503 int result = deflateInit(&z_dest, vertex_data_compression_level);
505 nassert_raise(
"zlib error");
510 z_dest.next_in = (Bytef *)(
char *)_page_data;
511 z_dest.avail_in = _uncompressed_size;
512 size_t output_size = 0;
520 while (result != Z_STREAM_END) {
521 unsigned char *start_out = (page->_buffer + page->_used_size);
522 z_dest.next_out = (Bytef *)start_out;
523 z_dest.avail_out = (size_t)deflate_page_size - page->_used_size;
524 if (z_dest.avail_out == 0) {
525 DeflatePage *new_page =
new DeflatePage;
526 page->_next = new_page;
528 start_out = page->_buffer;
529 z_dest.next_out = (Bytef *)start_out;
530 z_dest.avail_out = deflate_page_size;
533 result = deflate(&z_dest, flush);
534 if (result < 0 && result != Z_BUF_ERROR) {
535 nassert_raise(
"zlib error");
538 size_t bytes_produced = (size_t)((
unsigned char *)z_dest.next_out - start_out);
539 page->_used_size += bytes_produced;
540 nassertv(page->_used_size <= deflate_page_size);
541 output_size += bytes_produced;
542 if (bytes_produced == 0) {
549 nassertv(z_dest.avail_in == 0);
551 result = deflateEnd(&z_dest);
552 nassertv(result == Z_OK);
557 size_t new_allocated_size = round_up(output_size);
558 unsigned char *new_data = alloc_page_data(new_allocated_size);
560 size_t copied_size = 0;
561 unsigned char *p = new_data;
563 while (page != NULL) {
564 memcpy(p, page->_buffer, page->_used_size);
565 copied_size += page->_used_size;
566 p += page->_used_size;
567 DeflatePage *next = page->_next;
571 nassertv(copied_size == output_size);
575 free_page_data(_page_data, _allocated_size);
576 _page_data = new_data;
578 _allocated_size = new_allocated_size;
580 if (gobj_cat.is_debug()) {
582 <<
"Compressed " << *
this <<
" from " << _uncompressed_size
583 <<
" to " << _size <<
"\n";
587 set_ram_class(RC_compressed);
599 void VertexDataPage::
601 if (_ram_class == RC_disk) {
607 if (_ram_class == RC_resident || _ram_class == RC_compressed) {
608 if (!do_save_to_disk()) {
611 <<
"Couldn't save page " <<
this <<
" to disk.\n";
616 free_page_data(_page_data, _allocated_size);
620 set_ram_class(RC_disk);
635 bool VertexDataPage::
637 if (_ram_class == RC_resident || _ram_class == RC_compressed) {
641 if (gobj_cat.is_debug()) {
643 <<
"Storing page, " << _size <<
" bytes, to disk\n";
646 bool compressed = (_ram_class == RC_compressed);
648 _saved_block =
get_save_file()->write_data(_page_data, _allocated_size, compressed);
654 if (gobj_cat.is_debug()) {
656 <<
"Page already stored: " << _size <<
" bytes\n";
673 void VertexDataPage::
674 do_restore_from_disk() {
675 if (_ram_class == RC_disk) {
677 nassertv(_page_data == (
unsigned char *)NULL && _size == 0);
681 size_t buffer_size = _saved_block->get_size();
682 if (gobj_cat.is_debug()) {
684 <<
"Restoring page, " << buffer_size <<
" bytes, from disk\n";
687 size_t new_allocated_size = round_up(buffer_size);
688 unsigned char *new_data = alloc_page_data(new_allocated_size);
689 if (!
get_save_file()->read_data(new_data, new_allocated_size, _saved_block)) {
690 nassert_raise(
"read error");
693 nassertv(_page_data == (
unsigned char *)NULL);
694 _page_data = new_data;
696 _allocated_size = new_allocated_size;
699 if (_saved_block->get_compressed()) {
700 set_ram_class(RC_compressed);
702 set_ram_class(RC_resident);
714 void VertexDataPage::
716 size_t new_size = _contiguous;
717 if (_ram_class != RC_resident) {
724 VertexDataBook::Pages::iterator pi = _book->_pages.find(
this);
725 nassertv(pi != _book->_pages.end());
726 _book->_pages.erase(pi);
728 _book_size = new_size;
729 bool inserted = _book->_pages.insert(
this).second;
744 void VertexDataPage::
745 request_ram_class(RamClass ram_class) {
746 int num_threads = vertex_data_page_threads;
765 _pending_ram_class = ram_class;
770 if (_thread_mgr == (PageThreadManager *)NULL) {
773 <<
"Spawning " << num_threads <<
" vertex paging threads.\n";
774 _thread_mgr =
new PageThreadManager(num_threads);
777 _thread_mgr->add_page(
this, ram_class);
787 void VertexDataPage::
789 size_t max_size = (size_t)max_disk_vertex_data;
792 vertex_save_file_prefix, max_size);
801 unsigned char *VertexDataPage::
802 alloc_page_data(
size_t page_size)
const {
803 _alloc_pages_pcollector.add_level_now(page_size);
804 return (
unsigned char *)memory_hook->
mmap_alloc(page_size,
false);
812 void VertexDataPage::
813 free_page_data(
unsigned char *page_data,
size_t page_size)
const {
814 _alloc_pages_pcollector.sub_level_now(page_size);
815 memory_hook->
mmap_free(page_data, page_size);
823 VertexDataPage::PageThreadManager::
824 PageThreadManager(
int num_threads) :
826 _pending_cvar(_tlock)
828 start_threads(num_threads);
840 void VertexDataPage::PageThreadManager::
842 nassertv(!_shutdown);
844 if (page->_pending_ram_class == ram_class) {
846 nassertv(page->
get_lru() == &_pending_lru);
850 if (page->_pending_ram_class != page->_ram_class) {
856 if (page->_pending_ram_class != ram_class) {
862 page->_pending_ram_class = ram_class;
863 if (ram_class == RC_resident) {
864 _pending_reads.push_back(page);
866 _pending_writes.push_back(page);
868 _pending_cvar.notify();
881 void VertexDataPage::PageThreadManager::
885 PageThreads::iterator ti;
886 for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
887 PageThread *thread = (*ti);
888 if (page == thread->_working_page) {
891 page->_lock.release();
892 while (page == thread->_working_page) {
893 thread->_working_cvar.wait();
895 page->_lock.acquire();
900 if (page->_pending_ram_class == RC_resident) {
901 PendingPages::iterator pi =
902 find(_pending_reads.begin(), _pending_reads.end(), page);
903 nassertv(pi != _pending_reads.end());
904 _pending_reads.erase(pi);
906 PendingPages::iterator pi =
907 find(_pending_writes.begin(), _pending_writes.end(), page);
908 nassertv(pi != _pending_writes.end());
909 _pending_writes.erase(pi);
912 page->_pending_ram_class = page->_ram_class;
924 int VertexDataPage::PageThreadManager::
925 get_num_threads()
const {
926 return (
int)_threads.size();
935 int VertexDataPage::PageThreadManager::
936 get_num_pending_reads()
const {
937 return (
int)_pending_reads.size();
946 int VertexDataPage::PageThreadManager::
947 get_num_pending_writes()
const {
948 return (
int)_pending_writes.size();
957 void VertexDataPage::PageThreadManager::
958 start_threads(
int num_threads) {
961 _threads.reserve(num_threads);
962 for (
int i = 0; i < num_threads; ++i) {
964 name_strm <<
"VertexDataPage" << _threads.size();
965 PT(PageThread) thread = new PageThread(this, name_strm.str());
966 thread->start(TP_low, true);
967 _threads.push_back(thread);
984 _pending_cvar.notify_all();
985 threads.swap(_threads);
988 PageThreads::iterator ti;
989 for (ti = threads.begin(); ti != threads.end(); ++ti) {
990 PageThread *thread = (*ti);
994 nassertv(_pending_reads.empty() && _pending_writes.empty());
1002 VertexDataPage::PageThread::
1003 PageThread(PageThreadManager *manager,
const string &name) :
1006 _working_cvar(_tlock)
1015 void VertexDataPage::PageThread::
1020 PStatClient::thread_tick(get_sync_name());
1022 while (_manager->_pending_reads.empty() &&
1023 _manager->_pending_writes.empty()) {
1024 if (_manager->_shutdown) {
1029 _manager->_pending_cvar.wait();
1033 if (!_manager->_pending_reads.empty()) {
1034 _working_page = _manager->_pending_reads.front();
1035 _manager->_pending_reads.pop_front();
1037 _working_page = _manager->_pending_writes.front();
1038 _manager->_pending_writes.pop_front();
1041 RamClass ram_class = _working_page->_pending_ram_class;
1046 switch (ram_class) {
1048 _working_page->make_resident();
1052 _working_page->make_compressed();
1056 _working_page->make_disk();
1059 case RC_end_of_list:
1066 _working_page = NULL;
1067 _working_cvar.notify();
A block of bytes on the save file.
An implementation of a very simple LRU algorithm.
A block of bytes that holds one or more VertexDataBlocks.
A temporary file to hold the vertex data that has been evicted from memory and written to disk.
An implementation of a very simple block allocator.
SimpleLru * get_lru() const
Returns the LRU that manages this page, or NULL if it is not currently managed by any LRU.
static VertexDataSaveFile * get_save_file()
Returns the global VertexDataSaveFile that will be used to save vertex data buffers to disk when necessary.
void mark_used_lru() const
To be called when the page is used; this will move it to the tail of the SimpleLru queue it is already on.
A standard mutex, or mutual exclusion lock.
A lightweight class that can be used to automatically start and stop a PStatCollector around a section of code.
A lightweight C++ object whose constructor calls acquire() and whose destructor calls release() on a mutex.
static void consider_yield()
Possibly suspends the current thread for the rest of the current epoch, if it has run for enough this epoch.
void set_lru_size(size_t lru_size)
Specifies the size of this page, presumably in bytes, although any unit is possible.
A block of bytes that stores the actual raw vertex data referenced by a GeomVertexArrayData object...
A single block as returned from SimpleAllocator::alloc().
static bool is_threading_supported()
Returns true if threading support has been compiled in and enabled, or false if no threading is available.
A lightweight class that represents a single element that may be timed and/or counted via stats...
virtual void mmap_free(void *ptr, size_t size)
Frees a block of memory previously allocated via mmap_alloc().
One atomic piece that may be managed by a SimpleLru chain.
A collection of VertexDataPages, which can be used to allocate new VertexDataBlock objects...
static void stop_threads()
Call this to stop the paging threads, if they were started.
A thread; that is, a lightweight process.
size_t get_max_size() const
Returns the max size of all objects that are allowed to be active on the LRU.
This is a convenience class to specialize ConfigVariable as an integer type.
TypeHandle is the identifier used to differentiate C++ class types.
virtual void * mmap_alloc(size_t size, bool allow_exec)
Allocates a raw page or pages of memory directly from the OS.
static void flush_threads()
Waits for all of the pending thread tasks to finish before returning.