// Panda3D
// vertexDataPage.cxx
1 // Filename: vertexDataPage.cxx
2 // Created by: drose (04Jun07)
3 //
4 ////////////////////////////////////////////////////////////////////
5 //
6 // PANDA 3D SOFTWARE
7 // Copyright (c) Carnegie Mellon University. All rights reserved.
8 //
9 // All use of this software is subject to the terms of the revised BSD
10 // license. You should have received a copy of this license along
11 // with this source code in a file named "LICENSE."
12 //
13 ////////////////////////////////////////////////////////////////////
14 
15 #include "vertexDataPage.h"
16 #include "configVariableInt.h"
17 #include "vertexDataSaveFile.h"
18 #include "vertexDataBook.h"
19 #include "vertexDataBlock.h"
20 #include "pStatTimer.h"
21 #include "memoryHook.h"
22 #include "config_gobj.h"
23 #include <algorithm>
24 
25 #ifdef HAVE_ZLIB
26 #include <zlib.h>
27 #endif
28 
29 ConfigVariableInt max_resident_vertex_data
30 ("max-resident-vertex-data", -1,
31  PRC_DESC("Specifies the maximum number of bytes of all vertex data "
32  "that is allowed to remain resident in system RAM at one time. "
33  "If more than this number of bytes of vertices are created, "
34  "the least-recently-used ones will be temporarily compressed in "
35  "system RAM until they are needed. Set it to -1 for no limit."));
36 
37 ConfigVariableInt max_compressed_vertex_data
38 ("max-compressed-vertex-data", 0,
39  PRC_DESC("Specifies the maximum number of bytes of all vertex data "
40  "that is allowed to remain compressed in system RAM at one time. "
41  "If more than this number of bytes of vertices are created, "
42  "the least-recently-used ones will be temporarily flushed to "
43  "disk until they are needed. Set it to -1 for no limit."));
44 
45 ConfigVariableInt vertex_data_compression_level
46 ("vertex-data-compression-level", 1,
47  PRC_DESC("Specifies the zlib compression level to use when compressing "
48  "vertex data. The number should be in the range 1 to 9, where "
49  "larger values are slower but give better compression."));
50 
51 ConfigVariableInt max_disk_vertex_data
52 ("max-disk-vertex-data", -1,
53  PRC_DESC("Specifies the maximum number of bytes of vertex data "
54  "that is allowed to be written to disk. Set it to -1 for no "
55  "limit."));
56 
57 PT(VertexDataPage::PageThreadManager) VertexDataPage::_thread_mgr;
58 
59 // This is a reference to an allocated Mutex, instead of just a static
60 // Mutex, to protect against ordering issues when the application
61 // shuts down.
62 Mutex &VertexDataPage::_tlock = *(new Mutex("VertexDataPage::_tlock"));
63 
64 SimpleLru VertexDataPage::_resident_lru("resident", max_resident_vertex_data);
65 SimpleLru VertexDataPage::_compressed_lru("compressed", max_compressed_vertex_data);
66 SimpleLru VertexDataPage::_disk_lru("disk", 0);
67 SimpleLru VertexDataPage::_pending_lru("pending", 0);
68 
69 SimpleLru *VertexDataPage::_global_lru[RC_end_of_list] = {
70  &VertexDataPage::_resident_lru,
71  &VertexDataPage::_compressed_lru,
72  &VertexDataPage::_disk_lru,
73 };
74 
75 VertexDataSaveFile *VertexDataPage::_save_file;
76 
77 // This mutex is (mostly) unused. We just need a Mutex to pass
78 // to the Book Constructor, below.
79 Mutex VertexDataPage::_unused_mutex;
80 
81 PStatCollector VertexDataPage::_vdata_compress_pcollector("*:Vertex Data:Compress");
82 PStatCollector VertexDataPage::_vdata_decompress_pcollector("*:Vertex Data:Decompress");
83 PStatCollector VertexDataPage::_vdata_save_pcollector("*:Vertex Data:Save");
84 PStatCollector VertexDataPage::_vdata_restore_pcollector("*:Vertex Data:Restore");
85 PStatCollector VertexDataPage::_thread_wait_pcollector("Wait:Idle");
86 PStatCollector VertexDataPage::_alloc_pages_pcollector("System memory:MMap:Vertex data");
87 
88 TypeHandle VertexDataPage::_type_handle;
89 TypeHandle VertexDataPage::DeflatePage::_type_handle;
90 
91 #if defined(HAVE_ZLIB) && !defined(USE_MEMORY_NOWRAPPERS)
92 // Define functions that hook zlib into panda's memory allocation system.
// zlib allocation callback: routes zlib's internal allocations through
// Panda's tracked allocator (PANDA_MALLOC_ARRAY) so they show up in
// Panda's memory accounting.
static void *
do_zlib_alloc(voidpf opaque, uInt items, uInt size) {
  // NOTE(review): items * size is computed in uInt (32-bit); presumably
  // zlib never requests enough at once to overflow -- confirm.
  return PANDA_MALLOC_ARRAY(items * size);
}
// zlib deallocation callback: the counterpart to do_zlib_alloc(),
// returning zlib's internal buffers to Panda's tracked allocator.
static void
do_zlib_free(voidpf opaque, voidpf address) {
  PANDA_FREE_ARRAY(address);
}
101 #endif // HAVE_ZLIB && !USE_MEMORY_NOWRAPPERS
102 
103 
104 ////////////////////////////////////////////////////////////////////
105 // Function: VertexDataPage::Book Constructor
106 // Access: Private
107 // Description: This constructor is used only by VertexDataBook, to
108 // create a mostly-empty object that can be used to
109 // search for a particular page size in the set.
110 ////////////////////////////////////////////////////////////////////
111 VertexDataPage::
112 VertexDataPage(size_t book_size) :
113  SimpleAllocator(book_size, _unused_mutex),
114  SimpleLruPage(book_size),
115  _book_size(book_size),
116  _block_size(0),
117  _book(NULL)
118 {
119  _page_data = NULL;
120  _size = 0;
121  _uncompressed_size = 0;
122  _ram_class = RC_resident;
123  _pending_ram_class = RC_resident;
124 }
125 
126 ////////////////////////////////////////////////////////////////////
127 // Function: VertexDataPage::Constructor
128 // Access: Private
129 // Description:
130 ////////////////////////////////////////////////////////////////////
131 VertexDataPage::
132 VertexDataPage(VertexDataBook *book, size_t page_size, size_t block_size) :
133  SimpleAllocator(page_size, book->_lock),
134  SimpleLruPage(page_size),
135  _book_size(page_size),
136  _block_size(block_size),
137  _book(book)
138 {
139  _allocated_size = round_up(page_size);
140  _page_data = alloc_page_data(_allocated_size);
141  _size = page_size;
142 
143  _uncompressed_size = _size;
144  _pending_ram_class = RC_resident;
145  set_ram_class(RC_resident);
146 }
147 
148 ////////////////////////////////////////////////////////////////////
149 // Function: VertexDataPage::Destructor
150 // Access: Private, Virtual
151 // Description:
152 ////////////////////////////////////////////////////////////////////
VertexDataPage::
~VertexDataPage() {

  // Since the only way to delete a page is via the
  // changed_contiguous() method, the lock will already be held.
  // MutexHolder holder(_lock);

  {
    // If a ram-class transition for this page is still queued with the
    // paging threads, dequeue it before the page goes away.
    MutexHolder holder2(_tlock);
    if (_pending_ram_class != _ram_class) {
      nassertv(_thread_mgr != (PageThreadManager *)NULL);
      _thread_mgr->remove_page(this);
    }
  }

  // Release the in-memory buffer, if any (a page whose data lives on
  // disk has already freed it and set _page_data to NULL).
  if (_page_data != NULL) {
    free_page_data(_page_data, _allocated_size);
    _size = 0;
  }

  // changed_contiguous() clears _book before deleting the page.
  nassertv(_book == NULL);
}
175 
176 ////////////////////////////////////////////////////////////////////
177 // Function: VertexDataPage::stop_threads
178 // Access: Published, Static
179 // Description: Call this to stop the paging threads, if they were
180 // started. This may block until all of the pending
181 // tasks have been completed.
182 ////////////////////////////////////////////////////////////////////
183 void VertexDataPage::
185  PT(PageThreadManager) thread_mgr;
186  {
187  MutexHolder holder(_tlock);
188  thread_mgr = _thread_mgr;
189  _thread_mgr.clear();
190  }
191 
192  if (thread_mgr != (PageThreadManager *)NULL) {
193  gobj_cat.info()
194  << "Stopping vertex paging threads.\n";
195  thread_mgr->stop_threads();
196  }
197 }
198 
199 ////////////////////////////////////////////////////////////////////
200 // Function: VertexDataPage::flush_threads
201 // Access: Published, Static
202 // Description: Waits for all of the pending thread tasks to finish
203 // before returning.
204 ////////////////////////////////////////////////////////////////////
205 void VertexDataPage::
207  int num_threads = vertex_data_page_threads;
208  if (num_threads == 0) {
209  stop_threads();
210  return;
211  }
212 
213  PT(PageThreadManager) thread_mgr;
214  {
215  MutexHolder holder(_tlock);
216  thread_mgr = _thread_mgr;
217  }
218 
219  if (thread_mgr != (PageThreadManager *)NULL) {
220  thread_mgr->stop_threads();
221  MutexHolder holder(_tlock);
222  thread_mgr->start_threads(num_threads);
223  }
224 }
225 
226 ////////////////////////////////////////////////////////////////////
227 // Function: VertexDataPage::output
228 // Access: Published, Virtual
229 // Description:
230 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
output(ostream &out) const {
  // Delegate to the allocator, which reports the page's allocation state.
  SimpleAllocator::output(out);
}
235 
236 ////////////////////////////////////////////////////////////////////
237 // Function: VertexDataPage::write
238 // Access: Published, Virtual
239 // Description:
240 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
write(ostream &out, int indent_level) const {
  // NOTE(review): indent_level is accepted for interface symmetry but is
  // not forwarded to SimpleAllocator::write() -- confirm whether the
  // indentation was intentionally dropped.
  SimpleAllocator::write(out);
}
245 
246 ////////////////////////////////////////////////////////////////////
247 // Function: VertexDataPage::make_block
248 // Access: Protected, Virtual
249 // Description: Creates a new SimpleAllocatorBlock object. Override
250 // this function to specialize the block type returned.
251 ////////////////////////////////////////////////////////////////////
SimpleAllocatorBlock *VertexDataPage::
make_block(size_t start, size_t size) {
  // Return a VertexDataBlock so each allocation carries a reference
  // back to its owning page.
  return new VertexDataBlock(this, start, size);
}
256 
257 ////////////////////////////////////////////////////////////////////
258 // Function: VertexDataPage::changed_contiguous
259 // Access: Protected, Virtual
260 // Description: This callback function is made whenever the estimate
261 // of contiguous available space changes, either through
262 // an alloc or free. The lock will be held.
263 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
changed_contiguous() {
  if (do_is_empty()) {
    // If the page is now empty, delete it.  This removes the page from
    // its book and then self-destructs; nothing may touch the page
    // after this point.
    VertexDataBook::Pages::iterator pi = _book->_pages.find(this);
    nassertv(pi != _book->_pages.end());
    _book->_pages.erase(pi);
    _book = NULL;
    delete this;
    return;
  }

  // Otherwise, re-key the page within its book by the new amount of
  // contiguous free space.
  adjust_book_size();
}
278 
279 ////////////////////////////////////////////////////////////////////
280 // Function: VertexDataPage::evict_lru
281 // Access: Public, Virtual
282 // Description: Evicts the page from the LRU. Called internally when
283 // the LRU determines that it is full. May also be
284 // called externally when necessary to explicitly evict
285 // the page.
286 //
287 // It is legal for this method to either evict the page
288 // as requested, do nothing (in which case the eviction
289 // will be requested again at the next epoch), or
290 // requeue itself on the tail of the queue (in which
291 // case the eviction will be requested again much
292 // later).
293 ////////////////////////////////////////////////////////////////////
294 void VertexDataPage::
295 evict_lru() {
296  MutexHolder holder(_lock);
297 
298  switch (_ram_class) {
299  case RC_resident:
300  if (_compressed_lru.get_max_size() == 0) {
301  request_ram_class(RC_disk);
302  } else {
303  request_ram_class(RC_compressed);
304  }
305  break;
306 
307  case RC_compressed:
308  request_ram_class(RC_disk);
309  break;
310 
311  case RC_disk:
312  case RC_end_of_list:
313  gobj_cat.warning()
314  << "Internal error: attempt to evict array data " << this
315  << " in inappropriate state " << _ram_class << ".\n";
316  break;
317  }
318 }
319 
320 ////////////////////////////////////////////////////////////////////
321 // Function: VertexDataPage::do_alloc
322 // Access: Private
323 // Description: Allocates a new block. Returns NULL if a block of the
324 // requested size cannot be allocated.
325 //
326 // To free the allocated block, call block->free(), or
327 // simply delete the block pointer.
328 //
329 // Assumes the lock is already held.
330 ////////////////////////////////////////////////////////////////////
331 VertexDataBlock *VertexDataPage::
332 do_alloc(size_t size) {
333  VertexDataBlock *block = (VertexDataBlock *)SimpleAllocator::do_alloc(size);
334 
335  if (block != (VertexDataBlock *)NULL && _ram_class != RC_disk) {
336  // When we allocate a new block within a resident page, we have to
337  // clear the disk cache (since we have just invalidated it).
338  _saved_block.clear();
339  }
340 
341  return block;
342 }
343 
344 ////////////////////////////////////////////////////////////////////
345 // Function: VertexDataPage::make_resident_now
346 // Access: Private
347 // Description: Short-circuits the thread and forces the page into
348 // resident status immediately.
349 //
350 // Intended to be called from the main thread. Assumes
351 // the lock is already held.
352 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_resident_now() {
  MutexHolder holder(_tlock);
  // If some other ram class is still queued with the paging threads,
  // cancel that request first; we are about to supersede it.
  if (_pending_ram_class != _ram_class) {
    nassertv(_thread_mgr != (PageThreadManager *)NULL);
    _thread_mgr->remove_page(this);
  }

  // Perform the restore/decompress synchronously on this thread.
  make_resident();
  _pending_ram_class = RC_resident;
}
364 
365 ////////////////////////////////////////////////////////////////////
366 // Function: VertexDataPage::make_resident
367 // Access: Private
368 // Description: Moves the page to fully resident status by
369 // expanding it or reading it from disk as necessary.
370 //
371 // Intended to be called from the sub-thread. Assumes
372 // the lock is already held.
373 ////////////////////////////////////////////////////////////////////
374 void VertexDataPage::
375 make_resident() {
376  if (_ram_class == RC_resident) {
377  mark_used_lru();
378  return;
379  }
380 
381  if (_ram_class == RC_disk) {
382  do_restore_from_disk();
383  }
384 
385  if (_ram_class == RC_compressed) {
386 #ifdef HAVE_ZLIB
387  PStatTimer timer(_vdata_decompress_pcollector);
388 
389  if (gobj_cat.is_debug()) {
390  gobj_cat.debug()
391  << "Expanding page from " << _size
392  << " to " << _uncompressed_size << "\n";
393  }
394  size_t new_allocated_size = round_up(_uncompressed_size);
395  unsigned char *new_data = alloc_page_data(new_allocated_size);
396  unsigned char *end_data = new_data + new_allocated_size;
397 
398  z_stream z_source;
399 #ifdef USE_MEMORY_NOWRAPPERS
400  z_source.zalloc = Z_NULL;
401  z_source.zfree = Z_NULL;
402 #else
403  z_source.zalloc = (alloc_func)&do_zlib_alloc;
404  z_source.zfree = (free_func)&do_zlib_free;
405 #endif
406 
407  z_source.opaque = Z_NULL;
408  z_source.msg = (char *) "no error message";
409 
410  z_source.next_in = (Bytef *)(char *)_page_data;
411  z_source.avail_in = _size;
412  z_source.next_out = (Bytef *)new_data;
413  z_source.avail_out = new_allocated_size;
414 
415  int result = inflateInit(&z_source);
416  if (result < 0) {
417  nassert_raise("zlib error");
418  return;
419  }
421 
422  size_t output_size = 0;
423 
424  int flush = 0;
425  result = 0;
426  while (result != Z_STREAM_END) {
427  unsigned char *start_out = (unsigned char *)z_source.next_out;
428  nassertv(start_out < end_data);
429  z_source.avail_out = min((size_t)(end_data - start_out), (size_t)inflate_page_size);
430  nassertv(z_source.avail_out != 0);
431  result = inflate(&z_source, flush);
432  if (result < 0 && result != Z_BUF_ERROR) {
433  nassert_raise("zlib error");
434  return;
435  }
436  size_t bytes_produced = (size_t)((unsigned char *)z_source.next_out - start_out);
437  output_size += bytes_produced;
438  if (bytes_produced == 0) {
439  // If we ever produce no bytes, then start flushing the output.
440  flush = Z_FINISH;
441  }
442 
444  }
445  nassertv(z_source.avail_in == 0);
446  nassertv(output_size == _uncompressed_size);
447 
448  result = inflateEnd(&z_source);
449  nassertv(result == Z_OK);
450 
451  free_page_data(_page_data, _allocated_size);
452  _page_data = new_data;
453  _size = _uncompressed_size;
454  _allocated_size = new_allocated_size;
455 #endif
456 
457  set_lru_size(_size);
458  set_ram_class(RC_resident);
459  }
460 }
461 
462 ////////////////////////////////////////////////////////////////////
463 // Function: VertexDataPage::make_compressed
464 // Access: Private
465 // Description: Moves the page to compressed status by
466 // compressing it or reading it from disk as necessary.
467 //
468 // Assumes the lock is already held.
469 ////////////////////////////////////////////////////////////////////
470 void VertexDataPage::
471 make_compressed() {
472  if (_ram_class == RC_compressed) {
473  // If we're already compressed, just mark the page recently used.
474  mark_used_lru();
475  return;
476  }
477 
478  if (_ram_class == RC_disk) {
479  do_restore_from_disk();
480  }
481 
482  if (_ram_class == RC_resident) {
483  nassertv(_size == _uncompressed_size);
484 
485 #ifdef HAVE_ZLIB
486  PStatTimer timer(_vdata_compress_pcollector);
487 
488  DeflatePage *page = new DeflatePage;
489  DeflatePage *head = page;
490 
491  z_stream z_dest;
492 #ifdef USE_MEMORY_NOWRAPPERS
493  z_dest.zalloc = Z_NULL;
494  z_dest.zfree = Z_NULL;
495 #else
496  z_dest.zalloc = (alloc_func)&do_zlib_alloc;
497  z_dest.zfree = (free_func)&do_zlib_free;
498 #endif
499 
500  z_dest.opaque = Z_NULL;
501  z_dest.msg = (char *) "no error message";
502 
503  int result = deflateInit(&z_dest, vertex_data_compression_level);
504  if (result < 0) {
505  nassert_raise("zlib error");
506  return;
507  }
509 
510  z_dest.next_in = (Bytef *)(char *)_page_data;
511  z_dest.avail_in = _uncompressed_size;
512  size_t output_size = 0;
513 
514  // Compress the data into one or more individual pages. We have
515  // to compress it page-at-a-time, since we're not really sure how
516  // big the result will be (so we can't easily pre-allocate a
517  // buffer).
518  int flush = 0;
519  result = 0;
520  while (result != Z_STREAM_END) {
521  unsigned char *start_out = (page->_buffer + page->_used_size);
522  z_dest.next_out = (Bytef *)start_out;
523  z_dest.avail_out = (size_t)deflate_page_size - page->_used_size;
524  if (z_dest.avail_out == 0) {
525  DeflatePage *new_page = new DeflatePage;
526  page->_next = new_page;
527  page = new_page;
528  start_out = page->_buffer;
529  z_dest.next_out = (Bytef *)start_out;
530  z_dest.avail_out = deflate_page_size;
531  }
532 
533  result = deflate(&z_dest, flush);
534  if (result < 0 && result != Z_BUF_ERROR) {
535  nassert_raise("zlib error");
536  return;
537  }
538  size_t bytes_produced = (size_t)((unsigned char *)z_dest.next_out - start_out);
539  page->_used_size += bytes_produced;
540  nassertv(page->_used_size <= deflate_page_size);
541  output_size += bytes_produced;
542  if (bytes_produced == 0) {
543  // If we ever produce no bytes, then start flushing the output.
544  flush = Z_FINISH;
545  }
546 
548  }
549  nassertv(z_dest.avail_in == 0);
550 
551  result = deflateEnd(&z_dest);
552  nassertv(result == Z_OK);
553 
554  // Now we know how big the result will be. Allocate a buffer, and
555  // copy the data from the various pages.
556 
557  size_t new_allocated_size = round_up(output_size);
558  unsigned char *new_data = alloc_page_data(new_allocated_size);
559 
560  size_t copied_size = 0;
561  unsigned char *p = new_data;
562  page = head;
563  while (page != NULL) {
564  memcpy(p, page->_buffer, page->_used_size);
565  copied_size += page->_used_size;
566  p += page->_used_size;
567  DeflatePage *next = page->_next;
568  delete page;
569  page = next;
570  }
571  nassertv(copied_size == output_size);
572 
573  // Now free the original, uncompressed data, and put this new
574  // compressed buffer in its place.
575  free_page_data(_page_data, _allocated_size);
576  _page_data = new_data;
577  _size = output_size;
578  _allocated_size = new_allocated_size;
579 
580  if (gobj_cat.is_debug()) {
581  gobj_cat.debug()
582  << "Compressed " << *this << " from " << _uncompressed_size
583  << " to " << _size << "\n";
584  }
585 #endif
586  set_lru_size(_size);
587  set_ram_class(RC_compressed);
588  }
589 }
590 
591 ////////////////////////////////////////////////////////////////////
592 // Function: VertexDataPage::make_disk
593 // Access: Private
594 // Description: Moves the page to disk status by writing it to disk
595 // as necessary.
596 //
597 // Assumes the lock is already held.
598 ////////////////////////////////////////////////////////////////////
599 void VertexDataPage::
600 make_disk() {
601  if (_ram_class == RC_disk) {
602  // If we're already on disk, just mark the page recently used.
603  mark_used_lru();
604  return;
605  }
606 
607  if (_ram_class == RC_resident || _ram_class == RC_compressed) {
608  if (!do_save_to_disk()) {
609  // Can't save it to disk for some reason.
610  gobj_cat.warning()
611  << "Couldn't save page " << this << " to disk.\n";
612  mark_used_lru();
613  return;
614  }
615 
616  free_page_data(_page_data, _allocated_size);
617  _page_data = NULL;
618  _size = 0;
619 
620  set_ram_class(RC_disk);
621  }
622 }
623 
624 ////////////////////////////////////////////////////////////////////
625 // Function: VertexDataPage::do_save_to_disk
626 // Access: Private
627 // Description: Writes the page to disk, but does not evict it from
628 // memory or affect its LRU status. If it gets evicted
629 // later without having been modified, it will not need
630 // to write itself to disk again.
631 //
632 // Returns true on success, false on failure. Assumes
633 // the lock is already held.
634 ////////////////////////////////////////////////////////////////////
635 bool VertexDataPage::
636 do_save_to_disk() {
637  if (_ram_class == RC_resident || _ram_class == RC_compressed) {
638  PStatTimer timer(_vdata_save_pcollector);
639 
640  if (_saved_block == (VertexDataSaveBlock *)NULL) {
641  if (gobj_cat.is_debug()) {
642  gobj_cat.debug()
643  << "Storing page, " << _size << " bytes, to disk\n";
644  }
645 
646  bool compressed = (_ram_class == RC_compressed);
647 
648  _saved_block = get_save_file()->write_data(_page_data, _allocated_size, compressed);
649  if (_saved_block == (VertexDataSaveBlock *)NULL) {
650  // Can't write it to disk. Too bad.
651  return false;
652  }
653  } else {
654  if (gobj_cat.is_debug()) {
655  gobj_cat.debug()
656  << "Page already stored: " << _size << " bytes\n";
657  }
658  }
659  }
660 
661  return true;
662 }
663 
664 ////////////////////////////////////////////////////////////////////
665 // Function: VertexDataPage::do_restore_from_disk
666 // Access: Private
667 // Description: Restores the page from disk and makes it
668 // either compressed or resident (according to whether
669 // it was stored compressed on disk).
670 //
671 // Assumes the lock is already held.
672 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
do_restore_from_disk() {
  if (_ram_class == RC_disk) {
    // A disk-resident page must have a saved block and no RAM buffer.
    nassertv(_saved_block != (VertexDataSaveBlock *)NULL);
    nassertv(_page_data == (unsigned char *)NULL && _size == 0);

    PStatTimer timer(_vdata_restore_pcollector);

    size_t buffer_size = _saved_block->get_size();
    if (gobj_cat.is_debug()) {
      gobj_cat.debug()
        << "Restoring page, " << buffer_size << " bytes, from disk\n";
    }

    // Allocate a fresh buffer and read the saved image back into it.
    size_t new_allocated_size = round_up(buffer_size);
    unsigned char *new_data = alloc_page_data(new_allocated_size);
    if (!get_save_file()->read_data(new_data, new_allocated_size, _saved_block)) {
      nassert_raise("read error");
    }

    nassertv(_page_data == (unsigned char *)NULL);
    _page_data = new_data;
    _size = buffer_size;
    _allocated_size = new_allocated_size;

    // Return the page to whichever in-memory class it was saved from.
    set_lru_size(_size);
    if (_saved_block->get_compressed()) {
      set_ram_class(RC_compressed);
    } else {
      set_ram_class(RC_resident);
    }
  }
}
706 
707 ////////////////////////////////////////////////////////////////////
708 // Function: VertexDataPage::adjust_book_size
709 // Access: Private
710 // Description: Called when the "book size"--the size of the page as
711 // recorded in its book's table--has changed for some
712 // reason. Assumes the lock is held.
713 ////////////////////////////////////////////////////////////////////
714 void VertexDataPage::
715 adjust_book_size() {
716  size_t new_size = _contiguous;
717  if (_ram_class != RC_resident) {
718  // Let's not attempt to allocate new buffers from non-resident
719  // pages.
720  new_size = 0;
721  }
722 
723  if (_book != (VertexDataBook *)NULL && new_size != _book_size) {
724  VertexDataBook::Pages::iterator pi = _book->_pages.find(this);
725  nassertv(pi != _book->_pages.end());
726  _book->_pages.erase(pi);
727 
728  _book_size = new_size;
729  bool inserted = _book->_pages.insert(this).second;
730  nassertv(inserted);
731  }
732 }
733 
734 ////////////////////////////////////////////////////////////////////
735 // Function: VertexDataPage::request_ram_class
736 // Access: Private
737 // Description: Requests the thread set the page to the indicated ram
738 // class (if we are using threading). The page will be
739 // enqueued in the thread, which will eventually be
740 // responsible for setting the requested ram class.
741 //
742 // Assumes the page's lock is already held.
743 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
request_ram_class(RamClass ram_class) {
  int num_threads = vertex_data_page_threads;
  if (num_threads == 0 || !Thread::is_threading_supported()) {
    // No threads.  Do it immediately.
    switch (ram_class) {
    case RC_resident:
      make_resident();
      break;

    case RC_compressed:
      make_compressed();
      break;

    case RC_disk:
      make_disk();
      break;

    case RC_end_of_list:
      break;
    }
    _pending_ram_class = ram_class;
    return;
  }

  MutexHolder holder(_tlock);
  if (_thread_mgr == (PageThreadManager *)NULL) {
    // Create the thread manager lazily, the first time a threaded
    // transition is requested.
    gobj_cat.info()
      << "Spawning " << num_threads << " vertex paging threads.\n";
    _thread_mgr = new PageThreadManager(num_threads);
  }

  // Queue the transition; a page thread will perform it later.
  _thread_mgr->add_page(this, ram_class);
}
779 
780 ////////////////////////////////////////////////////////////////////
781 // Function: VertexDataPage::make_save_file
782 // Access: Private, Static
783 // Description: Creates the global VertexDataSaveFile that will be
784 // used to save vertex data buffers to disk when
785 // necessary.
786 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_save_file() {
  // Cap the save file at max-disk-vertex-data bytes; note that -1
  // becomes a huge size_t, which presumably acts as "no limit".
  size_t max_size = (size_t)max_disk_vertex_data;

  _save_file = new VertexDataSaveFile(vertex_save_file_directory,
                                      vertex_save_file_prefix, max_size);
}
794 
795 ////////////////////////////////////////////////////////////////////
796 // Function: VertexDataPage::alloc_page_data
797 // Access: Private
798 // Description: Allocates and returns a freshly-allocated buffer of
799 // at least the indicated size for holding vertex data.
800 ////////////////////////////////////////////////////////////////////
unsigned char *VertexDataPage::
alloc_page_data(size_t page_size) const {
  // Record the allocation in PStats, then mmap the buffer directly
  // rather than going through the heap.
  _alloc_pages_pcollector.add_level_now(page_size);
  return (unsigned char *)memory_hook->mmap_alloc(page_size, false);
}
806 
807 ////////////////////////////////////////////////////////////////////
808 // Function: VertexDataPage::free_page_data
809 // Access: Private
810 // Description: Releases a buffer allocated via alloc_page_data().
811 ////////////////////////////////////////////////////////////////////
void VertexDataPage::
free_page_data(unsigned char *page_data, size_t page_size) const {
  // Mirror of alloc_page_data(): un-count the bytes and unmap them.
  _alloc_pages_pcollector.sub_level_now(page_size);
  memory_hook->mmap_free(page_data, page_size);
}
817 
818 ////////////////////////////////////////////////////////////////////
819 // Function: VertexDataPage::PageThreadManager::Constructor
820 // Access: Public
821 // Description: Assumes _tlock is held.
822 ////////////////////////////////////////////////////////////////////
VertexDataPage::PageThreadManager::
PageThreadManager(int num_threads) :
  _shutdown(false),
  // The condition variable shares the global _tlock that protects the
  // pending queues.
  _pending_cvar(_tlock)
{
  // Spin up the initial pool of paging threads.
  start_threads(num_threads);
}
830 
831 ////////////////////////////////////////////////////////////////////
832 // Function: VertexDataPage::PageThreadManager::add_page
833 // Access: Public
834 // Description: Enqueues the indicated page on the thread queue to
835 // convert it to the specified ram class.
836 //
837 // It is assumed the page's lock is already held, and
838 // that _tlock is already held.
839 ////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThreadManager::
add_page(VertexDataPage *page, RamClass ram_class) {
  nassertv(!_shutdown);

  if (page->_pending_ram_class == ram_class) {
    // It's already queued.
    nassertv(page->get_lru() == &_pending_lru);
    return;
  }

  if (page->_pending_ram_class != page->_ram_class) {
    // It's already queued, but for a different ram class.  Dequeue it
    // so we can requeue it.
    remove_page(page);
  }

  if (page->_pending_ram_class != ram_class) {
    // First, move the page to the "pending" LRU.  When it eventually
    // gets its requested ram class set, it will be requeued on the
    // appropriate live LRU.
    page->mark_used_lru(&_pending_lru);

    page->_pending_ram_class = ram_class;
    // Restores (toward resident) and evictions (toward compressed or
    // disk) live on separate queues.
    if (ram_class == RC_resident) {
      _pending_reads.push_back(page);
    } else {
      _pending_writes.push_back(page);
    }
    // Wake a waiting page thread to service the new request.
    _pending_cvar.notify();
  }
}
871 
872 ////////////////////////////////////////////////////////////////////
873 // Function: VertexDataPage::PageThreadManager::remove_page
874 // Access: Public
875 // Description: Dequeues the indicated page and removes it from the
876 // pending task list.
877 //
878 // It is assumed the page's lock is already held, and
879 // that _tlock is already held.
880 ////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThreadManager::
remove_page(VertexDataPage *page) {
  nassertv(page != (VertexDataPage *)NULL);

  PageThreads::iterator ti;
  for (ti = _threads.begin(); ti != _threads.end(); ++ti) {
    PageThread *thread = (*ti);
    if (page == thread->_working_page) {
      // Oops, this thread is currently working on this one.  We'll have
      // to wait for the thread to finish.
      // The page lock must be released while we wait, or the worker
      // could never complete; it is reacquired before returning so the
      // caller's locking assumptions still hold.
      page->_lock.release();
      while (page == thread->_working_page) {
        thread->_working_cvar.wait();
      }
      page->_lock.acquire();
      return;
    }
  }

  // Not being worked on; pull it off whichever pending queue holds it.
  if (page->_pending_ram_class == RC_resident) {
    PendingPages::iterator pi =
      find(_pending_reads.begin(), _pending_reads.end(), page);
    nassertv(pi != _pending_reads.end());
    _pending_reads.erase(pi);
  } else {
    PendingPages::iterator pi =
      find(_pending_writes.begin(), _pending_writes.end(), page);
    nassertv(pi != _pending_writes.end());
    _pending_writes.erase(pi);
  }

  // No transition is pending any more.
  page->_pending_ram_class = page->_ram_class;

  // Put the page back on its proper LRU.
  page->mark_used_lru(_global_lru[page->_ram_class]);
}
917 
918 ////////////////////////////////////////////////////////////////////
919 // Function: VertexDataPage::PageThreadManager::get_num_threads
920 // Access: Public
921 // Description: Returns the number of threads active on the thread
922 // manager. Assumes _tlock is held.
923 ////////////////////////////////////////////////////////////////////
924 int VertexDataPage::PageThreadManager::
925 get_num_threads() const {
926  return (int)_threads.size();
927 }
928 
929 ////////////////////////////////////////////////////////////////////
930 // Function: VertexDataPage::PageThreadManager::get_num_pending_reads
931 // Access: Public
932 // Description: Returns the number of read requests waiting on the
933 // queue. Assumes _tlock is held.
934 ////////////////////////////////////////////////////////////////////
935 int VertexDataPage::PageThreadManager::
936 get_num_pending_reads() const {
937  return (int)_pending_reads.size();
938 }
939 
940 ////////////////////////////////////////////////////////////////////
941 // Function: VertexDataPage::PageThreadManager::get_num_pending_writes
942 // Access: Public
943 // Description: Returns the number of write requests waiting on the
944 // queue. Assumes _tlock is held.
945 ////////////////////////////////////////////////////////////////////
946 int VertexDataPage::PageThreadManager::
947 get_num_pending_writes() const {
948  return (int)_pending_writes.size();
949 }
950 
951 ////////////////////////////////////////////////////////////////////
952 // Function: VertexDataPage::PageThreadManager::start_threads
953 // Access: Public
954 // Description: Adds the indicated of threads to the list of active
955 // threads. Assumes _tlock is held.
956 ////////////////////////////////////////////////////////////////////
957 void VertexDataPage::PageThreadManager::
958 start_threads(int num_threads) {
959  _shutdown = false;
960 
961  _threads.reserve(num_threads);
962  for (int i = 0; i < num_threads; ++i) {
963  ostringstream name_strm;
964  name_strm << "VertexDataPage" << _threads.size();
965  PT(PageThread) thread = new PageThread(this, name_strm.str());
966  thread->start(TP_low, true);
967  _threads.push_back(thread);
968  }
969 }
970 
971 ////////////////////////////////////////////////////////////////////
972 // Function: VertexDataPage::PageThreadManager::stop_threads
973 // Access: Public
974 // Description: Signals all the threads to stop and waits for them.
975 // Does not return until the threads have finished.
976 // Assumes _tlock is *not* held.
977 ////////////////////////////////////////////////////////////////////
978 void VertexDataPage::PageThreadManager::
979 stop_threads() {
980  PageThreads threads;
981  {
982  MutexHolder holder(_tlock);
983  _shutdown = true;
984  _pending_cvar.notify_all();
985  threads.swap(_threads);
986  }
987 
988  PageThreads::iterator ti;
989  for (ti = threads.begin(); ti != threads.end(); ++ti) {
990  PageThread *thread = (*ti);
991  thread->join();
992  }
993 
994  nassertv(_pending_reads.empty() && _pending_writes.empty());
995 }
996 
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThread::Constructor
//       Access: Public
//  Description: Creates a worker thread that will service page
//               read/write requests for the indicated manager.
//               _working_cvar is bound to _tlock: thread_main
//               notifies it under _tlock, so waiters must hold
//               _tlock as well.
////////////////////////////////////////////////////////////////////
VertexDataPage::PageThread::
PageThread(PageThreadManager *manager, const string &name) :
  Thread(name, name),
  _manager(manager),
  _working_cvar(_tlock)
{
}
1009 
////////////////////////////////////////////////////////////////////
//     Function: VertexDataPage::PageThread::thread_main
//       Access: Protected, Virtual
//  Description: The main processing loop for each sub-thread.
//               Repeatedly pulls a page off the pending queues and
//               applies its requested ram-class transition, until
//               _shutdown is set while the queues are empty.
////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThread::
thread_main() {
  _tlock.acquire();

  while (true) {
    PStatClient::thread_tick(get_sync_name());

    // Sleep until there is work on either queue.  If a shutdown is
    // requested while the queues are empty, exit the thread here
    // (releasing _tlock on the way out).
    while (_manager->_pending_reads.empty() &&
           _manager->_pending_writes.empty()) {
      if (_manager->_shutdown) {
        _tlock.release();
        return;
      }
      PStatTimer timer(_thread_wait_pcollector);
      _manager->_pending_cvar.wait();
    }

    // Reads always have priority.
    if (!_manager->_pending_reads.empty()) {
      _working_page = _manager->_pending_reads.front();
      _manager->_pending_reads.pop_front();
    } else {
      _working_page = _manager->_pending_writes.front();
      _manager->_pending_writes.pop_front();
    }

    // Capture the requested transition, then drop _tlock so other
    // threads can queue and dequeue pages while we do the (possibly
    // slow) work below, holding only this page's own lock.
    RamClass ram_class = _working_page->_pending_ram_class;
    _tlock.release();

    {
      MutexHolder holder(_working_page->_lock);
      switch (ram_class) {
      case RC_resident:
        _working_page->make_resident();
        break;

      case RC_compressed:
        _working_page->make_compressed();
        break;

      case RC_disk:
        _working_page->make_disk();
        break;

      case RC_end_of_list:
        break;
      }
    }

    _tlock.acquire();

    // Clear _working_page and wake anyone (e.g. remove_page()) who is
    // waiting, under _tlock, for this page to be finished.
    _working_page = NULL;
    _working_cvar.notify();

    // NOTE(review): the extraction's line numbering jumps here
    // (1067 -> 1070), and nearby symbol residue references
    // Thread::consider_yield() -- a consider_yield() call may have
    // been dropped from this copy; verify against upstream.
  }
}
A block of bytes on the save file.
An implementation of a very simple LRU algorithm.
Definition: simpleLru.h:31
A block of bytes that holds one or more VertexDataBlocks.
A temporary file to hold the vertex data that has been evicted from memory and written to disk...
An implementation of a very simple block allocator.
static VertexDataSaveFile * get_save_file()
Returns the global VertexDataSaveFile that will be used to save vertex data buffers to disk when nece...
void mark_used_lru() const
To be called when the page is used; this will move it to the tail of the SimpleLru queue it is alread...
Definition: simpleLru.I:184
A standard mutex, or mutual exclusion lock.
Definition: pmutex.h:44
A lightweight class that can be used to automatically start and stop a PStatCollector around a sectio...
Definition: pStatTimer.h:34
A lightweight C++ object whose constructor calls acquire() and whose destructor calls release() on a ...
Definition: mutexHolder.h:29
static void consider_yield()
Possibly suspends the current thread for the rest of the current epoch, if it has run for enough this...
Definition: thread.I:263
void set_lru_size(size_t lru_size)
Specifies the size of this page, presumably in bytes, although any unit is possible.
Definition: simpleLru.I:219
A block of bytes that stores the actual raw vertex data referenced by a GeomVertexArrayData object...
A single block as returned from SimpleAllocator::alloc().
static bool is_threading_supported()
Returns true if threading support has been compiled in and enabled, or false if no threading is avail...
Definition: thread.I:185
A lightweight class that represents a single element that may be timed and/or counted via stats...
void acquire() const
Grabs the mutex if it is available.
Definition: mutexDirect.I:70
virtual void mmap_free(void *ptr, size_t size)
Frees a block of memory previously allocated via mmap_alloc().
One atomic piece that may be managed by a SimpleLru chain.
Definition: simpleLru.h:70
A collection of VertexDataPages, which can be used to allocate new VertexDataBlock objects...
static void stop_threads()
Call this to stop the paging threads, if they were started.
A thread; that is, a lightweight process.
Definition: thread.h:51
size_t get_max_size() const
Returns the max size of all objects that are allowed to be active on the LRU.
Definition: simpleLru.I:35
This is a convenience class to specialize ConfigVariable as an integer type.
void release() const
Releases the mutex.
Definition: mutexDirect.I:99
SimpleLru * get_lru() const
Returns the LRU that manages this page, or NULL if it is not currently managed by any LRU...
Definition: simpleLru.I:153
TypeHandle is the identifier used to differentiate C++ class types.
Definition: typeHandle.h:85
virtual void * mmap_alloc(size_t size, bool allow_exec)
Allocates a raw page or pages of memory directly from the OS.
static void flush_threads()
Waits for all of the pending thread tasks to finish before returning.