/**
 * PANDA 3D SOFTWARE
 * Copyright (c) Carnegie Mellon University. All rights reserved.
 *
 * All use of this software is subject to the terms of the revised BSD
 * license. You should have received a copy of this license along
 * with this source code in a file named "LICENSE."
 *
 * @file renderState.cxx
 * @author drose
 * @date 2002-02-21
 */

#include "renderState.h"
#include "transparencyAttrib.h"
#include "cullBinAttrib.h"
#include "cullBinManager.h"
#include "fogAttrib.h"
#include "clipPlaneAttrib.h"
#include "scissorAttrib.h"
#include "colorAttrib.h"
#include "colorScaleAttrib.h"
#include "textureAttrib.h"
#include "texGenAttrib.h"
#include "shaderAttrib.h"
#include "pStatTimer.h"
#include "config_pgraph.h"
#include "bamReader.h"
#include "bamWriter.h"
#include "datagramIterator.h"
#include "indent.h"
#include "compareTo.h"
#include "lightReMutexHolder.h"
#include "lightMutexHolder.h"
#include "thread.h"
#include "renderAttribRegistry.h"

using std::ostream;

LightReMutex *RenderState::_states_lock = nullptr;
RenderState::States *RenderState::_states = nullptr;
const RenderState *RenderState::_empty_state = nullptr;
UpdateSeq RenderState::_last_cycle_detect;
size_t RenderState::_garbage_index = 0;

PStatCollector RenderState::_cache_update_pcollector("*:State Cache:Update");
PStatCollector RenderState::_garbage_collect_pcollector("*:State Cache:Garbage Collect");
PStatCollector RenderState::_state_compose_pcollector("*:State Cache:Compose State");
PStatCollector RenderState::_state_invert_pcollector("*:State Cache:Invert State");
PStatCollector RenderState::_node_counter("RenderStates:On nodes");
PStatCollector RenderState::_cache_counter("RenderStates:Cached");
PStatCollector RenderState::_state_break_cycles_pcollector("*:State Cache:Break Cycles");
PStatCollector RenderState::_state_validate_pcollector("*:State Cache:Validate");

CacheStats RenderState::_cache_stats;

TypeHandle RenderState::_type_handle;


/**
 * Actually, this could be a private constructor, since no one inherits from
 * RenderState, but gcc gives us a spurious warning if all constructors are
 * private.
 */
RenderState::
RenderState() :
  _flags(0),
  _lock("RenderState")
{
  if (_states == nullptr) {
    init_states();
  }
  _saved_entry = -1;
  _last_mi = -1;
  _cache_stats.add_num_states(1);
  _read_overrides = nullptr;
  _generated_shader = nullptr;

#ifdef DO_MEMORY_USAGE
  MemoryUsage::update_type(this, this);
#endif
}

/**
 * RenderStates are only meant to be copied internally.
 */
RenderState::
RenderState(const RenderState &copy) :
  _filled_slots(copy._filled_slots),
  _flags(0),
  _lock("RenderState")
{
  // Copy over the attributes.
  for (int i = 0; i < RenderAttribRegistry::_max_slots; ++i) {
    _attributes[i] = copy._attributes[i];
  }

  _saved_entry = -1;
  _last_mi = -1;
  _cache_stats.add_num_states(1);
  _read_overrides = nullptr;
  _generated_shader = nullptr;

#ifdef DO_MEMORY_USAGE
  MemoryUsage::update_type(this, this);
#endif
}

/**
 * The destructor is responsible for removing the RenderState from the global
 * set if it is there.
 */
RenderState::
~RenderState() {
  // We'd better not call the destructor twice on a particular object.
  nassertv(!is_destructing());
  set_destructing();

  LightReMutexHolder holder(*_states_lock);

  // unref() should have cleared these.
  nassertv(_saved_entry == -1);
  nassertv(_composition_cache.is_empty() && _invert_composition_cache.is_empty());

  // If this was true at the beginning of the destructor, but is no longer
  // true now, probably we've been double-deleted.
  nassertv(get_ref_count() == 0);
  _cache_stats.add_num_states(-1);
}

/**
 * Provides an arbitrary ordering among all unique RenderStates, so we can
 * store the essentially different ones in a big set and throw away the rest.
 *
 * This method is not needed outside of the RenderState class because all
 * equivalent RenderState objects are guaranteed to share the same pointer;
 * thus, a pointer comparison is always sufficient.
 */
int RenderState::
compare_to(const RenderState &other) const {
  SlotMask mask = _filled_slots | other._filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    int result = _attributes[slot].compare_to(other._attributes[slot]);
    if (result != 0) {
      return result;
    }
    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return 0;
}
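
/*
 * Editorial usage sketch (not part of the original file): because
 * return_new() interns equivalent states (with uniquify-states enabled, its
 * default), client code can rely on pointer identity rather than
 * compare_to(). Uses colorAttrib.h, as included above.
 *
 *   CPT(RenderState) a = RenderState::make(ColorAttrib::make_flat(LColor(1, 0, 0, 1)));
 *   CPT(RenderState) b = RenderState::make(ColorAttrib::make_flat(LColor(1, 0, 0, 1)));
 *   // a == b: equivalent states share one pointer.
 */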

/**
 * Returns -1, 0, or 1 according to the relative sorting of these two
 * RenderStates, with regards to rendering performance, so that "heavier"
 * RenderAttribs (as defined by RenderAttribRegistry::get_slot_sort()) are
 * more likely to be grouped together. This is not related to the sorting
 * order defined by compare_to.
 */
int RenderState::
compare_sort(const RenderState &other) const {
  if (this == &other) {
    // Trivial case.
    return 0;
  }

  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
  int num_sorted_slots = reg->get_num_sorted_slots();
  for (int n = 0; n < num_sorted_slots; ++n) {
    int slot = reg->get_sorted_slot(n);
    nassertr((_attributes[slot]._attrib != nullptr) == _filled_slots.get_bit(slot), 0);

    const RenderAttrib *a = _attributes[slot]._attrib;
    const RenderAttrib *b = other._attributes[slot]._attrib;
    if (a != b) {
      return a < b ? -1 : 1;
    }
  }

  return 0;
}

/**
 * This version of compare_to takes a slot mask that indicates which
 * attributes to include in the comparison. Unlike compare_to, this method
 * compares the attributes by pointer.
 */
int RenderState::
compare_mask(const RenderState &other, SlotMask compare_mask) const {
  SlotMask mask = (_filled_slots | other._filled_slots) & compare_mask;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const RenderAttrib *a = _attributes[slot]._attrib;
    const RenderAttrib *b = other._attributes[slot]._attrib;
    if (a != b) {
      return a < b ? -1 : 1;
    }
    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return 0;
}

/**
 * Calls cull_callback() on each attrib. If any attrib returns false,
 * interrupts the list and returns false immediately; otherwise, completes the
 * list and returns true.
 */
bool RenderState::
cull_callback(CullTraverser *trav, const CullTraverserData &data) const {
  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &attrib = _attributes[slot];
    nassertr(attrib._attrib != nullptr, false);
    if (!attrib._attrib->cull_callback(trav, data)) {
      return false;
    }

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return true;
}

/**
 * Returns a RenderState with one attribute set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib, int override) {
  RenderState *state = new RenderState;
  int slot = attrib->get_slot();
  state->_attributes[slot].set(attrib, override);
  state->_filled_slots.set_bit(slot);
  return return_new(state);
}
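
/*
 * Editorial usage sketch (not part of the original file): typical client
 * construction of states from individual attribs, using the headers included
 * above. The override parameter defaults to 0.
 *
 *   CPT(RenderState) alpha =
 *     RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_alpha));
 *   CPT(RenderState) alpha_in_bin =
 *     RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_alpha),
 *                       CullBinAttrib::make("fixed", 0));
 */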

/**
 * Returns a RenderState with two attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with three attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2,
     const RenderAttrib *attrib3, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_attributes[attrib3->get_slot()].set(attrib3, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  state->_filled_slots.set_bit(attrib3->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with four attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2,
     const RenderAttrib *attrib3,
     const RenderAttrib *attrib4, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_attributes[attrib3->get_slot()].set(attrib3, override);
  state->_attributes[attrib4->get_slot()].set(attrib4, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  state->_filled_slots.set_bit(attrib3->get_slot());
  state->_filled_slots.set_bit(attrib4->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with five attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2,
     const RenderAttrib *attrib3,
     const RenderAttrib *attrib4,
     const RenderAttrib *attrib5, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_attributes[attrib3->get_slot()].set(attrib3, override);
  state->_attributes[attrib4->get_slot()].set(attrib4, override);
  state->_attributes[attrib5->get_slot()].set(attrib5, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  state->_filled_slots.set_bit(attrib3->get_slot());
  state->_filled_slots.set_bit(attrib4->get_slot());
  state->_filled_slots.set_bit(attrib5->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with n attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib * const *attrib, int num_attribs, int override) {
  if (num_attribs == 0) {
    return _empty_state;
  }
  RenderState *state = new RenderState;
  for (int i = 0; i < num_attribs; i++) {
    int slot = attrib[i]->get_slot();
    state->_attributes[slot].set(attrib[i], override);
    state->_filled_slots.set_bit(slot);
  }
  return return_new(state);
}

/**
 * Returns a new RenderState object that represents the composition of this
 * state with the other state.
 *
 * The result of this operation is cached, and will be retained as long as
 * both this RenderState object and the other RenderState object continue to
 * exist. Should one of them destruct, the cached entry will be removed, and
 * its pointer will be allowed to destruct as well.
 */
CPT(RenderState) RenderState::
compose(const RenderState *other) const {
  // This method isn't strictly const, because it updates the cache, but we
  // pretend that it is because it's only a cache which is transparent to the
  // rest of the interface.

  // We handle empty state (identity) as a trivial special case.
  if (is_empty()) {
    return other;
  }
  if (other->is_empty()) {
    return this;
  }

  if (!state_cache) {
    return do_compose(other);
  }

  LightReMutexHolder holder(*_states_lock);

  // Is this composition already cached?
  int index = _composition_cache.find(other);
  if (index != -1) {
    Composition &comp = ((RenderState *)this)->_composition_cache.modify_data(index);
    if (comp._result == nullptr) {
      // Well, it wasn't cached already, but we already had an entry (probably
      // created for the reverse direction), so use the same entry to store
      // the new result.
      CPT(RenderState) result = do_compose(other);
      comp._result = result;

      if (result != (const RenderState *)this) {
        // See the comments below about the need to up the reference count
        // only when the result is not the same as this.
        result->cache_ref();
      }
    }
    // Here's the cache!
    _cache_stats.inc_hits();
    return comp._result;
  }
  _cache_stats.inc_misses();

  // We need to make a new cache entry, both in this object and in the other
  // object. We make both records so the other RenderState object will know
  // to delete the entry from this object when it destructs, and vice-versa.

  // The cache entry in this object is the only one that indicates the result;
  // the other will be NULL for now.
  CPT(RenderState) result = do_compose(other);

  _cache_stats.add_total_size(1);
  _cache_stats.inc_adds(_composition_cache.is_empty());

  ((RenderState *)this)->_composition_cache[other]._result = result;

  if (other != this) {
    _cache_stats.add_total_size(1);
    _cache_stats.inc_adds(other->_composition_cache.is_empty());
    ((RenderState *)other)->_composition_cache[this]._result = nullptr;
  }

  if (result != (const RenderState *)this) {
    // If the result of compose() is something other than this, explicitly
    // increment the reference count. We have to be sure to decrement it
    // again later, when the composition entry is removed from the cache.
    result->cache_ref();

    // (If the result was just this again, we still store the result, but we
    // don't increment the reference count, since that would be a self-
    // referential leak.)
  }

  _cache_stats.maybe_report("RenderState");

  return result;
}
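
/*
 * Editorial usage sketch (not part of the original file): composition is how
 * a net state accumulates down a scene graph path; the child's attribs win
 * unless the parent's entry carries a higher override (see do_compose()
 * below for the exact rules).
 *
 *   CPT(RenderState) parent = RenderState::make(ColorAttrib::make_flat(LColor(1, 0, 0, 1)));
 *   CPT(RenderState) child  = RenderState::make(ColorAttrib::make_flat(LColor(0, 1, 0, 1)));
 *   CPT(RenderState) net    = parent->compose(child);   // green wins
 *
 *   CPT(RenderState) forced = RenderState::make(ColorAttrib::make_flat(LColor(1, 0, 0, 1)), 1);
 *   net = forced->compose(child);                       // red wins: override 1 > 0
 */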

/**
 * Returns a new RenderState object that represents the composition of this
 * state's inverse with the other state.
 *
 * This is similar to compose(), but is particularly useful for computing the
 * relative state of a node as viewed from some other node.
 */
CPT(RenderState) RenderState::
invert_compose(const RenderState *other) const {
  // This method isn't strictly const, because it updates the cache, but we
  // pretend that it is because it's only a cache which is transparent to the
  // rest of the interface.

  // We handle empty state (identity) as a trivial special case.
  if (is_empty()) {
    return other;
  }
  // Unlike compose(), the case of other->is_empty() is not quite as trivial
  // for invert_compose().

  if (other == this) {
    // a->invert_compose(a) always produces identity.
    return _empty_state;
  }

  if (!state_cache) {
    return do_invert_compose(other);
  }

  LightReMutexHolder holder(*_states_lock);

  // Is this composition already cached?
  int index = _invert_composition_cache.find(other);
  if (index != -1) {
    Composition &comp = ((RenderState *)this)->_invert_composition_cache.modify_data(index);
    if (comp._result == nullptr) {
      // Well, it wasn't cached already, but we already had an entry (probably
      // created for the reverse direction), so use the same entry to store
      // the new result.
      CPT(RenderState) result = do_invert_compose(other);
      comp._result = result;

      if (result != (const RenderState *)this) {
        // See the comments below about the need to up the reference count
        // only when the result is not the same as this.
        result->cache_ref();
      }
    }
    // Here's the cache!
    _cache_stats.inc_hits();
    return comp._result;
  }
  _cache_stats.inc_misses();

  // We need to make a new cache entry, both in this object and in the other
  // object. We make both records so the other RenderState object will know
  // to delete the entry from this object when it destructs, and vice-versa.

  // The cache entry in this object is the only one that indicates the result;
  // the other will be NULL for now.
  CPT(RenderState) result = do_invert_compose(other);

  _cache_stats.add_total_size(1);
  _cache_stats.inc_adds(_invert_composition_cache.is_empty());
  ((RenderState *)this)->_invert_composition_cache[other]._result = result;

  if (other != this) {
    _cache_stats.add_total_size(1);
    _cache_stats.inc_adds(other->_invert_composition_cache.is_empty());
    ((RenderState *)other)->_invert_composition_cache[this]._result = nullptr;
  }

  if (result != (const RenderState *)this) {
    // If the result of compose() is something other than this, explicitly
    // increment the reference count. We have to be sure to decrement it
    // again later, when the composition entry is removed from the cache.
    result->cache_ref();

    // (If the result was just this again, we still store the result, but we
    // don't increment the reference count, since that would be a self-
    // referential leak.)
  }

  return result;
}
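
/*
 * Editorial usage sketch (not part of the original file): invert_compose()
 * answers "what state transforms A's net state into B's?", which is how
 * relative-state queries such as NodePath::get_state(other) are computed
 * (node_a and node_b are assumed NodePaths).
 *
 *   CPT(RenderState) a = node_a.get_net_state();
 *   CPT(RenderState) b = node_b.get_net_state();
 *   CPT(RenderState) rel = a->invert_compose(b);
 *   // For invertible attribs, a->compose(rel) then reproduces b.
 */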

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the new RenderAttrib added. If there is already a RenderAttrib
 * with the same type, it is replaced (unless the override is lower).
 */
CPT(RenderState) RenderState::
add_attrib(const RenderAttrib *attrib, int override) const {
  int slot = attrib->get_slot();
  if (_filled_slots.get_bit(slot) &&
      _attributes[slot]._override > override) {
    // The existing attribute overrides.
    return this;
  }

  // The new attribute replaces.
  RenderState *new_state = new RenderState(*this);
  new_state->_attributes[slot].set(attrib, override);
  new_state->_filled_slots.set_bit(slot);
  return return_new(new_state);
}

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the new RenderAttrib added. If there is already a RenderAttrib
 * with the same type, it is replaced unconditionally. The override is not
 * changed.
 */
CPT(RenderState) RenderState::
set_attrib(const RenderAttrib *attrib) const {
  RenderState *new_state = new RenderState(*this);
  int slot = attrib->get_slot();
  new_state->_attributes[slot]._attrib = attrib;
  new_state->_filled_slots.set_bit(slot);
  return return_new(new_state);
}

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the new RenderAttrib added. If there is already a RenderAttrib
 * with the same type, it is replaced unconditionally. The override is also
 * replaced unconditionally.
 */
CPT(RenderState) RenderState::
set_attrib(const RenderAttrib *attrib, int override) const {
  RenderState *new_state = new RenderState(*this);
  int slot = attrib->get_slot();
  new_state->_attributes[slot].set(attrib, override);
  new_state->_filled_slots.set_bit(slot);
  return return_new(new_state);
}

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the indicated RenderAttrib removed.
 */
CPT(RenderState) RenderState::
remove_attrib(int slot) const {
  if (_attributes[slot]._attrib == nullptr) {
    // Already removed.
    return this;
  }

  // Will this bring us down to the empty state?
  if (_filled_slots.get_num_on_bits() == 1) {
    return _empty_state;
  }

  RenderState *new_state = new RenderState(*this);
  new_state->_attributes[slot].set(nullptr, 0);
  new_state->_filled_slots.clear_bit(slot);
  return return_new(new_state);
}

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with all attributes' override values incremented (or decremented, if
 * negative) by the indicated amount. If the override would drop below zero,
 * it is set to zero.
 */
CPT(RenderState) RenderState::
adjust_all_priorities(int adjustment) const {
  RenderState *new_state = new RenderState(*this);

  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    Attribute &attrib = new_state->_attributes[slot];
    nassertr(attrib._attrib != nullptr, this);
    attrib._override = std::max(attrib._override + adjustment, 0);

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return return_new(new_state);
}
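
/*
 * Editorial usage sketch (not part of the original file): the override
 * bookkeeping above is what NodePath::adjust_all_priorities() exposes; it
 * lets a whole state start (or stop) winning against overrides elsewhere.
 *
 *   CPT(RenderState) state = RenderState::make(
 *     TransparencyAttrib::make(TransparencyAttrib::M_alpha));
 *   state = state->adjust_all_priorities(1);  // every attrib now has override 1
 */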

/**
 * This method overrides ReferenceCount::unref() to check whether the
 * remaining reference count is entirely in the cache, and if so, it checks
 * for and breaks a cycle in the cache involving this object. This is
 * designed to prevent leaks from cyclical references within the cache.
 */
bool RenderState::
unref() const {
  if (garbage_collect_states || !state_cache) {
    // If we're not using the cache at all, or if we're relying on garbage
    // collection, just allow the pointer to unref normally.
    return ReferenceCount::unref();
  }

  // Here is the normal refcounting case, with a normal cache, and without
  // garbage collection in effect. In this case we will pull the object out
  // of the cache when its reference count goes to 0.

  // We always have to grab the lock, since we will definitely need to be
  // holding it if we happen to drop the reference count to 0. Having to grab
  // the lock at every call to unref() is a big limiting factor on
  // parallelization.
  LightReMutexHolder holder(*_states_lock);

  if (auto_break_cycles && uniquify_states) {
    if (get_cache_ref_count() > 0 &&
        get_ref_count() == get_cache_ref_count() + 1) {
      // If we are about to remove the one reference that is not in the cache,
      // leaving only references in the cache, then we need to check for a
      // cycle involving this RenderState and break it if it exists.
      ((RenderState *)this)->detect_and_break_cycles();
    }
  }

  if (ReferenceCount::unref()) {
    // The reference count is still nonzero.
    return true;
  }

  // The reference count has just reached zero. Make sure the object is
  // removed from the global object pool, before anyone else finds it and
  // tries to ref it.
  ((RenderState *)this)->release_new();
  ((RenderState *)this)->remove_cache_pointers();

  return false;
}

/**
 *
 */
void RenderState::
output(ostream &out) const {
  out << "S:";
  if (is_empty()) {
    out << "(empty)";

  } else {
    out << "(";
    const char *sep = "";

    SlotMask mask = _filled_slots;
    int slot = mask.get_lowest_on_bit();
    while (slot >= 0) {
      const Attribute &attrib = _attributes[slot];
      nassertv(attrib._attrib != nullptr);
      out << sep << attrib._attrib->get_type();
      sep = " ";

      mask.clear_bit(slot);
      slot = mask.get_lowest_on_bit();
    }
    out << ")";
  }
}

/**
 *
 */
void RenderState::
write(ostream &out, int indent_level) const {
  if (is_empty()) {
    indent(out, indent_level)
      << "(empty)\n";
  }

  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &attrib = _attributes[slot];
    nassertv(attrib._attrib != nullptr);
    attrib._attrib->write(out, indent_level);

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }
}

/**
 * Returns the maximum priority number (sometimes called override) that may be
 * set on any node. This may or may not be enforced, but the scene graph code
 * assumes that no priority numbers will be larger than this, and some effects
 * may not work properly if you use a larger number.
 */
int RenderState::
get_max_priority() {
  return 1000000000;
}
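
/*
 * Editorial usage sketch (not part of the original file): get_max_priority()
 * is the ceiling for the override values used above; e.g. to apply an attrib
 * that nothing deeper in the graph should displace (np is an assumed
 * NodePath, and renderModeAttrib.h is assumed to be available):
 *
 *   np.set_attrib(RenderModeAttrib::make(RenderModeAttrib::M_wireframe),
 *                 RenderState::get_max_priority());
 */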

/**
 * Returns the total number of unique RenderState objects allocated in the
 * world. This will go up and down during normal operations.
 */
int RenderState::
get_num_states() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);
  return _states->get_num_entries();
}

/**
 * Returns the total number of RenderState objects that have been allocated
 * but have no references outside of the internal RenderState cache.
 *
 * A nonzero return value is not necessarily indicative of leaked references;
 * it is normal for two RenderState objects, both of which have references
 * held outside the cache, to have the result of their composition stored
 * within the cache. This result will be retained within the cache until one
 * of the base RenderStates is released.
 *
 * Use list_cycles() to get an idea of the number of actual "leaked"
 * RenderState objects.
 */
int RenderState::
get_num_unused_states() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);

  // First, we need to count the number of times each RenderState object is
  // recorded in the cache.
  typedef pmap<const RenderState *, int> StateCount;
  StateCount state_count;

  size_t size = _states->get_num_entries();
  for (size_t si = 0; si < size; ++si) {
    const RenderState *state = _states->get_key(si);

    size_t i;
    size_t cache_size = state->_composition_cache.get_num_entries();
    for (i = 0; i < cache_size; ++i) {
      const RenderState *result = state->_composition_cache.get_data(i)._result;
      if (result != nullptr && result != state) {
        // Here's a RenderState that's recorded in the cache. Count it.
        std::pair<StateCount::iterator, bool> ir =
          state_count.insert(StateCount::value_type(result, 1));
        if (!ir.second) {
          // If the above insert operation fails, then it's already in the
          // cache; increment its value.
          (*(ir.first)).second++;
        }
      }
    }
    cache_size = state->_invert_composition_cache.get_num_entries();
    for (i = 0; i < cache_size; ++i) {
      const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
      if (result != nullptr && result != state) {
        std::pair<StateCount::iterator, bool> ir =
          state_count.insert(StateCount::value_type(result, 1));
        if (!ir.second) {
          (*(ir.first)).second++;
        }
      }
    }
  }

  // Now that we have the appearance count of each RenderState object, we can
  // tell which ones are unreferenced outside of the RenderState cache, by
  // comparing these to the reference counts.
  int num_unused = 0;

  StateCount::iterator sci;
  for (sci = state_count.begin(); sci != state_count.end(); ++sci) {
    const RenderState *state = (*sci).first;
    int count = (*sci).second;
    nassertr(count == state->get_cache_ref_count(), num_unused);
    nassertr(count <= state->get_ref_count(), num_unused);
    if (count == state->get_ref_count()) {
      num_unused++;

      if (pgraph_cat.is_debug()) {
        pgraph_cat.debug()
          << "Unused state: " << (void *)state << ":"
          << state->get_ref_count() << " =\n";
        state->write(pgraph_cat.debug(false), 2);
      }
    }
  }

  return num_unused;
}

/**
 * Empties the cache of composed RenderStates. This makes every RenderState
 * forget what results when it is composed with other RenderStates.
 *
 * This will eliminate any RenderState objects that have been allocated but
 * have no references outside of the internal RenderState map. It will not
 * eliminate RenderState objects that are still in use.
 *
 * Nowadays, this method should not be necessary, as reference-count cycles in
 * the composition cache should be automatically detected and broken.
 *
 * The return value is the number of RenderStates freed by this operation.
 */
int RenderState::
clear_cache() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);

  PStatTimer timer(_cache_update_pcollector);
  int orig_size = _states->get_num_entries();

  // First, we need to copy the entire set of states to a temporary vector,
  // reference-counting each object. That way we can walk through the copy,
  // without fear of dereferencing (and deleting) the objects in the map as we
  // go.
  {
    typedef pvector< CPT(RenderState) > TempStates;
    TempStates temp_states;
    temp_states.reserve(orig_size);

    size_t size = _states->get_num_entries();
    for (size_t si = 0; si < size; ++si) {
      const RenderState *state = _states->get_key(si);
      temp_states.push_back(state);
    }

    // Now it's safe to walk through the list, destroying the cache within
    // each object as we go. Nothing will be destructed till we're done.
    TempStates::iterator ti;
    for (ti = temp_states.begin(); ti != temp_states.end(); ++ti) {
      RenderState *state = (RenderState *)(*ti).p();

      size_t i;
      size_t cache_size = (int)state->_composition_cache.get_num_entries();
      for (i = 0; i < cache_size; ++i) {
        const RenderState *result = state->_composition_cache.get_data(i)._result;
        if (result != nullptr && result != state) {
          result->cache_unref();
          nassertr(result->get_ref_count() > 0, 0);
        }
      }
      _cache_stats.add_total_size(-(int)state->_composition_cache.get_num_entries());
      state->_composition_cache.clear();

      cache_size = (int)state->_invert_composition_cache.get_num_entries();
      for (i = 0; i < cache_size; ++i) {
        const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
        if (result != nullptr && result != state) {
          result->cache_unref();
          nassertr(result->get_ref_count() > 0, 0);
        }
      }
      _cache_stats.add_total_size(-(int)state->_invert_composition_cache.get_num_entries());
      state->_invert_composition_cache.clear();
    }

    // Once this block closes and the temp_states object goes away, all the
    // destruction will begin. Anything whose reference was held only within
    // the various objects' caches will go away.
  }

  int new_size = _states->get_num_entries();
  return orig_size - new_size;
}

/**
 * Performs a garbage-collection cycle. This must be called periodically if
 * garbage-collect-states is true to ensure that RenderStates get cleaned up
 * appropriately. It does no harm to call it even if this variable is not
 * true, but there is probably no advantage in that case.
 *
 * This automatically calls RenderAttrib::garbage_collect() as well.
 */
int RenderState::
garbage_collect() {
  int num_attribs = RenderAttrib::garbage_collect();

  if (_states == nullptr || !garbage_collect_states) {
    return num_attribs;
  }

  LightReMutexHolder holder(*_states_lock);

  PStatTimer timer(_garbage_collect_pcollector);
  size_t orig_size = _states->get_num_entries();

  // How many elements to process this pass?
  size_t size = orig_size;
  size_t num_this_pass = std::max(0, int(size * garbage_collect_states_rate));
  if (num_this_pass <= 0) {
    return num_attribs;
  }

  bool break_and_uniquify = (auto_break_cycles && uniquify_states);

  size_t si = _garbage_index;
  if (si >= size) {
    si = 0;
  }

  num_this_pass = std::min(num_this_pass, size);
  size_t stop_at_element = (si + num_this_pass) % size;

  do {
    RenderState *state = (RenderState *)_states->get_key(si);
    if (break_and_uniquify) {
      if (state->get_cache_ref_count() > 0 &&
          state->get_ref_count() == state->get_cache_ref_count()) {
        // If we have removed all the references to this state not in the
        // cache, leaving only references in the cache, then we need to
        // check for a cycle involving this RenderState and break it if it
        // exists.
        state->detect_and_break_cycles();
      }
    }

    if (state->get_ref_count() == 1) {
      // This state has recently been unreffed to 1 (the one we added when
      // we stored it in the cache). Now it's time to delete it. This is
      // safe, because we're holding the _states_lock, so it's not possible
      // for some other thread to find the state in the cache and ref it
      // while we're doing this.
      state->release_new();
      state->remove_cache_pointers();
      state->cache_unref();
      delete state;

      // When we removed it from the hash map, it swapped the last element
      // with the one we just removed. So the current index contains one we
      // still need to visit.
      --size;
      --si;
      if (stop_at_element > 0) {
        --stop_at_element;
      }
    }

    si = (si + 1) % size;
  } while (si != stop_at_element);
  _garbage_index = si;

  nassertr(_states->get_num_entries() == size, 0);

#ifdef _DEBUG
  nassertr(_states->validate(), 0);
#endif

  // If we just cleaned up a lot of states, see if we can reduce the table in
  // size. This will help reduce iteration overhead in the future.
  _states->consider_shrink_table();

  return (int)orig_size - (int)size + num_attribs;
}
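
/*
 * Editorial usage sketch (not part of the original file): when the
 * garbage-collect-states config variable is true, something must call this
 * once per frame. Panda's GraphicsEngine normally does so as part of its
 * frame loop; an explicit loop is only needed without an engine:
 *
 *   while (framework.do_frame(Thread::get_current_thread())) {
 *     RenderState::garbage_collect();
 *     TransformState::garbage_collect();
 *   }
 *
 * (framework is an assumed PandaFramework instance; transformState.h would
 * be needed for TransformState.)
 */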

/**
 * Completely empties the cache of state + gsg -> munger, for all states and
 * all gsg's. Normally there is no need to empty this cache.
 */
void RenderState::
clear_munger_cache() {
  LightReMutexHolder holder(*_states_lock);

  size_t size = _states->get_num_entries();
  for (size_t si = 0; si < size; ++si) {
    RenderState *state = (RenderState *)(_states->get_key(si));
    state->_mungers.clear();
    state->_munged_states.clear();
    state->_last_mi = -1;
  }
}

/**
 * Detects all of the reference-count cycles in the cache and reports them to
 * standard output.
 *
 * These cycles may be inadvertently created when state compositions cycle
 * back to a starting point. Nowadays, these cycles should be automatically
 * detected and broken, so this method should never list any cycles unless
 * there is a bug in that detection logic.
 *
 * The cycles listed here are not leaks in the strictest sense of the word,
 * since they can be reclaimed by a call to clear_cache(); but they will not
 * be reclaimed automatically.
 */
void RenderState::
list_cycles(ostream &out) {
  if (_states == nullptr) {
    return;
  }
  LightReMutexHolder holder(*_states_lock);

  typedef pset<const RenderState *> VisitedStates;
  VisitedStates visited;
  CompositionCycleDesc cycle_desc;

  size_t size = _states->get_num_entries();
  for (size_t si = 0; si < size; ++si) {
    const RenderState *state = _states->get_key(si);

    bool inserted = visited.insert(state).second;
    if (inserted) {
      ++_last_cycle_detect;
      if (r_detect_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
        // This state begins a cycle.
        CompositionCycleDesc::reverse_iterator csi;

        out << "\nCycle detected of length " << cycle_desc.size() + 1 << ":\n"
            << "state " << (void *)state << ":" << state->get_ref_count()
            << " =\n";
        state->write(out, 2);
        for (csi = cycle_desc.rbegin(); csi != cycle_desc.rend(); ++csi) {
          const CompositionCycleDescEntry &entry = (*csi);
          if (entry._inverted) {
            out << "invert composed with ";
          } else {
            out << "composed with ";
          }
          out << (const void *)entry._obj << ":" << entry._obj->get_ref_count()
              << " " << *entry._obj << "\n"
              << "produces " << (const void *)entry._result << ":"
              << entry._result->get_ref_count() << " =\n";
          entry._result->write(out, 2);
          visited.insert(entry._result);
        }

        cycle_desc.clear();
      } else {
        ++_last_cycle_detect;
        if (r_detect_reverse_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
          // This state begins a cycle.
          CompositionCycleDesc::iterator csi;

          out << "\nReverse cycle detected of length " << cycle_desc.size() + 1 << ":\n"
              << "state ";
          for (csi = cycle_desc.begin(); csi != cycle_desc.end(); ++csi) {
            const CompositionCycleDescEntry &entry = (*csi);
            out << (const void *)entry._result << ":"
                << entry._result->get_ref_count() << " =\n";
            entry._result->write(out, 2);
            out << (const void *)entry._obj << ":"
                << entry._obj->get_ref_count() << " =\n";
            entry._obj->write(out, 2);
            visited.insert(entry._result);
          }
          out << (void *)state << ":"
              << state->get_ref_count() << " =\n";
          state->write(out, 2);

          cycle_desc.clear();
        }
      }
    }
  }
}


/**
 * Lists all of the RenderStates in the cache to the output stream, one per
 * line. This can be quite a lot of output if the cache is large, so be
 * prepared.
 */
void RenderState::
list_states(ostream &out) {
  if (_states == nullptr) {
    out << "0 states:\n";
    return;
  }
  LightReMutexHolder holder(*_states_lock);

  size_t size = _states->get_num_entries();
  out << size << " states:\n";
  for (size_t si = 0; si < size; ++si) {
    const RenderState *state = _states->get_key(si);
    state->write(out, 2);
  }
}
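
/*
 * Editorial usage sketch (not part of the original file): the inspection
 * hooks above combine into a quick leak check at a debugging breakpoint.
 *
 *   std::cerr << RenderState::get_num_states() << " states, "
 *             << RenderState::get_num_unused_states() << " unused\n";
 *   RenderState::list_cycles(std::cerr);
 *   // RenderState::list_states(std::cerr);  // verbose: every cached state
 */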

/**
 * Ensures that the cache is still stored in sorted order, and that none of
 * the cache elements have been inadvertently deleted. Returns true if so,
 * false if there is a problem (which implies someone has modified one of the
 * supposedly-const RenderState objects).
 */
bool RenderState::
validate_states() {
  if (_states == nullptr) {
    return true;
  }

  PStatTimer timer(_state_validate_pcollector);

  LightReMutexHolder holder(*_states_lock);
  if (_states->is_empty()) {
    return true;
  }

  if (!_states->validate()) {
    pgraph_cat.error()
      << "RenderState::_states cache is invalid!\n";
    return false;
  }

  size_t size = _states->get_num_entries();
  size_t si = 0;
  nassertr(si < size, false);
  nassertr(_states->get_key(si)->get_ref_count() >= 0, false);
  size_t snext = si;
  ++snext;
  while (snext < size) {
    nassertr(_states->get_key(snext)->get_ref_count() >= 0, false);
    const RenderState *ssi = _states->get_key(si);
    const RenderState *ssnext = _states->get_key(snext);
    int c = ssi->compare_to(*ssnext);
    int ci = ssnext->compare_to(*ssi);
    if ((ci < 0) != (c > 0) ||
        (ci > 0) != (c < 0) ||
        (ci == 0) != (c == 0)) {
      pgraph_cat.error()
        << "RenderState::compare_to() not defined properly!\n";
      pgraph_cat.error(false)
        << "(a, b): " << c << "\n";
      pgraph_cat.error(false)
        << "(b, a): " << ci << "\n";
      ssi->write(pgraph_cat.error(false), 2);
      ssnext->write(pgraph_cat.error(false), 2);
      return false;
    }
    si = snext;
    ++snext;
  }

  return true;
}

/**
 * Returns the union of the Geom::GeomRendering bits that will be required
 * once this RenderState is applied to a geom which includes the indicated
 * geom_rendering bits.
 */
int RenderState::
get_geom_rendering(int geom_rendering) const {
  const RenderModeAttrib *render_mode;
  const TexGenAttrib *tex_gen;
  const TexMatrixAttrib *tex_matrix;

  if (get_attrib(render_mode)) {
    geom_rendering = render_mode->get_geom_rendering(geom_rendering);
  }
  if (get_attrib(tex_gen)) {
    geom_rendering = tex_gen->get_geom_rendering(geom_rendering);
  }
  if (get_attrib(tex_matrix)) {
    geom_rendering = tex_matrix->get_geom_rendering(geom_rendering);
  }

  return geom_rendering;
}

/**
 * Intended to be called by CullBinManager::remove_bin(), this informs all the
 * RenderStates in the world to remove the indicated bin_index from their
 * cache if it has been cached.
 */
void RenderState::
bin_removed(int bin_index) {
  // Do something here.
  nassertv(false);
}

/**
 * Returns true if the _filled_slots bitmask is consistent with the table of
 * RenderAttrib pointers, false otherwise.
 */
bool RenderState::
validate_filled_slots() const {
  SlotMask mask;

  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
  int max_slots = reg->get_max_slots();
  for (int slot = 1; slot < max_slots; ++slot) {
    const Attribute &attribute = _attributes[slot];
    if (attribute._attrib != nullptr) {
      mask.set_bit(slot);
    }
  }

  return (mask == _filled_slots);
}

/**
 * Computes a suitable hash value for phash_map.
 */
void RenderState::
do_calc_hash() {
  _hash = 0;

  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &attrib = _attributes[slot];
    nassertv(attrib._attrib != nullptr);
    _hash = pointer_hash::add_hash(_hash, attrib._attrib);
    _hash = int_hash::add_hash(_hash, attrib._override);

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  _flags |= F_hash_known;
}

/**
 * This function is used to share a common RenderState pointer for all
 * equivalent RenderState objects.
 *
 * This is different from return_unique() in that it does not actually
 * guarantee a unique pointer, unless uniquify-states is set.
 */
CPT(RenderState) RenderState::
return_new(RenderState *state) {
  nassertr(state != nullptr, state);

  // Make sure we don't have anything in the 0 slot. If we did, that would
  // indicate an uninitialized slot number.
#ifndef NDEBUG
  if (state->_attributes[0]._attrib != nullptr) {
    const RenderAttrib *attrib = state->_attributes[0]._attrib;
    if (attrib->get_type() == TypeHandle::none()) {
      ((RenderAttrib *)attrib)->force_init_type();
      pgraph_cat->error()
        << "Uninitialized RenderAttrib type: " << attrib->get_type()
        << "\n";

    } else {
      static pset<TypeHandle> already_reported;
      if (already_reported.insert(attrib->get_type()).second) {
        pgraph_cat->error()
          << attrib->get_type() << " did not initialize its slot number.\n";
      }
    }
  }
#endif
  state->_attributes[0]._attrib = nullptr;
  state->_filled_slots.clear_bit(0);

#ifndef NDEBUG
  nassertr(state->validate_filled_slots(), state);
#endif

  if (!uniquify_states && !state->is_empty()) {
    return state;
  }

  return return_unique(state);
}

/**
 * This function is used to share a common RenderState pointer for all
 * equivalent RenderState objects.
 *
 * See the similar logic in RenderAttrib. The idea is to create a new
 * RenderState object and pass it through this function, which will share the
 * pointer with a previously-created RenderState object if it is equivalent.
 */
CPT(RenderState) RenderState::
return_unique(RenderState *state) {
  nassertr(state != nullptr, nullptr);

  if (!state_cache) {
    return state;
  }

#ifndef NDEBUG
  if (paranoid_const) {
    nassertr(validate_states(), state);
  }
#endif

  LightReMutexHolder holder(*_states_lock);

  if (state->_saved_entry != -1) {
    // This state is already in the cache.
    // nassertr(_states->find(state) == state->_saved_entry, pt_state);
    return state;
  }

  // Ensure each of the individual attrib pointers has been uniquified before
  // we add the state to the cache.
  if (!uniquify_attribs && !state->is_empty()) {
    SlotMask mask = state->_filled_slots;
    int slot = mask.get_lowest_on_bit();
    while (slot >= 0) {
      Attribute &attrib = state->_attributes[slot];
      nassertd(attrib._attrib != nullptr) continue;
      attrib._attrib = attrib._attrib->get_unique();
      mask.clear_bit(slot);
      slot = mask.get_lowest_on_bit();
    }
  }

  int si = _states->find(state);
  if (si != -1) {
    // There's an equivalent state already in the set. Return it. The state
    // that was passed may be newly created and therefore may not be
    // automatically deleted. Do that if necessary.
    if (state->get_ref_count() == 0) {
      delete state;
    }
    return _states->get_key(si);
  }

  // Not already in the set; add it.
  if (garbage_collect_states) {
    // If we'll be garbage collecting states explicitly, we'll increment the
    // reference count when we store it in the cache, so that it won't be
    // deleted while it's in it.
    state->cache_ref();
  }
  si = _states->store(state, nullptr);

  // Save the index and return the input state.
  state->_saved_entry = si;
  return state;
}

/**
 * The private implementation of compose(); this actually composes two
 * RenderStates, without bothering with the cache.
 */
CPT(RenderState) RenderState::
do_compose(const RenderState *other) const {
  PStatTimer timer(_state_compose_pcollector);

  RenderState *new_state = new RenderState;

  SlotMask mask = _filled_slots | other->_filled_slots;
  new_state->_filled_slots = mask;

  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &a = _attributes[slot];
    const Attribute &b = other->_attributes[slot];
    Attribute &result = new_state->_attributes[slot];

    if (a._attrib == nullptr) {
      nassertr(b._attrib != nullptr, this);
      // B wins.
      result = b;

    } else if (b._attrib == nullptr) {
      // A wins.
      result = a;

    } else if (b._override < a._override) {
      // A, the higher RenderAttrib, overrides.
      result = a;

    } else if (a._override < b._override &&
               a._attrib->lower_attrib_can_override()) {
      // B, the higher RenderAttrib, overrides. This is a special case;
      // normally, a lower RenderAttrib does not override a higher one, even
      // if it has a higher override value. But certain kinds of
      // RenderAttribs redefine lower_attrib_can_override() to return true,
      // allowing this override.
      result = b;

    } else {
      // Either they have the same override value, or B is higher. In either
      // case, the result is the composition of the two, with B's override
      // value.
      result.set(a._attrib->compose(b._attrib), b._override);
    }

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return return_new(new_state);
}

/**
 * The private implementation of invert_compose().
 */
CPT(RenderState) RenderState::
do_invert_compose(const RenderState *other) const {
  PStatTimer timer(_state_invert_pcollector);

  RenderState *new_state = new RenderState;

  SlotMask mask = _filled_slots | other->_filled_slots;
  new_state->_filled_slots = mask;

  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &a = _attributes[slot];
    const Attribute &b = other->_attributes[slot];
    Attribute &result = new_state->_attributes[slot];

    if (a._attrib == nullptr) {
      nassertr(b._attrib != nullptr, this);
      // B wins.
      result = b;

    } else if (b._attrib == nullptr) {
      // A wins. Invert it.
      RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
      result.set(a._attrib->invert_compose(reg->get_slot_default(slot)), 0);

    } else {
      // Both are good. (Overrides are not used in invert_compose.) Compose.
      result.set(a._attrib->invert_compose(b._attrib), 0);
    }

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }
  return return_new(new_state);
}

/**
 * Detects whether there is a cycle in the cache that begins with this state.
 * If any are detected, breaks them by removing this state from the cache.
 */
void RenderState::
detect_and_break_cycles() {
  PStatTimer timer(_state_break_cycles_pcollector);

  ++_last_cycle_detect;
  if (r_detect_cycles(this, this, 1, _last_cycle_detect, nullptr)) {
    // Ok, we have a cycle. This will be a leak unless we break the cycle by
    // freeing the cache on this object.
    if (pgraph_cat.is_debug()) {
      pgraph_cat.debug()
        << "Breaking cycle involving " << (*this) << "\n";
    }

    ((RenderState *)this)->remove_cache_pointers();
  } else {
    ++_last_cycle_detect;
    if (r_detect_reverse_cycles(this, this, 1, _last_cycle_detect, nullptr)) {
      if (pgraph_cat.is_debug()) {
        pgraph_cat.debug()
          << "Breaking cycle involving " << (*this) << "\n";
      }

      ((RenderState *)this)->remove_cache_pointers();
    }
  }
}

/**
 * Detects whether there is a cycle in the cache that begins with the
 * indicated state. Returns true if at least one cycle is found, false if
 * this state is not part of any cycles. If a cycle is found and cycle_desc
 * is not NULL, then cycle_desc is filled in with the list of the steps of the
 * cycle, in reverse order.
 */
bool RenderState::
r_detect_cycles(const RenderState *start_state,
                const RenderState *current_state,
                int length, UpdateSeq this_seq,
                RenderState::CompositionCycleDesc *cycle_desc) {
  if (current_state->_cycle_detect == this_seq) {
    // We've already seen this state; therefore, we've found a cycle.

    // However, we only care about cycles that return to the starting state
    // and involve more than two steps. If only one or two nodes are
    // involved, it doesn't represent a memory leak, so no problem there.
    return (current_state == start_state && length > 2);
  }
  ((RenderState *)current_state)->_cycle_detect = this_seq;

  size_t i;
  size_t cache_size = current_state->_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *result = current_state->_composition_cache.get_data(i)._result;
    if (result != nullptr) {
      if (r_detect_cycles(start_state, result, length + 1,
                          this_seq, cycle_desc)) {
        // Cycle detected.
        if (cycle_desc != nullptr) {
          const RenderState *other = current_state->_composition_cache.get_key(i);
          CompositionCycleDescEntry entry(other, result, false);
          cycle_desc->push_back(entry);
        }
        return true;
      }
    }
  }

  cache_size = current_state->_invert_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *result = current_state->_invert_composition_cache.get_data(i)._result;
    if (result != nullptr) {
      if (r_detect_cycles(start_state, result, length + 1,
                          this_seq, cycle_desc)) {
        // Cycle detected.
        if (cycle_desc != nullptr) {
          const RenderState *other = current_state->_invert_composition_cache.get_key(i);
          CompositionCycleDescEntry entry(other, result, true);
          cycle_desc->push_back(entry);
        }
        return true;
      }
    }
  }

  // No cycle detected.
  return false;
}
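
/*
 * Editorial sketch (not part of the original file): the visited-marking
 * trick above, reduced to its essentials. Stamping each node with a
 * monotonically increasing traversal sequence number (_cycle_detect vs.
 * this_seq) makes every mark from a previous traversal stale automatically,
 * so no O(n) pass is needed to clear visited flags between traversals.
 *
 *   struct Node {
 *     std::vector<Node *> edges;
 *     unsigned visited_seq = 0;     // analogous to _cycle_detect
 *   };
 *   unsigned current_seq = 0;       // analogous to _last_cycle_detect
 *
 *   bool has_cycle(Node *start, Node *node, int length, unsigned seq) {
 *     if (node->visited_seq == seq) {
 *       return node == start && length > 2;  // same rule as above
 *     }
 *     node->visited_seq = seq;
 *     for (Node *next : node->edges) {
 *       if (has_cycle(start, next, length + 1, seq)) {
 *         return true;
 *       }
 *     }
 *     return false;
 *   }
 *
 *   // Caller: ++current_seq; has_cycle(n, n, 1, current_seq);
 */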

/**
 * Works the same as r_detect_cycles, but checks for cycles in the reverse
 * direction along the cache chain. (A cycle may appear in either direction,
 * and we must check both.)
 */
bool RenderState::
r_detect_reverse_cycles(const RenderState *start_state,
                        const RenderState *current_state,
                        int length, UpdateSeq this_seq,
                        RenderState::CompositionCycleDesc *cycle_desc) {
  if (current_state->_cycle_detect == this_seq) {
    // We've already seen this state; therefore, we've found a cycle.

    // However, we only care about cycles that return to the starting state
    // and involve more than two steps. If only one or two nodes are
    // involved, it doesn't represent a memory leak, so no problem there.
    return (current_state == start_state && length > 2);
  }
  ((RenderState *)current_state)->_cycle_detect = this_seq;

  size_t i;
  size_t cache_size = current_state->_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *other = current_state->_composition_cache.get_key(i);
    if (other != current_state) {
      int oi = other->_composition_cache.find(current_state);
      nassertr(oi != -1, false);

      const RenderState *result = other->_composition_cache.get_data(oi)._result;
      if (result != nullptr) {
        if (r_detect_reverse_cycles(start_state, result, length + 1,
                                    this_seq, cycle_desc)) {
          // Cycle detected.
          if (cycle_desc != nullptr) {
            const RenderState *other = current_state->_composition_cache.get_key(i);
            CompositionCycleDescEntry entry(other, result, false);
            cycle_desc->push_back(entry);
          }
          return true;
        }
      }
    }
  }

  cache_size = current_state->_invert_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *other = current_state->_invert_composition_cache.get_key(i);
    if (other != current_state) {
      int oi = other->_invert_composition_cache.find(current_state);
      nassertr(oi != -1, false);

      const RenderState *result = other->_invert_composition_cache.get_data(oi)._result;
      if (result != nullptr) {
        if (r_detect_reverse_cycles(start_state, result, length + 1,
                                    this_seq, cycle_desc)) {
          // Cycle detected.
          if (cycle_desc != nullptr) {
            const RenderState *other = current_state->_invert_composition_cache.get_key(i);
            CompositionCycleDescEntry entry(other, result, false);
            cycle_desc->push_back(entry);
          }
          return true;
        }
      }
    }
  }

  // No cycle detected.
  return false;
}

/**
 * The inverse of return_new(): this releases this object from the global
 * RenderState table.
 *
 * You must already be holding _states_lock before you call this method.
 */
void RenderState::
release_new() {
  nassertv(_states_lock->debug_is_locked());

  if (_saved_entry != -1) {
    _saved_entry = -1;
    nassertv_always(_states->remove(this));
  }
}

/**
 * Remove all pointers within the cache from and to this particular
 * RenderState. The pointers to this object may be scattered around in the
 * various CompositionCaches from other RenderState objects.
 *
 * You must already be holding _states_lock before you call this method.
 */
void RenderState::
remove_cache_pointers() {
  nassertv(_states_lock->debug_is_locked());

  // Fortunately, since we added CompositionCache records in pairs, we know
  // exactly the set of RenderState objects that have us in their cache: it's
  // the same set of RenderState objects that we have in our own cache.

/*
 * We do need to put considerable thought into this loop, because as we clear
 * out cache entries we'll cause other RenderState objects to destruct, which
 * could cause things to get pulled out of our own _composition_cache map. We
 * want to allow this (so that we don't encounter any just-destructed pointers
 * in our cache), but we don't want to get bitten by this cascading effect.
 * Instead of walking through the map from beginning to end, therefore, we
 * just pull out the first one each time, and erase it.
 */

#ifdef DO_PSTATS
  if (_composition_cache.is_empty() && _invert_composition_cache.is_empty()) {
    return;
  }
  PStatTimer timer(_cache_update_pcollector);
#endif  // DO_PSTATS

  // There are lots of ways to do this loop wrong. Be very careful if you
  // need to modify it for any reason.
  size_t i = 0;
  while (!_composition_cache.is_empty()) {
    // It is possible that the "other" RenderState object is currently within
    // its own destructor. We therefore can't use a PT() to hold its pointer;
    // that could end up calling its destructor twice. Fortunately, we don't
    // need to hold its reference count to ensure it doesn't destruct while we
    // process this loop; as long as we ensure that no *other* RenderState
    // objects destruct, there will be no reason for that one to.
    RenderState *other = (RenderState *)_composition_cache.get_key(i);

    // We hold a copy of the composition result so we can dereference it
    // later.
    Composition comp = _composition_cache.get_data(i);

    // Now we can remove the element from our cache. We do this now, rather
    // than later, before any other RenderState objects have had a chance to
    // destruct, so we are confident that our iterator is still valid.
    _composition_cache.remove_element(i);
    _cache_stats.add_total_size(-1);
    _cache_stats.inc_dels();

    if (other != this) {
      int oi = other->_composition_cache.find(this);

      // We may or may not still be listed in the other's cache (it might be
      // halfway through pulling entries out, from within its own destructor).
      if (oi != -1) {
        // Hold a copy of the other composition result, too.
        Composition ocomp = other->_composition_cache.get_data(oi);

        other->_composition_cache.remove_element(oi);
        _cache_stats.add_total_size(-1);
        _cache_stats.inc_dels();

        // It's finally safe to let our held pointers go away. This may have
        // cascading effects as other RenderState objects are destructed, but
        // there will be no harm done if they destruct now.
        if (ocomp._result != nullptr && ocomp._result != other) {
          cache_unref_delete(ocomp._result);
        }
      }
    }

    // It's finally safe to let our held pointers go away. (See comment
    // above.)
    if (comp._result != nullptr && comp._result != this) {
      cache_unref_delete(comp._result);
    }
  }

  // A similar bit of code for the invert cache.
  i = 0;
  while (!_invert_composition_cache.is_empty()) {
    RenderState *other = (RenderState *)_invert_composition_cache.get_key(i);
    nassertv(other != this);
    Composition comp = _invert_composition_cache.get_data(i);
    _invert_composition_cache.remove_element(i);
    _cache_stats.add_total_size(-1);
    _cache_stats.inc_dels();
    if (other != this) {
      int oi = other->_invert_composition_cache.find(this);
      if (oi != -1) {
        Composition ocomp = other->_invert_composition_cache.get_data(oi);
        other->_invert_composition_cache.remove_element(oi);
        _cache_stats.add_total_size(-1);
        _cache_stats.inc_dels();
        if (ocomp._result != nullptr && ocomp._result != other) {
          cache_unref_delete(ocomp._result);
        }
      }
    }
    if (comp._result != nullptr && comp._result != this) {
      cache_unref_delete(comp._result);
    }
  }
}
1729 
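// [Editor's note] The erase-first idiom above, reduced to a hedged generic
// sketch (cache, Key, Value and release() are hypothetical stand-ins):
//
//   while (!cache.is_empty()) {
//     Key key = cache.get_key(0);     // always take the first entry
//     Value val = cache.get_data(0);  // copy it out before mutating
//     cache.remove_element(0);        // erase before dropping references,
//     release(val);                   // which may reentrantly shrink cache
//   }
//
// Copying each entry out and erasing it before any reference count is
// dropped is what keeps the loop safe against the cascading destruction
// described above.
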
1730 /**
1731  * This is the private implementation of get_bin_index() and get_draw_order().
1732  */
1733 void RenderState::
1734 determine_bin_index() {
1735  LightMutexHolder holder(_lock);
1736  if ((_flags & F_checked_bin_index) != 0) {
1737  // Someone else checked it first.
1738  return;
1739  }
1740 
1741  std::string bin_name;
1742  _draw_order = 0;
1743 
1744  const CullBinAttrib *bin;
1745  if (get_attrib(bin)) {
1746  bin_name = bin->get_bin_name();
1747  _draw_order = bin->get_draw_order();
1748  }
1749 
1750  if (bin_name.empty()) {
 1751  // No explicit bin is specified; put it in the default bin, either
1752  // opaque or transparent, based on the transparency setting.
1753  bin_name = "opaque";
1754 
1755  const TransparencyAttrib *transparency;
1756  if (get_attrib(transparency)) {
1757  switch (transparency->get_mode()) {
1758  case TransparencyAttrib::M_alpha:
1759  case TransparencyAttrib::M_premultiplied_alpha:
1760  case TransparencyAttrib::M_dual:
1761  // These transparency modes require special back-to-front sorting.
1762  bin_name = "transparent";
1763  break;
1764 
1765  default:
1766  break;
1767  }
1768  }
1769  }
1770 
 1771  CullBinManager *bin_manager = CullBinManager::get_global_ptr();
 1772  _bin_index = bin_manager->find_bin(bin_name);
1773  if (_bin_index == -1) {
1774  pgraph_cat.warning()
1775  << "No bin named " << bin_name << "; creating default bin.\n";
1776  _bin_index = bin_manager->add_bin(bin_name, CullBinManager::BT_unsorted, 0);
1777  }
1778  _flags |= F_checked_bin_index;
1779 }
1780 
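// [Editor's note] A hedged usage sketch of the decision above, as seen from
// application code (np is an assumed NodePath):
//
//   // An explicit CullBinAttrib overrides the opaque/transparent heuristic.
//   np.set_attrib(CullBinAttrib::make("fixed", 10));
//
//   // With no explicit bin, enabling alpha transparency is what moves the
//   // geometry into the "transparent" bin.
//   np.set_attrib(TransparencyAttrib::make(TransparencyAttrib::M_alpha));
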
1781 /**
1782  * This is the private implementation of has_cull_callback().
1783  */
1784 void RenderState::
1785 determine_cull_callback() {
1786  LightMutexHolder holder(_lock);
1787  if ((_flags & F_checked_cull_callback) != 0) {
1788  // Someone else checked it first.
1789  return;
1790  }
1791 
1792  SlotMask mask = _filled_slots;
1793  int slot = mask.get_lowest_on_bit();
1794  while (slot >= 0) {
1795  const Attribute &attrib = _attributes[slot];
1796  nassertv(attrib._attrib != nullptr);
1797  if (attrib._attrib->has_cull_callback()) {
1798  _flags |= F_has_cull_callback;
1799  break;
1800  }
1801 
1802  mask.clear_bit(slot);
1803  slot = mask.get_lowest_on_bit();
1804  }
1805 
1806  _flags |= F_checked_cull_callback;
1807 }
1808 
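// [Editor's note] The SlotMask walk above is the standard idiom for visiting
// every filled attribute slot; in hedged generic form (visit() is a
// hypothetical callback):
//
//   SlotMask mask = _filled_slots;
//   int slot = mask.get_lowest_on_bit();
//   while (slot >= 0) {
//     visit(_attributes[slot]._attrib);  // per-attribute work goes here
//     mask.clear_bit(slot);              // consume this bit...
//     slot = mask.get_lowest_on_bit();   // ...and fetch the next lowest
//   }
//
// The same pattern appears again in write_datagram() below.
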
1809 /**
1810  * Fills up the state with all of the default attribs.
1811  */
1812 void RenderState::
1813 fill_default() {
 1814  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
 1815  int num_slots = reg->get_num_slots();
1816  for (int slot = 1; slot < num_slots; ++slot) {
1817  _attributes[slot].set(reg->get_slot_default(slot), 0);
1818  _filled_slots.set_bit(slot);
1819  }
1820 }
1821 
1822 /**
1823  * Moves the RenderState object from one PStats category to another, so that
1824  * we can track in PStats how many pointers are held by nodes, and how many
1825  * are held in the cache only.
1826  */
1827 void RenderState::
1828 update_pstats(int old_referenced_bits, int new_referenced_bits) {
1829 #ifdef DO_PSTATS
1830  if ((old_referenced_bits & R_node) != 0) {
1831  _node_counter.sub_level(1);
1832  } else if ((old_referenced_bits & R_cache) != 0) {
1833  _cache_counter.sub_level(1);
1834  }
1835  if ((new_referenced_bits & R_node) != 0) {
1836  _node_counter.add_level(1);
1837  } else if ((new_referenced_bits & R_cache) != 0) {
1838  _cache_counter.add_level(1);
1839  }
1840 #endif // DO_PSTATS
1841 }
1842 
1843 /**
1844  * Make sure the global _states map is allocated. This only has to be done
1845  * once. We could make this map static, but then we run into problems if
1846  * anyone creates a RenderState object at static init time; it also seems to
1847  * cause problems when the Panda shared library is unloaded at application
1848  * exit time.
1849  */
1850 void RenderState::
 1851 init_states() {
 1852  _states = new States;
1853 
1854  // TODO: we should have a global Panda mutex to allow us to safely create
1855  // _states_lock without a startup race condition. For the meantime, this is
1856  // OK because we guarantee that this method is called at static init time,
1857  // presumably when there is still only one thread in the world.
1858  _states_lock = new LightReMutex("RenderState::_states_lock");
1859  _cache_stats.init();
1860  nassertv(Thread::get_current_thread() == Thread::get_main_thread());
1861 
1862  // Initialize the empty state object as well. It is used so often that it
1863  // is declared globally, and lives forever.
1864  RenderState *state = new RenderState;
1865  state->local_object();
1866  state->_saved_entry = _states->store(state, nullptr);
1867  _empty_state = state;
1868 }
1869 
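// [Editor's note] A hedged sketch: the singleton constructed above is the
// object later handed out to callers, e.g. via RenderState::make_empty():
//
//   CPT(RenderState) empty = RenderState::make_empty();
//   nassertv(empty->is_empty());
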
1870 /**
1871  * Tells the BamReader how to create objects of type RenderState.
1872  */
1873 void RenderState::
 1874 register_with_read_factory() {
 1875  BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
1876 }
1877 
1878 /**
1879  * Writes the contents of this object to the datagram for shipping out to a
1880  * Bam file.
1881  */
1882 void RenderState::
 1883 write_datagram(BamWriter *manager, Datagram &dg) {
 1884  TypedWritable::write_datagram(manager, dg);
1885 
1886  int num_attribs = _filled_slots.get_num_on_bits();
1887  nassertv(num_attribs == (int)(uint16_t)num_attribs);
1888  dg.add_uint16(num_attribs);
1889 
1890  // **** We should smarten up the writing of the override number--most of the
1891  // time these will all be zero.
1892  SlotMask mask = _filled_slots;
1893  int slot = mask.get_lowest_on_bit();
1894  while (slot >= 0) {
1895  const Attribute &attrib = _attributes[slot];
1896  nassertv(attrib._attrib != nullptr);
1897  manager->write_pointer(dg, attrib._attrib);
1898  dg.add_int32(attrib._override);
1899 
1900  mask.clear_bit(slot);
1901  slot = mask.get_lowest_on_bit();
1902  }
1903 }
1904 
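// [Editor's note] A hedged reading of the calls above (not an official
// format spec): the record written for each RenderState is laid out as
//
//   uint16                  num_attribs
//   num_attribs times:
//     object pointer        (via manager->write_pointer())
//     int32                 override
//
// fillin(), below, consumes it in exactly this order: get_uint16(), then
// one read_pointer() and get_int32() per attribute.
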
1905 /**
1906  * Receives an array of pointers, one for each time manager->read_pointer()
1907  * was called in fillin(). Returns the number of pointers processed.
1908  */
1909 int RenderState::
 1910 complete_pointers(TypedWritable **p_list, BamReader *manager) {
 1911  int pi = TypedWritable::complete_pointers(p_list, manager);
1912 
1913  int num_attribs = 0;
1914 
 1915  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
 1916  for (size_t i = 0; i < (*_read_overrides).size(); ++i) {
1917  int override = (*_read_overrides)[i];
1918 
1919  RenderAttrib *attrib = DCAST(RenderAttrib, p_list[pi++]);
1920  if (attrib != nullptr) {
1921  int slot = attrib->get_slot();
1922  if (slot > 0 && slot < reg->get_max_slots()) {
1923  _attributes[slot].set(attrib, override);
1924  _filled_slots.set_bit(slot);
1925  ++num_attribs;
1926  }
1927  }
1928  }
1929 
1930  delete _read_overrides;
1931  _read_overrides = nullptr;
1932 
1933  return pi;
1934 }
1935 
1936 /**
1937  * Called immediately after complete_pointers(), this gives the object a
1938  * chance to adjust its own pointer if desired. Most objects don't change
1939  * pointers after completion, but some need to.
1940  *
1941  * Once this function has been called, the old pointer will no longer be
1942  * accessed.
1943  */
 1944 TypedWritable *RenderState::
 1945 change_this(TypedWritable *old_ptr, BamReader *manager) {
1946  // First, uniquify the pointer.
1947  RenderState *state = DCAST(RenderState, old_ptr);
1948  CPT(RenderState) pointer = return_unique(state);
1949 
1950  // But now we have a problem, since we have to hold the reference count and
1951  // there's no way to return a TypedWritable while still holding the
1952  // reference count! We work around this by explicitly upping the count, and
1953  // also setting a finalize() callback to down it later.
1954  if (pointer == state) {
1955  pointer->ref();
1956  manager->register_finalize(state);
1957  }
1958 
1959  // We have to cast the pointer back to non-const, because the bam reader
1960  // expects that.
1961  return (RenderState *)pointer.p();
1962 }
1963 
1964 /**
1965  * Called by the BamReader to perform any final actions needed for setting up
1966  * the object after all objects have been read and all pointers have been
1967  * completed.
1968  */
1969 void RenderState::
 1970 finalize(BamReader *manager) {
 1971  // Unref the pointer that we explicitly reffed in change_this().
1972  unref();
1973 
1974  // We should never get back to zero after unreffing our own count, because
1975  // we expect to have been stored in a pointer somewhere. If we do get to
1976  // zero, it's a memory leak; the way to avoid this is to call unref_delete()
1977  // above instead of unref(), but this is dangerous to do from within a
1978  // virtual function.
1979  nassertv(get_ref_count() != 0);
1980 }
1981 
1982 /**
1983  * This function is called by the BamReader's factory when a new object of
1984  * type RenderState is encountered in the Bam file. It should create the
1985  * RenderState and extract its information from the file.
1986  */
1987 TypedWritable *RenderState::
1988 make_from_bam(const FactoryParams &params) {
1989  RenderState *state = new RenderState;
1990  DatagramIterator scan;
1991  BamReader *manager;
1992 
1993  parse_params(params, scan, manager);
1994  state->fillin(scan, manager);
1995  manager->register_change_this(change_this, state);
1996 
1997  return state;
1998 }
1999 
2000 /**
2001  * This internal function is called by make_from_bam to read in all of the
2002  * relevant data from the BamFile for the new RenderState.
2003  */
2004 void RenderState::
2005 fillin(DatagramIterator &scan, BamReader *manager) {
2006  TypedWritable::fillin(scan, manager);
2007 
2008  int num_attribs = scan.get_uint16();
2009  _read_overrides = new vector_int;
2010  (*_read_overrides).reserve(num_attribs);
2011 
2012  for (int i = 0; i < num_attribs; ++i) {
2013  manager->read_pointer(scan);
2014  int override = scan.get_int32();
2015  (*_read_overrides).push_back(override);
2016  }
2017 }