Panda3D
renderState.cxx
Go to the documentation of this file.
1 /**
2  * PANDA 3D SOFTWARE
3  * Copyright (c) Carnegie Mellon University. All rights reserved.
4  *
5  * All use of this software is subject to the terms of the revised BSD
6  * license. You should have received a copy of this license along
7  * with this source code in a file named "LICENSE."
8  *
9  * @file renderState.cxx
10  * @author drose
11  * @date 2002-02-21
12  */
13 
14 #include "renderState.h"
15 #include "transparencyAttrib.h"
16 #include "cullBinAttrib.h"
17 #include "cullBinManager.h"
18 #include "fogAttrib.h"
19 #include "clipPlaneAttrib.h"
20 #include "scissorAttrib.h"
21 #include "transparencyAttrib.h"
22 #include "colorAttrib.h"
23 #include "colorScaleAttrib.h"
24 #include "textureAttrib.h"
25 #include "texGenAttrib.h"
26 #include "shaderAttrib.h"
27 #include "pStatTimer.h"
28 #include "config_pgraph.h"
29 #include "bamReader.h"
30 #include "bamWriter.h"
31 #include "datagramIterator.h"
32 #include "indent.h"
33 #include "compareTo.h"
34 #include "lightReMutexHolder.h"
35 #include "lightMutexHolder.h"
36 #include "thread.h"
37 #include "renderAttribRegistry.h"
38 
39 using std::ostream;
40 
// Guards the global _states registry and every RenderState's composition
// caches; taken by compose(), unref(), clear_cache(), etc.
LightReMutex *RenderState::_states_lock = nullptr;
// The global registry of all unique RenderState objects (lazily created by
// init_states() on first construction).
RenderState::States *RenderState::_states = nullptr;
// The canonical state with no attributes set.
const RenderState *RenderState::_empty_state = nullptr;
// NOTE(review): presumably bumped per cycle-detection pass so states aren't
// revisited -- confirm against detect_and_break_cycles().
UpdateSeq RenderState::_last_cycle_detect;
// Resume index for the incremental scan in garbage_collect().
size_t RenderState::_garbage_index = 0;

// PStatCollectors instrumenting the state cache for the PStats profiler.
PStatCollector RenderState::_cache_update_pcollector("*:State Cache:Update");
PStatCollector RenderState::_garbage_collect_pcollector("*:State Cache:Garbage Collect");
PStatCollector RenderState::_state_compose_pcollector("*:State Cache:Compose State");
PStatCollector RenderState::_state_invert_pcollector("*:State Cache:Invert State");
PStatCollector RenderState::_node_counter("RenderStates:On nodes");
PStatCollector RenderState::_cache_counter("RenderStates:Cached");
PStatCollector RenderState::_state_break_cycles_pcollector("*:State Cache:Break Cycles");
PStatCollector RenderState::_state_validate_pcollector("*:State Cache:Validate");

// Aggregate statistics (hits/misses/adds) for the composition caches.
CacheStats RenderState::_cache_stats;

TypeHandle RenderState::_type_handle;
59 
60 
/**
 * Actually, this could be a private constructor, since no one inherits from
 * RenderState, but gcc gives us a spurious warning if all constructors are
 * private.
 */
RenderState::
RenderState() :
  _flags(0),
  _lock("RenderState")
{
  // Lazily create the global registry the first time any state is made.
  if (_states == nullptr) {
    init_states();
  }
  // -1 means "not yet recorded in the global _states registry".
  _saved_entry = -1;
  // NOTE(review): looks like a cached index invalidation sentinel -- confirm
  // what _last_mi indexes.
  _last_mi = -1;
  _cache_stats.add_num_states(1);
  _read_overrides = nullptr;
  _generated_shader = nullptr;

#ifdef DO_MEMORY_USAGE
  MemoryUsage::update_type(this, this);
#endif
}
84 
85 /**
86  * RenderStates are only meant to be copied internally.
87  */
88 RenderState::
89 RenderState(const RenderState &copy) :
90  _filled_slots(copy._filled_slots),
91  _flags(0),
92  _lock("RenderState")
93 {
94  // Copy over the attributes.
95  for (int i = 0; i < RenderAttribRegistry::_max_slots; ++i) {
96  _attributes[i] = copy._attributes[i];
97  }
98 
99  _saved_entry = -1;
100  _last_mi = -1;
101  _cache_stats.add_num_states(1);
102  _read_overrides = nullptr;
103  _generated_shader = nullptr;
104 
105 #ifdef DO_MEMORY_USAGE
106  MemoryUsage::update_type(this, this);
107 #endif
108 }
109 
/**
 * The destructor is responsible for removing the RenderState from the global
 * set if it is there.
 */
RenderState::
~RenderState() {
  // We'd better not call the destructor twice on a particular object.
  nassertv(!is_destructing());
  set_destructing();

  // The lock must be held while tearing down, so no other thread can find
  // this object through the registry or caches mid-destruction.
  LightReMutexHolder holder(*_states_lock);

  // unref() should have cleared these.
  nassertv(_saved_entry == -1);
  nassertv(_composition_cache.is_empty() && _invert_composition_cache.is_empty());

  // If this was true at the beginning of the destructor, but is no longer
  // true now, probably we've been double-deleted.
  nassertv(get_ref_count() == 0);
  _cache_stats.add_num_states(-1);
}
131 
132 /**
133  * Provides an arbitrary ordering among all unique RenderStates, so we can
134  * store the essentially different ones in a big set and throw away the rest.
135  *
136  * This method is not needed outside of the RenderState class because all
137  * equivalent RenderState objects are guaranteed to share the same pointer;
138  * thus, a pointer comparison is always sufficient.
139  */
141 compare_to(const RenderState &other) const {
142  SlotMask mask = _filled_slots | other._filled_slots;
143  int slot = mask.get_lowest_on_bit();
144  while (slot >= 0) {
145  int result = _attributes[slot].compare_to(other._attributes[slot]);
146  if (result != 0) {
147  return result;
148  }
149  mask.clear_bit(slot);
150  slot = mask.get_lowest_on_bit();
151  }
152 
153  return 0;
154 }
155 
156 /**
157  * Returns -1, 0, or 1 according to the relative sorting of these two
158  * RenderStates, with regards to rendering performance, so that "heavier"
159  * RenderAttribs (as defined by RenderAttribRegistry::get_slot_sort()) are
160  * more likely to be grouped together. This is not related to the sorting
161  * order defined by compare_to.
162  */
164 compare_sort(const RenderState &other) const {
165  if (this == &other) {
166  // Trivial case.
167  return 0;
168  }
169 
171  int num_sorted_slots = reg->get_num_sorted_slots();
172  for (int n = 0; n < num_sorted_slots; ++n) {
173  int slot = reg->get_sorted_slot(n);
174  nassertr((_attributes[slot]._attrib != nullptr) == _filled_slots.get_bit(slot), 0);
175 
176  const RenderAttrib *a = _attributes[slot]._attrib;
177  const RenderAttrib *b = other._attributes[slot]._attrib;
178  if (a != b) {
179  return a < b ? -1 : 1;
180  }
181  }
182 
183  return 0;
184 }
185 
186 /**
187  * This version of compare_to takes a slot mask that indicates which
188  * attributes to include in the comparison. Unlike compare_to, this method
189  * compares the attributes by pointer.
190  */
192 compare_mask(const RenderState &other, SlotMask compare_mask) const {
193  SlotMask mask = (_filled_slots | other._filled_slots) & compare_mask;
194  int slot = mask.get_lowest_on_bit();
195  while (slot >= 0) {
196  const RenderAttrib *a = _attributes[slot]._attrib;
197  const RenderAttrib *b = other._attributes[slot]._attrib;
198  if (a != b) {
199  return a < b ? -1 : 1;
200  }
201  mask.clear_bit(slot);
202  slot = mask.get_lowest_on_bit();
203  }
204 
205  return 0;
206 }
207 
208 /**
209  * Calls cull_callback() on each attrib. If any attrib returns false,
210  * interrupts the list and returns false immediately; otherwise, completes the
211  * list and returns true.
212  */
214 cull_callback(CullTraverser *trav, const CullTraverserData &data) const {
215  SlotMask mask = _filled_slots;
216  int slot = mask.get_lowest_on_bit();
217  while (slot >= 0) {
218  const Attribute &attrib = _attributes[slot];
219  nassertr(attrib._attrib != nullptr, false);
220  if (!attrib._attrib->cull_callback(trav, data)) {
221  return false;
222  }
223 
224  mask.clear_bit(slot);
225  slot = mask.get_lowest_on_bit();
226  }
227 
228  return true;
229 }
230 
231 /**
232  * Returns a RenderState with one attribute set.
233  */
234 CPT(RenderState) RenderState::
235 make(const RenderAttrib *attrib, int override) {
236  RenderState *state = new RenderState;
237  int slot = attrib->get_slot();
238  state->_attributes[slot].set(attrib, override);
239  state->_filled_slots.set_bit(slot);
240  return return_new(state);
241 }
242 
243 /**
244  * Returns a RenderState with two attributes set.
245  */
246 CPT(RenderState) RenderState::
247 make(const RenderAttrib *attrib1,
248  const RenderAttrib *attrib2, int override) {
249  RenderState *state = new RenderState;
250  state->_attributes[attrib1->get_slot()].set(attrib1, override);
251  state->_attributes[attrib2->get_slot()].set(attrib2, override);
252  state->_filled_slots.set_bit(attrib1->get_slot());
253  state->_filled_slots.set_bit(attrib2->get_slot());
254  return return_new(state);
255 }
256 
257 /**
258  * Returns a RenderState with three attributes set.
259  */
260 CPT(RenderState) RenderState::
261 make(const RenderAttrib *attrib1,
262  const RenderAttrib *attrib2,
263  const RenderAttrib *attrib3, int override) {
264  RenderState *state = new RenderState;
265  state->_attributes[attrib1->get_slot()].set(attrib1, override);
266  state->_attributes[attrib2->get_slot()].set(attrib2, override);
267  state->_attributes[attrib3->get_slot()].set(attrib3, override);
268  state->_filled_slots.set_bit(attrib1->get_slot());
269  state->_filled_slots.set_bit(attrib2->get_slot());
270  state->_filled_slots.set_bit(attrib3->get_slot());
271  return return_new(state);
272 }
273 
274 /**
275  * Returns a RenderState with four attributes set.
276  */
277 CPT(RenderState) RenderState::
278 make(const RenderAttrib *attrib1,
279  const RenderAttrib *attrib2,
280  const RenderAttrib *attrib3,
281  const RenderAttrib *attrib4, int override) {
282  RenderState *state = new RenderState;
283  state->_attributes[attrib1->get_slot()].set(attrib1, override);
284  state->_attributes[attrib2->get_slot()].set(attrib2, override);
285  state->_attributes[attrib3->get_slot()].set(attrib3, override);
286  state->_attributes[attrib4->get_slot()].set(attrib4, override);
287  state->_filled_slots.set_bit(attrib1->get_slot());
288  state->_filled_slots.set_bit(attrib2->get_slot());
289  state->_filled_slots.set_bit(attrib3->get_slot());
290  state->_filled_slots.set_bit(attrib4->get_slot());
291  return return_new(state);
292 }
293 
294 /**
295  * Returns a RenderState with five attributes set.
296  */
297 CPT(RenderState) RenderState::
298 make(const RenderAttrib *attrib1,
299  const RenderAttrib *attrib2,
300  const RenderAttrib *attrib3,
301  const RenderAttrib *attrib4,
302  const RenderAttrib *attrib5, int override) {
303  RenderState *state = new RenderState;
304  state->_attributes[attrib1->get_slot()].set(attrib1, override);
305  state->_attributes[attrib2->get_slot()].set(attrib2, override);
306  state->_attributes[attrib3->get_slot()].set(attrib3, override);
307  state->_attributes[attrib4->get_slot()].set(attrib4, override);
308  state->_attributes[attrib5->get_slot()].set(attrib5, override);
309  state->_filled_slots.set_bit(attrib1->get_slot());
310  state->_filled_slots.set_bit(attrib2->get_slot());
311  state->_filled_slots.set_bit(attrib3->get_slot());
312  state->_filled_slots.set_bit(attrib4->get_slot());
313  state->_filled_slots.set_bit(attrib5->get_slot());
314  return return_new(state);
315 }
316 
317 /**
318  * Returns a RenderState with n attributes set.
319  */
320 CPT(RenderState) RenderState::
321 make(const RenderAttrib * const *attrib, int num_attribs, int override) {
322  if (num_attribs == 0) {
323  return _empty_state;
324  }
325  RenderState *state = new RenderState;
326  for (int i = 0; i < num_attribs; i++) {
327  int slot = attrib[i]->get_slot();
328  state->_attributes[slot].set(attrib[i], override);
329  state->_filled_slots.set_bit(slot);
330  }
331  return return_new(state);
332 }
333 
/**
 * Returns a new RenderState object that represents the composition of this
 * state with the other state.
 *
 * The result of this operation is cached, and will be retained as long as
 * both this RenderState object and the other RenderState object continue to
 * exist.  Should one of them destruct, the cached entry will be removed, and
 * its pointer will be allowed to destruct as well.
 */
CPT(RenderState) RenderState::
compose(const RenderState *other) const {
  // This method isn't strictly const, because it updates the cache, but we
  // pretend that it is because it's only a cache which is transparent to the
  // rest of the interface.

  // We handle empty state (identity) as a trivial special case.
  if (is_empty()) {
    return other;
  }
  if (other->is_empty()) {
    return this;
  }

  if (!state_cache) {
    // Caching disabled by config variable: just compute the result.
    return do_compose(other);
  }

  LightReMutexHolder holder(*_states_lock);

  // Is this composition already cached?
  int index = _composition_cache.find(other);
  if (index != -1) {
    Composition &comp = ((RenderState *)this)->_composition_cache.modify_data(index);
    if (comp._result == nullptr) {
      // Well, it wasn't cached already, but we already had an entry (probably
      // created for the reverse direction), so use the same entry to store
      // the new result.
      CPT(RenderState) result = do_compose(other);
      comp._result = result;

      if (result != (const RenderState *)this) {
        // See the comments below about the need to up the reference count
        // only when the result is not the same as this.
        result->cache_ref();
      }
    }
    // Here's the cache!
    _cache_stats.inc_hits();
    return comp._result;
  }
  _cache_stats.inc_misses();

  // We need to make a new cache entry, both in this object and in the other
  // object.  We make both records so the other RenderState object will know
  // to delete the entry from this object when it destructs, and vice-versa.

  // The cache entry in this object is the only one that indicates the result;
  // the other will be NULL for now.
  CPT(RenderState) result = do_compose(other);

  _cache_stats.add_total_size(1);
  _cache_stats.inc_adds(_composition_cache.is_empty());

  ((RenderState *)this)->_composition_cache[other]._result = result;

  if (other != this) {
    // Record the reverse-direction placeholder in the other state's cache.
    _cache_stats.add_total_size(1);
    _cache_stats.inc_adds(other->_composition_cache.is_empty());
    ((RenderState *)other)->_composition_cache[this]._result = nullptr;
  }

  if (result != (const RenderState *)this) {
    // If the result of compose() is something other than this, explicitly
    // increment the reference count.  We have to be sure to decrement it
    // again later, when the composition entry is removed from the cache.
    result->cache_ref();

    // (If the result was just this again, we still store the result, but we
    // don't increment the reference count, since that would be a self-
    // referential leak.)
  }

  _cache_stats.maybe_report("RenderState");

  return result;
}
420 
/**
 * Returns a new RenderState object that represents the composition of this
 * state's inverse with the other state.
 *
 * This is similar to compose(), but is particularly useful for computing the
 * relative state of a node as viewed from some other node.
 */
CPT(RenderState) RenderState::
invert_compose(const RenderState *other) const {
  // This method isn't strictly const, because it updates the cache, but we
  // pretend that it is because it's only a cache which is transparent to the
  // rest of the interface.

  // We handle empty state (identity) as a trivial special case.
  if (is_empty()) {
    return other;
  }
  // Unlike compose(), the case of other->is_empty() is not quite as trivial
  // for invert_compose().

  if (other == this) {
    // a->invert_compose(a) always produces identity.
    return _empty_state;
  }

  if (!state_cache) {
    // Caching disabled by config variable: just compute the result.
    return do_invert_compose(other);
  }

  LightReMutexHolder holder(*_states_lock);

  // Is this composition already cached?
  int index = _invert_composition_cache.find(other);
  if (index != -1) {
    Composition &comp = ((RenderState *)this)->_invert_composition_cache.modify_data(index);
    if (comp._result == nullptr) {
      // Well, it wasn't cached already, but we already had an entry (probably
      // created for the reverse direction), so use the same entry to store
      // the new result.
      CPT(RenderState) result = do_invert_compose(other);
      comp._result = result;

      if (result != (const RenderState *)this) {
        // See the comments below about the need to up the reference count
        // only when the result is not the same as this.
        result->cache_ref();
      }
    }
    // Here's the cache!
    _cache_stats.inc_hits();
    return comp._result;
  }
  _cache_stats.inc_misses();

  // We need to make a new cache entry, both in this object and in the other
  // object.  We make both records so the other RenderState object will know
  // to delete the entry from this object when it destructs, and vice-versa.

  // The cache entry in this object is the only one that indicates the result;
  // the other will be NULL for now.
  CPT(RenderState) result = do_invert_compose(other);

  _cache_stats.add_total_size(1);
  _cache_stats.inc_adds(_invert_composition_cache.is_empty());
  ((RenderState *)this)->_invert_composition_cache[other]._result = result;

  if (other != this) {
    // Record the reverse-direction placeholder in the other state's cache.
    _cache_stats.add_total_size(1);
    _cache_stats.inc_adds(other->_invert_composition_cache.is_empty());
    ((RenderState *)other)->_invert_composition_cache[this]._result = nullptr;
  }

  if (result != (const RenderState *)this) {
    // If the result of compose() is something other than this, explicitly
    // increment the reference count.  We have to be sure to decrement it
    // again later, when the composition entry is removed from the cache.
    result->cache_ref();

    // (If the result was just this again, we still store the result, but we
    // don't increment the reference count, since that would be a self-
    // referential leak.)
  }

  return result;
}
506 
507 /**
508  * Returns a new RenderState object that represents the same as the source
509  * state, with the new RenderAttrib added. If there is already a RenderAttrib
510  * with the same type, it is replaced (unless the override is lower).
511  */
512 CPT(RenderState) RenderState::
513 add_attrib(const RenderAttrib *attrib, int override) const {
514  int slot = attrib->get_slot();
515  if (_filled_slots.get_bit(slot) &&
516  _attributes[slot]._override > override) {
517  // The existing attribute overrides.
518  return this;
519  }
520 
521  // The new attribute replaces.
522  RenderState *new_state = new RenderState(*this);
523  new_state->_attributes[slot].set(attrib, override);
524  new_state->_filled_slots.set_bit(slot);
525  return return_new(new_state);
526 }
527 
528 /**
529  * Returns a new RenderState object that represents the same as the source
530  * state, with the new RenderAttrib added. If there is already a RenderAttrib
531  * with the same type, it is replaced unconditionally. The override is not
532  * changed.
533  */
534 CPT(RenderState) RenderState::
535 set_attrib(const RenderAttrib *attrib) const {
536  RenderState *new_state = new RenderState(*this);
537  int slot = attrib->get_slot();
538  new_state->_attributes[slot]._attrib = attrib;
539  new_state->_filled_slots.set_bit(slot);
540  return return_new(new_state);
541 }
542 
543 /**
544  * Returns a new RenderState object that represents the same as the source
545  * state, with the new RenderAttrib added. If there is already a RenderAttrib
546  * with the same type, it is replaced unconditionally. The override is also
547  * replaced unconditionally.
548  */
549 CPT(RenderState) RenderState::
550 set_attrib(const RenderAttrib *attrib, int override) const {
551  RenderState *new_state = new RenderState(*this);
552  int slot = attrib->get_slot();
553  new_state->_attributes[slot].set(attrib, override);
554  new_state->_filled_slots.set_bit(slot);
555  return return_new(new_state);
556 }
557 
558 /**
559  * Returns a new RenderState object that represents the same as the source
560  * state, with the indicated RenderAttrib removed.
561  */
562 CPT(RenderState) RenderState::
563 remove_attrib(int slot) const {
564  if (_attributes[slot]._attrib == nullptr) {
565  // Already removed.
566  return this;
567  }
568 
569  // Will this bring us down to the empty state?
570  if (_filled_slots.get_num_on_bits() == 1) {
571  return _empty_state;
572  }
573 
574  RenderState *new_state = new RenderState(*this);
575  new_state->_attributes[slot].set(nullptr, 0);
576  new_state->_filled_slots.clear_bit(slot);
577  return return_new(new_state);
578 }
579 
580 /**
581  * Returns a new RenderState object that represents the same as the source
582  * state, with all attributes' override values incremented (or decremented, if
583  * negative) by the indicated amount. If the override would drop below zero,
584  * it is set to zero.
585  */
586 CPT(RenderState) RenderState::
587 adjust_all_priorities(int adjustment) const {
588  RenderState *new_state = new RenderState(*this);
589 
590  SlotMask mask = _filled_slots;
591  int slot = mask.get_lowest_on_bit();
592  while (slot >= 0) {
593  Attribute &attrib = new_state->_attributes[slot];
594  nassertr(attrib._attrib != nullptr, this);
595  attrib._override = std::max(attrib._override + adjustment, 0);
596 
597  mask.clear_bit(slot);
598  slot = mask.get_lowest_on_bit();
599  }
600 
601  return return_new(new_state);
602 }
603 
/**
 * This method overrides ReferenceCount::unref() to check whether the
 * remaining reference count is entirely in the cache, and if so, it checks
 * for and breaks a cycle in the cache involving this object.  This is
 * designed to prevent leaks from cyclical references within the cache.
 */
bool RenderState::
unref() const {
  if (garbage_collect_states || !state_cache) {
    // If we're not using the cache at all, or if we're relying on garbage
    // collection, just allow the pointer to unref normally.
    return ReferenceCount::unref();
  }

  // Here is the normal refcounting case, with a normal cache, and without
  // garbage collection in effect.  In this case we will pull the object out
  // of the cache when its reference count goes to 0.

  // We always have to grab the lock, since we will definitely need to be
  // holding it if we happen to drop the reference count to 0.  Having to grab
  // the lock at every call to unref() is a big limiting factor on
  // parallelization.
  LightReMutexHolder holder(*_states_lock);

  if (auto_break_cycles && uniquify_states) {
    if (get_cache_ref_count() > 0 &&
        get_ref_count() == get_cache_ref_count() + 1) {
      // If we are about to remove the one reference that is not in the cache,
      // leaving only references in the cache, then we need to check for a
      // cycle involving this RenderState and break it if it exists.
      ((RenderState *)this)->detect_and_break_cycles();
    }
  }

  if (ReferenceCount::unref()) {
    // The reference count is still nonzero.
    return true;
  }

  // The reference count has just reached zero.  Make sure the object is
  // removed from the global object pool, before anyone else finds it and
  // tries to ref it.  (We still hold _states_lock here, which makes this
  // safe against a concurrent lookup.)
  ((RenderState *)this)->release_new();
  ((RenderState *)this)->remove_cache_pointers();

  return false;
}
651 
652 /**
653  *
654  */
655 void RenderState::
656 output(ostream &out) const {
657  out << "S:";
658  if (is_empty()) {
659  out << "(empty)";
660 
661  } else {
662  out << "(";
663  const char *sep = "";
664 
665  SlotMask mask = _filled_slots;
666  int slot = mask.get_lowest_on_bit();
667  while (slot >= 0) {
668  const Attribute &attrib = _attributes[slot];
669  nassertv(attrib._attrib != nullptr);
670  out << sep << attrib._attrib->get_type();
671  sep = " ";
672 
673  mask.clear_bit(slot);
674  slot = mask.get_lowest_on_bit();
675  }
676  out << ")";
677  }
678 }
679 
680 /**
681  *
682  */
683 void RenderState::
684 write(ostream &out, int indent_level) const {
685  if (is_empty()) {
686  indent(out, indent_level)
687  << "(empty)\n";
688  }
689 
690  SlotMask mask = _filled_slots;
691  int slot = mask.get_lowest_on_bit();
692  while (slot >= 0) {
693  const Attribute &attrib = _attributes[slot];
694  nassertv(attrib._attrib != nullptr);
695  attrib._attrib->write(out, indent_level);
696 
697  mask.clear_bit(slot);
698  slot = mask.get_lowest_on_bit();
699  }
700 }
701 
/**
 * Returns the maximum priority number (sometimes called override) that may be
 * set on any node.  This may or may not be enforced, but the scene graph code
 * assumes that no priority numbers will be larger than this, and some effects
 * may not work properly if you use a larger number.
 */
int RenderState::
get_max_priority() {
  // An arbitrarily large ceiling (one billion), comfortably within int range.
  return 1000000000;
}
712 
/**
 * Returns the total number of unique RenderState objects allocated in the
 * world.  This will go up and down during normal operations.
 */
int RenderState::
get_num_states() {
  if (_states == nullptr) {
    // The registry has never been created, so no states exist yet.
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);
  return _states->get_num_entries();
}
725 
/**
 * Returns the total number of RenderState objects that have been allocated
 * but have no references outside of the internal RenderState cache.
 *
 * A nonzero return value is not necessarily indicative of leaked references;
 * it is normal for two RenderState objects, both of which have references
 * held outside the cache, to have to result of their composition stored
 * within the cache.  This result will be retained within the cache until one
 * of the base RenderStates is released.
 *
 * Use list_cycles() to get an idea of the number of actual "leaked"
 * RenderState objects.
 */
int RenderState::
get_num_unused_states() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);

  // First, we need to count the number of times each RenderState object is
  // recorded in the cache.
  typedef pmap<const RenderState *, int> StateCount;
  StateCount state_count;

  size_t size = _states->get_num_entries();
  for (size_t si = 0; si < size; ++si) {
    const RenderState *state = _states->get_key(si);

    // Tally every cached composition result in the forward cache...
    size_t i;
    size_t cache_size = state->_composition_cache.get_num_entries();
    for (i = 0; i < cache_size; ++i) {
      const RenderState *result = state->_composition_cache.get_data(i)._result;
      if (result != nullptr && result != state) {
        // Here's a RenderState that's recorded in the cache.  Count it.
        std::pair<StateCount::iterator, bool> ir =
          state_count.insert(StateCount::value_type(result, 1));
        if (!ir.second) {
          // If the above insert operation fails, then it's already in the
          // cache; increment its value.
          (*(ir.first)).second++;
        }
      }
    }
    // ...and in the invert-composition cache.
    cache_size = state->_invert_composition_cache.get_num_entries();
    for (i = 0; i < cache_size; ++i) {
      const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
      if (result != nullptr && result != state) {
        std::pair<StateCount::iterator, bool> ir =
          state_count.insert(StateCount::value_type(result, 1));
        if (!ir.second) {
          (*(ir.first)).second++;
        }
      }
    }
  }

  // Now that we have the appearance count of each RenderState object, we can
  // tell which ones are unreferenced outside of the RenderState cache, by
  // comparing these to the reference counts.
  int num_unused = 0;

  StateCount::iterator sci;
  for (sci = state_count.begin(); sci != state_count.end(); ++sci) {
    const RenderState *state = (*sci).first;
    int count = (*sci).second;
    // Sanity checks: the tallied count must match the cache-ref count and
    // can never exceed the full ref count.
    nassertr(count == state->get_cache_ref_count(), num_unused);
    nassertr(count <= state->get_ref_count(), num_unused);
    if (count == state->get_ref_count()) {
      // All references are cache references: this state is "unused".
      num_unused++;

      if (pgraph_cat.is_debug()) {
        pgraph_cat.debug()
          << "Unused state: " << (void *)state << ":"
          << state->get_ref_count() << " =\n";
        state->write(pgraph_cat.debug(false), 2);
      }
    }
  }

  return num_unused;
}
808 
/**
 * Empties the cache of composed RenderStates.  This makes every RenderState
 * forget what results when it is composed with other RenderStates.
 *
 * This will eliminate any RenderState objects that have been allocated but
 * have no references outside of the internal RenderState map.  It will not
 * eliminate RenderState objects that are still in use.
 *
 * Nowadays, this method should not be necessary, as reference-count cycles in
 * the composition cache should be automatically detected and broken.
 *
 * The return value is the number of RenderStates freed by this operation.
 */
int RenderState::
clear_cache() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);

  PStatTimer timer(_cache_update_pcollector);
  int orig_size = _states->get_num_entries();

  // First, we need to copy the entire set of states to a temporary vector,
  // reference-counting each object.  That way we can walk through the copy,
  // without fear of dereferencing (and deleting) the objects in the map as we
  // go.
  {
    typedef pvector< CPT(RenderState) > TempStates;
    TempStates temp_states;
    temp_states.reserve(orig_size);

    size_t size = _states->get_num_entries();
    for (size_t si = 0; si < size; ++si) {
      const RenderState *state = _states->get_key(si);
      temp_states.push_back(state);
    }

    // Now it's safe to walk through the list, destroying the cache within
    // each object as we go.  Nothing will be destructed till we're done.
    TempStates::iterator ti;
    for (ti = temp_states.begin(); ti != temp_states.end(); ++ti) {
      RenderState *state = (RenderState *)(*ti).p();

      // Drop the cache reference held on each cached composition result
      // (see compose() for where it was added), then clear the cache.
      size_t i;
      size_t cache_size = (int)state->_composition_cache.get_num_entries();
      for (i = 0; i < cache_size; ++i) {
        const RenderState *result = state->_composition_cache.get_data(i)._result;
        if (result != nullptr && result != state) {
          result->cache_unref();
          nassertr(result->get_ref_count() > 0, 0);
        }
      }
      _cache_stats.add_total_size(-(int)state->_composition_cache.get_num_entries());
      state->_composition_cache.clear();

      // Same treatment for the invert-composition cache.
      cache_size = (int)state->_invert_composition_cache.get_num_entries();
      for (i = 0; i < cache_size; ++i) {
        const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
        if (result != nullptr && result != state) {
          result->cache_unref();
          nassertr(result->get_ref_count() > 0, 0);
        }
      }
      _cache_stats.add_total_size(-(int)state->_invert_composition_cache.get_num_entries());
      state->_invert_composition_cache.clear();
    }

    // Once this block closes and the temp_states object goes away, all the
    // destruction will begin.  Anything whose reference was held only within
    // the various objects' caches will go away.
  }

  int new_size = _states->get_num_entries();
  return orig_size - new_size;
}
885 
886 /**
887  * Performs a garbage-collection cycle. This must be called periodically if
888  * garbage-collect-states is true to ensure that RenderStates get cleaned up
889  * appropriately. It does no harm to call it even if this variable is not
890  * true, but there is probably no advantage in that case.
891  *
892  * This automatically calls RenderAttrib::garbage_collect() as well.
893  */
895 garbage_collect() {
896  int num_attribs = RenderAttrib::garbage_collect();
897 
898  if (_states == nullptr || !garbage_collect_states) {
899  return num_attribs;
900  }
901 
902  LightReMutexHolder holder(*_states_lock);
903 
904  PStatTimer timer(_garbage_collect_pcollector);
905  size_t orig_size = _states->get_num_entries();
906 
907  // How many elements to process this pass?
908  size_t size = orig_size;
909  size_t num_this_pass = std::max(0, int(size * garbage_collect_states_rate));  // rate is a configured fraction of the table per pass
910  if (num_this_pass <= 0) {
911  return num_attribs;
912  }
913 
914  bool break_and_uniquify = (auto_break_cycles && uniquify_transforms);
915 
916  size_t si = _garbage_index;  // resume the sweep where the previous pass stopped
917  if (si >= size) {
918  si = 0;
919  }
920 
921  num_this_pass = std::min(num_this_pass, size);
922  size_t stop_at_element = (si + num_this_pass) % size;  // the sweep window may wrap past the end of the table
923 
924  do {
925  RenderState *state = (RenderState *)_states->get_key(si);
926  if (break_and_uniquify) {
927  if (state->get_cache_ref_count() > 0 &&
928  state->get_ref_count() == state->get_cache_ref_count()) {
929  // If we have removed all the references to this state not in the
930  // cache, leaving only references in the cache, then we need to
931  // check for a cycle involving this RenderState and break it if it
932  // exists.
933  state->detect_and_break_cycles();
934  }
935  }
936 
937  if (!state->unref_if_one()) {
938  // This state has recently been unreffed to 1 (the one we added when
939  // we stored it in the cache). Now it's time to delete it. This is
940  // safe, because we're holding the _states_lock, so it's not possible
941  // for some other thread to find the state in the cache and ref it
942  // while we're doing this. Also, we've just made sure to unref it to 0,
943  // to ensure that another thread can't get it via a weak pointer.
944 
945  state->release_new();
946  state->remove_cache_pointers();
947  state->cache_unref_only();
948  delete state;
949 
950  // When we removed it from the hash map, it swapped the last element
951  // with the one we just removed. So the current index contains one we
952  // still need to visit.
953  --size;
954  --si;  // step back so the increment below revisits this slot
955  if (stop_at_element > 0) {
956  --stop_at_element;
957  }
958  }
959 
960  si = (si + 1) % size;
961  } while (si != stop_at_element);
962  _garbage_index = si;  // remember where to resume on the next pass
963 
964  nassertr(_states->get_num_entries() == size, 0);
965 
966 #ifdef _DEBUG
967  nassertr(_states->validate(), 0);
968 #endif
969 
970  // If we just cleaned up a lot of states, see if we can reduce the table in
971  // size. This will help reduce iteration overhead in the future.
972  _states->consider_shrink_table();
973 
974  return (int)orig_size - (int)size + num_attribs;
975 }
976 
977 /**
978  * Completely empties the cache of state + gsg -> munger, for all states and
979  * all gsg's. Normally there is no need to empty this cache.
980  */
983  LightReMutexHolder holder(*_states_lock);
984 
985  size_t size = _states->get_num_entries();
986  for (size_t si = 0; si < size; ++si) {
987  RenderState *state = (RenderState *)(_states->get_key(si));
988  state->_mungers.clear();
989  state->_munged_states.clear();
990  state->_last_mi = -1;  // also invalidate the cached last-munger index
991  }
992 }
993 
994 /**
995  * Detects all of the reference-count cycles in the cache and reports them to
996  * standard output.
997  *
998  * These cycles may be inadvertently created when state compositions cycle
999  * back to a starting point. Nowadays, these cycles should be automatically
1000  * detected and broken, so this method should never list any cycles unless
1001  * there is a bug in that detection logic.
1002  *
1003  * The cycles listed here are not leaks in the strictest sense of the word,
1004  * since they can be reclaimed by a call to clear_cache(); but they will not
1005  * be reclaimed automatically.
1006  */
1008 list_cycles(ostream &out) {
1009  if (_states == nullptr) {
1010  return;
1011  }
1012  LightReMutexHolder holder(*_states_lock);
1013 
1014  typedef pset<const RenderState *> VisitedStates;
1015  VisitedStates visited;
1016  CompositionCycleDesc cycle_desc;
1017 
1018  size_t size = _states->get_num_entries();
1019  for (size_t si = 0; si < size; ++si) {
1020  const RenderState *state = _states->get_key(si);
1021 
1022  bool inserted = visited.insert(state).second;
1023  if (inserted) {
1024  ++_last_cycle_detect;  // each traversal needs a fresh sequence number
1025  if (r_detect_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
1026  // This state begins a cycle.
1027  CompositionCycleDesc::reverse_iterator csi;
1028 
1029  out << "\nCycle detected of length " << cycle_desc.size() + 1 << ":\n"
1030  << "state " << (void *)state << ":" << state->get_ref_count()
1031  << " =\n";
1032  state->write(out, 2);
1033  for (csi = cycle_desc.rbegin(); csi != cycle_desc.rend(); ++csi) {
1034  const CompositionCycleDescEntry &entry = (*csi);
1035  if (entry._inverted) {
1036  out << "invert composed with ";
1037  } else {
1038  out << "composed with ";
1039  }
1040  out << (const void *)entry._obj << ":" << entry._obj->get_ref_count()
1041  << " " << *entry._obj << "\n"
1042  << "produces " << (const void *)entry._result << ":"
1043  << entry._result->get_ref_count() << " =\n";
1044  entry._result->write(out, 2);
1045  visited.insert(entry._result);  // don't report members of this cycle again
1046  }
1047 
1048  cycle_desc.clear();
1049  } else {
1050  ++_last_cycle_detect;
1051  if (r_detect_reverse_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
1052  // This state begins a cycle.
1053  CompositionCycleDesc::iterator csi;
1054 
1055  out << "\nReverse cycle detected of length " << cycle_desc.size() + 1 << ":\n"
1056  << "state ";
1057  for (csi = cycle_desc.begin(); csi != cycle_desc.end(); ++csi) {
1058  const CompositionCycleDescEntry &entry = (*csi);
1059  out << (const void *)entry._result << ":"
1060  << entry._result->get_ref_count() << " =\n";
1061  entry._result->write(out, 2);
1062  out << (const void *)entry._obj << ":"
1063  << entry._obj->get_ref_count() << " =\n";
1064  entry._obj->write(out, 2);
1065  visited.insert(entry._result);
1066  }
1067  out << (void *)state << ":"
1068  << state->get_ref_count() << " =\n";
1069  state->write(out, 2);
1070 
1071  cycle_desc.clear();
1072  }
1073  }
1074  }
1075  }
1076 }
1077 
1078 
1079 /**
1080  * Lists all of the RenderStates in the cache to the output stream, one per
1081  * line. This can be quite a lot of output if the cache is large, so be
1082  * prepared.
1083  */
1085 list_states(ostream &out) {
1086  if (_states == nullptr) {
1087  out << "0 states:\n";
1088  return;
1089  }
1090  LightReMutexHolder holder(*_states_lock);
1091 
1092  size_t size = _states->get_num_entries();
1093  out << size << " states:\n";
1094  for (size_t si = 0; si < size; ++si) {
1095  const RenderState *state = _states->get_key(si);
1096  state->write(out, 2);  // indent each state's description by two spaces
1097  }
1098 }
1099 
1100 /**
1101  * Ensures that the cache is still stored in sorted order, and that none of
1102  * the cache elements have been inadvertently deleted. Returns true if so,
1103  * false if there is a problem (which implies someone has modified one of the
1104  * supposedly-const RenderState objects).
1105  */
1107 validate_states() {
1108  if (_states == nullptr) {
1109  return true;
1110  }
1111 
1112  PStatTimer timer(_state_validate_pcollector);
1113 
1114  LightReMutexHolder holder(*_states_lock);
1115  if (_states->is_empty()) {
1116  return true;
1117  }
1118 
1119  if (!_states->validate()) {
1120  pgraph_cat.error()
1121  << "RenderState::_states cache is invalid!\n";
1122  return false;
1123  }
1124 
1125  size_t size = _states->get_num_entries();
1126  size_t si = 0;
1127  nassertr(si < size, false);
1128  nassertr(_states->get_key(si)->get_ref_count() >= 0, false);
1129  size_t snext = si;
1130  ++snext;
1131  while (snext < size) {
1132  nassertr(_states->get_key(snext)->get_ref_count() >= 0, false);
1133  const RenderState *ssi = _states->get_key(si);
1134  const RenderState *ssnext = _states->get_key(snext);
1135  int c = ssi->compare_to(*ssnext);
1136  int ci = ssnext->compare_to(*ssi);
// compare_to() must be antisymmetric: comparing (a, b) and (b, a) must give
// mirrored signs, and must agree on equality.
1137  if ((ci < 0) != (c > 0) ||
1138  (ci > 0) != (c < 0) ||
1139  (ci == 0) != (c == 0)) {
1140  pgraph_cat.error()
1141  << "RenderState::compare_to() not defined properly!\n";
1142  pgraph_cat.error(false)
1143  << "(a, b): " << c << "\n";
1144  pgraph_cat.error(false)
1145  << "(b, a): " << ci << "\n";
1146  ssi->write(pgraph_cat.error(false), 2);
1147  ssnext->write(pgraph_cat.error(false), 2);
1148  return false;
1149  }
1150  si = snext;
1151  ++snext;
1152  }
1153 
1154  return true;
1155 }
1156 
1157 /**
1158  * Returns the union of the Geom::GeomRendering bits that will be required
1159  * once this RenderState is applied to a geom which includes the indicated
1160  * geom_rendering bits.
1161  */
1163 get_geom_rendering(int geom_rendering) const {
1164  const RenderModeAttrib *render_mode;
1165  const TexGenAttrib *tex_gen;
1166  const TexMatrixAttrib *tex_matrix;
1167 
// Each relevant attrib gets a chance to fold additional requirement bits
// into the running geom_rendering mask.
1168  if (get_attrib(render_mode)) {
1169  geom_rendering = render_mode->get_geom_rendering(geom_rendering);
1170  }
1171  if (get_attrib(tex_gen)) {
1172  geom_rendering = tex_gen->get_geom_rendering(geom_rendering);
1173  }
1174  if (get_attrib(tex_matrix)) {
1175  geom_rendering = tex_matrix->get_geom_rendering(geom_rendering);
1176  }
1177 
1178  return geom_rendering;
1179 }
1180 
1181 /**
1182  * Intended to be called by CullBinManager::remove_bin(), this informs all the
1183  * RenderStates in the world to remove the indicated bin_index from their
1184  * cache if it has been cached.
1185  */
1187 bin_removed(int bin_index) {
1188  // Do something here.
1189  nassertv(false);  // not implemented; currently trips an assertion if ever called
1190 }
1191 
1192 /**
1193  * Returns true if the _filled_slots bitmask is consistent with the table of
1194  * RenderAttrib pointers, false otherwise.
1195  */
1196 bool RenderState::
1197 validate_filled_slots() const {
1198  SlotMask mask;
1199 
1201  int max_slots = reg->get_max_slots();
1202  for (int slot = 1; slot < max_slots; ++slot) {
1203  const Attribute &attribute = _attributes[slot];
1204  if (attribute._attrib != nullptr) {
1205  mask.set_bit(slot);
1206  }
1207  }
1208 
1209  return (mask == _filled_slots);
1210 }
1211 
1212 /**
1213  * Computes a suitable hash value for phash_map.
1214  */
1215 void RenderState::
1216 do_calc_hash() {
1217  _hash = 0;
1218 
1219  SlotMask mask = _filled_slots;
1220  int slot = mask.get_lowest_on_bit();
1221  while (slot >= 0) {
1222  const Attribute &attrib = _attributes[slot];
1223  nassertv(attrib._attrib != nullptr);
1224  _hash = pointer_hash::add_hash(_hash, attrib._attrib);
1225  _hash = int_hash::add_hash(_hash, attrib._override);
1226 
1227  mask.clear_bit(slot);
1228  slot = mask.get_lowest_on_bit();
1229  }
1230 
1231  _flags |= F_hash_known;
1232 }
1233 
1234 /**
1235  * This function is used to share a common RenderState pointer for all
1236  * equivalent RenderState objects.
1237  *
1238  * This is different from return_unique() in that it does not actually
1239  * guarantee a unique pointer, unless uniquify-states is set.
1240  */
1241 CPT(RenderState) RenderState::
1242 return_new(RenderState *state) {
1243  nassertr(state != nullptr, state);
1244 
1245  // Make sure we don't have anything in the 0 slot. If we did, that would
1246  // indicate an uninitialized slot number.
1247 #ifndef NDEBUG
1248  if (state->_attributes[0]._attrib != nullptr) {
1249  const RenderAttrib *attrib = state->_attributes[0]._attrib;
1250  if (attrib->get_type() == TypeHandle::none()) {
1251  ((RenderAttrib *)attrib)->force_init_type();
1252  pgraph_cat->error()
1253  << "Uninitialized RenderAttrib type: " << attrib->get_type()
1254  << "\n";
1255 
1256  } else {
1257  static pset<TypeHandle> already_reported;
1258  if (already_reported.insert(attrib->get_type()).second) {  // warn only once per offending type
1259  pgraph_cat->error()
1260  << attrib->get_type() << " did not initialize its slot number.\n";
1261  }
1262  }
1263  }
1264 #endif
1265  state->_attributes[0]._attrib = nullptr;  // discard the bogus slot-0 entry in any case
1266  state->_filled_slots.clear_bit(0);
1267 
1268 #ifndef NDEBUG
1269  nassertr(state->validate_filled_slots(), state);
1270 #endif
1271 
1272  if (!uniquify_states && !state->is_empty()) {
1273  return state;
1274  }
1275 
1276  return return_unique(state);
1277 }
1278 
1279 /**
1280  * This function is used to share a common RenderState pointer for all
1281  * equivalent RenderState objects.
1282  *
1283  * See the similar logic in RenderAttrib. The idea is to create a new
1284  * RenderState object and pass it through this function, which will share the
1285  * pointer with a previously-created RenderState object if it is equivalent.
1286  */
1287 CPT(RenderState) RenderState::
1288 return_unique(RenderState *state) {
1289  nassertr(state != nullptr, nullptr);
1290 
1291  if (!state_cache) {
1292  return state;
1293  }
1294 
1295 #ifndef NDEBUG
1296  if (paranoid_const) {
1297  nassertr(validate_states(), state);
1298  }
1299 #endif
1300 
1301  LightReMutexHolder holder(*_states_lock);
1302 
1303  if (state->_saved_entry != -1) {
1304  // This state is already in the cache. nassertr(_states->find(state) ==
1305  // state->_saved_entry, pt_state);
1306  return state;
1307  }
1308 
1309  // Ensure each of the individual attrib pointers has been uniquified before
1310  // we add the state to the cache.
1311  if (!uniquify_attribs && !state->is_empty()) {
1312  SlotMask mask = state->_filled_slots;
1313  int slot = mask.get_lowest_on_bit();
1314  while (slot >= 0) {
1315  Attribute &attrib = state->_attributes[slot];
1316  nassertd(attrib._attrib != nullptr) continue;
1317  attrib._attrib = attrib._attrib->get_unique();  // swap in the canonical pointer for this attrib
1318  mask.clear_bit(slot);
1319  slot = mask.get_lowest_on_bit();
1320  }
1321  }
1322 
1323  int si = _states->find(state);
1324  if (si != -1) {
1325  // There's an equivalent state already in the set. Return it. The state
1326  // that was passed may be newly created and therefore may not be
1327  // automatically deleted. Do that if necessary.
1328  if (state->get_ref_count() == 0) {
1329  delete state;
1330  }
1331  return _states->get_key(si);
1332  }
1333 
1334  // Not already in the set; add it.
1335  if (garbage_collect_states) {
1336  // If we'll be garbage collecting states explicitly, we'll increment the
1337  // reference count when we store it in the cache, so that it won't be
1338  // deleted while it's in it.
1339  state->cache_ref();
1340  }
1341  si = _states->store(state, nullptr);
1342 
1343  // Save the index and return the input state.
1344  state->_saved_entry = si;
1345  return state;
1346 }
1347 
1348 /**
1349  * The private implementation of compose(); this actually composes two
1350  * RenderStates, without bothering with the cache.
1351  */
1352 CPT(RenderState) RenderState::
1353 do_compose(const RenderState *other) const {
1354  PStatTimer timer(_state_compose_pcollector);
1355 
1356  RenderState *new_state = new RenderState;
1357 
1358  SlotMask mask = _filled_slots | other->_filled_slots;  // union of slots filled on either side
1359  new_state->_filled_slots = mask;
1360 
1361  int slot = mask.get_lowest_on_bit();
1362  while (slot >= 0) {
1363  const Attribute &a = _attributes[slot];
1364  const Attribute &b = other->_attributes[slot];
1365  Attribute &result = new_state->_attributes[slot];
1366 
1367  if (a._attrib == nullptr) {
1368  nassertr(b._attrib != nullptr, this);
1369  // B wins.
1370  result = b;
1371 
1372  } else if (b._attrib == nullptr) {
1373  // A wins.
1374  result = a;
1375 
1376  } else if (b._override < a._override) {
1377  // A, the higher RenderAttrib, overrides.
1378  result = a;
1379 
1380  } else if (a._override < b._override &&
1381  a._attrib->lower_attrib_can_override()) {
1382  // B, the higher RenderAttrib, overrides. This is a special case;
1383  // normally, a lower RenderAttrib does not override a higher one, even
1384  // if it has a higher override value. But certain kinds of
1385  // RenderAttribs redefine lower_attrib_can_override() to return true,
1386  // allowing this override.
1387  result = b;
1388 
1389  } else {
1390  // Either they have the same override value, or B is higher. In either
1391  // case, the result is the composition of the two, with B's override
1392  // value.
1393  result.set(a._attrib->compose(b._attrib), b._override);
1394  }
1395 
1396  mask.clear_bit(slot);
1397  slot = mask.get_lowest_on_bit();
1398  }
1399 
1400  return return_new(new_state);  // share the pointer with any equivalent existing state
1401 }
1402 
1403 /**
1404  * The private implemention of invert_compose().
1405  */
1406 CPT(RenderState) RenderState::
1407 do_invert_compose(const RenderState *other) const {
1408  PStatTimer timer(_state_invert_pcollector);
1409 
1410  RenderState *new_state = new RenderState;
1411 
1412  SlotMask mask = _filled_slots | other->_filled_slots;
1413  new_state->_filled_slots = mask;
1414 
1415  int slot = mask.get_lowest_on_bit();
1416  while (slot >= 0) {
1417  const Attribute &a = _attributes[slot];
1418  const Attribute &b = other->_attributes[slot];
1419  Attribute &result = new_state->_attributes[slot];
1420 
1421  if (a._attrib == nullptr) {
1422  nassertr(b._attrib != nullptr, this);
1423  // B wins.
1424  result = b;
1425 
1426  } else if (b._attrib == nullptr) {
1427  // A wins. Invert it.
1429  result.set(a._attrib->invert_compose(reg->get_slot_default(slot)), 0);
1430 
1431  } else {
1432  // Both are good. (Overrides are not used in invert_compose.) Compose.
1433  result.set(a._attrib->invert_compose(b._attrib), 0);
1434  }
1435 
1436  mask.clear_bit(slot);
1437  slot = mask.get_lowest_on_bit();
1438  }
1439  return return_new(new_state);
1440 }
1441 
1442 /**
1443  * Detects whether there is a cycle in the cache that begins with this state.
1444  * If any are detected, breaks them by removing this state from the cache.
1445  */
1446 void RenderState::
1447 detect_and_break_cycles() {
1448  PStatTimer timer(_state_break_cycles_pcollector);
1449 
1450  ++_last_cycle_detect;
1451  if (r_detect_cycles(this, this, 1, _last_cycle_detect, nullptr)) {
1452  // Ok, we have a cycle. This will be a leak unless we break the cycle by
1453  // freeing the cache on this object.
1454  if (pgraph_cat.is_debug()) {
1455  pgraph_cat.debug()
1456  << "Breaking cycle involving " << (*this) << "\n";
1457  }
1458 
1459  ((RenderState *)this)->remove_cache_pointers();
1460  } else {
1461  ++_last_cycle_detect;
1462  if (r_detect_reverse_cycles(this, this, 1, _last_cycle_detect, nullptr)) {
1463  if (pgraph_cat.is_debug()) {
1464  pgraph_cat.debug()
1465  << "Breaking cycle involving " << (*this) << "\n";
1466  }
1467 
1468  ((RenderState *)this)->remove_cache_pointers();
1469  }
1470  }
1471 }
1472 
1473 /**
1474  * Detects whether there is a cycle in the cache that begins with the
1475  * indicated state. Returns true if at least one cycle is found, false if
1476  * this state is not part of any cycles. If a cycle is found and cycle_desc
1477  * is not NULL, then cycle_desc is filled in with the list of the steps of the
1478  * cycle, in reverse order.
1479  */
1480 bool RenderState::
1481 r_detect_cycles(const RenderState *start_state,
1482  const RenderState *current_state,
1483  int length, UpdateSeq this_seq,
1484  RenderState::CompositionCycleDesc *cycle_desc) {
1485  if (current_state->_cycle_detect == this_seq) {
1486  // We've already seen this state; therefore, we've found a cycle.
1487 
1488  // However, we only care about cycles that return to the starting state
1489  // and involve more than two steps. If only one or two nodes are
1490  // involved, it doesn't represent a memory leak, so no problem there.
1491  return (current_state == start_state && length > 2);
1492  }
1493  ((RenderState *)current_state)->_cycle_detect = this_seq;  // mark visited for this traversal only
1494 
1495  size_t i;
1496  size_t cache_size = current_state->_composition_cache.get_num_entries();
1497  for (i = 0; i < cache_size; ++i) {
1498  const RenderState *result = current_state->_composition_cache.get_data(i)._result;
1499  if (result != nullptr) {
1500  if (r_detect_cycles(start_state, result, length + 1,
1501  this_seq, cycle_desc)) {
1502  // Cycle detected.
1503  if (cycle_desc != nullptr) {
1504  const RenderState *other = current_state->_composition_cache.get_key(i);
1505  CompositionCycleDescEntry entry(other, result, false);
1506  cycle_desc->push_back(entry);
1507  }
1508  return true;
1509  }
1510  }
1511  }
1512 
1513  cache_size = current_state->_invert_composition_cache.get_num_entries();
1514  for (i = 0; i < cache_size; ++i) {
1515  const RenderState *result = current_state->_invert_composition_cache.get_data(i)._result;
1516  if (result != nullptr) {
1517  if (r_detect_cycles(start_state, result, length + 1,
1518  this_seq, cycle_desc)) {
1519  // Cycle detected.
1520  if (cycle_desc != nullptr) {
1521  const RenderState *other = current_state->_invert_composition_cache.get_key(i);
1522  CompositionCycleDescEntry entry(other, result, true);
1523  cycle_desc->push_back(entry);
1524  }
1525  return true;
1526  }
1527  }
1528  }
1529 
1530  // No cycle detected.
1531  return false;
1532 }
1533 
1534 /**
1535  * Works the same as r_detect_cycles, but checks for cycles in the reverse
1536  * direction along the cache chain. (A cycle may appear in either direction,
1537  * and we must check both.)
1538  */
1539 bool RenderState::
1540 r_detect_reverse_cycles(const RenderState *start_state,
1541  const RenderState *current_state,
1542  int length, UpdateSeq this_seq,
1543  RenderState::CompositionCycleDesc *cycle_desc) {
1544  if (current_state->_cycle_detect == this_seq) {
1545  // We've already seen this state; therefore, we've found a cycle.
1546 
1547  // However, we only care about cycles that return to the starting state
1548  // and involve more than two steps. If only one or two nodes are
1549  // involved, it doesn't represent a memory leak, so no problem there.
1550  return (current_state == start_state && length > 2);
1551  }
1552  ((RenderState *)current_state)->_cycle_detect = this_seq;  // mark visited for this traversal only
1553 
1554  size_t i;
1555  size_t cache_size = current_state->_composition_cache.get_num_entries();
1556  for (i = 0; i < cache_size; ++i) {
1557  const RenderState *other = current_state->_composition_cache.get_key(i);
1558  if (other != current_state) {
1559  int oi = other->_composition_cache.find(current_state);
1560  nassertr(oi != -1, false);  // cache entries are always added in pairs
1561 
1562  const RenderState *result = other->_composition_cache.get_data(oi)._result;
1563  if (result != nullptr) {
1564  if (r_detect_reverse_cycles(start_state, result, length + 1,
1565  this_seq, cycle_desc)) {
1566  // Cycle detected.
1567  if (cycle_desc != nullptr) {
1568  const RenderState *other = current_state->_composition_cache.get_key(i);
1569  CompositionCycleDescEntry entry(other, result, false);
1570  cycle_desc->push_back(entry);
1571  }
1572  return true;
1573  }
1574  }
1575  }
1576  }
1577 
1578  cache_size = current_state->_invert_composition_cache.get_num_entries();
1579  for (i = 0; i < cache_size; ++i) {
1580  const RenderState *other = current_state->_invert_composition_cache.get_key(i);
1581  if (other != current_state) {
1582  int oi = other->_invert_composition_cache.find(current_state);
1583  nassertr(oi != -1, false);
1584 
1585  const RenderState *result = other->_invert_composition_cache.get_data(oi)._result;
1586  if (result != nullptr) {
1587  if (r_detect_reverse_cycles(start_state, result, length + 1,
1588  this_seq, cycle_desc)) {
1589  // Cycle detected.
1590  if (cycle_desc != nullptr) {
1591  const RenderState *other = current_state->_invert_composition_cache.get_key(i);
1592  // NOTE(review): this invert-cache entry is recorded with
1593  // _inverted=false, unlike r_detect_cycles which passes true for
1594  // its invert cache -- confirm this asymmetry is intended.
1595  CompositionCycleDescEntry entry(other, result, false);
1596  cycle_desc->push_back(entry);
1597  }
1598  return true;
1599  }
1600  }
1601  }
1602  }
1603 
1604  // No cycle detected.
1605  return false;
1606 }
1604 
1605 /**
1606  * The inverse of return_new, this releases this object from the global
1607  * RenderState table.
1608  *
1609  * You must already be holding _states_lock before you call this method.
1610  */
1611 void RenderState::
1612 release_new() {
1613  nassertv(_states_lock->debug_is_locked());
1614 
1615  if (_saved_entry != -1) {
1616  _saved_entry = -1;
1617  nassertv_always(_states->remove(this));
1618  }
1619 }
1620 
1621 /**
1622  * Remove all pointers within the cache from and to this particular
1623  * RenderState. The pointers to this object may be scattered around in the
1624  * various CompositionCaches from other RenderState objects.
1625  *
1626  * You must already be holding _states_lock before you call this method.
1627  */
1628 void RenderState::
1629 remove_cache_pointers() {
1630  nassertv(_states_lock->debug_is_locked());
1631 
1632  // Fortunately, since we added CompositionCache records in pairs, we know
1633  // exactly the set of RenderState objects that have us in their cache: it's
1634  // the same set of RenderState objects that we have in our own cache.
1635 
1636 /*
1637  * We do need to put considerable thought into this loop, because as we clear
1638  * out cache entries we'll cause other RenderState objects to destruct, which
1639  * could cause things to get pulled out of our own _composition_cache map. We
1640  * want to allow this (so that we don't encounter any just-destructed pointers
1641  * in our cache), but we don't want to get bitten by this cascading effect.
1642  * Instead of walking through the map from beginning to end, therefore, we
1643  * just pull out the first one each time, and erase it.
1644  */
1645 
1646 #ifdef DO_PSTATS
1647  if (_composition_cache.is_empty() && _invert_composition_cache.is_empty()) {
1648  return;
1649  }
1650  PStatTimer timer(_cache_update_pcollector);
1651 #endif // DO_PSTATS
1652 
1653  // There are lots of ways to do this loop wrong. Be very careful if you
1654  // need to modify it for any reason.
1655  size_t i = 0;
1656  while (!_composition_cache.is_empty()) {
1657  // It is possible that the "other" RenderState object is currently within
1658  // its own destructor. We therefore can't use a PT() to hold its pointer;
1659  // that could end up calling its destructor twice. Fortunately, we don't
1660  // need to hold its reference count to ensure it doesn't destruct while we
1661  // process this loop; as long as we ensure that no *other* RenderState
1662  // objects destruct, there will be no reason for that one to.
1663  RenderState *other = (RenderState *)_composition_cache.get_key(i);
1664 
1665  // We hold a copy of the composition result so we can dereference it
1666  // later.
1667  Composition comp = _composition_cache.get_data(i);
1668 
1669  // Now we can remove the element from our cache. We do this now, rather
1670  // than later, before any other RenderState objects have had a chance to
1671  // destruct, so we are confident that our iterator is still valid.
1672  _composition_cache.remove_element(i);
1673  _cache_stats.add_total_size(-1);
1674  _cache_stats.inc_dels();
1675 
1676  if (other != this) {
1677  int oi = other->_composition_cache.find(this);
1678 
1679  // We may or may not still be listed in the other's cache (it might be
1680  // halfway through pulling entries out, from within its own destructor).
1681  if (oi != -1) {
1682  // Hold a copy of the other composition result, too.
1683  Composition ocomp = other->_composition_cache.get_data(oi);
1684 
1685  other->_composition_cache.remove_element(oi);
1686  _cache_stats.add_total_size(-1);
1687  _cache_stats.inc_dels();
1688 
1689  // It's finally safe to let our held pointers go away. This may have
1690  // cascading effects as other RenderState objects are destructed, but
1691  // there will be no harm done if they destruct now.
1692  if (ocomp._result != nullptr && ocomp._result != other) {
1693  cache_unref_delete(ocomp._result);
1694  }
1695  }
1696  }
1697 
1698  // It's finally safe to let our held pointers go away. (See comment
1699  // above.)
1700  if (comp._result != nullptr && comp._result != this) {
1701  cache_unref_delete(comp._result);
1702  }
1703  }
1704 
1705  // A similar bit of code for the invert cache.
1706  i = 0;
1707  while (!_invert_composition_cache.is_empty()) {
1708  RenderState *other = (RenderState *)_invert_composition_cache.get_key(i);
1709  nassertv(other != this);  // a state never invert-composes with itself
1710  Composition comp = _invert_composition_cache.get_data(i);
1711  _invert_composition_cache.remove_element(i);
1712  _cache_stats.add_total_size(-1);
1713  _cache_stats.inc_dels();
1714  if (other != this) {
1715  int oi = other->_invert_composition_cache.find(this);
1716  if (oi != -1) {
1717  Composition ocomp = other->_invert_composition_cache.get_data(oi);
1718  other->_invert_composition_cache.remove_element(oi);
1719  _cache_stats.add_total_size(-1);
1720  _cache_stats.inc_dels();
1721  if (ocomp._result != nullptr && ocomp._result != other) {
1722  cache_unref_delete(ocomp._result);
1723  }
1724  }
1725  }
1726  if (comp._result != nullptr && comp._result != this) {
1727  cache_unref_delete(comp._result);
1728  }
1729  }
1730 }
1731 
1732 /**
1733  * This is the private implementation of get_bin_index() and get_draw_order().
1734  */
1735 void RenderState::
1736 determine_bin_index() {
1737  LightMutexHolder holder(_lock);
1738  if ((_flags & F_checked_bin_index) != 0) {
1739  // Someone else checked it first.
1740  return;
1741  }
1742 
1743  std::string bin_name;
1744  _draw_order = 0;
1745 
1746  const CullBinAttrib *bin;
1747  if (get_attrib(bin)) {
1748  bin_name = bin->get_bin_name();
1749  _draw_order = bin->get_draw_order();
1750  }
1751 
1752  if (bin_name.empty()) {
1753  // No explicit bin is specified; put in the in the default bin, either
1754  // opaque or transparent, based on the transparency setting.
1755  bin_name = "opaque";
1756 
1757  const TransparencyAttrib *transparency;
1758  if (get_attrib(transparency)) {
1759  switch (transparency->get_mode()) {
1760  case TransparencyAttrib::M_alpha:
1761  case TransparencyAttrib::M_premultiplied_alpha:
1762  case TransparencyAttrib::M_dual:
1763  // These transparency modes require special back-to-front sorting.
1764  bin_name = "transparent";
1765  break;
1766 
1767  default:
1768  break;
1769  }
1770  }
1771  }
1772 
1774  _bin_index = bin_manager->find_bin(bin_name);
1775  if (_bin_index == -1) {
1776  pgraph_cat.warning()
1777  << "No bin named " << bin_name << "; creating default bin.\n";
1778  _bin_index = bin_manager->add_bin(bin_name, CullBinManager::BT_unsorted, 0);
1779  }
1780  _flags |= F_checked_bin_index;
1781 }
1782 
1783 /**
1784  * This is the private implementation of has_cull_callback().
1785  */
1786 void RenderState::
1787 determine_cull_callback() {
1788  LightMutexHolder holder(_lock);
1789  if ((_flags & F_checked_cull_callback) != 0) {
1790  // Someone else checked it first.
1791  return;
1792  }
1793 
1794  SlotMask mask = _filled_slots;
1795  int slot = mask.get_lowest_on_bit();
1796  while (slot >= 0) {
1797  const Attribute &attrib = _attributes[slot];
1798  nassertv(attrib._attrib != nullptr);
1799  if (attrib._attrib->has_cull_callback()) {
1800  _flags |= F_has_cull_callback;
1801  break;
1802  }
1803 
1804  mask.clear_bit(slot);
1805  slot = mask.get_lowest_on_bit();
1806  }
1807 
1808  _flags |= F_checked_cull_callback;
1809 }
1810 
1811 /**
1812  * Fills up the state with all of the default attribs.
1813  */
1814 void RenderState::
1815 fill_default() {
1817  int num_slots = reg->get_num_slots();
1818  for (int slot = 1; slot < num_slots; ++slot) {
1819  _attributes[slot].set(reg->get_slot_default(slot), 0);
1820  _filled_slots.set_bit(slot);
1821  }
1822 }
1823 
1824 /**
1825  * Moves the RenderState object from one PStats category to another, so that
1826  * we can track in PStats how many pointers are held by nodes, and how many
1827  * are held in the cache only.
1828  */
1829 void RenderState::
1830 update_pstats(int old_referenced_bits, int new_referenced_bits) {
1831 #ifdef DO_PSTATS
1832  if ((old_referenced_bits & R_node) != 0) {
1833  _node_counter.sub_level(1);
1834  } else if ((old_referenced_bits & R_cache) != 0) {
1835  _cache_counter.sub_level(1);
1836  }
1837  if ((new_referenced_bits & R_node) != 0) {
1838  _node_counter.add_level(1);
1839  } else if ((new_referenced_bits & R_cache) != 0) {
1840  _cache_counter.add_level(1);
1841  }
1842 #endif // DO_PSTATS
1843 }
1844 
1845 /**
1846  * Make sure the global _states map is allocated. This only has to be done
1847  * once. We could make this map static, but then we run into problems if
1848  * anyone creates a RenderState object at static init time; it also seems to
1849  * cause problems when the Panda shared library is unloaded at application
1850  * exit time.
1851  */
1853 init_states() {
1854  _states = new States;
1855 
1856  // TODO: we should have a global Panda mutex to allow us to safely create
1857  // _states_lock without a startup race condition. For the meantime, this is
1858  // OK because we guarantee that this method is called at static init time,
1859  // presumably when there is still only one thread in the world.
1860  _states_lock = new LightReMutex("RenderState::_states_lock");
1861  _cache_stats.init();
1863 
1864  // Initialize the empty state object as well. It is used so often that it
1865  // is declared globally, and lives forever.
1866  RenderState *state = new RenderState;
1867  state->local_object();
1868  state->_saved_entry = _states->store(state, nullptr);
1869  _empty_state = state;
1870 }
1871 
1872 /**
1873  * Tells the BamReader how to create objects of type RenderState.
1874  */
1877  BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
1878 }
1879 
1880 /**
1881  * Writes the contents of this object to the datagram for shipping out to a
1882  * Bam file.
1883  */
1885 write_datagram(BamWriter *manager, Datagram &dg) {
1886  TypedWritable::write_datagram(manager, dg);
1887 
1888  int num_attribs = _filled_slots.get_num_on_bits();
1889  nassertv(num_attribs == (int)(uint16_t)num_attribs);
1890  dg.add_uint16(num_attribs);
1891 
1892  // **** We should smarten up the writing of the override number--most of the
1893  // time these will all be zero.
1894  SlotMask mask = _filled_slots;
1895  int slot = mask.get_lowest_on_bit();
1896  while (slot >= 0) {
1897  const Attribute &attrib = _attributes[slot];
1898  nassertv(attrib._attrib != nullptr);
1899  manager->write_pointer(dg, attrib._attrib);
1900  dg.add_int32(attrib._override);
1901 
1902  mask.clear_bit(slot);
1903  slot = mask.get_lowest_on_bit();
1904  }
1905 }
1906 
1907 /**
1908  * Receives an array of pointers, one for each time manager->read_pointer()
1909  * was called in fillin(). Returns the number of pointers processed.
1910  */
1912 complete_pointers(TypedWritable **p_list, BamReader *manager) {
1913  int pi = TypedWritable::complete_pointers(p_list, manager);
1914 
1915  int num_attribs = 0;
1916 
1918  for (size_t i = 0; i < (*_read_overrides).size(); ++i) {
1919  int override = (*_read_overrides)[i];
1920 
1921  RenderAttrib *attrib = DCAST(RenderAttrib, p_list[pi++]);
1922  if (attrib != nullptr) {
1923  int slot = attrib->get_slot();
1924  if (slot > 0 && slot < reg->get_max_slots()) {
1925  _attributes[slot].set(attrib, override);
1926  _filled_slots.set_bit(slot);
1927  ++num_attribs;
1928  }
1929  }
1930  }
1931 
1932  delete _read_overrides;
1933  _read_overrides = nullptr;
1934 
1935  return pi;
1936 }
1937 
1938 /**
1939  * Called immediately after complete_pointers(), this gives the object a
1940  * chance to adjust its own pointer if desired. Most objects don't change
1941  * pointers after completion, but some need to.
1942  *
1943  * Once this function has been called, the old pointer will no longer be
1944  * accessed.
1945  */
1947 change_this(TypedWritable *old_ptr, BamReader *manager) {
1948  // First, uniquify the pointer.
1949  RenderState *state = DCAST(RenderState, old_ptr);
1950  CPT(RenderState) pointer = return_unique(state);
1951 
1952  // But now we have a problem, since we have to hold the reference count and
1953  // there's no way to return a TypedWritable while still holding the
1954  // reference count! We work around this by explicitly upping the count, and
1955  // also setting a finalize() callback to down it later.
1956  if (pointer == state) {
1957  pointer->ref();
1958  manager->register_finalize(state);
1959  }
1960 
1961  // We have to cast the pointer back to non-const, because the bam reader
1962  // expects that.
1963  return (RenderState *)pointer.p();
1964 }
1965 
1966 /**
1967  * Called by the BamReader to perform any final actions needed for setting up
1968  * the object after all objects have been read and all pointers have been
1969  * completed.
1970  */
1972 finalize(BamReader *) {
1973  // Unref the pointer that we explicitly reffed in change_this().
1974  unref();
1975 
1976  // We should never get back to zero after unreffing our own count, because
1977  // we expect to have been stored in a pointer somewhere. If we do get to
1978  // zero, it's a memory leak; the way to avoid this is to call unref_delete()
1979  // above instead of unref(), but this is dangerous to do from within a
1980  // virtual function.
1981  nassertv(get_ref_count() != 0);
1982 }
1983 
1984 /**
1985  * This function is called by the BamReader's factory when a new object of
1986  * type RenderState is encountered in the Bam file. It should create the
1987  * RenderState and extract its information from the file.
1988  */
1989 TypedWritable *RenderState::
1990 make_from_bam(const FactoryParams &params) {
1991  RenderState *state = new RenderState;
1992  DatagramIterator scan;
1993  BamReader *manager;
1994 
1995  parse_params(params, scan, manager);
1996  state->fillin(scan, manager);
1997  manager->register_change_this(change_this, state);
1998 
1999  return state;
2000 }
2001 
2002 /**
2003  * This internal function is called by make_from_bam to read in all of the
2004  * relevant data from the BamFile for the new RenderState.
2005  */
2006 void RenderState::
2007 fillin(DatagramIterator &scan, BamReader *manager) {
2008  TypedWritable::fillin(scan, manager);
2009 
2010  int num_attribs = scan.get_uint16();
2011  _read_overrides = new vector_int;
2012  (*_read_overrides).reserve(num_attribs);
2013 
2014  for (int i = 0; i < num_attribs; ++i) {
2015  manager->read_pointer(scan);
2016  int override = scan.get_int32();
2017  (*_read_overrides).push_back(override);
2018  }
2019 }
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void parse_params(const FactoryParams &params, DatagramIterator &scan, BamReader *&manager)
Takes in a FactoryParams, passed from a WritableFactory into any TypedWritable's make function,...
Definition: bamReader.I:275
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void cache_unref_delete(RefCountType *ptr)
This global helper function will unref the given ReferenceCount object, and if the reference count re...
This is the fundamental interface for extracting binary objects from a Bam file, as generated by a Ba...
Definition: bamReader.h:110
void register_finalize(TypedWritable *whom)
Should be called by an object reading itself from the Bam file to indicate that this particular objec...
Definition: bamReader.cxx:808
void register_change_this(ChangeThisFunc func, TypedWritable *whom)
Called by an object reading itself from the bam file to indicate that the object pointer that will be...
Definition: bamReader.cxx:835
bool read_pointer(DatagramIterator &scan)
The interface for reading a pointer to another object from a Bam file.
Definition: bamReader.cxx:610
static WritableFactory * get_factory()
Returns the global WritableFactory for generating TypedWritable objects.
Definition: bamReader.I:177
This is the fundamental interface for writing binary objects to a Bam file, to be extracted later by ...
Definition: bamWriter.h:63
void write_pointer(Datagram &packet, const TypedWritable *dest)
The interface for writing a pointer to another object to a Bam file.
Definition: bamWriter.cxx:317
int get_lowest_on_bit() const
Returns the index of the lowest 1 bit in the mask.
Definition: bitMask.I:283
void set_bit(int index)
Sets the nth bit on.
Definition: bitMask.I:119
bool get_bit(int index) const
Returns true if the nth bit is set, false if it is cleared.
Definition: bitMask.I:109
void clear_bit(int index)
Sets the nth bit off.
Definition: bitMask.I:129
int get_num_on_bits() const
Returns the number of bits that are set to 1 in the mask.
Definition: bitMask.I:264
This is used to track the utilization of the TransformState and RenderState caches,...
Definition: cacheStats.h:25
void inc_adds(bool is_new)
Increments by 1 the count of elements added to the cache.
Definition: cacheStats.I:56
void add_total_size(int count)
Adds the indicated count (positive or negative) to the total number of entries for the cache (net occ...
Definition: cacheStats.I:80
void add_num_states(int count)
Adds the indicated count (positive or negative) to the total count of individual RenderState or Trans...
Definition: cacheStats.I:91
void maybe_report(const char *name)
Outputs a report if enough time has elapsed.
Definition: cacheStats.I:18
void inc_dels()
Increments by 1 the count of elements removed from the cache.
Definition: cacheStats.I:69
void init()
Initializes the CacheStats for the first time.
Definition: cacheStats.cxx:22
void inc_hits()
Increments by 1 the count of cache hits.
Definition: cacheStats.I:35
void inc_misses()
Increments by 1 the count of cache misses.
Definition: cacheStats.I:45
get_cache_ref_count
Returns the current reference count.
Assigns geometry to a particular bin by name.
Definition: cullBinAttrib.h:27
get_draw_order
Returns the draw order this attribute specifies.
Definition: cullBinAttrib.h:40
get_bin_name
Returns the name of the bin this attribute specifies.
Definition: cullBinAttrib.h:39
This is a global object that maintains the collection of named CullBins in the world.
static CullBinManager * get_global_ptr()
Returns the pointer to the global CullBinManager object.
int add_bin(const std::string &name, BinType type, int sort)
Defines a new bin with the indicated name, and returns the new bin_index.
int find_bin(const std::string &name) const
Returns the bin_index associated with the bin of the given name, or -1 if no bin has that name.
This collects together the pieces of data that are accumulated for each node while walking the scene ...
This object performs a depth-first traversal of the scene graph, with optional view-frustum culling,...
Definition: cullTraverser.h:45
A class to retrieve the individual data elements previously stored in a Datagram.
uint16_t get_uint16()
Extracts an unsigned 16-bit integer.
int32_t get_int32()
Extracts a signed 32-bit integer.
An ordered list of data elements, formatted in memory for transmission over a socket or writing to a ...
Definition: datagram.h:38
void add_int32(int32_t value)
Adds a signed 32-bit integer to the datagram.
Definition: datagram.I:67
void add_uint16(uint16_t value)
Adds an unsigned 16-bit integer to the datagram.
Definition: datagram.I:85
An instance of this class is passed to the Factory when requesting it to do its business and construc...
Definition: factoryParams.h:36
void register_factory(TypeHandle handle, CreateFunc *func, void *user_data=nullptr)
Registers a new kind of thing the Factory will be able to create.
Definition: factory.I:73
Similar to MutexHolder, but for a light mutex.
bool debug_is_locked() const
Returns true if the current thread has locked the LightReMutex, false otherwise.
Similar to MutexHolder, but for a light reentrant mutex.
A lightweight reentrant mutex.
Definition: lightReMutex.h:32
static void update_type(ReferenceCount *ptr, TypeHandle type)
Associates the indicated type with the given pointer.
Definition: memoryUsage.I:55
A lightweight class that represents a single element that may be timed and/or counted via stats.
A lightweight class that can be used to automatically start and stop a PStatCollector around a sectio...
Definition: pStatTimer.h:30
void local_object()
This function should be called, once, immediately after creating a new instance of some ReferenceCoun...
bool unref_if_one() const
Atomically decreases the reference count of this object if it is one.
get_ref_count
Returns the current reference count.
virtual bool unref() const
Explicitly decrements the reference count.
This class is used to associate each RenderAttrib with a different slot index at runtime,...
const RenderAttrib * get_slot_default(int slot) const
Returns the default RenderAttrib object associated with slot n.
static RenderAttribRegistry * quick_get_global_ptr()
Returns the global_ptr without first ensuring it has been initialized.
int get_sorted_slot(int n) const
Returns the nth slot in sorted order.
int get_num_slots() const
Returns the number of RenderAttrib slots that have been allocated.
int get_num_sorted_slots() const
Returns the number of entries in the sorted_slots list.
This is the base class for a number of render attributes (other than transform) that may be set on sc...
Definition: renderAttrib.h:51
static int garbage_collect()
Performs a garbage-collection cycle.
Specifies how polygons are to be drawn.
int get_geom_rendering(int geom_rendering) const
Returns the union of the Geom::GeomRendering bits that will be required once this RenderModeAttrib is...
This represents a unique collection of RenderAttrib objects that correspond to a particular renderabl...
Definition: renderState.h:47
int compare_to(const RenderState &other) const
Provides an arbitrary ordering among all unique RenderStates, so we can store the essentially differe...
int compare_mask(const RenderState &other, SlotMask compare_mask) const
This version of compare_to takes a slot mask that indicates which attributes to include in the compar...
static void register_with_read_factory()
Tells the BamReader how to create objects of type RenderState.
static int get_num_states()
Returns the total number of unique RenderState objects allocated in the world.
static void list_states(std::ostream &out)
Lists all of the RenderStates in the cache to the output stream, one per line.
bool is_empty() const
Returns true if the state is empty, false otherwise.
Definition: renderState.I:27
virtual void write_datagram(BamWriter *manager, Datagram &dg)
Writes the contents of this object to the datagram for shipping out to a Bam file.
virtual int complete_pointers(TypedWritable **plist, BamReader *manager)
Receives an array of pointers, one for each time manager->read_pointer() was called in fillin().
static int garbage_collect()
Performs a garbage-collection cycle.
bool cull_callback(CullTraverser *trav, const CullTraverserData &data) const
Calls cull_callback() on each attrib.
static void bin_removed(int bin_index)
Intended to be called by CullBinManager::remove_bin(), this informs all the RenderStates in the world...
int compare_sort(const RenderState &other) const
Returns -1, 0, or 1 according to the relative sorting of these two RenderStates, with regards to rend...
virtual bool unref() const
Explicitly decrements the reference count.
static void init_states()
Make sure the global _states map is allocated.
static void clear_munger_cache()
Completely empties the cache of state + gsg -> munger, for all states and all gsg's.
static bool validate_states()
Ensures that the cache is still stored in sorted order, and that none of the cache elements have been...
static TypedWritable * change_this(TypedWritable *old_ptr, BamReader *manager)
Called immediately after complete_pointers(), this gives the object a chance to adjust its own pointe...
static void list_cycles(std::ostream &out)
Detects all of the reference-count cycles in the cache and reports them to standard output.
static int clear_cache()
Empties the cache of composed RenderStates.
virtual void finalize(BamReader *manager)
Called by the BamReader to perform any final actions needed for setting up the object after all objec...
virtual ~RenderState()
The destructor is responsible for removing the RenderState from the global set if it is there.
int get_geom_rendering(int geom_rendering) const
Returns the union of the Geom::GeomRendering bits that will be required once this RenderState is appl...
static int get_num_unused_states()
Returns the total number of RenderState objects that have been allocated but have no references outsi...
This template class implements an unordered map of keys to data, implemented as a hashtable.
Definition: simpleHashMap.h:81
const Key & get_key(size_t n) const
Returns the key in the nth entry of the table.
int store(const Key &key, const Value &data)
Records the indicated key/data pair in the map.
bool validate() const
Returns true if the internal table appears to be consistent, false if there are some internal errors.
void clear()
Completely empties the table.
const Value & get_data(size_t n) const
Returns the data in the nth entry of the table.
int find(const Key &key) const
Searches for the indicated key in the table.
bool remove(const Key &key)
Removes the indicated key and its associated data from the table.
bool consider_shrink_table()
Shrinks the table if the allocated storage is significantly larger than the number of elements in it.
void remove_element(size_t n)
Removes the nth entry from the table.
bool is_empty() const
Returns true if the table is empty; i.e.
size_t get_num_entries() const
Returns the number of active entries in the table.
Computes texture coordinates for geometry automatically based on vertex position and/or normal.
Definition: texGenAttrib.h:32
int get_geom_rendering(int geom_rendering) const
Returns the union of the Geom::GeomRendering bits that will be required once this TexGenAttrib is app...
Definition: texGenAttrib.I:44
Applies a transform matrix to UV's before they are rendered.
int get_geom_rendering(int geom_rendering) const
Returns the union of the Geom::GeomRendering bits that will be required once this TexMatrixAttrib is ...
get_main_thread
Returns a pointer to the "main" Thread object–this is the Thread that started the whole process.
Definition: thread.h:107
get_current_thread
Returns a pointer to the currently-executing Thread object.
Definition: thread.h:109
This controls the enabling of transparency.
get_mode
Returns the transparency mode.
TypeHandle is the identifier used to differentiate C++ class types.
Definition: typeHandle.h:81
Base class for objects that can be written to and read from Bam files.
Definition: typedWritable.h:35
virtual void fillin(DatagramIterator &scan, BamReader *manager)
This internal function is intended to be called by each class's make_from_bam() method to read in all...
virtual void write_datagram(BamWriter *manager, Datagram &dg)
Writes the contents of this object to the datagram for shipping out to a Bam file.
virtual int complete_pointers(TypedWritable **p_list, BamReader *manager)
Receives an array of pointers, one for each time manager->read_pointer() was called in fillin().
This is a sequence number that increments monotonically.
Definition: updateSeq.h:37
static size_t add_hash(size_t start, const Key &key)
Adds the indicated key into a running hash.
Definition: stl_compares.I:101
This is our own Panda specialization on the default STL map.
Definition: pmap.h:49
static size_t add_hash(size_t start, const void *key)
Adds the indicated key into a running hash.
Definition: stl_compares.I:110
This is our own Panda specialization on the default STL set.
Definition: pset.h:49
This is our own Panda specialization on the default STL vector.
Definition: pvector.h:42
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
std::ostream & indent(std::ostream &out, int indent_level)
A handy function for doing text formatting.
Definition: indent.cxx:20
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
int get_lowest_on_bit(unsigned short x)
Returns the index of the lowest 1 bit in the word.
Definition: pbitops.I:175
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
CPT(RenderState) RenderState
Returns a RenderState with one attribute set.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.