renderState.cxx
/**
 * PANDA 3D SOFTWARE
 * Copyright (c) Carnegie Mellon University. All rights reserved.
 *
 * All use of this software is subject to the terms of the revised BSD
 * license.  You should have received a copy of this license along
 * with this source code in a file named "LICENSE."
 *
 * @file renderState.cxx
 * @author drose
 * @date 2002-02-21
 */

#include "renderState.h"
#include "transparencyAttrib.h"
#include "cullBinAttrib.h"
#include "cullBinManager.h"
#include "fogAttrib.h"
#include "clipPlaneAttrib.h"
#include "scissorAttrib.h"
#include "colorAttrib.h"
#include "colorScaleAttrib.h"
#include "textureAttrib.h"
#include "texGenAttrib.h"
#include "shaderAttrib.h"
#include "pStatTimer.h"
#include "config_pgraph.h"
#include "bamReader.h"
#include "bamWriter.h"
#include "datagramIterator.h"
#include "indent.h"
#include "compareTo.h"
#include "lightReMutexHolder.h"
#include "lightMutexHolder.h"
#include "thread.h"
#include "renderAttribRegistry.h"
using std::ostream;

LightReMutex *RenderState::_states_lock = nullptr;
RenderState::States *RenderState::_states = nullptr;
const RenderState *RenderState::_empty_state = nullptr;
UpdateSeq RenderState::_last_cycle_detect;
size_t RenderState::_garbage_index = 0;

PStatCollector RenderState::_cache_update_pcollector("*:State Cache:Update");
PStatCollector RenderState::_garbage_collect_pcollector("*:State Cache:Garbage Collect");
PStatCollector RenderState::_state_compose_pcollector("*:State Cache:Compose State");
PStatCollector RenderState::_state_invert_pcollector("*:State Cache:Invert State");
PStatCollector RenderState::_node_counter("RenderStates:On nodes");
PStatCollector RenderState::_cache_counter("RenderStates:Cached");
PStatCollector RenderState::_state_break_cycles_pcollector("*:State Cache:Break Cycles");
PStatCollector RenderState::_state_validate_pcollector("*:State Cache:Validate");

CacheStats RenderState::_cache_stats;

TypeHandle RenderState::_type_handle;

/**
 * Actually, this could be a private constructor, since no one inherits from
 * RenderState, but gcc gives us a spurious warning if all constructors are
 * private.
 */
RenderState::
RenderState() :
  _flags(0),
  _lock("RenderState")
{
  if (_states == nullptr) {
    init_states();
  }
  _saved_entry = -1;
  _last_mi = -1;
  _cache_stats.add_num_states(1);
  _read_overrides = nullptr;
  _generated_shader = nullptr;

#ifdef DO_MEMORY_USAGE
  MemoryUsage::update_type(this, this);
#endif
}

/**
 * RenderStates are only meant to be copied internally.
 */
RenderState::
RenderState(const RenderState &copy) :
  _filled_slots(copy._filled_slots),
  _flags(0),
  _lock("RenderState")
{
  // Copy over the attributes.
  for (int i = 0; i < RenderAttribRegistry::_max_slots; ++i) {
    _attributes[i] = copy._attributes[i];
  }

  _saved_entry = -1;
  _last_mi = -1;
  _cache_stats.add_num_states(1);
  _read_overrides = nullptr;
  _generated_shader = nullptr;

#ifdef DO_MEMORY_USAGE
  MemoryUsage::update_type(this, this);
#endif
}

/**
 * The destructor is responsible for removing the RenderState from the global
 * set if it is there.
 */
RenderState::
~RenderState() {
  // We'd better not call the destructor twice on a particular object.
  nassertv(!is_destructing());
  set_destructing();

  LightReMutexHolder holder(*_states_lock);

  // unref() should have cleared these.
  nassertv(_saved_entry == -1);
  nassertv(_composition_cache.is_empty() && _invert_composition_cache.is_empty());

  // If this was true at the beginning of the destructor, but is no longer
  // true now, probably we've been double-deleted.
  nassertv(get_ref_count() == 0);
  _cache_stats.add_num_states(-1);
}

/**
 * Provides an arbitrary ordering among all unique RenderStates, so we can
 * store the essentially different ones in a big set and throw away the rest.
 *
 * This method is not needed outside of the RenderState class because all
 * equivalent RenderState objects are guaranteed to share the same pointer;
 * thus, a pointer comparison is always sufficient.
 */
int RenderState::
compare_to(const RenderState &other) const {
  SlotMask mask = _filled_slots | other._filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    int result = _attributes[slot].compare_to(other._attributes[slot]);
    if (result != 0) {
      return result;
    }
    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return 0;
}

/**
 * Returns -1, 0, or 1 according to the relative sorting of these two
 * RenderStates, with regards to rendering performance, so that "heavier"
 * RenderAttribs (as defined by RenderAttribRegistry::get_slot_sort()) are
 * more likely to be grouped together.  This is not related to the sorting
 * order defined by compare_to.
 */
int RenderState::
compare_sort(const RenderState &other) const {
  if (this == &other) {
    // Trivial case.
    return 0;
  }

  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
  int num_sorted_slots = reg->get_num_sorted_slots();
  for (int n = 0; n < num_sorted_slots; ++n) {
    int slot = reg->get_sorted_slot(n);
    nassertr((_attributes[slot]._attrib != nullptr) == _filled_slots.get_bit(slot), 0);

    const RenderAttrib *a = _attributes[slot]._attrib;
    const RenderAttrib *b = other._attributes[slot]._attrib;
    if (a != b) {
      return a < b ? -1 : 1;
    }
  }

  return 0;
}

/**
 * This version of compare_to takes a slot mask that indicates which
 * attributes to include in the comparison.  Unlike compare_to, this method
 * compares the attributes by pointer.
 */
int RenderState::
compare_mask(const RenderState &other, SlotMask compare_mask) const {
  SlotMask mask = (_filled_slots | other._filled_slots) & compare_mask;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const RenderAttrib *a = _attributes[slot]._attrib;
    const RenderAttrib *b = other._attributes[slot]._attrib;
    if (a != b) {
      return a < b ? -1 : 1;
    }
    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return 0;
}

/**
 * Calls cull_callback() on each attrib.  If any attrib returns false,
 * interrupts the list and returns false immediately; otherwise, completes the
 * list and returns true.
 */
bool RenderState::
cull_callback(CullTraverser *trav, const CullTraverserData &data) const {
  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &attrib = _attributes[slot];
    nassertr(attrib._attrib != nullptr, false);
    if (!attrib._attrib->cull_callback(trav, data)) {
      return false;
    }

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return true;
}

/**
 * Returns a RenderState with one attribute set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib, int override) {
  RenderState *state = new RenderState;
  int slot = attrib->get_slot();
  state->_attributes[slot].set(attrib, override);
  state->_filled_slots.set_bit(slot);
  return return_new(state);
}
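
// A quick usage sketch (not from the original file): make() hands back a
// shared, reference-counted pointer, so building the same state twice can
// yield the very same object.  CullBinAttrib is just one example attribute:
//
//   CPT(RenderState) s1 = RenderState::make(CullBinAttrib::make("fixed", 10));
//   CPT(RenderState) s2 = RenderState::make(CullBinAttrib::make("fixed", 10));
//   // With uniquify-states in effect, s1 and s2 point to the same object.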

/**
 * Returns a RenderState with two attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with three attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2,
     const RenderAttrib *attrib3, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_attributes[attrib3->get_slot()].set(attrib3, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  state->_filled_slots.set_bit(attrib3->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with four attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2,
     const RenderAttrib *attrib3,
     const RenderAttrib *attrib4, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_attributes[attrib3->get_slot()].set(attrib3, override);
  state->_attributes[attrib4->get_slot()].set(attrib4, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  state->_filled_slots.set_bit(attrib3->get_slot());
  state->_filled_slots.set_bit(attrib4->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with five attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib *attrib1,
     const RenderAttrib *attrib2,
     const RenderAttrib *attrib3,
     const RenderAttrib *attrib4,
     const RenderAttrib *attrib5, int override) {
  RenderState *state = new RenderState;
  state->_attributes[attrib1->get_slot()].set(attrib1, override);
  state->_attributes[attrib2->get_slot()].set(attrib2, override);
  state->_attributes[attrib3->get_slot()].set(attrib3, override);
  state->_attributes[attrib4->get_slot()].set(attrib4, override);
  state->_attributes[attrib5->get_slot()].set(attrib5, override);
  state->_filled_slots.set_bit(attrib1->get_slot());
  state->_filled_slots.set_bit(attrib2->get_slot());
  state->_filled_slots.set_bit(attrib3->get_slot());
  state->_filled_slots.set_bit(attrib4->get_slot());
  state->_filled_slots.set_bit(attrib5->get_slot());
  return return_new(state);
}

/**
 * Returns a RenderState with n attributes set.
 */
CPT(RenderState) RenderState::
make(const RenderAttrib * const *attrib, int num_attribs, int override) {
  if (num_attribs == 0) {
    return _empty_state;
  }
  RenderState *state = new RenderState;
  for (int i = 0; i < num_attribs; i++) {
    int slot = attrib[i]->get_slot();
    state->_attributes[slot].set(attrib[i], override);
    state->_filled_slots.set_bit(slot);
  }
  return return_new(state);
}

/**
 * Returns a new RenderState object that represents the composition of this
 * state with the other state.
 *
 * The result of this operation is cached, and will be retained as long as
 * both this RenderState object and the other RenderState object continue to
 * exist.  Should one of them destruct, the cached entry will be removed, and
 * its pointer will be allowed to destruct as well.
 */
CPT(RenderState) RenderState::
compose(const RenderState *other) const {
  // This method isn't strictly const, because it updates the cache, but we
  // pretend that it is because it's only a cache which is transparent to the
  // rest of the interface.

  // We handle empty state (identity) as a trivial special case.
  if (is_empty()) {
    return other;
  }
  if (other->is_empty()) {
    return this;
  }

  if (!state_cache) {
    return do_compose(other);
  }

  LightReMutexHolder holder(*_states_lock);

  // Is this composition already cached?
  int index = _composition_cache.find(other);
  if (index != -1) {
    Composition &comp = ((RenderState *)this)->_composition_cache.modify_data(index);
    if (comp._result == nullptr) {
      // Well, it wasn't cached already, but we already had an entry (probably
      // created for the reverse direction), so use the same entry to store
      // the new result.
      CPT(RenderState) result = do_compose(other);
      comp._result = result;

      if (result != (const RenderState *)this) {
        // See the comments below about the need to up the reference count
        // only when the result is not the same as this.
        result->cache_ref();
      }
    }
    // Here's the cache!
    _cache_stats.inc_hits();
    return comp._result;
  }
  _cache_stats.inc_misses();

  // We need to make a new cache entry, both in this object and in the other
  // object.  We make both records so the other RenderState object will know
  // to delete the entry from this object when it destructs, and vice-versa.

  // The cache entry in this object is the only one that indicates the result;
  // the other will be NULL for now.
  CPT(RenderState) result = do_compose(other);

  _cache_stats.add_total_size(1);
  _cache_stats.inc_adds(_composition_cache.is_empty());

  ((RenderState *)this)->_composition_cache[other]._result = result;

  if (other != this) {
    _cache_stats.add_total_size(1);
    _cache_stats.inc_adds(other->_composition_cache.is_empty());
    ((RenderState *)other)->_composition_cache[this]._result = nullptr;
  }

  if (result != (const RenderState *)this) {
    // If the result of compose() is something other than this, explicitly
    // increment the reference count.  We have to be sure to decrement it
    // again later, when the composition entry is removed from the cache.
    result->cache_ref();

    // (If the result was just this again, we still store the result, but we
    // don't increment the reference count, since that would be a self-
    // referential leak.)
  }

  _cache_stats.maybe_report("RenderState");

  return result;
}
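
// A sketch of the caching behavior described above, using two standard
// attribs as examples (assumes state-cache and uniquify-states are enabled):
//
//   CPT(RenderState) a = RenderState::make(ColorAttrib::make_flat(LColor(1, 0, 0, 1)));
//   CPT(RenderState) b = RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_alpha));
//   CPT(RenderState) ab = a->compose(b);
//   // A second a->compose(b) is a cache hit and returns the same pointer.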

/**
 * Returns a new RenderState object that represents the composition of this
 * state's inverse with the other state.
 *
 * This is similar to compose(), but is particularly useful for computing the
 * relative state of a node as viewed from some other node.
 */
CPT(RenderState) RenderState::
invert_compose(const RenderState *other) const {
  // This method isn't strictly const, because it updates the cache, but we
  // pretend that it is because it's only a cache which is transparent to the
  // rest of the interface.

  // We handle empty state (identity) as a trivial special case.
  if (is_empty()) {
    return other;
  }
  // Unlike compose(), the case of other->is_empty() is not quite as trivial
  // for invert_compose().

  if (other == this) {
    // a->invert_compose(a) always produces identity.
    return _empty_state;
  }

  if (!state_cache) {
    return do_invert_compose(other);
  }

  LightReMutexHolder holder(*_states_lock);

  // Is this composition already cached?
  int index = _invert_composition_cache.find(other);
  if (index != -1) {
    Composition &comp = ((RenderState *)this)->_invert_composition_cache.modify_data(index);
    if (comp._result == nullptr) {
      // Well, it wasn't cached already, but we already had an entry (probably
      // created for the reverse direction), so use the same entry to store
      // the new result.
      CPT(RenderState) result = do_invert_compose(other);
      comp._result = result;

      if (result != (const RenderState *)this) {
        // See the comments below about the need to up the reference count
        // only when the result is not the same as this.
        result->cache_ref();
      }
    }
    // Here's the cache!
    _cache_stats.inc_hits();
    return comp._result;
  }
  _cache_stats.inc_misses();

  // We need to make a new cache entry, both in this object and in the other
  // object.  We make both records so the other RenderState object will know
  // to delete the entry from this object when it destructs, and vice-versa.

  // The cache entry in this object is the only one that indicates the result;
  // the other will be NULL for now.
  CPT(RenderState) result = do_invert_compose(other);

  _cache_stats.add_total_size(1);
  _cache_stats.inc_adds(_invert_composition_cache.is_empty());
  ((RenderState *)this)->_invert_composition_cache[other]._result = result;

  if (other != this) {
    _cache_stats.add_total_size(1);
    _cache_stats.inc_adds(other->_invert_composition_cache.is_empty());
    ((RenderState *)other)->_invert_composition_cache[this]._result = nullptr;
  }

  if (result != (const RenderState *)this) {
    // If the result of compose() is something other than this, explicitly
    // increment the reference count.  We have to be sure to decrement it
    // again later, when the composition entry is removed from the cache.
    result->cache_ref();

    // (If the result was just this again, we still store the result, but we
    // don't increment the reference count, since that would be a self-
    // referential leak.)
  }

  return result;
}
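
// Sketch of the intended use: given the net states accumulated down to two
// nodes (net_a and net_b are hypothetical variables), invert_compose()
// answers "what must be applied on top of net_a to arrive at net_b?":
//
//   CPT(RenderState) rel = net_a->invert_compose(net_b);
//   // Conceptually, net_a->compose(rel) reproduces net_b.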

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the new RenderAttrib added.  If there is already a RenderAttrib
 * with the same type, it is replaced (unless the override is lower).
 */
CPT(RenderState) RenderState::
add_attrib(const RenderAttrib *attrib, int override) const {
  int slot = attrib->get_slot();
  if (_filled_slots.get_bit(slot) &&
      _attributes[slot]._override > override) {
    // The existing attribute overrides.
    return this;
  }

  // The new attribute replaces.
  RenderState *new_state = new RenderState(*this);
  new_state->_attributes[slot].set(attrib, override);
  new_state->_filled_slots.set_bit(slot);
  return return_new(new_state);
}
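
// Sketch of the override rule above: an existing attribute with a higher
// override value survives an add_attrib() with a lower one.
//
//   CPT(RenderState) s = RenderState::make(ColorAttrib::make_flat(LColor(1, 0, 0, 1)), 10);
//   s = s->add_attrib(ColorAttrib::make_flat(LColor(0, 1, 0, 1)), 5);
//   // The red color remains: override 10 beats override 5.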

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the new RenderAttrib added.  If there is already a RenderAttrib
 * with the same type, it is replaced unconditionally.  The override is not
 * changed.
 */
CPT(RenderState) RenderState::
set_attrib(const RenderAttrib *attrib) const {
  RenderState *new_state = new RenderState(*this);
  int slot = attrib->get_slot();
  new_state->_attributes[slot]._attrib = attrib;
  new_state->_filled_slots.set_bit(slot);
  return return_new(new_state);
}

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the new RenderAttrib added.  If there is already a RenderAttrib
 * with the same type, it is replaced unconditionally.  The override is also
 * replaced unconditionally.
 */
CPT(RenderState) RenderState::
set_attrib(const RenderAttrib *attrib, int override) const {
  RenderState *new_state = new RenderState(*this);
  int slot = attrib->get_slot();
  new_state->_attributes[slot].set(attrib, override);
  new_state->_filled_slots.set_bit(slot);
  return return_new(new_state);
}

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with the indicated RenderAttrib removed.
 */
CPT(RenderState) RenderState::
remove_attrib(int slot) const {
  if (_attributes[slot]._attrib == nullptr) {
    // Already removed.
    return this;
  }

  // Will this bring us down to the empty state?
  if (_filled_slots.get_num_on_bits() == 1) {
    return _empty_state;
  }

  RenderState *new_state = new RenderState(*this);
  new_state->_attributes[slot].set(nullptr, 0);
  new_state->_filled_slots.clear_bit(slot);
  return return_new(new_state);
}

/**
 * Returns a new RenderState object that represents the same as the source
 * state, with all attributes' override values incremented (or decremented, if
 * negative) by the indicated amount.  If the override would drop below zero,
 * it is set to zero.
 */
CPT(RenderState) RenderState::
adjust_all_priorities(int adjustment) const {
  RenderState *new_state = new RenderState(*this);

  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    Attribute &attrib = new_state->_attributes[slot];
    nassertr(attrib._attrib != nullptr, this);
    attrib._override = std::max(attrib._override + adjustment, 0);

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return return_new(new_state);
}
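
// For example, adjust_all_priorities(-1) decays every override by one step
// without letting any drop below zero:
//
//   CPT(RenderState) decayed = state->adjust_all_priorities(-1);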

/**
 * This method overrides ReferenceCount::unref() to check whether the
 * remaining reference count is entirely in the cache, and if so, it checks
 * for and breaks a cycle in the cache involving this object.  This is
 * designed to prevent leaks from cyclical references within the cache.
 */
bool RenderState::
unref() const {
  if (garbage_collect_states || !state_cache) {
    // If we're not using the cache at all, or if we're relying on garbage
    // collection, just allow the pointer to unref normally.
    return ReferenceCount::unref();
  }

  // Here is the normal refcounting case, with a normal cache, and without
  // garbage collection in effect.  In this case we will pull the object out
  // of the cache when its reference count goes to 0.

  // We always have to grab the lock, since we will definitely need to be
  // holding it if we happen to drop the reference count to 0.  Having to grab
  // the lock at every call to unref() is a big limiting factor on
  // parallelization.
  LightReMutexHolder holder(*_states_lock);

  if (auto_break_cycles && uniquify_states) {
    if (get_cache_ref_count() > 0 &&
        get_ref_count() == get_cache_ref_count() + 1) {
      // If we are about to remove the one reference that is not in the cache,
      // leaving only references in the cache, then we need to check for a
      // cycle involving this RenderState and break it if it exists.
      ((RenderState *)this)->detect_and_break_cycles();
    }
  }

  if (ReferenceCount::unref()) {
    // The reference count is still nonzero.
    return true;
  }

  // The reference count has just reached zero.  Make sure the object is
  // removed from the global object pool, before anyone else finds it and
  // tries to ref it.
  ((RenderState *)this)->release_new();
  ((RenderState *)this)->remove_cache_pointers();

  return false;
}

/**
 *
 */
void RenderState::
output(ostream &out) const {
  out << "S:";
  if (is_empty()) {
    out << "(empty)";

  } else {
    out << "(";
    const char *sep = "";

    SlotMask mask = _filled_slots;
    int slot = mask.get_lowest_on_bit();
    while (slot >= 0) {
      const Attribute &attrib = _attributes[slot];
      nassertv(attrib._attrib != nullptr);
      out << sep << attrib._attrib->get_type();
      sep = " ";

      mask.clear_bit(slot);
      slot = mask.get_lowest_on_bit();
    }
    out << ")";
  }
}

/**
 *
 */
void RenderState::
write(ostream &out, int indent_level) const {
  if (is_empty()) {
    indent(out, indent_level)
      << "(empty)\n";
  }

  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &attrib = _attributes[slot];
    nassertv(attrib._attrib != nullptr);
    attrib._attrib->write(out, indent_level);

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }
}

/**
 * Returns the maximum priority number (sometimes called override) that may be
 * set on any node.  This may or may not be enforced, but the scene graph code
 * assumes that no priority numbers will be larger than this, and some effects
 * may not work properly if you use a larger number.
 */
int RenderState::
get_max_priority() {
  return 1000000000;
}
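
// Sketch: this cap is what client code would use to force an attribute to
// win over anything set at a lower level (node_path and attrib are
// hypothetical names, not part of this file):
//
//   node_path.set_attrib(attrib, RenderState::get_max_priority());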

/**
 * Returns the total number of unique RenderState objects allocated in the
 * world.  This will go up and down during normal operations.
 */
int RenderState::
get_num_states() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);
  return _states->get_num_entries();
}

/**
 * Returns the total number of RenderState objects that have been allocated
 * but have no references outside of the internal RenderState cache.
 *
 * A nonzero return value is not necessarily indicative of leaked references;
 * it is normal for two RenderState objects, both of which have references
 * held outside the cache, to have the result of their composition stored
 * within the cache.  This result will be retained within the cache until one
 * of the base RenderStates is released.
 *
 * Use list_cycles() to get an idea of the number of actual "leaked"
 * RenderState objects.
 */
int RenderState::
get_num_unused_states() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);

  // First, we need to count the number of times each RenderState object is
  // recorded in the cache.
  typedef pmap<const RenderState *, int> StateCount;
  StateCount state_count;

  size_t size = _states->get_num_entries();
  for (size_t si = 0; si < size; ++si) {
    const RenderState *state = _states->get_key(si);

    std::pair<StateCount::iterator, bool> ir =
      state_count.insert(StateCount::value_type(state, 1));
    if (!ir.second) {
      // If the above insert operation fails, then it's already in the
      // cache; increment its value.
      (*(ir.first)).second++;
    }

    size_t i;
    size_t cache_size = state->_composition_cache.get_num_entries();
    for (i = 0; i < cache_size; ++i) {
      const RenderState *result = state->_composition_cache.get_data(i)._result;
      if (result != nullptr && result != state) {
        // Here's a RenderState that's recorded in the cache.  Count it.
        std::pair<StateCount::iterator, bool> ir =
          state_count.insert(StateCount::value_type(result, 1));
        if (!ir.second) {
          // If the above insert operation fails, then it's already in the
          // cache; increment its value.
          (*(ir.first)).second++;
        }
      }
    }
    cache_size = state->_invert_composition_cache.get_num_entries();
    for (i = 0; i < cache_size; ++i) {
      const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
      if (result != nullptr && result != state) {
        std::pair<StateCount::iterator, bool> ir =
          state_count.insert(StateCount::value_type(result, 1));
        if (!ir.second) {
          (*(ir.first)).second++;
        }
      }
    }
  }

  // Now that we have the appearance count of each RenderState object, we can
  // tell which ones are unreferenced outside of the RenderState cache, by
  // comparing these to the reference counts.
  int num_unused = 0;

  StateCount::iterator sci;
  for (sci = state_count.begin(); sci != state_count.end(); ++sci) {
    const RenderState *state = (*sci).first;
    int count = (*sci).second;
    nassertr(count == state->get_cache_ref_count(), num_unused);
    nassertr(count <= state->get_ref_count(), num_unused);
    if (count == state->get_ref_count()) {
      num_unused++;

      if (pgraph_cat.is_debug()) {
        pgraph_cat.debug()
          << "Unused state: " << (void *)state << ":"
          << state->get_ref_count() << " =\n";
        state->write(pgraph_cat.debug(false), 2);
      }
    }
  }

  return num_unused;
}

/**
 * Empties the cache of composed RenderStates.  This makes every RenderState
 * forget what results when it is composed with other RenderStates.
 *
 * This will eliminate any RenderState objects that have been allocated but
 * have no references outside of the internal RenderState map.  It will not
 * eliminate RenderState objects that are still in use.
 *
 * Nowadays, this method should not be necessary, as reference-count cycles in
 * the composition cache should be automatically detected and broken.
 *
 * The return value is the number of RenderStates freed by this operation.
 */
int RenderState::
clear_cache() {
  if (_states == nullptr) {
    return 0;
  }
  LightReMutexHolder holder(*_states_lock);

  PStatTimer timer(_cache_update_pcollector);
  int orig_size = _states->get_num_entries();

  // First, we need to copy the entire set of states to a temporary vector,
  // reference-counting each object.  That way we can walk through the copy,
  // without fear of dereferencing (and deleting) the objects in the map as we
  // go.
  {
    typedef pvector< CPT(RenderState) > TempStates;
    TempStates temp_states;
    temp_states.reserve(orig_size);

    size_t size = _states->get_num_entries();
    for (size_t si = 0; si < size; ++si) {
      const RenderState *state = _states->get_key(si);
      temp_states.push_back(state);
    }

    // Now it's safe to walk through the list, destroying the cache within
    // each object as we go.  Nothing will be destructed till we're done.
    TempStates::iterator ti;
    for (ti = temp_states.begin(); ti != temp_states.end(); ++ti) {
      RenderState *state = (RenderState *)(*ti).p();

      size_t i;
      size_t cache_size = (int)state->_composition_cache.get_num_entries();
      for (i = 0; i < cache_size; ++i) {
        const RenderState *result = state->_composition_cache.get_data(i)._result;
        if (result != nullptr && result != state) {
          result->cache_unref();
          nassertr(result->get_ref_count() > 0, 0);
        }
      }
      _cache_stats.add_total_size(-(int)state->_composition_cache.get_num_entries());
      state->_composition_cache.clear();

      cache_size = (int)state->_invert_composition_cache.get_num_entries();
      for (i = 0; i < cache_size; ++i) {
        const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
        if (result != nullptr && result != state) {
          result->cache_unref();
          nassertr(result->get_ref_count() > 0, 0);
        }
      }
      _cache_stats.add_total_size(-(int)state->_invert_composition_cache.get_num_entries());
      state->_invert_composition_cache.clear();
    }

    // Once this block closes and the temp_states object goes away, all the
    // destruction will begin.  Anything whose reference was held only within
    // the various objects' caches will go away.
  }

  int new_size = _states->get_num_entries();
  return orig_size - new_size;
}

/**
 * Performs a garbage-collection cycle.  This must be called periodically if
 * garbage-collect-states is true to ensure that RenderStates get cleaned up
 * appropriately.  It does no harm to call it even if this variable is not
 * true, but there is probably no advantage in that case.
 *
 * This automatically calls RenderAttrib::garbage_collect() as well.
 */
int RenderState::
garbage_collect() {
  int num_attribs = RenderAttrib::garbage_collect();

  if (_states == nullptr || !garbage_collect_states) {
    return num_attribs;
  }

  LightReMutexHolder holder(*_states_lock);

  PStatTimer timer(_garbage_collect_pcollector);
  size_t orig_size = _states->get_num_entries();

  // How many elements to process this pass?
  size_t size = orig_size;
  size_t num_this_pass = std::max(0, int(size * garbage_collect_states_rate));
  if (num_this_pass <= 0) {
    return num_attribs;
  }

  bool break_and_uniquify = (auto_break_cycles && uniquify_states);

  size_t si = _garbage_index;
  if (si >= size) {
    si = 0;
  }

  num_this_pass = std::min(num_this_pass, size);
  size_t stop_at_element = (si + num_this_pass) % size;

  do {
    RenderState *state = (RenderState *)_states->get_key(si);
    if (break_and_uniquify) {
      if (state->get_cache_ref_count() > 0 &&
          state->get_ref_count() == state->get_cache_ref_count()) {
        // If we have removed all the references to this state not in the
        // cache, leaving only references in the cache, then we need to
        // check for a cycle involving this RenderState and break it if it
        // exists.
        state->detect_and_break_cycles();
      }
    }

    if (!state->unref_if_one()) {
      // This state has recently been unreffed to 1 (the one we added when
      // we stored it in the cache).  Now it's time to delete it.  This is
      // safe, because we're holding the _states_lock, so it's not possible
      // for some other thread to find the state in the cache and ref it
      // while we're doing this.  Also, we've just made sure to unref it to 0,
      // to ensure that another thread can't get it via a weak pointer.

      state->release_new();
      state->remove_cache_pointers();
      state->cache_unref_only();
      delete state;

      // When we removed it from the hash map, it swapped the last element
      // with the one we just removed.  So the current index contains one we
      // still need to visit.
      --size;
      --si;
      if (stop_at_element > 0) {
        --stop_at_element;
      }
    }

    si = (si + 1) % size;
  } while (si != stop_at_element);
  _garbage_index = si;

  nassertr(_states->get_num_entries() == size, 0);

#ifdef _DEBUG
  nassertr(_states->validate(), 0);
#endif

  // If we just cleaned up a lot of states, see if we can reduce the table in
  // size.  This will help reduce iteration overhead in the future.
  _states->consider_shrink_table();

  return (int)orig_size - (int)size + num_attribs;
}

/**
 * Completely empties the cache of state + gsg -> munger, for all states and
 * all gsg's.  Normally there is no need to empty this cache.
 */
void RenderState::
clear_munger_cache() {
  LightReMutexHolder holder(*_states_lock);

  size_t size = _states->get_num_entries();
  for (size_t si = 0; si < size; ++si) {
    RenderState *state = (RenderState *)(_states->get_key(si));
    state->_mungers.clear();
    state->_munged_states.clear();
    state->_last_mi = -1;
  }
}

/**
 * Detects all of the reference-count cycles in the cache and reports them to
 * standard output.
 *
 * These cycles may be inadvertently created when state compositions cycle
 * back to a starting point.  Nowadays, these cycles should be automatically
 * detected and broken, so this method should never list any cycles unless
 * there is a bug in that detection logic.
 *
 * The cycles listed here are not leaks in the strictest sense of the word,
 * since they can be reclaimed by a call to clear_cache(); but they will not
 * be reclaimed automatically.
 */
void RenderState::
list_cycles(ostream &out) {
  if (_states == nullptr) {
    return;
  }
  LightReMutexHolder holder(*_states_lock);

  typedef pset<const RenderState *> VisitedStates;
  VisitedStates visited;
  CompositionCycleDesc cycle_desc;

  size_t size = _states->get_num_entries();
  for (size_t si = 0; si < size; ++si) {
    const RenderState *state = _states->get_key(si);

    bool inserted = visited.insert(state).second;
    if (inserted) {
      ++_last_cycle_detect;
      if (r_detect_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
        // This state begins a cycle.
        CompositionCycleDesc::reverse_iterator csi;

        out << "\nCycle detected of length " << cycle_desc.size() + 1 << ":\n"
            << "state " << (void *)state << ":" << state->get_ref_count()
            << " =\n";
        state->write(out, 2);
        for (csi = cycle_desc.rbegin(); csi != cycle_desc.rend(); ++csi) {
          const CompositionCycleDescEntry &entry = (*csi);
          if (entry._inverted) {
            out << "invert composed with ";
          } else {
            out << "composed with ";
          }
          out << (const void *)entry._obj << ":" << entry._obj->get_ref_count()
              << " " << *entry._obj << "\n"
              << "produces " << (const void *)entry._result << ":"
              << entry._result->get_ref_count() << " =\n";
          entry._result->write(out, 2);
          visited.insert(entry._result);
        }

        cycle_desc.clear();
      } else {
        ++_last_cycle_detect;
        if (r_detect_reverse_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
          // This state begins a cycle.
          CompositionCycleDesc::iterator csi;

          out << "\nReverse cycle detected of length " << cycle_desc.size() + 1 << ":\n"
              << "state ";
          for (csi = cycle_desc.begin(); csi != cycle_desc.end(); ++csi) {
            const CompositionCycleDescEntry &entry = (*csi);
            out << (const void *)entry._result << ":"
                << entry._result->get_ref_count() << " =\n";
            entry._result->write(out, 2);
            out << (const void *)entry._obj << ":"
                << entry._obj->get_ref_count() << " =\n";
            entry._obj->write(out, 2);
            visited.insert(entry._result);
          }
          out << (void *)state << ":"
              << state->get_ref_count() << " =\n";
          state->write(out, 2);

          cycle_desc.clear();
        }
      }
    }
  }
}

/**
 * Lists all of the RenderStates in the cache to the output stream, one per
 * line.  This can be quite a lot of output if the cache is large, so be
 * prepared.
 */
void RenderState::
list_states(ostream &out) {
  if (_states == nullptr) {
    out << "0 states:\n";
    return;
  }
  LightReMutexHolder holder(*_states_lock);

  size_t size = _states->get_num_entries();
  out << size << " states:\n";
  for (size_t si = 0; si < size; ++si) {
    const RenderState *state = _states->get_key(si);
    state->write(out, 2);
  }
}

/**
 * Ensures that the cache is still stored in sorted order, and that none of
 * the cache elements have been inadvertently deleted.  Returns true if so,
 * false if there is a problem (which implies someone has modified one of the
 * supposedly-const RenderState objects).
 */
bool RenderState::
validate_states() {
  if (_states == nullptr) {
    return true;
  }

  PStatTimer timer(_state_validate_pcollector);

  LightReMutexHolder holder(*_states_lock);
  if (_states->is_empty()) {
    return true;
  }

  if (!_states->validate()) {
    pgraph_cat.error()
      << "RenderState::_states cache is invalid!\n";
    return false;
  }

  size_t size = _states->get_num_entries();
  size_t si = 0;
  nassertr(si < size, false);
  nassertr(_states->get_key(si)->get_ref_count() >= 0, false);
  size_t snext = si;
  ++snext;
  while (snext < size) {
    nassertr(_states->get_key(snext)->get_ref_count() >= 0, false);
    const RenderState *ssi = _states->get_key(si);
    const RenderState *ssnext = _states->get_key(snext);
    int c = ssi->compare_to(*ssnext);
    int ci = ssnext->compare_to(*ssi);
    if ((ci < 0) != (c > 0) ||
        (ci > 0) != (c < 0) ||
        (ci == 0) != (c == 0)) {
      pgraph_cat.error()
        << "RenderState::compare_to() not defined properly!\n";
      pgraph_cat.error(false)
        << "(a, b): " << c << "\n";
      pgraph_cat.error(false)
        << "(b, a): " << ci << "\n";
      ssi->write(pgraph_cat.error(false), 2);
      ssnext->write(pgraph_cat.error(false), 2);
      return false;
    }
    si = snext;
    ++snext;
  }

  return true;
}

/**
 * Returns the union of the Geom::GeomRendering bits that will be required
 * once this RenderState is applied to a geom which includes the indicated
 * geom_rendering bits.
 */
int RenderState::
get_geom_rendering(int geom_rendering) const {
  const RenderModeAttrib *render_mode;
  const TexGenAttrib *tex_gen;
  const TexMatrixAttrib *tex_matrix;

  if (get_attrib(render_mode)) {
    geom_rendering = render_mode->get_geom_rendering(geom_rendering);
  }
  if (get_attrib(tex_gen)) {
    geom_rendering = tex_gen->get_geom_rendering(geom_rendering);
  }
  if (get_attrib(tex_matrix)) {
    geom_rendering = tex_matrix->get_geom_rendering(geom_rendering);
  }

  return geom_rendering;
}

/**
 * Intended to be called by CullBinManager::remove_bin(), this informs all the
 * RenderStates in the world to remove the indicated bin_index from their
 * cache if it has been cached.
 */
void RenderState::
bin_removed(int bin_index) {
  // Do something here.
  nassertv(false);
}

/**
 * Returns true if the _filled_slots bitmask is consistent with the table of
 * RenderAttrib pointers, false otherwise.
 */
bool RenderState::
validate_filled_slots() const {
  SlotMask mask;

  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
  int max_slots = reg->get_max_slots();
  for (int slot = 1; slot < max_slots; ++slot) {
    const Attribute &attribute = _attributes[slot];
    if (attribute._attrib != nullptr) {
      mask.set_bit(slot);
    }
  }

  return (mask == _filled_slots);
}

/**
 * Computes a suitable hash value for phash_map.
 */
void RenderState::
do_calc_hash() {
  _hash = 0;

  SlotMask mask = _filled_slots;
  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &attrib = _attributes[slot];
    nassertv(attrib._attrib != nullptr);
    _hash = pointer_hash::add_hash(_hash, attrib._attrib);
    _hash = int_hash::add_hash(_hash, attrib._override);

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  _flags |= F_hash_known;
}

/**
 * This function is used to share a common RenderState pointer for all
 * equivalent RenderState objects.
 *
 * This is different from return_unique() in that it does not actually
 * guarantee a unique pointer, unless uniquify-states is set.
 */
CPT(RenderState) RenderState::
return_new(RenderState *state) {
  nassertr(state != nullptr, state);

  // Make sure we don't have anything in the 0 slot.  If we did, that would
  // indicate an uninitialized slot number.
#ifndef NDEBUG
  if (state->_attributes[0]._attrib != nullptr) {
    const RenderAttrib *attrib = state->_attributes[0]._attrib;
    if (attrib->get_type() == TypeHandle::none()) {
      ((RenderAttrib *)attrib)->force_init_type();
      pgraph_cat->error()
        << "Uninitialized RenderAttrib type: " << attrib->get_type()
        << "\n";

    } else {
      static pset<TypeHandle> already_reported;
      if (already_reported.insert(attrib->get_type()).second) {
        pgraph_cat->error()
          << attrib->get_type() << " did not initialize its slot number.\n";
      }
    }
  }
#endif
  state->_attributes[0]._attrib = nullptr;
  state->_filled_slots.clear_bit(0);

#ifndef NDEBUG
  nassertr(state->validate_filled_slots(), state);
#endif

  if (!uniquify_states && !state->is_empty()) {
    return state;
  }

  return return_unique(state);
}

/**
 * This function is used to share a common RenderState pointer for all
 * equivalent RenderState objects.
 *
 * See the similar logic in RenderAttrib.  The idea is to create a new
 * RenderState object and pass it through this function, which will share the
 * pointer with a previously-created RenderState object if it is equivalent.
 */
CPT(RenderState) RenderState::
return_unique(RenderState *state) {
  nassertr(state != nullptr, nullptr);

  if (!state_cache) {
    return state;
  }

#ifndef NDEBUG
  if (paranoid_const) {
    nassertr(validate_states(), state);
  }
#endif

  LightReMutexHolder holder(*_states_lock);

  if (state->_saved_entry != -1) {
    // This state is already in the cache.
    // nassertr(_states->find(state) == state->_saved_entry, pt_state);
    return state;
  }

  // Ensure each of the individual attrib pointers has been uniquified before
  // we add the state to the cache.
  if (!uniquify_attribs && !state->is_empty()) {
    SlotMask mask = state->_filled_slots;
    int slot = mask.get_lowest_on_bit();
    while (slot >= 0) {
      Attribute &attrib = state->_attributes[slot];
      nassertd(attrib._attrib != nullptr) continue;
      attrib._attrib = attrib._attrib->get_unique();
      mask.clear_bit(slot);
      slot = mask.get_lowest_on_bit();
    }
  }

  int si = _states->find(state);
  if (si != -1) {
    // There's an equivalent state already in the set.  Return it.  The state
    // that was passed may be newly created and therefore may not be
    // automatically deleted.  Do that if necessary.
    if (state->get_ref_count() == 0) {
      delete state;
    }
    return _states->get_key(si);
  }

  // Not already in the set; add it.
  if (garbage_collect_states) {
    // If we'll be garbage collecting states explicitly, we'll increment the
    // reference count when we store it in the cache, so that it won't be
    // deleted while it's in it.
    state->cache_ref();
  }
  si = _states->store(state, nullptr);

  // Save the index and return the input state.
  state->_saved_entry = si;
  return state;
}

/**
 * The private implementation of compose(); this actually composes two
 * RenderStates, without bothering with the cache.
 */
CPT(RenderState) RenderState::
do_compose(const RenderState *other) const {
  PStatTimer timer(_state_compose_pcollector);

  RenderState *new_state = new RenderState;

  SlotMask mask = _filled_slots | other->_filled_slots;
  new_state->_filled_slots = mask;

  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &a = _attributes[slot];
    const Attribute &b = other->_attributes[slot];
    Attribute &result = new_state->_attributes[slot];

    if (a._attrib == nullptr) {
      nassertr(b._attrib != nullptr, this);
      // B wins.
      result = b;

    } else if (b._attrib == nullptr) {
      // A wins.
      result = a;

    } else if (b._override < a._override) {
      // A, the higher RenderAttrib, overrides.
      result = a;

    } else if (a._override < b._override &&
               a._attrib->lower_attrib_can_override()) {
      // B, the higher RenderAttrib, overrides.  This is a special case;
      // normally, a lower RenderAttrib does not override a higher one, even
      // if it has a higher override value.  But certain kinds of
      // RenderAttribs redefine lower_attrib_can_override() to return true,
      // allowing this override.
      result = b;

    } else {
      // Either they have the same override value, or B is higher.  In either
      // case, the result is the composition of the two, with B's override
      // value.
      result.set(a._attrib->compose(b._attrib), b._override);
    }

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }

  return return_new(new_state);
}
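
// A worked instance of the rules above, using ColorAttrib as the example
// slot:
//
//   A = { ColorAttrib(red),   override 2 }
//   B = { ColorAttrib(green), override 1 }
//   A->compose(B)  ->  ColorAttrib(red), override 2    (higher override wins)
//
//   A = { ColorAttrib(red),   override 0 }
//   B = { ColorAttrib(green), override 0 }
//   A->compose(B)  ->  red.compose(green), override 0  (equal overrides
//                      compose; for a simple attrib like ColorAttrib the
//                      later attribute replaces the earlier one)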

/**
 * The private implementation of invert_compose().
 */
CPT(RenderState) RenderState::
do_invert_compose(const RenderState *other) const {
  PStatTimer timer(_state_invert_pcollector);

  RenderState *new_state = new RenderState;

  SlotMask mask = _filled_slots | other->_filled_slots;
  new_state->_filled_slots = mask;

  int slot = mask.get_lowest_on_bit();
  while (slot >= 0) {
    const Attribute &a = _attributes[slot];
    const Attribute &b = other->_attributes[slot];
    Attribute &result = new_state->_attributes[slot];

    if (a._attrib == nullptr) {
      nassertr(b._attrib != nullptr, this);
      // B wins.
      result = b;

    } else if (b._attrib == nullptr) {
      // A wins.  Invert it.
      RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
      result.set(a._attrib->invert_compose(reg->get_slot_default(slot)), 0);

    } else {
      // Both are good.  (Overrides are not used in invert_compose.)  Compose.
      result.set(a._attrib->invert_compose(b._attrib), 0);
    }

    mask.clear_bit(slot);
    slot = mask.get_lowest_on_bit();
  }
  return return_new(new_state);
}

/**
 * Detects whether there is a cycle in the cache that begins with this state.
 * If any are detected, breaks them by removing this state from the cache.
 */
void RenderState::
detect_and_break_cycles() {
  PStatTimer timer(_state_break_cycles_pcollector);

  ++_last_cycle_detect;
  if (r_detect_cycles(this, this, 1, _last_cycle_detect, nullptr)) {
    // Ok, we have a cycle.  This will be a leak unless we break the cycle by
    // freeing the cache on this object.
    if (pgraph_cat.is_debug()) {
      pgraph_cat.debug()
        << "Breaking cycle involving " << (*this) << "\n";
    }

    ((RenderState *)this)->remove_cache_pointers();
  } else {
    ++_last_cycle_detect;
    if (r_detect_reverse_cycles(this, this, 1, _last_cycle_detect, nullptr)) {
      if (pgraph_cat.is_debug()) {
        pgraph_cat.debug()
          << "Breaking cycle involving " << (*this) << "\n";
      }

      ((RenderState *)this)->remove_cache_pointers();
    }
  }
}

/**
 * Detects whether there is a cycle in the cache that begins with the
 * indicated state.  Returns true if at least one cycle is found, false if
 * this state is not part of any cycles.  If a cycle is found and cycle_desc
 * is not NULL, then cycle_desc is filled in with the list of the steps of the
 * cycle, in reverse order.
 */
bool RenderState::
r_detect_cycles(const RenderState *start_state,
                const RenderState *current_state,
                int length, UpdateSeq this_seq,
                RenderState::CompositionCycleDesc *cycle_desc) {
  if (current_state->_cycle_detect == this_seq) {
    // We've already seen this state; therefore, we've found a cycle.

    // However, we only care about cycles that return to the starting state
    // and involve more than two steps.  If only one or two nodes are
    // involved, it doesn't represent a memory leak, so no problem there.
    return (current_state == start_state && length > 2);
  }
  ((RenderState *)current_state)->_cycle_detect = this_seq;

  size_t i;
  size_t cache_size = current_state->_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *result = current_state->_composition_cache.get_data(i)._result;
    if (result != nullptr) {
      if (r_detect_cycles(start_state, result, length + 1,
                          this_seq, cycle_desc)) {
        // Cycle detected.
        if (cycle_desc != nullptr) {
          const RenderState *other = current_state->_composition_cache.get_key(i);
          CompositionCycleDescEntry entry(other, result, false);
          cycle_desc->push_back(entry);
        }
        return true;
      }
    }
  }

  cache_size = current_state->_invert_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *result = current_state->_invert_composition_cache.get_data(i)._result;
    if (result != nullptr) {
      if (r_detect_cycles(start_state, result, length + 1,
                          this_seq, cycle_desc)) {
        // Cycle detected.
        if (cycle_desc != nullptr) {
          const RenderState *other = current_state->_invert_composition_cache.get_key(i);
          CompositionCycleDescEntry entry(other, result, true);
          cycle_desc->push_back(entry);
        }
        return true;
      }
    }
  }

  // No cycle detected.
  return false;
}

/**
 * Works the same as r_detect_cycles, but checks for cycles in the reverse
 * direction along the cache chain.  (A cycle may appear in either direction,
 * and we must check both.)
 */
bool RenderState::
r_detect_reverse_cycles(const RenderState *start_state,
                        const RenderState *current_state,
                        int length, UpdateSeq this_seq,
                        RenderState::CompositionCycleDesc *cycle_desc) {
  if (current_state->_cycle_detect == this_seq) {
    // We've already seen this state; therefore, we've found a cycle.

    // However, we only care about cycles that return to the starting state
    // and involve more than two steps.  If only one or two nodes are
    // involved, it doesn't represent a memory leak, so no problem there.
    return (current_state == start_state && length > 2);
  }
  ((RenderState *)current_state)->_cycle_detect = this_seq;

  size_t i;
  size_t cache_size = current_state->_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *other = current_state->_composition_cache.get_key(i);
    if (other != current_state) {
      int oi = other->_composition_cache.find(current_state);
      nassertr(oi != -1, false);

      const RenderState *result = other->_composition_cache.get_data(oi)._result;
      if (result != nullptr) {
        if (r_detect_reverse_cycles(start_state, result, length + 1,
                                    this_seq, cycle_desc)) {
          // Cycle detected.
          if (cycle_desc != nullptr) {
            const RenderState *other = current_state->_composition_cache.get_key(i);
            CompositionCycleDescEntry entry(other, result, false);
            cycle_desc->push_back(entry);
          }
          return true;
        }
      }
    }
  }

  cache_size = current_state->_invert_composition_cache.get_num_entries();
  for (i = 0; i < cache_size; ++i) {
    const RenderState *other = current_state->_invert_composition_cache.get_key(i);
    if (other != current_state) {
      int oi = other->_invert_composition_cache.find(current_state);
      nassertr(oi != -1, false);

      const RenderState *result = other->_invert_composition_cache.get_data(oi)._result;
      if (result != nullptr) {
        if (r_detect_reverse_cycles(start_state, result, length + 1,
                                    this_seq, cycle_desc)) {
          // Cycle detected.
          if (cycle_desc != nullptr) {
            const RenderState *other = current_state->_invert_composition_cache.get_key(i);
            CompositionCycleDescEntry entry(other, result, false);
            cycle_desc->push_back(entry);
          }
          return true;
        }
      }
    }
  }

  // No cycle detected.
  return false;
}

/**
 * The inverse of return_new(): this releases this object from the global
 * RenderState table.
 *
 * You must already be holding _states_lock before you call this method.
 */
void RenderState::
release_new() {
  nassertv(_states_lock->debug_is_locked());

  if (_saved_entry != -1) {
    _saved_entry = -1;
    nassertv_always(_states->remove(this));
  }
}
1628
1629/**
1630 * Remove all pointers within the cache from and to this particular
1631 * RenderState. The pointers to this object may be scattered around in the
1632 * various CompositionCaches from other RenderState objects.
1633 *
1634 * You must already be holding _states_lock before you call this method.
1635 */
1636void RenderState::
1637remove_cache_pointers() {
1638 nassertv(_states_lock->debug_is_locked());
1639
1640 // Fortunately, since we added CompositionCache records in pairs, we know
1641 // exactly the set of RenderState objects that have us in their cache: it's
1642 // the same set of RenderState objects that we have in our own cache.
1643
1644/*
1645 * We do need to put considerable thought into this loop, because as we clear
1646 * out cache entries we'll cause other RenderState objects to destruct, which
1647 * could cause things to get pulled out of our own _composition_cache map. We
1648 * want to allow this (so that we don't encounter any just-destructed pointers
1649 * in our cache), but we don't want to get bitten by this cascading effect.
1650 * Instead of walking through the map from beginning to end, therefore, we
1651 * just pull out the first one each time, and erase it.
1652 */
1653
1654#ifdef DO_PSTATS
1655 if (_composition_cache.is_empty() && _invert_composition_cache.is_empty()) {
1656 return;
1657 }
1658 PStatTimer timer(_cache_update_pcollector);
1659#endif // DO_PSTATS
1660
1661 // There are lots of ways to do this loop wrong. Be very careful if you
1662 // need to modify it for any reason.
1663 size_t i = 0;
1664 while (!_composition_cache.is_empty()) {
1665 // It is possible that the "other" RenderState object is currently within
1666 // its own destructor. We therefore can't use a PT() to hold its pointer;
1667 // that could end up calling its destructor twice. Fortunately, we don't
1668 // need to hold its reference count to ensure it doesn't destruct while we
1669 // process this loop; as long as we ensure that no *other* RenderState
1670 // objects destruct, there will be no reason for that one to.
1671 RenderState *other = (RenderState *)_composition_cache.get_key(i);
1672
1673 // We hold a copy of the composition result so we can dereference it
1674 // later.
1675 Composition comp = _composition_cache.get_data(i);
1676
1677 // Now we can remove the element from our cache. We do this now, rather
1678 // than later, before any other RenderState objects have had a chance to
1679 // destruct, so we are confident that our iterator is still valid.
1680 _composition_cache.remove_element(i);
1681 _cache_stats.add_total_size(-1);
1682 _cache_stats.inc_dels();
1683
1684 if (other != this) {
1685 int oi = other->_composition_cache.find(this);
1686
1687 // We may or may not still be listed in the other's cache (it might be
1688 // halfway through pulling entries out, from within its own destructor).
1689 if (oi != -1) {
1690 // Hold a copy of the other composition result, too.
1691 Composition ocomp = other->_composition_cache.get_data(oi);
1692
1693 other->_composition_cache.remove_element(oi);
1694 _cache_stats.add_total_size(-1);
1695 _cache_stats.inc_dels();
1696
1697 // It's finally safe to let our held pointers go away. This may have
1698 // cascading effects as other RenderState objects are destructed, but
1699 // there will be no harm done if they destruct now.
1700 if (ocomp._result != nullptr && ocomp._result != other) {
1701 cache_unref_delete(ocomp._result);
1702 }
1703 }
1704 }
1705
1706 // It's finally safe to let our held pointers go away. (See comment
1707 // above.)
1708 if (comp._result != nullptr && comp._result != this) {
1709 cache_unref_delete(comp._result);
1710 }
1711 }
1712
1713 // A similar bit of code for the invert cache.
1714 i = 0;
1715 while (!_invert_composition_cache.is_empty()) {
1716 RenderState *other = (RenderState *)_invert_composition_cache.get_key(i);
1717 nassertv(other != this);
1718 Composition comp = _invert_composition_cache.get_data(i);
1719 _invert_composition_cache.remove_element(i);
1720 _cache_stats.add_total_size(-1);
1721 _cache_stats.inc_dels();
1722 if (other != this) {
1723 int oi = other->_invert_composition_cache.find(this);
1724 if (oi != -1) {
1725 Composition ocomp = other->_invert_composition_cache.get_data(oi);
1726 other->_invert_composition_cache.remove_element(oi);
1727 _cache_stats.add_total_size(-1);
1728 _cache_stats.inc_dels();
1729 if (ocomp._result != nullptr && ocomp._result != other) {
1730 cache_unref_delete(ocomp._result);
1731 }
1732 }
1733 }
1734 if (comp._result != nullptr && comp._result != this) {
1735 cache_unref_delete(comp._result);
1736 }
1737 }
1738}
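// The loop above generalizes to any cache in which erasing one entry can
// trigger destructors that erase further entries from the same table:
// never hold an iterator across the erase; re-fetch element 0 until the
// table drains. A minimal sketch of the pattern (Cache, Key, Value and
// release() are hypothetical stand-ins, not part of this file):
//
//   while (!cache.is_empty()) {
//     Key *other = cache.get_key(0);    // peek at the first entry
//     Value held = cache.get_data(0);   // copy out what must be released
//     cache.remove_element(0);          // erase before any cascade runs
//     release(held);                    // may recursively shrink 'cache'
//   }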
1739
1740/**
1741 * This is the private implementation of get_bin_index() and get_draw_order().
1742 */
1743void RenderState::
1744determine_bin_index() {
1745 LightMutexHolder holder(_lock);
1746 if ((_flags & F_checked_bin_index) != 0) {
1747 // Someone else checked it first.
1748 return;
1749 }
1750
1751 std::string bin_name;
1752 _draw_order = 0;
1753
1754 const CullBinAttrib *bin;
1755 if (get_attrib(bin)) {
1756 bin_name = bin->get_bin_name();
1757 _draw_order = bin->get_draw_order();
1758 }
1759
1760 if (bin_name.empty()) {
 1761 // No explicit bin is specified; put it in the default bin, either
1762 // opaque or transparent, based on the transparency setting.
1763 bin_name = "opaque";
1764
1765 const TransparencyAttrib *transparency;
1766 if (get_attrib(transparency)) {
1767 switch (transparency->get_mode()) {
1768 case TransparencyAttrib::M_alpha:
1769 case TransparencyAttrib::M_premultiplied_alpha:
1770 case TransparencyAttrib::M_dual:
1771 // These transparency modes require special back-to-front sorting.
1772 bin_name = "transparent";
1773 break;
1774
1775 default:
1776 break;
1777 }
1778 }
1779 }
1780
 1781 CullBinManager *bin_manager = CullBinManager::get_global_ptr();
 1782 _bin_index = bin_manager->find_bin(bin_name);
1783 if (_bin_index == -1) {
1784 pgraph_cat.warning()
1785 << "No bin named " << bin_name << "; creating default bin.\n";
1786 _bin_index = bin_manager->add_bin(bin_name, CullBinManager::BT_unsorted, 0);
1787 }
1788 _flags |= F_checked_bin_index;
1789}
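// At the scene-graph level, the selection above can be driven from the
// public NodePath API; a brief sketch (np is assumed to be a NodePath
// already attached to the scene):
//
//   // Explicit bin: get_bin_index() resolves "fixed" via CullBinManager.
//   np.set_attrib(CullBinAttrib::make("fixed", 10));
//
//   // No CullBinAttrib, but an alpha transparency mode: the state falls
//   // into the "transparent" bin rather than the default "opaque" bin.
//   np.set_transparency(TransparencyAttrib::M_alpha);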
1790
1791/**
1792 * This is the private implementation of has_cull_callback().
1793 */
1794void RenderState::
1795determine_cull_callback() {
1796 LightMutexHolder holder(_lock);
1797 if ((_flags & F_checked_cull_callback) != 0) {
1798 // Someone else checked it first.
1799 return;
1800 }
1801
1802 SlotMask mask = _filled_slots;
1803 int slot = mask.get_lowest_on_bit();
1804 while (slot >= 0) {
1805 const Attribute &attrib = _attributes[slot];
1806 nassertv(attrib._attrib != nullptr);
1807 if (attrib._attrib->has_cull_callback()) {
1808 _flags |= F_has_cull_callback;
1809 break;
1810 }
1811
1812 mask.clear_bit(slot);
1813 slot = mask.get_lowest_on_bit();
1814 }
1815
1816 _flags |= F_checked_cull_callback;
1817}
1818
1819/**
1820 * Fills up the state with all of the default attribs.
1821 */
1822void RenderState::
1823fill_default() {
 1824 RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
 1825 int num_slots = reg->get_num_slots();
1826 for (int slot = 1; slot < num_slots; ++slot) {
1827 _attributes[slot].set(reg->get_slot_default(slot), 0);
1828 _filled_slots.set_bit(slot);
1829 }
1830}
1831
1832/**
1833 * Moves the RenderState object from one PStats category to another, so that
1834 * we can track in PStats how many pointers are held by nodes, and how many
1835 * are held in the cache only.
1836 */
1837void RenderState::
1838update_pstats(int old_referenced_bits, int new_referenced_bits) {
1839#ifdef DO_PSTATS
1840 if ((old_referenced_bits & R_node) != 0) {
1841 _node_counter.sub_level(1);
1842 } else if ((old_referenced_bits & R_cache) != 0) {
1843 _cache_counter.sub_level(1);
1844 }
1845 if ((new_referenced_bits & R_node) != 0) {
1846 _node_counter.add_level(1);
1847 } else if ((new_referenced_bits & R_cache) != 0) {
1848 _cache_counter.add_level(1);
1849 }
1850#endif // DO_PSTATS
1851}
1852
1853/**
1854 * Make sure the global _states map is allocated. This only has to be done
1855 * once. We could make this map static, but then we run into problems if
1856 * anyone creates a RenderState object at static init time; it also seems to
1857 * cause problems when the Panda shared library is unloaded at application
1858 * exit time.
1859 */
 1860 void RenderState::
 1861 init_states() {
1862 _states = new States;
1863
1864 // TODO: we should have a global Panda mutex to allow us to safely create
1865 // _states_lock without a startup race condition. For the meantime, this is
1866 // OK because we guarantee that this method is called at static init time,
1867 // presumably when there is still only one thread in the world.
1868 _states_lock = new LightReMutex("RenderState::_states_lock");
1869 _cache_stats.init();
1871
1872 // Initialize the empty state object as well. It is used so often that it
1873 // is declared globally, and lives forever.
1874 RenderState *state = new RenderState;
1875 state->local_object();
1876 state->cache_ref_only();
1877 state->_saved_entry = _states->store(state, nullptr);
1878 _empty_state = state;
1879}
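// The empty state allocated here is the one handed out by
// RenderState::make_empty(); since it is a local_object() that is never
// destructed, callers may hold it indefinitely. A minimal sketch:
//
//   CPT(RenderState) empty = RenderState::make_empty();
//   nassertv(empty->is_empty());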
1880
1881/**
1882 * Tells the BamReader how to create objects of type RenderState.
1883 */
 1884 void RenderState::
 1885 register_with_read_factory() {
 1886 BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
1887}
1888
1889/**
1890 * Writes the contents of this object to the datagram for shipping out to a
1891 * Bam file.
1892 */
 1893 void RenderState::
 1894 write_datagram(BamWriter *manager, Datagram &dg) {
1895 TypedWritable::write_datagram(manager, dg);
1896
1897 int num_attribs = _filled_slots.get_num_on_bits();
1898 nassertv(num_attribs == (int)(uint16_t)num_attribs);
1899 dg.add_uint16(num_attribs);
1900
1901 // **** We should smarten up the writing of the override number--most of the
1902 // time these will all be zero.
1903 SlotMask mask = _filled_slots;
1904 int slot = mask.get_lowest_on_bit();
1905 while (slot >= 0) {
1906 const Attribute &attrib = _attributes[slot];
1907 nassertv(attrib._attrib != nullptr);
1908 manager->write_pointer(dg, attrib._attrib);
1909 dg.add_int32(attrib._override);
1910
1911 mask.clear_bit(slot);
1912 slot = mask.get_lowest_on_bit();
1913 }
1914}
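// For reference, the record emitted above has the following shape (an
// informal sketch of the layout, not a formal spec; attrib pointers are
// written indirectly through the BamWriter's object table):
//
//   uint16 num_attribs
//   repeated num_attribs times, in ascending slot order:
//     pointer  attrib     (via manager->write_pointer)
//     int32    override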
1915
1916/**
1917 * Receives an array of pointers, one for each time manager->read_pointer()
1918 * was called in fillin(). Returns the number of pointers processed.
1919 */
 1920 int RenderState::
 1921 complete_pointers(TypedWritable **p_list, BamReader *manager) {
1922 int pi = TypedWritable::complete_pointers(p_list, manager);
1923
1924 int num_attribs = 0;
1925
 1926 RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
 1927 for (size_t i = 0; i < (*_read_overrides).size(); ++i) {
1928 int override = (*_read_overrides)[i];
1929
1930 RenderAttrib *attrib = DCAST(RenderAttrib, p_list[pi++]);
1931 if (attrib != nullptr) {
1932 int slot = attrib->get_slot();
1933 if (slot > 0 && slot < reg->get_max_slots()) {
1934 _attributes[slot].set(attrib, override);
1935 _filled_slots.set_bit(slot);
1936 ++num_attribs;
1937 }
1938 }
1939 }
1940
1941 delete _read_overrides;
1942 _read_overrides = nullptr;
1943
1944 return pi;
1945}
1946
1947/**
1948 * Called immediately after complete_pointers(), this gives the object a
1949 * chance to adjust its own pointer if desired. Most objects don't change
1950 * pointers after completion, but some need to.
1951 *
1952 * Once this function has been called, the old pointer will no longer be
1953 * accessed.
1954 */
 1955 TypedWritable *RenderState::
 1956 change_this(TypedWritable *old_ptr, BamReader *manager) {
1957 // First, uniquify the pointer.
1958 RenderState *state = DCAST(RenderState, old_ptr);
1959 CPT(RenderState) pointer = return_unique(state);
1960
1961 // But now we have a problem, since we have to hold the reference count and
1962 // there's no way to return a TypedWritable while still holding the
1963 // reference count! We work around this by explicitly upping the count, and
1964 // also setting a finalize() callback to down it later.
1965 if (pointer == state) {
1966 pointer->ref();
1967 manager->register_finalize(state);
1968 }
1969
1970 // We have to cast the pointer back to non-const, because the bam reader
1971 // expects that.
1972 return (RenderState *)pointer.p();
1973}
1974
1975/**
1976 * Called by the BamReader to perform any final actions needed for setting up
1977 * the object after all objects have been read and all pointers have been
1978 * completed.
1979 */
 1980 void RenderState::
 1981 finalize(BamReader *manager) {
 1982 // Unref the pointer that we explicitly reffed in change_this().
1983 unref();
1984
1985 // We should never get back to zero after unreffing our own count, because
1986 // we expect to have been stored in a pointer somewhere. If we do get to
1987 // zero, it's a memory leak; the way to avoid this is to call unref_delete()
1988 // above instead of unref(), but this is dangerous to do from within a
1989 // virtual function.
1990 nassertv(get_ref_count() != 0);
1991}
1992
1993/**
1994 * This function is called by the BamReader's factory when a new object of
1995 * type RenderState is encountered in the Bam file. It should create the
1996 * RenderState and extract its information from the file.
1997 */
1998TypedWritable *RenderState::
1999make_from_bam(const FactoryParams &params) {
2000 RenderState *state = new RenderState;
2001 DatagramIterator scan;
2002 BamReader *manager;
2003
2004 parse_params(params, scan, manager);
2005 state->fillin(scan, manager);
2006 manager->register_change_this(change_this, state);
2007
2008 return state;
2009}
2010
2011/**
2012 * This internal function is called by make_from_bam to read in all of the
2013 * relevant data from the BamFile for the new RenderState.
2014 */
2015void RenderState::
2016fillin(DatagramIterator &scan, BamReader *manager) {
2017 TypedWritable::fillin(scan, manager);
2018
2019 int num_attribs = scan.get_uint16();
2020 _read_overrides = new vector_int;
2021 (*_read_overrides).reserve(num_attribs);
2022
2023 for (int i = 0; i < num_attribs; ++i) {
2024 manager->read_pointer(scan);
2025 int override = scan.get_int32();
2026 (*_read_overrides).push_back(override);
2027 }
2028}
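// A minimal round-trip sketch using the public BamFile API (the filename
// and the omission of error handling are illustrative only):
//
//   BamFile bam;
//   if (bam.open_write(Filename("state.bam"))) {
//     bam.write_object(state);                 // drives write_datagram()
//     bam.close();
//   }
//   if (bam.open_read(Filename("state.bam"))) {
//     TypedWritable *obj = bam.read_object();  // drives make_from_bam()
//     bam.resolve();                           // completes pointers, then
//     bam.close();                             // change_this()/finalize()
//   }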