renderState.cxx
1 // Filename: renderState.cxx
2 // Created by: drose (21Feb02)
3 //
4 ////////////////////////////////////////////////////////////////////
5 //
6 // PANDA 3D SOFTWARE
7 // Copyright (c) Carnegie Mellon University. All rights reserved.
8 //
9 // All use of this software is subject to the terms of the revised BSD
10 // license. You should have received a copy of this license along
11 // with this source code in a file named "LICENSE."
12 //
13 ////////////////////////////////////////////////////////////////////
14 
15 #include "renderState.h"
16 #include "transparencyAttrib.h"
17 #include "cullBinAttrib.h"
18 #include "cullBinManager.h"
19 #include "fogAttrib.h"
20 #include "clipPlaneAttrib.h"
21 #include "scissorAttrib.h"
22 #include "transparencyAttrib.h"
23 #include "colorAttrib.h"
24 #include "colorScaleAttrib.h"
25 #include "textureAttrib.h"
26 #include "texGenAttrib.h"
27 #include "shaderAttrib.h"
28 #include "pStatTimer.h"
29 #include "config_pgraph.h"
30 #include "bamReader.h"
31 #include "bamWriter.h"
32 #include "datagramIterator.h"
33 #include "indent.h"
34 #include "compareTo.h"
35 #include "lightReMutexHolder.h"
36 #include "lightMutexHolder.h"
37 #include "thread.h"
38 #include "renderAttribRegistry.h"
39 #include "py_panda.h"
40 
41 LightReMutex *RenderState::_states_lock = NULL;
42 RenderState::States *RenderState::_states = NULL;
43 CPT(RenderState) RenderState::_empty_state;
44 CPT(RenderState) RenderState::_full_default_state;
45 UpdateSeq RenderState::_last_cycle_detect;
46 int RenderState::_garbage_index = 0;
47 
48 PStatCollector RenderState::_cache_update_pcollector("*:State Cache:Update");
49 PStatCollector RenderState::_garbage_collect_pcollector("*:State Cache:Garbage Collect");
50 PStatCollector RenderState::_state_compose_pcollector("*:State Cache:Compose State");
51 PStatCollector RenderState::_state_invert_pcollector("*:State Cache:Invert State");
52 PStatCollector RenderState::_node_counter("RenderStates:On nodes");
53 PStatCollector RenderState::_cache_counter("RenderStates:Cached");
54 PStatCollector RenderState::_state_break_cycles_pcollector("*:State Cache:Break Cycles");
55 PStatCollector RenderState::_state_validate_pcollector("*:State Cache:Validate");
56 
57 CacheStats RenderState::_cache_stats;
58 
59 TypeHandle RenderState::_type_handle;
60 
61 
62 ////////////////////////////////////////////////////////////////////
63 // Function: RenderState::Constructor
64 // Access: Protected
65 // Description: Actually, this could be a private constructor, since
66 // no one inherits from RenderState, but gcc gives us a
67 // spurious warning if all constructors are private.
68 ////////////////////////////////////////////////////////////////////
69 RenderState::
70 RenderState() :
71  _flags(0),
72  _auto_shader_state(NULL),
73  _lock("RenderState")
74 {
75  // Allocate the _attributes array.
76  RenderAttribRegistry *reg = RenderAttribRegistry::get_global_ptr();
77  _attributes = (Attribute *)reg->get_array_chain()->allocate(reg->get_max_slots() * sizeof(Attribute), get_class_type());
78 
79  // Also make sure each element gets initialized.
80  for (int i = 0; i < reg->get_max_slots(); ++i) {
81  new(&_attributes[i]) Attribute();
82  }
83 
84  if (_states == (States *)NULL) {
85  init_states();
86  }
87  _saved_entry = -1;
88  _last_mi = _mungers.end();
89  _cache_stats.add_num_states(1);
90  _read_overrides = NULL;
91  _generated_shader = NULL;
92 }
93 
94 ////////////////////////////////////////////////////////////////////
95 // Function: RenderState::Copy Constructor
96 // Access: Private
97 // Description: RenderStates are only meant to be copied internally.
98 ////////////////////////////////////////////////////////////////////
99 RenderState::
100 RenderState(const RenderState &copy) :
101  _filled_slots(copy._filled_slots),
102  _flags(0),
103  _auto_shader_state(NULL),
104  _lock("RenderState")
105 {
106  // Allocate the _attributes array.
107  RenderAttribRegistry *reg = RenderAttribRegistry::get_global_ptr();
108  _attributes = (Attribute *)reg->get_array_chain()->allocate(reg->get_max_slots() * sizeof(Attribute), get_class_type());
109 
110  // Also make sure each element gets initialized, as a copy.
111  for (int i = 0; i < reg->get_max_slots(); ++i) {
112  new(&_attributes[i]) Attribute(copy._attributes[i]);
113  }
114 
115  _saved_entry = -1;
116  _last_mi = _mungers.end();
117  _cache_stats.add_num_states(1);
118  _read_overrides = NULL;
119  _generated_shader = NULL;
120 }
121 
122 ////////////////////////////////////////////////////////////////////
123 // Function: RenderState::Copy Assignment Operator
124 // Access: Private
125 // Description: RenderStates are not meant to be copied.
126 ////////////////////////////////////////////////////////////////////
127 void RenderState::
128 operator = (const RenderState &) {
129  nassertv(false);
130 }
131 
132 ////////////////////////////////////////////////////////////////////
133 // Function: RenderState::Destructor
134 // Access: Public, Virtual
135 // Description: The destructor is responsible for removing the
136 // RenderState from the global set if it is there.
137 ////////////////////////////////////////////////////////////////////
138 RenderState::
139 ~RenderState() {
140  // We'd better not call the destructor twice on a particular object.
141  nassertv(!is_destructing());
142  set_destructing();
143 
144  LightReMutexHolder holder(*_states_lock);
145 
146  // unref() should have cleared these.
147  nassertv(_saved_entry == -1);
148  nassertv(_composition_cache.is_empty() && _invert_composition_cache.is_empty());
149 
150  // Make sure the _auto_shader_state cache pointer is cleared.
151  if (_auto_shader_state != (const RenderState *)NULL) {
152  if (_auto_shader_state != this) {
153  cache_unref_delete(_auto_shader_state);
154  }
155  _auto_shader_state = NULL;
156  }
157 
158  // If this was true at the beginning of the destructor, but is no
159  // longer true now, probably we've been double-deleted.
160  nassertv(get_ref_count() == 0);
161  _cache_stats.add_num_states(-1);
162 
163  // Free the _attributes array.
164  RenderAttribRegistry *reg = RenderAttribRegistry::get_global_ptr();
165  for (int i = 0; i < reg->get_max_slots(); ++i) {
166  _attributes[i].~Attribute();
167  }
168  reg->get_array_chain()->deallocate(_attributes, get_class_type());
169  _attributes = NULL;
170 }
171 
172 ////////////////////////////////////////////////////////////////////
173 // Function: RenderState::compare_to
174 // Access: Published
175 // Description: Provides an arbitrary ordering among all unique
176 // RenderStates, so we can store the essentially
177 // different ones in a big set and throw away the rest.
178 //
179 // This method is not needed outside of the RenderState
180 // class because all equivalent RenderState objects are
181 // guaranteed to share the same pointer; thus, a pointer
182 // comparison is always sufficient.
183 ////////////////////////////////////////////////////////////////////
184 int RenderState::
185 compare_to(const RenderState &other) const {
186  SlotMask mask = _filled_slots | other._filled_slots;
187  int slot = mask.get_lowest_on_bit();
188  while (slot >= 0) {
189  int result = _attributes[slot].compare_to(other._attributes[slot]);
190  if (result != 0) {
191  return result;
192  }
193  mask.clear_bit(slot);
194  slot = mask.get_lowest_on_bit();
195  }
196 
197  return 0;
198 }
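
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative caller-side code, not part of this
// file).  With the default uniquify-states config, equivalent
// RenderStates share one pointer, so callers can simply compare
// pointers; compare_to() mainly gives ordered containers a stable
// ordering.
//
//   CPT(RenderState) a = RenderState::make(
//     TransparencyAttrib::make(TransparencyAttrib::M_alpha));
//   CPT(RenderState) b = RenderState::make(
//     TransparencyAttrib::make(TransparencyAttrib::M_alpha));
//   bool same = (a == b);            // true: same cached pointer
//   int order = a->compare_to(*b);   // 0
////////////////////////////////////////////////////////////////////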
199 
200 ////////////////////////////////////////////////////////////////////
201 // Function: RenderState::compare_sort
202 // Access: Published
203 // Description: Returns -1, 0, or 1 according to the relative sorting
204 // of these two RenderStates, with regards to rendering
205 // performance, so that "heavier" RenderAttribs (as
206 // defined by RenderAttribRegistry::get_slot_sort()) are
207 // more likely to be grouped together. This is not
208 // related to the sorting order defined by compare_to.
209 ////////////////////////////////////////////////////////////////////
210 int RenderState::
211 compare_sort(const RenderState &other) const {
212  if (this == &other) {
213  // Trivial case.
214  return 0;
215  }
216 
217  RenderAttribRegistry *reg = RenderAttribRegistry::get_global_ptr();
218  int num_sorted_slots = reg->get_num_sorted_slots();
219  for (int n = 0; n < num_sorted_slots; ++n) {
220  int slot = reg->get_sorted_slot(n);
221  nassertr((_attributes[slot]._attrib != NULL) == _filled_slots.get_bit(slot), 0);
222 
223  const RenderAttrib *a = _attributes[slot]._attrib;
224  const RenderAttrib *b = other._attributes[slot]._attrib;
225  if (a != b) {
226  return a < b ? -1 : 1;
227  }
228  }
229 
230  return 0;
231 }
232 
233 ////////////////////////////////////////////////////////////////////
234 // Function: RenderState::compare_mask
235 // Access: Published
236 // Description: This version of compare_to takes a slot mask that
237 // indicates which attributes to include in the
238 // comparison. Unlike compare_to, this method
239 // compares the attributes by pointer.
240 ////////////////////////////////////////////////////////////////////
241 int RenderState::
242 compare_mask(const RenderState &other, SlotMask compare_mask) const {
243  SlotMask mask = (_filled_slots | other._filled_slots) & compare_mask;
244  int slot = mask.get_lowest_on_bit();
245  while (slot >= 0) {
246  const RenderAttrib *a = _attributes[slot]._attrib;
247  const RenderAttrib *b = other._attributes[slot]._attrib;
248  if (a != b) {
249  return a < b ? -1 : 1;
250  }
251  mask.clear_bit(slot);
252  slot = mask.get_lowest_on_bit();
253  }
254 
255  return 0;
256 }
257 
258 ////////////////////////////////////////////////////////////////////
259 // Function: RenderState::cull_callback
260 // Access: Published
261 // Description: Calls cull_callback() on each attrib. If any attrib
262 // returns false, interrupts the list and returns false
263 // immediately; otherwise, completes the list and
264 // returns true.
265 ////////////////////////////////////////////////////////////////////
266 bool RenderState::
267 cull_callback(CullTraverser *trav, const CullTraverserData &data) const {
268  SlotMask mask = _filled_slots;
269  int slot = mask.get_lowest_on_bit();
270  while (slot >= 0) {
271  const Attribute &attrib = _attributes[slot];
272  nassertr(attrib._attrib != NULL, false);
273  if (!attrib._attrib->cull_callback(trav, data)) {
274  return false;
275  }
276 
277  mask.clear_bit(slot);
278  slot = mask.get_lowest_on_bit();
279  }
280 
281  return true;
282 }
283 
284 ////////////////////////////////////////////////////////////////////
285 // Function: RenderState::make
286 // Access: Published, Static
287 // Description: Returns a RenderState with one attribute set.
288 ////////////////////////////////////////////////////////////////////
289 CPT(RenderState) RenderState::
290 make(const RenderAttrib *attrib, int override) {
291  RenderState *state = new RenderState;
292  int slot = attrib->get_slot();
293  state->_attributes[slot].set(attrib, override);
294  state->_filled_slots.set_bit(slot);
295  return return_new(state);
296 }
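
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): building a
// one-attribute state and applying it to a node.  "np" stands for
// any NodePath; the override of 1 lets this attribute win over an
// inherited attribute carrying the default override of 0.
//
//   CPT(RenderState) alpha_state = RenderState::make(
//     TransparencyAttrib::make(TransparencyAttrib::M_alpha), 1);
//   np.set_state(alpha_state);
////////////////////////////////////////////////////////////////////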
297 
298 ////////////////////////////////////////////////////////////////////
299 // Function: RenderState::make
300 // Access: Published, Static
301 // Description: Returns a RenderState with two attributes set.
302 ////////////////////////////////////////////////////////////////////
303 CPT(RenderState) RenderState::
304 make(const RenderAttrib *attrib1,
305  const RenderAttrib *attrib2, int override) {
306  RenderState *state = new RenderState;
307  state->_attributes[attrib1->get_slot()].set(attrib1, override);
308  state->_attributes[attrib2->get_slot()].set(attrib2, override);
309  state->_filled_slots.set_bit(attrib1->get_slot());
310  state->_filled_slots.set_bit(attrib2->get_slot());
311  return return_new(state);
312 }
313 
314 ////////////////////////////////////////////////////////////////////
315 // Function: RenderState::make
316 // Access: Published, Static
317 // Description: Returns a RenderState with three attributes set.
318 ////////////////////////////////////////////////////////////////////
319 CPT(RenderState) RenderState::
320 make(const RenderAttrib *attrib1,
321  const RenderAttrib *attrib2,
322  const RenderAttrib *attrib3, int override) {
323  RenderState *state = new RenderState;
324  state->_attributes[attrib1->get_slot()].set(attrib1, override);
325  state->_attributes[attrib2->get_slot()].set(attrib2, override);
326  state->_attributes[attrib3->get_slot()].set(attrib3, override);
327  state->_filled_slots.set_bit(attrib1->get_slot());
328  state->_filled_slots.set_bit(attrib2->get_slot());
329  state->_filled_slots.set_bit(attrib3->get_slot());
330  return return_new(state);
331 }
332 
333 ////////////////////////////////////////////////////////////////////
334 // Function: RenderState::make
335 // Access: Published, Static
336 // Description: Returns a RenderState with four attributes set.
337 ////////////////////////////////////////////////////////////////////
338 CPT(RenderState) RenderState::
339 make(const RenderAttrib *attrib1,
340  const RenderAttrib *attrib2,
341  const RenderAttrib *attrib3,
342  const RenderAttrib *attrib4, int override) {
343  RenderState *state = new RenderState;
344  state->_attributes[attrib1->get_slot()].set(attrib1, override);
345  state->_attributes[attrib2->get_slot()].set(attrib2, override);
346  state->_attributes[attrib3->get_slot()].set(attrib3, override);
347  state->_attributes[attrib4->get_slot()].set(attrib4, override);
348  state->_filled_slots.set_bit(attrib1->get_slot());
349  state->_filled_slots.set_bit(attrib2->get_slot());
350  state->_filled_slots.set_bit(attrib3->get_slot());
351  state->_filled_slots.set_bit(attrib4->get_slot());
352  return return_new(state);
353 }
354 
355 ////////////////////////////////////////////////////////////////////
356 // Function: RenderState::make
357 // Access: Published, Static
358 // Description: Returns a RenderState with five attributes set.
359 ////////////////////////////////////////////////////////////////////
360 CPT(RenderState) RenderState::
361 make(const RenderAttrib *attrib1,
362  const RenderAttrib *attrib2,
363  const RenderAttrib *attrib3,
364  const RenderAttrib *attrib4,
365  const RenderAttrib *attrib5, int override) {
366  RenderState *state = new RenderState;
367  state->_attributes[attrib1->get_slot()].set(attrib1, override);
368  state->_attributes[attrib2->get_slot()].set(attrib2, override);
369  state->_attributes[attrib3->get_slot()].set(attrib3, override);
370  state->_attributes[attrib4->get_slot()].set(attrib4, override);
371  state->_attributes[attrib5->get_slot()].set(attrib5, override);
372  state->_filled_slots.set_bit(attrib1->get_slot());
373  state->_filled_slots.set_bit(attrib2->get_slot());
374  state->_filled_slots.set_bit(attrib3->get_slot());
375  state->_filled_slots.set_bit(attrib4->get_slot());
376  state->_filled_slots.set_bit(attrib5->get_slot());
377  return return_new(state);
378 }
379 
380 ////////////////////////////////////////////////////////////////////
381 // Function: RenderState::make
382 // Access: Published, Static
383 // Description: Returns a RenderState with n attributes set.
384 ////////////////////////////////////////////////////////////////////
385 CPT(RenderState) RenderState::
386 make(const RenderAttrib * const *attrib, int num_attribs, int override) {
387  if (num_attribs == 0) {
388  return make_empty();
389  }
390  RenderState *state = new RenderState;
391  for (int i = 0; i < num_attribs; i++) {
392  int slot = attrib[i]->get_slot();
393  state->_attributes[slot].set(attrib[i], override);
394  state->_filled_slots.set_bit(slot);
395  }
396  return return_new(state);
397 }
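
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): the array form
// is convenient when the number of attributes is only known at
// runtime.
//
//   CPT(RenderAttrib) transp =
//     TransparencyAttrib::make(TransparencyAttrib::M_alpha);
//   CPT(RenderAttrib) bin = CullBinAttrib::make("transparent", 30);
//   const RenderAttrib *attribs[2] = { transp, bin };
//   CPT(RenderState) state = RenderState::make(attribs, 2);
////////////////////////////////////////////////////////////////////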
398 
399 ////////////////////////////////////////////////////////////////////
400 // Function: RenderState::compose
401 // Access: Published
402 // Description: Returns a new RenderState object that represents the
403 // composition of this state with the other state.
404 //
405 // The result of this operation is cached, and will be
406 // retained as long as both this RenderState object and
407 // the other RenderState object continue to exist.
408 // Should one of them destruct, the cached entry will be
409 // removed, and its pointer will be allowed to destruct
410 // as well.
411 ////////////////////////////////////////////////////////////////////
412 CPT(RenderState) RenderState::
413 compose(const RenderState *other) const {
414  // This method isn't strictly const, because it updates the cache,
415  // but we pretend that it is because it's only a cache which is
416  // transparent to the rest of the interface.
417 
418  // We handle empty state (identity) as a trivial special case.
419  if (is_empty()) {
420  return other;
421  }
422  if (other->is_empty()) {
423  return this;
424  }
425 
426  if (!state_cache) {
427  return do_compose(other);
428  }
429 
430  LightReMutexHolder holder(*_states_lock);
431 
432  // Is this composition already cached?
433  int index = _composition_cache.find(other);
434  if (index != -1) {
435  Composition &comp = ((RenderState *)this)->_composition_cache.modify_data(index);
436  if (comp._result == (const RenderState *)NULL) {
437  // Well, it wasn't cached already, but we already had an entry
438  // (probably created for the reverse direction), so use the same
439  // entry to store the new result.
440  CPT(RenderState) result = do_compose(other);
441  comp._result = result;
442 
443  if (result != (const RenderState *)this) {
444  // See the comments below about the need to up the reference
445  // count only when the result is not the same as this.
446  result->cache_ref();
447  }
448  }
449  // Here's the cache!
450  _cache_stats.inc_hits();
451  return comp._result;
452  }
453  _cache_stats.inc_misses();
454 
455  // We need to make a new cache entry, both in this object and in the
456  // other object. We make both records so the other RenderState
457  // object will know to delete the entry from this object when it
458  // destructs, and vice-versa.
459 
460  // The cache entry in this object is the only one that indicates the
461  // result; the other will be NULL for now.
462  CPT(RenderState) result = do_compose(other);
463 
464  _cache_stats.add_total_size(1);
465  _cache_stats.inc_adds(_composition_cache.get_size() == 0);
466 
467  ((RenderState *)this)->_composition_cache[other]._result = result;
468 
469  if (other != this) {
470  _cache_stats.add_total_size(1);
471  _cache_stats.inc_adds(other->_composition_cache.get_size() == 0);
472  ((RenderState *)other)->_composition_cache[this]._result = NULL;
473  }
474 
475  if (result != (const RenderState *)this) {
476  // If the result of compose() is something other than this,
477  // explicitly increment the reference count. We have to be sure
478  // to decrement it again later, when the composition entry is
479  // removed from the cache.
480  result->cache_ref();
481 
482  // (If the result was just this again, we still store the
483  // result, but we don't increment the reference count, since
484  // that would be a self-referential leak.)
485  }
486 
487  _cache_stats.maybe_report("RenderState");
488 
489  return result;
490 }
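
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): compose() is
// what accumulates state down the scene graph during the cull
// traversal.  "parent_state" and "node_state" are placeholders for
// two existing states; repeating the same composition is cheap
// because the result is cached on both operands.
//
//   CPT(RenderState) net_state = parent_state->compose(node_state);
//   CPT(RenderState) again = parent_state->compose(node_state);  // cache hit
////////////////////////////////////////////////////////////////////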
491 
492 ////////////////////////////////////////////////////////////////////
493 // Function: RenderState::invert_compose
494 // Access: Published
495 // Description: Returns a new RenderState object that represents the
496 // composition of this state's inverse with the other
497 // state.
498 //
499 // This is similar to compose(), but is particularly
500 // useful for computing the relative state of a node as
501 // viewed from some other node.
502 ////////////////////////////////////////////////////////////////////
503 CPT(RenderState) RenderState::
504 invert_compose(const RenderState *other) const {
505  // This method isn't strictly const, because it updates the cache,
506  // but we pretend that it is because it's only a cache which is
507  // transparent to the rest of the interface.
508 
509  // We handle empty state (identity) as a trivial special case.
510  if (is_empty()) {
511  return other;
512  }
513  // Unlike compose(), the case of other->is_empty() is not quite as
514  // trivial for invert_compose().
515 
516  if (other == this) {
517  // a->invert_compose(a) always produces identity.
518  return make_empty();
519  }
520 
521  if (!state_cache) {
522  return do_invert_compose(other);
523  }
524 
525  LightReMutexHolder holder(*_states_lock);
526 
527  // Is this composition already cached?
528  int index = _invert_composition_cache.find(other);
529  if (index != -1) {
530  Composition &comp = ((RenderState *)this)->_invert_composition_cache.modify_data(index);
531  if (comp._result == (const RenderState *)NULL) {
532  // Well, it wasn't cached already, but we already had an entry
533  // (probably created for the reverse direction), so use the same
534  // entry to store the new result.
535  CPT(RenderState) result = do_invert_compose(other);
536  comp._result = result;
537 
538  if (result != (const RenderState *)this) {
539  // See the comments below about the need to up the reference
540  // count only when the result is not the same as this.
541  result->cache_ref();
542  }
543  }
544  // Here's the cache!
545  _cache_stats.inc_hits();
546  return comp._result;
547  }
548  _cache_stats.inc_misses();
549 
550  // We need to make a new cache entry, both in this object and in the
551  // other object. We make both records so the other RenderState
552  // object will know to delete the entry from this object when it
553  // destructs, and vice-versa.
554 
555  // The cache entry in this object is the only one that indicates the
556  // result; the other will be NULL for now.
557  CPT(RenderState) result = do_invert_compose(other);
558 
559  _cache_stats.add_total_size(1);
560  _cache_stats.inc_adds(_invert_composition_cache.get_size() == 0);
561  ((RenderState *)this)->_invert_composition_cache[other]._result = result;
562 
563  if (other != this) {
564  _cache_stats.add_total_size(1);
565  _cache_stats.inc_adds(other->_invert_composition_cache.get_size() == 0);
566  ((RenderState *)other)->_invert_composition_cache[this]._result = NULL;
567  }
568 
569  if (result != (const RenderState *)this) {
570  // If the result of compose() is something other than this,
571  // explicitly increment the reference count. We have to be sure
572  // to decrement it again later, when the composition entry is
573  // removed from the cache.
574  result->cache_ref();
575 
576  // (If the result was just this again, we still store the
577  // result, but we don't increment the reference count, since
578  // that would be a self-referential leak.)
579  }
580 
581  return result;
582 }
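
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): invert_compose()
// answers "what is state_b relative to state_a?", which is the kind
// of relative-state query NodePath::get_state(other) performs.  The
// two state pointers here are placeholders.
//
//   CPT(RenderState) rel = state_a->invert_compose(state_b);
////////////////////////////////////////////////////////////////////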
583 
584 ////////////////////////////////////////////////////////////////////
585 // Function: RenderState::add_attrib
586 // Access: Published
587 // Description: Returns a new RenderState object that represents the
588 // same as the source state, with the new RenderAttrib
589 // added. If there is already a RenderAttrib with the
590 // same type, it is replaced (unless the override is
591 // lower).
592 ////////////////////////////////////////////////////////////////////
593 CPT(RenderState) RenderState::
594 add_attrib(const RenderAttrib *attrib, int override) const {
595  int slot = attrib->get_slot();
596  if (_filled_slots.get_bit(slot) &&
597  _attributes[slot]._override > override) {
598  // The existing attribute overrides.
599  return this;
600  }
601 
602  // The new attribute replaces.
603  RenderState *new_state = new RenderState(*this);
604  new_state->_attributes[slot].set(attrib, override);
605  new_state->_filled_slots.set_bit(slot);
606  return return_new(new_state);
607 }
608 
609 ////////////////////////////////////////////////////////////////////
610 // Function: RenderState::set_attrib
611 // Access: Published
612 // Description: Returns a new RenderState object that represents the
613 // same as the source state, with the new RenderAttrib
614 // added. If there is already a RenderAttrib with the
615 // same type, it is replaced unconditionally. The
616 // override is not changed.
617 ////////////////////////////////////////////////////////////////////
618 CPT(RenderState) RenderState::
619 set_attrib(const RenderAttrib *attrib) const {
620  RenderState *new_state = new RenderState(*this);
621  int slot = attrib->get_slot();
622  new_state->_attributes[slot]._attrib = attrib;
623  new_state->_filled_slots.set_bit(slot);
624  return return_new(new_state);
625 }
626 
627 ////////////////////////////////////////////////////////////////////
628 // Function: RenderState::set_attrib
629 // Access: Published
630 // Description: Returns a new RenderState object that represents the
631 // same as the source state, with the new RenderAttrib
632 // added. If there is already a RenderAttrib with the
633 // same type, it is replaced unconditionally. The
634 // override is also replaced unconditionally.
635 ////////////////////////////////////////////////////////////////////
636 CPT(RenderState) RenderState::
637 set_attrib(const RenderAttrib *attrib, int override) const {
638  RenderState *new_state = new RenderState(*this);
639  int slot = attrib->get_slot();
640  new_state->_attributes[slot].set(attrib, override);
641  new_state->_filled_slots.set_bit(slot);
642  return return_new(new_state);
643 }
644 
645 ////////////////////////////////////////////////////////////////////
646 // Function: RenderState::remove_attrib
647 // Access: Published
648 // Description: Returns a new RenderState object that represents the
649 // same as the source state, with the indicated
650 // RenderAttrib removed.
651 ////////////////////////////////////////////////////////////////////
652 CPT(RenderState) RenderState::
653 remove_attrib(int slot) const {
654  if (_attributes[slot]._attrib == NULL) {
655  // Already removed.
656  return this;
657  }
658 
659  // Will this bring us down to the empty state?
660  if (_filled_slots.get_num_on_bits() == 1) {
661  return make_empty();
662  }
663 
664  RenderState *new_state = new RenderState(*this);
665  new_state->_attributes[slot].set(NULL, 0);
666  new_state->_filled_slots.clear_bit(slot);
667  return return_new(new_state);
668 }
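
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): because
// RenderStates are immutable, add_attrib(), set_attrib() and
// remove_attrib() all return a new (shared) state rather than
// modifying this one.
//
//   CPT(RenderState) base = RenderState::make_empty();
//   CPT(RenderState) with_bin =
//     base->add_attrib(CullBinAttrib::make("fixed", 10));
//   CPT(RenderState) without_bin =
//     with_bin->remove_attrib(CullBinAttrib::get_class_slot());
//   // without_bin is the shared empty state again, the same
//   // pointer as base.
////////////////////////////////////////////////////////////////////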
669 
670 ////////////////////////////////////////////////////////////////////
671 // Function: RenderState::adjust_all_priorities
672 // Access: Published
673 // Description: Returns a new RenderState object that represents the
674 // same as the source state, with all attributes'
675 // override values incremented (or decremented, if
676 // negative) by the indicated amount. If the override
677 // would drop below zero, it is set to zero.
678 ////////////////////////////////////////////////////////////////////
679 CPT(RenderState) RenderState::
680 adjust_all_priorities(int adjustment) const {
681  RenderState *new_state = new RenderState(*this);
682 
683  SlotMask mask = _filled_slots;
684  int slot = mask.get_lowest_on_bit();
685  while (slot >= 0) {
686  Attribute &attrib = new_state->_attributes[slot];
687  nassertr(attrib._attrib != (RenderAttrib *)NULL, this);
688  attrib._override = max(attrib._override + adjustment, 0);
689 
690  mask.clear_bit(slot);
691  slot = mask.get_lowest_on_bit();
692  }
693 
694  return return_new(new_state);
695 }
696 
697 ////////////////////////////////////////////////////////////////////
698 // Function: RenderState::unref
699 // Access: Published, Virtual
700 // Description: This method overrides ReferenceCount::unref() to
701 // check whether the remaining reference count is
702 // entirely in the cache, and if so, it checks for and
703 // breaks a cycle in the cache involving this object.
704 // This is designed to prevent leaks from cyclical
705 // references within the cache.
706 ////////////////////////////////////////////////////////////////////
707 bool RenderState::
708 unref() const {
709  if (!state_cache || garbage_collect_states) {
710  // If we're not using the cache at all, or if we're relying on
711  // garbage collection, just allow the pointer to unref normally.
712  return ReferenceCount::unref();
713  }
714 
715  // Here is the normal refcounting case, with a normal cache, and
716  // without garbage collection in effect. In this case we will pull
717  // the object out of the cache when its reference count goes to 0.
718 
719  // We always have to grab the lock, since we will definitely need to
720  // be holding it if we happen to drop the reference count to 0.
721  // Having to grab the lock at every call to unref() is a big
722  // limiting factor on parallelization.
723  LightReMutexHolder holder(*_states_lock);
724 
725  if (auto_break_cycles && uniquify_states) {
726  if (get_cache_ref_count() > 0 &&
727  get_ref_count() == get_cache_ref_count() + 1) {
728  // If we are about to remove the one reference that is not in the
729  // cache, leaving only references in the cache, then we need to
730  // check for a cycle involving this RenderState and break it if
731  // it exists.
732  ((RenderState *)this)->detect_and_break_cycles();
733  }
734  }
735 
736  if (ReferenceCount::unref()) {
737  // The reference count is still nonzero.
738  return true;
739  }
740 
741  // The reference count has just reached zero. Make sure the object
742  // is removed from the global object pool, before anyone else finds
743  // it and tries to ref it.
744  ((RenderState *)this)->release_new();
745  ((RenderState *)this)->remove_cache_pointers();
746 
747  return false;
748 }
749 
750 ////////////////////////////////////////////////////////////////////
751 // Function: RenderState::get_auto_shader_state
752 // Access: Published
753 // Description: Returns the base RenderState that should have the
754 // generated_shader stored within it, for generated
755 // shader states. The returned object might be the same
756 // as this object, or it might be a different
757 // RenderState with certain attributes removed, or set
758 // to their default values.
759 //
760 // The point is to avoid needless regeneration of the
761 // shader attrib by storing the generated shader on a
762 // common RenderState object, with all irrelevant
763 // attributes removed.
764 ////////////////////////////////////////////////////////////////////
765 const RenderState *RenderState::
766 get_auto_shader_state() const {
767  if (_auto_shader_state == (const RenderState *)NULL) {
768  ((RenderState *)this)->assign_auto_shader_state();
769  }
770  return _auto_shader_state;
771 }
772 
773 ////////////////////////////////////////////////////////////////////
774 // Function: RenderState::output
775 // Access: Published
776 // Description:
777 ////////////////////////////////////////////////////////////////////
778 void RenderState::
779 output(ostream &out) const {
780  out << "S:";
781  if (is_empty()) {
782  out << "(empty)";
783 
784  } else {
785  out << "(";
786  const char *sep = "";
787 
788  SlotMask mask = _filled_slots;
789  int slot = mask.get_lowest_on_bit();
790  while (slot >= 0) {
791  const Attribute &attrib = _attributes[slot];
792  nassertv(attrib._attrib != (RenderAttrib *)NULL);
793  out << sep << attrib._attrib->get_type();
794  sep = " ";
795 
796  mask.clear_bit(slot);
797  slot = mask.get_lowest_on_bit();
798  }
799  out << ")";
800  }
801 }
802 
803 ////////////////////////////////////////////////////////////////////
804 // Function: RenderState::write
805 // Access: Published
806 // Description:
807 ////////////////////////////////////////////////////////////////////
808 void RenderState::
809 write(ostream &out, int indent_level) const {
810  if (is_empty()) {
811  indent(out, indent_level)
812  << "(empty)\n";
813  }
814 
815  SlotMask mask = _filled_slots;
816  int slot = mask.get_lowest_on_bit();
817  while (slot >= 0) {
818  const Attribute &attrib = _attributes[slot];
819  nassertv(attrib._attrib != (RenderAttrib *)NULL);
820  attrib._attrib->write(out, indent_level);
821 
822  mask.clear_bit(slot);
823  slot = mask.get_lowest_on_bit();
824  }
825 }
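
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): output() backs
// the compact one-line << form, while write() produces the indented
// per-attribute dump.  "np" stands for any NodePath.
//
//   nout << *np.get_state() << "\n";   // compact form
//   np.get_state()->write(nout, 0);    // full dump
////////////////////////////////////////////////////////////////////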
826 
827 ////////////////////////////////////////////////////////////////////
828 // Function: RenderState::get_max_priority
829 // Access: Published, Static
830 // Description: Returns the maximum priority number (sometimes called
831 // override) that may be set on any node. This may or
832 // may not be enforced, but the scene graph code assumes
833 // that no priority numbers will be larger than this,
834 // and some effects may not work properly if you use a
835 // larger number.
836 ////////////////////////////////////////////////////////////////////
837 int RenderState::
838 get_max_priority() {
839  return 1000000000;
840 }
841 
842 ////////////////////////////////////////////////////////////////////
843 // Function: RenderState::get_num_states
844 // Access: Published, Static
845 // Description: Returns the total number of unique RenderState
846 // objects allocated in the world. This will go up and
847 // down during normal operations.
848 ////////////////////////////////////////////////////////////////////
849 int RenderState::
850 get_num_states() {
851  if (_states == (States *)NULL) {
852  return 0;
853  }
854  LightReMutexHolder holder(*_states_lock);
855  return _states->get_num_entries();
856 }
857 
858 ////////////////////////////////////////////////////////////////////
859 // Function: RenderState::get_num_unused_states
860 // Access: Published, Static
861 // Description: Returns the total number of RenderState objects that
862 // have been allocated but have no references outside of
863 // the internal RenderState cache.
864 //
865 // A nonzero return value is not necessarily indicative
866 // of leaked references; it is normal for two
867 // RenderState objects, both of which have references
868 // held outside the cache, to have the result of their
869 // composition stored within the cache. This result
870 // will be retained within the cache until one of the
871 // base RenderStates is released.
872 //
873 // Use list_cycles() to get an idea of the number of
874 // actual "leaked" RenderState objects.
875 ////////////////////////////////////////////////////////////////////
876 int RenderState::
877 get_num_unused_states() {
878  if (_states == (States *)NULL) {
879  return 0;
880  }
881  LightReMutexHolder holder(*_states_lock);
882 
883  // First, we need to count the number of times each RenderState
884  // object is recorded in the cache.
885  typedef pmap<const RenderState *, int> StateCount;
886  StateCount state_count;
887 
888  int size = _states->get_size();
889  for (int si = 0; si < size; ++si) {
890  if (!_states->has_element(si)) {
891  continue;
892  }
893  const RenderState *state = _states->get_key(si);
894 
895  int i;
896  int cache_size = state->_composition_cache.get_size();
897  for (i = 0; i < cache_size; ++i) {
898  if (state->_composition_cache.has_element(i)) {
899  const RenderState *result = state->_composition_cache.get_data(i)._result;
900  if (result != (const RenderState *)NULL && result != state) {
901  // Here's a RenderState that's recorded in the cache.
902  // Count it.
903  pair<StateCount::iterator, bool> ir =
904  state_count.insert(StateCount::value_type(result, 1));
905  if (!ir.second) {
906  // If the above insert operation fails, then it's already in
907  // the cache; increment its value.
908  (*(ir.first)).second++;
909  }
910  }
911  }
912  }
913  cache_size = state->_invert_composition_cache.get_size();
914  for (i = 0; i < cache_size; ++i) {
915  if (state->_invert_composition_cache.has_element(i)) {
916  const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
917  if (result != (const RenderState *)NULL && result != state) {
918  pair<StateCount::iterator, bool> ir =
919  state_count.insert(StateCount::value_type(result, 1));
920  if (!ir.second) {
921  (*(ir.first)).second++;
922  }
923  }
924  }
925  }
926  }
927 
928  // Now that we have the appearance count of each RenderState
929  // object, we can tell which ones are unreferenced outside of the
930  // RenderState cache, by comparing these to the reference counts.
931  int num_unused = 0;
932 
933  StateCount::iterator sci;
934  for (sci = state_count.begin(); sci != state_count.end(); ++sci) {
935  const RenderState *state = (*sci).first;
936  int count = (*sci).second;
937  nassertr(count == state->get_cache_ref_count(), num_unused);
938  nassertr(count <= state->get_ref_count(), num_unused);
939  if (count == state->get_ref_count()) {
940  num_unused++;
941 
942  if (pgraph_cat.is_debug()) {
943  pgraph_cat.debug()
944  << "Unused state: " << (void *)state << ":"
945  << state->get_ref_count() << " =\n";
946  state->write(pgraph_cat.debug(false), 2);
947  }
948  }
949  }
950 
951  return num_unused;
952 }
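
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): the static
// counters are handy when hunting a state-count explosion.
//
//   nout << "unique states: " << RenderState::get_num_states() << "\n";
//   nout << "cache-only states: "
//        << RenderState::get_num_unused_states() << "\n";
//   // If the second number looks wrong, list_cycles(nout) reports
//   // any reference-count cycles the automatic breaking missed.
////////////////////////////////////////////////////////////////////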
953 
954 ////////////////////////////////////////////////////////////////////
955 // Function: RenderState::clear_cache
956 // Access: Published, Static
957 // Description: Empties the cache of composed RenderStates. This
958 // makes every RenderState forget what results when
959 // it is composed with other RenderStates.
960 //
961 // This will eliminate any RenderState objects that
962 // have been allocated but have no references outside of
963 // the internal RenderState map. It will not
964 // eliminate RenderState objects that are still in
965 // use.
966 //
967 // Nowadays, this method should not be necessary, as
968 // reference-count cycles in the composition cache
969 // should be automatically detected and broken.
970 //
971 // The return value is the number of RenderStates
972 // freed by this operation.
973 ////////////////////////////////////////////////////////////////////
974 int RenderState::
975 clear_cache() {
976  if (_states == (States *)NULL) {
977  return 0;
978  }
979  LightReMutexHolder holder(*_states_lock);
980 
981  PStatTimer timer(_cache_update_pcollector);
982  int orig_size = _states->get_num_entries();
983 
984  // First, we need to copy the entire set of states to a temporary
985  // vector, reference-counting each object. That way we can walk
986  // through the copy, without fear of dereferencing (and deleting)
987  // the objects in the map as we go.
988  {
989  typedef pvector< CPT(RenderState) > TempStates;
990  TempStates temp_states;
991  temp_states.reserve(orig_size);
992 
993  int size = _states->get_size();
994  for (int si = 0; si < size; ++si) {
995  if (!_states->has_element(si)) {
996  continue;
997  }
998  const RenderState *state = _states->get_key(si);
999  temp_states.push_back(state);
1000  }
1001 
1002  // Now it's safe to walk through the list, destroying the cache
1003  // within each object as we go. Nothing will be destructed till
1004  // we're done.
1005  TempStates::iterator ti;
1006  for (ti = temp_states.begin(); ti != temp_states.end(); ++ti) {
1007  RenderState *state = (RenderState *)(*ti).p();
1008 
1009  int i;
1010  int cache_size = state->_composition_cache.get_size();
1011  for (i = 0; i < cache_size; ++i) {
1012  if (state->_composition_cache.has_element(i)) {
1013  const RenderState *result = state->_composition_cache.get_data(i)._result;
1014  if (result != (const RenderState *)NULL && result != state) {
1015  result->cache_unref();
1016  nassertr(result->get_ref_count() > 0, 0);
1017  }
1018  }
1019  }
1020  _cache_stats.add_total_size(-state->_composition_cache.get_num_entries());
1021  state->_composition_cache.clear();
1022 
1023  cache_size = state->_invert_composition_cache.get_size();
1024  for (i = 0; i < cache_size; ++i) {
1025  if (state->_invert_composition_cache.has_element(i)) {
1026  const RenderState *result = state->_invert_composition_cache.get_data(i)._result;
1027  if (result != (const RenderState *)NULL && result != state) {
1028  result->cache_unref();
1029  nassertr(result->get_ref_count() > 0, 0);
1030  }
1031  }
1032  }
1033  _cache_stats.add_total_size(-state->_invert_composition_cache.get_num_entries());
1034  state->_invert_composition_cache.clear();
1035  }
1036 
1037  // Once this block closes and the temp_states object goes away,
1038  // all the destruction will begin. Anything whose reference was
1039  // held only within the various objects' caches will go away.
1040  }
1041 
1042  int new_size = _states->get_num_entries();
1043  return orig_size - new_size;
1044 }
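
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): clear_cache()
// can be paired with its TransformState counterpart when tearing
// down a large scene, although automatic cycle-breaking normally
// makes the call unnecessary.
//
//   int freed = RenderState::clear_cache();
//   freed += TransformState::clear_cache();
//   nout << "freed " << freed << " cached objects\n";
////////////////////////////////////////////////////////////////////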
1045 
1046 ////////////////////////////////////////////////////////////////////
1047 // Function: RenderState::garbage_collect
1048 // Access: Published, Static
1049 // Description: Performs a garbage-collection cycle. This must be
1050 // called periodically if garbage-collect-states is true
1051 // to ensure that RenderStates get cleaned up
1052 // appropriately. It does no harm to call it even if
1053 // this variable is not true, but there is probably no
1054 // advantage in that case.
1055 //
1056 // This automatically calls
1057 // RenderAttrib::garbage_collect() as well.
1058 ////////////////////////////////////////////////////////////////////
1059 int RenderState::
1060 garbage_collect() {
1061  int num_attribs = RenderAttrib::garbage_collect();
1062 
1063  if (_states == (States *)NULL || !garbage_collect_states) {
1064  return num_attribs;
1065  }
1066  LightReMutexHolder holder(*_states_lock);
1067 
1068  PStatTimer timer(_garbage_collect_pcollector);
1069  int orig_size = _states->get_num_entries();
1070 
1071  // How many elements to process this pass?
1072  int size = _states->get_size();
1073  int num_this_pass = int(size * garbage_collect_states_rate);
1074  if (num_this_pass <= 0) {
1075  return num_attribs;
1076  }
1077  num_this_pass = min(num_this_pass, size);
1078  int stop_at_element = (_garbage_index + num_this_pass) % size;
1079 
1080  int num_elements = 0;
1081  int si = _garbage_index;
1082  do {
1083  if (_states->has_element(si)) {
1084  ++num_elements;
1085  RenderState *state = (RenderState *)_states->get_key(si);
1086  if (auto_break_cycles && uniquify_states) {
1087  if (state->get_cache_ref_count() > 0 &&
1088  state->get_ref_count() == state->get_cache_ref_count()) {
1089  // If we have removed all the references to this state not in
1090  // the cache, leaving only references in the cache, then we
1091  // need to check for a cycle involving this RenderState and
1092  // break it if it exists.
1093  state->detect_and_break_cycles();
1094  }
1095  }
1096 
1097  if (state->get_ref_count() == 1) {
1098  // This state has recently been unreffed to 1 (the one we
1099  // added when we stored it in the cache). Now it's time to
1100  // delete it. This is safe, because we're holding the
1101  // _states_lock, so it's not possible for some other thread to
1102  // find the state in the cache and ref it while we're doing
1103  // this.
1104  state->release_new();
1105  state->remove_cache_pointers();
1106  state->cache_unref();
1107  delete state;
1108  }
1109  }
1110 
1111  si = (si + 1) % size;
1112  } while (si != stop_at_element);
1113  _garbage_index = si;
1114  nassertr(_states->validate(), 0);
1115 
1116  int new_size = _states->get_num_entries();
1117  return orig_size - new_size + num_attribs;
1118 }
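
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): when
// garbage-collect-states is enabled in Config.prc, something must
// call garbage_collect() roughly once per frame; ShowBase does this
// for Python applications, and a C++ main loop can do the same.
// "framework" stands for a PandaFramework instance.
//
//   while (framework.do_frame(Thread::get_current_thread())) {
//     RenderState::garbage_collect();
//     TransformState::garbage_collect();
//   }
////////////////////////////////////////////////////////////////////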
1119 
1120 ////////////////////////////////////////////////////////////////////
1121 // Function: RenderState::clear_munger_cache
1122 // Access: Published, Static
1123 // Description: Completely empties the cache of state + gsg ->
1124 // munger, for all states and all gsg's. Normally there
1125 // is no need to empty this cache.
1126 ////////////////////////////////////////////////////////////////////
1127 void RenderState::
1128 clear_munger_cache() {
1129  LightReMutexHolder holder(*_states_lock);
1130 
1131  int size = _states->get_size();
1132  for (int si = 0; si < size; ++si) {
1133  if (!_states->has_element(si)) {
1134  continue;
1135  }
1136  RenderState *state = (RenderState *)(_states->get_key(si));
1137  state->_mungers.clear();
1138  state->_last_mi = state->_mungers.end();
1139  }
1140 }
1141 
1142 ////////////////////////////////////////////////////////////////////
1143 // Function: RenderState::list_cycles
1144 // Access: Published, Static
1145 // Description: Detects all of the reference-count cycles in the
1146 // cache and reports them to standard output.
1147 //
1148 // These cycles may be inadvertently created when state
1149 // compositions cycle back to a starting point.
1150 // Nowadays, these cycles should be automatically
1151 // detected and broken, so this method should never list
1152 // any cycles unless there is a bug in that detection
1153 // logic.
1154 //
1155 // The cycles listed here are not leaks in the strictest
1156 // sense of the word, since they can be reclaimed by a
1157 // call to clear_cache(); but they will not be reclaimed
1158 // automatically.
1159 ////////////////////////////////////////////////////////////////////
1160 void RenderState::
1161 list_cycles(ostream &out) {
1162  if (_states == (States *)NULL) {
1163  return;
1164  }
1165  LightReMutexHolder holder(*_states_lock);
1166 
1167  typedef pset<const RenderState *> VisitedStates;
1168  VisitedStates visited;
1169  CompositionCycleDesc cycle_desc;
1170 
1171  int size = _states->get_size();
1172  for (int si = 0; si < size; ++si) {
1173  if (!_states->has_element(si)) {
1174  continue;
1175  }
1176  const RenderState *state = _states->get_key(si);
1177 
1178  bool inserted = visited.insert(state).second;
1179  if (inserted) {
1180  ++_last_cycle_detect;
1181  if (r_detect_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
1182  // This state begins a cycle.
1183  CompositionCycleDesc::reverse_iterator csi;
1184 
1185  out << "\nCycle detected of length " << cycle_desc.size() + 1 << ":\n"
1186  << "state " << (void *)state << ":" << state->get_ref_count()
1187  << " =\n";
1188  state->write(out, 2);
1189  for (csi = cycle_desc.rbegin(); csi != cycle_desc.rend(); ++csi) {
1190  const CompositionCycleDescEntry &entry = (*csi);
1191  if (entry._inverted) {
1192  out << "invert composed with ";
1193  } else {
1194  out << "composed with ";
1195  }
1196  out << (const void *)entry._obj << ":" << entry._obj->get_ref_count()
1197  << " " << *entry._obj << "\n"
1198  << "produces " << (const void *)entry._result << ":"
1199  << entry._result->get_ref_count() << " =\n";
1200  entry._result->write(out, 2);
1201  visited.insert(entry._result);
1202  }
1203 
1204  cycle_desc.clear();
1205  } else {
1206  ++_last_cycle_detect;
1207  if (r_detect_reverse_cycles(state, state, 1, _last_cycle_detect, &cycle_desc)) {
1208  // This state begins a cycle.
1209  CompositionCycleDesc::iterator csi;
1210 
1211  out << "\nReverse cycle detected of length " << cycle_desc.size() + 1 << ":\n"
1212  << "state ";
1213  for (csi = cycle_desc.begin(); csi != cycle_desc.end(); ++csi) {
1214  const CompositionCycleDescEntry &entry = (*csi);
1215  out << (const void *)entry._result << ":"
1216  << entry._result->get_ref_count() << " =\n";
1217  entry._result->write(out, 2);
1218  out << (const void *)entry._obj << ":"
1219  << entry._obj->get_ref_count() << " =\n";
1220  entry._obj->write(out, 2);
1221  visited.insert(entry._result);
1222  }
1223  out << (void *)state << ":"
1224  << state->get_ref_count() << " =\n";
1225  state->write(out, 2);
1226 
1227  cycle_desc.clear();
1228  }
1229  }
1230  }
1231  }
1232 }
1233 
1234 
1235 ////////////////////////////////////////////////////////////////////
1236 // Function: RenderState::list_states
1237 // Access: Published, Static
1238 // Description: Lists all of the RenderStates in the cache to the
1239 // output stream, one per line. This can be quite a lot
1240 // of output if the cache is large, so be prepared.
1241 ////////////////////////////////////////////////////////////////////
1242 void RenderState::
1243 list_states(ostream &out) {
1244  if (_states == (States *)NULL) {
1245  out << "0 states:\n";
1246  return;
1247  }
1248  LightReMutexHolder holder(*_states_lock);
1249 
1250  out << _states->get_num_entries() << " states:\n";
1251 
1252  int size = _states->get_size();
1253  for (int si = 0; si < size; ++si) {
1254  if (!_states->has_element(si)) {
1255  continue;
1256  }
1257  const RenderState *state = _states->get_key(si);
1258  state->write(out, 2);
1259  }
1260 }
1261 
1262 ////////////////////////////////////////////////////////////////////
1263 // Function: RenderState::validate_states
1264 // Access: Published, Static
1265 // Description: Ensures that the cache is still stored in sorted
1266 // order, and that none of the cache elements have been
1267 // inadvertently deleted. Returns true if so, false if
1268 // there is a problem (which implies someone has
1269 // modified one of the supposedly-const RenderState
1270 // objects).
1271 ////////////////////////////////////////////////////////////////////
1272 bool RenderState::
1273 validate_states() {
1274  if (_states == (States *)NULL) {
1275  return true;
1276  }
1277 
1278  PStatTimer timer(_state_validate_pcollector);
1279 
1280  LightReMutexHolder holder(*_states_lock);
1281  if (_states->is_empty()) {
1282  return true;
1283  }
1284 
1285  if (!_states->validate()) {
1286  pgraph_cat.error()
1287  << "RenderState::_states cache is invalid!\n";
1288  return false;
1289  }
1290 
1291  int size = _states->get_size();
1292  int si = 0;
1293  while (si < size && !_states->has_element(si)) {
1294  ++si;
1295  }
1296  nassertr(si < size, false);
1297  nassertr(_states->get_key(si)->get_ref_count() >= 0, false);
1298  int snext = si;
1299  ++snext;
1300  while (snext < size && !_states->has_element(snext)) {
1301  ++snext;
1302  }
1303  while (snext < size) {
1304  nassertr(_states->get_key(snext)->get_ref_count() >= 0, false);
1305  const RenderState *ssi = _states->get_key(si);
1306  const RenderState *ssnext = _states->get_key(snext);
1307  int c = ssi->compare_to(*ssnext);
1308  int ci = ssnext->compare_to(*ssi);
1309  if ((ci < 0) != (c > 0) ||
1310  (ci > 0) != (c < 0) ||
1311  (ci == 0) != (c == 0)) {
1312  pgraph_cat.error()
1313  << "RenderState::compare_to() not defined properly!\n";
1314  pgraph_cat.error(false)
1315  << "(a, b): " << c << "\n";
1316  pgraph_cat.error(false)
1317  << "(b, a): " << ci << "\n";
1318  ssi->write(pgraph_cat.error(false), 2);
1319  ssnext->write(pgraph_cat.error(false), 2);
1320  return false;
1321  }
1322  si = snext;
1323  ++snext;
1324  while (snext < size && !_states->has_element(snext)) {
1325  ++snext;
1326  }
1327  }
1328 
1329  return true;
1330 }
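
////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of this file): debug code can
// call validate_states() directly when a supposedly-const
// RenderState is suspected of having been modified in place (this is
// also what the paranoid-const config variable triggers).
//
//   #ifndef NDEBUG
//   bool cache_ok = RenderState::validate_states();
//   #endif
////////////////////////////////////////////////////////////////////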
1331 
1332 ////////////////////////////////////////////////////////////////////
1333 // Function: RenderState::get_geom_rendering
1334 // Access: Published
1335 // Description: Returns the union of the Geom::GeomRendering bits
1336 // that will be required once this RenderState is
1337 // applied to a geom which includes the indicated
1338 // geom_rendering bits.
1339 ////////////////////////////////////////////////////////////////////
1340 int RenderState::
1341 get_geom_rendering(int geom_rendering) const {
1342  const RenderModeAttrib *render_mode = DCAST(RenderModeAttrib, get_attrib(RenderModeAttrib::get_class_slot()));
1343  const TexGenAttrib *tex_gen = DCAST(TexGenAttrib, get_attrib(TexGenAttrib::get_class_slot()));
1344  const TexMatrixAttrib *tex_matrix = DCAST(TexMatrixAttrib, get_attrib(TexMatrixAttrib::get_class_slot()));
1345 
1346  if (render_mode != (const RenderModeAttrib *)NULL) {
1347  geom_rendering = render_mode->get_geom_rendering(geom_rendering);
1348  }
1349  if (tex_gen != (const TexGenAttrib *)NULL) {
1350  geom_rendering = tex_gen->get_geom_rendering(geom_rendering);
1351  }
1352  if (tex_matrix != (const TexMatrixAttrib *)NULL) {
1353  geom_rendering = tex_matrix->get_geom_rendering(geom_rendering);
1354  }
1355 
1356  return geom_rendering;
1357 }
1358 
1359 ////////////////////////////////////////////////////////////////////
1360 // Function: RenderState::bin_removed
1361 // Access: Public, Static
1362 // Description: Intended to be called by
1363 // CullBinManager::remove_bin(), this informs all the
1364 // RenderStates in the world to remove the indicated
1365 // bin_index from their cache if it has been cached.
1366 ////////////////////////////////////////////////////////////////////
1367 void RenderState::
1368 bin_removed(int bin_index) {
1369  // Do something here.
1370  nassertv(false);
1371 }
1372 
1373 ////////////////////////////////////////////////////////////////////
1374 // Function: RenderState::validate_filled_slots
1375 // Access: Private
1376 // Description: Returns true if the _filled_slots bitmask is
1377 // consistent with the table of RenderAttrib pointers,
1378 // false otherwise.
1379 ////////////////////////////////////////////////////////////////////
1380 bool RenderState::
1381 validate_filled_slots() const {
1382  SlotMask mask;
1383 
1384  RenderAttribRegistry *reg = RenderAttribRegistry::get_global_ptr();
1385  int max_slots = reg->get_max_slots();
1386  for (int slot = 1; slot < max_slots; ++slot) {
1387  const Attribute &attribute = _attributes[slot];
1388  if (attribute._attrib != (RenderAttrib *)NULL) {
1389  mask.set_bit(slot);
1390  }
1391  }
1392 
1393  return (mask == _filled_slots);
1394 }
1395 
1396 ////////////////////////////////////////////////////////////////////
1397 // Function: RenderState::do_calc_hash
1398 // Access: Private
1399 // Description: Computes a suitable hash value for phash_map.
1400 ////////////////////////////////////////////////////////////////////
1401 void RenderState::
1402 do_calc_hash() {
1403  _hash = 0;
1404 
1405  SlotMask mask = _filled_slots;
1406  int slot = mask.get_lowest_on_bit();
1407  while (slot >= 0) {
1408  const Attribute &attrib = _attributes[slot];
1409  nassertv(attrib._attrib != (RenderAttrib *)NULL);
1410  _hash = pointer_hash::add_hash(_hash, attrib._attrib);
1411  _hash = int_hash::add_hash(_hash, attrib._override);
1412 
1413  mask.clear_bit(slot);
1414  slot = mask.get_lowest_on_bit();
1415  }
1416 
1417  _flags |= F_hash_known;
1418 }
1419 
1420 ////////////////////////////////////////////////////////////////////
1421 // Function: RenderState::assign_auto_shader_state
1422 // Access: Private
1423 // Description: Sets _auto_shader_state to the appropriate
1424 // RenderState object pointer, either the same pointer
1425 // as this object, or some other (simpler) RenderState.
1426 ////////////////////////////////////////////////////////////////////
1427 void RenderState::
1428 assign_auto_shader_state() {
1429  CPT(RenderState) state = do_calc_auto_shader_state();
1430 
1431  {
1432  LightReMutexHolder holder(*_states_lock);
1433  if (_auto_shader_state == (const RenderState *)NULL) {
1434  _auto_shader_state = state;
1435  if (_auto_shader_state != this) {
1436  _auto_shader_state->cache_ref();
1437  }
1438  }
1439  }
1440 }
1441 
1442 ////////////////////////////////////////////////////////////////////
1443 // Function: RenderState::do_calc_auto_shader_state
1444 // Access: Private
1445 // Description: Returns the appropriate RenderState that should be
1446 // used to store the auto shader pointer for nodes that
1447 // share this RenderState.
1448 ////////////////////////////////////////////////////////////////////
1449 CPT(RenderState) RenderState::
1450 do_calc_auto_shader_state() {
1451  RenderState *state = new RenderState;
1452 
1453  SlotMask mask = _filled_slots;
1454  int slot = mask.get_lowest_on_bit();
1455  while (slot >= 0) {
1456  const Attribute &attrib = _attributes[slot];
1457  nassertr(attrib._attrib != (RenderAttrib *)NULL, this);
1458  CPT(RenderAttrib) new_attrib = attrib._attrib->get_auto_shader_attrib(this);
1459  if (new_attrib != NULL) {
1460  nassertr(new_attrib->get_slot() == slot, this);
1461  state->_attributes[slot].set(new_attrib, 0);
1462  state->_filled_slots.set_bit(slot);
1463  }
1464 
1465  mask.clear_bit(slot);
1466  slot = mask.get_lowest_on_bit();
1467  }
1468 
1469  return return_new(state);
1470 }
1471 
1472 
1473 ////////////////////////////////////////////////////////////////////
1474 // Function: RenderState::return_new
1475 // Access: Private, Static
1476 // Description: This function is used to share a common RenderState
1477 // pointer for all equivalent RenderState objects.
1478 //
1479 // This is different from return_unique() in that it
1480 // does not actually guarantee a unique pointer, unless
1481 // uniquify-states is set.
1482 ////////////////////////////////////////////////////////////////////
1483 CPT(RenderState) RenderState::
1484 return_new(RenderState *state) {
1485  nassertr(state != (RenderState *)NULL, state);
1486 
1487  // Make sure we don't have anything in the 0 slot. If we did, that
1488  // would indicate an uninitialized slot number.
1489 #ifndef NDEBUG
1490  if (state->_attributes[0]._attrib != (RenderAttrib *)NULL) {
1491  const RenderAttrib *attrib = state->_attributes[0]._attrib;
1492  if (attrib->get_type() == TypeHandle::none()) {
1493  ((RenderAttrib *)attrib)->force_init_type();
1494  pgraph_cat->error()
1495  << "Uninitialized RenderAttrib type: " << attrib->get_type()
1496  << "\n";
1497 
1498  } else {
1499  static pset<TypeHandle> already_reported;
1500  if (already_reported.insert(attrib->get_type()).second) {
1501  pgraph_cat->error()
1502  << attrib->get_type() << " did not initialize its slot number.\n";
1503  }
1504  }
1505  }
1506 #endif
1507  state->_attributes[0]._attrib = NULL;
1508  state->_filled_slots.clear_bit(0);
1509 
1510 #ifndef NDEBUG
1511  nassertr(state->validate_filled_slots(), state);
1512 #endif
1513 
1514  if (!uniquify_states && !state->is_empty()) {
1515  return state;
1516  }
1517 
1518  return return_unique(state);
1519 }
1520 
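// What this means at the API level, as a minimal sketch (assumes the
// public RenderState::make() interface; example_shared_pointers is an
// illustrative helper): with both uniquify-states and state-cache in
// effect, equivalent states collapse to a single shared pointer.
static void
example_shared_pointers() {
  CPT(RenderState) a =
    RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_alpha));
  CPT(RenderState) b =
    RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_alpha));
  if (uniquify_states && state_cache) {
    // Both calls funneled through return_unique(), so the two handles
    // refer to literally the same object.
    nassertv(a.p() == b.p());
  }
}
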
1521 ////////////////////////////////////////////////////////////////////
1522 // Function: RenderState::return_unique
1523 // Access: Private, Static
1524 // Description: This function is used to share a common RenderState
1525 // pointer for all equivalent RenderState objects.
1526 //
1527 // See the similar logic in RenderAttrib. The idea is
1528 // to create a new RenderState object and pass it
1529 // through this function, which will share the pointer
1530 // with a previously-created RenderState object if it is
1531 // equivalent.
1532 ////////////////////////////////////////////////////////////////////
1533 CPT(RenderState) RenderState::
1534 return_unique(RenderState *state) {
1535  nassertr(state != (RenderState *)NULL, state);
1536 
1537  if (!state_cache) {
1538  return state;
1539  }
1540 
1541 #ifndef NDEBUG
1542  if (paranoid_const) {
1543  nassertr(validate_states(), state);
1544  }
1545 #endif
1546 
1547  LightReMutexHolder holder(*_states_lock);
1548 
1549  if (state->_saved_entry != -1) {
1550  // This state is already in the cache.
1551  //nassertr(_states->find(state) == state->_saved_entry, state);
1552  return state;
1553  }
1554 
1555  // Save the state in a local PointerTo so that it will be freed at
1556  // the end of this function if no one else uses it.
1557  CPT(RenderState) pt_state = state;
1558 
1559  // Ensure each of the individual attrib pointers has been uniquified
1560  // before we add the state to the cache.
1561  if (!uniquify_attribs && !state->is_empty()) {
1562  SlotMask mask = state->_filled_slots;
1563  int slot = mask.get_lowest_on_bit();
1564  while (slot >= 0) {
1565  Attribute &attrib = state->_attributes[slot];
1566  nassertr(attrib._attrib != (RenderAttrib *)NULL, state);
1567  attrib._attrib = attrib._attrib->get_unique();
1568  mask.clear_bit(slot);
1569  slot = mask.get_lowest_on_bit();
1570  }
1571  }
1572 
1573  int si = _states->find(state);
1574  if (si != -1) {
1575  // There's an equivalent state already in the set. Return it.
1576  return _states->get_key(si);
1577  }
1578 
1579  // Not already in the set; add it.
1580  if (garbage_collect_states) {
1581  // If we'll be garbage collecting states explicitly, we'll
1582  // increment the reference count when we store it in the cache, so
 1583  // that it won't be deleted while it's in the cache.
1584  state->cache_ref();
1585  }
1586  si = _states->store(state, Empty());
1587 
1588  // Save the index and return the input state.
1589  state->_saved_entry = si;
1590  return pt_state;
1591 }
1592 
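// The global table that return_unique() maintains can be inspected and
// trimmed through the static helpers on RenderState.  A quick sketch
// (example_cache_report is an illustrative helper):
static void
example_cache_report(ostream &out) {
  out << RenderState::get_num_states() << " states currently registered\n";
  RenderState::list_states(out);    // one line per registered state
  RenderState::garbage_collect();   // one incremental collection pass
  RenderState::clear_cache();       // drop the cached compositions
}
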
1593 ////////////////////////////////////////////////////////////////////
1594 // Function: RenderState::do_compose
1595 // Access: Private
 1596 // Description: The private implementation of compose(); this actually
1597 // composes two RenderStates, without bothering with the
1598 // cache.
1599 ////////////////////////////////////////////////////////////////////
1600 CPT(RenderState) RenderState::
1601 do_compose(const RenderState *other) const {
1602  PStatTimer timer(_state_compose_pcollector);
1603 
1604  RenderState *new_state = new RenderState;
1605 
1606  SlotMask mask = _filled_slots | other->_filled_slots;
1607  new_state->_filled_slots = mask;
1608 
1609  int slot = mask.get_lowest_on_bit();
1610  while (slot >= 0) {
1611  const Attribute &a = _attributes[slot];
1612  const Attribute &b = other->_attributes[slot];
1613  Attribute &result = new_state->_attributes[slot];
1614 
1615  if (a._attrib == NULL) {
1616  nassertr(b._attrib != NULL, this);
1617  // B wins.
1618  result = b;
1619 
1620  } else if (b._attrib == NULL) {
1621  // A wins.
1622  result = a;
1623 
1624  } else if (b._override < a._override) {
1625  // A, the higher RenderAttrib, overrides.
1626  result = a;
1627 
1628  } else if (a._override < b._override &&
1629  a._attrib->lower_attrib_can_override()) {
 1630  // B, the lower RenderAttrib, overrides. This is a special
1631  // case; normally, a lower RenderAttrib does not override a
1632  // higher one, even if it has a higher override value. But
1633  // certain kinds of RenderAttribs redefine
1634  // lower_attrib_can_override() to return true, allowing this
1635  // override.
1636  result = b;
1637 
1638  } else {
1639  // Either they have the same override value, or B is higher.
1640  // In either case, the result is the composition of the two,
1641  // with B's override value.
1642  result.set(a._attrib->compose(b._attrib), b._override);
1643  }
1644 
1645  mask.clear_bit(slot);
1646  slot = mask.get_lowest_on_bit();
1647  }
1648 
1649  // If we have any ShaderAttrib with auto-shader enabled,
1650  // remove any shader inputs on it. This is a workaround for an
1651  // issue that makes the shader-generator regenerate the shader
1652  // every time a shader input changes.
1653  CPT(ShaderAttrib) sattrib = DCAST(ShaderAttrib, new_state->get_attrib_def(ShaderAttrib::get_class_slot()));
1654  if (sattrib->auto_shader()) {
1655  sattrib = DCAST(ShaderAttrib, sattrib->clear_all_shader_inputs());
1656  }
1657 
1658  return return_new(new_state);
1659 }
1660 
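// A minimal sketch of the override rule implemented above (assumes the
// public RenderState::make(attrib, override) interface;
// example_compose_override is an illustrative helper):
static void
example_compose_override() {
  // The parent state forces transparency off with override 1; the
  // child asks for alpha transparency with the default override 0.
  CPT(RenderState) parent =
    RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_none), 1);
  CPT(RenderState) child =
    RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_alpha));

  // In do_compose(), b._override (0) < a._override (1), so the
  // parent's attrib survives: the composed state keeps M_none.
  CPT(RenderState) net = parent->compose(child);
}
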
1661 ////////////////////////////////////////////////////////////////////
1662 // Function: RenderState::do_invert_compose
1663 // Access: Private
 1664 // Description: The private implementation of invert_compose().
1665 ////////////////////////////////////////////////////////////////////
1666 CPT(RenderState) RenderState::
1667 do_invert_compose(const RenderState *other) const {
1668  PStatTimer timer(_state_invert_pcollector);
1669 
1670  RenderState *new_state = new RenderState;
1671 
1672  SlotMask mask = _filled_slots | other->_filled_slots;
1673  new_state->_filled_slots = mask;
1674 
1675  int slot = mask.get_lowest_on_bit();
1676  while (slot >= 0) {
1677  const Attribute &a = _attributes[slot];
1678  const Attribute &b = other->_attributes[slot];
1679  Attribute &result = new_state->_attributes[slot];
1680 
1681  if (a._attrib == NULL) {
1682  nassertr(b._attrib != NULL, this);
1683  // B wins.
1684  result = b;
1685 
1686  } else if (b._attrib == NULL) {
1687  // A wins. Invert it.
1688  CPT(RenderState) full_default = make_full_default();
1689  CPT(RenderAttrib) default_attrib = full_default->get_attrib(slot);
1690  result.set(a._attrib->invert_compose(default_attrib), 0);
1691 
1692  } else {
1693  // Both are good. (Overrides are not used in invert_compose.)
1694  // Compose.
1695  result.set(a._attrib->invert_compose(b._attrib), 0);
1696  }
1697 
1698  mask.clear_bit(slot);
1699  slot = mask.get_lowest_on_bit();
1700  }
1701  return return_new(new_state);
1702 }
1703 
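// invert_compose() answers "what must be applied on top of this state
// to arrive at other?"  A short sketch (example_relative_state is an
// illustrative helper); this is essentially the computation behind
// relative state queries such as NodePath::get_state(other):
static CPT(RenderState)
example_relative_state(const RenderState *net_a, const RenderState *net_b) {
  // rel is the state that, composed onto net_a, reproduces net_b's
  // attribs.
  CPT(RenderState) rel = net_a->invert_compose(net_b);
  return rel;
}
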
1704 ////////////////////////////////////////////////////////////////////
1705 // Function: RenderState::detect_and_break_cycles
1706 // Access: Private
1707 // Description: Detects whether there is a cycle in the cache that
1708 // begins with this state. If any are detected, breaks
1709 // them by removing this state from the cache.
1710 ////////////////////////////////////////////////////////////////////
1711 void RenderState::
1712 detect_and_break_cycles() {
1713  PStatTimer timer(_state_break_cycles_pcollector);
1714 
1715  ++_last_cycle_detect;
1716  if (r_detect_cycles(this, this, 1, _last_cycle_detect, NULL)) {
1717  // Ok, we have a cycle. This will be a leak unless we break the
1718  // cycle by freeing the cache on this object.
1719  if (pgraph_cat.is_debug()) {
1720  pgraph_cat.debug()
1721  << "Breaking cycle involving " << (*this) << "\n";
1722  }
1723 
1724  ((RenderState *)this)->remove_cache_pointers();
1725  } else {
1726  ++_last_cycle_detect;
1727  if (r_detect_reverse_cycles(this, this, 1, _last_cycle_detect, NULL)) {
1728  if (pgraph_cat.is_debug()) {
1729  pgraph_cat.debug()
1730  << "Breaking cycle involving " << (*this) << "\n";
1731  }
1732 
1733  ((RenderState *)this)->remove_cache_pointers();
1734  }
1735  }
1736 }
1737 
1738 ////////////////////////////////////////////////////////////////////
1739 // Function: RenderState::r_detect_cycles
1740 // Access: Private, Static
1741 // Description: Detects whether there is a cycle in the cache that
1742 // begins with the indicated state. Returns true if at
1743 // least one cycle is found, false if this state is not
1744 // part of any cycles. If a cycle is found and
1745 // cycle_desc is not NULL, then cycle_desc is filled in
1746 // with the list of the steps of the cycle, in reverse
1747 // order.
1748 ////////////////////////////////////////////////////////////////////
1749 bool RenderState::
1750 r_detect_cycles(const RenderState *start_state,
1751  const RenderState *current_state,
1752  int length, UpdateSeq this_seq,
1753  RenderState::CompositionCycleDesc *cycle_desc) {
1754  if (current_state->_cycle_detect == this_seq) {
1755  // We've already seen this state; therefore, we've found a cycle.
1756 
1757  // However, we only care about cycles that return to the starting
1758  // state and involve more than two steps. If only one or two
1759  // nodes are involved, it doesn't represent a memory leak, so no
1760  // problem there.
1761  return (current_state == start_state && length > 2);
1762  }
1763  ((RenderState *)current_state)->_cycle_detect = this_seq;
1764 
1765  int i;
1766  int cache_size = current_state->_composition_cache.get_size();
1767  for (i = 0; i < cache_size; ++i) {
1768  if (current_state->_composition_cache.has_element(i)) {
1769  const RenderState *result = current_state->_composition_cache.get_data(i)._result;
1770  if (result != (const RenderState *)NULL) {
1771  if (r_detect_cycles(start_state, result, length + 1,
1772  this_seq, cycle_desc)) {
1773  // Cycle detected.
1774  if (cycle_desc != (CompositionCycleDesc *)NULL) {
1775  const RenderState *other = current_state->_composition_cache.get_key(i);
1776  CompositionCycleDescEntry entry(other, result, false);
1777  cycle_desc->push_back(entry);
1778  }
1779  return true;
1780  }
1781  }
1782  }
1783  }
1784 
1785  cache_size = current_state->_invert_composition_cache.get_size();
1786  for (i = 0; i < cache_size; ++i) {
1787  if (current_state->_invert_composition_cache.has_element(i)) {
1788  const RenderState *result = current_state->_invert_composition_cache.get_data(i)._result;
1789  if (result != (const RenderState *)NULL) {
1790  if (r_detect_cycles(start_state, result, length + 1,
1791  this_seq, cycle_desc)) {
1792  // Cycle detected.
1793  if (cycle_desc != (CompositionCycleDesc *)NULL) {
1794  const RenderState *other = current_state->_invert_composition_cache.get_key(i);
1795  CompositionCycleDescEntry entry(other, result, true);
1796  cycle_desc->push_back(entry);
1797  }
1798  return true;
1799  }
1800  }
1801  }
1802  }
1803 
1804  // No cycle detected.
1805  return false;
1806 }
1807 
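// The traversal above uses a monotonically increasing visit stamp
// (UpdateSeq) instead of a per-pass "visited" set: a node counts as
// already seen in this pass iff its stored stamp equals the current
// sequence number.  A stripped-down sketch of the same trick
// (ExampleNode and example_detect_cycle are placeholders, not Panda
// classes):
struct ExampleNode {
  UpdateSeq _cycle_detect;         // stamp of the last pass that saw us
  pvector<ExampleNode *> _edges;   // cached links to other nodes
};

static bool
example_detect_cycle(ExampleNode *start, ExampleNode *current,
                     int length, UpdateSeq this_seq) {
  if (current->_cycle_detect == this_seq) {
    // Already visited in this pass; only a loop of more than two steps
    // back to the start is reported as a cycle.
    return (current == start && length > 2);
  }
  current->_cycle_detect = this_seq;

  for (size_t i = 0; i < current->_edges.size(); ++i) {
    if (example_detect_cycle(start, current->_edges[i], length + 1, this_seq)) {
      return true;
    }
  }
  return false;
}
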
1808 ////////////////////////////////////////////////////////////////////
1809 // Function: RenderState::r_detect_reverse_cycles
1810 // Access: Private, Static
1811 // Description: Works the same as r_detect_cycles, but checks for
1812 // cycles in the reverse direction along the cache
1813 // chain. (A cycle may appear in either direction, and
1814 // we must check both.)
1815 ////////////////////////////////////////////////////////////////////
1816 bool RenderState::
1817 r_detect_reverse_cycles(const RenderState *start_state,
1818  const RenderState *current_state,
1819  int length, UpdateSeq this_seq,
1820  RenderState::CompositionCycleDesc *cycle_desc) {
1821  if (current_state->_cycle_detect == this_seq) {
1822  // We've already seen this state; therefore, we've found a cycle.
1823 
1824  // However, we only care about cycles that return to the starting
1825  // state and involve more than two steps. If only one or two
1826  // nodes are involved, it doesn't represent a memory leak, so no
1827  // problem there.
1828  return (current_state == start_state && length > 2);
1829  }
1830  ((RenderState *)current_state)->_cycle_detect = this_seq;
1831 
1832  int i;
1833  int cache_size = current_state->_composition_cache.get_size();
1834  for (i = 0; i < cache_size; ++i) {
1835  if (current_state->_composition_cache.has_element(i)) {
1836  const RenderState *other = current_state->_composition_cache.get_key(i);
1837  if (other != current_state) {
1838  int oi = other->_composition_cache.find(current_state);
1839  nassertr(oi != -1, false);
1840 
1841  const RenderState *result = other->_composition_cache.get_data(oi)._result;
1842  if (result != (const RenderState *)NULL) {
1843  if (r_detect_reverse_cycles(start_state, result, length + 1,
1844  this_seq, cycle_desc)) {
1845  // Cycle detected.
1846  if (cycle_desc != (CompositionCycleDesc *)NULL) {
1847  const RenderState *other = current_state->_composition_cache.get_key(i);
1848  CompositionCycleDescEntry entry(other, result, false);
1849  cycle_desc->push_back(entry);
1850  }
1851  return true;
1852  }
1853  }
1854  }
1855  }
1856  }
1857 
1858  cache_size = current_state->_invert_composition_cache.get_size();
1859  for (i = 0; i < cache_size; ++i) {
1860  if (current_state->_invert_composition_cache.has_element(i)) {
1861  const RenderState *other = current_state->_invert_composition_cache.get_key(i);
1862  if (other != current_state) {
1863  int oi = other->_invert_composition_cache.find(current_state);
1864  nassertr(oi != -1, false);
1865 
1866  const RenderState *result = other->_invert_composition_cache.get_data(oi)._result;
1867  if (result != (const RenderState *)NULL) {
1868  if (r_detect_reverse_cycles(start_state, result, length + 1,
1869  this_seq, cycle_desc)) {
1870  // Cycle detected.
1871  if (cycle_desc != (CompositionCycleDesc *)NULL) {
1872  const RenderState *other = current_state->_invert_composition_cache.get_key(i);
1873  CompositionCycleDescEntry entry(other, result, false);
1874  cycle_desc->push_back(entry);
1875  }
1876  return true;
1877  }
1878  }
1879  }
1880  }
1881  }
1882 
1883  // No cycle detected.
1884  return false;
1885 }
1886 
1887 ////////////////////////////////////////////////////////////////////
1888 // Function: RenderState::release_new
1889 // Access: Private
 1890 // Description: The inverse of return_new(), this releases this object
1891 // from the global RenderState table.
1892 //
1893 // You must already be holding _states_lock before you
1894 // call this method.
1895 ////////////////////////////////////////////////////////////////////
1896 void RenderState::
1897 release_new() {
1898  nassertv(_states_lock->debug_is_locked());
1899 
1900  if (_saved_entry != -1) {
1901  //nassertv(_states->find(this) == _saved_entry);
1902  _saved_entry = _states->find(this);
1903  _states->remove_element(_saved_entry);
1904  _saved_entry = -1;
1905  }
1906 }
1907 
1908 ////////////////////////////////////////////////////////////////////
1909 // Function: RenderState::remove_cache_pointers
1910 // Access: Private
1911 // Description: Remove all pointers within the cache from and to this
1912 // particular RenderState. The pointers to this
1913 // object may be scattered around in the various
1914 // CompositionCaches from other RenderState objects.
1915 //
1916 // You must already be holding _states_lock before you
1917 // call this method.
1918 ////////////////////////////////////////////////////////////////////
1919 void RenderState::
1920 remove_cache_pointers() {
1921  nassertv(_states_lock->debug_is_locked());
1922 
1923  // First, make sure the _auto_shader_state cache pointer is cleared.
1924  if (_auto_shader_state != (const RenderState *)NULL) {
1925  if (_auto_shader_state != this) {
1926  cache_unref_delete(_auto_shader_state);
1927  }
1928  _auto_shader_state = NULL;
1929  }
1930 
1931  // Fortunately, since we added CompositionCache records in pairs, we
1932  // know exactly the set of RenderState objects that have us in their
1933  // cache: it's the same set of RenderState objects that we have in
1934  // our own cache.
1935 
1936  // We do need to put considerable thought into this loop, because as
1937  // we clear out cache entries we'll cause other RenderState
1938  // objects to destruct, which could cause things to get pulled out
1939  // of our own _composition_cache map. We want to allow this (so
1940  // that we don't encounter any just-destructed pointers in our
1941  // cache), but we don't want to get bitten by this cascading effect.
1942  // Instead of walking through the map from beginning to end,
1943  // therefore, we just pull out the first one each time, and erase
1944  // it.
1945 
1946 #ifdef DO_PSTATS
1947  if (_composition_cache.is_empty() && _invert_composition_cache.is_empty()) {
1948  return;
1949  }
1950  PStatTimer timer(_cache_update_pcollector);
1951 #endif // DO_PSTATS
1952 
1953  // There are lots of ways to do this loop wrong. Be very careful if
1954  // you need to modify it for any reason.
1955  int i = 0;
1956  while (!_composition_cache.is_empty()) {
1957  // Scan for the next used slot in the table.
1958  while (!_composition_cache.has_element(i)) {
1959  ++i;
1960  }
1961 
1962  // It is possible that the "other" RenderState object is
1963  // currently within its own destructor. We therefore can't use a
1964  // PT() to hold its pointer; that could end up calling its
1965  // destructor twice. Fortunately, we don't need to hold its
1966  // reference count to ensure it doesn't destruct while we process
1967  // this loop; as long as we ensure that no *other* RenderState
1968  // objects destruct, there will be no reason for that one to.
1969  RenderState *other = (RenderState *)_composition_cache.get_key(i);
1970 
1971  // We hold a copy of the composition result so we can dereference
1972  // it later.
1973  Composition comp = _composition_cache.get_data(i);
1974 
1975  // Now we can remove the element from our cache. We do this now,
1976  // rather than later, before any other RenderState objects have
1977  // had a chance to destruct, so we are confident that our iterator
1978  // is still valid.
1979  _composition_cache.remove_element(i);
1980  _cache_stats.add_total_size(-1);
1981  _cache_stats.inc_dels();
1982 
1983  if (other != this) {
1984  int oi = other->_composition_cache.find(this);
1985 
1986  // We may or may not still be listed in the other's cache (it
1987  // might be halfway through pulling entries out, from within its
1988  // own destructor).
1989  if (oi != -1) {
1990  // Hold a copy of the other composition result, too.
1991  Composition ocomp = other->_composition_cache.get_data(oi);
1992 
1993  other->_composition_cache.remove_element(oi);
1994  _cache_stats.add_total_size(-1);
1995  _cache_stats.inc_dels();
1996 
1997  // It's finally safe to let our held pointers go away. This may
1998  // have cascading effects as other RenderState objects are
1999  // destructed, but there will be no harm done if they destruct
2000  // now.
2001  if (ocomp._result != (const RenderState *)NULL && ocomp._result != other) {
2002  cache_unref_delete(ocomp._result);
2003  }
2004  }
2005  }
2006 
2007  // It's finally safe to let our held pointers go away. (See
2008  // comment above.)
2009  if (comp._result != (const RenderState *)NULL && comp._result != this) {
2010  cache_unref_delete(comp._result);
2011  }
2012  }
2013 
2014  // A similar bit of code for the invert cache.
2015  i = 0;
2016  while (!_invert_composition_cache.is_empty()) {
2017  while (!_invert_composition_cache.has_element(i)) {
2018  ++i;
2019  }
2020 
2021  RenderState *other = (RenderState *)_invert_composition_cache.get_key(i);
2022  nassertv(other != this);
2023  Composition comp = _invert_composition_cache.get_data(i);
2024  _invert_composition_cache.remove_element(i);
2025  _cache_stats.add_total_size(-1);
2026  _cache_stats.inc_dels();
2027  if (other != this) {
2028  int oi = other->_invert_composition_cache.find(this);
2029  if (oi != -1) {
2030  Composition ocomp = other->_invert_composition_cache.get_data(oi);
2031  other->_invert_composition_cache.remove_element(oi);
2032  _cache_stats.add_total_size(-1);
2033  _cache_stats.inc_dels();
2034  if (ocomp._result != (const RenderState *)NULL && ocomp._result != other) {
2035  cache_unref_delete(ocomp._result);
2036  }
2037  }
2038  }
2039  if (comp._result != (const RenderState *)NULL && comp._result != this) {
2040  cache_unref_delete(comp._result);
2041  }
2042  }
2043 }
2044 
2045 ////////////////////////////////////////////////////////////////////
2046 // Function: RenderState::determine_bin_index
2047 // Access: Private
2048 // Description: This is the private implementation of
2049 // get_bin_index() and get_draw_order().
2050 ////////////////////////////////////////////////////////////////////
2051 void RenderState::
2052 determine_bin_index() {
2053  LightMutexHolder holder(_lock);
2054  if ((_flags & F_checked_bin_index) != 0) {
2055  // Someone else checked it first.
2056  return;
2057  }
2058 
2059  string bin_name;
2060  _draw_order = 0;
2061 
2062  const CullBinAttrib *bin = DCAST(CullBinAttrib, get_attrib(CullBinAttrib::get_class_slot()));
2063  if (bin != (const CullBinAttrib *)NULL) {
2064  bin_name = bin->get_bin_name();
2065  _draw_order = bin->get_draw_order();
2066  }
2067 
2068  if (bin_name.empty()) {
 2069  // No explicit bin is specified; put it in the default bin,
2070  // either opaque or transparent, based on the transparency
2071  // setting.
2072  bin_name = "opaque";
2073 
2074  const TransparencyAttrib *transparency = DCAST(TransparencyAttrib, get_attrib(TransparencyAttrib::get_class_slot()));
2075  if (transparency != (const TransparencyAttrib *)NULL) {
2076  switch (transparency->get_mode()) {
2077  case TransparencyAttrib::M_alpha:
2078  case TransparencyAttrib::M_dual:
2079  // These transparency modes require special back-to-front sorting.
2080  bin_name = "transparent";
2081  break;
2082 
2083  default:
2084  break;
2085  }
2086  }
2087  }
2088 
2089  CullBinManager *bin_manager = CullBinManager::get_global_ptr();
2090  _bin_index = bin_manager->find_bin(bin_name);
2091  if (_bin_index == -1) {
2092  pgraph_cat.warning()
2093  << "No bin named " << bin_name << "; creating default bin.\n";
2094  _bin_index = bin_manager->add_bin(bin_name, CullBinManager::BT_unsorted, 0);
2095  }
2096  _flags |= F_checked_bin_index;
2097 }
2098 
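// How the bin choice above plays out at the API level (an illustrative
// sketch; example_bin_choices is not part of the API): an explicit
// CullBinAttrib names the bin directly, an alpha or dual
// TransparencyAttrib lands the state in "transparent", and everything
// else falls back to "opaque".
static void
example_bin_choices() {
  // Explicit bin: drawn in the "fixed" bin with draw order 10.
  CPT(RenderState) fixed_state =
    RenderState::make(CullBinAttrib::make("fixed", 10));

  // No bin attrib, but alpha transparency: sorted into "transparent".
  CPT(RenderState) blended =
    RenderState::make(TransparencyAttrib::make(TransparencyAttrib::M_alpha));

  // No bin attrib and no transparency: stays in "opaque".
  CPT(RenderState) plain = RenderState::make_empty();
}
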
2099 ////////////////////////////////////////////////////////////////////
2100 // Function: RenderState::determine_cull_callback
2101 // Access: Private
2102 // Description: This is the private implementation of has_cull_callback().
2103 ////////////////////////////////////////////////////////////////////
2104 void RenderState::
2105 determine_cull_callback() {
2106  LightMutexHolder holder(_lock);
2107  if ((_flags & F_checked_cull_callback) != 0) {
2108  // Someone else checked it first.
2109  return;
2110  }
2111 
2112  SlotMask mask = _filled_slots;
2113  int slot = mask.get_lowest_on_bit();
2114  while (slot >= 0) {
2115  const Attribute &attrib = _attributes[slot];
2116  nassertv(attrib._attrib != (RenderAttrib *)NULL);
2117  if (attrib._attrib->has_cull_callback()) {
2118  _flags |= F_has_cull_callback;
2119  break;
2120  }
2121 
2122  mask.clear_bit(slot);
2123  slot = mask.get_lowest_on_bit();
2124  }
2125 
2126  _flags |= F_checked_cull_callback;
2127 }
2128 
2129 ////////////////////////////////////////////////////////////////////
2130 // Function: RenderState::fill_default
2131 // Access: Private
2132 // Description: Fills up the state with all of the default attribs.
2133 ////////////////////////////////////////////////////////////////////
2134 void RenderState::
2135 fill_default() {
2136  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
2137  int num_slots = reg->get_num_slots();
2138  for (int slot = 1; slot < num_slots; ++slot) {
2139  _attributes[slot].set(reg->get_slot_default(slot), 0);
2140  _filled_slots.set_bit(slot);
2141  }
2142 }
2143 
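// fill_default() is the machinery behind make_full_default(), which is
// used by do_invert_compose() above: the resulting state carries the
// registered default attrib in every slot.  A tiny sketch
// (example_full_default is an illustrative helper):
static void
example_full_default() {
  CPT(RenderState) full = RenderState::make_full_default();
  // Unlike an empty state, every registered slot is populated, so a
  // direct slot lookup should not come back NULL:
  const RenderAttrib *t =
    full->get_attrib(TransparencyAttrib::get_class_slot());
  nassertv(t != (const RenderAttrib *)NULL);
}
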
2144 ////////////////////////////////////////////////////////////////////
2145 // Function: RenderState::update_pstats
2146 // Access: Private
2147 // Description: Moves the RenderState object from one PStats category
2148 // to another, so that we can track in PStats how many
2149 // pointers are held by nodes, and how many are held in
2150 // the cache only.
2151 ////////////////////////////////////////////////////////////////////
2152 void RenderState::
2153 update_pstats(int old_referenced_bits, int new_referenced_bits) {
2154 #ifdef DO_PSTATS
2155  if ((old_referenced_bits & R_node) != 0) {
2156  _node_counter.sub_level(1);
2157  } else if ((old_referenced_bits & R_cache) != 0) {
2158  _cache_counter.sub_level(1);
2159  }
2160  if ((new_referenced_bits & R_node) != 0) {
2161  _node_counter.add_level(1);
2162  } else if ((new_referenced_bits & R_cache) != 0) {
2163  _cache_counter.add_level(1);
2164  }
2165 #endif // DO_PSTATS
2166 }
2167 
2168 ////////////////////////////////////////////////////////////////////
2169 // Function: RenderState::init_states
2170 // Access: Public, Static
2171 // Description: Make sure the global _states map is allocated. This
2172 // only has to be done once. We could make this map
2173 // static, but then we run into problems if anyone
2174 // creates a RenderState object at static init time;
2175 // it also seems to cause problems when the Panda shared
2176 // library is unloaded at application exit time.
2177 ////////////////////////////////////////////////////////////////////
2178 void RenderState::
2179 init_states() {
2180  _states = new States;
2181 
2182  // TODO: we should have a global Panda mutex to allow us to safely
2183  // create _states_lock without a startup race condition. For the
2184  // meantime, this is OK because we guarantee that this method is
2185  // called at static init time, presumably when there is still only
2186  // one thread in the world.
2187  _states_lock = new LightReMutex("RenderState::_states_lock");
2188  _cache_stats.init();
2189  nassertv(Thread::get_current_thread() == Thread::get_main_thread());
2190 }
2191 
2192 
2193 ////////////////////////////////////////////////////////////////////
2194 // Function: RenderState::register_with_read_factory
2195 // Access: Public, Static
2196 // Description: Tells the BamReader how to create objects of type
2197 // RenderState.
2198 ////////////////////////////////////////////////////////////////////
2199 void RenderState::
2200 register_with_read_factory() {
2201  BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
2202 }
2203 
2204 ////////////////////////////////////////////////////////////////////
2205 // Function: RenderState::write_datagram
2206 // Access: Public, Virtual
2207 // Description: Writes the contents of this object to the datagram
2208 // for shipping out to a Bam file.
2209 ////////////////////////////////////////////////////////////////////
2210 void RenderState::
2211 write_datagram(BamWriter *manager, Datagram &dg) {
2212  TypedWritable::write_datagram(manager, dg);
2213 
2214  int num_attribs = _filled_slots.get_num_on_bits();
2215  nassertv(num_attribs == (int)(PN_uint16)num_attribs);
2216  dg.add_uint16(num_attribs);
2217 
2218  // **** We should smarten up the writing of the override
2219  // number--most of the time these will all be zero.
2220  SlotMask mask = _filled_slots;
2221  int slot = mask.get_lowest_on_bit();
2222  while (slot >= 0) {
2223  const Attribute &attrib = _attributes[slot];
2224  nassertv(attrib._attrib != (RenderAttrib *)NULL);
2225  manager->write_pointer(dg, attrib._attrib);
2226  dg.add_int32(attrib._override);
2227 
2228  mask.clear_bit(slot);
2229  slot = mask.get_lowest_on_bit();
2230  }
2231 }
2232 
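// The bam pathway end to end, as a hedged sketch (assumes the usual
// NodePath convenience API, with nodePath.h available;
// example_bam_round_trip is an illustrative helper): writing a scene
// graph runs each node's RenderState through write_datagram() above,
// and reading it back funnels every state through fillin(),
// complete_pointers() and change_this(), so equivalent states come
// back as shared pointers.
static void
example_bam_round_trip(NodePath scene) {
  scene.set_attrib(TransparencyAttrib::make(TransparencyAttrib::M_alpha));
  scene.write_bam_file("scene.bam");
  // Loading "scene.bam" later recreates the node; change_this() then
  // redirects its RenderState to an already-registered equivalent, if
  // one exists.
}
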
2233 ////////////////////////////////////////////////////////////////////
2234 // Function: RenderState::complete_pointers
2235 // Access: Public, Virtual
2236 // Description: Receives an array of pointers, one for each time
2237 // manager->read_pointer() was called in fillin().
2238 // Returns the number of pointers processed.
2239 ////////////////////////////////////////////////////////////////////
2240 int RenderState::
2241 complete_pointers(TypedWritable **p_list, BamReader *manager) {
2242  int pi = TypedWritable::complete_pointers(p_list, manager);
2243 
2244  int num_attribs = 0;
2245 
2246  RenderAttribRegistry *reg = RenderAttribRegistry::quick_get_global_ptr();
2247  for (size_t i = 0; i < (*_read_overrides).size(); ++i) {
2248  int override = (*_read_overrides)[i];
2249 
2250  RenderAttrib *attrib = DCAST(RenderAttrib, p_list[pi++]);
2251  if (attrib != (RenderAttrib *)NULL) {
2252  int slot = attrib->get_slot();
2253  if (slot > 0 && slot < reg->get_max_slots()) {
2254  _attributes[slot].set(attrib, override);
2255  _filled_slots.set_bit(slot);
2256  ++num_attribs;
2257  }
2258  }
2259  }
2260 
2261  delete _read_overrides;
2262  _read_overrides = NULL;
2263 
2264  return pi;
2265 }
2266 
2267 ////////////////////////////////////////////////////////////////////
2268 // Function: RenderState::change_this
2269 // Access: Public, Static
2270 // Description: Called immediately after complete_pointers(), this
2271 // gives the object a chance to adjust its own pointer
2272 // if desired. Most objects don't change pointers after
2273 // completion, but some need to.
2274 //
2275 // Once this function has been called, the old pointer
2276 // will no longer be accessed.
2277 ////////////////////////////////////////////////////////////////////
2278 TypedWritable *RenderState::
2279 change_this(TypedWritable *old_ptr, BamReader *manager) {
2280  // First, uniquify the pointer.
2281  RenderState *state = DCAST(RenderState, old_ptr);
2282  CPT(RenderState) pointer = return_unique(state);
2283 
2284  // But now we have a problem, since we have to hold the reference
2285  // count and there's no way to return a TypedWritable while still
2286  // holding the reference count! We work around this by explicitly
2287  // upping the count, and also setting a finalize() callback to down
2288  // it later.
2289  if (pointer == state) {
2290  pointer->ref();
2291  manager->register_finalize(state);
2292  }
2293 
2294  // We have to cast the pointer back to non-const, because the bam
2295  // reader expects that.
2296  return (RenderState *)pointer.p();
2297 }
2298 
2299 ////////////////////////////////////////////////////////////////////
2300 // Function: RenderState::finalize
2301 // Access: Public, Virtual
2302 // Description: Called by the BamReader to perform any final actions
2303 // needed for setting up the object after all objects
2304 // have been read and all pointers have been completed.
2305 ////////////////////////////////////////////////////////////////////
2306 void RenderState::
2307 finalize(BamReader *manager) {
2308  // Unref the pointer that we explicitly reffed in change_this().
2309  unref();
2310 
2311  // We should never get back to zero after unreffing our own count,
2312  // because we expect to have been stored in a pointer somewhere. If
2313  // we do get to zero, it's a memory leak; the way to avoid this is
2314  // to call unref_delete() above instead of unref(), but this is
2315  // dangerous to do from within a virtual function.
2316  nassertv(get_ref_count() != 0);
2317 }
2318 
2319 ////////////////////////////////////////////////////////////////////
2320 // Function: RenderState::make_from_bam
2321 // Access: Protected, Static
2322 // Description: This function is called by the BamReader's factory
2323 // when a new object of type RenderState is encountered
2324 // in the Bam file. It should create the RenderState
2325 // and extract its information from the file.
2326 ////////////////////////////////////////////////////////////////////
2327 TypedWritable *RenderState::
2328 make_from_bam(const FactoryParams &params) {
2329  RenderState *state = new RenderState;
2330  DatagramIterator scan;
2331  BamReader *manager;
2332 
2333  parse_params(params, scan, manager);
2334  state->fillin(scan, manager);
2335  manager->register_change_this(change_this, state);
2336 
2337  return state;
2338 }
2339 
2340 ////////////////////////////////////////////////////////////////////
2341 // Function: RenderState::fillin
2342 // Access: Protected
2343 // Description: This internal function is called by make_from_bam to
2344 // read in all of the relevant data from the BamFile for
2345 // the new RenderState.
2346 ////////////////////////////////////////////////////////////////////
2347 void RenderState::
2348 fillin(DatagramIterator &scan, BamReader *manager) {
2349  TypedWritable::fillin(scan, manager);
2350 
2351  int num_attribs = scan.get_uint16();
2352  _read_overrides = new vector_int;
2353  (*_read_overrides).reserve(num_attribs);
2354 
2355  for (int i = 0; i < num_attribs; ++i) {
2356  manager->read_pointer(scan);
2357  int override = scan.get_int32();
2358  (*_read_overrides).push_back(override);
2359  }
2360 }
2361 