/**
 * PANDA 3D SOFTWARE
 * Copyright (c) Carnegie Mellon University. All rights reserved.
 *
 * All use of this software is subject to the terms of the revised BSD
 * license. You should have received a copy of this license along
 * with this source code in a file named "LICENSE."
 *
 * @file preparedGraphicsObjects.cxx
 * @author drose
 * @date 2004-02-19
 */

#include "preparedGraphicsObjects.h"
#include "textureContext.h"
#include "vertexBufferContext.h"
#include "indexBufferContext.h"
#include "texture.h"
#include "geom.h"
#include "geomVertexArrayData.h"
#include "geomPrimitive.h"
#include "samplerContext.h"
#include "shader.h"
#include "reMutexHolder.h"
#include "geomContext.h"
#include "shaderContext.h"
#include "config_gobj.h"
#include "throw_event.h"

TypeHandle PreparedGraphicsObjects::EnqueuedObject::_type_handle;

int PreparedGraphicsObjects::_name_index = 0;

/**
 *
 */
PreparedGraphicsObjects::
PreparedGraphicsObjects() :
  _lock("PreparedGraphicsObjects::_lock"),
  _name(init_name()),
  _vertex_buffer_cache_size(0),
  _index_buffer_cache_size(0),
  _texture_residency(_name, "texture"),
  _vbuffer_residency(_name, "vbuffer"),
  _ibuffer_residency(_name, "ibuffer"),
  _sbuffer_residency(_name, "sbuffer"),
  _graphics_memory_lru("graphics_memory_lru", graphics_memory_limit),
  _sampler_object_lru("sampler_object_lru", sampler_object_limit)
{
  // GLGSG will turn this flag on. This is a temporary hack to disable this
  // feature for DX8/DX9 for now, until we work out the fine points of
  // updating the fvf properly.
  _support_released_buffer_cache = false;
}
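
// Editorial example (not part of the original source): a minimal sketch of
// where a PreparedGraphicsObjects instance typically comes from. The
// accessor name below is assumed from the GraphicsStateGuardian API.
//
//   PreparedGraphicsObjects *pgo = gsg->get_prepared_objects();
//   pgo->enqueue_texture(tex);  // prepared at the start of the next frame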

/**
 *
 */
PreparedGraphicsObjects::
~PreparedGraphicsObjects() {
  // There may be objects that are still prepared when we destruct. If this
  // is so, then all of the GSG's that own them have already destructed, so we
  // can assume their resources were internally cleaned up. Besides, we may
  // not even be allowed to call the GSG release methods since some APIs (eg.
  // OpenGL) require a context current. So we just call the destructors.
  ReMutexHolder holder(_lock);

  release_all_textures();
  Textures::iterator tci;
  for (tci = _released_textures.begin();
       tci != _released_textures.end();
       ++tci) {
    TextureContext *tc = (*tci);
    delete tc;
  }
  _released_textures.clear();

  release_all_samplers();
  ReleasedSamplers::iterator ssci;
  for (ssci = _released_samplers.begin();
       ssci != _released_samplers.end();
       ++ssci) {
    SamplerContext *sc = (*ssci);
    delete sc;
  }
  _released_samplers.clear();

  release_all_geoms();
  Geoms::iterator gci;
  for (gci = _released_geoms.begin();
       gci != _released_geoms.end();
       ++gci) {
    GeomContext *gc = (*gci);
    delete gc;
  }
  _released_geoms.clear();

  release_all_shaders();
  Shaders::iterator sci;
  for (sci = _released_shaders.begin();
       sci != _released_shaders.end();
       ++sci) {
    ShaderContext *sc = (*sci);
    delete sc;
  }
  _released_shaders.clear();

  release_all_vertex_buffers();
  Buffers::iterator vbci;
  for (vbci = _released_vertex_buffers.begin();
       vbci != _released_vertex_buffers.end();
       ++vbci) {
    VertexBufferContext *vbc = (VertexBufferContext *)(*vbci);
    delete vbc;
  }
  _released_vertex_buffers.clear();

  release_all_index_buffers();
  Buffers::iterator ibci;
  for (ibci = _released_index_buffers.begin();
       ibci != _released_index_buffers.end();
       ++ibci) {
    IndexBufferContext *ibc = (IndexBufferContext *)(*ibci);
    delete ibc;
  }
  _released_index_buffers.clear();

  release_all_shader_buffers();
  Buffers::iterator bci;
  for (bci = _released_shader_buffers.begin();
       bci != _released_shader_buffers.end();
       ++bci) {
    BufferContext *bc = (BufferContext *)(*bci);
    delete bc;
  }
  _released_shader_buffers.clear();
}

/**
 * Sets an artificial cap on graphics memory that will be imposed on this GSG.
 *
 * This limits the total amount of graphics memory, including texture memory
 * and vertex buffer memory, that will be consumed by the GSG, regardless of
 * whether the hardware claims to provide more graphics memory than this. It
 * is useful to put a ceiling on graphics memory consumed, since some drivers
 * seem to allow the application to consume more memory than the hardware can
 * realistically support.
 */
void PreparedGraphicsObjects::
set_graphics_memory_limit(size_t limit) {
  if (limit != _graphics_memory_lru.get_max_size()) {
    _graphics_memory_lru.set_max_size(limit);

    // We throw an event here so global objects (particularly the
    // TexMemWatcher) can automatically respond to this change.
    throw_event("graphics_memory_limit_changed");
  }
}
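
// Editorial example (not part of the original source): capping this GSG at
// 256 MiB. The event name matches the throw_event() call above; how a
// listener hooks the event (e.g. via an EventHandler) is only sketched.
//
//   pgo->set_graphics_memory_limit(256 * 1024 * 1024);
//   // elsewhere, a global object listens for "graphics_memory_limit_changed"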

/**
 * Writes to the indicated ostream a report of how the various textures and
 * vertex buffers are allocated in the LRU.
 */
void PreparedGraphicsObjects::
show_graphics_memory_lru(std::ostream &out) const {
  _graphics_memory_lru.write(out, 0);
}

/**
 * Writes to the indicated ostream a report of the residency status of the
 * various textures and buffers tracked by this object.
 */
void PreparedGraphicsObjects::
show_residency_trackers(std::ostream &out) const {
  out << "Textures:\n";
  _texture_residency.write(out, 2);

  out << "\nVertex buffers:\n";
  _vbuffer_residency.write(out, 2);

  out << "\nIndex buffers:\n";
  _ibuffer_residency.write(out, 2);

  out << "\nShader buffers:\n";
  _sbuffer_residency.write(out, 2);
}

/**
 * Indicates that a texture would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_texture(Texture *tex) {
  ReMutexHolder holder(_lock);

  _enqueued_textures.insert(EnqueuedTextures::value_type(tex, nullptr));
}

/**
 * Like enqueue_texture, but returns an AsyncFuture that can be used to query
 * the status of the texture's preparation.
 */
PT(PreparedGraphicsObjects::EnqueuedObject) PreparedGraphicsObjects::
enqueue_texture_future(Texture *tex) {
  ReMutexHolder holder(_lock);

  std::pair<EnqueuedTextures::iterator, bool> result =
    _enqueued_textures.insert(EnqueuedTextures::value_type(tex, nullptr));
  if (result.first->second == nullptr) {
    result.first->second = new EnqueuedObject(this, tex);
  }
  PT(EnqueuedObject) fut = result.first->second;
  nassertr(!fut->cancelled(), fut);
  return fut;
}
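
// Editorial example (not part of the original source): polling the returned
// future. done() and cancelled() are AsyncFuture queries also used within
// this file; the result stored is the TextureContext for view 0 (see
// set_result() in begin_frame() below).
//
//   PT(PreparedGraphicsObjects::EnqueuedObject) fut =
//       pgo->enqueue_texture_future(tex);
//   // ...some frames later, after the GSG has processed the queue...
//   if (fut->done() && !fut->cancelled()) {
//     // the texture is now prepared on this GSG
//   }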

/**
 * Returns true if the texture has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_texture_queued(const Texture *tex) const {
  ReMutexHolder holder(_lock);

  EnqueuedTextures::const_iterator qi = _enqueued_textures.find((Texture *)tex);
  return (qi != _enqueued_textures.end());
}

/**
 * Removes a texture from the queued list of textures to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the texture will automatically
 * be dequeued and prepared at the next frame.
 *
 * The return value is true if the texture is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_texture(Texture *tex) {
  ReMutexHolder holder(_lock);

  EnqueuedTextures::iterator qi = _enqueued_textures.find(tex);
  if (qi != _enqueued_textures.end()) {
    if (qi->second != nullptr) {
      qi->second->notify_removed();
    }
    _enqueued_textures.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the texture has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_texture_prepared(const Texture *tex) const {
  return tex->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a texture context, created by a previous call to
 * prepare_texture(), is no longer needed. The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release textures--this prevents conflicts from threading or
 * multiple GSG's sharing textures (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_texture is called).
 */
void PreparedGraphicsObjects::
release_texture(TextureContext *tc) {
  ReMutexHolder holder(_lock);

  tc->get_texture()->clear_prepared(tc->get_view(), this);

  // We have to set the Texture pointer to NULL at this point, since the
  // Texture itself might destruct at any time after it has been released.
  tc->_object = nullptr;

  bool removed = (_prepared_textures.erase(tc) != 0);
  nassertv(removed);

  _released_textures.insert(tc);
}

/**
 * Releases a texture if it has already been prepared, or removes it from the
 * preparation queue.
 */
void PreparedGraphicsObjects::
release_texture(Texture *tex) {
  tex->release(this);
}

/**
 * Releases all textures at once. This will force them to be reloaded into
 * texture memory for all GSG's that share this object. Returns the number of
 * textures released.
 */
int PreparedGraphicsObjects::
release_all_textures() {
  ReMutexHolder holder(_lock);

  int num_textures = (int)_prepared_textures.size() + (int)_enqueued_textures.size();

  Textures::iterator tci;
  for (tci = _prepared_textures.begin();
       tci != _prepared_textures.end();
       ++tci) {
    TextureContext *tc = (*tci);
    tc->get_texture()->clear_prepared(tc->get_view(), this);
    tc->_object = nullptr;

    _released_textures.insert(tc);
  }

  _prepared_textures.clear();

  // Mark any futures as cancelled.
  EnqueuedTextures::iterator qti;
  for (qti = _enqueued_textures.begin();
       qti != _enqueued_textures.end();
       ++qti) {
    if (qti->second != nullptr) {
      qti->second->notify_removed();
    }
  }

  _enqueued_textures.clear();

  return num_textures;
}

/**
 * Returns the number of textures that have been enqueued to be prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_textures() const {
  return _enqueued_textures.size();
}

/**
 * Returns the number of textures that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_textures() const {
  return _prepared_textures.size();
}

/**
 * Immediately creates a new TextureContext for the indicated texture and
 * returns it. This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new textures. If
 * this is not necessarily the case, you should use enqueue_texture() instead.
 *
 * Normally, this function is not called directly. Call
 * Texture::prepare_now() instead.
 *
 * The TextureContext contains all of the pertinent information needed by the
 * GSG to keep track of this one particular texture, and will exist as long as
 * the texture is ready to be rendered.
 *
 * When either the Texture or the PreparedGraphicsObjects object destructs,
 * the TextureContext will be deleted.
 */
TextureContext *PreparedGraphicsObjects::
prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new TextureContext. There might be several
  // GSG's sharing the same set of textures; if so, it doesn't matter which of
  // them creates the context (since they're all shared anyway).
  TextureContext *tc = gsg->prepare_texture(tex, view);

  if (tc != nullptr) {
    bool prepared = _prepared_textures.insert(tc).second;
    nassertr(prepared, tc);
  }

  return tc;
}
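
// Editorial example (not part of the original source): immediate preparation
// from the draw thread, with the GSG's context current. The calls mirror the
// usage in begin_frame() below; view 0 is the default view.
//
//   TextureContext *tc = tex->prepare_now(0, pgo, gsg);
//   if (tc != nullptr) {
//     gsg->update_texture(tc, true);
//   }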

/**
 * Indicates that a sampler would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_sampler(const SamplerState &sampler) {
  ReMutexHolder holder(_lock);

  _enqueued_samplers.insert(sampler);
}

/**
 * Returns true if the sampler has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_sampler_queued(const SamplerState &sampler) const {
  ReMutexHolder holder(_lock);

  EnqueuedSamplers::const_iterator qi = _enqueued_samplers.find(sampler);
  return (qi != _enqueued_samplers.end());
}

/**
 * Removes a sampler from the queued list of samplers to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the sampler will automatically
 * be dequeued and prepared at the next frame.
 *
 * The return value is true if the sampler is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_sampler(const SamplerState &sampler) {
  ReMutexHolder holder(_lock);

  EnqueuedSamplers::iterator qi = _enqueued_samplers.find(sampler);
  if (qi != _enqueued_samplers.end()) {
    _enqueued_samplers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the sampler has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_sampler_prepared(const SamplerState &sampler) const {
  ReMutexHolder holder(_lock);

  PreparedSamplers::const_iterator it = _prepared_samplers.find(sampler);
  return (it != _prepared_samplers.end());
}

/**
 * Indicates that a sampler context, created by a previous call to
 * prepare_sampler(), is no longer needed. The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release samplers.
 */
void PreparedGraphicsObjects::
release_sampler(SamplerContext *sc) {
  ReMutexHolder holder(_lock);

  _released_samplers.insert(sc);
}

/**
 * Releases a sampler if it has already been prepared, or removes it from the
 * preparation queue.
 */
void PreparedGraphicsObjects::
release_sampler(const SamplerState &sampler) {
  ReMutexHolder holder(_lock);

  PreparedSamplers::iterator it = _prepared_samplers.find(sampler);
  if (it != _prepared_samplers.end()) {
    _released_samplers.insert(it->second);
    _prepared_samplers.erase(it);
  }

  _enqueued_samplers.erase(sampler);
}

/**
 * Releases all samplers at once. This will force them to be reloaded for all
 * GSG's that share this object. Returns the number of samplers released.
 */
int PreparedGraphicsObjects::
release_all_samplers() {
  ReMutexHolder holder(_lock);

  int num_samplers = (int)_prepared_samplers.size() + (int)_enqueued_samplers.size();

  PreparedSamplers::iterator sci;
  for (sci = _prepared_samplers.begin();
       sci != _prepared_samplers.end();
       ++sci) {
    _released_samplers.insert(sci->second);
  }

  _prepared_samplers.clear();
  _enqueued_samplers.clear();

  return num_samplers;
}

/**
 * Returns the number of samplers that have been enqueued to be prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_samplers() const {
  return _enqueued_samplers.size();
}

/**
 * Returns the number of samplers that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_samplers() const {
  return _prepared_samplers.size();
}

/**
 * Immediately creates a new SamplerContext for the indicated sampler and
 * returns it. This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new samplers. If
 * this is not necessarily the case, you should use enqueue_sampler() instead.
 *
 * Normally, this function is not called directly. Call
 * SamplerState::prepare_now() instead.
 *
 * The SamplerContext contains all of the pertinent information needed by the
 * GSG to keep track of this one particular sampler, and will exist as long as
 * the sampler is ready to be rendered.
 *
 * When either the SamplerState or the PreparedGraphicsObjects object
 * destructs, the SamplerContext will be deleted.
 */
SamplerContext *PreparedGraphicsObjects::
prepare_sampler_now(const SamplerState &sampler, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  PreparedSamplers::const_iterator it = _prepared_samplers.find(sampler);
  if (it != _prepared_samplers.end()) {
    return it->second;
  }

  // Ask the GSG to create a brand new SamplerContext.
  SamplerContext *sc = gsg->prepare_sampler(sampler);

  if (sc != nullptr) {
    _prepared_samplers[sampler] = sc;
  }

  return sc;
}

/**
 * Indicates that a geom would like to be put on the list to be prepared when
 * the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_geom(Geom *geom) {
  ReMutexHolder holder(_lock);

  _enqueued_geoms.insert(geom);
}

/**
 * Returns true if the geom has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_geom_queued(const Geom *geom) const {
  ReMutexHolder holder(_lock);

  EnqueuedGeoms::const_iterator qi = _enqueued_geoms.find((Geom *)geom);
  return (qi != _enqueued_geoms.end());
}

/**
 * Removes a geom from the queued list of geoms to be prepared. Normally it
 * is not necessary to call this, unless you change your mind about preparing
 * it at the last minute, since the geom will automatically be dequeued and
 * prepared at the next frame.
 *
 * The return value is true if the geom is successfully dequeued, false if it
 * had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_geom(Geom *geom) {
  ReMutexHolder holder(_lock);

  EnqueuedGeoms::iterator qi = _enqueued_geoms.find(geom);
  if (qi != _enqueued_geoms.end()) {
    _enqueued_geoms.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the geom has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_geom_prepared(const Geom *geom) const {
  return geom->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a geom context, created by a previous call to
 * prepare_geom(), is no longer needed. The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release geoms--this prevents conflicts from threading or
 * multiple GSG's sharing geoms (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_geom is called).
 */
void PreparedGraphicsObjects::
release_geom(GeomContext *gc) {
  ReMutexHolder holder(_lock);

  gc->_geom->clear_prepared(this);

  // We have to set the Geom pointer to NULL at this point, since the Geom
  // itself might destruct at any time after it has been released.
  gc->_geom = nullptr;

  bool removed = (_prepared_geoms.erase(gc) != 0);
  nassertv(removed);

  _released_geoms.insert(gc);
}

/**
 * Releases all geoms at once. This will force them to be reloaded into geom
 * memory for all GSG's that share this object. Returns the number of geoms
 * released.
 */
int PreparedGraphicsObjects::
release_all_geoms() {
  ReMutexHolder holder(_lock);

  int num_geoms = (int)_prepared_geoms.size() + (int)_enqueued_geoms.size();

  Geoms::iterator gci;
  for (gci = _prepared_geoms.begin();
       gci != _prepared_geoms.end();
       ++gci) {
    GeomContext *gc = (*gci);
    gc->_geom->clear_prepared(this);
    gc->_geom = nullptr;

    _released_geoms.insert(gc);
  }

  _prepared_geoms.clear();
  _enqueued_geoms.clear();

  return num_geoms;
}

/**
 * Returns the number of geoms that have been enqueued to be prepared on this
 * GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_geoms() const {
  return _enqueued_geoms.size();
}

/**
 * Returns the number of geoms that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_geoms() const {
  return _prepared_geoms.size();
}

/**
 * Immediately creates a new GeomContext for the indicated geom and returns
 * it. This assumes that the GraphicsStateGuardian is the currently active
 * rendering context and that it is ready to accept new geoms. If this is not
 * necessarily the case, you should use enqueue_geom() instead.
 *
 * Normally, this function is not called directly. Call Geom::prepare_now()
 * instead.
 *
 * The GeomContext contains all of the pertinent information needed by the GSG
 * to keep track of this one particular geom, and will exist as long as the
 * geom is ready to be rendered.
 *
 * When either the Geom or the PreparedGraphicsObjects object destructs, the
 * GeomContext will be deleted.
 */
GeomContext *PreparedGraphicsObjects::
prepare_geom_now(Geom *geom, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new GeomContext. There might be several
  // GSG's sharing the same set of geoms; if so, it doesn't matter which of
  // them creates the context (since they're all shared anyway).
  GeomContext *gc = gsg->prepare_geom(geom);

  if (gc != nullptr) {
    bool prepared = _prepared_geoms.insert(gc).second;
    nassertr(prepared, gc);
  }

  return gc;
}

/**
 * Indicates that a shader would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_shader(Shader *shader) {
  ReMutexHolder holder(_lock);

  _enqueued_shaders.insert(EnqueuedShaders::value_type(shader, nullptr));
}

/**
 * Like enqueue_shader, but returns an AsyncFuture that can be used to query
 * the status of the shader's preparation.
 */
PT(PreparedGraphicsObjects::EnqueuedObject) PreparedGraphicsObjects::
enqueue_shader_future(Shader *shader) {
  ReMutexHolder holder(_lock);

  std::pair<EnqueuedShaders::iterator, bool> result =
    _enqueued_shaders.insert(EnqueuedShaders::value_type(shader, nullptr));
  if (result.first->second == nullptr) {
    result.first->second = new EnqueuedObject(this, shader);
  }
  PT(EnqueuedObject) fut = result.first->second;
  nassertr(!fut->cancelled(), fut);
  return fut;
}

/**
 * Returns true if the shader has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_queued(const Shader *shader) const {
  ReMutexHolder holder(_lock);

  EnqueuedShaders::const_iterator qi = _enqueued_shaders.find((Shader *)shader);
  return (qi != _enqueued_shaders.end());
}

/**
 * Removes a shader from the queued list of shaders to be prepared. Normally
 * it is not necessary to call this, unless you change your mind about
 * preparing it at the last minute, since the shader will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the shader is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_shader(Shader *se) {
  ReMutexHolder holder(_lock);

  EnqueuedShaders::iterator qi = _enqueued_shaders.find(se);
  if (qi != _enqueued_shaders.end()) {
    if (qi->second != nullptr) {
      qi->second->notify_removed();
    }
    _enqueued_shaders.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the shader has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_prepared(const Shader *shader) const {
  return shader->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a shader context, created by a previous call to
 * prepare_shader(), is no longer needed. The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release shaders--this prevents conflicts from threading or
 * multiple GSG's sharing shaders (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_shader is called).
 */
void PreparedGraphicsObjects::
release_shader(ShaderContext *sc) {
  ReMutexHolder holder(_lock);

  sc->_shader->clear_prepared(this);

  // We have to set the Shader pointer to NULL at this point, since the Shader
  // itself might destruct at any time after it has been released.
  sc->_shader = nullptr;

  bool removed = (_prepared_shaders.erase(sc) != 0);
  nassertv(removed);

  _released_shaders.insert(sc);
}

/**
 * Releases all shaders at once. This will force them to be reloaded into
 * shader memory for all GSG's that share this object. Returns the number of
 * shaders released.
 */
int PreparedGraphicsObjects::
release_all_shaders() {
  ReMutexHolder holder(_lock);

  int num_shaders = (int)_prepared_shaders.size() + (int)_enqueued_shaders.size();

  Shaders::iterator sci;
  for (sci = _prepared_shaders.begin();
       sci != _prepared_shaders.end();
       ++sci) {
    ShaderContext *sc = (*sci);
    sc->_shader->clear_prepared(this);
    sc->_shader = nullptr;

    _released_shaders.insert(sc);
  }

  _prepared_shaders.clear();

  // Mark any futures as cancelled.
  EnqueuedShaders::iterator qsi;
  for (qsi = _enqueued_shaders.begin();
       qsi != _enqueued_shaders.end();
       ++qsi) {
    if (qsi->second != nullptr) {
      qsi->second->notify_removed();
    }
  }

  _enqueued_shaders.clear();

  return num_shaders;
}

/**
 * Returns the number of shaders that have been enqueued to be prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_shaders() const {
  return _enqueued_shaders.size();
}

/**
 * Returns the number of shaders that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_shaders() const {
  return _prepared_shaders.size();
}

/**
 * Immediately creates a new ShaderContext for the indicated shader and
 * returns it. This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new shaders. If
 * this is not necessarily the case, you should use enqueue_shader() instead.
 *
 * Normally, this function is not called directly. Call Shader::prepare_now()
 * instead.
 *
 * The ShaderContext contains all of the pertinent information needed by the
 * GSG to keep track of this one particular shader, and will exist as long as
 * the shader is ready to be rendered.
 *
 * When either the Shader or the PreparedGraphicsObjects object destructs, the
 * ShaderContext will be deleted.
 */
ShaderContext *PreparedGraphicsObjects::
prepare_shader_now(Shader *se, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new ShaderContext. There might be several
  // GSG's sharing the same set of shaders; if so, it doesn't matter which of
  // them creates the context (since they're all shared anyway).
  ShaderContext *sc = gsg->prepare_shader(se);

  if (sc != nullptr) {
    bool prepared = _prepared_shaders.insert(sc).second;
    nassertr(prepared, sc);
  }

  return sc;
}

/**
 * Indicates that a buffer would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_vertex_buffer(GeomVertexArrayData *data) {
  ReMutexHolder holder(_lock);

  _enqueued_vertex_buffers.insert(data);
}

/**
 * Returns true if the vertex buffer has been queued on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_vertex_buffer_queued(const GeomVertexArrayData *data) const {
  ReMutexHolder holder(_lock);

  EnqueuedVertexBuffers::const_iterator qi = _enqueued_vertex_buffers.find((GeomVertexArrayData *)data);
  return (qi != _enqueued_vertex_buffers.end());
}

/**
 * Removes a buffer from the queued list of data arrays to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the data will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the buffer is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_vertex_buffer(GeomVertexArrayData *data) {
  ReMutexHolder holder(_lock);

  EnqueuedVertexBuffers::iterator qi = _enqueued_vertex_buffers.find(data);
  if (qi != _enqueued_vertex_buffers.end()) {
    _enqueued_vertex_buffers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the vertex buffer has been prepared on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_vertex_buffer_prepared(const GeomVertexArrayData *data) const {
  return data->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a data context, created by a previous call to
 * prepare_vertex_buffer(), is no longer needed. The driver resources will
 * not be freed until some GSG calls update(), indicating it is at a stage
 * where it is ready to release buffers--this prevents conflicts from
 * threading or multiple GSG's sharing buffers (we have no way of knowing
 * which graphics context is currently active, or what state it's in, at the
 * time release_vertex_buffer is called).
 */
void PreparedGraphicsObjects::
release_vertex_buffer(VertexBufferContext *vbc) {
  ReMutexHolder holder(_lock);

  vbc->get_data()->clear_prepared(this);

  size_t data_size_bytes = vbc->get_data()->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = vbc->get_data()->get_usage_hint();

  // We have to set the Data pointer to NULL at this point, since the Data
  // itself might destruct at any time after it has been released.
  vbc->_object = nullptr;

  bool removed = (_prepared_vertex_buffers.erase(vbc) != 0);
  nassertv(removed);

  if (_support_released_buffer_cache) {
    cache_unprepared_buffer(vbc, data_size_bytes, usage_hint,
                            _vertex_buffer_cache,
                            _vertex_buffer_cache_lru, _vertex_buffer_cache_size,
                            released_vbuffer_cache_size,
                            _released_vertex_buffers);
  } else {
    _released_vertex_buffers.insert(vbc);
  }
}

/**
 * Releases all vertex buffers at once. This will force them to be reloaded
 * for all GSG's that share this object. Returns the number of buffers
 * released.
 */
int PreparedGraphicsObjects::
release_all_vertex_buffers() {
  ReMutexHolder holder(_lock);

  int num_vertex_buffers = (int)_prepared_vertex_buffers.size() + (int)_enqueued_vertex_buffers.size();

  Buffers::iterator vbci;
  for (vbci = _prepared_vertex_buffers.begin();
       vbci != _prepared_vertex_buffers.end();
       ++vbci) {
    VertexBufferContext *vbc = (VertexBufferContext *)(*vbci);
    vbc->get_data()->clear_prepared(this);
    vbc->_object = nullptr;

    _released_vertex_buffers.insert(vbc);
  }

  _prepared_vertex_buffers.clear();
  _enqueued_vertex_buffers.clear();

  // Also clear the cache of recently-unprepared vertex buffers.
  BufferCache::iterator bci;
  for (bci = _vertex_buffer_cache.begin();
       bci != _vertex_buffer_cache.end();
       ++bci) {
    BufferList &buffer_list = (*bci).second;
    nassertr(!buffer_list.empty(), num_vertex_buffers);
    BufferList::iterator li;
    for (li = buffer_list.begin(); li != buffer_list.end(); ++li) {
      VertexBufferContext *vbc = (VertexBufferContext *)(*li);
      _released_vertex_buffers.insert(vbc);
    }
  }
  _vertex_buffer_cache.clear();
  _vertex_buffer_cache_lru.clear();
  _vertex_buffer_cache_size = 0;

  return num_vertex_buffers;
}

/**
 * Returns the number of vertex buffers that have been enqueued to be prepared
 * on this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_vertex_buffers() const {
  return _enqueued_vertex_buffers.size();
}

/**
 * Returns the number of vertex buffers that have already been prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_vertex_buffers() const {
  return _prepared_vertex_buffers.size();
}

/**
 * Immediately creates a new VertexBufferContext for the indicated data and
 * returns it. This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new buffers. If
 * this is not necessarily the case, you should use enqueue_vertex_buffer()
 * instead.
 *
 * Normally, this function is not called directly. Call
 * GeomVertexArrayData::prepare_now() instead.
 *
 * The VertexBufferContext contains all of the pertinent information needed by
 * the GSG to keep track of this one particular data, and will exist as long
 * as the data is ready to be rendered.
 *
 * When either the data or the PreparedGraphicsObjects object destructs, the
 * VertexBufferContext will be deleted.
 */
VertexBufferContext *PreparedGraphicsObjects::
prepare_vertex_buffer_now(GeomVertexArrayData *data, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // First, see if there might be a cached context of the appropriate size.
  size_t data_size_bytes = data->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = data->get_usage_hint();
  VertexBufferContext *vbc = (VertexBufferContext *)
    get_cached_buffer(data_size_bytes, usage_hint,
                      _vertex_buffer_cache, _vertex_buffer_cache_lru,
                      _vertex_buffer_cache_size);
  if (vbc != nullptr) {
    vbc->_object = data;

  } else {
    // Ask the GSG to create a brand new VertexBufferContext. There might be
    // several GSG's sharing the same set of buffers; if so, it doesn't matter
    // which of them creates the context (since they're all shared anyway).
    vbc = gsg->prepare_vertex_buffer(data);
  }

  if (vbc != nullptr) {
    bool prepared = _prepared_vertex_buffers.insert(vbc).second;
    nassertr(prepared, vbc);
  }

  return vbc;
}

/**
 * Indicates that a buffer would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_index_buffer(GeomPrimitive *data) {
  ReMutexHolder holder(_lock);

  _enqueued_index_buffers.insert(data);
}

/**
 * Returns true if the index buffer has been queued on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_index_buffer_queued(const GeomPrimitive *data) const {
  ReMutexHolder holder(_lock);

  EnqueuedIndexBuffers::const_iterator qi = _enqueued_index_buffers.find((GeomPrimitive *)data);
  return (qi != _enqueued_index_buffers.end());
}

/**
 * Removes a buffer from the queued list of data arrays to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the data will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the buffer is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_index_buffer(GeomPrimitive *data) {
  ReMutexHolder holder(_lock);

  EnqueuedIndexBuffers::iterator qi = _enqueued_index_buffers.find(data);
  if (qi != _enqueued_index_buffers.end()) {
    _enqueued_index_buffers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the index buffer has been prepared on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_index_buffer_prepared(const GeomPrimitive *data) const {
  return data->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a data context, created by a previous call to
 * prepare_index_buffer(), is no longer needed. The driver resources will not
 * be freed until some GSG calls update(), indicating it is at a stage where
 * it is ready to release buffers--this prevents conflicts from threading or
 * multiple GSG's sharing buffers (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_index_buffer is called).
 */
void PreparedGraphicsObjects::
release_index_buffer(IndexBufferContext *ibc) {
  ReMutexHolder holder(_lock);

  ibc->get_data()->clear_prepared(this);

  size_t data_size_bytes = ibc->get_data()->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = ibc->get_data()->get_usage_hint();

  // We have to set the Data pointer to NULL at this point, since the Data
  // itself might destruct at any time after it has been released.
  ibc->_object = nullptr;

  bool removed = (_prepared_index_buffers.erase(ibc) != 0);
  nassertv(removed);

  if (_support_released_buffer_cache) {
    cache_unprepared_buffer(ibc, data_size_bytes, usage_hint,
                            _index_buffer_cache,
                            _index_buffer_cache_lru, _index_buffer_cache_size,
                            released_ibuffer_cache_size,
                            _released_index_buffers);
  } else {
    _released_index_buffers.insert(ibc);
  }
}

/**
 * Releases all index buffers at once. This will force them to be reloaded
 * for all GSG's that share this object. Returns the number of buffers
 * released.
 */
int PreparedGraphicsObjects::
release_all_index_buffers() {
  ReMutexHolder holder(_lock);

  int num_index_buffers = (int)_prepared_index_buffers.size() + (int)_enqueued_index_buffers.size();

  Buffers::iterator ibci;
  for (ibci = _prepared_index_buffers.begin();
       ibci != _prepared_index_buffers.end();
       ++ibci) {
    IndexBufferContext *ibc = (IndexBufferContext *)(*ibci);
    ibc->get_data()->clear_prepared(this);
    ibc->_object = nullptr;

    _released_index_buffers.insert(ibc);
  }

  _prepared_index_buffers.clear();
  _enqueued_index_buffers.clear();

  // Also clear the cache of recently-unprepared index buffers.
  BufferCache::iterator bci;
  for (bci = _index_buffer_cache.begin();
       bci != _index_buffer_cache.end();
       ++bci) {
    BufferList &buffer_list = (*bci).second;
    nassertr(!buffer_list.empty(), num_index_buffers);
    BufferList::iterator li;
    for (li = buffer_list.begin(); li != buffer_list.end(); ++li) {
      IndexBufferContext *ibc = (IndexBufferContext *)(*li);
      _released_index_buffers.insert(ibc);
    }
  }
  _index_buffer_cache.clear();
  _index_buffer_cache_lru.clear();
  _index_buffer_cache_size = 0;

  return num_index_buffers;
}

/**
 * Returns the number of index buffers that have been enqueued to be prepared
 * on this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_index_buffers() const {
  return _enqueued_index_buffers.size();
}

/**
 * Returns the number of index buffers that have already been prepared on this
 * GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_index_buffers() const {
  return _prepared_index_buffers.size();
}

/**
 * Immediately creates a new IndexBufferContext for the indicated data and
 * returns it. This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new buffers. If
 * this is not necessarily the case, you should use enqueue_index_buffer()
 * instead.
 *
 * Normally, this function is not called directly. Call
 * GeomPrimitive::prepare_now() instead.
 *
 * The IndexBufferContext contains all of the pertinent information needed by
 * the GSG to keep track of this one particular data, and will exist as long
 * as the data is ready to be rendered.
 *
 * When either the data or the PreparedGraphicsObjects object destructs, the
 * IndexBufferContext will be deleted.
 */
IndexBufferContext *PreparedGraphicsObjects::
prepare_index_buffer_now(GeomPrimitive *data, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // First, see if there might be a cached context of the appropriate size.
  size_t data_size_bytes = data->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = data->get_usage_hint();
  IndexBufferContext *ibc = (IndexBufferContext *)
    get_cached_buffer(data_size_bytes, usage_hint,
                      _index_buffer_cache, _index_buffer_cache_lru,
                      _index_buffer_cache_size);
  if (ibc != nullptr) {
    ibc->_object = data;

  } else {
    // Ask the GSG to create a brand new IndexBufferContext. There might be
    // several GSG's sharing the same set of buffers; if so, it doesn't matter
    // which of them creates the context (since they're all shared anyway).
    ibc = gsg->prepare_index_buffer(data);
  }

  if (ibc != nullptr) {
    bool prepared = _prepared_index_buffers.insert(ibc).second;
    nassertr(prepared, ibc);
  }

  return ibc;
}

/**
 * Indicates that a buffer would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_shader_buffer(ShaderBuffer *data) {
  ReMutexHolder holder(_lock);

  _enqueued_shader_buffers.insert(data);
}

/**
 * Returns true if the shader buffer has been queued on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_buffer_queued(const ShaderBuffer *data) const {
  ReMutexHolder holder(_lock);

  EnqueuedShaderBuffers::const_iterator qi = _enqueued_shader_buffers.find((ShaderBuffer *)data);
  return (qi != _enqueued_shader_buffers.end());
}

/**
 * Removes a buffer from the queued list of data arrays to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the data will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the buffer is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_shader_buffer(ShaderBuffer *data) {
  ReMutexHolder holder(_lock);

  EnqueuedShaderBuffers::iterator qi = _enqueued_shader_buffers.find(data);
  if (qi != _enqueued_shader_buffers.end()) {
    _enqueued_shader_buffers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the shader buffer has been prepared on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_buffer_prepared(const ShaderBuffer *data) const {
  return data->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a data context, created by a previous call to
 * prepare_shader_buffer(), is no longer needed. The driver resources will not
 * be freed until some GSG calls update(), indicating it is at a stage where
 * it is ready to release buffers--this prevents conflicts from threading or
 * multiple GSG's sharing buffers (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_shader_buffer is called).
 */
void PreparedGraphicsObjects::
release_shader_buffer(BufferContext *bc) {
  ReMutexHolder holder(_lock);

  ShaderBuffer *buffer = (ShaderBuffer *)bc->_object;
  buffer->clear_prepared(this);

  // We have to set the ShaderBuffer pointer to NULL at this point, since the
  // buffer itself might destruct at any time after it has been released.
  bc->_object = nullptr;

  bool removed = (_prepared_shader_buffers.erase(bc) != 0);
  nassertv(removed);

  _released_shader_buffers.insert(bc);
}

/**
 * Releases all shader buffers at once. This will force them to be reloaded
 * for all GSG's that share this object. Returns the number of buffers
 * released.
 */
int PreparedGraphicsObjects::
release_all_shader_buffers() {
  ReMutexHolder holder(_lock);

  int num_shader_buffers = (int)_prepared_shader_buffers.size() + (int)_enqueued_shader_buffers.size();

  Buffers::iterator bci;
  for (bci = _prepared_shader_buffers.begin();
       bci != _prepared_shader_buffers.end();
       ++bci) {
    BufferContext *bc = (BufferContext *)(*bci);
    _released_shader_buffers.insert(bc);
  }

  _prepared_shader_buffers.clear();
  _enqueued_shader_buffers.clear();

  return num_shader_buffers;
}

/**
 * Returns the number of shader buffers that have been enqueued to be prepared
 * on this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_shader_buffers() const {
  return _enqueued_shader_buffers.size();
}

/**
 * Returns the number of shader buffers that have already been prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_shader_buffers() const {
  return _prepared_shader_buffers.size();
}

/**
 * Immediately creates a new BufferContext for the indicated data and
 * returns it. This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new buffers. If
 * this is not necessarily the case, you should use enqueue_shader_buffer()
 * instead.
 *
 * Normally, this function is not called directly. Call
 * ShaderBuffer::prepare_now() instead.
 *
 * The BufferContext contains all of the pertinent information needed by
 * the GSG to keep track of this one particular data, and will exist as long
 * as the data is ready to be rendered.
 *
 * When either the data or the PreparedGraphicsObjects object destructs, the
 * BufferContext will be deleted.
 */
BufferContext *PreparedGraphicsObjects::
prepare_shader_buffer_now(ShaderBuffer *data, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new BufferContext. There might be
  // several GSG's sharing the same set of buffers; if so, it doesn't matter
  // which of them creates the context (since they're all shared anyway).
  BufferContext *bc = gsg->prepare_shader_buffer(data);

  if (bc != nullptr) {
    bool prepared = _prepared_shader_buffers.insert(bc).second;
    nassertr(prepared, bc);
  }

  return bc;
}

/**
 * Creates a new future for the given object.
 */
PreparedGraphicsObjects::EnqueuedObject::
EnqueuedObject(PreparedGraphicsObjects *pgo, TypedWritableReferenceCount *object) :
  _pgo(pgo),
  _object(object) {
}

/**
 * Indicates that the preparation request is done.
 */
void PreparedGraphicsObjects::EnqueuedObject::
set_result(SavedContext *context) {
  nassertv(!done());
  AsyncFuture::set_result(context);
  _pgo = nullptr;
}

/**
 * Called by PreparedGraphicsObjects to indicate that the preparation request
 * has been cancelled.
 */
void PreparedGraphicsObjects::EnqueuedObject::
notify_removed() {
  _pgo = nullptr;
  nassertv_always(AsyncFuture::cancel());
}

/**
 * Cancels the pending preparation request. Has no effect if the preparation
 * is already complete or was already cancelled.
 */
bool PreparedGraphicsObjects::EnqueuedObject::
cancel() {
  PreparedGraphicsObjects *pgo = _pgo;
  if (_object == nullptr || pgo == nullptr) {
    nassertr(done(), false);
    return false;
  }

  // We don't upcall here, because the dequeue function will end up calling
  // notify_removed().
  _result = nullptr;
  _pgo = nullptr;

  if (_object->is_of_type(Texture::get_class_type())) {
    return pgo->dequeue_texture((Texture *)_object.p());

  } else if (_object->is_of_type(Geom::get_class_type())) {
    return pgo->dequeue_geom((Geom *)_object.p());

  } else if (_object->is_of_type(Shader::get_class_type())) {
    return pgo->dequeue_shader((Shader *)_object.p());

  } else if (_object->is_of_type(GeomVertexArrayData::get_class_type())) {
    return pgo->dequeue_vertex_buffer((GeomVertexArrayData *)_object.p());

  } else if (_object->is_of_type(GeomPrimitive::get_class_type())) {
    return pgo->dequeue_index_buffer((GeomPrimitive *)_object.p());

  } else if (_object->is_of_type(ShaderBuffer::get_class_type())) {
    return pgo->dequeue_shader_buffer((ShaderBuffer *)_object.p());
  }
  return false;
}
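
// Editorial example (not part of the original source): abandoning a queued
// preparation before the next frame processes it. cancel() dispatches to the
// matching dequeue function, which in turn calls notify_removed() on this
// future.
//
//   PT(PreparedGraphicsObjects::EnqueuedObject) fut =
//       pgo->enqueue_texture_future(tex);
//   bool dequeued = fut->cancel();  // true if it was still pending
//   // afterward, fut->cancelled() reports true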

/**
 * This is called by the GraphicsStateGuardian to indicate that it is about to
 * begin processing of the frame.
 *
 * Any texture contexts that were previously passed to release_texture() are
 * actually passed to the GSG to be freed at this point; textures that were
 * previously passed to prepare_texture are actually loaded.
 */
void PreparedGraphicsObjects::
begin_frame(GraphicsStateGuardianBase *gsg, Thread *current_thread) {
  ReMutexHolder holder(_lock, current_thread);

  // First, release all the textures, geoms, and buffers awaiting release.
  if (!_released_textures.empty()) {
    Textures::iterator tci;
    for (tci = _released_textures.begin();
         tci != _released_textures.end();
         ++tci) {
      TextureContext *tc = (*tci);
      gsg->release_texture(tc);
    }

    _released_textures.clear();
  }

  if (!_released_samplers.empty()) {
    ReleasedSamplers::iterator sci;
    for (sci = _released_samplers.begin();
         sci != _released_samplers.end();
         ++sci) {
      SamplerContext *sc = (*sci);
      gsg->release_sampler(sc);
    }

    _released_samplers.clear();
  }

  Geoms::iterator gci;
  for (gci = _released_geoms.begin();
       gci != _released_geoms.end();
       ++gci) {
    GeomContext *gc = (*gci);
    gsg->release_geom(gc);
  }

  _released_geoms.clear();

  Shaders::iterator sci;
  for (sci = _released_shaders.begin();
       sci != _released_shaders.end();
       ++sci) {
    ShaderContext *sc = (*sci);
    gsg->release_shader(sc);
  }

  _released_shaders.clear();

  Buffers::iterator vbci;
  for (vbci = _released_vertex_buffers.begin();
       vbci != _released_vertex_buffers.end();
       ++vbci) {
    VertexBufferContext *vbc = (VertexBufferContext *)(*vbci);
    gsg->release_vertex_buffer(vbc);
  }

  _released_vertex_buffers.clear();

  Buffers::iterator ibci;
  for (ibci = _released_index_buffers.begin();
       ibci != _released_index_buffers.end();
       ++ibci) {
    IndexBufferContext *ibc = (IndexBufferContext *)(*ibci);
    gsg->release_index_buffer(ibc);
  }

  _released_index_buffers.clear();

  // Reset the residency trackers.
  _texture_residency.begin_frame(current_thread);
  _vbuffer_residency.begin_frame(current_thread);
  _ibuffer_residency.begin_frame(current_thread);
  _sbuffer_residency.begin_frame(current_thread);

  // Now prepare all the textures, geoms, and buffers awaiting preparation.
  EnqueuedTextures::iterator qti;
  for (qti = _enqueued_textures.begin();
       qti != _enqueued_textures.end();
       ++qti) {
    Texture *tex = qti->first;
    for (int view = 0; view < tex->get_num_views(); ++view) {
      TextureContext *tc = tex->prepare_now(view, this, gsg);
      if (tc != nullptr) {
        gsg->update_texture(tc, true);
        if (view == 0 && qti->second != nullptr) {
          qti->second->set_result(tc);
        }
      }
    }
  }

  _enqueued_textures.clear();

  EnqueuedSamplers::iterator qsmi;
  for (qsmi = _enqueued_samplers.begin();
       qsmi != _enqueued_samplers.end();
       ++qsmi) {
    const SamplerState &sampler = (*qsmi);
    sampler.prepare_now(this, gsg);
  }

  _enqueued_samplers.clear();

  EnqueuedGeoms::iterator qgi;
  for (qgi = _enqueued_geoms.begin();
       qgi != _enqueued_geoms.end();
       ++qgi) {
    Geom *geom = (*qgi);
    geom->prepare_now(this, gsg);
  }

  _enqueued_geoms.clear();

  EnqueuedShaders::iterator qsi;
  for (qsi = _enqueued_shaders.begin();
       qsi != _enqueued_shaders.end();
       ++qsi) {
    Shader *shader = qsi->first;
    ShaderContext *sc = shader->prepare_now(this, gsg);
    if (qsi->second != nullptr) {
      qsi->second->set_result(sc);
    }
  }

  _enqueued_shaders.clear();

  EnqueuedVertexBuffers::iterator qvbi;
  for (qvbi = _enqueued_vertex_buffers.begin();
       qvbi != _enqueued_vertex_buffers.end();
       ++qvbi) {
    GeomVertexArrayData *data = (*qvbi);
    data->prepare_now(this, gsg);
  }

  _enqueued_vertex_buffers.clear();

  EnqueuedIndexBuffers::iterator qibi;
  for (qibi = _enqueued_index_buffers.begin();
       qibi != _enqueued_index_buffers.end();
       ++qibi) {
    GeomPrimitive *data = (*qibi);
    // We need this check because the actual index data may not actually have
    // propagated to the draw thread yet.
    if (data->is_indexed()) {
      data->prepare_now(this, gsg);
    }
  }

  _enqueued_index_buffers.clear();
}

/**
 * This is called by the GraphicsStateGuardian to indicate that it has
 * finished processing of the frame.
 */
void PreparedGraphicsObjects::
end_frame(Thread *current_thread) {
  ReMutexHolder holder(_lock, current_thread);

  _texture_residency.end_frame(current_thread);
  _vbuffer_residency.end_frame(current_thread);
  _ibuffer_residency.end_frame(current_thread);
  _sbuffer_residency.end_frame(current_thread);
}
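
// Editorial example (not part of the original source): how these calls pair
// around a frame's draw work. In practice the GSG itself makes both calls;
// the surrounding draw logic is only sketched.
//
//   pgo->begin_frame(gsg, current_thread);  // release + prepare queued objects
//   // ...issue the frame's draw calls...
//   pgo->end_frame(current_thread);         // close out residency tracking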

/**
 * Returns a new, unique name for a newly-constructed object.
 */
std::string PreparedGraphicsObjects::
init_name() {
  ++_name_index;
  std::ostringstream strm;
  strm << "context" << _name_index;
  return strm.str();
}

/**
 * Called when a vertex or index buffer is no longer officially "prepared".
 * However, we still have the context on the graphics card, and we might be
 * able to reuse that context if we're about to re-prepare a different buffer,
 * especially one exactly the same size. So instead of immediately enqueuing
 * the vertex buffer for release, we cache it.
 */
void PreparedGraphicsObjects::
cache_unprepared_buffer(BufferContext *buffer, size_t data_size_bytes,
                        GeomEnums::UsageHint usage_hint,
                        PreparedGraphicsObjects::BufferCache &buffer_cache,
                        PreparedGraphicsObjects::BufferCacheLRU &buffer_cache_lru,
                        size_t &buffer_cache_size,
                        int released_buffer_cache_size,
                        PreparedGraphicsObjects::Buffers &released_buffers) {
  BufferCacheKey key;
  key._data_size_bytes = data_size_bytes;
  key._usage_hint = usage_hint;

  buffer_cache[key].push_back(buffer);
  buffer_cache_size += data_size_bytes;

  // Move the key to the head of the LRU.
  BufferCacheLRU::iterator li =
    find(buffer_cache_lru.begin(), buffer_cache_lru.end(), key);
  if (li != buffer_cache_lru.end()) {
    buffer_cache_lru.erase(li);
  }
  buffer_cache_lru.insert(buffer_cache_lru.begin(), key);

  // Now release not-recently-used buffers until we fit within the constrained
  // size.
  while ((int)buffer_cache_size > released_buffer_cache_size) {
    nassertv(!buffer_cache_lru.empty());
    const BufferCacheKey &release_key = *buffer_cache_lru.rbegin();
    BufferList &buffer_list = buffer_cache[release_key];
    while (!buffer_list.empty() &&
           (int)buffer_cache_size > released_buffer_cache_size) {
      BufferContext *released_buffer = buffer_list.back();
      buffer_list.pop_back();
      released_buffers.insert(released_buffer);
      buffer_cache_size -= release_key._data_size_bytes;
    }

    if (buffer_list.empty()) {
      buffer_cache.erase(release_key);
      buffer_cache_lru.pop_back();
    }
  }
}
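
// Editorial example (not part of the original source): a worked trace of the
// cache key. Buffers are filed by exact (size, usage hint), so only an exact
// match is reused; the numbers below are illustrative only.
//
//   Release a 4096-byte UH_static vertex buffer:
//     -> cached under key {4096, UH_static}; key moved to the LRU head.
//   Later, prepare_vertex_buffer_now() for another 4096-byte UH_static array:
//     -> get_cached_buffer() returns the cached context for reuse.
//   A 4092-byte request would miss the cache and allocate a fresh context.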

/**
 * Returns a previously-cached buffer from the cache, or NULL if there is no
 * such buffer.
 */
BufferContext *PreparedGraphicsObjects::
get_cached_buffer(size_t data_size_bytes, GeomEnums::UsageHint usage_hint,
                  PreparedGraphicsObjects::BufferCache &buffer_cache,
                  PreparedGraphicsObjects::BufferCacheLRU &buffer_cache_lru,
                  size_t &buffer_cache_size) {
  BufferCacheKey key;
  key._data_size_bytes = data_size_bytes;
  key._usage_hint = usage_hint;

  BufferCache::iterator bci = buffer_cache.find(key);
  if (bci == buffer_cache.end()) {
    return nullptr;
  }

  BufferList &buffer_list = (*bci).second;
  nassertr(!buffer_list.empty(), nullptr);

  BufferContext *buffer = buffer_list.back();
  buffer_list.pop_back();
  if (buffer_list.empty()) {
    buffer_cache.erase(bci);
    BufferCacheLRU::iterator li =
      find(buffer_cache_lru.begin(), buffer_cache_lru.end(), key);
    if (li != buffer_cache_lru.end()) {
      buffer_cache_lru.erase(li);
    }
  }

  buffer_cache_size -= data_size_bytes;
  return buffer;
}
EnqueuedObject(PreparedGraphicsObjects *pgo, TypedWritableReferenceCount *object)
Creates a new future for the given object.
SamplerContext * prepare_now(PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg) const
Creates a context for the sampler on the particular GSG, if it does not already exist.
int release_all_shaders()
Releases all shaders at once.
bool is_texture_prepared(const Texture *tex) const
Returns true if the texture has been prepared on this GSG, false otherwise.
Texture * get_texture() const
Returns the pointer to the associated Texture object.
bool is_index_buffer_queued(const GeomPrimitive *data) const
Returns true if the index buffer has been queued on this GSG, false otherwise.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void release_shader(ShaderContext *sc)
Indicates that a shader context, created by a previous call to prepare_shader(), is no longer needed.
void enqueue_texture(Texture *tex)
Indicates that a texture would like to be put on the list to be prepared when the GSG is next ready t...
This is a special class object that holds all the information returned by a particular GSG to indicat...
Definition: geomContext.h:34
int get_num_queued_vertex_buffers() const
Returns the number of vertex buffers that have been enqueued to be prepared on this GSG.
get_usage_hint
Returns the usage hint for this primitive.
Definition: geomPrimitive.h:81
This is a generic buffer object that lives in graphics memory.
Definition: shaderBuffer.h:33
int get_num_queued_geoms() const
Returns the number of geoms that have been enqueued to be prepared on this GSG.
void release_geom(GeomContext *gc)
Indicates that a geom context, created by a previous call to prepare_geom(), is no longer needed.
GeomContext * prepare_geom_now(Geom *geom, GraphicsStateGuardianBase *gsg)
Immediately creates a new GeomContext for the indicated geom and returns it.
void release_texture(TextureContext *tc)
Indicates that a texture context, created by a previous call to prepare_texture(),...
void release_shader_buffer(BufferContext *bc)
Indicates that a data context, created by a previous call to prepare_shader_buffer(),...
bool is_shader_buffer_queued(const ShaderBuffer *data) const
Returns true if the index buffer has been queued on this GSG, false otherwise.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
bool is_shader_buffer_prepared(const ShaderBuffer *data) const
Returns true if the shader buffer has been prepared on this GSG, false otherwise.
int get_view() const
Returns the specific view of a multiview texture this context represents.
int release_all_vertex_buffers()
Releases all vertex buffers at once.
int get_num_prepared_textures() const
Returns the number of textures that have already been prepared on this GSG.
bool dequeue_texture(Texture *tex)
Removes a texture from the queued list of textures to be prepared.
bool dequeue_index_buffer(GeomPrimitive *data)
Removes a buffer from the queued list of index buffers to be prepared.
int get_num_prepared_vertex_buffers() const
Returns the number of vertex buffers that have already been prepared on this GSG.
This is a special class object that holds all the information returned by a particular GSG to indicat...
bool is_vertex_buffer_prepared(const GeomVertexArrayData *data) const
Returns true if the vertex buffer has been prepared on this GSG, false otherwise.
This is a base class for those kinds of SavedContexts that occupy an easily-measured (and substantial...
Definition: bufferContext.h:38
Represents a texture object, which is typically a single 2-d image but may also represent a 1-d or 3-d texture image, or the six 2-d faces of a cube map.
Definition: texture.h:71
void set_max_size(size_t max_size)
Changes the max size of all objects that are allowed to be active on the LRU.
Definition: adaptiveLru.I:40
bool dequeue_sampler(const SamplerState &sampler)
Removes a sampler from the queued list of samplers to be prepared.
This is an abstract base class for a family of classes that represent the fundamental geometry primitives that may be stored in a Geom.
Definition: geomPrimitive.h:56
IndexBufferContext * prepare_index_buffer_now(GeomPrimitive *data, GraphicsStateGuardianBase *gsg)
Immediately creates a new IndexBufferContext for the indicated data and returns it.
void notify_removed()
Called by PreparedGraphicsObjects to indicate that the preparation request has been cancelled.
bool is_vertex_buffer_queued(const GeomVertexArrayData *data) const
Returns true if the vertex buffer has been queued on this GSG, false otherwise.
Definition: shader.h:49
void enqueue_shader(Shader *shader)
Indicates that a shader would like to be put on the list to be prepared when the GSG is next ready to do so.
This is a special class object that holds all the information returned by a particular GSG to indicat...
void enqueue_index_buffer(GeomPrimitive *data)
Indicates that a buffer would like to be put on the list to be prepared when the GSG is next ready to do so.
void release_vertex_buffer(VertexBufferContext *vbc)
Indicates that a data context, created by a previous call to prepare_vertex_buffer(), is no longer needed.
int get_num_queued_shader_buffers() const
Returns the number of shader buffers that have been enqueued to be prepared on this GSG.
int get_num_queued_textures() const
Returns the number of textures that have been enqueued to be prepared on this GSG.
void enqueue_sampler(const SamplerState &sampler)
Indicates that a sampler would like to be put on the list to be prepared when the GSG is next ready to do so.
bool is_sampler_prepared(const SamplerState &sampler) const
Returns true if the sampler has been prepared on this GSG, false otherwise.
VertexBufferContext * prepare_vertex_buffer_now(GeomVertexArrayData *data, GraphicsStateGuardianBase *gsg)
Immediately creates a new VertexBufferContext for the indicated data and returns it.
int release_all_shader_buffers()
Releases all shader buffers at once.
A table of objects that are saved within the graphics context for reference by handle later.
int get_num_prepared_index_buffers() const
Returns the number of index buffers that have already been prepared on this GSG.
int get_num_queued_index_buffers() const
Returns the number of index buffers that have been enqueued to be prepared on this GSG.
The ShaderContext is meant to contain the compiled version of a shader string.
Definition: shaderContext.h:31
This is our own Panda specialization on the default STL vector.
Definition: pvector.h:42
int get_num_prepared_shader_buffers() const
Returns the number of shader buffers that have already been prepared on this GSG.
This is a handle to an enqueued object, from which the result can be obtained upon completion.
bool release(PreparedGraphicsObjects *prepared_objects)
Frees the texture context only on the indicated object, if it exists there.
Definition: texture.cxx:1567
ShaderContext * prepare_shader_now(Shader *shader, GraphicsStateGuardianBase *gsg)
Immediately creates a new ShaderContext for the indicated shader and returns it.
bool is_geom_queued(const Geom *geom) const
Returns true if the geom has been queued on this GSG, false otherwise.
virtual bool cancel() final
Cancels the pending preparation request.
bool is_geom_prepared(const Geom *geom) const
Returns true if the geom has been prepared on this GSG, false otherwise.
BufferContext * prepare_shader_buffer_now(ShaderBuffer *data, GraphicsStateGuardianBase *gsg)
Immediately creates a new BufferContext for the indicated data and returns it.
void release_index_buffer(IndexBufferContext *ibc)
Indicates that a data context, created by a previous call to prepare_index_buffer(), is no longer needed.
PT(PreparedGraphicsObjects::EnqueuedObject) enqueue_texture_future(Texture *tex)
Like enqueue_texture, but returns an AsyncFuture that can be used to query the status of the texture's preparation.
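A sketch of using the future-based variant; it assumes AsyncFuture exposes a done() poll (cancel() and set_result() are documented elsewhere on this page), and the result-retrieval details are omitted:

#include "preparedGraphicsObjects.h"
#include "texture.h"

// Polls a pending preparation request once per call; returns true when
// the request has completed or been cancelled.
bool poll_texture_prep(PreparedGraphicsObjects *pgo, Texture *tex,
                       PT(PreparedGraphicsObjects::EnqueuedObject) &fut,
                       bool abort_requested) {
  if (fut == nullptr) {
    // First call: enqueue and hold on to the returned future.
    fut = pgo->enqueue_texture_future(tex);
  }
  if (abort_requested) {
    fut->cancel();     // drops the pending preparation request
    return true;
  }
  return fut->done();  // assumption: AsyncFuture exposes done()
}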
size_t get_max_size() const
Returns the max size of all objects that are allowed to be active on the LRU.
Definition: adaptiveLru.I:28
void enqueue_shader_buffer(ShaderBuffer *data)
Indicates that a buffer would like to be put on the list to be prepared when the GSG is next ready to do so.
void set_result(std::nullptr_t)
Sets this future's result.
Definition: asyncFuture.I:92
int get_num_queued_samplers() const
Returns the number of samplers that have been enqueued to be prepared on this GSG.
A container for geometry primitives.
Definition: geom.h:54
GeomContext * prepare_now(PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the geom on the particular GSG, if it does not already exist.
Definition: geom.cxx:1282
bool dequeue_vertex_buffer(GeomVertexArrayData *data)
Removes a buffer from the queued list of data arrays to be prepared.
int get_num_queued_shaders() const
Returns the number of shaders that have been enqueued to be prepared on this GSG.
get_data_size_bytes
Returns the number of bytes stored in the vertices array.
SamplerContext * prepare_sampler_now(const SamplerState &sampler, GraphicsStateGuardianBase *gsg)
Immediately creates a new SamplerContext for the indicated sampler and returns it.
void enqueue_vertex_buffer(GeomVertexArrayData *data)
Indicates that a buffer would like to be put on the list to be prepared when the GSG is next ready to do so.
Similar to MutexHolder, but for a reentrant mutex.
Definition: reMutexHolder.h:25
int release_all_textures()
Releases all textures at once.
A base class for things which need to inherit from both TypedWritable and from ReferenceCount.
bool dequeue_shader(Shader *shader)
Removes a shader from the queued list of shaders to be prepared.
get_data_size_bytes
Returns the number of bytes stored in the array.
int release_all_geoms()
Releases all geoms at once.
bool is_sampler_queued(const SamplerState &sampler) const
Returns true if the sampler has been queued on this GSG, false otherwise.
get_num_views
Returns the number of "views" in the texture.
Definition: texture.h:346
bool is_texture_queued(const Texture *tex) const
Returns true if the texture has been queued on this GSG, false otherwise.
Represents a set of settings that indicate how a texture is sampled.
Definition: samplerState.h:36
TextureContext * prepare_now(int view, PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the texture on the particular GSG, if it does not already exist.
Definition: texture.cxx:1956
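Multiview textures get one context per view (see get_view() and get_num_views on this page). A sketch that prepares every view immediately, assuming a current graphics context:

#include "preparedGraphicsObjects.h"
#include "texture.h"

// Immediately prepares one TextureContext per view of a (possibly
// multiview) texture; valid only while the GSG's context is current.
void prepare_all_views(PreparedGraphicsObjects *pgo, Texture *tex,
                       GraphicsStateGuardianBase *gsg) {
  for (int view = 0; view < tex->get_num_views(); ++view) {
    // Each returned context represents exactly one view; see get_view().
    pgo->prepare_texture_now(tex, view, gsg);
  }
}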
void end_frame(Thread *current_thread)
This is called by the GraphicsStateGuardian to indicate that it has finished processing of the frame.
This is a special class object that holds a handle to the sampler state object given by the graphics ...
This is a base class for the GraphicsStateGuardian class, which is itself a base class for the variou...
int get_num_prepared_shaders() const
Returns the number of shaders that have already been prepared on this GSG.
GeomVertexArrayData * get_data() const
Returns the pointer to the client-side array data object.
bool is_shader_prepared(const Shader *shader) const
Returns true if the shader has been prepared on this GSG, false otherwise.
A thread; that is, a lightweight process.
Definition: thread.h:46
TextureContext * prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg)
Immediately creates a new TextureContext for the indicated texture and returns it.
void show_residency_trackers(std::ostream &out) const
Writes to the indicated ostream a report of how the various textures and vertex buffers are allocated...
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture has already been prepared or enqueued for preparation on the indicated GSG, false otherwise.
Definition: texture.cxx:1438
This is a special class object that holds all the information returned by a particular GSG to indicat...
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the shader has already been prepared or enqueued for preparation on the indicated GSG, false otherwise.
Definition: shader.cxx:3660
void show_graphics_memory_lru(std::ostream &out) const
Writes to the indicated ostream a report of how the various textures and vertex buffers are allocated...
void begin_frame(GraphicsStateGuardianBase *gsg, Thread *current_thread)
This is called by the GraphicsStateGuardian to indicate that it is about to begin processing of the frame.
void enqueue_geom(Geom *geom)
Indicates that a geom would like to be put on the list to be prepared when the GSG is next ready to do so.
get_usage_hint
Returns the usage hint that describes to the rendering backend how often the vertex data will be modified and/or rendered.
GeomPrimitive * get_data() const
Returns the pointer to the client-side array data object.
void release_sampler(SamplerContext *sc)
Indicates that a sampler context, created by a previous call to prepare_sampler(), is no longer needed.
bool is_index_buffer_prepared(const GeomPrimitive *data) const
Returns true if the index buffer has been prepared on this GSG, false otherwise.
void begin_frame(Thread *current_thread)
To be called at the beginning of a frame, this initializes the active/inactive status.
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the geom has already been prepared or enqueued for preparation on the indicated GSG, false otherwise.
Definition: geom.cxx:1216
TypeHandle is the identifier used to differentiate C++ class types.
Definition: typeHandle.h:81
void set_graphics_memory_limit(size_t limit)
Sets an artificial cap on graphics memory that will be imposed on this GSG.
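A short sketch of applying the cap; the 256 MiB figure is arbitrary, and show_graphics_memory_lru (documented on this page) is used only to inspect the result:

#include <iostream>
#include "preparedGraphicsObjects.h"

// Caps this PGO's graphics memory; the LRU evicts least-recently-used
// contexts once the cap is exceeded.
void cap_graphics_memory(PreparedGraphicsObjects *pgo) {
  pgo->set_graphics_memory_limit(256 * 1024 * 1024);  // limit in bytes
  pgo->show_graphics_memory_lru(std::cout);  // dump current LRU state
}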
virtual bool cancel()
Cancels the future.
Definition: asyncFuture.cxx:43
This is the base class for all GSG-specific context objects, such as TextureContext and GeomContext.
Definition: savedContext.h:26
void end_frame(Thread *current_thread)
To be called at the end of a frame, this updates the PStatCollectors appropriately.
int get_num_prepared_geoms() const
Returns the number of geoms that have already been prepared on this GSG.
int release_all_samplers()
Releases all samplers at once.
ShaderContext * prepare_now(PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the shader on the particular GSG, if it does not already exist.
Definition: shader.cxx:3703
int get_num_prepared_samplers() const
Returns the number of samplers that have already been prepared on this GSG.
bool dequeue_shader_buffer(ShaderBuffer *data)
Removes a buffer from the queued list of shader buffers to be prepared.
bool dequeue_geom(Geom *geom)
Removes a geom from the queued list of geoms to be prepared.
This is the data for one array of a GeomVertexData structure.
int release_all_index_buffers()
Releases all index buffers at once.
bool is_shader_queued(const Shader *shader) const
Returns true if the shader has been queued on this GSG, false otherwise.
void set_result(SavedContext *result)
Indicates that the preparation request is done.
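Taken together, the release_all_* methods on this page form a bulk-teardown path. A sketch that releases every category and totals the returned counts:

#include <iostream>
#include "preparedGraphicsObjects.h"

// Bulk teardown: release every category of prepared object and report
// how many objects were released in total.
void release_everything(PreparedGraphicsObjects *pgo) {
  int count = 0;
  count += pgo->release_all_textures();
  count += pgo->release_all_samplers();
  count += pgo->release_all_geoms();
  count += pgo->release_all_shaders();
  count += pgo->release_all_vertex_buffers();
  count += pgo->release_all_index_buffers();
  count += pgo->release_all_shader_buffers();
  std::cerr << "released " << count << " prepared objects\n";
}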