Panda3D
preparedGraphicsObjects.cxx
/**
 * PANDA 3D SOFTWARE
 * Copyright (c) Carnegie Mellon University. All rights reserved.
 *
 * All use of this software is subject to the terms of the revised BSD
 * license. You should have received a copy of this license along
 * with this source code in a file named "LICENSE."
 *
 * @file preparedGraphicsObjects.cxx
 * @author drose
 * @date 2004-02-19
 */

#include "preparedGraphicsObjects.h"
#include "textureContext.h"
#include "vertexBufferContext.h"
#include "indexBufferContext.h"
#include "texture.h"
#include "geom.h"
#include "geomVertexArrayData.h"
#include "geomPrimitive.h"
#include "samplerContext.h"
#include "shader.h"
#include "reMutexHolder.h"
#include "geomContext.h"
#include "shaderContext.h"
#include "config_gobj.h"
#include "throw_event.h"

TypeHandle PreparedGraphicsObjects::EnqueuedObject::_type_handle;

int PreparedGraphicsObjects::_name_index = 0;

/**
 *
 */
PreparedGraphicsObjects::
PreparedGraphicsObjects() :
  _lock("PreparedGraphicsObjects::_lock"),
  _name(init_name()),
  _vertex_buffer_cache_size(0),
  _index_buffer_cache_size(0),
  _texture_residency(_name, "texture"),
  _vbuffer_residency(_name, "vbuffer"),
  _ibuffer_residency(_name, "ibuffer"),
  _sbuffer_residency(_name, "sbuffer"),
  _graphics_memory_lru("graphics_memory_lru", graphics_memory_limit),
  _sampler_object_lru("sampler_object_lru", sampler_object_limit)
{
  // GLGSG will turn this flag on.  This is a temporary hack to disable this
  // feature for DX8/DX9 for now, until we work out the fine points of
  // updating the FVF properly.
  _support_released_buffer_cache = false;
}

/**
 *
 */
PreparedGraphicsObjects::
~PreparedGraphicsObjects() {
  // There may be objects that are still prepared when we destruct.  If this
  // is so, then all of the GSG's that own them have already destructed, so we
  // can assume their resources were internally cleaned up.  Besides, we may
  // not even be allowed to call the GSG release methods since some APIs (eg.
  // OpenGL) require a context current.  So we just call the destructors.
  ReMutexHolder holder(_lock);

  release_all_textures();
  Textures::iterator tci;
  for (tci = _released_textures.begin();
       tci != _released_textures.end();
       ++tci) {
    TextureContext *tc = (*tci);
    delete tc;
  }
  _released_textures.clear();

  release_all_samplers();
  ReleasedSamplers::iterator ssci;
  for (ssci = _released_samplers.begin();
       ssci != _released_samplers.end();
       ++ssci) {
    SamplerContext *sc = (*ssci);
    delete sc;
  }
  _released_samplers.clear();

  release_all_geoms();
  Geoms::iterator gci;
  for (gci = _released_geoms.begin();
       gci != _released_geoms.end();
       ++gci) {
    GeomContext *gc = (*gci);
    delete gc;
  }
  _released_geoms.clear();

  release_all_shaders();
  Shaders::iterator sci;
  for (sci = _released_shaders.begin();
       sci != _released_shaders.end();
       ++sci) {
    ShaderContext *sc = (*sci);
    delete sc;
  }
  _released_shaders.clear();

  release_all_vertex_buffers();
  Buffers::iterator vbci;
  for (vbci = _released_vertex_buffers.begin();
       vbci != _released_vertex_buffers.end();
       ++vbci) {
    VertexBufferContext *vbc = (VertexBufferContext *)(*vbci);
    delete vbc;
  }
  _released_vertex_buffers.clear();

  release_all_index_buffers();
  Buffers::iterator ibci;
  for (ibci = _released_index_buffers.begin();
       ibci != _released_index_buffers.end();
       ++ibci) {
    IndexBufferContext *ibc = (IndexBufferContext *)(*ibci);
    delete ibc;
  }
  _released_index_buffers.clear();

  release_all_shader_buffers();
  Buffers::iterator bci;
  for (bci = _released_shader_buffers.begin();
       bci != _released_shader_buffers.end();
       ++bci) {
    BufferContext *bc = (BufferContext *)(*bci);
    delete bc;
  }
  _released_shader_buffers.clear();
}

/**
 * Sets an artificial cap on graphics memory that will be imposed on this GSG.
 *
 * This limits the total amount of graphics memory, including texture memory
 * and vertex buffer memory, that will be consumed by the GSG, regardless of
 * whether the hardware claims to provide more graphics memory than this.  It
 * is useful to put a ceiling on graphics memory consumed, since some drivers
 * seem to allow the application to consume more memory than the hardware can
 * realistically support.
 */
void PreparedGraphicsObjects::
set_graphics_memory_limit(size_t limit) {
  if (limit != _graphics_memory_lru.get_max_size()) {
    _graphics_memory_lru.set_max_size(limit);

    // We throw an event here so global objects (particularly the
    // TexMemWatcher) can automatically respond to this change.
    throw_event("graphics_memory_limit_changed");
  }
}
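
// A minimal usage sketch, assuming a valid GraphicsStateGuardian pointer
// `gsg` obtained from a window; an application capping graphics memory at
// 256 MB might do:
//
//   PreparedGraphicsObjects *pgo = gsg->get_prepared_objects();
//   pgo->set_graphics_memory_limit(256 * 1024 * 1024);
//   // Any handler registered for "graphics_memory_limit_changed" (such as
//   // the TexMemWatcher utility) will then be notified of the change.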

/**
 * Writes to the indicated ostream a report of how the various textures and
 * vertex buffers are allocated in the LRU.
 */
void PreparedGraphicsObjects::
show_graphics_memory_lru(std::ostream &out) const {
  _graphics_memory_lru.write(out, 0);
}

/**
 * Writes to the indicated ostream a report of the residency status of the
 * various textures and vertex, index, and shader buffers.
 */
void PreparedGraphicsObjects::
show_residency_trackers(std::ostream &out) const {
  out << "Textures:\n";
  _texture_residency.write(out, 2);

  out << "\nVertex buffers:\n";
  _vbuffer_residency.write(out, 2);

  out << "\nIndex buffers:\n";
  _ibuffer_residency.write(out, 2);

  out << "\nShader buffers:\n";
  _sbuffer_residency.write(out, 2);
}

/**
 * Indicates that a texture would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_texture(Texture *tex) {
  ReMutexHolder holder(_lock);

  _enqueued_textures.insert(EnqueuedTextures::value_type(tex, nullptr));
}

/**
 * Like enqueue_texture, but returns an AsyncFuture that can be used to query
 * the status of the texture's preparation.
 */
PT(PreparedGraphicsObjects::EnqueuedObject) PreparedGraphicsObjects::
enqueue_texture_future(Texture *tex) {
  ReMutexHolder holder(_lock);

  std::pair<EnqueuedTextures::iterator, bool> result =
    _enqueued_textures.insert(EnqueuedTextures::value_type(tex, nullptr));
  if (result.first->second == nullptr) {
    result.first->second = new EnqueuedObject(this, tex);
  }
  PT(EnqueuedObject) fut = result.first->second;
  nassertr(!fut->cancelled(), fut);
  return fut;
}
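
// A sketch of the future-based path, assuming a loaded Texture pointer `tex`
// and AsyncFuture's polling interface (done(), cancelled(), get_result());
// the result is set by begin_frame() on the draw thread:
//
//   PT(PreparedGraphicsObjects::EnqueuedObject) fut =
//     pgo->enqueue_texture_future(tex);
//   // ...later, e.g. polled from a task...
//   if (fut->done() && !fut->cancelled()) {
//     // begin_frame() stored the texture's TextureContext as the result.
//     TextureContext *tc = (TextureContext *)fut->get_result();
//   }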

/**
 * Returns true if the texture has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_texture_queued(const Texture *tex) const {
  ReMutexHolder holder(_lock);

  EnqueuedTextures::const_iterator qi = _enqueued_textures.find((Texture *)tex);
  return (qi != _enqueued_textures.end());
}

/**
 * Removes a texture from the queued list of textures to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the texture will automatically
 * be dequeued and prepared at the next frame.
 *
 * The return value is true if the texture is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_texture(Texture *tex) {
  ReMutexHolder holder(_lock);

  EnqueuedTextures::iterator qi = _enqueued_textures.find(tex);
  if (qi != _enqueued_textures.end()) {
    if (qi->second != nullptr) {
      qi->second->notify_removed();
    }
    _enqueued_textures.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the texture has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_texture_prepared(const Texture *tex) const {
  return tex->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a texture context, created by a previous call to
 * prepare_texture(), is no longer needed.  The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release textures--this prevents conflicts from threading or
 * multiple GSG's sharing textures (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_texture is called).
 */
void PreparedGraphicsObjects::
release_texture(TextureContext *tc) {
  ReMutexHolder holder(_lock);

  tc->get_texture()->clear_prepared(tc->get_view(), this);

  // We have to set the Texture pointer to NULL at this point, since the
  // Texture itself might destruct at any time after it has been released.
  tc->_object = nullptr;

  bool removed = (_prepared_textures.erase(tc) != 0);
  nassertv(removed);

  _released_textures.insert(tc);
}

/**
 * Releases a texture if it has already been prepared, or removes it from the
 * preparation queue.
 */
void PreparedGraphicsObjects::
release_texture(Texture *tex) {
  tex->release(this);
}

/**
 * Releases all textures at once.  This will force them to be reloaded into
 * texture memory for all GSG's that share this object.  Returns the number of
 * textures released.
 */
int PreparedGraphicsObjects::
release_all_textures() {
  ReMutexHolder holder(_lock);

  int num_textures = (int)_prepared_textures.size() + (int)_enqueued_textures.size();

  Textures::iterator tci;
  for (tci = _prepared_textures.begin();
       tci != _prepared_textures.end();
       ++tci) {
    TextureContext *tc = (*tci);
    tc->get_texture()->clear_prepared(tc->get_view(), this);
    tc->_object = nullptr;

    _released_textures.insert(tc);
  }

  _prepared_textures.clear();

  // Mark any futures as cancelled.
  EnqueuedTextures::iterator qti;
  for (qti = _enqueued_textures.begin();
       qti != _enqueued_textures.end();
       ++qti) {
    if (qti->second != nullptr) {
      qti->second->notify_removed();
    }
  }

  _enqueued_textures.clear();

  return num_textures;
}
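
// A short sketch, assuming `pgo` is this GSG's PreparedGraphicsObjects: a
// global texture flush (for example after a device reset) could be written as
//
//   int released = pgo->release_all_textures();
//   gobj_cat.info() << "released " << released << " textures\n";
//
// Each texture is then re-prepared on demand the next time it is rendered.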

/**
 * Returns the number of textures that have been enqueued to be prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_textures() const {
  return _enqueued_textures.size();
}

/**
 * Returns the number of textures that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_textures() const {
  return _prepared_textures.size();
}

/**
 * Immediately creates a new TextureContext for the indicated texture and
 * returns it.  This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new textures.  If
 * this is not necessarily the case, you should use enqueue_texture() instead.
 *
 * Normally, this function is not called directly.  Call
 * Texture::prepare_now() instead.
 *
 * The TextureContext contains all of the pertinent information needed by the
 * GSG to keep track of this one particular texture, and will exist as long as
 * the texture is ready to be rendered.
 *
 * When either the Texture or the PreparedGraphicsObjects object destructs,
 * the TextureContext will be deleted.
 */
TextureContext *PreparedGraphicsObjects::
prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new TextureContext.  There might be several
  // GSG's sharing the same set of textures; if so, it doesn't matter which of
  // them creates the context (since they're all shared anyway).
  TextureContext *tc = gsg->prepare_texture(tex, view);

  if (tc != nullptr) {
    bool prepared = _prepared_textures.insert(tc).second;
    nassertr(prepared, tc);
  }

  return tc;
}
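
// A minimal sketch of the immediate path, assuming it runs on the draw
// thread with the context current (this mirrors what begin_frame() does for
// each queued texture):
//
//   TextureContext *tc = tex->prepare_now(0, pgo, gsg);  // view 0
//   if (tc != nullptr) {
//     gsg->update_texture(tc, true);
//   }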

/**
 * Indicates that a sampler would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_sampler(const SamplerState &sampler) {
  ReMutexHolder holder(_lock);

  _enqueued_samplers.insert(sampler);
}

/**
 * Returns true if the sampler has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_sampler_queued(const SamplerState &sampler) const {
  ReMutexHolder holder(_lock);

  EnqueuedSamplers::const_iterator qi = _enqueued_samplers.find(sampler);
  return (qi != _enqueued_samplers.end());
}

/**
 * Removes a sampler from the queued list of samplers to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the sampler will automatically
 * be dequeued and prepared at the next frame.
 *
 * The return value is true if the sampler is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_sampler(const SamplerState &sampler) {
  ReMutexHolder holder(_lock);

  EnqueuedSamplers::iterator qi = _enqueued_samplers.find(sampler);
  if (qi != _enqueued_samplers.end()) {
    _enqueued_samplers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the sampler has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_sampler_prepared(const SamplerState &sampler) const {
  ReMutexHolder holder(_lock);

  PreparedSamplers::const_iterator it = _prepared_samplers.find(sampler);
  return (it != _prepared_samplers.end());
}

/**
 * Indicates that a sampler context, created by a previous call to
 * prepare_sampler(), is no longer needed.  The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release samplers.
 */
void PreparedGraphicsObjects::
release_sampler(SamplerContext *sc) {
  ReMutexHolder holder(_lock);

  _released_samplers.insert(sc);
}

/**
 * Releases a sampler if it has already been prepared, or removes it from the
 * preparation queue.
 */
void PreparedGraphicsObjects::
release_sampler(const SamplerState &sampler) {
  ReMutexHolder holder(_lock);

  PreparedSamplers::iterator it = _prepared_samplers.find(sampler);
  if (it != _prepared_samplers.end()) {
    _released_samplers.insert(it->second);
    _prepared_samplers.erase(it);
  }

  _enqueued_samplers.erase(sampler);
}

/**
 * Releases all samplers at once.  This will force them to be reloaded for all
 * GSG's that share this object.  Returns the number of samplers released.
 */
int PreparedGraphicsObjects::
release_all_samplers() {
  ReMutexHolder holder(_lock);

  int num_samplers = (int)_prepared_samplers.size() + (int)_enqueued_samplers.size();

  PreparedSamplers::iterator sci;
  for (sci = _prepared_samplers.begin();
       sci != _prepared_samplers.end();
       ++sci) {
    _released_samplers.insert(sci->second);
  }

  _prepared_samplers.clear();
  _enqueued_samplers.clear();

  return num_samplers;
}

/**
 * Returns the number of samplers that have been enqueued to be prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_samplers() const {
  return _enqueued_samplers.size();
}

/**
 * Returns the number of samplers that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_samplers() const {
  return _prepared_samplers.size();
}

/**
 * Immediately creates a new SamplerContext for the indicated sampler and
 * returns it.  This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new samplers.  If
 * this is not necessarily the case, you should use enqueue_sampler() instead.
 *
 * Normally, this function is not called directly.  Call
 * SamplerState::prepare_now() instead.
 *
 * The SamplerContext contains all of the pertinent information needed by the
 * GSG to keep track of this one particular sampler, and will exist as long as
 * the sampler is ready to be rendered.
 *
 * When either the SamplerState or the PreparedGraphicsObjects object
 * destructs, the SamplerContext will be deleted.
 */
SamplerContext *PreparedGraphicsObjects::
prepare_sampler_now(const SamplerState &sampler, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  PreparedSamplers::const_iterator it = _prepared_samplers.find(sampler);
  if (it != _prepared_samplers.end()) {
    return it->second;
  }

  // Ask the GSG to create a brand new SamplerContext.
  SamplerContext *sc = gsg->prepare_sampler(sampler);

  if (sc != nullptr) {
    _prepared_samplers[sampler] = sc;
  }

  return sc;
}
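
// A brief sketch, assuming a SamplerState configured by the caller.  Note
// that unlike textures, an identical sampler that has already been prepared
// is simply reused: prepare_sampler_now() returns the existing context.
//
//   SamplerState sampler;
//   sampler.set_minfilter(SamplerState::FT_linear_mipmap_linear);
//   sampler.set_magfilter(SamplerState::FT_linear);
//   pgo->enqueue_sampler(sampler);  // prepared at the next begin_frame()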

/**
 * Indicates that a geom would like to be put on the list to be prepared when
 * the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_geom(Geom *geom) {
  ReMutexHolder holder(_lock);

  _enqueued_geoms.insert(geom);
}

/**
 * Returns true if the geom has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_geom_queued(const Geom *geom) const {
  ReMutexHolder holder(_lock);

  EnqueuedGeoms::const_iterator qi = _enqueued_geoms.find((Geom *)geom);
  return (qi != _enqueued_geoms.end());
}

/**
 * Removes a geom from the queued list of geoms to be prepared.  Normally it
 * is not necessary to call this, unless you change your mind about preparing
 * it at the last minute, since the geom will automatically be dequeued and
 * prepared at the next frame.
 *
 * The return value is true if the geom is successfully dequeued, false if it
 * had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_geom(Geom *geom) {
  ReMutexHolder holder(_lock);

  EnqueuedGeoms::iterator qi = _enqueued_geoms.find(geom);
  if (qi != _enqueued_geoms.end()) {
    _enqueued_geoms.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the geom has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_geom_prepared(const Geom *geom) const {
  return geom->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a geom context, created by a previous call to
 * prepare_geom(), is no longer needed.  The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release geoms--this prevents conflicts from threading or
 * multiple GSG's sharing geoms (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_geom is called).
 */
void PreparedGraphicsObjects::
release_geom(GeomContext *gc) {
  ReMutexHolder holder(_lock);

  gc->_geom->clear_prepared(this);

  // We have to set the Geom pointer to NULL at this point, since the Geom
  // itself might destruct at any time after it has been released.
  gc->_geom = nullptr;

  bool removed = (_prepared_geoms.erase(gc) != 0);
  nassertv(removed);

  _released_geoms.insert(gc);
}

/**
 * Releases all geoms at once.  This will force them to be reloaded into geom
 * memory for all GSG's that share this object.  Returns the number of geoms
 * released.
 */
int PreparedGraphicsObjects::
release_all_geoms() {
  ReMutexHolder holder(_lock);

  int num_geoms = (int)_prepared_geoms.size() + (int)_enqueued_geoms.size();

  Geoms::iterator gci;
  for (gci = _prepared_geoms.begin();
       gci != _prepared_geoms.end();
       ++gci) {
    GeomContext *gc = (*gci);
    gc->_geom->clear_prepared(this);
    gc->_geom = nullptr;

    _released_geoms.insert(gc);
  }

  _prepared_geoms.clear();
  _enqueued_geoms.clear();

  return num_geoms;
}

/**
 * Returns the number of geoms that have been enqueued to be prepared on this
 * GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_geoms() const {
  return _enqueued_geoms.size();
}

/**
 * Returns the number of geoms that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_geoms() const {
  return _prepared_geoms.size();
}

/**
 * Immediately creates a new GeomContext for the indicated geom and returns
 * it.  This assumes that the GraphicsStateGuardian is the currently active
 * rendering context and that it is ready to accept new geoms.  If this is not
 * necessarily the case, you should use enqueue_geom() instead.
 *
 * Normally, this function is not called directly.  Call Geom::prepare_now()
 * instead.
 *
 * The GeomContext contains all of the pertinent information needed by the GSG
 * to keep track of this one particular geom, and will exist as long as the
 * geom is ready to be rendered.
 *
 * When either the Geom or the PreparedGraphicsObjects object destructs, the
 * GeomContext will be deleted.
 */
GeomContext *PreparedGraphicsObjects::
prepare_geom_now(Geom *geom, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new GeomContext.  There might be several
  // GSG's sharing the same set of geoms; if so, it doesn't matter which of
  // them creates the context (since they're all shared anyway).
  GeomContext *gc = gsg->prepare_geom(geom);

  if (gc != nullptr) {
    bool prepared = _prepared_geoms.insert(gc).second;
    nassertr(prepared, gc);
  }

  return gc;
}

/**
 * Indicates that a shader would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_shader(Shader *shader) {
  ReMutexHolder holder(_lock);

  _enqueued_shaders.insert(EnqueuedShaders::value_type(shader, nullptr));
}

/**
 * Like enqueue_shader, but returns an AsyncFuture that can be used to query
 * the status of the shader's preparation.
 */
PT(PreparedGraphicsObjects::EnqueuedObject) PreparedGraphicsObjects::
enqueue_shader_future(Shader *shader) {
  ReMutexHolder holder(_lock);

  std::pair<EnqueuedShaders::iterator, bool> result =
    _enqueued_shaders.insert(EnqueuedShaders::value_type(shader, nullptr));
  if (result.first->second == nullptr) {
    result.first->second = new EnqueuedObject(this, shader);
  }
  PT(EnqueuedObject) fut = result.first->second;
  nassertr(!fut->cancelled(), fut);
  return fut;
}

/**
 * Returns true if the shader has been queued on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_queued(const Shader *shader) const {
  ReMutexHolder holder(_lock);

  EnqueuedShaders::const_iterator qi = _enqueued_shaders.find((Shader *)shader);
  return (qi != _enqueued_shaders.end());
}

/**
 * Removes a shader from the queued list of shaders to be prepared.  Normally
 * it is not necessary to call this, unless you change your mind about
 * preparing it at the last minute, since the shader will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the shader is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_shader(Shader *se) {
  ReMutexHolder holder(_lock);

  EnqueuedShaders::iterator qi = _enqueued_shaders.find(se);
  if (qi != _enqueued_shaders.end()) {
    if (qi->second != nullptr) {
      qi->second->notify_removed();
    }
    _enqueued_shaders.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the shader has been prepared on this GSG, false otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_prepared(const Shader *shader) const {
  return shader->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a shader context, created by a previous call to
 * prepare_shader(), is no longer needed.  The driver resources will not be
 * freed until some GSG calls update(), indicating it is at a stage where it
 * is ready to release shaders--this prevents conflicts from threading or
 * multiple GSG's sharing shaders (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_shader is called).
 */
void PreparedGraphicsObjects::
release_shader(ShaderContext *sc) {
  ReMutexHolder holder(_lock);

  sc->_shader->clear_prepared(this);

  // We have to set the Shader pointer to NULL at this point, since the Shader
  // itself might destruct at any time after it has been released.
  sc->_shader = nullptr;

  bool removed = (_prepared_shaders.erase(sc) != 0);
  nassertv(removed);

  _released_shaders.insert(sc);
}

/**
 * Releases all shaders at once.  This will force them to be reloaded into
 * shader memory for all GSG's that share this object.  Returns the number of
 * shaders released.
 */
int PreparedGraphicsObjects::
release_all_shaders() {
  ReMutexHolder holder(_lock);

  int num_shaders = (int)_prepared_shaders.size() + (int)_enqueued_shaders.size();

  Shaders::iterator sci;
  for (sci = _prepared_shaders.begin();
       sci != _prepared_shaders.end();
       ++sci) {
    ShaderContext *sc = (*sci);
    sc->_shader->clear_prepared(this);
    sc->_shader = nullptr;

    _released_shaders.insert(sc);
  }

  _prepared_shaders.clear();

  // Mark any futures as cancelled.
  EnqueuedShaders::iterator qsi;
  for (qsi = _enqueued_shaders.begin();
       qsi != _enqueued_shaders.end();
       ++qsi) {
    if (qsi->second != nullptr) {
      qsi->second->notify_removed();
    }
  }

  _enqueued_shaders.clear();

  return num_shaders;
}

/**
 * Returns the number of shaders that have been enqueued to be prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_shaders() const {
  return _enqueued_shaders.size();
}

/**
 * Returns the number of shaders that have already been prepared on this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_shaders() const {
  return _prepared_shaders.size();
}

/**
 * Immediately creates a new ShaderContext for the indicated shader and
 * returns it.  This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new shaders.  If
 * this is not necessarily the case, you should use enqueue_shader() instead.
 *
 * Normally, this function is not called directly.  Call Shader::prepare_now()
 * instead.
 *
 * The ShaderContext contains all of the pertinent information needed by the
 * GSG to keep track of this one particular shader, and will exist as long as
 * the shader is ready to be rendered.
 *
 * When either the Shader or the PreparedGraphicsObjects object destructs, the
 * ShaderContext will be deleted.
 */
ShaderContext *PreparedGraphicsObjects::
prepare_shader_now(Shader *se, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new ShaderContext.  There might be several
  // GSG's sharing the same set of shaders; if so, it doesn't matter which of
  // them creates the context (since they're all shared anyway).
  ShaderContext *sc = gsg->prepare_shader(se);

  if (sc != nullptr) {
    bool prepared = _prepared_shaders.insert(sc).second;
    nassertr(prepared, sc);
  }

  return sc;
}

/**
 * Indicates that a buffer would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_vertex_buffer(GeomVertexArrayData *data) {
  ReMutexHolder holder(_lock);

  _enqueued_vertex_buffers.insert(data);
}

/**
 * Returns true if the vertex buffer has been queued on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_vertex_buffer_queued(const GeomVertexArrayData *data) const {
  ReMutexHolder holder(_lock);

  EnqueuedVertexBuffers::const_iterator qi = _enqueued_vertex_buffers.find((GeomVertexArrayData *)data);
  return (qi != _enqueued_vertex_buffers.end());
}

/**
 * Removes a buffer from the queued list of data arrays to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the data will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the buffer is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_vertex_buffer(GeomVertexArrayData *data) {
  ReMutexHolder holder(_lock);

  EnqueuedVertexBuffers::iterator qi = _enqueued_vertex_buffers.find(data);
  if (qi != _enqueued_vertex_buffers.end()) {
    _enqueued_vertex_buffers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the vertex buffer has been prepared on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_vertex_buffer_prepared(const GeomVertexArrayData *data) const {
  return data->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a data context, created by a previous call to
 * prepare_vertex_buffer(), is no longer needed.  The driver resources will
 * not be freed until some GSG calls update(), indicating it is at a stage
 * where it is ready to release datas--this prevents conflicts from threading
 * or multiple GSG's sharing datas (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_vertex_buffer is called).
 */
void PreparedGraphicsObjects::
release_vertex_buffer(VertexBufferContext *vbc) {
  ReMutexHolder holder(_lock);

  vbc->get_data()->clear_prepared(this);

  size_t data_size_bytes = vbc->get_data()->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = vbc->get_data()->get_usage_hint();

  // We have to set the Data pointer to NULL at this point, since the Data
  // itself might destruct at any time after it has been released.
  vbc->_object = nullptr;

  bool removed = (_prepared_vertex_buffers.erase(vbc) != 0);
  nassertv(removed);

  if (_support_released_buffer_cache) {
    cache_unprepared_buffer(vbc, data_size_bytes, usage_hint,
                            _vertex_buffer_cache,
                            _vertex_buffer_cache_lru, _vertex_buffer_cache_size,
                            released_vbuffer_cache_size,
                            _released_vertex_buffers);
  } else {
    _released_vertex_buffers.insert(vbc);
  }
}

/**
 * Releases all datas at once.  This will force them to be reloaded into data
 * memory for all GSG's that share this object.  Returns the number of datas
 * released.
 */
int PreparedGraphicsObjects::
release_all_vertex_buffers() {
  ReMutexHolder holder(_lock);

  int num_vertex_buffers = (int)_prepared_vertex_buffers.size() + (int)_enqueued_vertex_buffers.size();

  Buffers::iterator vbci;
  for (vbci = _prepared_vertex_buffers.begin();
       vbci != _prepared_vertex_buffers.end();
       ++vbci) {
    VertexBufferContext *vbc = (VertexBufferContext *)(*vbci);
    vbc->get_data()->clear_prepared(this);
    vbc->_object = nullptr;

    _released_vertex_buffers.insert(vbc);
  }

  _prepared_vertex_buffers.clear();
  _enqueued_vertex_buffers.clear();

  // Also clear the cache of recently-unprepared vertex buffers.
  BufferCache::iterator bci;
  for (bci = _vertex_buffer_cache.begin();
       bci != _vertex_buffer_cache.end();
       ++bci) {
    BufferList &buffer_list = (*bci).second;
    nassertr(!buffer_list.empty(), num_vertex_buffers);
    BufferList::iterator li;
    for (li = buffer_list.begin(); li != buffer_list.end(); ++li) {
      VertexBufferContext *vbc = (VertexBufferContext *)(*li);
      _released_vertex_buffers.insert(vbc);
    }
  }
  _vertex_buffer_cache.clear();
  _vertex_buffer_cache_lru.clear();
  _vertex_buffer_cache_size = 0;

  return num_vertex_buffers;
}

/**
 * Returns the number of vertex buffers that have been enqueued to be prepared
 * on this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_vertex_buffers() const {
  return _enqueued_vertex_buffers.size();
}

/**
 * Returns the number of vertex buffers that have already been prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_vertex_buffers() const {
  return _prepared_vertex_buffers.size();
}

/**
 * Immediately creates a new VertexBufferContext for the indicated data and
 * returns it.  This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new datas.  If this
 * is not necessarily the case, you should use enqueue_vertex_buffer()
 * instead.
 *
 * Normally, this function is not called directly.  Call
 * GeomVertexArrayData::prepare_now() instead.
 *
 * The VertexBufferContext contains all of the pertinent information needed by
 * the GSG to keep track of this one particular data, and will exist as long
 * as the data is ready to be rendered.
 *
 * When either the data or the PreparedGraphicsObjects object destructs, the
 * VertexBufferContext will be deleted.
 */
VertexBufferContext *PreparedGraphicsObjects::
prepare_vertex_buffer_now(GeomVertexArrayData *data, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // First, see if there might be a cached context of the appropriate size.
  size_t data_size_bytes = data->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = data->get_usage_hint();
  VertexBufferContext *vbc = (VertexBufferContext *)
    get_cached_buffer(data_size_bytes, usage_hint,
                      _vertex_buffer_cache, _vertex_buffer_cache_lru,
                      _vertex_buffer_cache_size);
  if (vbc != nullptr) {
    vbc->_object = data;

  } else {
    // Ask the GSG to create a brand new VertexBufferContext.  There might be
    // several GSG's sharing the same set of datas; if so, it doesn't matter
    // which of them creates the context (since they're all shared anyway).
    vbc = gsg->prepare_vertex_buffer(data);
  }

  if (vbc != nullptr) {
    bool prepared = _prepared_vertex_buffers.insert(vbc).second;
    nassertr(prepared, vbc);
  }

  return vbc;
}

/**
 * Indicates that a buffer would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_index_buffer(GeomPrimitive *data) {
  ReMutexHolder holder(_lock);

  _enqueued_index_buffers.insert(data);
}

/**
 * Returns true if the index buffer has been queued on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_index_buffer_queued(const GeomPrimitive *data) const {
  ReMutexHolder holder(_lock);

  EnqueuedIndexBuffers::const_iterator qi = _enqueued_index_buffers.find((GeomPrimitive *)data);
  return (qi != _enqueued_index_buffers.end());
}

/**
 * Removes a buffer from the queued list of data arrays to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the data will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the buffer is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_index_buffer(GeomPrimitive *data) {
  ReMutexHolder holder(_lock);

  EnqueuedIndexBuffers::iterator qi = _enqueued_index_buffers.find(data);
  if (qi != _enqueued_index_buffers.end()) {
    _enqueued_index_buffers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the index buffer has been prepared on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_index_buffer_prepared(const GeomPrimitive *data) const {
  return data->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a data context, created by a previous call to
 * prepare_index_buffer(), is no longer needed.  The driver resources will not
 * be freed until some GSG calls update(), indicating it is at a stage where
 * it is ready to release datas--this prevents conflicts from threading or
 * multiple GSG's sharing datas (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_index_buffer is called).
 */
void PreparedGraphicsObjects::
release_index_buffer(IndexBufferContext *ibc) {
  ReMutexHolder holder(_lock);

  ibc->get_data()->clear_prepared(this);

  size_t data_size_bytes = ibc->get_data()->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = ibc->get_data()->get_usage_hint();

  // We have to set the Data pointer to NULL at this point, since the Data
  // itself might destruct at any time after it has been released.
  ibc->_object = nullptr;

  bool removed = (_prepared_index_buffers.erase(ibc) != 0);
  nassertv(removed);

  if (_support_released_buffer_cache) {
    cache_unprepared_buffer(ibc, data_size_bytes, usage_hint,
                            _index_buffer_cache,
                            _index_buffer_cache_lru, _index_buffer_cache_size,
                            released_ibuffer_cache_size,
                            _released_index_buffers);
  } else {
    _released_index_buffers.insert(ibc);
  }
}

/**
 * Releases all datas at once.  This will force them to be reloaded into data
 * memory for all GSG's that share this object.  Returns the number of datas
 * released.
 */
int PreparedGraphicsObjects::
release_all_index_buffers() {
  ReMutexHolder holder(_lock);

  int num_index_buffers = (int)_prepared_index_buffers.size() + (int)_enqueued_index_buffers.size();

  Buffers::iterator ibci;
  for (ibci = _prepared_index_buffers.begin();
       ibci != _prepared_index_buffers.end();
       ++ibci) {
    IndexBufferContext *ibc = (IndexBufferContext *)(*ibci);
    ibc->get_data()->clear_prepared(this);
    ibc->_object = nullptr;

    _released_index_buffers.insert(ibc);
  }

  _prepared_index_buffers.clear();
  _enqueued_index_buffers.clear();

  // Also clear the cache of recently-unprepared index buffers.
  BufferCache::iterator bci;
  for (bci = _index_buffer_cache.begin();
       bci != _index_buffer_cache.end();
       ++bci) {
    BufferList &buffer_list = (*bci).second;
    nassertr(!buffer_list.empty(), num_index_buffers);
    BufferList::iterator li;
    for (li = buffer_list.begin(); li != buffer_list.end(); ++li) {
      IndexBufferContext *ibc = (IndexBufferContext *)(*li);
      _released_index_buffers.insert(ibc);
    }
  }
  _index_buffer_cache.clear();
  _index_buffer_cache_lru.clear();
  _index_buffer_cache_size = 0;

  return num_index_buffers;
}

/**
 * Returns the number of index buffers that have been enqueued to be prepared
 * on this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_index_buffers() const {
  return _enqueued_index_buffers.size();
}

/**
 * Returns the number of index buffers that have already been prepared on this
 * GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_index_buffers() const {
  return _prepared_index_buffers.size();
}

/**
 * Immediately creates a new IndexBufferContext for the indicated data and
 * returns it.  This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new datas.  If this
 * is not necessarily the case, you should use enqueue_index_buffer() instead.
 *
 * Normally, this function is not called directly.  Call
 * GeomPrimitive::prepare_now() instead.
 *
 * The IndexBufferContext contains all of the pertinent information needed by
 * the GSG to keep track of this one particular data, and will exist as long
 * as the data is ready to be rendered.
 *
 * When either the data or the PreparedGraphicsObjects object destructs, the
 * IndexBufferContext will be deleted.
 */
IndexBufferContext *PreparedGraphicsObjects::
prepare_index_buffer_now(GeomPrimitive *data, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // First, see if there might be a cached context of the appropriate size.
  size_t data_size_bytes = data->get_data_size_bytes();
  GeomEnums::UsageHint usage_hint = data->get_usage_hint();
  IndexBufferContext *ibc = (IndexBufferContext *)
    get_cached_buffer(data_size_bytes, usage_hint,
                      _index_buffer_cache, _index_buffer_cache_lru,
                      _index_buffer_cache_size);
  if (ibc != nullptr) {
    ibc->_object = data;

  } else {
    // Ask the GSG to create a brand new IndexBufferContext.  There might be
    // several GSG's sharing the same set of datas; if so, it doesn't matter
    // which of them creates the context (since they're all shared anyway).
    ibc = gsg->prepare_index_buffer(data);
  }

  if (ibc != nullptr) {
    bool prepared = _prepared_index_buffers.insert(ibc).second;
    nassertr(prepared, ibc);
  }

  return ibc;
}

/**
 * Indicates that a buffer would like to be put on the list to be prepared
 * when the GSG is next ready to do this (presumably at the next frame).
 */
void PreparedGraphicsObjects::
enqueue_shader_buffer(ShaderBuffer *data) {
  ReMutexHolder holder(_lock);

  _enqueued_shader_buffers.insert(data);
}

/**
 * Returns true if the shader buffer has been queued on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_buffer_queued(const ShaderBuffer *data) const {
  ReMutexHolder holder(_lock);

  EnqueuedShaderBuffers::const_iterator qi = _enqueued_shader_buffers.find((ShaderBuffer *)data);
  return (qi != _enqueued_shader_buffers.end());
}

/**
 * Removes a buffer from the queued list of data arrays to be prepared.
 * Normally it is not necessary to call this, unless you change your mind
 * about preparing it at the last minute, since the data will automatically be
 * dequeued and prepared at the next frame.
 *
 * The return value is true if the buffer is successfully dequeued, false if
 * it had not been queued.
 */
bool PreparedGraphicsObjects::
dequeue_shader_buffer(ShaderBuffer *data) {
  ReMutexHolder holder(_lock);

  EnqueuedShaderBuffers::iterator qi = _enqueued_shader_buffers.find(data);
  if (qi != _enqueued_shader_buffers.end()) {
    _enqueued_shader_buffers.erase(qi);
    return true;
  }
  return false;
}

/**
 * Returns true if the shader buffer has been prepared on this GSG, false
 * otherwise.
 */
bool PreparedGraphicsObjects::
is_shader_buffer_prepared(const ShaderBuffer *data) const {
  return data->is_prepared((PreparedGraphicsObjects *)this);
}

/**
 * Indicates that a data context, created by a previous call to
 * prepare_shader_buffer(), is no longer needed.  The driver resources will not
 * be freed until some GSG calls update(), indicating it is at a stage where
 * it is ready to release datas--this prevents conflicts from threading or
 * multiple GSG's sharing datas (we have no way of knowing which graphics
 * context is currently active, or what state it's in, at the time
 * release_shader_buffer is called).
 */
void PreparedGraphicsObjects::
release_shader_buffer(BufferContext *bc) {
  ReMutexHolder holder(_lock);

  ShaderBuffer *buffer = (ShaderBuffer *)bc->_object;
  buffer->clear_prepared(this);

  // We have to set the ShaderBuffer pointer to NULL at this point, since the
  // buffer itself might destruct at any time after it has been released.
  bc->_object = nullptr;

  bool removed = (_prepared_shader_buffers.erase(bc) != 0);
  nassertv(removed);

  _released_shader_buffers.insert(bc);
}

/**
 * Releases all datas at once.  This will force them to be reloaded into data
 * memory for all GSG's that share this object.  Returns the number of datas
 * released.
 */
int PreparedGraphicsObjects::
release_all_shader_buffers() {
  ReMutexHolder holder(_lock);

  int num_shader_buffers = (int)_prepared_shader_buffers.size() + (int)_enqueued_shader_buffers.size();

  Buffers::iterator bci;
  for (bci = _prepared_shader_buffers.begin();
       bci != _prepared_shader_buffers.end();
       ++bci) {

    BufferContext *bc = (BufferContext *)(*bci);
    ((ShaderBuffer *)bc->_object)->clear_prepared(this);
    bc->_object = nullptr;
    _released_shader_buffers.insert(bc);
  }

  _prepared_shader_buffers.clear();
  _enqueued_shader_buffers.clear();

  return num_shader_buffers;
}

/**
 * Returns the number of shader buffers that have been enqueued to be prepared
 * on this GSG.
 */
int PreparedGraphicsObjects::
get_num_queued_shader_buffers() const {
  return _enqueued_shader_buffers.size();
}

/**
 * Returns the number of shader buffers that have already been prepared on
 * this GSG.
 */
int PreparedGraphicsObjects::
get_num_prepared_shader_buffers() const {
  return _prepared_shader_buffers.size();
}

/**
 * Immediately creates a new BufferContext for the indicated data and
 * returns it.  This assumes that the GraphicsStateGuardian is the currently
 * active rendering context and that it is ready to accept new datas.  If this
 * is not necessarily the case, you should use enqueue_shader_buffer() instead.
 *
 * Normally, this function is not called directly.  Call
 * ShaderBuffer::prepare_now() instead.
 *
 * The BufferContext contains all of the pertinent information needed by
 * the GSG to keep track of this one particular data, and will exist as long
 * as the data is ready to be rendered.
 *
 * When either the data or the PreparedGraphicsObjects object destructs, the
 * BufferContext will be deleted.
 */
BufferContext *PreparedGraphicsObjects::
prepare_shader_buffer_now(ShaderBuffer *data, GraphicsStateGuardianBase *gsg) {
  ReMutexHolder holder(_lock);

  // Ask the GSG to create a brand new BufferContext.  There might be
  // several GSG's sharing the same set of datas; if so, it doesn't matter
  // which of them creates the context (since they're all shared anyway).
  BufferContext *bc = gsg->prepare_shader_buffer(data);

  if (bc != nullptr) {
    bool prepared = _prepared_shader_buffers.insert(bc).second;
    nassertr(prepared, bc);
  }

  return bc;
}

/**
 * Creates a new future for the given object.
 */
PreparedGraphicsObjects::EnqueuedObject::
EnqueuedObject(PreparedGraphicsObjects *pgo, TypedWritableReferenceCount *object) :
  _pgo(pgo),
  _object(object) {
}

/**
 * Indicates that the preparation request is done.
 */
void PreparedGraphicsObjects::EnqueuedObject::
set_result(SavedContext *context) {
  nassertv(!done());
  AsyncFuture::set_result(context);
  _pgo = nullptr;
}

/**
 * Called by PreparedGraphicsObjects to indicate that the preparation request
 * has been cancelled.
 */
void PreparedGraphicsObjects::EnqueuedObject::
notify_removed() {
  _pgo = nullptr;
  nassertv_always(AsyncFuture::cancel());
}

/**
 * Cancels the pending preparation request.  Has no effect if the preparation
 * is already complete or was already cancelled.
 */
bool PreparedGraphicsObjects::EnqueuedObject::
cancel() {
  PreparedGraphicsObjects *pgo = _pgo;
  if (_object == nullptr || pgo == nullptr) {
    nassertr(done(), false);
    return false;
  }

  // We don't upcall here, because the dequeue function will end up calling
  // notify_removed().
  _result = nullptr;
  _pgo = nullptr;

  if (_object->is_of_type(Texture::get_class_type())) {
    return pgo->dequeue_texture((Texture *)_object.p());

  } else if (_object->is_of_type(Geom::get_class_type())) {
    return pgo->dequeue_geom((Geom *)_object.p());

  } else if (_object->is_of_type(Shader::get_class_type())) {
    return pgo->dequeue_shader((Shader *)_object.p());

  } else if (_object->is_of_type(GeomVertexArrayData::get_class_type())) {
    return pgo->dequeue_vertex_buffer((GeomVertexArrayData *)_object.p());

  } else if (_object->is_of_type(GeomPrimitive::get_class_type())) {
    return pgo->dequeue_index_buffer((GeomPrimitive *)_object.p());

  } else if (_object->is_of_type(ShaderBuffer::get_class_type())) {
    return pgo->dequeue_shader_buffer((ShaderBuffer *)_object.p());
  }
  return false;
}
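
// A short sketch, assuming a future obtained from enqueue_texture_future()
// and a hypothetical flag `needed_after_all`; cancelling the future dequeues
// the texture again if it has not yet been prepared:
//
//   PT(PreparedGraphicsObjects::EnqueuedObject) fut =
//     pgo->enqueue_texture_future(tex);
//   if (!needed_after_all && fut->cancel()) {
//     // The texture was removed from the preparation queue.
//   }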

/**
 * This is called by the GraphicsStateGuardian to indicate that it is about to
 * begin processing of the frame.
 *
 * Any texture contexts that were previously passed to release_texture() are
 * actually passed to the GSG to be freed at this point; textures that were
 * previously passed to prepare_texture are actually loaded.
 */
void PreparedGraphicsObjects::
begin_frame(GraphicsStateGuardianBase *gsg, Thread *current_thread) {
  ReMutexHolder holder(_lock, current_thread);

  // First, release all the textures, geoms, and buffers awaiting release.
  if (!_released_textures.empty()) {
    Textures::iterator tci;
    for (tci = _released_textures.begin();
         tci != _released_textures.end();
         ++tci) {
      TextureContext *tc = (*tci);
      gsg->release_texture(tc);
    }

    _released_textures.clear();
  }

  if (!_released_samplers.empty()) {
    ReleasedSamplers::iterator sci;
    for (sci = _released_samplers.begin();
         sci != _released_samplers.end();
         ++sci) {
      SamplerContext *sc = (*sci);
      gsg->release_sampler(sc);
    }

    _released_samplers.clear();
  }

  Geoms::iterator gci;
  for (gci = _released_geoms.begin();
       gci != _released_geoms.end();
       ++gci) {
    GeomContext *gc = (*gci);
    gsg->release_geom(gc);
  }

  _released_geoms.clear();

  Shaders::iterator sci;
  for (sci = _released_shaders.begin();
       sci != _released_shaders.end();
       ++sci) {
    ShaderContext *sc = (*sci);
    gsg->release_shader(sc);
  }

  _released_shaders.clear();

  Buffers::iterator vbci;
  for (vbci = _released_vertex_buffers.begin();
       vbci != _released_vertex_buffers.end();
       ++vbci) {
    VertexBufferContext *vbc = (VertexBufferContext *)(*vbci);
    gsg->release_vertex_buffer(vbc);
  }

  _released_vertex_buffers.clear();

  Buffers::iterator ibci;
  for (ibci = _released_index_buffers.begin();
       ibci != _released_index_buffers.end();
       ++ibci) {
    IndexBufferContext *ibc = (IndexBufferContext *)(*ibci);
    gsg->release_index_buffer(ibc);
  }

  _released_index_buffers.clear();

  // Reset the residency trackers.
  _texture_residency.begin_frame(current_thread);
  _vbuffer_residency.begin_frame(current_thread);
  _ibuffer_residency.begin_frame(current_thread);
  _sbuffer_residency.begin_frame(current_thread);

  // Now prepare all the textures, geoms, and buffers awaiting preparation.
  EnqueuedTextures::iterator qti;
  for (qti = _enqueued_textures.begin();
       qti != _enqueued_textures.end();
       ++qti) {
    Texture *tex = qti->first;
    for (int view = 0; view < tex->get_num_views(); ++view) {
      TextureContext *tc = tex->prepare_now(view, this, gsg);
      if (tc != nullptr) {
        gsg->update_texture(tc, true);
        if (view == 0 && qti->second != nullptr) {
          qti->second->set_result(tc);
        }
      }
    }
  }

  _enqueued_textures.clear();

  EnqueuedSamplers::iterator qsmi;
  for (qsmi = _enqueued_samplers.begin();
       qsmi != _enqueued_samplers.end();
       ++qsmi) {
    const SamplerState &sampler = (*qsmi);
    sampler.prepare_now(this, gsg);
  }

  _enqueued_samplers.clear();

  EnqueuedGeoms::iterator qgi;
  for (qgi = _enqueued_geoms.begin();
       qgi != _enqueued_geoms.end();
       ++qgi) {
    Geom *geom = (*qgi);
    geom->prepare_now(this, gsg);
  }

  _enqueued_geoms.clear();

  EnqueuedShaders::iterator qsi;
  for (qsi = _enqueued_shaders.begin();
       qsi != _enqueued_shaders.end();
       ++qsi) {
    Shader *shader = qsi->first;
    ShaderContext *sc = shader->prepare_now(this, gsg);
    if (qsi->second != nullptr) {
      qsi->second->set_result(sc);
    }
  }

  _enqueued_shaders.clear();

  EnqueuedVertexBuffers::iterator qvbi;
  for (qvbi = _enqueued_vertex_buffers.begin();
       qvbi != _enqueued_vertex_buffers.end();
       ++qvbi) {
    GeomVertexArrayData *data = (*qvbi);
    data->prepare_now(this, gsg);
  }

  _enqueued_vertex_buffers.clear();

  EnqueuedIndexBuffers::iterator qibi;
  for (qibi = _enqueued_index_buffers.begin();
       qibi != _enqueued_index_buffers.end();
       ++qibi) {
    GeomPrimitive *data = (*qibi);
    // We need this check because the actual index data may not have
    // propagated to the draw thread yet.
    if (data->is_indexed()) {
      data->prepare_now(this, gsg);
    }
  }

  _enqueued_index_buffers.clear();

  for (ShaderBuffer *buffer : _enqueued_shader_buffers) {
    buffer->prepare_now(this, gsg);
  }

  _enqueued_shader_buffers.clear();
}
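
// A condensed sketch of the per-frame protocol this implies, assuming a GSG
// driving this object (this is roughly what GraphicsStateGuardian does
// internally around each frame):
//
//   pgo->begin_frame(gsg, Thread::get_current_thread());
//   // ...issue draw calls; anything enqueued from now on waits for the
//   // next frame...
//   pgo->end_frame(Thread::get_current_thread());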

/**
 * This is called by the GraphicsStateGuardian to indicate that it has
 * finished processing of the frame.
 */
void PreparedGraphicsObjects::
end_frame(Thread *current_thread) {
  ReMutexHolder holder(_lock, current_thread);

  _texture_residency.end_frame(current_thread);
  _vbuffer_residency.end_frame(current_thread);
  _ibuffer_residency.end_frame(current_thread);
  _sbuffer_residency.end_frame(current_thread);
}

/**
 * Returns a new, unique name for a newly-constructed object.
 */
std::string PreparedGraphicsObjects::
init_name() {
  ++_name_index;
  std::ostringstream strm;
  strm << "context" << _name_index;
  return strm.str();
}

/**
 * Called when a vertex or index buffer is no longer officially "prepared".
 * However, we still have the context on the graphics card, and we might be
 * able to reuse that context if we're about to re-prepare a different buffer,
 * especially one exactly the same size.  So instead of immediately enqueuing
 * the vertex buffer for release, we cache it.
 */
void PreparedGraphicsObjects::
cache_unprepared_buffer(BufferContext *buffer, size_t data_size_bytes,
                        GeomEnums::UsageHint usage_hint,
                        PreparedGraphicsObjects::BufferCache &buffer_cache,
                        PreparedGraphicsObjects::BufferCacheLRU &buffer_cache_lru,
                        size_t &buffer_cache_size,
                        int released_buffer_cache_size,
                        PreparedGraphicsObjects::Buffers &released_buffers) {
  BufferCacheKey key;
  key._data_size_bytes = data_size_bytes;
  key._usage_hint = usage_hint;

  buffer_cache[key].push_back(buffer);
  buffer_cache_size += data_size_bytes;

  // Move the key to the head of the LRU.
  BufferCacheLRU::iterator li =
    find(buffer_cache_lru.begin(), buffer_cache_lru.end(), key);
  if (li != buffer_cache_lru.end()) {
    buffer_cache_lru.erase(li);
  }
  buffer_cache_lru.insert(buffer_cache_lru.begin(), key);

  // Now release not-recently-used buffers until we fit within the constrained
  // size.
  while ((int)buffer_cache_size > released_buffer_cache_size) {
    nassertv(!buffer_cache_lru.empty());
    const BufferCacheKey &release_key = *buffer_cache_lru.rbegin();
    BufferList &buffer_list = buffer_cache[release_key];
    while (!buffer_list.empty() &&
           (int)buffer_cache_size > released_buffer_cache_size) {
      BufferContext *released_buffer = buffer_list.back();
      buffer_list.pop_back();
      released_buffers.insert(released_buffer);
      buffer_cache_size -= release_key._data_size_bytes;
    }

    if (buffer_list.empty()) {
      buffer_cache.erase(release_key);
      buffer_cache_lru.pop_back();
    }
  }
}

/**
 * Returns a previously-cached buffer from the cache, or NULL if there is no
 * such buffer.
 */
BufferContext *PreparedGraphicsObjects::
get_cached_buffer(size_t data_size_bytes, GeomEnums::UsageHint usage_hint,
                  PreparedGraphicsObjects::BufferCache &buffer_cache,
                  PreparedGraphicsObjects::BufferCacheLRU &buffer_cache_lru,
                  size_t &buffer_cache_size) {
  BufferCacheKey key;
  key._data_size_bytes = data_size_bytes;
  key._usage_hint = usage_hint;

  BufferCache::iterator bci = buffer_cache.find(key);
  if (bci == buffer_cache.end()) {
    return nullptr;
  }

  BufferList &buffer_list = (*bci).second;
  nassertr(!buffer_list.empty(), nullptr);

  BufferContext *buffer = buffer_list.back();
  buffer_list.pop_back();
  if (buffer_list.empty()) {
    buffer_cache.erase(bci);
    BufferCacheLRU::iterator li =
      find(buffer_cache_lru.begin(), buffer_cache_lru.end(), key);
    if (li != buffer_cache_lru.end()) {
      buffer_cache_lru.erase(li);
    }
  }

  buffer_cache_size -= data_size_bytes;
  return buffer;
}
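
// A simplified sketch of the round trip through the released-buffer cache,
// assuming _support_released_buffer_cache is enabled (as the GL GSG does):
//
//   // On release: the context is keyed by (size, usage hint) and kept around
//   // instead of being handed to the GSG for destruction.
//   cache_unprepared_buffer(vbc, size, hint, _vertex_buffer_cache,
//                           _vertex_buffer_cache_lru, _vertex_buffer_cache_size,
//                           released_vbuffer_cache_size, _released_vertex_buffers);
//
//   // On re-prepare: an exact (size, usage hint) match is recycled rather
//   // than asking the GSG for a fresh buffer.
//   BufferContext *bc = get_cached_buffer(size, hint, _vertex_buffer_cache,
//                                         _vertex_buffer_cache_lru,
//                                         _vertex_buffer_cache_size);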
void set_max_size(size_t max_size)
Changes the max size of all objects that are allowed to be active on the LRU.
Definition adaptiveLru.I:40
size_t get_max_size() const
Returns the max size of all objects that are allowed to be active on the LRU.
Definition adaptiveLru.I:28
virtual bool cancel()
Cancels the future.
void set_result(std::nullptr_t)
Sets this future's result.
Definition asyncFuture.I:92
This is a base class for those kinds of SavedContexts that occupy an easily-measured (and substantial...
void end_frame(Thread *current_thread)
To be called at the end of a frame, this updates the PStatCollectors appropriately.
void begin_frame(Thread *current_thread)
To be called at the beginning of a frame, this initializes the active/inactive status.
This is a special class object that holds all the information returned by a particular GSG to indicat...
Definition geomContext.h:34
This is an abstract base class for a family of classes that represent the fundamental geometry primit...
get_data_size_bytes
Returns the number of bytes stored in the vertices array.
get_usage_hint
Returns the usage hint for this primitive.
This is the data for one array of a GeomVertexData structure.
get_usage_hint
Returns the usage hint that describes to the rendering backend how often the vertex data will be modi...
get_data_size_bytes
Returns the number of bytes stored in the array.
A container for geometry primitives.
Definition geom.h:54
GeomContext * prepare_now(PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the geom on the particular GSG, if it does not already exist.
Definition geom.cxx:1314
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the geom has already been prepared or enqueued for preparation on the indicated GSG,...
Definition geom.cxx:1248
This is a base class for the GraphicsStateGuardian class, which is itself a base class for the variou...
This is a special class object that holds all the information returned by a particular GSG to indicat...
GeomPrimitive * get_data() const
Returns the pointer to the client-side array data object.
This is a handle to an enqueued object, from which the result can be obtained upon completion.
void set_result(SavedContext *result)
Indicates that the preparation request is done.
virtual bool cancel() final
Cancels the pending preparation request.
EnqueuedObject(PreparedGraphicsObjects *pgo, TypedWritableReferenceCount *object)
Creates a new future for the given object.
void notify_removed()
Called by PreparedGraphicsObjects to indicate that the preparation request has been cancelled.
A table of objects that are saved within the graphics context for reference by handle later.
void enqueue_vertex_buffer(GeomVertexArrayData *data)
Indicates that a buffer would like to be put on the list to be prepared when the GSG is next ready to...
int get_num_queued_geoms() const
Returns the number of geoms that have been enqueued to be prepared on this GSG.
int release_all_geoms()
Releases all geoms at once.
void release_sampler(SamplerContext *sc)
Indicates that a sampler context, created by a previous call to prepare_sampler(),...
bool dequeue_vertex_buffer(GeomVertexArrayData *data)
Removes a buffer from the queued list of data arrays to be prepared.
bool is_texture_queued(const Texture *tex) const
Returns true if the texture has been queued on this GSG, false otherwise.
int release_all_textures()
Releases all textures at once.
int get_num_prepared_shaders() const
Returns the number of shaders that have already been prepared on this GSG.
int get_num_queued_index_buffers() const
Returns the number of index buffers that have been enqueued to be prepared on this GSG.
int get_num_prepared_shader_buffers() const
Returns the number of shader buffers that have already been prepared on this GSG.
TextureContext * prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg)
Immediately creates a new TextureContext for the indicated texture and returns it.
int get_num_prepared_vertex_buffers() const
Returns the number of vertex buffers that have already been prepared on this GSG.
void release_geom(GeomContext *gc)
Indicates that a geom context, created by a previous call to prepare_geom(), is no longer needed.
bool is_index_buffer_prepared(const GeomPrimitive *data) const
Returns true if the index buffer has been prepared on this GSG, false otherwise.
int get_num_prepared_textures() const
Returns the number of textures that have already been prepared on this GSG.
void enqueue_sampler(const SamplerState &sampler)
Indicates that a sampler would like to be put on the list to be prepared when the GSG is next ready to do this (presumably at the next frame).
void enqueue_texture(Texture *tex)
Indicates that a texture would like to be put on the list to be prepared when the GSG is next ready to do this (presumably at the next frame).
int get_num_queued_vertex_buffers() const
Returns the number of vertex buffers that have been enqueued to be prepared on this GSG.
void show_residency_trackers(std::ostream &out) const
Writes to the indicated ostream a report of how the various textures and vertex buffers are allocated in texture memory.
bool is_shader_queued(const Shader *shader) const
Returns true if the shader has been queued on this GSG, false otherwise.
SamplerContext * prepare_sampler_now(const SamplerState &sampler, GraphicsStateGuardianBase *gsg)
Immediately creates a new SamplerContext for the indicated sampler and returns it.
bool is_geom_prepared(const Geom *geom) const
Returns true if the geom has been prepared on this GSG, false otherwise.
int get_num_queued_samplers() const
Returns the number of samplers that have been enqueued to be prepared on this GSG.
bool is_shader_buffer_queued(const ShaderBuffer *data) const
Returns true if the shader buffer has been queued on this GSG, false otherwise.
int get_num_queued_shaders() const
Returns the number of shaders that have been enqueued to be prepared on this GSG.
bool dequeue_shader(Shader *shader)
Removes a shader from the queued list of shaders to be prepared.
bool is_texture_prepared(const Texture *tex) const
Returns true if the texture has been prepared on this GSG, false otherwise.
void release_shader_buffer(BufferContext *bc)
Indicates that a data context, created by a previous call to prepare_shader_buffer(), is no longer needed.
int release_all_shaders()
Releases all shaders at once.
bool is_sampler_prepared(const SamplerState &sampler) const
Returns true if the sampler has been prepared on this GSG, false otherwise.
int get_num_queued_textures() const
Returns the number of textures that have been enqueued to be prepared on this GSG.
bool is_sampler_queued(const SamplerState &sampler) const
Returns true if the sampler has been queued on this GSG, false otherwise.
bool dequeue_sampler(const SamplerState &sampler)
Removes a sampler from the queued list of samplers to be prepared.
int release_all_index_buffers()
Releases all index buffers at once.
bool is_geom_queued(const Geom *geom) const
Returns true if the geom has been queued on this GSG, false otherwise.
int get_num_prepared_geoms() const
Returns the number of geoms that have already been prepared on this GSG.
void release_shader(ShaderContext *sc)
Indicates that a shader context, created by a previous call to prepare_shader(), is no longer needed.
int release_all_shader_buffers()
Releases all shader buffers at once.
bool is_index_buffer_queued(const GeomPrimitive *data) const
Returns true if the index buffer has been queued on this GSG, false otherwise.
bool is_shader_buffer_prepared(const ShaderBuffer *data) const
Returns true if the shader buffer has been prepared on this GSG, false otherwise.
void set_graphics_memory_limit(size_t limit)
Sets an artificial cap on graphics memory that will be imposed on this GSG.
bool dequeue_texture(Texture *tex)
Removes a texture from the queued list of textures to be prepared.
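The texture-queue entry points pair up naturally: enqueue_texture() defers the work to the next begin_frame(), is_texture_queued() tests for membership, and dequeue_texture() withdraws the request. A minimal sketch, not from the sources:

#include "preparedGraphicsObjects.h"
#include "texture.h"
#include "pnotify.h"

void
queue_then_withdraw(PreparedGraphicsObjects *pgo, Texture *tex) {
  // Ask for the texture to be prepared when the GSG next begins a frame.
  pgo->enqueue_texture(tex);
  nassertv(pgo->is_texture_queued(tex));

  // Changed our mind before the frame began; dequeue_texture() returns
  // false if the texture was not actually on the queue.
  bool removed = pgo->dequeue_texture(tex);
  nassertv(removed);
}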
GeomContext * prepare_geom_now(Geom *geom, GraphicsStateGuardianBase *gsg)
Immediately creates a new GeomContext for the indicated geom and returns it.
void end_frame(Thread *current_thread)
This is called by the GraphicsStateGuardian to indicate that it has finished processing of the frame.
int release_all_samplers()
Releases all samplers at once.
void show_graphics_memory_lru(std::ostream &out) const
Writes to the indicated ostream a report of how the various textures and vertex buffers are allocated in the LRU.
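set_graphics_memory_limit() resizes the _graphics_memory_lru cap at runtime, and the two show_*() reporters dump what is currently resident. A short diagnostic sketch; the 256 MiB figure is purely illustrative:

#include "preparedGraphicsObjects.h"
#include <iostream>

void
cap_and_report(PreparedGraphicsObjects *pgo) {
  // Impose an artificial cap on graphics memory for this GSG.
  pgo->set_graphics_memory_limit(256 * 1024 * 1024);

  // Report how textures and vertex buffers are currently allocated.
  pgo->show_graphics_memory_lru(std::cerr);
  pgo->show_residency_trackers(std::cerr);
}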
bool dequeue_shader_buffer(ShaderBuffer *data)
Removes a buffer from the queued list of shader buffers to be prepared.
bool dequeue_geom(Geom *geom)
Removes a geom from the queued list of geoms to be prepared.
void release_index_buffer(IndexBufferContext *ibc)
Indicates that a data context, created by a previous call to prepare_index_buffer(), is no longer needed.
int get_num_prepared_samplers() const
Returns the number of samplers that have already been prepared on this GSG.
bool is_shader_prepared(const Shader *shader) const
Returns true if the shader has been prepared on this GSG, false otherwise.
bool is_vertex_buffer_prepared(const GeomVertexArrayData *data) const
Returns true if the vertex buffer has been prepared on this GSG, false otherwise.
void begin_frame(GraphicsStateGuardianBase *gsg, Thread *current_thread)
This is called by the GraphicsStateGuardian to indicate that it is about to begin processing of the frame.
void enqueue_index_buffer(GeomPrimitive *data)
Indicates that a buffer would like to be put on the list to be prepared when the GSG is next ready to do this (presumably at the next frame).
BufferContext * prepare_shader_buffer_now(ShaderBuffer *data, GraphicsStateGuardianBase *gsg)
Immediately creates a new BufferContext for the indicated data and returns it.
void release_vertex_buffer(VertexBufferContext *vbc)
Indicates that a data context, created by a previous call to prepare_vertex_buffer(), is no longer needed.
void enqueue_shader(Shader *shader)
Indicates that a shader would like to be put on the list to be prepared when the GSG is next ready to do this (presumably at the next frame).
void enqueue_geom(Geom *geom)
Indicates that a geom would like to be put on the list to be prepared when the GSG is next ready to do this (presumably at the next frame).
bool is_vertex_buffer_queued(const GeomVertexArrayData *data) const
Returns true if the vertex buffer has been queued on this GSG, false otherwise.
void release_texture(TextureContext *tc)
Indicates that a texture context, created by a previous call to prepare_texture(), is no longer needed.
int get_num_queued_shader_buffers() const
Returns the number of shader buffers that have been enqueued to be prepared on this GSG.
int release_all_vertex_buffers()
Releases all vertex buffers at once.
int get_num_prepared_index_buffers() const
Returns the number of index buffers that have already been prepared on this GSG.
IndexBufferContext * prepare_index_buffer_now(GeomPrimitive *data, GraphicsStateGuardianBase *gsg)
Immediately creates a new IndexBufferContext for the indicated data and returns it.
ShaderContext * prepare_shader_now(Shader *shader, GraphicsStateGuardianBase *gsg)
Immediately creates a new ShaderContext for the indicated shader and returns it.
void enqueue_shader_buffer(ShaderBuffer *data)
Indicates that a buffer would like to be put on the list to be prepared when the GSG is next ready to do this (presumably at the next frame).
VertexBufferContext * prepare_vertex_buffer_now(GeomVertexArrayData *data, GraphicsStateGuardianBase *gsg)
Immediately creates a new VertexBufferContext for the indicated data and returns it.
bool dequeue_index_buffer(GeomPrimitive *data)
Removes a buffer from the queued list of index buffers to be prepared.
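The get_num_queued_*() and get_num_prepared_*() accessors above make the table's state easy to watch from frame to frame. A small reporting sketch built only from those getters:

#include "preparedGraphicsObjects.h"
#include <ostream>

void
dump_pgo_counts(const PreparedGraphicsObjects *pgo, std::ostream &out) {
  out << "textures:       " << pgo->get_num_queued_textures() << " queued, "
      << pgo->get_num_prepared_textures() << " prepared\n"
      << "samplers:       " << pgo->get_num_queued_samplers() << " queued, "
      << pgo->get_num_prepared_samplers() << " prepared\n"
      << "geoms:          " << pgo->get_num_queued_geoms() << " queued, "
      << pgo->get_num_prepared_geoms() << " prepared\n"
      << "shaders:        " << pgo->get_num_queued_shaders() << " queued, "
      << pgo->get_num_prepared_shaders() << " prepared\n"
      << "vertex buffers: " << pgo->get_num_queued_vertex_buffers() << " queued, "
      << pgo->get_num_prepared_vertex_buffers() << " prepared\n"
      << "index buffers:  " << pgo->get_num_queued_index_buffers() << " queued, "
      << pgo->get_num_prepared_index_buffers() << " prepared\n";
}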
Similar to MutexHolder, but for a reentrant mutex.
This is a special class object that holds a handle to the sampler state object given by the graphics back-end.
Represents a set of settings that indicate how a texture is sampled.
SamplerContext * prepare_now(PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg) const
Creates a context for the sampler on the particular GSG, if it does not already exist.
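prepare_now() on a SamplerState is const because the sampler itself is an immutable value; the per-GSG state lives in the returned SamplerContext, which is handed back with release_sampler() when no longer needed. A sketch, not from the sources:

#include "samplerState.h"
#include "samplerContext.h"
#include "preparedGraphicsObjects.h"
#include "graphicsStateGuardianBase.h"

void
use_sampler(const SamplerState &sampler, PreparedGraphicsObjects *pgo,
            GraphicsStateGuardianBase *gsg) {
  // Creates the context immediately if it does not already exist.
  SamplerContext *sc = sampler.prepare_now(pgo, gsg);

  // ... bind and render with the sampler via the GSG ...

  // Hand the context back; it is not necessarily deleted immediately.
  pgo->release_sampler(sc);
}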
This is the base class for all GSG-specific context objects, such as TextureContext and GeomContext.
This is a generic buffer object that lives in graphics memory.
The ShaderContext is meant to contain the compiled version of a shader string.
ShaderContext * prepare_now(PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the shader on the particular GSG, if it does not already exist.
Definition shader.cxx:3762
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the shader has already been prepared or enqueued for preparation on the indicated GSG, false otherwise.
Definition shader.cxx:3719
This is a special class object that holds all the information returned by a particular GSG to indicate the texture's internal context identifier.
Texture * get_texture() const
Returns the pointer to the associated Texture object.
int get_view() const
Returns the specific view of a multiview texture this context represents.
Represents a texture object, which is typically a single 2-d image but may also represent a 1-d or 3-d texture image, or the six 2-d faces of a cube map.
Definition texture.h:72
TextureContext * prepare_now(int view, PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the texture on the particular GSG, if it does not already exist.
Definition texture.cxx:1981
get_num_views
Returns the number of "views" in the texture.
Definition texture.h:355
bool release(PreparedGraphicsObjects *prepared_objects)
Frees the texture context only on the indicated object, if it exists there.
Definition texture.cxx:1573
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture has already been prepared or enqueued for preparation on the indicated GSG, false otherwise.
Definition texture.cxx:1444
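Because a multiview texture carries one context per view, Texture::prepare_now() takes the view index explicitly, and the returned TextureContext echoes it through get_view(). A sketch that prepares every view, assuming the graphics context is current:

#include "texture.h"
#include "textureContext.h"
#include "preparedGraphicsObjects.h"
#include "pnotify.h"

void
prepare_all_views(Texture *tex, PreparedGraphicsObjects *pgo,
                  GraphicsStateGuardianBase *gsg) {
  for (int view = 0; view < tex->get_num_views(); ++view) {
    TextureContext *tc = tex->prepare_now(view, pgo, gsg);
    // Each context remembers which view it represents.
    nassertv(tc == nullptr || tc->get_view() == view);
  }
}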
A thread; that is, a lightweight process.
Definition thread.h:46
TypeHandle is the identifier used to differentiate C++ class types.
Definition typeHandle.h:81
A base class for things which need to inherit from both TypedWritable and from ReferenceCount.
This is a special class object that holds all the information returned by a particular GSG to indicate the vertex data's internal context identifier.
GeomVertexArrayData * get_data() const
Returns the pointer to the client-side array data object.
This is our own Panda specialization on the default STL vector.
Definition pvector.h:42