Panda3D
pipelineCyclerTrueImpl.I
/**
 * PANDA 3D SOFTWARE
 * Copyright (c) Carnegie Mellon University. All rights reserved.
 *
 * All use of this software is subject to the terms of the revised BSD
 * license. You should have received a copy of this license along
 * with this source code in a file named "LICENSE."
 *
 * @file pipelineCyclerTrueImpl.I
 * @author drose
 * @date 2006-01-31
 */

/**
 * Grabs an overall lock on the cycler. Release it with a call to release().
 * This lock should be held while walking the list of stages.
 */
INLINE void PipelineCyclerTrueImpl::
acquire() {
  TAU_PROFILE("void PipelineCyclerTrueImpl::acquire()", " ", TAU_USER);
  _lock.acquire();
}

/**
 * Grabs an overall lock on the cycler. Release it with a call to release().
 * This lock should be held while walking the list of stages.
 */
INLINE void PipelineCyclerTrueImpl::
acquire(Thread *current_thread) {
  TAU_PROFILE("void PipelineCyclerTrueImpl::acquire(Thread *)", " ", TAU_USER);
  _lock.acquire(current_thread);
}

/**
 * Release the overall lock on the cycler that was grabbed via acquire().
 */
INLINE void PipelineCyclerTrueImpl::
release() {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release()", " ", TAU_USER);
  _lock.release();
}
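
// Illustrative usage sketch added to this listing (not part of the original
// file); names such as example_walk_stages are hypothetical.  It shows one
// plausible pairing of acquire()/release(): holding the overall lock while
// walking every stage of the cycler with the unlocked per-stage accessor.
#if 0  // sketch only; excluded from compilation
static void
example_walk_stages(PipelineCyclerTrueImpl &cycler) {
  cycler.acquire();
  for (int i = 0; i < cycler.get_num_stages(); ++i) {
    const CycleData *cdata = cycler.read_stage_unlocked(i);
    // ... inspect the data for stage i while the overall lock is held ...
    (void)cdata;
  }
  cycler.release();
}
#endif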

/**
 * Returns a const CycleData pointer, filled with the data for the current
 * stage of the pipeline as seen by this thread. No lock is made on the
 * contents; there is no guarantee that some other thread won't modify this
 * object's data while you are working on it. (However, the data within the
 * returned CycleData object itself is safe from modification; if another
 * thread modifies the data, it will perform a copy-on-write, and thereby
 * change the pointer stored within the object.)
 */
INLINE const CycleData *PipelineCyclerTrueImpl::
read_unlocked(Thread *current_thread) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read_unlocked(Thread *)", " ", TAU_USER);
  int pipeline_stage = current_thread->get_pipeline_stage();
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
#endif
  return _data[pipeline_stage]._cdata;
}
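
// Illustrative sketch added to this listing (not part of the original file);
// example_peek is a hypothetical name.  It shows a quick unlocked read: the
// returned CycleData object is safe from in-place modification, but the
// cycler may already hold a newer copy by the time the caller looks at it.
#if 0  // sketch only; excluded from compilation
static TypeHandle
example_peek(const PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  const CycleData *cdata = cycler.read_unlocked(current_thread);
  // No release call is needed: no lock or reference count was taken.
  return cdata->get_parent_type();
}
#endif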

/**
 * Returns a const CycleData pointer, filled with the data for the current
 * stage of the pipeline as seen by this thread. This pointer should
 * eventually be released by calling release_read().
 *
 * There should be no outstanding write pointers on the data when this
 * function is called.
 */
INLINE const CycleData *PipelineCyclerTrueImpl::
read(Thread *current_thread) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read(Thread *)", " ", TAU_USER);
  int pipeline_stage = current_thread->get_pipeline_stage();
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
#endif
  _lock.acquire(current_thread);
  return _data[pipeline_stage]._cdata;
}

/**
 * Increments the count on a pointer previously retrieved by read(); now the
 * pointer will need to be released twice.
 */
INLINE void PipelineCyclerTrueImpl::
increment_read(const CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::increment_read(const CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  int pipeline_stage = Thread::get_current_pipeline_stage();
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
#endif
  _lock.elevate_lock();
}

/**
 * Releases a pointer previously obtained via a call to read().
 */
INLINE void PipelineCyclerTrueImpl::
release_read(const CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_read(const CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  int pipeline_stage = Thread::get_current_pipeline_stage();
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
#endif
  _lock.release();
}
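
// Illustrative sketch added to this listing (not part of the original file);
// example_locked_read is a hypothetical name.  It shows the read()/
// release_read() pairing; the cycler's lock is held for the whole interval,
// so the critical section should be kept short.
#if 0  // sketch only; excluded from compilation
static void
example_locked_read(const PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  const CycleData *cdata = cycler.read(current_thread);
  // ... examine the data; no write pointer can be handed out meanwhile ...
  cycler.release_read(cdata);
}
#endif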

/**
 * Returns a non-const CycleData pointer, filled with a unique copy of the
 * data for the current stage of the pipeline as seen by this thread. This
 * pointer may now be used to write to the data, and that copy of the data
 * will be propagated to all later stages of the pipeline. This pointer
 * should eventually be released by calling release_write().
 *
 * There may only be one outstanding write pointer on a given stage at a time,
 * and if there is a write pointer there may be no read pointers on the same
 * stage (but see elevate_read).
 */
INLINE CycleData *PipelineCyclerTrueImpl::
write(Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::write(Thread *)", " ", TAU_USER);
  return write_stage(current_thread->get_pipeline_stage(), current_thread);
}
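
// Illustrative sketch added to this listing (not part of the original file);
// example_modify is a hypothetical name.  It shows the write()/release_write()
// pairing: the returned copy is unique to this stage and is propagated to
// later stages the next time the pipeline cycles.
#if 0  // sketch only; excluded from compilation
static void
example_modify(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  CycleData *cdata = cycler.write(current_thread);
  // ... downcast to the concrete CycleData subclass and modify it here ...
  cycler.release_write(cdata);
}
#endif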

/**
 * This special variant on write() will automatically propagate changes back
 * to upstream pipeline stages. If force_to_0 is false, then it propagates
 * back only as long as the CycleData pointers are equivalent, guaranteeing
 * that it does not modify upstream data (other than the modification that
 * will be performed by the code that returns this pointer). This is
 * particularly appropriate for minor updates, where it doesn't matter much if
 * the update is lost, such as storing a cached value.
 *
 * If force_to_0 is true, then the CycleData pointer for the current pipeline
 * stage is propagated all the way back up to stage 0; after this call, there
 * will be only one CycleData pointer that is duplicated in all stages between
 * stage 0 and the current stage. This may undo some recent changes that were
 * made independently at pipeline stage 0 (or any other upstream stage).
 * However, it guarantees that the change that is to be applied at this
 * pipeline stage will stick. This is slightly dangerous because of the risk
 * of losing upstream changes; generally, this should only be done when you
 * are confident that there are no upstream changes to be lost (for instance,
 * for an object that has been recently created).
 */
INLINE CycleData *PipelineCyclerTrueImpl::
write_upstream(bool force_to_0, Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::write_upstream(bool, Thread *)", " ", TAU_USER);
  return write_stage_upstream(current_thread->get_pipeline_stage(), force_to_0,
                              current_thread);
}
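
// Illustrative sketch added to this listing (not part of the original file);
// example_update_cache is a hypothetical name.  With force_to_0 false,
// write_upstream() is suitable for minor updates such as cached values: the
// change is copied upstream only while the upstream pointers are still
// shared, so independent upstream edits are never clobbered.
#if 0  // sketch only; excluded from compilation
static void
example_update_cache(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  CycleData *cdata = cycler.write_upstream(false, current_thread);
  // ... store the freshly computed cached value in cdata ...
  cycler.release_write(cdata);
}
#endif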

/**
 * Elevates a currently-held read pointer into a write pointer. This may or
 * may not change the value of the pointer. It is only valid to do this if
 * this is the only currently-outstanding read pointer on the current stage.
 */
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read(const CycleData *pointer, Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read(const CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  int pipeline_stage = current_thread->get_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
#endif
  CycleData *new_pointer = write(current_thread);
  _lock.release();
  return new_pointer;
}
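
// Illustrative sketch added to this listing (not part of the original file);
// example_read_then_modify is a hypothetical name.  It shows elevating a
// read pointer once a modification turns out to be necessary; the elevated
// pointer (which may differ from the original) is then released with
// release_write() instead of release_read().
#if 0  // sketch only; excluded from compilation
static void
example_read_then_modify(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  const CycleData *cdata = cycler.read(current_thread);
  // Hypothetical test of whether the data actually needs changing.
  bool needs_change = (cdata != nullptr);
  if (needs_change) {
    CycleData *cdataw = cycler.elevate_read(cdata, current_thread);
    // ... modify cdataw ...
    cycler.release_write(cdataw);
  } else {
    cycler.release_read(cdata);
  }
}
#endif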

/**
 * Elevates a currently-held read pointer into a write pointer, like
 * elevate_read(), but also propagates the pointer back to upstream stages,
 * like write_upstream().
 */
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read_upstream(const CycleData *pointer, bool force_to_0, Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read_upstream(const CycleData *, bool)", " ", TAU_USER);
#ifdef _DEBUG
  int pipeline_stage = current_thread->get_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
#endif
  CycleData *new_pointer = write_upstream(force_to_0, current_thread);
  _lock.release();
  return new_pointer;
}

/**
 * Increments the count on a pointer previously retrieved by write(); now the
 * pointer will need to be released twice.
 */
INLINE void PipelineCyclerTrueImpl::
increment_write(CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::increment_write(CycleData *)", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
#ifdef _DEBUG
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
#endif
  ++(_data[pipeline_stage]._writes_outstanding);
  _lock.elevate_lock();
}

/**
 * Releases a pointer previously obtained via a call to write().
 */
INLINE void PipelineCyclerTrueImpl::
release_write(CycleData *pointer) {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_write(CycleData *)", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
  return release_write_stage(pipeline_stage, pointer);
}
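
// Illustrative sketch added to this listing (not part of the original file);
// example_shared_write is a hypothetical name.  increment_write() lets a
// second holder share an outstanding write pointer, after which
// release_write() must be called twice in total.
#if 0  // sketch only; excluded from compilation
static void
example_shared_write(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  CycleData *cdata = cycler.write(current_thread);
  cycler.increment_write(cdata);   // hand a second reference to other code
  // ... the first holder finishes with the pointer ...
  cycler.release_write(cdata);
  // ... the second holder finishes with the pointer ...
  cycler.release_write(cdata);
}
#endif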

/**
 * Returns the number of stages in the pipeline.
 */
INLINE int PipelineCyclerTrueImpl::
get_num_stages() {
  return _num_stages;
}

/**
 * Returns a const CycleData pointer, filled with the data for the indicated
 * stage of the pipeline. As in read_unlocked(), no lock is held on the
 * returned pointer.
 */
INLINE const CycleData *PipelineCyclerTrueImpl::
read_stage_unlocked(int pipeline_stage) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read_stage_unlocked(int)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  return _data[pipeline_stage]._cdata;
}

/**
 * Returns a const CycleData pointer, filled with the data for the indicated
 * stage of the pipeline. This pointer should eventually be released by
 * calling release_read_stage().
 *
 * There should be no outstanding write pointers on the data when this
 * function is called.
 */
INLINE const CycleData *PipelineCyclerTrueImpl::
read_stage(int pipeline_stage, Thread *current_thread) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read_stage(int, Thread *)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  _lock.acquire(current_thread);
  return _data[pipeline_stage]._cdata;
}

/**
 * Releases a pointer previously obtained via a call to read_stage().
 */
INLINE void PipelineCyclerTrueImpl::
release_read_stage(int pipeline_stage, const CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_read_stage(int, const CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
#endif
  _lock.release();
}
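
// Illustrative sketch added to this listing (not part of the original file);
// example_read_stage is a hypothetical name.  It shows reading an explicitly
// chosen pipeline stage rather than the stage associated with the current
// thread.
#if 0  // sketch only; excluded from compilation
static void
example_read_stage(const PipelineCyclerTrueImpl &cycler, int stage,
                   Thread *current_thread) {
  const CycleData *cdata = cycler.read_stage(stage, current_thread);
  // ... examine the data held at the indicated stage ...
  cycler.release_read_stage(stage, cdata);
}
#endif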

/**
 * Elevates a currently-held read pointer into a write pointer. This may or
 * may not change the value of the pointer. It is only valid to do this if
 * this is the only currently-outstanding read pointer on the indicated stage.
 */
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read_stage(int pipeline_stage, const CycleData *pointer,
                   Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read_stage(int, const CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  CycleData *new_pointer = write_stage(pipeline_stage, current_thread);
  _lock.release();
  return new_pointer;
}

/**
 * Elevates a currently-held read pointer into a write pointer, like
 * elevate_read_stage(), but also propagates the pointer back to upstream
 * stages, like write_stage_upstream(). It is only valid to do this if this
 * is the only currently-outstanding read pointer on the indicated stage.
 */
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read_stage_upstream(int pipeline_stage, const CycleData *pointer,
                            bool force_to_0, Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read_stage_upstream(int, const CycleData *, bool)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  CycleData *new_pointer =
    write_stage_upstream(pipeline_stage, force_to_0, current_thread);
  _lock.release();
  return new_pointer;
}
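
// Illustrative sketch added to this listing (not part of the original file);
// example_fix_stage is a hypothetical name.  It elevates a per-stage read
// pointer and forces the result back to stage 0, so the correction applied
// here is guaranteed to stick (at the risk of discarding independent
// upstream changes).
#if 0  // sketch only; excluded from compilation
static void
example_fix_stage(PipelineCyclerTrueImpl &cycler, int stage,
                  Thread *current_thread) {
  const CycleData *cdata = cycler.read_stage(stage, current_thread);
  CycleData *cdataw =
    cycler.elevate_read_stage_upstream(stage, cdata, true, current_thread);
  // ... apply the correction; the same pointer is now shared back to stage 0 ...
  cycler.release_write_stage(stage, cdataw);
}
#endif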

/**
 * Releases a pointer previously obtained via a call to write_stage().
 */
INLINE void PipelineCyclerTrueImpl::
release_write_stage(int pipeline_stage, CycleData *pointer) {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_write_stage(int, const CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
  nassertv(_data[pipeline_stage]._writes_outstanding > 0);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  --(_data[pipeline_stage]._writes_outstanding);
  _lock.release();
}

/**
 * Returns the type of object that owns this cycler, as reported by
 * CycleData::get_parent_type().
 */
INLINE TypeHandle PipelineCyclerTrueImpl::
get_parent_type() const {
  return _data[0]._cdata->get_parent_type();
}

/**
 * Returns a pointer without counting it. This is only intended for use as
 * the return value for certain nassertr() functions, so the application can
 * recover after a failure to manage the read and write pointers correctly.
 * You should never call this function directly.
 */
INLINE CycleData *PipelineCyclerTrueImpl::
cheat() const {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::cheat()", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  return _data[pipeline_stage]._cdata;
}
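
// Note added for this listing: the intended pattern is an assert of the form
//   nassertr(some_invariant, cheat());
// inside the cycler's own accessors, so that a usable pointer can still be
// returned after a pointer-management error.  Application code should not
// call cheat() itself.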

/**
 * Returns the number of handles currently outstanding to read the current
 * stage of the data. This should only be used for debugging purposes.
 */
INLINE int PipelineCyclerTrueImpl::
get_read_count() const {
  return 0;
}

/**
 * Returns the number of handles currently outstanding to write the current
 * stage of the data. This will normally only be either 0 or 1. This should
 * only be used for debugging purposes.
 */
INLINE int PipelineCyclerTrueImpl::
get_write_count() const {
  return 0;
}

/**
 * This is a special implementation of cycle() for the special case of just
 * two stages to the pipeline. It does the same thing as cycle(), but is a
 * little bit faster because it knows there are exactly two stages.
 */
INLINE PT(CycleData) PipelineCyclerTrueImpl::
cycle_2() {
  TAU_PROFILE("PT(CycleData) PipelineCyclerTrueImpl::cycle_2()", " ", TAU_USER);

  // This trick moves an NPT into a PT without unnecessarily incrementing and
  // subsequently decrementing the regular reference count.
  PT(CycleData) last_val;
  last_val.swap(_data[1]._cdata);
  last_val->node_unref_only();

  nassertr(_lock.debug_is_locked(), last_val);
  nassertr(_dirty, last_val);
  nassertr(_num_stages == 2, last_val);

  nassertr(_data[1]._writes_outstanding == 0, last_val);
  _data[1]._cdata = _data[0]._cdata;

  // No longer dirty.
  _dirty = 0;
  return last_val;
}

/**
 * This is a special implementation of cycle() for the special case of exactly
 * three stages to the pipeline. It does the same thing as cycle(), but is a
 * little bit faster because it knows there are exactly three stages.
 */
INLINE PT(CycleData) PipelineCyclerTrueImpl::
cycle_3() {
  TAU_PROFILE("PT(CycleData) PipelineCyclerTrueImpl::cycle_3()", " ", TAU_USER);

  // This trick moves an NPT into a PT without unnecessarily incrementing and
  // subsequently decrementing the regular reference count.
  PT(CycleData) last_val;
  last_val.swap(_data[2]._cdata);
  last_val->node_unref_only();

  nassertr(_lock.debug_is_locked(), last_val);
  nassertr(_dirty, last_val);
  nassertr(_num_stages == 3, last_val);

  nassertr(_data[2]._writes_outstanding == 0, last_val);
  nassertr(_data[1]._writes_outstanding == 0, last_val);
  _data[2]._cdata = _data[1]._cdata;
  _data[1]._cdata = _data[0]._cdata;

  if (_data[2]._cdata == _data[1]._cdata) {
    // No longer dirty.
    _dirty = 0;
  }

  return last_val;
}
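
// Illustrative sketch added to this listing (not part of the original file),
// showing roughly what the generic cycle() does for an arbitrary number of
// stages; cycle_2() and cycle_3() above are the unrolled two- and three-stage
// versions.  The asserts and dirty-flag bookkeeping of the real
// implementation are omitted here.
#if 0  // sketch only; excluded from compilation
PT(CycleData) last_val;
last_val.swap(_data[_num_stages - 1]._cdata);
last_val->node_unref_only();
for (int i = _num_stages - 1; i > 0; --i) {
  _data[i]._cdata = _data[i - 1]._cdata;
}
return last_val;
#endif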

/**
 *
 */
INLINE PipelineCyclerTrueImpl::CyclerMutex::
CyclerMutex(PipelineCyclerTrueImpl *cycler) {
#ifdef DEBUG_THREADS
  _cycler = cycler;
#endif
}

/**
 *
 */
INLINE PipelineCyclerTrueImpl::CycleDataNode::
CycleDataNode() :
  _writes_outstanding(0)
{
}

/**
 *
 */
INLINE PipelineCyclerTrueImpl::CycleDataNode::
CycleDataNode(const PipelineCyclerTrueImpl::CycleDataNode &copy) :
  _cdata(copy._cdata),
  _writes_outstanding(0)
{
}

/**
 *
 */
INLINE PipelineCyclerTrueImpl::CycleDataNode::
~CycleDataNode() {
  nassertv(_writes_outstanding == 0);
}

/**
 *
 */
INLINE void PipelineCyclerTrueImpl::CycleDataNode::
operator = (const PipelineCyclerTrueImpl::CycleDataNode &copy) {
  _cdata = copy._cdata;
}