INLINE void PipelineCyclerTrueImpl::
acquire() {
  TAU_PROFILE("void PipelineCyclerTrueImpl::acquire()", " ", TAU_USER);
  _lock.acquire();
}
INLINE void PipelineCyclerTrueImpl::
acquire(Thread *current_thread) {
  TAU_PROFILE("void PipelineCyclerTrueImpl::acquire(Thread *)", " ", TAU_USER);
  _lock.acquire(current_thread);
}
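// Usage sketch (illustrative only; `cycler` is a hypothetical
// PipelineCyclerTrueImpl instance): acquire()/release() bracket any direct
// walk over the stage data, mirroring how _lock is used by the methods here.
#if 0
static void example_hold_cycler(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  cycler.acquire(current_thread);   // grab the overall cycler lock
  // ... inspect or walk the stages while the lock is held ...
  cycler.release();                 // release the same lock when done
}
#endif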
INLINE void PipelineCyclerTrueImpl::
release() {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release()", " ", TAU_USER);
  _lock.release();
}
INLINE const CycleData *PipelineCyclerTrueImpl::
read_unlocked(Thread *current_thread) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read_unlocked(Thread *)", " ", TAU_USER);
  int pipeline_stage = current_thread->get_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  return _data[pipeline_stage]._cdata;
}
INLINE const CycleData *PipelineCyclerTrueImpl::
read(Thread *current_thread) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read(Thread *)", " ", TAU_USER);
  int pipeline_stage = current_thread->get_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  _lock.acquire(current_thread);
  return _data[pipeline_stage]._cdata;
}
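// Usage sketch (illustrative only; `cycler` is hypothetical): read() returns
// the CycleData for the calling thread's stage and leaves _lock held, so each
// successful read() must be paired with release_read() on the same pointer.
#if 0
static void example_read(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  const CycleData *cdata = cycler.read(current_thread);  // acquires _lock
  // ... examine cdata ...
  cycler.release_read(cdata);                            // releases _lock
}
#endif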
INLINE void PipelineCyclerTrueImpl::
increment_read(const CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::increment_read(const CycleData *)", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
  _lock.elevate_lock();
}
INLINE void PipelineCyclerTrueImpl::
release_read(const CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_read(const CycleData *)", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
  _lock.release();
}
INLINE CycleData *PipelineCyclerTrueImpl::
write(Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::write(Thread *)", " ", TAU_USER);
  return write_stage(current_thread->get_pipeline_stage(), current_thread);
}
INLINE CycleData *PipelineCyclerTrueImpl::
write_upstream(bool force_to_0, Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::write_upstream(bool, Thread *)", " ", TAU_USER);
  return write_stage_upstream(current_thread->get_pipeline_stage(), force_to_0, current_thread);
}
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read(const CycleData *pointer, Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read(const CycleData *)", " ", TAU_USER);
  int pipeline_stage = current_thread->get_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
  CycleData *new_pointer = write(current_thread);
  _lock.release();
  return new_pointer;
}
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read_upstream(const CycleData *pointer, bool force_to_0,
                      Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read_upstream(const CycleData *, bool)", " ", TAU_USER);
  int pipeline_stage = current_thread->get_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
  CycleData *new_pointer = write_upstream(force_to_0, current_thread);
  _lock.release();
  return new_pointer;
}
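// Usage sketch (illustrative only; `cycler` is hypothetical): elevate_read()
// upgrades an outstanding read pointer to a writable pointer without dropping
// the data in between; the result is then released like any write pointer.
#if 0
static void example_elevate(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  const CycleData *cdata = cycler.read(current_thread);
  CycleData *wdata = cycler.elevate_read(cdata, current_thread);
  // ... modify wdata ...
  cycler.release_write(wdata);
}
#endif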
INLINE void PipelineCyclerTrueImpl::
increment_write(CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::increment_write(CycleData *)", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
  ++(_data[pipeline_stage]._writes_outstanding);
  _lock.elevate_lock();
}
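// Usage sketch (illustrative only; `cycler` is hypothetical): each write
// pointer is tracked by the stage's _writes_outstanding counter, so a call to
// increment_write() means the same pointer must be released one extra time.
#if 0
static void example_write(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  CycleData *cdata = cycler.write(current_thread);  // acquires the lock for writing
  cycler.increment_write(cdata);                    // pointer now needs two releases
  cycler.release_write(cdata);
  cycler.release_write(cdata);
}
#endif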
INLINE void PipelineCyclerTrueImpl::
release_write(CycleData *pointer) {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_write(CycleData *)", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
  return release_write_stage(pipeline_stage, pointer);
}
INLINE int PipelineCyclerTrueImpl::
INLINE const CycleData *PipelineCyclerTrueImpl::
read_stage_unlocked(int pipeline_stage) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read_stage_unlocked(int)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  return _data[pipeline_stage]._cdata;
}
INLINE const CycleData *PipelineCyclerTrueImpl::
read_stage(int pipeline_stage, Thread *current_thread) const {
  TAU_PROFILE("const CycleData *PipelineCyclerTrueImpl::read_stage(int, Thread *)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  _lock.acquire(current_thread);
  return _data[pipeline_stage]._cdata;
}
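// Usage sketch (illustrative only; `cycler` and the stage index are
// hypothetical): the *_stage variants take an explicit pipeline stage instead
// of deriving it from the thread, which is useful when examining an upstream
// stage such as stage 0.
#if 0
static void example_read_stage(PipelineCyclerTrueImpl &cycler, Thread *current_thread) {
  const int pipeline_stage = 0;  // most upstream stage
  const CycleData *cdata = cycler.read_stage(pipeline_stage, current_thread);
  // ... examine that stage's data ...
  cycler.release_read_stage(pipeline_stage, cdata);
}
#endif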
INLINE void PipelineCyclerTrueImpl::
release_read_stage(int pipeline_stage, const CycleData *pointer) const {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_read_stage(int, const CycleData *)", " ", TAU_USER);
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
  _lock.release();
}
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read_stage(int pipeline_stage, const CycleData *pointer,
                   Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read_stage(int, const CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  CycleData *new_pointer = write_stage(pipeline_stage, current_thread);
  _lock.release();
  return new_pointer;
}
INLINE CycleData *PipelineCyclerTrueImpl::
elevate_read_stage_upstream(int pipeline_stage, const CycleData *pointer,
                            bool force_to_0, Thread *current_thread) {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::elevate_read_stage_upstream(int, const CycleData *, bool)", " ", TAU_USER);
#ifdef _DEBUG
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  nassertr(_data[pipeline_stage]._cdata == pointer, nullptr);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  CycleData *new_pointer =
    write_stage_upstream(pipeline_stage, force_to_0, current_thread);
  _lock.release();
  return new_pointer;
}
INLINE void PipelineCyclerTrueImpl::
release_write_stage(int pipeline_stage, CycleData *pointer) {
  TAU_PROFILE("void PipelineCyclerTrueImpl::release_write_stage(int, CycleData *)", " ", TAU_USER);
#ifdef _DEBUG
  nassertv(pipeline_stage >= 0 && pipeline_stage < _num_stages);
  nassertv(_data[pipeline_stage]._cdata == pointer);
  nassertv(_data[pipeline_stage]._writes_outstanding > 0);
#elif defined(__has_builtin) && __has_builtin(__builtin_assume)
  __builtin_assume(pipeline_stage >= 0);
#endif
  --(_data[pipeline_stage]._writes_outstanding);
  _lock.release();
}
INLINE TypeHandle PipelineCyclerTrueImpl::
get_parent_type() const {
  return _data[0]._cdata->get_parent_type();
}
INLINE CycleData *PipelineCyclerTrueImpl::
cheat() const {
  TAU_PROFILE("CycleData *PipelineCyclerTrueImpl::cheat()", " ", TAU_USER);
  int pipeline_stage = Thread::get_current_pipeline_stage();
  nassertr(pipeline_stage >= 0 && pipeline_stage < _num_stages, nullptr);
  return _data[pipeline_stage]._cdata;
}
INLINE int PipelineCyclerTrueImpl::
get_read_count() const {
INLINE int PipelineCyclerTrueImpl::
get_write_count() const {
INLINE PT(CycleData) PipelineCyclerTrueImpl::
cycle_2() {
  TAU_PROFILE("PT(CycleData) PipelineCyclerTrueImpl::cycle_2()", " ", TAU_USER);

  PT(CycleData) last_val;
  last_val.swap(_data[1]._cdata);
  last_val->node_unref_only();

  nassertr(_lock.debug_is_locked(), last_val);
  nassertr(_dirty, last_val);
  nassertr(_num_stages == 2, last_val);

  nassertr(_data[1]._writes_outstanding == 0, last_val);
  _data[1]._cdata = _data[0]._cdata;
INLINE PT(CycleData) PipelineCyclerTrueImpl::
cycle_3() {
  TAU_PROFILE("PT(CycleData) PipelineCyclerTrueImpl::cycle_3()", " ", TAU_USER);

  PT(CycleData) last_val;
  last_val.swap(_data[2]._cdata);
  last_val->node_unref_only();

  nassertr(_lock.debug_is_locked(), last_val);
  nassertr(_dirty, last_val);
  nassertr(_num_stages == 3, last_val);

  nassertr(_data[2]._writes_outstanding == 0, last_val);
  nassertr(_data[1]._writes_outstanding == 0, last_val);
  _data[2]._cdata = _data[1]._cdata;
  _data[1]._cdata = _data[0]._cdata;

  if (_data[2]._cdata == _data[1]._cdata) {
INLINE PipelineCyclerTrueImpl::CyclerMutex::
CyclerMutex(PipelineCyclerTrueImpl *cycler) {
INLINE PipelineCyclerTrueImpl::CycleDataNode::
CycleDataNode() :
  _writes_outstanding(0)
{
}
INLINE PipelineCyclerTrueImpl::CycleDataNode::
CycleDataNode(const PipelineCyclerTrueImpl::CycleDataNode &copy) :
  _cdata(copy._cdata),
  _writes_outstanding(0)
{
}
INLINE PipelineCyclerTrueImpl::CycleDataNode::
~CycleDataNode() {
  nassertv(_writes_outstanding == 0);
}
INLINE void PipelineCyclerTrueImpl::CycleDataNode::
operator = (const PipelineCyclerTrueImpl::CycleDataNode &copy) {
  _cdata = copy._cdata;
}
/*
 * Related symbols referenced above:
 *
 *   CycleData - a single page of data maintained by a PipelineCycler.
 *   Thread - a thread; that is, a lightweight process.
 *   Thread::get_pipeline_stage() - returns the pipeline stage number
 *     associated with this thread.
 *   Thread::get_current_pipeline_stage() - returns the integer pipeline
 *     stage associated with the current thread.
 *   PipelineCyclerTrueImpl::cycle_2() - a special implementation of cycle()
 *     for the special case of just two stages to the pipeline.
 *   TypeHandle - the identifier used to differentiate C++ class types.
 */
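// Usage sketch (illustrative only; `cycler` is hypothetical): a thread's
// pipeline stage, as returned by Thread::get_pipeline_stage(), selects which
// CycleData copy in _data[] that thread sees through the accessors above.
#if 0
static void example_current_stage(PipelineCyclerTrueImpl &cycler) {
  Thread *current_thread = Thread::get_current_thread();
  int pipeline_stage = current_thread->get_pipeline_stage();
  const CycleData *cdata = cycler.read_stage(pipeline_stage, current_thread);
  // ... same data that cycler.read(current_thread) would return ...
  cycler.release_read_stage(pipeline_stage, cdata);
}
#endif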