/**
 * PANDA 3D SOFTWARE
 * Copyright (c) Carnegie Mellon University.  All rights reserved.
 */

#include "pipelineCyclerTrueImpl.h"

#ifdef THREADED_PIPELINE

#include "config_pipeline.h"
#include "pipeline.h"

/**
 * Creates the cycler, with every stage initially sharing the given data.
 */
PipelineCyclerTrueImpl::
PipelineCyclerTrueImpl(CycleData *initial_data, Pipeline *pipeline) :
  _pipeline(pipeline),
  _dirty(0),
  _lock(this)
{
  if (_pipeline == nullptr) {
    _pipeline = Pipeline::get_render_pipeline();
  }

  _num_stages = _pipeline->get_num_stages();
  _data = new CycleDataNode[_num_stages];

  // Every stage starts out sharing the same CycleData pointer; a stage only
  // gets its own copy when it is first written to (copy-on-write).
  for (int i = 0; i < _num_stages; ++i) {
    _data[i]._cdata = initial_data;
  }

  _pipeline->add_cycler(this);
}
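
/**
 * Copies the cycler.  Each stage of the copy gets a fresh copy of the
 * corresponding CycleData, and stages that shared a pointer in the original
 * share a pointer in the copy as well.
 */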
PipelineCyclerTrueImpl::
PipelineCyclerTrueImpl(const PipelineCyclerTrueImpl &copy) :
  _pipeline(copy._pipeline),
  _dirty(0),
  _lock(this)
{
  ReMutexHolder holder(_lock);
  ReMutexHolder holder2(copy._lock);

  _num_stages = _pipeline->get_num_stages();
  nassertv(_num_stages == copy._num_stages);
  _data = new CycleDataNode[_num_stages];

  if (_num_stages == 1) {
    // With only one stage, there is only one pointer to copy.
    _data[0]._cdata = copy._data[0]._cdata->make_copy();

  } else {
    // Map each original pointer to its copy, so that stages sharing a
    // CycleData object in the original also share its copy.
    typedef pmap<CycleData *, PT(CycleData) > Pointers;
    Pointers pointers;

    for (int i = 0; i < _num_stages; ++i) {
      PT(CycleData) &new_pt = pointers[copy._data[i]._cdata];
      if (new_pt == nullptr) {
        new_pt = copy._data[i]._cdata->make_copy();
      }
      _data[i]._cdata = new_pt.p();
    }
  }

  _pipeline->add_cycler(this, copy._dirty != 0);
}
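
/**
 * Assigns the other cycler's data to this one, stage by stage, using the
 * same pointer-sharing scheme as the copy constructor.  Marks this cycler
 * dirty if the source was dirty.
 */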
void PipelineCyclerTrueImpl::
operator = (const PipelineCyclerTrueImpl &copy) {
  ReMutexHolder holder1(_lock);
  ReMutexHolder holder2(copy._lock);
  nassertv(get_parent_type() == copy.get_parent_type());

  typedef pmap<CycleData *, PT(CycleData) > Pointers;
  Pointers pointers;

  for (int i = 0; i < _num_stages; ++i) {
    PT(CycleData) &new_pt = pointers[copy._data[i]._cdata];
    if (new_pt == nullptr) {
      new_pt = copy._data[i]._cdata->make_copy();
    }
    _data[i]._cdata = new_pt.p();
  }

  if (copy._dirty && !_dirty) {
    _pipeline->add_dirty_cycler(this);
  }
}
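
/**
 * Removes the cycler from the pipeline and frees the per-stage data array.
 */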
PipelineCyclerTrueImpl::
~PipelineCyclerTrueImpl() {
  ReMutexHolder holder(_lock);

  _pipeline->remove_cycler(this);

  delete[] _data;
  _data = nullptr;
  _num_stages = 0;
}
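
/**
 * Returns a pointer to the data, modifiable, at the indicated stage of the
 * pipeline.  The lock is acquired here and remains held on return; the
 * caller is expected to pair this call with release_write_stage(), defined
 * in the corresponding inline header.  An illustrative pairing, assuming
 * that inline counterpart (not shown in this file):
 *
 *   CycleData *cdata = cycler.write_stage(stage, current_thread);
 *   // ... mutate the stage's data through cdata ...
 *   cycler.release_write_stage(stage, cdata);
 */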
CycleData *PipelineCyclerTrueImpl::
write_stage(int pipeline_stage, Thread *current_thread) {
  _lock.acquire(current_thread);

#ifndef NDEBUG
  nassertd(pipeline_stage >= 0 && pipeline_stage < _num_stages) {
    _lock.release();
    return nullptr;
  }
#endif  // NDEBUG

  CycleData *old_data = _data[pipeline_stage]._cdata;

  // Only perform copy-on-write for the first outstanding write at this
  // stage; nested writes reuse the same copy.
  if (_data[pipeline_stage]._writes_outstanding == 0) {
    // Copy the data only if someone else is also holding a pointer to it.
    if (old_data->get_node_ref_count() != 1) {
      // Copy-on-write.
      _data[pipeline_stage]._cdata = old_data->make_copy();
      if (pipeline_cat.is_debug()) {
        pipeline_cat.debug()
          << "Copy-on-write a: " << old_data << " becomes "
          << _data[pipeline_stage]._cdata << "\n";
      }

      // Now some of the stages hold different pointers, so we're "dirty".
      // Mark it so.
      if (!_dirty && _num_stages != 1) {
        _pipeline->add_dirty_cycler(this);
      }
    }
  }

  ++(_data[pipeline_stage]._writes_outstanding);
  return _data[pipeline_stage]._cdata;
}
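
/**
 * This variant on write_stage() automatically propagates the new pointer
 * back into the upstream stages that were still sharing the old data, so
 * the modification appears there too (or all the way back to stage 0 when
 * force_to_0 is set).  For illustration: with stage pointers [A, A, A],
 * writing stage 1 upstream yields [B, B, A], where B is the copy-on-write
 * copy of A.
 */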
CycleData *PipelineCyclerTrueImpl::
write_stage_upstream(int pipeline_stage, bool force_to_0,
                     Thread *current_thread) {
  _lock.acquire(current_thread);

#ifndef NDEBUG
  nassertd(pipeline_stage >= 0 && pipeline_stage < _num_stages) {
    _lock.release();
    return nullptr;
  }
#endif  // NDEBUG

  CycleData *old_data = _data[pipeline_stage]._cdata;

  if (old_data->get_ref_count() != 1 || force_to_0) {
    // Count the references held by anyone other than the run of upstream
    // stages ending at this one.
    int external_count = old_data->get_ref_count() - 1;
    int k = pipeline_stage - 1;
    while (k >= 0 && _data[k]._cdata == old_data) {
      --k;
      --external_count;
    }

    // Only copy-on-write if there are references *other* than the upstream
    // stages we are about to remap anyway.
    if (external_count > 0 && _data[pipeline_stage]._writes_outstanding == 0) {
      // Copy-on-write.
      PT(CycleData) new_data = old_data->make_copy();
      if (pipeline_cat.is_debug()) {
        pipeline_cat.debug()
          << "Copy-on-write b: " << old_data << " becomes "
          << new_data << "\n";
      }

      // Remap all of the upstream stages that pointed to the old data so
      // that they now point to the new data.
      k = pipeline_stage - 1;
      while (k >= 0 && (_data[k]._cdata == old_data || force_to_0)) {
        nassertr(_data[k]._writes_outstanding == 0, nullptr);
        _data[k]._cdata = new_data.p();
        --k;
      }

      _data[pipeline_stage]._cdata = new_data;

      if (k >= 0 || pipeline_stage + 1 < _num_stages) {
        // Now some of the stages hold different pointers, so we're "dirty".
        // Mark it so.
        if (!_dirty && _num_stages != 1) {
          _pipeline->add_dirty_cycler(this);
        }
      }

    } else if (k >= 0 && force_to_0) {
      // There are no external pointers, so no need to copy-on-write, but
      // the pointer doesn't go all the way back to stage 0.  Make it do so.
      while (k >= 0) {
        nassertr(_data[k]._writes_outstanding == 0, nullptr);
        _data[k]._cdata = old_data;
        --k;
      }
    }
  }

  ++(_data[pipeline_stage]._writes_outstanding);
  return _data[pipeline_stage]._cdata;
}
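
/**
 * Cycles the data between frames: each stage's pointer moves one stage
 * downstream, and the pointer that falls off the last stage is returned so
 * the caller can release it outside the lock.  Clears the dirty flag if all
 * stages share the same pointer afterwards.
 */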
PT(CycleData) PipelineCyclerTrueImpl::
cycle() {
  // This trick moves an NPT into a PT without unnecessarily incrementing
  // and subsequently decrementing the regular reference count.
  PT(CycleData) last_val;
  last_val.swap(_data[_num_stages - 1]._cdata);
  last_val->node_unref_only();

  nassertr(_lock.debug_is_locked(), last_val);
  nassertr(_dirty, last_val);

  int i;
  for (i = _num_stages - 1; i > 0; --i) {
    nassertr(_data[i]._writes_outstanding == 0, last_val);
    _data[i]._cdata = _data[i - 1]._cdata;
  }

  for (i = 1; i < _num_stages; ++i) {
    if (_data[i]._cdata != _data[i - 1]._cdata) {
      // Still dirty; leave the dirty flag set.
      return last_val;
    }
  }

  // All stages now share the same pointer; no longer dirty.
  _dirty = 0;
  return last_val;
}
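
/**
 * Changes the number of stages in the cycler.  The caller must already be
 * holding the lock, per the assertion below.
 */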
void PipelineCyclerTrueImpl::
set_num_stages(int num_stages) {
  nassertv(_lock.debug_is_locked());

  if (num_stages <= _num_stages) {
    // Don't bother to reallocate the array smaller; just clear out the
    // stages being dropped, from num_stages up to the old _num_stages, and
    // stop using that tail of the array.
    for (int i = num_stages; i < _num_stages; ++i) {
      nassertv(_data[i]._writes_outstanding == 0);
      _data[i]._cdata.clear();
    }

    _num_stages = num_stages;

  } else {
    // To increase the number of stages, we must reallocate the array
    // larger.  The new stages start out sharing the last stage's pointer.
    CycleDataNode *new_data = new CycleDataNode[num_stages];
    int i;
    for (i = 0; i < _num_stages; ++i) {
      nassertv(_data[i]._writes_outstanding == 0);
      new_data[i]._cdata = _data[i]._cdata;
    }
    for (i = _num_stages; i < num_stages; ++i) {
      new_data[i]._cdata = _data[_num_stages - 1]._cdata;
    }
    delete[] _data;

    _num_stages = num_stages;
    _data = new_data;
  }
}

#ifdef DEBUG_THREADS
/**
 * Formats the mutex for debug output, naming the cycler's current data.
 */
void PipelineCyclerTrueImpl::CyclerMutex::
output(std::ostream &out) const {
  out << "CyclerMutex ";
  _cycler->cheat()->output(out);
}
#endif  // DEBUG_THREADS

#endif  // THREADED_PIPELINE