15 #include "asyncTaskChain.h"
16 #include "asyncTaskManager.h"
18 #include "mutexHolder.h"
20 #include "pStatClient.h"
21 #include "pStatTimer.h"
22 #include "clockObject.h"
23 #include "config_event.h"
41 _cvar(manager->_lock),
43 _timeslice_priority(false),
45 _thread_priority(TP_normal),
51 _current_sort(-INT_MAX),
53 _needs_cleanup(false),
56 _block_till_next_frame(false)
86 _tick_clock = tick_clock;
109 nassertv(num_threads >= 0);
111 if (task_cat.is_debug()) {
112 do_output(task_cat.debug());
113 task_cat.debug(
false)
114 <<
": set_num_threads(" << num_threads <<
")\n";
122 if (_num_threads != num_threads) {
124 _num_threads = num_threads;
126 if (_num_tasks != 0) {
156 return _threads.size();
169 if (_thread_priority != priority) {
171 _thread_priority = priority;
173 if (_num_tasks != 0) {
188 return _thread_priority;
206 _frame_budget = frame_budget;
219 return _frame_budget;
245 _frame_sync = frame_sync;
286 _timeslice_priority = timeslice_priority;
300 return _timeslice_priority;
313 if (_state == S_started || _state == S_interrupted) {
329 if (_state == S_initial || _state == S_interrupted) {
345 if (task->_chain !=
this) {
346 nassertr(!do_has_task(task),
false);
350 if (task->_state == AsyncTask::S_servicing_removed) {
406 return do_get_active_tasks();
419 return do_get_sleeping_tasks();
451 return do_get_next_wake_time();
459 void AsyncTaskChain::
460 output(ostream &out)
const {
470 void AsyncTaskChain::
471 write(ostream &out,
int indent_level)
const {
473 do_write(out, indent_level);
486 void AsyncTaskChain::
488 nassertv(task->_chain == NULL &&
489 task->_manager == NULL &&
490 task->_chain_name == get_name() &&
491 task->_state == AsyncTask::S_inactive);
492 nassertv(!do_has_task(task));
497 task->_manager = _manager;
499 double now = _manager->_clock->get_frame_time();
500 task->_start_time = now;
501 task->_start_frame = _manager->_clock->get_frame_count();
503 _manager->add_task_by_name(task);
507 task->_wake_time = now + task->
get_delay();
508 task->_start_time = task->_wake_time;
509 task->_state = AsyncTask::S_sleeping;
510 _sleeping.push_back(task);
511 push_heap(_sleeping.begin(), _sleeping.end(), AsyncTaskSortWakeTime());
515 task->_state = AsyncTask::S_active;
516 if (task_cat.is_spam()) {
518 <<
"Adding " << *task <<
" with sort " << task->
get_sort()
519 <<
" to chain " << get_name() <<
" with current_sort "
520 << _current_sort <<
"\n";
522 if (task->
get_sort() >= _current_sort) {
524 _active.push_back(task);
525 push_heap(_active.begin(), _active.end(), AsyncTaskSortPriority());
528 _next_active.push_back(task);
532 ++(_manager->_num_tasks);
533 _needs_cleanup =
true;
546 bool AsyncTaskChain::
548 bool removed =
false;
550 nassertr(task->_chain ==
this,
false);
552 switch (task->_state) {
553 case AsyncTask::S_servicing:
555 task->_state = AsyncTask::S_servicing_removed;
559 case AsyncTask::S_servicing_removed:
563 case AsyncTask::S_sleeping:
566 int index = find_task_on_heap(_sleeping, task);
567 nassertr(index != -1,
false);
568 _sleeping.erase(_sleeping.begin() + index);
569 make_heap(_sleeping.begin(), _sleeping.end(), AsyncTaskSortWakeTime());
571 cleanup_task(task,
false,
false);
575 case AsyncTask::S_active:
578 int index = find_task_on_heap(_active, task);
580 _active.erase(_active.begin() + index);
581 make_heap(_active.begin(), _active.end(), AsyncTaskSortPriority());
583 index = find_task_on_heap(_next_active, task);
585 _next_active.erase(_next_active.begin() + index);
587 index = find_task_on_heap(_this_active, task);
588 nassertr(index != -1,
false);
592 cleanup_task(task,
false,
false);
608 void AsyncTaskChain::
609 do_wait_for_tasks() {
612 if (_threads.empty()) {
614 while (_num_tasks > 0) {
615 if (_state == S_shutdown || _state == S_interrupted) {
623 while (_num_tasks > 0) {
624 if (_state == S_shutdown || _state == S_interrupted) {
641 void AsyncTaskChain::
643 if (task_cat.is_spam()) {
644 do_output(task_cat.spam());
646 <<
": do_cleanup()\n";
658 dead.reserve(_num_tasks);
660 _needs_cleanup =
false;
662 TaskHeap::const_iterator ti;
663 for (ti = _active.begin(); ti != _active.end(); ++ti) {
665 dead.push_back(task);
666 cleanup_task(task,
false,
false);
668 for (ti = _this_active.begin(); ti != _this_active.end(); ++ti) {
670 dead.push_back(task);
671 cleanup_task(task,
false,
false);
673 for (ti = _next_active.begin(); ti != _next_active.end(); ++ti) {
675 dead.push_back(task);
676 cleanup_task(task,
false,
false);
678 for (ti = _sleeping.begin(); ti != _sleeping.end(); ++ti) {
680 dead.push_back(task);
681 cleanup_task(task,
false,
false);
686 nassertv(_num_tasks == 0 || _num_tasks == 1);
690 for (ti = dead.begin(); ti != dead.end(); ++ti) {
691 (*ti)->upon_death(_manager,
false);
695 if (task_cat.is_spam()) {
696 do_output(task_cat.spam());
698 <<
": done do_cleanup()\n";
710 bool AsyncTaskChain::
712 return (find_task_on_heap(_active, task) != -1 ||
713 find_task_on_heap(_next_active, task) != -1 ||
714 find_task_on_heap(_sleeping, task) != -1 ||
715 find_task_on_heap(_this_active, task) != -1);
728 find_task_on_heap(
const TaskHeap &heap,
AsyncTask *task)
const {
729 for (
int i = 0; i < (int)heap.size(); ++i) {
730 if (heap[i] == task) {
749 void AsyncTaskChain::
750 service_one_task(AsyncTaskChain::AsyncTaskChainThread *thread) {
751 if (!_active.empty()) {
753 pop_heap(_active.begin(), _active.end(), AsyncTaskSortPriority());
757 thread->_servicing = task;
760 if (task_cat.is_spam()) {
762 <<
"Servicing " << *task <<
" in "
766 nassertv(task->
get_sort() == _current_sort);
767 nassertv(task->_state == AsyncTask::S_active);
768 task->_state = AsyncTask::S_servicing;
769 task->_servicing_thread = thread;
771 AsyncTask::DoneStatus ds = task->unlock_and_do_task();
773 if (thread != (AsyncTaskChain::AsyncTaskChainThread *)NULL) {
774 thread->_servicing = NULL;
776 task->_servicing_thread = NULL;
778 if (task->_chain ==
this) {
779 if (task->_state == AsyncTask::S_servicing_removed) {
781 cleanup_task(task,
true,
false);
783 }
else if (task->_chain_name != get_name()) {
786 cleanup_task(task, false, false);
787 task->jump_to_task_chain(_manager);
791 case AsyncTask::DS_cont:
794 task->_state = AsyncTask::S_active;
795 _next_active.push_back(task);
799 case AsyncTask::DS_again:
802 double now = _manager->_clock->get_frame_time();
803 task->_wake_time = now + task->get_delay();
804 task->_start_time = task->_wake_time;
805 task->_state = AsyncTask::S_sleeping;
806 _sleeping.push_back(task);
807 push_heap(_sleeping.begin(), _sleeping.end(), AsyncTaskSortWakeTime());
808 if (task_cat.is_spam()) {
810 <<
"Sleeping " << *task <<
", wake time at "
811 << task->_wake_time - now <<
"\n";
817 case AsyncTask::DS_pickup:
819 task->_state = AsyncTask::S_active;
820 _this_active.push_back(task);
824 case AsyncTask::DS_interrupt:
826 task->_state = AsyncTask::S_active;
827 _next_active.push_back(task);
828 if (_state == S_started) {
829 _state = S_interrupted;
836 cleanup_task(task,
true,
true);
841 <<
"Task is no longer on chain " << get_name()
842 <<
": " << *task <<
"\n";
845 if (task_cat.is_spam()) {
847 <<
"Done servicing " << *task <<
" in "
851 thread_consider_yield();
868 void AsyncTaskChain::
869 cleanup_task(
AsyncTask *task,
bool upon_death,
bool clean_exit) {
870 if (task_cat.is_spam()) {
871 do_output(task_cat.spam());
873 <<
": cleanup_task(" << *task <<
", " << upon_death <<
", " << clean_exit
877 nassertv(task->_chain ==
this);
882 task->_manager = NULL;
884 --(_manager->_num_tasks);
886 _manager->remove_task_by_name(task);
889 _manager->_lock.release();
890 task->upon_death(_manager, clean_exit);
891 _manager->_lock.acquire();
907 bool AsyncTaskChain::
908 finish_sort_group() {
909 nassertr(_num_busy_threads == 0,
true);
911 if (!_threads.empty()) {
912 PStatClient::thread_tick(get_name());
915 if (!_active.empty()) {
917 nassertr(_current_sort < _active.front()->get_sort(),
true);
918 _current_sort = _active.front()->get_sort();
925 if (!_this_active.empty() && _frame_budget >= 0.0) {
931 if (task_cat.is_spam()) {
932 do_output(task_cat.spam());
934 <<
": next epoch (pickup mode)\n";
938 _active.swap(_this_active);
943 if (task_cat.is_spam()) {
944 do_output(task_cat.spam());
949 _pickup_mode =
false;
953 _next_active.insert(_next_active.end(), _this_active.begin(), _this_active.end());
954 _this_active.clear();
956 _active.swap(_next_active);
963 if (task_cat.is_spam()) {
964 do_output(task_cat.spam());
968 _manager->_clock->tick();
969 _manager->_frame_cvar.notify_all();
971 }
else if (_frame_sync) {
974 _block_till_next_frame =
true;
978 double now = _manager->_clock->get_frame_time();
979 while (!_sleeping.empty() && _sleeping.front()->_wake_time <= now) {
981 if (task_cat.is_spam()) {
983 <<
"Waking " << *task <<
", wake time at "
984 << task->_wake_time - now <<
"\n";
986 pop_heap(_sleeping.begin(), _sleeping.end(), AsyncTaskSortWakeTime());
987 _sleeping.pop_back();
988 task->_state = AsyncTask::S_active;
989 task->_start_frame = _manager->_clock->get_frame_count();
990 _active.push_back(task);
993 if (task_cat.is_spam()) {
994 if (_sleeping.empty()) {
996 <<
"No more tasks on sleeping queue.\n";
999 <<
"Next sleeper: " << *_sleeping.front() <<
", wake time at "
1000 << _sleeping.front()->_wake_time - now <<
"\n";
1006 TaskHeap::const_iterator ti;
1007 for (ti = _active.begin(); ti != _active.end(); ++ti) {
1009 ++task->_num_frames;
1013 if (_timeslice_priority) {
1014 filter_timeslice_priority();
1017 nassertr((
size_t)_num_tasks == _active.size() + _this_active.size() + _next_active.size() + _sleeping.size(),
true);
1018 make_heap(_active.begin(), _active.end(), AsyncTaskSortPriority());
1020 _current_sort = -INT_MAX;
1022 if (!_active.empty()) {
1029 _pickup_mode =
false;
1030 nassertr(_this_active.empty(),
false);
1045 void AsyncTaskChain::
1046 filter_timeslice_priority() {
1047 if (_active.empty()) {
1050 nassertv(_timeslice_priority);
1053 double net_runtime = 0.0;
1054 int net_priority = 0;
1056 TaskHeap::iterator ti;
1057 for (ti = _active.begin(); ti != _active.end(); ++ti) {
1060 int priority = max(task->_priority, 1);
1061 net_runtime += runtime;
1062 net_priority += priority;
1066 double average_budget = net_runtime / (double)net_priority;
1068 TaskHeap keep, postpone;
1069 for (ti = _active.begin(); ti != _active.end(); ++ti) {
1072 int priority = max(task->_priority, 1);
1073 double consumed = runtime / (double)priority;
1075 if (consumed > average_budget) {
1077 postpone.push_back(task);
1080 keep.push_back(task);
1087 nassertv(!postpone.empty());
1088 ti = postpone.begin();
1089 TaskHeap::iterator max_ti = ti;
1091 while (ti != postpone.end()) {
1092 if ((*ti)->_priority > (*max_ti)->_priority) {
1099 keep.push_back(*max_ti);
1100 postpone.erase(max_ti);
1105 _this_active.insert(_this_active.end(), postpone.begin(), postpone.end());
1107 _next_active.insert(_next_active.end(), postpone.begin(), postpone.end());
1110 nassertv(!_active.empty());
1119 void AsyncTaskChain::
1121 if (_state == S_started || _state == S_interrupted) {
1122 if (task_cat.is_debug() && !_threads.empty()) {
1124 <<
"Stopping " << _threads.size()
1125 <<
" threads for " << _manager->get_name()
1126 <<
" chain " << get_name()
1130 _state = S_shutdown;
1132 _manager->_frame_cvar.notify_all();
1134 Threads wait_threads;
1135 wait_threads.swap(_threads);
1139 _manager->_lock.release();
1140 Threads::iterator ti;
1141 for (ti = wait_threads.begin(); ti != wait_threads.end(); ++ti) {
1142 if (task_cat.is_debug()) {
1144 <<
"Waiting for " << *(*ti) <<
" in "
1148 if (task_cat.is_spam()) {
1150 <<
"Done waiting for " << *(*ti) <<
" in "
1154 _manager->_lock.acquire();
1159 nassertv(_num_busy_threads == 0 || _num_busy_threads == 1);
1160 cleanup_pickup_mode();
1170 void AsyncTaskChain::
1171 do_start_threads() {
1172 if (_state == S_interrupted) {
1176 if (_state == S_initial) {
1179 if (task_cat.is_debug()) {
1181 <<
"Starting " << _num_threads <<
" threads for "
1182 << _manager->get_name() <<
" chain " << get_name() <<
"\n";
1184 _needs_cleanup =
true;
1185 _threads.reserve(_num_threads);
1186 for (
int i = 0; i < _num_threads; ++i) {
1188 strm << _manager->get_name() <<
"_" << get_name() <<
"_" << i;
1189 PT(AsyncTaskChainThread) thread = new AsyncTaskChainThread(strm.str(), this);
1190 if (thread->start(_thread_priority, true)) {
1191 _threads.push_back(thread);
1206 do_get_active_tasks()
const {
1209 Threads::const_iterator thi;
1210 for (thi = _threads.begin(); thi != _threads.end(); ++thi) {
1216 TaskHeap::const_iterator ti;
1217 for (ti = _active.begin(); ti != _active.end(); ++ti) {
1221 for (ti = _this_active.begin(); ti != _this_active.end(); ++ti) {
1225 for (ti = _next_active.begin(); ti != _next_active.end(); ++ti) {
1241 do_get_sleeping_tasks()
const {
1244 TaskHeap::const_iterator ti;
1245 for (ti = _sleeping.begin(); ti != _sleeping.end(); ++ti) {
1259 void AsyncTaskChain::
1261 thread_consider_yield();
1262 if (_num_tasks == 0) {
1268 if (!_threads.empty()) {
1272 if (_num_busy_threads != 0) {
1275 <<
"Ignoring recursive poll() within another task.\n";
1279 nassertv(!_pickup_mode);
1282 while (!_active.empty()) {
1283 if (_state == S_shutdown || _state == S_interrupted) {
1286 int frame = _manager->_clock->get_frame_count();
1287 if (_current_frame != frame) {
1288 _current_frame = frame;
1289 _time_in_frame = 0.0;
1290 _block_till_next_frame =
false;
1292 if (_block_till_next_frame ||
1293 (_frame_budget >= 0.0 && _time_in_frame >= _frame_budget)) {
1296 cleanup_pickup_mode();
1300 _current_sort = _active.front()->get_sort();
1305 _num_busy_threads++;
1306 service_one_task(NULL);
1307 _num_busy_threads--;
1310 if (!_threads.empty()) {
1315 finish_sort_group();
1316 }
while (_pickup_mode);
1327 void AsyncTaskChain::
1328 cleanup_pickup_mode() {
1330 _pickup_mode =
false;
1333 _next_active.insert(_next_active.end(), _this_active.begin(), _this_active.end());
1334 _this_active.clear();
1335 _next_active.insert(_next_active.end(), _active.begin(), _active.end());
1339 finish_sort_group();
1349 void AsyncTaskChain::
1350 do_output(ostream &out)
const {
1352 out << _manager->get_type() <<
" " << _manager->get_name();
1354 out <<
"(no manager)";
1356 out <<
" task chain " << get_name()
1357 <<
"; " << _num_tasks <<
" tasks";
1366 void AsyncTaskChain::
1367 do_write(ostream &out,
int indent_level)
const {
1368 indent(out, indent_level)
1369 <<
"Task chain \"" << get_name() <<
"\"\n";
1370 if (_num_threads > 0) {
1371 indent(out, indent_level + 2)
1372 << _num_threads <<
" threads, priority " << _thread_priority <<
"\n";
1374 if (_frame_budget >= 0.0) {
1375 indent(out, indent_level + 2)
1376 <<
"frame budget " << _frame_budget <<
" s\n";
1378 if (_timeslice_priority) {
1379 indent(out, indent_level + 2)
1380 <<
"timeslice priority\n";
1383 indent(out, indent_level + 2)
1387 static const size_t buffer_size = 1024;
1388 char buffer[buffer_size];
1389 sprintf(buffer,
" %-32s %8s %8s %8s %8s %6s",
1392 "dt(ms)",
"avg",
"max",
1394 nassertv(strlen(buffer) < buffer_size);
1396 indent(out, indent_level)
1399 indent(out, indent_level);
1400 for (
int i = 0; i < 32+8+8+8+8+6+7; ++i) {
1407 TaskHeap tasks = _active;
1408 tasks.insert(tasks.end(), _this_active.begin(), _this_active.end());
1409 tasks.insert(tasks.end(), _next_active.begin(), _next_active.end());
1411 Threads::const_iterator thi;
1412 for (thi = _threads.begin(); thi != _threads.end(); ++thi) {
1415 tasks.push_back(task);
1419 double now = _manager->_clock->get_frame_time();
1421 if (!tasks.empty()) {
1422 sort(tasks.begin(), tasks.end(), AsyncTaskSortPriority());
1427 TaskHeap::reverse_iterator ti;
1428 for (ti = tasks.rbegin(); ti != tasks.rend(); ++ti) {
1430 write_task_line(out, indent_level, task, now);
1437 TaskHeap sleeping = _sleeping;
1438 while (!sleeping.empty()) {
1440 pop_heap(sleeping.begin(), sleeping.end(), AsyncTaskSortWakeTime());
1441 sleeping.pop_back();
1443 write_task_line(out, indent_level, task, now);
1455 write_task_line(ostream &out,
int indent_level,
AsyncTask *task,
double now)
const {
1456 char servicing_flag =
' ';
1457 if (task->_state == AsyncTask::S_servicing) {
1458 servicing_flag =
'*';
1459 }
else if (task->_state == AsyncTask::S_servicing_removed) {
1460 servicing_flag =
'-';
1463 static const size_t buffer_size = 1024;
1464 char buffer[buffer_size];
1466 if (task->_state == AsyncTask::S_sleeping) {
1469 string name = task->get_name().substr(0, 32);
1470 sprintf(buffer,
"%c%-32s %8.1f",
1471 servicing_flag, name.c_str(),
1472 task->_wake_time - now);
1476 string name = task->get_name().substr(0, 41);
1477 sprintf(buffer,
"%c%-41s",
1478 servicing_flag, name.c_str());
1480 nassertv(strlen(buffer) < buffer_size);
1482 indent(out, indent_level)
1485 if (task->_num_frames > 0) {
1486 sprintf(buffer,
" %8.1f %8.1f %8.1f %6d",
1487 task->_dt * 1000.0, task->get_average_dt() * 1000.0,
1488 task->_max_dt * 1000.0,
1492 sprintf(buffer,
" %8s %8s %8s %6d",
1497 nassertv(strlen(buffer) < buffer_size);
1498 out << buffer <<
"\n";
1506 AsyncTaskChain::AsyncTaskChainThread::
1507 AsyncTaskChainThread(
const string &name,
AsyncTaskChain *chain) :
1508 Thread(name, chain->get_name()),
1519 void AsyncTaskChain::AsyncTaskChainThread::
1522 while (_chain->_state != S_shutdown && _chain->_state != S_interrupted) {
1523 thread_consider_yield();
1524 if (!_chain->_active.empty() &&
1525 _chain->_active.front()->get_sort() == _chain->_current_sort) {
1527 int frame = _chain->_manager->_clock->get_frame_count();
1528 if (_chain->_current_frame != frame) {
1529 _chain->_current_frame = frame;
1530 _chain->_time_in_frame = 0.0;
1531 _chain->_block_till_next_frame =
false;
1536 if (_chain->_block_till_next_frame ||
1537 (_chain->_frame_budget >= 0.0 && _chain->_time_in_frame >= _chain->_frame_budget)) {
1538 while ((_chain->_block_till_next_frame ||
1539 (_chain->_frame_budget >= 0.0 && _chain->_time_in_frame >= _chain->_frame_budget)) &&
1540 _chain->_state != S_shutdown && _chain->_state != S_interrupted) {
1541 _chain->cleanup_pickup_mode();
1542 _chain->_manager->_frame_cvar.wait();
1543 frame = _chain->_manager->_clock->get_frame_count();
1544 if (_chain->_current_frame != frame) {
1545 _chain->_current_frame = frame;
1546 _chain->_time_in_frame = 0.0;
1547 _chain->_block_till_next_frame =
false;
1555 _chain->_num_busy_threads++;
1556 _chain->service_one_task(
this);
1557 _chain->_num_busy_threads--;
1558 _chain->_cvar.notify_all();
1564 if (_chain->_num_busy_threads == 0) {
1566 if (!_chain->finish_sort_group()) {
1568 if (_chain->_sleeping.empty()) {
1570 _chain->_cvar.wait();
1572 double wake_time = _chain->do_get_next_wake_time();
1573 double now = _chain->_manager->_clock->get_frame_time();
1574 double timeout = max(wake_time - now, 0.0);
1576 _chain->_cvar.wait(timeout);
1584 _chain->_cvar.wait();
bool get_frame_sync() const
Returns the frame_sync flag.
int get_num_tasks() const
Returns the number of tasks that are currently active or sleeping within the task chain...
void wait()
Waits on the condition.
bool has_delay() const
Returns true if a delay has been set for this task via set_delay(), or false otherwise.
A class to manage a loose queue of isolated tasks, which can be performed either synchronously (in the foreground thread) or asynchronously (by a background thread).
ThreadPriority get_thread_priority() const
Returns the priority associated with threads that serve this task chain.
void set_frame_sync(bool frame_sync)
Sets the frame_sync flag.
void add_task(AsyncTask *task)
Adds a new AsyncTask to the collection.
A list of tasks, for instance as returned by some of the AsyncTaskManager query functions.
void start_threads()
Starts any requested threads to service the tasks on the queue.
int get_num_threads() const
Returns the number of threads that will be servicing tasks for this chain.
A lightweight class that can be used to automatically start and stop a PStatCollector around a section of code.
A lightweight C++ object whose constructor calls acquire() and whose destructor calls release() on a given mutex.
int get_sort() const
Returns the task's current sort value.
bool has_task(AsyncTask *task) const
Returns true if the indicated task has been added to this AsyncTaskChain, false otherwise.
void wait_for_tasks()
Blocks until the task list is empty.
static Thread * get_current_thread()
Returns a pointer to the currently-executing Thread object.
void set_num_threads(int num_threads)
Changes the number of threads for this task chain.
void notify_all()
Informs all of the other threads who are currently blocked on wait() that the relevant condition has changed.
void acquire() const
Grabs the mutex if it is available.
static bool is_threading_supported()
Returns true if threading support has been compiled in and enabled, or false if no threading is available (and Thread::start() will always fail).
A lightweight class that represents a single element that may be timed and/or counted via stats...
void set_frame_budget(double frame_budget)
Sets the maximum amount of time per frame the tasks on this chain are granted for execution...
AsyncTaskCollection get_sleeping_tasks() const
Returns the set of tasks that are sleeping (and not active) on the task chain, at the time of the call.
A base class for all things which can have a name.
double get_delay() const
Returns the delay value that has been set via set_delay, if any.
AsyncTaskCollection get_active_tasks() const
Returns the set of tasks that are active (and not sleeping) on the task chain, at the time of the call.
int get_num_running_threads() const
Returns the number of threads that have been created and are actively running.
void add_tasks_from(const AsyncTaskCollection &other)
Adds all the AsyncTasks indicated in the other collection to this task.
double get_frame_budget() const
Returns the maximum amount of time per frame the tasks on this chain are granted for execution...
The AsyncTaskChain is a subset of the AsyncTaskManager.
AsyncTaskCollection get_tasks() const
Returns the set of tasks that are active or sleeping on the task chain, at the time of the call...
double get_average_dt() const
Returns the average amount of time elapsed during each of the task's previous run cycles...
void poll()
Runs through all the tasks in the task list, once, if the task chain is running in single-threaded mode (no threads available).
void release() const
Releases the mutex.
This class represents a concrete task performed by an AsyncTaskManager.
void join()
Blocks the calling process until the thread terminates.
A thread; that is, a lightweight process.
void set_tick_clock(bool tick_clock)
Sets the tick_clock flag.
void set_timeslice_priority(bool timeslice_priority)
Sets the timeslice_priority flag.
void stop_threads()
Stops any threads that are currently running.
TypeHandle is the identifier used to differentiate C++ class types.
void set_thread_priority(ThreadPriority priority)
Changes the priority associated with threads that serve this task chain.
bool get_tick_clock() const
Returns the tick_clock flag.
bool get_timeslice_priority() const
Returns the timeslice_priority flag.
double get_next_wake_time() const
Returns the scheduled time (on the manager's clock) of the next sleeping task, on any task chain...