threadSimpleManager.cxx
/**
 * PANDA 3D SOFTWARE
 * Copyright (c) Carnegie Mellon University. All rights reserved.
 *
 * All use of this software is subject to the terms of the revised BSD
 * license. You should have received a copy of this license along
 * with this source code in a file named "LICENSE."
 *
 * @file threadSimpleManager.cxx
 * @author drose
 * @date 2007-06-19
 */

#include "threadSimpleManager.h"

#ifdef THREAD_SIMPLE_IMPL

#include "threadSimpleImpl.h"
#include "blockerSimple.h"
#include "mainThread.h"

#ifdef WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif
#include <windows.h>
#endif

bool ThreadSimpleManager::_pointers_initialized;
ThreadSimpleManager *ThreadSimpleManager::_global_ptr;

/**
 *
 */
ThreadSimpleManager::
ThreadSimpleManager() :
  _simple_thread_epoch_timeslice
  ("simple-thread-epoch-timeslice", 0.05,
   PRC_DESC("When SIMPLE_THREADS is defined, this defines the amount of time, "
            "in seconds, that should be considered the "
            "typical timeslice for one epoch (to run all threads once).")),
  _simple_thread_volunteer_delay
  ("simple-thread-volunteer-delay", 0.0,
   PRC_DESC("When SIMPLE_THREADS is defined, this defines the amount of time, "
            "in seconds, for which a task that voluntarily yields should "
            "be delayed.")),
  _simple_thread_yield_sleep
  ("simple-thread-yield-sleep", 0.001,
   PRC_DESC("When SIMPLE_THREADS is defined, this defines the amount of time, "
            "in seconds, for which the process should be put to sleep when "
            "yielding the timeslice to the system.")),
  _simple_thread_window
  ("simple-thread-window", 1.0,
   PRC_DESC("When SIMPLE_THREADS is defined, this defines the amount of time, "
            "in seconds, over which to average all the threads' runtimes, "
            "for the purpose of scheduling threads.")),
  _simple_thread_low_weight
  ("simple-thread-low-weight", 0.2,
   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
            "amount of time that is given to threads with priority TP_low.")),
  _simple_thread_normal_weight
  ("simple-thread-normal-weight", 1.0,
   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
            "amount of time that is given to threads with priority TP_normal.")),
  _simple_thread_high_weight
  ("simple-thread-high-weight", 5.0,
   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
            "amount of time that is given to threads with priority TP_high.")),
  _simple_thread_urgent_weight
  ("simple-thread-urgent-weight", 10.0,
   PRC_DESC("When SIMPLE_THREADS is defined, this determines the relative "
            "amount of time that is given to threads with priority TP_urgent."))
{
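  // All run-time accounting below is kept in integer "ticks"; with a
  // _tick_scale of 1,000,000 one tick corresponds to one microsecond of
  // measured run time (see do_timeslice_accounting()).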
  _tick_scale = 1000000.0;
  _total_ticks = 0;
  _current_thread = nullptr;
  _clock = TrueClock::get_global_ptr();  // drives all thread timing; see get_current_time()
  _waiting_for_exit = nullptr;

  // Install these global pointers so very low-level code (code defined before
  // the pipeline directory) can yield when necessary.
  global_thread_yield = &Thread::force_yield;
  global_thread_consider_yield = &Thread::consider_yield;
}

/**
 * Adds the indicated thread to the ready queue. The thread will be executed
 * when its turn comes. If the thread is not the currently executing thread,
 * its _jmp_context should be filled appropriately.
 *
 * If volunteer is true, the thread is volunteering to sleep before its
 * timeslice has been used up. If volunteer is false, the thread would still
 * be running if it could.
 */
void ThreadSimpleManager::
enqueue_ready(ThreadSimpleImpl *thread, bool volunteer) {
  // We actually add it to _next_ready, so that we can tell when we have
  // processed every thread in a given epoch.
  if (!volunteer) {
    _next_ready.push_back(thread);

  } else {
    // Unless it's a volunteer, in which case we actually put it to sleep for
    // the duration of the timeslice, so it won't interfere with timeslice
    // accounting for the remaining ready threads.
    double now = get_current_time();
    thread->_wake_time = now + _simple_thread_volunteer_delay;
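    // _volunteers is kept as a heap ordered on wake time, so the thread that
    // is due to wake soonest is always at the front (see wake_sleepers()).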
    _volunteers.push_back(thread);
    push_heap(_volunteers.begin(), _volunteers.end(), CompareStartTime());
  }
}

/**
 * Adds the indicated thread to the sleep queue, until the indicated number of
 * seconds have elapsed. Then the thread will be automatically moved to the
 * ready queue.
 */
void ThreadSimpleManager::
enqueue_sleep(ThreadSimpleImpl *thread, double seconds) {
  if (thread_cat->is_debug()) {
    thread_cat.debug()
      << *_current_thread->_parent_obj << " sleeping for "
      << seconds << " seconds\n";
  }

  double now = get_current_time();
  thread->_wake_time = now + seconds;
  _sleeping.push_back(thread);
  push_heap(_sleeping.begin(), _sleeping.end(), CompareStartTime());
}

/**
 * Adds the indicated thread to the blocked queue for the indicated blocker.
 * The thread will be awoken by a later call to unblock_one() or
 * unblock_all().
 */
void ThreadSimpleManager::
enqueue_block(ThreadSimpleImpl *thread, BlockerSimple *blocker) {
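  // Setting F_has_waiters on the blocker records that at least one thread is
  // now queued on it, pending a later unblock_one() or unblock_all().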
  _blocked[blocker].push_back(thread);
  blocker->_flags |= BlockerSimple::F_has_waiters;
}

/**
 * Unblocks one thread waiting on the indicated blocker, if any. Returns true
 * if anything was unblocked, false otherwise.
 */
bool ThreadSimpleManager::
unblock_one(BlockerSimple *blocker) {
  Blocked::iterator bi = _blocked.find(blocker);
  if (bi != _blocked.end()) {
    nassertr(blocker->_flags & BlockerSimple::F_has_waiters, false);

    FifoThreads &threads = (*bi).second;
    nassertr(!threads.empty(), false);
    ThreadSimpleImpl *thread = threads.front();
    threads.pop_front();
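    // The woken thread goes straight onto _ready (not _next_ready), so it can
    // still be scheduled within the current epoch.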
    _ready.push_back(thread);
    if (threads.empty()) {
      blocker->_flags &= ~BlockerSimple::F_has_waiters;
      _blocked.erase(bi);
    }

    return true;
  }

  return false;
}

/**
 * Unblocks all threads waiting on the indicated blocker. Returns true if
 * anything was unblocked, false otherwise.
 */
bool ThreadSimpleManager::
unblock_all(BlockerSimple *blocker) {
  Blocked::iterator bi = _blocked.find(blocker);
  if (bi != _blocked.end()) {
    nassertr(blocker->_flags & BlockerSimple::F_has_waiters, false);

    FifoThreads &threads = (*bi).second;
    nassertr(!threads.empty(), false);
    while (!threads.empty()) {
      ThreadSimpleImpl *thread = threads.front();
      threads.pop_front();
      _ready.push_back(thread);
    }
    blocker->_flags &= ~BlockerSimple::F_has_waiters;
    _blocked.erase(bi);
    return true;
  }
  return false;
}

/**
 * Adds the indicated thread to the finished queue. The manager will drop the
 * reference count on the indicated thread at the next epoch. (A thread can't
 * drop its own reference count while it is running, since that might
 * deallocate its own stack.)
 */
void ThreadSimpleManager::
enqueue_finished(ThreadSimpleImpl *thread) {
  _finished.push_back(thread);
}

/**
 * Moves the indicated thread to the head of the ready queue. If it is not
 * already on the ready queue, does nothing.
 */
void ThreadSimpleManager::
preempt(ThreadSimpleImpl *thread) {
  FifoThreads::iterator ti;
  ti = find(_ready.begin(), _ready.end(), thread);
  if (ti != _ready.end()) {
    _ready.erase(ti);
    _ready.push_front(thread);
  }
}

/**
 * Switches out the currently executing thread and chooses a new thread for
 * execution. Before calling this, the current thread should have already
 * re-enqueued itself with a call to one of the enqueue_*() methods above, if
 * it intends to run again.
 *
 * This will fill in the current thread's _jmp_context member appropriately,
 * and then change the global current_thread pointer.
 */
void ThreadSimpleManager::
next_context() {
  // Delete any threads that need it. We can't delete the current thread,
  // though.
  while (!_finished.empty() && _finished.front() != _current_thread) {
    ThreadSimpleImpl *finished_thread = _finished.front();
    _finished.pop_front();
    unref_delete(finished_thread->_parent_obj);
  }

  // Mark the current thread's resume point.
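  // save_thread_context() below records this thread's execution context and
  // then jumps into st_choose_next_context(); control returns past that call
  // only when the scheduler later switches back to this thread.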

#ifdef HAVE_PYTHON
  // Save the current Python thread state.
  _current_thread->_python_state = thread_state_swap(nullptr);
#endif // HAVE_PYTHON

#ifdef DO_PSTATS
  Thread::PStatsCallback *pstats_callback = _current_thread->_parent_obj->get_pstats_callback();
  if (pstats_callback != nullptr) {
    pstats_callback->deactivate_hook(_current_thread->_parent_obj);
  }
#endif // DO_PSTATS

  save_thread_context(_current_thread->_context, st_choose_next_context, this);
  // Pass 2: we have returned into the context, and are now resuming the
  // current thread.

#ifdef DO_PSTATS
  if (pstats_callback != nullptr) {
    pstats_callback->activate_hook(_current_thread->_parent_obj);
  }
#endif // DO_PSTATS

#ifdef HAVE_PYTHON
  thread_state_swap(_current_thread->_python_state);
#endif // HAVE_PYTHON
}

/**
 * Blocks until all running threads (other than the current thread) have
 * finished. This only works when called from the main thread; if called on
 * any other thread, nothing will happen.
 */
void ThreadSimpleManager::
prepare_for_exit() {
  if (!_current_thread->_parent_obj->is_exact_type(MainThread::get_class_type())) {
    if (thread_cat->is_debug()) {
      thread_cat.debug()
        << "Ignoring prepare_for_exit called from "
        << *(_current_thread->_parent_obj) << "\n";
    }
    return;
  }

  if (thread_cat->is_debug()) {
    thread_cat.debug()
      << "prepare_for_exit\n";
  }

  nassertv(_waiting_for_exit == nullptr);
  _waiting_for_exit = _current_thread;

  // At this point, any non-joinable threads on any of the queues are
  // automatically killed.
  kill_non_joinable(_ready);

  Blocked::iterator bi = _blocked.begin();
  while (bi != _blocked.end()) {
    Blocked::iterator bnext = bi;
    ++bnext;
    BlockerSimple *blocker = (*bi).first;
    FifoThreads &threads = (*bi).second;
    kill_non_joinable(threads);
    if (threads.empty()) {
      blocker->_flags &= ~BlockerSimple::F_has_waiters;
      _blocked.erase(bi);
    }
    bi = bnext;
  }

  kill_non_joinable(_sleeping);
  kill_non_joinable(_volunteers);

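  // Switch away from the main thread. choose_next_context() will only put
  // _waiting_for_exit back on the ready queue once no other threads remain
  // runnable, so this call returns when everything else has finished.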
  next_context();

  // Delete any remaining threads.
  while (!_finished.empty() && _finished.front() != _current_thread) {
    ThreadSimpleImpl *finished_thread = _finished.front();
    _finished.pop_front();
    unref_delete(finished_thread->_parent_obj);
  }
}

/**
 * Sets the initial value of the current_thread pointer, i.e. the main
 * thread. It is valid to call this method exactly once.
 */
void ThreadSimpleManager::
set_current_thread(ThreadSimpleImpl *current_thread) {
  nassertv(_current_thread == nullptr);
  _current_thread = current_thread;
}

/**
 * Removes the indicated thread from the accounting, for instance just before
 * the thread destructs.
 */
void ThreadSimpleManager::
remove_thread(ThreadSimpleImpl *thread) {
  TickRecords new_records;
  TickRecords::iterator ri;
  for (ri = _tick_records.begin(); ri != _tick_records.end(); ++ri) {
    if ((*ri)._thread != thread) {
      // Keep this record.
      new_records.push_back(*ri);
    } else {
      // Lose this record.
      nassertv(_total_ticks >= (*ri)._tick_count);
      _total_ticks -= (*ri)._tick_count;
    }
  }

  _tick_records.swap(new_records);
}

/**
 * Calls the appropriate system sleep function to sleep the whole process for
 * the indicated number of seconds.
 */
void ThreadSimpleManager::
system_sleep(double seconds) {
#ifdef WIN32
  Sleep((int)(seconds * 1000 + 0.5));

#else
  /*
  struct timespec rqtp;
  rqtp.tv_sec = time_t(seconds);
  rqtp.tv_nsec = long((seconds - (double)rqtp.tv_sec) * 1000000000.0 + 0.5);
  nanosleep(&rqtp, NULL);
  */

  // We use select() as the only way that seems to actually yield the
  // timeslice. sleep() and nanosleep() don't appear to do the trick.
  struct timeval tv;
  tv.tv_sec = time_t(seconds);
  tv.tv_usec = long((seconds - (double)tv.tv_sec) * 1000000.0 + 0.5);
  select(0, nullptr, nullptr, nullptr, &tv);
#endif // WIN32
}

/**
 * Writes a list of threads running and threads blocked.
 */
void ThreadSimpleManager::
write_status(std::ostream &out) const {
  out << "Currently running: " << *_current_thread->_parent_obj << "\n";

  out << "Ready:";
  FifoThreads::const_iterator ti;
  Sleeping::const_iterator si;
  for (ti = _ready.begin(); ti != _ready.end(); ++ti) {
    out << " " << *(*ti)->_parent_obj;
  }
  for (ti = _next_ready.begin(); ti != _next_ready.end(); ++ti) {
    out << " " << *(*ti)->_parent_obj;
  }
  for (si = _volunteers.begin(); si != _volunteers.end(); ++si) {
    out << " " << *(*si)->_parent_obj;
  }
  out << "\n";

  double now = get_current_time();

  out << "Sleeping:";
  // Copy and sort for convenience.
  Sleeping s2 = _sleeping;
  sort(s2.begin(), s2.end(), CompareStartTime());
  for (si = s2.begin(); si != s2.end(); ++si) {
    out << " " << *(*si)->_parent_obj << "(" << (*si)->_wake_time - now
        << "s)";
  }
  out << "\n";

  Blocked::const_iterator bi;
  for (bi = _blocked.begin(); bi != _blocked.end(); ++bi) {
    BlockerSimple *blocker = (*bi).first;
    const FifoThreads &threads = (*bi).second;
    out << "On blocker " << blocker << ":\n";
    FifoThreads::const_iterator ti;
    for (ti = threads.begin(); ti != threads.end(); ++ti) {
      ThreadSimpleImpl *thread = (*ti);
      out << " " << *thread->_parent_obj;
#ifdef DEBUG_THREADS
      out << " (";
      thread->_parent_obj->output_blocker(out);
      out << ")";
#endif // DEBUG_THREADS
    }
    out << "\n";
  }
}

/**
 * Calls the appropriate system function to yield the whole process to any
 * other system processes.
 */
void ThreadSimpleManager::
system_yield() {
  if (!_pointers_initialized) {
    // Ignore this call before we construct the global ThreadSimpleManager.
    return;
  }

  if (thread_cat->is_debug()) {
    thread_cat.debug()
      << "system_yield\n";
  }

  // There seem to be some issues with modern operating systems not wanting to
  // actually yield the timeslice in response to sleep(0). In particular,
  // Windows and OSX both seemed to do nothing in that call. Whatever. We'll
  // force the point by explicitly sleeping for 1 ms in both cases. This is
  // user-configurable in case 1 ms is too much (though on Windows that's all
  // the resolution you have).
  system_sleep(_global_ptr->_simple_thread_yield_sleep);
}

/**
 * Returns elapsed time in seconds from some undefined epoch, via whatever
 * clock the manager is using for all thread timing.
 */
double ThreadSimpleManager::
get_current_time() const {
  return _clock->get_short_raw_time();
}

/**
 * Should be called at startup to initialize the simple threading system.
 */
void ThreadSimpleManager::
init_pointers() {
  if (!_pointers_initialized) {
    _pointers_initialized = true;
    _global_ptr = new ThreadSimpleManager;
    // Make sure the main Thread object is created while we're at it.
    Thread::get_main_thread();
  }
}

/**
 * Select the next context to run. Continuing the work of next_context().
 */
void ThreadSimpleManager::
st_choose_next_context(struct ThreadContext *from_context, void *data) {
  ThreadSimpleManager *self = (ThreadSimpleManager *)data;
  self->choose_next_context(from_context);
}

/**
 * Select the next context to run. Continuing the work of next_context().
 */
void ThreadSimpleManager::
choose_next_context(struct ThreadContext *from_context) {
  double now = get_current_time();

  do_timeslice_accounting(_current_thread, now);
  _current_thread = nullptr;

  if (!_sleeping.empty() || !_volunteers.empty()) {
    if (_ready.empty() && _next_ready.empty()) {
      // All of our threads are currently sleeping. Therefore, wake the
      // volunteer(s) immediately.
      wake_all_sleepers(_volunteers);

      // We should also yield the whole process now, to be polite to the rest
      // of the system.
      system_yield();
      now = get_current_time();
    }
    wake_sleepers(_sleeping, now);
    wake_sleepers(_volunteers, now);
  }

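  // new_epoch is true while we are at the start of a fresh epoch. It lets the
  // selection loop below detect when an entire epoch has gone by without any
  // thread receiving a positive timeslice, in which case the oldest
  // accounting record is discarded to break the stalemate.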
  bool new_epoch = !_ready.empty() && _next_ready.empty();

  // Choose a new thread to execute.
  while (true) {
    // If there are no threads, sleep.
    while (_ready.empty()) {
      if (!_next_ready.empty()) {
        // We've finished an epoch.
        _ready.swap(_next_ready);

        if (new_epoch && !_tick_records.empty()) {
          // Pop the oldest timeslice record off when we finish an epoch
          // without executing any threads, to ensure we don't get caught in
          // an "all threads reached budget" loop.
          if (thread_cat->is_debug()) {
            thread_cat.debug()
              << "All threads exceeded budget.\n";
          }
          TickRecord &record = _tick_records.front();
          _total_ticks -= record._tick_count;

          if (record._thread->_run_ticks >= record._tick_count) {
            // Ensure we don't go negative.
            record._thread->_run_ticks -= record._tick_count;
          } else {
            // It is possible for this to happen if the application has been
            // paused for more than 2^31 ticks.
            record._thread->_run_ticks = 0;
          }
          _tick_records.pop_front();
        }
        new_epoch = true;

      } else if (!_volunteers.empty()) {
        // There are some volunteers. Wake them. Also wake any sleepers that
        // need it.
        if (thread_cat->is_debug()) {
          thread_cat.debug()
            << "Waking volunteers.\n";
        }
        // We should yield the whole process now, to be polite to the rest of
        // the system.
        system_yield();
        now = get_current_time();
        wake_all_sleepers(_volunteers);
        wake_sleepers(_sleeping, now);

      } else if (!_sleeping.empty()) {
        // All threads are sleeping.
        double wait = _sleeping.front()->_wake_time - now;
        if (wait > 0.0) {
          if (thread_cat->is_debug()) {
            thread_cat.debug()
              << "Sleeping all threads " << wait << " seconds\n";
          }
          system_sleep(wait);
        }
        now = get_current_time();
        wake_sleepers(_sleeping, now);
        wake_sleepers(_volunteers, now);

      } else {
        // No threads are ready!
        if (_waiting_for_exit != nullptr) {
          // This is a shutdown situation. In this case, we quietly abandon
          // the remaining blocked threads, if any, and switch back to the
          // main thread to finish shutting down.
          _ready.push_back(_waiting_for_exit);
          _waiting_for_exit = nullptr;
          break;
        }

        // No threads are ready to run, but we're not explicitly shutting
        // down. This is an error condition, an unintentional deadlock.
        if (!_blocked.empty()) {
          thread_cat->error()
            << "Deadlock! All threads blocked.\n";
          report_deadlock();
          abort();
        }

        // No threads are queued anywhere. This is some kind of internal
        // error, since normally the main thread, at least, should be queued
        // somewhere.
        thread_cat->error()
          << "All threads disappeared!\n";
        exit(0);
      }
    }

    ThreadSimpleImpl *chosen_thread = _ready.front();
    _ready.pop_front();

    double timeslice = determine_timeslice(chosen_thread);
    if (timeslice > 0.0) {
      // This thread is ready to roll. Break out of the loop.
      chosen_thread->_start_time = now;
      chosen_thread->_stop_time = now + timeslice;
      _current_thread = chosen_thread;
      break;
    }

    // This thread has already exhausted its budget. Put it back for the next
    // epoch. It doesn't count as a volunteer, though--its timeslice was used
    // up.
    _next_ready.push_back(chosen_thread);
  }

  // All right, the thread is ready to roll. Begin.
  if (thread_cat->is_debug()) {
    size_t blocked_count = 0;
    Blocked::const_iterator bi;
    for (bi = _blocked.begin(); bi != _blocked.end(); ++bi) {
      const FifoThreads &threads = (*bi).second;
      blocked_count += threads.size();
    }

    double timeslice = _current_thread->_stop_time - _current_thread->_start_time;
    thread_cat.debug()
      << "Switching to " << *_current_thread->_parent_obj
      << " for " << timeslice << " s ("
      << _ready.size() << " + " << _next_ready.size()
      << " + " << _volunteers.size()
      << " other threads ready, " << blocked_count
      << " blocked, " << _sleeping.size() << " sleeping)\n";
  }

  switch_to_thread_context(from_context, _current_thread->_context);

  // Shouldn't get here.
  nassertv(false);
  abort();
}

/**
 * Records the amount of time the indicated thread has run, and updates the
 * moving average.
 */
void ThreadSimpleManager::
do_timeslice_accounting(ThreadSimpleImpl *thread, double now) {
  double elapsed = now - thread->_start_time;
  if (thread_cat.is_debug()) {
    thread_cat.debug()
      << *thread->_parent_obj << " ran for " << elapsed << " s of "
      << thread->_stop_time - thread->_start_time << " requested.\n";
  }

  // Clamp the elapsed time at 0. (If it's less than 0, the clock is running
  // backwards, ick.)
  elapsed = std::max(elapsed, 0.0);

  unsigned int ticks = (unsigned int)(elapsed * _tick_scale + 0.5);
  thread->_run_ticks += ticks;

  // Now remove any old records.
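  // _tick_records and _total_ticks together form a sliding window covering
  // roughly the last simple-thread-window seconds of run history, which is
  // what determine_timeslice() averages over.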
  unsigned int ticks_window = (unsigned int)(_simple_thread_window * _tick_scale + 0.5);
  while (_total_ticks > ticks_window) {
    nassertv(!_tick_records.empty());
    TickRecord &record = _tick_records.front();
    _total_ticks -= record._tick_count;
    if (record._thread->_run_ticks >= record._tick_count) {
      // Ensure we don't go negative.
      record._thread->_run_ticks -= record._tick_count;
    } else {
      // It is possible for this to happen if the application has been paused
      // for more than 2^31 ticks.
      record._thread->_run_ticks = 0;
    }
    _tick_records.pop_front();
  }

  // Finally, record the new record.
  TickRecord record;
  record._tick_count = ticks;
  record._thread = thread;
  _tick_records.push_back(record);
  _total_ticks += ticks;
}


/**
 * Moves any threads due to wake up from the sleeping queue to the ready
 * queue.
 */
void ThreadSimpleManager::
wake_sleepers(ThreadSimpleManager::Sleeping &sleepers, double now) {
  while (!sleepers.empty() && sleepers.front()->_wake_time <= now) {
    ThreadSimpleImpl *thread = sleepers.front();
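    // pop_heap() moves the front element (the earliest wake time) to the back
    // of the vector so pop_back() can remove it while keeping the heap valid.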
    pop_heap(sleepers.begin(), sleepers.end(), CompareStartTime());
    sleepers.pop_back();
    _ready.push_back(thread);
  }
}

/**
 * Moves all threads from the indicated sleeping queue to the ready queue,
 * regardless of wake time.
 */
void ThreadSimpleManager::
wake_all_sleepers(ThreadSimpleManager::Sleeping &sleepers) {
  while (!sleepers.empty()) {
    ThreadSimpleImpl *thread = sleepers.front();
    pop_heap(sleepers.begin(), sleepers.end(), CompareStartTime());
    sleepers.pop_back();
    _ready.push_back(thread);
  }
}

/**
 *
 */
void ThreadSimpleManager::
report_deadlock() {
  Blocked::const_iterator bi;
  for (bi = _blocked.begin(); bi != _blocked.end(); ++bi) {
    BlockerSimple *blocker = (*bi).first;
    const FifoThreads &threads = (*bi).second;
    thread_cat.info()
      << "On blocker " << blocker << ":\n";
    FifoThreads::const_iterator ti;
    for (ti = threads.begin(); ti != threads.end(); ++ti) {
      ThreadSimpleImpl *thread = (*ti);
      thread_cat.info()
        << " " << *thread->_parent_obj;
#ifdef DEBUG_THREADS
      thread_cat.info(false) << " (";
      thread->_parent_obj->output_blocker(thread_cat.info(false));
      thread_cat.info(false) << ")";
#endif // DEBUG_THREADS
      thread_cat.info(false) << "\n";
    }
  }
}

/**
 * Determines the amount of time that should be allocated to the next
 * timeslice of this thread, based on its priority weight and the amount of
 * time it has run recently relative to other threads.
 */
double ThreadSimpleManager::
determine_timeslice(ThreadSimpleImpl *chosen_thread) {
  if (_ready.empty() && _next_ready.empty()) {
    // This is the only ready thread. It gets the full timeslice.
    return _simple_thread_epoch_timeslice;
  }

  // Count up the total runtime and weight of all ready threads.
  unsigned int total_ticks = chosen_thread->_run_ticks;
  double total_weight = chosen_thread->_priority_weight;

  FifoThreads::const_iterator ti;
  for (ti = _ready.begin(); ti != _ready.end(); ++ti) {
    total_ticks += (*ti)->_run_ticks;
    total_weight += (*ti)->_priority_weight;
  }
  for (ti = _next_ready.begin(); ti != _next_ready.end(); ++ti) {
    total_ticks += (*ti)->_run_ticks;
    total_weight += (*ti)->_priority_weight;
  }

  nassertr(total_weight != 0.0, 0.0);
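  // budget_ratio is this thread's fair share of the epoch. For example, with
  // the default weights above, a TP_high thread (weight 5.0) competing with
  // four TP_normal threads (weight 1.0 each) is budgeted 5/9 of the epoch;
  // run_ratio below is the fraction of the ready threads' recent runtime this
  // thread actually consumed, and the difference is what it is still owed.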
  double budget_ratio = chosen_thread->_priority_weight / total_weight;

  if (total_ticks == 0) {
    // This must be the first thread. Special case.
    return budget_ratio * _simple_thread_epoch_timeslice;
  }

  double run_ratio = (double)chosen_thread->_run_ticks / (double)total_ticks;
  double remaining_ratio = budget_ratio - run_ratio;

  if (thread_cat->is_debug()) {
    thread_cat.debug()
      << *chosen_thread->_parent_obj << " accrued "
      << chosen_thread->_run_ticks / _tick_scale << " s of "
      << total_ticks / _tick_scale << "; budget is "
      << budget_ratio * total_ticks / _tick_scale << ".\n";
    if (remaining_ratio <= 0.0) {
      thread_cat.debug()
        << "Exceeded budget.\n";
    }
  }

  return remaining_ratio * _simple_thread_epoch_timeslice;
}

/**
 * Removes any non-joinable threads from the indicated queue and marks them
 * killed.
 */
void ThreadSimpleManager::
kill_non_joinable(ThreadSimpleManager::FifoThreads &threads) {
  FifoThreads new_threads;
  FifoThreads::iterator ti;
  for (ti = threads.begin(); ti != threads.end(); ++ti) {
    ThreadSimpleImpl *thread = (*ti);
    if (thread->_joinable) {
      new_threads.push_back(thread);
    } else {
      if (thread_cat->is_debug()) {
        thread_cat.debug()
          << "Killing " << *thread->_parent_obj << "\n";
      }
      thread->_status = ThreadSimpleImpl::TS_killed;
      enqueue_finished(thread);
    }
  }

  threads.swap(new_threads);
}

/**
 * Removes any non-joinable threads from the indicated queue and marks them
 * killed.
 */
void ThreadSimpleManager::
kill_non_joinable(ThreadSimpleManager::Sleeping &threads) {
  Sleeping new_threads;
  Sleeping::iterator ti;
  for (ti = threads.begin(); ti != threads.end(); ++ti) {
    ThreadSimpleImpl *thread = (*ti);
    if (thread->_joinable) {
      new_threads.push_back(thread);
    } else {
      if (thread_cat->is_debug()) {
        thread_cat.debug()
          << "Killing " << *thread->_parent_obj << "\n";
      }
      thread->_status = ThreadSimpleImpl::TS_killed;
      enqueue_finished(thread);
    }
  }
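  // Removing elements may have broken the heap ordering, so rebuild the heap
  // on the surviving sleepers before swapping them back in.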
  make_heap(new_threads.begin(), new_threads.end(), CompareStartTime());
  threads.swap(new_threads);
}

#endif // THREAD_SIMPLE_IMPL