PStatCollector PStatClient::_heap_total_size_pcollector(
  "System memory:Heap");
PStatCollector PStatClient::_heap_overhead_size_pcollector(
  "System memory:Heap:Overhead");
PStatCollector PStatClient::_heap_single_size_pcollector(
  "System memory:Heap:Single");
PStatCollector PStatClient::_heap_single_other_size_pcollector(
  "System memory:Heap:Single:Other");
PStatCollector PStatClient::_heap_array_size_pcollector(
  "System memory:Heap:Array");
PStatCollector PStatClient::_heap_array_other_size_pcollector(
  "System memory:Heap:Array:Other");
PStatCollector PStatClient::_heap_external_size_pcollector(
  "System memory:Heap:External");
PStatCollector PStatClient::_mmap_size_pcollector(
  "System memory:MMap");
PStatCollector PStatClient::_mmap_nf_unused_size_pcollector(
  "System memory:MMap:NeverFree:Unused");
PStatCollector PStatClient::_mmap_dc_active_other_size_pcollector(
  "System memory:MMap:NeverFree:Active:Other");
PStatCollector PStatClient::_mmap_dc_inactive_other_size_pcollector(
  "System memory:MMap:NeverFree:Inactive:Other");
PStatCollector PStatClient::_clock_wait_pcollector(
  "Wait:Clock Wait:Sleep");
PStatCollector PStatClient::_clock_busy_wait_pcollector(
  "Wait:Clock Wait:Spin");
PStatCollector PStatClient::_thread_block_pcollector(
  "Wait:Thread block");
// Holds the per-memory-class collectors created for each TypeHandle in
// main_tick(), below.
class TypeHandleCollector {
public:
  PStatCollector _mem_class[TypeHandle::MC_limit];
};
typedef pvector<TypeHandleCollector> TypeHandleCols;
static TypeHandleCols type_handle_cols;
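
/*
 * Illustrative sketch (not part of this file): application code defines its
 * own collectors with the same colon-separated naming convention used
 * above; each ':' introduces a nested level under its parent in the PStats
 * view.  The names "App:Physics" and "App:Physics:Integrate" are
 * hypothetical.
 *
 *   #include "pStatCollector.h"
 *
 *   static PStatCollector physics_collector("App:Physics");
 *   static PStatCollector integrate_collector("App:Physics:Integrate");
 *
 * Constructing a PStatCollector by name looks up (or creates) the collector
 * on the global PStatClient, so declaring these as statics and reusing them
 * every frame is cheap.
 */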
PStatClient::PerThreadData::
PerThreadData() {
  _has_level = false;
  _level = 0.0;
  _nested_count = 0;
}

PStatClient::
PStatClient() :
  _lock("PStatClient::_lock")
{
  _collectors = nullptr;

  // Collector index 0 is always the "Frame" collector, which measures the
  // total frame time.
  Collector *collector = new Collector(0, "Frame");
  add_collector(collector);
}
void PStatClient::
set_client_name(const string &name) {
  get_impl()->set_client_name(name);
}

string PStatClient::
get_client_name() const {
  return get_impl()->get_client_name();
}
void PStatClient::
set_max_rate(double rate) {
  get_impl()->set_max_rate(rate);
}

double PStatClient::
get_max_rate() const {
  return get_impl()->get_max_rate();
}
PStatCollector PStatClient::
get_collector(int index) const {
  return PStatCollector((PStatClient *)this, index);
}

string PStatClient::
get_collector_name(int index) const {
  return get_collector_ptr(index)->get_name();
}
string PStatClient::
get_collector_fullname(int index) const {
  Collector *collector = get_collector_ptr(index);
  int parent_index = collector->get_parent_index();
  if (parent_index == 0) {
    return collector->get_name();
  } else {
    return get_collector_fullname(parent_index) + ":" + collector->get_name();
  }
}
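
/*
 * Sketch of the name vs. fullname distinction, assuming a collector created
 * from the hypothetical name "App:Physics:Integrate":
 *
 *   PStatClient *client = PStatClient::get_global_pstats();
 *   PStatCollector col("App:Physics:Integrate");
 *   client->get_collector_name(col.get_index());      // "Integrate"
 *   client->get_collector_fullname(col.get_index());  // "App:Physics:Integrate"
 *
 * get_collector_fullname() walks the parent indices back toward collector 0
 * ("Frame"), joining each level with ':'.
 */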
PStatThread PStatClient::
get_thread(int index) const {
  nassertr(index >= 0 && index < _num_threads, PStatThread());
  return PStatThread((PStatClient *)this, index);
}
PStatThread PStatClient::
get_main_thread() const {
  return PStatThread((PStatClient *)this, 0);
}

PStatThread PStatClient::
get_current_thread() const {
  if (!client_is_connected()) {
    return get_main_thread();
  }
  return do_get_current_thread();
}
double PStatClient::
get_real_time() const {
  return _impl->get_real_time();
}
void PStatClient::
main_tick() {
#ifdef DO_MEMORY_USAGE
  // Report per-TypeHandle memory usage.
  TypeRegistry *type_reg = TypeRegistry::ptr();
  int num_typehandles = type_reg->get_num_typehandles();

  while ((int)type_handle_cols.size() < num_typehandles) {
    type_handle_cols.push_back(TypeHandleCollector());
  }

  // First, add up the total usage being tracked in each memory class.
  size_t single_total_usage = 0;
  size_t array_total_usage = 0;
  size_t dc_active_total_usage = 0;
  size_t dc_inactive_total_usage = 0;

  int i;
  for (i = 0; i < num_typehandles; ++i) {
    TypeHandle type = type_reg->get_typehandle(i);
    for (int mi = 0; mi < (int)TypeHandle::MC_limit; ++mi) {
      TypeHandle::MemoryClass mc = (TypeHandle::MemoryClass)mi;
      size_t usage = type.get_memory_usage(mc);
      switch (mc) {
      case TypeHandle::MC_singleton:
        single_total_usage += usage;
        break;
      case TypeHandle::MC_array:
        array_total_usage += usage;
        break;
      case TypeHandle::MC_deleted_chain_active:
        dc_active_total_usage += usage;
        break;
      case TypeHandle::MC_deleted_chain_inactive:
        dc_inactive_total_usage += usage;
        break;
      case TypeHandle::MC_limit:
        break;
      }
    }
  }

  // Only types using more than this threshold are reported individually;
  // the rest are lumped into the "Other" collectors.
  size_t min_usage = (single_total_usage + array_total_usage +
                      dc_active_total_usage + dc_inactive_total_usage) / 1024;
  if (!pstats_mem_other) {
    min_usage = 0;
  }

  size_t single_other_usage = single_total_usage;
  size_t array_other_usage = array_total_usage;
  size_t dc_active_other_usage = dc_active_total_usage;
  size_t dc_inactive_other_usage = dc_inactive_total_usage;

  for (i = 0; i < num_typehandles; ++i) {
    TypeHandle type = type_reg->get_typehandle(i);
    for (int mi = 0; mi < (int)TypeHandle::MC_limit; ++mi) {
      TypeHandle::MemoryClass mc = (TypeHandle::MemoryClass)mi;
      size_t usage = type.get_memory_usage(mc);
      PStatCollector &col = type_handle_cols[i]._mem_class[mi];
      if (usage > min_usage || col.is_valid()) {
        // This type is significant enough to get its own collector.
        if (!col.is_valid()) {
          const char *category = "";
          switch (mc) {
          case TypeHandle::MC_singleton:
            category = "Heap:Single";
            break;
          case TypeHandle::MC_array:
            category = "Heap:Array";
            break;
          case TypeHandle::MC_deleted_chain_active:
            category = "MMap:NeverFree:Active";
            break;
          case TypeHandle::MC_deleted_chain_inactive:
            category = "MMap:NeverFree:Inactive";
            break;
          case TypeHandle::MC_limit:
            break;
          }
          std::ostringstream strm;
          strm << "System memory:" << category << ":" << type;
          col = PStatCollector(strm.str());
        }
        col.set_level(usage);

        // Whatever is reported individually is subtracted from "Other".
        switch (mc) {
        case TypeHandle::MC_singleton:
          single_other_usage -= usage;
          break;
        case TypeHandle::MC_array:
          array_other_usage -= usage;
          break;
        case TypeHandle::MC_deleted_chain_active:
          dc_active_other_usage -= usage;
          break;
        case TypeHandle::MC_deleted_chain_inactive:
          dc_inactive_other_usage -= usage;
          break;
        case TypeHandle::MC_limit:
          break;
        }
      }
    }
  }

  _heap_single_other_size_pcollector.set_level(single_other_usage);
  _heap_array_other_size_pcollector.set_level(array_other_usage);
  _mmap_dc_active_other_size_pcollector.set_level(dc_active_other_usage);
  _mmap_dc_inactive_other_size_pcollector.set_level(dc_inactive_other_usage);
#endif  // DO_MEMORY_USAGE

  get_global_pstats()->client_main_tick();
}
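
/*
 * Usage sketch: an application is expected to call the static
 * PStatClient::main_tick() once per frame; it reports the memory levels
 * above and then starts a new frame on every thread synced to "Main".  The
 * loop and the helpers below are hypothetical.
 *
 *   #include "pStatClient.h"
 *
 *   while (app_is_running()) {    // hypothetical main loop
 *     do_frame();                 // hypothetical per-frame work
 *     PStatClient::main_tick();   // flush this frame's data to the server
 *   }
 */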
void PStatClient::
thread_tick(const string &sync_name) {
  get_global_pstats()->client_thread_tick(sync_name);
}
void PStatClient::
client_main_tick() {
  if (!_impl->client_is_connected()) {
    client_disconnect();
    return;
  }
  _impl->client_main_tick();

  MultiThingsByName::const_iterator ni =
    _threads_by_sync_name.find("Main");
  if (ni != _threads_by_sync_name.end()) {
    const vector_int &indices = (*ni).second;
    for (vector_int::const_iterator vi = indices.begin();
         vi != indices.end(); ++vi) {
      _impl->new_frame(*vi);
    }
  }
}
void PStatClient::
client_thread_tick(const string &sync_name) {
  MultiThingsByName::const_iterator ni =
    _threads_by_sync_name.find(sync_name);
  if (ni != _threads_by_sync_name.end()) {
    const vector_int &indices = (*ni).second;
    for (vector_int::const_iterator vi = indices.begin();
         vi != indices.end(); ++vi) {
      _impl->new_frame(*vi);
    }
  }
}
bool PStatClient::
client_connect(string hostname, int port) {
  client_disconnect();
  return get_impl()->client_connect(hostname, port);
}

void PStatClient::
client_disconnect() {
  if (has_impl()) {
    _impl->client_disconnect();
  }

  // Reset all of the per-thread and per-collector state.
  ThreadPointer *threads = (ThreadPointer *)_threads;
  for (int ti = 0; ti < _num_threads; ++ti) {
    InternalThread *thread = threads[ti];
    thread->_frame_number = 0;
    thread->_is_active = false;
    thread->_next_packet = 0.0;
    thread->_frame_data.clear();
  }

  CollectorPointer *collectors = (CollectorPointer *)_collectors;
  for (int ci = 0; ci < _num_collectors; ++ci) {
    Collector *collector = collectors[ci];
    PerThread::iterator ii;
    for (ii = collector->_per_thread.begin();
         ii != collector->_per_thread.end();
         ++ii) {
      (*ii)._nested_count = 0;
    }
  }
}
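
/*
 * Connection sketch: a client typically connects once at startup and then
 * just keeps ticking frames; client_main_tick() above notices a dropped
 * connection and calls client_disconnect().  The hostname and the default
 * PStats port (5185, unless pstats-port is configured otherwise) are
 * assumptions of this example.
 *
 *   PStatClient *client = PStatClient::get_global_pstats();
 *   if (client->client_connect("localhost", 5185)) {
 *     // Connected; collector data is sent on every main_tick().
 *   }
 */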
bool PStatClient::
client_is_connected() const {
  return has_impl() && _impl->client_is_connected();
}

void PStatClient::
client_resume_after_pause() {
  if (has_impl()) {
    _impl->client_resume_after_pause();
  }
}
PStatClient *PStatClient::
get_global_pstats() {
  if (_global_pstats == nullptr) {
    _global_pstats = new PStatClient;

    ClockObject::_start_clock_wait = start_clock_wait;
    ClockObject::_start_clock_busy_wait = start_clock_busy_wait;
    ClockObject::_stop_clock_wait = stop_clock_wait;
  }
  return _global_pstats;
}
PStatCollector PStatClient::
make_collector_with_relname(int parent_index, string relname) {
  if (relname.empty()) {
    relname = "Unnamed";
  }

  // Skip any colons at the beginning of the name.
  size_t start = 0;
  while (start < relname.size() && relname[start] == ':') {
    start++;
  }

  // If the name contains a colon, we are making a nested collector.
  size_t colon = relname.find(':', start);
  while (colon != string::npos) {
    string parent_name = relname.substr(start, colon - start);
    PStatCollector parent_collector =
      make_collector_with_name(parent_index, parent_name);
    parent_index = parent_collector._index;
    relname = relname.substr(colon + 1);
    start = 0;
    colon = relname.find(':');
  }

  string name = relname.substr(start);
  return make_collector_with_name(parent_index, name);
}
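
/*
 * Sketch of relname resolution: PStatCollector's string constructor funnels
 * into make_collector_with_relname(), which splits on ':' so that each
 * segment becomes a child of the previous one.  The two collectors below
 * therefore end up with the same index.  "App" and "Cull" are hypothetical
 * names.
 *
 *   PStatCollector a("App:Cull");
 *
 *   PStatCollector app("App");
 *   PStatCollector b(app, "Cull");
 *
 *   // a.get_index() == b.get_index()
 */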
PStatCollector PStatClient::
make_collector_with_name(int parent_index, const string &name) {
  nassertr(parent_index >= 0 && parent_index < _num_collectors,
           PStatCollector());

  Collector *parent = get_collector_ptr(parent_index);

  // A special case: a child with the same name as its parent is the parent.
  // That is, "Frame:Frame" is the same collector as "Frame".
  if (parent->get_name() == name) {
    return PStatCollector((PStatClient *)this, parent_index);
  }

  ThingsByName::const_iterator ni = parent->_children.find(name);
  if (ni != parent->_children.end()) {
    // We already have a collector by this name; return it.
    int index = (*ni).second;
    nassertr(index >= 0 && index < _num_collectors, PStatCollector());
    return PStatCollector((PStatClient *)this, index);
  }

  // Define a new collector for this name.
  int new_index = _num_collectors;
  parent->_children.insert(ThingsByName::value_type(name, new_index));

  Collector *collector = new Collector(parent_index, name);
  while ((int)collector->_per_thread.size() < _num_threads) {
    collector->_per_thread.push_back(PerThreadData());
  }
  add_collector(collector);

  return PStatCollector((PStatClient *)this, new_index);
}
PStatThread PStatClient::
do_get_current_thread() const {
  Thread *thread = Thread::get_current_thread();
  int thread_index = thread->get_pstats_index();
  if (thread_index != -1) {
    return PStatThread((PStatClient *)this, thread_index);
  }

  // This is the first time we have encountered this thread; make a new
  // PStatThread for it.
  return ((PStatClient *)this)->do_make_thread(thread);
}
PStatThread PStatClient::
make_thread(Thread *thread) {
  return do_make_thread(thread);
}
PStatThread PStatClient::
do_make_thread(Thread *thread) {
  int thread_index = thread->get_pstats_index();
  if (thread_index != -1) {
    return PStatThread((PStatClient *)this, thread_index);
  }

  MultiThingsByName::const_iterator ni =
    _threads_by_name.find(thread->get_name());
  if (ni != _threads_by_name.end()) {
    const vector_int &indices = (*ni).second;
    for (vector_int::const_iterator vi = indices.begin();
         vi != indices.end(); ++vi) {
      int index = (*vi);
      nassertr(index >= 0 && index < _num_threads, PStatThread());
      ThreadPointer *threads = (ThreadPointer *)_threads;
      if (threads[index]->_thread.was_deleted() &&
          threads[index]->_sync_name == thread->get_sync_name()) {
        threads[index]->_thread = thread;
        thread->set_pstats_index(index);
        return PStatThread((PStatClient *)this, index);
      }
    }
  }

  int new_index = _num_threads;
  thread->set_pstats_index(new_index);

  InternalThread *pthread = new InternalThread(thread);
  add_thread(pthread);
  return PStatThread((PStatClient *)this, new_index);
}
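
/*
 * Thread sketch: a thread becomes visible to PStats the first time it is
 * passed through make_thread() (or implicitly via get_current_thread()).
 * Timing on that thread then uses the explicit-thread overloads of
 * PStatCollector.  The collector name "App:Load" is hypothetical.
 *
 *   Thread *current = Thread::get_current_thread();
 *   PStatClient *client = PStatClient::get_global_pstats();
 *   PStatThread pthread = client->make_thread(current);
 *
 *   static PStatCollector load_collector("App:Load");
 *   load_collector.start(pthread);
 *   // ... work on this thread ...
 *   load_collector.stop(pthread);
 */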
PStatThread PStatClient::
make_gpu_thread(const string &name) {
  int new_index = _num_threads;

  InternalThread *pthread = new InternalThread(name, "GPU");
  add_thread(pthread);
  return PStatThread((PStatClient *)this, new_index);
}
bool PStatClient::
is_active(int collector_index, int thread_index) const {
  nassertr(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors), false);
  nassertr(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads), false);

  return (client_is_connected() &&
          get_collector_ptr(collector_index)->is_active() &&
          get_thread_ptr(thread_index)->_is_active);
}
bool PStatClient::
is_started(int collector_index, int thread_index) const {
  nassertr(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors), false);
  nassertr(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads), false);

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  if (client_is_connected() && collector->is_active() && thread->_is_active) {
    if (collector->_per_thread[thread_index]._nested_count == 0) {
      return false;
    }
    return true;
  }
  return false;
}
void PStatClient::
start(int collector_index, int thread_index) {
  if (!client_is_connected()) {
    return;
  }
  nassertv(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors));
  nassertv(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads));

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  if (collector->is_active() && thread->_is_active) {
    if (collector->_per_thread[thread_index]._nested_count == 0) {
      // This collector wasn't already started in this thread; record a new
      // data point.
      if (thread->_thread_active) {
        thread->_frame_data.add_start(collector_index, get_real_time());
      }
    }
    collector->_per_thread[thread_index]._nested_count++;
  }
}
void PStatClient::
start(int collector_index, int thread_index, double as_of) {
  if (!client_is_connected()) {
    return;
  }
  nassertv(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors));
  nassertv(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads));

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  if (collector->is_active() && thread->_is_active) {
    if (collector->_per_thread[thread_index]._nested_count == 0) {
      // Record the data point as of the indicated time, rather than now.
      if (thread->_thread_active) {
        thread->_frame_data.add_start(collector_index, as_of);
      }
    }
    collector->_per_thread[thread_index]._nested_count++;
  }
}
void PStatClient::
stop(int collector_index, int thread_index) {
  if (!client_is_connected()) {
    return;
  }
  nassertv(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors));
  nassertv(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads));

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  if (collector->is_active() && thread->_is_active) {
    if (collector->_per_thread[thread_index]._nested_count == 0) {
      if (pstats_cat.is_debug()) {
        pstats_cat.debug()
          << "Collector " << get_collector_fullname(collector_index)
          << " was already stopped in thread "
          << get_thread_name(thread_index) << "!\n";
      }
      return;
    }

    collector->_per_thread[thread_index]._nested_count--;

    if (collector->_per_thread[thread_index]._nested_count == 0) {
      if (thread->_thread_active) {
        thread->_frame_data.add_stop(collector_index, get_real_time());
      }
    }
  }
}
void PStatClient::
stop(int collector_index, int thread_index, double as_of) {
  if (!client_is_connected()) {
    return;
  }
  nassertv(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors));
  nassertv(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads));

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  if (collector->is_active() && thread->_is_active) {
    if (collector->_per_thread[thread_index]._nested_count == 0) {
      if (pstats_cat.is_debug()) {
        pstats_cat.debug()
          << "Collector " << get_collector_fullname(collector_index)
          << " was already stopped in thread "
          << get_thread_name(thread_index) << "!\n";
      }
      return;
    }

    collector->_per_thread[thread_index]._nested_count--;

    if (collector->_per_thread[thread_index]._nested_count == 0) {
      thread->_frame_data.add_stop(collector_index, as_of);
    }
  }
}
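
/*
 * Timing sketch: application code rarely calls start()/stop() by index; it
 * normally uses PStatCollector::start()/stop() or the RAII PStatTimer,
 * which stops the collector even on early return.  The collector name is
 * hypothetical.
 *
 *   #include "pStatTimer.h"
 *
 *   static PStatCollector collide_collector("App:Collide");
 *
 *   void do_collisions() {
 *     PStatTimer timer(collide_collector);  // start() now, stop() at scope exit
 *     // ... work to be timed ...
 *   }
 *
 * As the _nested_count logic above shows, nested start() calls on the same
 * collector and thread are counted, and only the outermost pair produces
 * data points.
 */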
void PStatClient::
clear_level(int collector_index, int thread_index) {
  if (!client_is_connected()) {
    return;
  }
  nassertv(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors));
  nassertv(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads));

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  collector->_per_thread[thread_index]._has_level = true;
  collector->_per_thread[thread_index]._level = 0.0;
}
void PStatClient::
set_level(int collector_index, int thread_index, double level) {
  if (!client_is_connected()) {
    return;
  }
  nassertv(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors));
  nassertv(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads));

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  // The _factor from the collector def scales the raw value into the units
  // reported to the server.
  level *= collector->get_def(this, collector_index)->_factor;

  collector->_per_thread[thread_index]._has_level = true;
  collector->_per_thread[thread_index]._level = level;
}
void PStatClient::
add_level(int collector_index, int thread_index, double increment) {
  if (!client_is_connected()) {
    return;
  }
  nassertv(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors));
  nassertv(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads));

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  increment *= collector->get_def(this, collector_index)->_factor;

  collector->_per_thread[thread_index]._has_level = true;
  collector->_per_thread[thread_index]._level += increment;
}
double PStatClient::
get_level(int collector_index, int thread_index) const {
  if (!client_is_connected()) {
    return 0.0;
  }
  nassertr(collector_index >= 0 &&
           collector_index < AtomicAdjust::get(_num_collectors), 0.0f);
  nassertr(thread_index >= 0 &&
           thread_index < AtomicAdjust::get(_num_threads), 0.0f);

  Collector *collector = get_collector_ptr(collector_index);
  InternalThread *thread = get_thread_ptr(thread_index);

  // Undo the _factor scaling applied in set_level()/add_level().
  double factor = collector->get_def(this, collector_index)->_factor;
  return collector->_per_thread[thread_index]._level / factor;
}
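
/*
 * Level sketch: a collector can report a "level" (a quantity such as bytes
 * or a count) instead of, or in addition to, elapsed time; the memory
 * collectors above work this way.  The collector name and the callbacks are
 * hypothetical.
 *
 *   static PStatCollector texmem_collector("Texture memory");
 *
 *   void on_texture_loaded(size_t num_bytes) {
 *     texmem_collector.add_level(num_bytes);
 *   }
 *   void on_texture_freed(size_t num_bytes) {
 *     texmem_collector.sub_level(num_bytes);
 *   }
 *
 * The _factor applied in set_level()/add_level() comes from the collector's
 * PStatCollectorDef and converts the raw value into the units displayed by
 * the server.
 */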
void PStatClient::
start_clock_wait() {
  _clock_wait_pcollector.start();
}

void PStatClient::
start_clock_busy_wait() {
  _clock_wait_pcollector.stop();
  _clock_busy_wait_pcollector.start();
}

void PStatClient::
stop_clock_wait() {
  _clock_busy_wait_pcollector.stop();
}
void PStatClient::
add_collector(PStatClient::Collector *collector) {
  if (_num_collectors >= _collectors_size) {
    // Grow the array.  Other threads may be reading it without holding the
    // lock, so build a new copy and atomically publish the new pointer.
    int new_collectors_size = (_collectors_size == 0) ? 128 : _collectors_size * 2;
    CollectorPointer *new_collectors = new CollectorPointer[new_collectors_size];
    if (_collectors != nullptr) {
      memcpy(new_collectors, _collectors,
             _num_collectors * sizeof(CollectorPointer));
    }
    new_collectors[_num_collectors] = collector;
    _collectors_size = new_collectors_size;
    AtomicAdjust::set_ptr(_collectors, new_collectors);

  } else {
    CollectorPointer *collectors = (CollectorPointer *)_collectors;
    collectors[_num_collectors] = collector;
  }
  AtomicAdjust::inc(_num_collectors);
}
void PStatClient::
add_thread(PStatClient::InternalThread *thread) {
  _threads_by_name[thread->_name].push_back(_num_threads);
  _threads_by_sync_name[thread->_sync_name].push_back(_num_threads);

  if (_num_threads >= _threads_size) {
    // Grow the array, publishing the new pointer atomically as above.
    int new_threads_size = (_threads_size == 0) ? 128 : _threads_size * 2;
    ThreadPointer *new_threads = new ThreadPointer[new_threads_size];
    if (_threads != nullptr) {
      memcpy(new_threads, _threads, _num_threads * sizeof(ThreadPointer));
    }
    new_threads[_num_threads] = thread;
    _threads_size = new_threads_size;
    AtomicAdjust::set_ptr(_threads, new_threads);

  } else {
    ThreadPointer *threads = (ThreadPointer *)_threads;
    threads[_num_threads] = thread;
  }
  AtomicAdjust::inc(_num_threads);

  // Each existing collector gets a PerThreadData slot for the new thread.
  CollectorPointer *collectors = (CollectorPointer *)_collectors;
  for (int ci = 0; ci < _num_collectors; ++ci) {
    Collector *collector = collectors[ci];
    collector->_per_thread.push_back(PerThreadData());
    nassertv((int)collector->_per_thread.size() == _num_threads);
  }
}
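
/*
 * Design note: add_collector() and add_thread() grow their arrays by
 * building a larger copy, filling in the new slot, and only then publishing
 * the new pointer (AtomicAdjust::set_ptr) and bumping the count
 * (AtomicAdjust::inc), so readers that check the count before indexing never
 * see a partially-built entry.  A generic, self-contained sketch of the same
 * publish-then-count pattern (plain std::atomic, not Panda's types):
 *
 *   #include <atomic>
 *   #include <cstring>
 *
 *   struct Table {
 *     std::atomic<int *> items{nullptr};
 *     std::atomic<int> count{0};
 *     int capacity = 0;
 *
 *     void push(int value) {              // single writer
 *       int n = count.load(std::memory_order_relaxed);
 *       if (n >= capacity) {
 *         int new_cap = (capacity == 0) ? 128 : capacity * 2;
 *         int *bigger = new int[new_cap];
 *         if (items.load() != nullptr) {
 *           std::memcpy(bigger, items.load(), n * sizeof(int));
 *         }
 *         bigger[n] = value;
 *         capacity = new_cap;
 *         items.store(bigger, std::memory_order_release);  // old array leaks
 *       } else {
 *         items.load()[n] = value;
 *       }
 *       count.store(n + 1, std::memory_order_release);
 *     }
 *
 *     bool get(int index, int &out) const {  // concurrent readers
 *       if (index < count.load(std::memory_order_acquire)) {
 *         out = items.load(std::memory_order_acquire)[index];
 *         return true;
 *       }
 *       return false;
 *     }
 *   };
 *
 * The superseded array cannot be freed immediately, since another thread
 * may still be reading through the old pointer.
 */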
void PStatClient::
deactivate_hook(Thread *thread) {
  if (_impl == nullptr) {
    return;
  }
  int thread_index = thread->get_pstats_index();
  InternalThread *ithread = get_thread_ptr(thread_index);

  if (ithread->_thread_active) {
    // Charge the time this thread spends swapped out to "Wait:Thread block".
    double now = _impl->get_real_time();
    ithread->_frame_data.add_start(_thread_block_pcollector.get_index(), now);
    ithread->_thread_active = false;
  }
}
void PStatClient::
activate_hook(Thread *thread) {
  if (_impl == nullptr) {
    return;
  }
  int thread_index = thread->get_pstats_index();
  InternalThread *ithread = get_thread_ptr(thread_index);

  if (!ithread->_thread_active) {
    double now = _impl->get_real_time();
    ithread->_frame_data.add_stop(_thread_block_pcollector.get_index(), now);
    ithread->_thread_active = true;
  }
}
void PStatClient::Collector::
make_def(const PStatClient *client, int this_index) {
  if (_def == nullptr) {
    _def = new PStatCollectorDef(this_index, _name);
    if (_parent_index != this_index) {
      const PStatCollectorDef *parent_def =
        client->get_collector_def(_parent_index);
      _def->set_parent(*parent_def);
    }
    initialize_collector_def(client, _def);
  }
}
PStatClient::InternalThread::
InternalThread(Thread *thread) :
  _thread(thread),
  _name(thread->get_name()),
  _sync_name(thread->get_sync_name()),
  _is_active(false),
  _frame_number(0),
  _next_packet(0.0),
  _thread_active(true),
  _thread_lock(string("PStatClient::InternalThread ") + thread->get_name())
{
}

PStatClient::InternalThread::
InternalThread(const string &name, const string &sync_name) :
  _name(name),
  _sync_name(sync_name),
  _is_active(false),
  _frame_number(0),
  _next_packet(0.0),
  _thread_active(true),
  _thread_lock(string("PStatClient::InternalThread ") + name)
{
}
// The definitions below are the stub implementations used when the library
// is built without DO_PSTATS; they collect and report nothing.

void PStatClient::
set_client_name(const std::string &name) {
}

std::string PStatClient::
get_client_name() const {
  return std::string();
}

void PStatClient::
set_max_rate(double rate) {
}

double PStatClient::
get_max_rate() const {
  return 0.0;
}

PStatCollector PStatClient::
get_collector(int index) const {
  return PStatCollector();
}

std::string PStatClient::
get_collector_name(int index) const {
  return std::string();
}

std::string PStatClient::
get_collector_fullname(int index) const {
  return std::string();
}

PStatThread PStatClient::
get_thread(int index) const {
  return PStatThread();
}

double PStatClient::
get_real_time() const {
  return 0.0;
}

PStatThread PStatClient::
get_main_thread() const {
  return PStatThread();
}

PStatThread PStatClient::
get_current_thread() const {
  return get_main_thread();
}

PStatCollector PStatClient::
make_collector_with_relname(int parent_index, std::string relname) {
  return PStatCollector();
}

PStatThread PStatClient::
make_thread(Thread *thread) {
  return PStatThread();
}

void PStatClient::
thread_tick(const std::string &) {
}

void PStatClient::
client_thread_tick(const std::string &sync_name) {
}

bool PStatClient::
client_connect(std::string hostname, int port) {
  return false;
}

void PStatClient::
client_disconnect() {
}

bool PStatClient::
client_is_connected() const {
  return false;
}

void PStatClient::
client_resume_after_pause() {
}

PStatClient *PStatClient::
get_global_pstats() {
  static PStatClient global_pstats;
  return &global_pstats;
}

bool PStatClient::
is_active(int collector_index, int thread_index) const {
  return false;
}

bool PStatClient::
is_started(int collector_index, int thread_index) const {
  return false;
}

void PStatClient::
start(int collector_index, int thread_index) {
}

void PStatClient::
start(int collector_index, int thread_index, double as_of) {
}

void PStatClient::
stop(int collector_index, int thread_index) {
}

void PStatClient::
stop(int collector_index, int thread_index, double as_of) {
}

void PStatClient::
clear_level(int collector_index, int thread_index) {
}

void PStatClient::
set_level(int collector_index, int thread_index, double level) {
}

void PStatClient::
add_level(int collector_index, int thread_index, double increment) {
}

double PStatClient::
get_level(int collector_index, int thread_index) const {
  return 0.0;
}
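
/*
 * The stubs above are why application code does not need its own #ifdef
 * DO_PSTATS guards for the common calls: constructing collectors, timing
 * with PStatTimer, and calling PStatClient::main_tick() all reduce to
 * near-no-ops in a build without PStats support.  Illustrative, with a
 * hypothetical collector:
 *
 *   static PStatCollector app_collector("App");
 *
 *   void frame() {
 *     PStatTimer timer(app_collector);   // harmless when DO_PSTATS is off
 *     // ... per-frame work ...
 *   }
 */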