Panda3D
 All Classes Functions Variables Enumerations
dlmalloc.h
1 /*
2  Default header file for malloc-2.8.x, written by Doug Lea
3  and released to the public domain, as explained at
4  http://creativecommons.org/licenses/publicdomain.
5 
6  last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee)
7 
8  This header is for ANSI C/C++ only. You can set any of
9  the following #defines before including:
10 
11  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
12  was also compiled with this option, so all routines
13  have names starting with "dl".
14 
15  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
16  file will be #included AFTER <malloc.h>. This is needed only if
17  your system defines a struct mallinfo that is incompatible with the
18  standard one declared here. Otherwise, you can include this file
19  INSTEAD of your system <malloc.h>. At least on ANSI, all
20  declarations should be compatible with system versions
21 
22  * If MSPACES is defined, declarations for mspace versions are included.
23 */
24 
25 #ifndef MALLOC_280_H
26 #define MALLOC_280_H
27 
28 #ifdef __cplusplus
29 extern "C" {
30 #endif
31 
32 #include <stddef.h> /* for size_t */
33 
34 #if !ONLY_MSPACES
35 
36 #ifndef USE_DL_PREFIX
37 #define dlcalloc calloc
38 #define dlfree free
39 #define dlmalloc malloc
40 #define dlmemalign memalign
41 #define dlrealloc realloc
42 #define dlvalloc valloc
43 #define dlpvalloc pvalloc
44 #define dlmallinfo mallinfo
45 #define dlmallopt mallopt
46 #define dlmalloc_trim malloc_trim
47 #define dlmalloc_stats malloc_stats
48 #define dlmalloc_usable_size malloc_usable_size
49 #define dlmalloc_footprint malloc_footprint
50 #define dlindependent_calloc independent_calloc
51 #define dlindependent_comalloc independent_comalloc
52 #endif /* USE_DL_PREFIX */
53 
54 
55 /*
56  malloc(size_t n)
57  Returns a pointer to a newly allocated chunk of at least n bytes, or
58  null if no space is available, in which case errno is set to ENOMEM
59  on ANSI C systems.
60 
61  If n is zero, malloc returns a minimum-sized chunk. (The minimum
62  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
63  systems.) Note that size_t is an unsigned type, so calls with
64  arguments that would be negative if signed are interpreted as
65  requests for huge amounts of space, which will often fail. The
66  maximum supported value of n differs across systems, but is in all
67  cases less than the maximum representable value of a size_t.
68 */
69 void* dlmalloc(size_t);
70 
71 /*
72  free(void* p)
73  Releases the chunk of memory pointed to by p, that had been previously
74  allocated using malloc or a related routine such as realloc.
75  It has no effect if p is null. If p was not malloced or already
76  freed, free(p) will by default cause the current program to abort.
77 */
78 void dlfree(void*);
79 
80 /*
81  calloc(size_t n_elements, size_t element_size);
82  Returns a pointer to n_elements * element_size bytes, with all locations
83  set to zero.
84 */
85 void* dlcalloc(size_t, size_t);
86 
87 /*
88  realloc(void* p, size_t n)
89  Returns a pointer to a chunk of size n that contains the same data
90  as does chunk p up to the minimum of (n, p's size) bytes, or null
91  if no space is available.
92 
93  The returned pointer may or may not be the same as p. The algorithm
94  prefers extending p in most cases when possible, otherwise it
95  employs the equivalent of a malloc-copy-free sequence.
96 
97  If p is null, realloc is equivalent to malloc.
98 
99  If space is not available, realloc returns null, errno is set (if on
100  ANSI) and p is NOT freed.
101 
102  if n is for fewer bytes than already held by p, the newly unused
103  space is lopped off and freed if possible. realloc with a size
104  argument of zero (re)allocates a minimum-sized chunk.
105 
106  The old unix realloc convention of allowing the last-free'd chunk
107  to be used as an argument to realloc is not supported.
108 */
109 
110 void* dlrealloc(void*, size_t);
111 
112 /*
113  memalign(size_t alignment, size_t n);
114  Returns a pointer to a newly allocated chunk of n bytes, aligned
115  in accord with the alignment argument.
116 
117  The alignment argument should be a power of two. If the argument is
118  not a power of two, the nearest greater power is used.
119  8-byte alignment is guaranteed by normal malloc calls, so don't
120  bother calling memalign with an argument of 8 or less.
121 
122  Overreliance on memalign is a sure way to fragment space.
123 */
124 void* dlmemalign(size_t, size_t);
125 
126 /*
127  valloc(size_t n);
128  Equivalent to memalign(pagesize, n), where pagesize is the page
129  size of the system. If the pagesize is unknown, 4096 is used.
130 */
131 void* dlvalloc(size_t);
132 
133 /*
134  mallopt(int parameter_number, int parameter_value)
135  Sets tunable parameters. The format is to provide a
136  (parameter-number, parameter-value) pair. mallopt then sets the
137  corresponding parameter to the argument value if it can (i.e., so
138  long as the value is meaningful), and returns 1 if successful else
139  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
140  normally defined in malloc.h. None of these are used in this malloc,
141  so setting them has no effect. But this malloc also supports other
142  options in mallopt:
143 
144  Symbol param # default allowed param values
145  M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
146  M_GRANULARITY -2 page size any power of 2 >= page size
147  M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
148 */
149 int dlmallopt(int, int);
150 
151 #ifndef M_TRIM_THRESHOLD
152 #define M_TRIM_THRESHOLD (-1)
153 #endif
154 #ifndef M_GRANULARITY
155 #define M_GRANULARITY (-2)
156 #endif
157 #ifndef M_MMAP_THRESHOLD
158 #define M_MMAP_THRESHOLD (-3)
159 #endif
160 
/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
/* Declared with (void): an empty parameter list in a C declaration means
   "unspecified arguments" and defeats compile-time argument checking. */
size_t dlmalloc_footprint(void);
171 
172 #if !NO_MALLINFO
173 /*
174  mallinfo()
175  Returns (by copy) a struct containing various summary statistics:
176 
177  arena: current total non-mmapped bytes allocated from system
178  ordblks: the number of free chunks
179  smblks: always zero.
180  hblks: current number of mmapped regions
181  hblkhd: total bytes held in mmapped regions
182  usmblks: the maximum total allocated space. This will be greater
183  than current total if trimming has occurred.
184  fsmblks: always zero
185  uordblks: current total allocated space (normal or mmapped)
186  fordblks: total free space
187  keepcost: the maximum number of bytes that could ideally be released
188  back to system via malloc_trim. ("ideally" means that
189  it ignores page restrictions etc.)
190 
191  Because these fields are ints, but internal bookkeeping may
192  be kept as longs, the reported values may wrap around zero and
193  thus be inaccurate.
194 */
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#ifndef _MALLOC_H
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
/*
  Summary statistics reported by dlmallinfo().  The field layout mirrors
  the SVID/XPG struct mallinfo so this declaration can stand in for the
  system one; fields noted "always 0" exist only for compatibility.
*/
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped bytes obtained from system */
  MALLINFO_FIELD_TYPE ordblks;  /* count of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* unused; always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* unused; always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* bytes held in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* peak total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* unused; always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* bytes in chunks currently in use */
  MALLINFO_FIELD_TYPE fordblks; /* bytes held in free chunks */
  MALLINFO_FIELD_TYPE keepcost; /* bytes releasable via malloc_trim */
};
#endif /* _MALLOC_H */
#endif /* HAVE_USR_INCLUDE_MALLOC_H */

struct mallinfo dlmallinfo(void);
216 #endif /* NO_MALLINFO */
217 
218 /*
219  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
220 
221  independent_calloc is similar to calloc, but instead of returning a
222  single cleared space, it returns an array of pointers to n_elements
223  independent elements that can hold contents of size elem_size, each
224  of which starts out cleared, and can be independently freed,
225  realloc'ed etc. The elements are guaranteed to be adjacently
226  allocated (this is not guaranteed to occur with multiple callocs or
227  mallocs), which may also improve cache locality in some
228  applications.
229 
230  The "chunks" argument is optional (i.e., may be null, which is
231  probably the most typical usage). If it is null, the returned array
232  is itself dynamically allocated and should also be freed when it is
233  no longer needed. Otherwise, the chunks array must be of at least
234  n_elements in length. It is filled in with the pointers to the
235  chunks.
236 
237  In either case, independent_calloc returns this pointer array, or
238  null if the allocation failed. If n_elements is zero and "chunks"
239  is null, it returns a chunk representing an array with zero elements
240  (which should be freed if not wanted).
241 
242  Each element must be individually freed when it is no longer
243  needed. If you'd like to instead be able to free all at once, you
244  should instead use regular calloc and assign pointers into this
245  space to represent elements. (In this case though, you cannot
246  independently free elements.)
247 
248  independent_calloc simplifies and speeds up implementations of many
249  kinds of pools. It may also be useful when constructing large data
250  structures that initially have a fixed number of fixed-sized nodes,
251  but the number is not known at compile time, and some of the nodes
252  may later need to be freed. For example:
253 
254  struct Node { int item; struct Node* next; };
255 
256  struct Node* build_list() {
257  struct Node** pool;
258  int n = read_number_of_nodes_needed();
259  if (n <= 0) return 0;
260  pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
261  if (pool == 0) die();
262  // organize into a linked list...
263  struct Node* first = pool[0];
264  for (i = 0; i < n-1; ++i)
265  pool[i]->next = pool[i+1];
266  free(pool); // Can now free the array (or not, if it is needed later)
267  return first;
268  }
269 */
270 void** dlindependent_calloc(size_t, size_t, void**);
271 
272 /*
273  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
274 
275  independent_comalloc allocates, all at once, a set of n_elements
276  chunks with sizes indicated in the "sizes" array. It returns
277  an array of pointers to these elements, each of which can be
278  independently freed, realloc'ed etc. The elements are guaranteed to
279  be adjacently allocated (this is not guaranteed to occur with
280  multiple callocs or mallocs), which may also improve cache locality
281  in some applications.
282 
283  The "chunks" argument is optional (i.e., may be null). If it is null
284  the returned array is itself dynamically allocated and should also
285  be freed when it is no longer needed. Otherwise, the chunks array
286  must be of at least n_elements in length. It is filled in with the
287  pointers to the chunks.
288 
289  In either case, independent_comalloc returns this pointer array, or
290  null if the allocation failed. If n_elements is zero and chunks is
291  null, it returns a chunk representing an array with zero elements
292  (which should be freed if not wanted).
293 
294  Each element must be individually freed when it is no longer
295  needed. If you'd like to instead be able to free all at once, you
296  should instead use a single regular malloc, and assign pointers at
297  particular offsets in the aggregate space. (In this case though, you
298  cannot independently free elements.)
299 
300  independent_comalloc differs from independent_calloc in that each
301  element may have a different size, and also that it does not
302  automatically clear elements.
303 
304  independent_comalloc can be used to speed up allocation in cases
305  where several structs or objects must always be allocated at the
306  same time. For example:
307 
308  struct Head { ... }
309  struct Foot { ... }
310 
311  void send_message(char* msg) {
312  int msglen = strlen(msg);
313  size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
314  void* chunks[3];
315  if (independent_comalloc(3, sizes, chunks) == 0)
316  die();
317  struct Head* head = (struct Head*)(chunks[0]);
318  char* body = (char*)(chunks[1]);
319  struct Foot* foot = (struct Foot*)(chunks[2]);
320  // ...
321  }
322 
323  In general though, independent_comalloc is worth using only for
324  larger values of n_elements. For small values, you probably won't
325  detect enough difference from series of malloc calls to bother.
326 
327  Overuse of independent_comalloc can increase overall memory usage,
328  since it cannot reuse existing noncontiguous small chunks that
329  might be available for some of the elements.
330 */
331 void** dlindependent_comalloc(size_t, size_t*, void**);
332 
333 
334 /*
335  pvalloc(size_t n);
336  Equivalent to valloc(minimum-page-that-holds(n)), that is,
337  round up n to nearest pagesize.
338  */
339 void* dlpvalloc(size_t);
340 
341 /*
342  malloc_trim(size_t pad);
343 
344  If possible, gives memory back to the system (via negative arguments
345  to sbrk) if there is unused memory at the `high' end of the malloc
346  pool or in unused MMAP segments. You can call this after freeing
347  large blocks of memory to potentially reduce the system-level memory
348  requirements of a program. However, it cannot guarantee to reduce
349  memory. Under some allocation patterns, some large free blocks of
350  memory will be locked between two used chunks, so they cannot be
351  given back to the system.
352 
353  The `pad' argument to malloc_trim represents the amount of free
354  trailing space to leave untrimmed. If this argument is zero, only
355  the minimum amount of memory to maintain internal data structures
356  will be left. Non-zero arguments can be supplied to maintain enough
357  trailing space to service future expected allocations without having
358  to re-obtain memory from the system.
359 
360  Malloc_trim returns 1 if it actually released any memory, else 0.
361 */
362 int dlmalloc_trim(size_t);
363 
364 /*
365  malloc_usable_size(void* p);
366 
367  Returns the number of bytes you can actually use in
368  an allocated chunk, which may be more than you requested (although
369  often not) due to alignment and minimum size constraints.
370  You can use this many bytes without worrying about
371  overwriting other allocated objects. This is not a particularly great
372  programming practice. malloc_usable_size can be more useful in
373  debugging and assertions, for example:
374 
375  p = malloc(n);
376  assert(malloc_usable_size(p) >= 256);
377 */
378 size_t dlmalloc_usable_size(void*);
379 
/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
/* Declared with (void): an empty parameter list in a C declaration means
   "unspecified arguments" and defeats compile-time argument checking. */
void dlmalloc_stats(void);
400 
401 #endif /* !ONLY_MSPACES */
402 
403 #if MSPACES
404 
405 /*
406  mspace is an opaque type representing an independent
407  region of space that supports mspace_malloc, etc.
408 */
409 typedef void* mspace;
410 
411 /*
412  create_mspace creates and returns a new independent space with the
413  given initial capacity, or, if 0, the default granularity size. It
414  returns null if there is no system memory available to create the
415  space. If argument locked is non-zero, the space uses a separate
416  lock to control access. The capacity of the space will grow
417  dynamically as needed to service mspace_malloc requests. You can
418  control the sizes of incremental increases of this space by
419  compiling with a different DEFAULT_GRANULARITY or dynamically
420  setting with mallopt(M_GRANULARITY, value).
421 */
422 mspace create_mspace(size_t capacity, int locked);
423 
424 /*
425  destroy_mspace destroys the given space, and attempts to return all
426  of its memory back to the system, returning the total number of
427  bytes freed. After destruction, the results of access to all memory
428  used by the space become undefined.
429 */
430 size_t destroy_mspace(mspace msp);
431 
432 /*
433  create_mspace_with_base uses the memory supplied as the initial base
434  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
435  space is used for bookkeeping, so the capacity must be at least this
436  large. (Otherwise 0 is returned.) When this initial space is
437  exhausted, additional memory will be obtained from the system.
438  Destroying this space will deallocate all additionally allocated
439  space (if possible) but not the initial base.
440 */
441 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
442 
443 /*
444  mspace_malloc behaves as malloc, but operates within
445  the given space.
446 */
447 void* mspace_malloc(mspace msp, size_t bytes);
448 
449 /*
450  mspace_free behaves as free, but operates within
451  the given space.
452 
453  If compiled with FOOTERS==1, mspace_free is not actually needed.
454  free may be called instead of mspace_free because freed chunks from
455  any space are handled by their originating spaces.
456 */
457 void mspace_free(mspace msp, void* mem);
458 
459 /*
460  mspace_realloc behaves as realloc, but operates within
461  the given space.
462 
463  If compiled with FOOTERS==1, mspace_realloc is not actually
464  needed. realloc may be called instead of mspace_realloc because
465  realloced chunks from any space are handled by their originating
466  spaces.
467 */
468 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
469 
470 /*
471  mspace_calloc behaves as calloc, but operates within
472  the given space.
473 */
474 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
475 
476 /*
477  mspace_memalign behaves as memalign, but operates within
478  the given space.
479 */
480 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
481 
482 /*
483  mspace_independent_calloc behaves as independent_calloc, but
484  operates within the given space.
485 */
486 void** mspace_independent_calloc(mspace msp, size_t n_elements,
487  size_t elem_size, void* chunks[]);
488 
489 /*
490  mspace_independent_comalloc behaves as independent_comalloc, but
491  operates within the given space.
492 */
493 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
494  size_t sizes[], void* chunks[]);
495 
496 /*
497  mspace_footprint() returns the number of bytes obtained from the
498  system for this space.
499 */
500 size_t mspace_footprint(mspace msp);
501 
502 
503 #if !NO_MALLINFO
504 /*
505  mspace_mallinfo behaves as mallinfo, but reports properties of
506  the given space.
507 */
508 struct mallinfo mspace_mallinfo(mspace msp);
509 #endif /* NO_MALLINFO */
510 
511 /*
512  mspace_malloc_stats behaves as malloc_stats, but reports
513  properties of the given space.
514 */
515 void mspace_malloc_stats(mspace msp);
516 
517 /*
518  mspace_trim behaves as malloc_trim, but
519  operates within the given space.
520 */
521 int mspace_trim(mspace msp, size_t pad);
522 
523 /*
524  An alias for mallopt.
525 */
526 int mspace_mallopt(int, int);
527 
528 #endif /* MSPACES */
529 
530 #ifdef __cplusplus
531 }; /* end of extern "C" */
532 #endif
533 
534 #endif /* MALLOC_280_H */