/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2018 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
21 
22 #if defined(__clang_analyzer__) && !defined(SDL_DISABLE_ANALYZE_MACROS)
23 #define SDL_DISABLE_ANALYZE_MACROS 1
24 #endif
25 
26 #include "../SDL_internal.h"
27 
28 /* This file contains portable memory management functions for SDL */
29 #include "SDL_stdinc.h"
30 #include "SDL_atomic.h"
31 #include "SDL_error.h"
32 
33 #ifndef HAVE_MALLOC
34 #define LACKS_SYS_TYPES_H
35 #define LACKS_STDIO_H
36 #define LACKS_STRINGS_H
37 #define LACKS_STRING_H
38 #define LACKS_STDLIB_H
39 #define ABORT
40 #define USE_LOCKS 1
41 #define USE_DL_PREFIX
42 
43 /*
44  This is a version (aka dlmalloc) of malloc/free/realloc written by
45  Doug Lea and released to the public domain, as explained at
46  http://creativecommons.org/licenses/publicdomain. Send questions,
47  comments, complaints, performance data, etc to dl@cs.oswego.edu
48 
49 * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
50 
51  Note: There may be an updated version of this malloc obtainable at
52  ftp://gee.cs.oswego.edu/pub/misc/malloc.c
53  Check before installing!
54 
55 * Quickstart
56 
57  This library is all in one file to simplify the most common usage:
58  ftp it, compile it (-O3), and link it into another program. All of
59  the compile-time options default to reasonable values for use on
60  most platforms. You might later want to step through various
61  compile-time and dynamic tuning options.
62 
63  For convenience, an include file for code using this malloc is at:
64  ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
65  You don't really need this .h file unless you call functions not
66  defined in your system include files. The .h file contains only the
67  excerpts from this file needed for using this malloc on ANSI C/C++
68  systems, so long as you haven't changed compile-time options about
69  naming and tuning parameters. If you do, then you can create your
70  own malloc.h that does include all settings by cutting at the point
71  indicated below. Note that you may already by default be using a C
72  library containing a malloc that is based on some version of this
73  malloc (for example in linux). You might still want to use the one
74  in this file to customize settings or to avoid overheads associated
75  with library versions.
76 
77 * Vital statistics:
78 
79  Supported pointer/size_t representation: 4 or 8 bytes
80  size_t MUST be an unsigned type of the same width as
81  pointers. (If you are using an ancient system that declares
82  size_t as a signed type, or need it to be a different width
83  than pointers, you can use a previous release of this malloc
84  (e.g. 2.7.2) supporting these.)
85 
86  Alignment: 8 bytes (default)
87  This suffices for nearly all current machines and C compilers.
88  However, you can define MALLOC_ALIGNMENT to be wider than this
       if necessary (up to 128 bytes), at the expense of using more space.

  Minimum overhead per allocated chunk:   4 or 8 bytes (if 4byte sizes)
                                          8 or 16 bytes (if 8byte sizes)
       Each malloced chunk has a hidden word of overhead holding size
       and status information, and additional cross-check word
       if FOOTERS is defined.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
                          8-byte ptrs:  32 bytes    (including overhead)

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.
       The maximum overhead wastage (i.e., the number of extra bytes
       allocated beyond those requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is about
       32 bytes plus the remainder from a system page (the minimal
       mmap unit); typically 4096 or 8192 bytes.

  Security: static-safe; optionally more or less
       The "security" of malloc refers to the ability of malicious
       code to accentuate the effects of errors (for example, freeing
       space that is not currently malloc'ed or overwriting past the
       ends of chunks) in code that calls malloc. This malloc
       guarantees not to modify any memory locations below the base of
       heap, i.e., static variables, even in the presence of usage
       errors. The routines additionally detect most improper frees
       and reallocs. All this holds as long as the static bookkeeping
       for malloc itself is not corrupted by some other means. This
       is only one aspect of security -- these checks do not, and
       cannot, detect all possible programming errors.

       If FOOTERS is defined nonzero, then each allocated chunk
       carries an additional check word to verify that it was malloced
       from its space. These check words are the same within each
       execution of a program using malloc, but differ across
       executions, so externally crafted fake chunks cannot be
       freed. This improves security by rejecting frees/reallocs that
       could corrupt heap memory, in addition to the checks preventing
       writes to statics that are always on. This may further improve
       security at the expense of time and space overhead. (Note that
       FOOTERS may also be worth using with MSPACES.)

       By default detected errors cause the program to abort (calling
       "abort()"). You can override this to instead proceed past
       errors by defining PROCEED_ON_ERROR. In this case, a bad free
       has no effect, and a malloc that encounters a bad address
       caused by user overwrites will ignore the bad address by
       dropping pointers and indices to all known memory. This may
       be appropriate for programs that should continue if at all
       possible in the face of programming errors, although they may
       run out of memory because dropped memory is never reclaimed.

       If you don't like either of these options, you can define
       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
       else. And if you are sure that your program using malloc has
       no errors or vulnerabilities, you can define INSECURE to 1,
       which might (or might not) provide a small performance improvement.

  Thread-safety: NOT thread-safe unless USE_LOCKS defined
       When USE_LOCKS is defined, each public call to malloc, free,
       etc is surrounded with either a pthread mutex or a win32
       spinlock (depending on WIN32). This is not especially fast, and
       can be a major bottleneck. It is designed only to provide
       minimal protection in concurrent environments, and to provide a
       basis for extensions. If you are using malloc in a concurrent
       program, consider instead using ptmalloc, which is derived from
       a version of this malloc. (See http://www.malloc.de).

  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
       This malloc can use unix sbrk or any emulation (invoked using
       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
       memory. On most unix systems, it tends to work best if both
       MORECORE and MMAP are enabled. On Win32, it uses emulations
       based on VirtualAlloc. It also uses common C library functions
       like memset.

  Compliance: I believe it is compliant with the Single Unix Specification
       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
       others as well.

* Overview of algorithms

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and
  tunable. Consistent balance across these factors results in a good
  general-purpose allocator for malloc-intensive programs.

  In most ways, this malloc is a best-fit allocator. Generally, it
  chooses the best-fitting existing chunk for a request, with ties
  broken in approximately least-recently-used order. (This strategy
  normally maintains low fragmentation.) However, for requests less
  than 256 bytes, it deviates from best-fit when there is not an
  exactly fitting available chunk by preferring to use space adjacent
  to that used for the previous small request, as well as by breaking
  ties in approximately most-recently-used order. (These enhance
  locality of series of small allocations.) And for very large requests
  (>= 256Kb by default), it relies on system memory mapping
  facilities, if supported. (This helps avoid carrying around and
  possibly fragmenting memory used only for large chunks.)

  All operations (except malloc_stats and mallinfo) have execution
  times that are bounded by a constant factor of the number of bits in
  a size_t, not counting any clearing in calloc or copying in realloc,
  or actions surrounding MORECORE and MMAP that have times
  proportional to the number of non-contiguous regions returned by
  system allocation routines, which is often just 1.

  The implementation is not very modular and seriously overuses
  macros. Perhaps someday all C compilers will do as good a job
  inlining modular code as can now be done by brute-force expansion,
  but now, enough of them seem not to.

  Some compilers issue a lot of warnings about code that is
  dead/unreachable only on some platforms, and also about intentional
  uses of negation on unsigned types. All known cases of each can be
  ignored.

  For a longer but out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

* MSPACES
  If MSPACES is defined, then in addition to malloc, free, etc.,
  this file also defines mspace_malloc, mspace_free, etc. These
  are versions of malloc routines that take an "mspace" argument
  obtained using create_mspace, to control all internal bookkeeping.
  If ONLY_MSPACES is defined, only these versions are compiled.
  So if you would like to use this allocator for only some allocations,
  and your system malloc for others, you can compile with
  ONLY_MSPACES and then do something like...
    static mspace mymspace = create_mspace(0,0); // for example
    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)

  (Note: If you only need one instance of an mspace, you can instead
  use "USE_DL_PREFIX" to relabel the global malloc.)

  You can similarly create thread-local allocators by storing
  mspaces as thread-locals. For example:
    static __thread mspace tlms = 0;
    void*  tlmalloc(size_t bytes) {
      if (tlms == 0) tlms = create_mspace(0, 0);
      return mspace_malloc(tlms, bytes);
    }
    void  tlfree(void* mem) { mspace_free(tlms, mem); }

  Unless FOOTERS is defined, each mspace is completely independent.
  You cannot allocate from one and free to another (although
  conformance is only weakly checked, so usage errors are not always
  caught). If FOOTERS is defined, then each chunk carries around a tag
  indicating its originating mspace, and frees are directed to their
  originating spaces.

 -------------------------  Compile-time options ---------------------------

Be careful in setting #define values for numerical constants of type
size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly cast.

WIN32                    default: defined if _WIN32 defined
  Defining WIN32 sets up defaults for MS environment and compilers.
  Otherwise defaults are for unix.

MALLOC_ALIGNMENT         default: (size_t)8
  Controls the minimum alignment for malloc'ed chunks. It must be a
  power of two and at least 8, even on machines for which smaller
  alignments would suffice. It may be defined as larger than this
  though. Note however that code and data structures are optimized for
  the case of 8-byte alignment.

MSPACES                  default: 0 (false)
  If true, compile in support for independent allocation spaces.
  This is only supported if HAVE_MMAP is true.

ONLY_MSPACES             default: 0 (false)
  If true, only compile in mspace versions, not regular versions.

USE_LOCKS                default: 0 (false)
  Causes each call to each public routine to be surrounded with
  pthread or WIN32 mutex lock/unlock. (If set true, this can be
  overridden on a per-mspace basis for mspace versions.)

FOOTERS                  default: 0
  If true, provide extra checking and dispatching by placing
  information in the footers of allocated chunks. This adds
  space and time overhead.

INSECURE                 default: 0
  If true, omit checks for usage errors and heap space overwrites.

USE_DL_PREFIX            default: NOT defined
  Causes compiler to prefix all public routines with the string 'dl'.
  This can be useful when you only want to use this malloc in one part
  of a program, using your regular system malloc elsewhere.

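  For illustration, a hedged sketch (assuming this file is compiled
  with USE_DL_PREFIX defined): the dl-prefixed routines coexist with
  the system allocator, so the two heaps can be used side by side.

    void* p = dlmalloc(128);  // served by this allocator
    void* q = malloc(128);    // served by the system malloc
    dlfree(p);
    free(q);
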
ABORT                    default: defined as abort()
  Defines how to abort on failed checks. On most systems, a failed
  check cannot die with an "assert" or even print an informative
  message, because the underlying print routines in turn call malloc,
  which will fail again. Generally, the best policy is to simply call
  abort(). It's not very useful to do more than this because many
  errors due to overwriting will show up as address faults (null, odd
  addresses etc) rather than malloc-triggered checks, so will also
  abort. Also, most compilers know that abort() does not return, so
  can better optimize code conditionally calling it.

PROCEED_ON_ERROR         default: defined as 0 (false)
  Controls whether detected bad addresses cause them to be bypassed
  rather than aborting. If set, detected bad arguments to free and
  realloc are ignored. And all bookkeeping information is zeroed out
  upon a detected overwrite of freed heap space, thus losing the
  ability to ever return it from malloc again, but enabling the
  application to proceed. If PROCEED_ON_ERROR is defined, the
  static variable malloc_corruption_error_count is compiled in
  and can be examined to see if errors have occurred. This option
  generates slower code than the default abort policy.

DEBUG                    default: NOT defined
  The DEBUG setting is mainly intended for people trying to modify
  this code or diagnose problems when porting to new platforms.
  However, it may also be able to better isolate user errors than just
  using runtime checks. The assertions in the check routines spell
  out in more detail the assumptions and invariants underlying the
  algorithms. The checking is fairly extensive, and will slow down
  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
  set will attempt to check every non-mmapped allocated and free chunk
  in the course of computing the summaries.

ABORT_ON_ASSERT_FAILURE  default: defined as 1 (true)
  Debugging assertion failures can be nearly impossible if your
  version of the assert macro causes malloc to be called, which will
  lead to a cascade of further failures, blowing the runtime stack.
  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
  which will usually make debugging easier.

MALLOC_FAILURE_ACTION    default: sets errno to ENOMEM, or no-op on win32
  The action to take before "return 0" when malloc fails to be able to
  return memory because there is none available.

HAVE_MORECORE            default: 1 (true) unless win32 or ONLY_MSPACES
  True if this system supports sbrk or an emulation of it.

MORECORE                 default: sbrk
  The name of the sbrk-style system routine to call to obtain more
  memory. See below for guidance on writing custom MORECORE
  functions. The type of the argument to sbrk/MORECORE varies across
  systems. It cannot be size_t, because it supports negative
  arguments, so it is normally the signed type of the same width as
  size_t (sometimes declared as "intptr_t"). It doesn't much matter
  though. Internally, we only call it with arguments less than half
  the max value of a size_t, which should work across all reasonable
  possibilities, although sometimes generating compiler warnings. See
  near the end of this file for guidelines for creating a custom
  version of MORECORE.

MORECORE_CONTIGUOUS      default: 1 (true)
  If true, take advantage of fact that consecutive calls to MORECORE
  with positive arguments always return contiguous increasing
  addresses. This is true of unix sbrk. It does not hurt too much to
  set it true anyway, since malloc copes with non-contiguities.
  Setting it false when definitely non-contiguous saves time
  and possibly wasted space it would take to discover this though.

MORECORE_CANNOT_TRIM     default: NOT defined
  True if MORECORE cannot release space back to the system when given
  negative arguments. This is generally necessary only if you are
  using a hand-crafted MORECORE function that cannot handle negative
  arguments.

HAVE_MMAP                default: 1 (true)
  True if this system supports mmap or an emulation of it. If so, and
  HAVE_MORECORE is not true, MMAP is used for all system
  allocation. If set and HAVE_MORECORE is true as well, MMAP is
  primarily used to directly allocate very large blocks. It is also
  used as a backup strategy in cases where MORECORE fails to provide
  space from system. Note: A single call to MUNMAP is assumed to be
  able to unmap memory that may have been allocated using multiple calls
  to MMAP, so long as they are adjacent.

HAVE_MREMAP              default: 1 on linux, else 0
  If true realloc() uses mremap() to re-allocate large blocks and
  extend or shrink allocation spaces.

MMAP_CLEARS              default: 1 on unix
  True if mmap clears memory so calloc doesn't need to. This is true
  for standard unix mmap using /dev/zero.

USE_BUILTIN_FFS          default: 0 (i.e., not used)
  Causes malloc to use the builtin ffs() function to compute indices.
  Some compilers may recognize and intrinsify ffs to be faster than the
  supplied C version. Also, the case of x86 using gcc is special-cased
  to an asm instruction, so is already as fast as it can be, and so
  this setting has no effect. (On most x86s, the asm version is only
  slightly faster than the C version.)

malloc_getpagesize       default: derive from system includes, or 4096.
  The system page size. To the extent possible, this malloc manages
  memory from the system in page-size units. This may be (and
  usually is) a function rather than a constant. This is ignored
  if WIN32, where page size is determined using getSystemInfo during
  initialization.

USE_DEV_RANDOM           default: 0 (i.e., not used)
  Causes malloc to use /dev/random to initialize secure magic seed for
  stamping footers. Otherwise, the current time is used.

NO_MALLINFO              default: 0
  If defined, don't compile "mallinfo". This can be a simple way
  of dealing with mismatches between system declarations and
  those in this file.

MALLINFO_FIELD_TYPE      default: size_t
  The type of the fields in the mallinfo struct. This was originally
  defined as "int" in SVID etc, but is more usefully defined as
  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.

REALLOC_ZERO_BYTES_FREES  default: not defined
  This should be set if a call to realloc with zero bytes should
  be the same as a call to free. Some people think it should. Otherwise,
  since this malloc returns a unique pointer for malloc(0), so does
  realloc(p, 0).

LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
LACKS_STDLIB_H            default: NOT defined unless on WIN32
  Define these if your system does not have these header files.
  You might need to manually insert some of the declarations they provide.

DEFAULT_GRANULARITY       default: page size if MORECORE_CONTIGUOUS,
                                   system_info.dwAllocationGranularity in WIN32,
                                   otherwise 64K.
      Also settable using mallopt(M_GRANULARITY, x)
  The unit for allocating and deallocating memory from the system. On
  most systems with contiguous MORECORE, there is no reason to
  make this more than a page. However, systems with MMAP tend to
  either require or encourage larger granularities. You can increase
  this value to prevent system allocation functions from being called so
  often, especially if they are slow. The value must be at least one
  page and must be a power of two. Setting to 0 causes initialization
  to either page size or win32 region size. (Note: In previous
  versions of malloc, the equivalent of this option was called
  "TOP_PAD")

DEFAULT_TRIM_THRESHOLD    default: 2MB
      Also settable using mallopt(M_TRIM_THRESHOLD, x)
  The maximum amount of unused top-most memory to keep before
  releasing via malloc_trim in free(). Automatic trimming is mainly
  useful in long-lived programs using contiguous MORECORE. Because
  trimming via sbrk can be slow on some systems, and can sometimes be
  wasteful (in cases where programs immediately afterward allocate
  more large chunks) the value should be high enough so that your
  overall system performance would improve by releasing this much
  memory. As a rough guide, you might set to a value close to the
  average size of a process (program) running on your system.
  Releasing this much memory would allow such a process to run in
  memory. Generally, it is worth tuning trim thresholds when a
  program undergoes phases where several large chunks are allocated
  and released in ways that can reuse each other's storage, perhaps
  mixed with phases where there are no such chunks at all. The trim
  value must be greater than page size to have any useful effect. To
  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
  some people use of mallocing a huge space and then freeing it at
  program startup, in an attempt to reserve system memory, doesn't
  have the intended effect under automatic trimming, since that memory
  will immediately be returned to the system.

DEFAULT_MMAP_THRESHOLD    default: 256K
      Also settable using mallopt(M_MMAP_THRESHOLD, x)
  The request size threshold for using MMAP to directly service a
  request. Requests of at least this size that cannot be allocated
  using already-existing space will be serviced via mmap. (If enough
  normal freed space already exists it is used instead.) Using mmap
  segregates relatively large chunks of memory so that they can be
  individually obtained and released from the host system. A request
  serviced through mmap is never reused by any other request (at least
  not directly; the system may just so happen to remap successive
  requests to the same locations). Segregating space in this way has
  the benefits that: Mmapped space can always be individually released
  back to the system, which helps keep the system level memory demands
  of a long-lived program low. Also, mapped memory doesn't become
  `locked' between other chunks, as can happen with normally allocated
  chunks, which means that even trimming via malloc_trim would not
  release them. However, it has the disadvantage that the space
  cannot be reclaimed, consolidated, and then used to service later
  requests, as happens with normal chunks. The advantages of mmap
  nearly always outweigh disadvantages for "large" chunks, but the
  value of "large" may vary across systems. The default is an
  empirically derived value that works well in most systems. You can
  disable mmap by setting to MAX_SIZE_T.

*/

#ifndef WIN32
#ifdef _WIN32
#define WIN32 1
#endif /* _WIN32 */
#endif /* WIN32 */
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define HAVE_MMAP 1
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#define LACKS_FCNTL_H
#define MALLOC_FAILURE_ACTION
#define MMAP_CLEARS 0           /* WINCE and some others apparently don't clear */
#endif /* WIN32 */

#if defined(DARWIN) || defined(_DARWIN)
/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MMAP 1
#endif /* HAVE_MORECORE */
#endif /* DARWIN */

#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>          /* For size_t */
#endif /* LACKS_SYS_TYPES_H */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T (~(size_t)0)

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif /* ONLY_MSPACES */
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else /* ONLY_MSPACES */
#define MSPACES 0
#endif /* ONLY_MSPACES */
#endif /* MSPACES */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)8U)
#endif /* MALLOC_ALIGNMENT */
#ifndef FOOTERS
#define FOOTERS 0
#endif /* FOOTERS */
#ifndef ABORT
#define ABORT abort()
#endif /* ABORT */
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif /* ABORT_ON_ASSERT_FAILURE */
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif /* PROCEED_ON_ERROR */
#ifndef USE_LOCKS
#define USE_LOCKS 0
#endif /* USE_LOCKS */
#ifndef INSECURE
#define INSECURE 0
#endif /* INSECURE */
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif /* HAVE_MMAP */
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif /* MMAP_CLEARS */
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else /* linux */
#define HAVE_MREMAP 0
#endif /* linux */
#endif /* HAVE_MREMAP */
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
#endif /* MALLOC_FAILURE_ACTION */
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif /* ONLY_MSPACES */
#endif /* HAVE_MORECORE */
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else /* !HAVE_MORECORE */
#ifndef MORECORE
#define MORECORE sbrk
#endif /* MORECORE */
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif /* MORECORE_CONTIGUOUS */
#endif /* HAVE_MORECORE */
#ifndef DEFAULT_GRANULARITY
#if MORECORE_CONTIGUOUS
#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
#else /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif /* MORECORE_CONTIGUOUS */
#endif /* DEFAULT_GRANULARITY */
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif /* MORECORE_CANNOT_TRIM */
#endif /* DEFAULT_TRIM_THRESHOLD */
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* DEFAULT_MMAP_THRESHOLD */
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif /* USE_BUILTIN_FFS */
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif /* USE_DEV_RANDOM */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */

#ifndef memset
#define memset SDL_memset
#endif
#ifndef memcpy
#define memcpy SDL_memcpy
#endif

/*
  mallopt tuning options. SVID/XPG defines four standard parameter
  numbers for mallopt, normally defined in malloc.h. None of these
  are used in this malloc, so setting them has no effect. But this
  malloc does support the following options.
*/

#define M_TRIM_THRESHOLD    (-1)
#define M_GRANULARITY       (-2)
#define M_MMAP_THRESHOLD    (-3)

/* ------------------------ Mallinfo declarations ------------------------ */

#if !NO_MALLINFO
/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics. It should work on any system that has a
  /usr/include/malloc.h defining struct mallinfo. The main
  declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo(). The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc. These fields
  are instead filled by mallinfo() with other numbers that might be of
  interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo. If so, it is included; else a compliant version is
  declared below. These must be precisely the same for mallinfo() to
  work. The original SVID version of this struct, defined on most
  systems with mallinfo, declares all fields as ints. But some others
  define as unsigned long. If your system defines the fields using a
  type of different width than listed here, you MUST #include your
  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else /* HAVE_USR_INCLUDE_MALLOC_H */

struct mallinfo
{
    MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
    MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
    MALLINFO_FIELD_TYPE smblks;   /* always 0 */
    MALLINFO_FIELD_TYPE hblks;    /* always 0 */
    MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
    MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
    MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
    MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
    MALLINFO_FIELD_TYPE fordblks; /* total free space */
    MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};

#endif /* HAVE_USR_INCLUDE_MALLOC_H */
#endif /* NO_MALLINFO */

#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */

#if !ONLY_MSPACES

/* ------------------- Declarations of public routines ------------------- */

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlrealloc              realloc
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#endif /* USE_DL_PREFIX */


/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.) Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
void *dlmalloc(size_t);
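
/*
  A minimal usage sketch: on failure dlmalloc returns null and, on
  ANSI C systems, sets errno to ENOMEM.

    void *p = dlmalloc(n);
    if (p == 0) {
        // allocation failed; errno == ENOMEM on ANSI systems
    }
*/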

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
void dlfree(void *);

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void *dlcalloc(size_t, size_t);
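
/*
  A cautious usage sketch: guard against n_elements * element_size
  overflowing size_t before depending on the product.

    void *p = 0;
    if (n_elements == 0 || element_size <= MAX_SIZE_T / n_elements)
        p = dlcalloc(n_elements, element_size);
    // p == 0 here means either overflow or allocation failure
*/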

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  if n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible. realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/

void *dlrealloc(void *, size_t);
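
/*
  A usage sketch of the idiom this implies: because a failed realloc
  leaves p allocated, keep the old pointer until the call succeeds.

    void *tmp = dlrealloc(p, newsize);
    if (tmp != 0)
        p = tmp;
    // else p is still valid and must eventually be freed
*/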

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void *dlmemalign(size_t, size_t);
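
/*
  A usage sketch: allocate a 64-byte-aligned buffer (e.g. to match a
  cache line), then release it with the ordinary free routine.

    void *buf = dlmemalign(64, 4096);
    if (buf != 0) {
        // ((size_t)buf & 63) == 0 holds here
        dlfree(buf);
    }
*/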

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
void *dlvalloc(size_t);

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
  (parameter-number, parameter-value) pair. mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h. None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt. See below for details. Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #  default       allowed param values
  M_TRIM_THRESHOLD     -1    2*1024*1024   any (MAX_SIZE_T disables)
  M_GRANULARITY        -2    page size     any power of 2 >= page size
  M_MMAP_THRESHOLD     -3    256*1024      any (or 0 if no MMAP support)
*/
int dlmallopt(int, int);
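
/*
  A usage sketch: raise the mmap threshold so requests up to 1MB are
  served from the normal pools, and check that the value was accepted.

    if (dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024) == 0) {
        // value rejected; the previous threshold remains in effect
    }
*/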

/*
  malloc_footprint();
  Returns the number of bytes obtained from the system. The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
size_t dlmalloc_footprint(void);

/*
  malloc_max_footprint();
  Returns the maximum number of bytes obtained from the system. This
  value will be greater than current footprint if deallocated space
  has been reclaimed by the system. The peak number of bytes allocated
  by malloc, realloc etc., is less than this value. Unlike mallinfo,
  this function returns only a precomputed result, so can be called
  frequently to monitor memory consumption. Even if locks are
  otherwise defined, this function does not use them, so results might
  not be up to date.
*/
size_t dlmalloc_max_footprint(void);

#if !NO_MALLINFO
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
             than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
             back to system via malloc_trim. ("ideally" means that
             it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
struct mallinfo dlmallinfo(void);
#endif /* NO_MALLINFO */

/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed. If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use regular calloc and assign pointers into this
  space to represent elements. (In this case though, you cannot
  independently free elements.)

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools. It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

  struct Node { int item; struct Node* next; };

  struct Node* build_list() {
    struct Node** pool;
    int i;
    int n = read_number_of_nodes_needed();
    if (n <= 0) return 0;
    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (i = 0; i < n-1; ++i)
      pool[i]->next = pool[i+1];
    free(pool);   // Can now free the array (or not, if it is needed later)
    return first;
  }
*/
void **dlindependent_calloc(size_t, size_t, void **);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array. It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed. If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use a single regular malloc, and assign pointers at
  particular offsets in the aggregate space. (In this case though, you
  cannot independently free elements.)

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time. For example:

  struct Head { ... }
  struct Foot { ... }

  void send_message(char* msg) {
    int msglen = strlen(msg);
    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void* chunks[3];
    if (independent_comalloc(3, sizes, chunks) == 0)
      die();
    struct Head* head = (struct Head*)(chunks[0]);
    char* body = (char*)(chunks[1]);
    struct Foot* foot = (struct Foot*)(chunks[2]);
    // ...
  }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
void **dlindependent_comalloc(size_t, size_t *, void **);


/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
void *dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
int dlmalloc_trim(size_t);
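
/*
  A usage sketch: after releasing a large working set, return unused
  top-of-heap memory to the system, keeping 1MB of slack for expected
  future allocations.

    int released = dlmalloc_trim(1024 * 1024);
    // released == 1 only if memory was actually given back
*/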

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

    p = malloc(n);
    assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void *);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
void dlmalloc_stats(void);

#endif /* ONLY_MSPACES */

#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void *mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size. It
  returns null if there is no system memory available to create the
  space. If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests. You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
mspace create_mspace(size_t capacity, int locked);

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
size_t destroy_mspace(mspace msp);
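
/*
  A lifecycle sketch: a private, lock-protected space that grows on
  demand and is torn down in one call.

    mspace ms = create_mspace(0, 1);    // default capacity, locked
    if (ms != 0) {
        void *p = mspace_malloc(ms, 1024);
        mspace_free(ms, p);
        destroy_mspace(ms);             // returns all its space to the system
    }
*/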

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
mspace create_mspace_with_base(void *base, size_t capacity, int locked);
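
/*
  A usage sketch (with a hypothetical static arena): carve an
  allocator out of a fixed buffer; part of the buffer is consumed by
  bookkeeping as described above.

    static char arena[1 << 20];
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    if (ms != 0) {
        void *p = mspace_malloc(ms, 256);
        mspace_free(ms, p);
        destroy_mspace(ms);   // does not free the static arena itself
    }
*/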

/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
void *mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
void mspace_free(mspace msp, void *mem);

/*
  mspace_realloc behaves as realloc, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_realloc is not actually
  needed. realloc may be called instead of mspace_realloc because
  realloced chunks from any space are handled by their originating
  spaces.
*/
void *mspace_realloc(mspace msp, void *mem, size_t newsize);

/*
  mspace_calloc behaves as calloc, but operates within
  the given space.
*/
void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

/*
  mspace_memalign behaves as memalign, but operates within
  the given space.
*/
void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);

/*
  mspace_independent_calloc behaves as independent_calloc, but
  operates within the given space.
*/
void **mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void *chunks[]);

/*
  mspace_independent_comalloc behaves as independent_comalloc, but
  operates within the given space.
*/
void **mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void *chunks[]);

/*
  mspace_footprint() returns the number of bytes obtained from the
  system for this space.
*/
size_t mspace_footprint(mspace msp);

/*
  mspace_max_footprint() returns the peak number of bytes obtained from the
  system for this space.
*/
size_t mspace_max_footprint(mspace msp);


#if !NO_MALLINFO
/*
  mspace_mallinfo behaves as mallinfo, but reports properties of
  the given space.
*/
struct mallinfo mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

/*
  mspace_malloc_stats behaves as malloc_stats, but reports
  properties of the given space.
*/
void mspace_malloc_stats(mspace msp);

/*
  mspace_trim behaves as malloc_trim, but
  operates within the given space.
*/
int mspace_trim(mspace msp, size_t pad);

/*
  An alias for mallopt.
*/
int mspace_mallopt(int, int);

#endif /* MSPACES */

#ifdef __cplusplus
}  /* end of extern "C" */
#endif /* __cplusplus */

/*
  ========================================================================
  To make a fully customizable malloc.h header file, cut everything
  above this line, put into file malloc.h, edit to suit, and #include it
  on the next line, as well as in programs that use this malloc.
  ========================================================================
*/

/* #include "malloc.h" */

/*------------------------------ internal #includes ---------------------- */

#ifdef _MSC_VER
#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
#endif /* _MSC_VER */

#ifndef LACKS_STDIO_H
#include <stdio.h>      /* for printing in malloc_stats */
#endif

#ifndef LACKS_ERRNO_H
#include <errno.h>      /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */
#if FOOTERS
#include <time.h>       /* for magic initialization */
#endif /* FOOTERS */
#ifndef LACKS_STDLIB_H
#include <stdlib.h>     /* for abort() */
#endif /* LACKS_STDLIB_H */
#ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) ABORT
#else /* ABORT_ON_ASSERT_FAILURE */
#include <assert.h>
#endif /* ABORT_ON_ASSERT_FAILURE */
#else /* DEBUG */
#define assert(x)
#endif /* DEBUG */
#ifndef LACKS_STRING_H
#include <string.h>     /* for memset etc */
#endif /* LACKS_STRING_H */
#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h>    /* for ffs */
#endif /* LACKS_STRINGS_H */
#endif /* USE_BUILTIN_FFS */
#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>   /* for mmap */
#endif /* LACKS_SYS_MMAN_H */
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif /* LACKS_FCNTL_H */
#endif /* HAVE_MMAP */
#if HAVE_MORECORE
#ifndef LACKS_UNISTD_H
#include <unistd.h>     /* for sbrk */
#else /* LACKS_UNISTD_H */
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void *sbrk(ptrdiff_t);
#endif /* FreeBSD etc */
#endif /* LACKS_UNISTD_H */
#endif /* HAVE_MORECORE */

#ifndef WIN32
#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32            /* use supplied emulation of getpagesize */
#        define malloc_getpagesize getpagesize()
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else /* just guess */
#                define malloc_getpagesize ((size_t)4096U)
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
#endif

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
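
/*
  A worked example (assuming the default MALLOC_ALIGNMENT of 8, so
  CHUNK_ALIGN_MASK == 7): for an address A == 0x1003,
  (size_t)A & 7 == 3, so align_offset(A) == (8 - 3) & 7 == 5, and
  A + 5 == 0x1008 is 8-byte aligned. For an already-aligned address
  such as 0x1008 the offset is 0.
*/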

/* -------------------------- MMAP preliminaries ------------------------- */

/*
   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
   checks to fail so compiler optimizer can delete code rather than
   using so many "#if"s.
*/


/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */

#if !HAVE_MMAP
#define IS_MMAPPED_BIT       (SIZE_T_ZERO)
#define USE_MMAP_BIT         (SIZE_T_ZERO)
#define CALL_MMAP(s)         MFAIL
#define CALL_MUNMAP(a, s)    (-1)
#define DIRECT_MMAP(s)       MFAIL

#else /* HAVE_MMAP */
#define IS_MMAPPED_BIT       (SIZE_T_ONE)
#define USE_MMAP_BIT         (SIZE_T_ONE)

#ifndef WIN32
#define CALL_MUNMAP(a, s)    munmap((a), (s))
#define MMAP_PROT            (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS        MAP_ANON
#endif /* MAP_ANON */
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
#define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#else /* MAP_ANONYMOUS */
/*
   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
   is unlikely to be needed, but is supplied just in case.
*/
#define MMAP_FLAGS           (MAP_PRIVATE)
static int dev_zero_fd = -1;    /* Cached file descriptor for /dev/zero. */
#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#endif /* MAP_ANONYMOUS */

#define DIRECT_MMAP(s)       CALL_MMAP(s)
#else /* WIN32 */

/* Win32 MMAP via VirtualAlloc */
1371 static void *
1372 win32mmap(size_t size)
1373 {
1374  void *ptr =
1375  VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1376  return (ptr != 0) ? ptr : MFAIL;
1377 }
1378 
1379 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1380 static void *
1381 win32direct_mmap(size_t size)
1382 {
1383  void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1384  PAGE_READWRITE);
1385  return (ptr != 0) ? ptr : MFAIL;
1386 }
1387 
1388 /* This function supports releasing coalesced segments */
1389 static int
1390 win32munmap(void *ptr, size_t size)
1391 {
1392  MEMORY_BASIC_INFORMATION minfo;
1393  char *cptr = ptr;
1394  while (size) {
1395  if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1396  return -1;
1397  if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1398  minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1399  return -1;
1400  if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1401  return -1;
1402  cptr += minfo.RegionSize;
1403  size -= minfo.RegionSize;
1404  }
1405  return 0;
1406 }
1407 
1408 #define CALL_MMAP(s) win32mmap(s)
1409 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
1410 #define DIRECT_MMAP(s) win32direct_mmap(s)
1411 #endif /* WIN32 */
1412 #endif /* HAVE_MMAP */
1413 
1414 #if HAVE_MMAP && HAVE_MREMAP
1415 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1416 #else /* HAVE_MMAP && HAVE_MREMAP */
1417 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1418 #endif /* HAVE_MMAP && HAVE_MREMAP */
1419 
1420 #if HAVE_MORECORE
1421 #define CALL_MORECORE(S) MORECORE(S)
1422 #else /* HAVE_MORECORE */
1423 #define CALL_MORECORE(S) MFAIL
1424 #endif /* HAVE_MORECORE */
1425 
1426 /* mstate bit set if contiguous morecore disabled or failed */
1427 #define USE_NONCONTIGUOUS_BIT (4U)
1428 
1429 /* segment bit set in create_mspace_with_base */
1430 #define EXTERN_BIT (8U)
1431 
1432 
1433 /* --------------------------- Lock preliminaries ------------------------ */
1434 
1435 #if USE_LOCKS
1436 
1437 /*
1438  When locks are defined, there are up to two global locks:
1439 
1440  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
1441  MORECORE. In many cases sys_alloc requires two calls, that should
1442  not be interleaved with calls by other threads. This does not
1443  protect against direct calls to MORECORE by other threads not
1444 using this lock, so there is still code to cope as best we can with
1445 interference.
1446 
1447  * magic_init_mutex ensures that mparams.magic and other
1448  unique mparams values are initialized only once.
1449 */
1450 
1451 #ifndef WIN32
1452 /* By default use posix locks */
1453 #include <pthread.h>
1454 #define MLOCK_T pthread_mutex_t
1455 #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
1456 #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
1457 #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
1458 
1459 #if HAVE_MORECORE
1460 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
1461 #endif /* HAVE_MORECORE */
1462 
1463 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
1464 
1465 #else /* WIN32 */
1466 /*
1467  Because lock-protected regions have bounded times, and there
1468  are no recursive lock calls, we can use simple spinlocks.
1469 */
1470 
1471 #define MLOCK_T long
1472 static int
1473 win32_acquire_lock(MLOCK_T *sl)
1474 {
1475  for (;;) {
1476 #ifdef InterlockedCompareExchangePointer
1477  if (!InterlockedCompareExchange(sl, 1, 0))
1478  return 0;
1479 #else /* Use older void* version */
1480  if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
1481  return 0;
1482 #endif /* InterlockedCompareExchangePointer */
1483  Sleep(0);
1484  }
1485 }
1486 
1487 static void
1488 win32_release_lock(MLOCK_T *sl)
1489 {
1490  InterlockedExchange(sl, 0);
1491 }
1492 
1493 #define INITIAL_LOCK(l) *(l)=0
1494 #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1495 #define RELEASE_LOCK(l) win32_release_lock(l)
1496 #if HAVE_MORECORE
1497 static MLOCK_T morecore_mutex;
1498 #endif /* HAVE_MORECORE */
1499 static MLOCK_T magic_init_mutex;
1500 #endif /* WIN32 */
1501 
1502 #define USE_LOCK_BIT (2U)
1503 #else /* USE_LOCKS */
1504 #define USE_LOCK_BIT (0U)
1505 #define INITIAL_LOCK(l)
1506 #endif /* USE_LOCKS */
1507 
1508 #if USE_LOCKS && HAVE_MORECORE
1509 #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
1510 #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
1511 #else /* USE_LOCKS && HAVE_MORECORE */
1512 #define ACQUIRE_MORECORE_LOCK()
1513 #define RELEASE_MORECORE_LOCK()
1514 #endif /* USE_LOCKS && HAVE_MORECORE */
1515 
1516 #if USE_LOCKS
1517 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1518 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1519 #else /* USE_LOCKS */
1520 #define ACQUIRE_MAGIC_INIT_LOCK()
1521 #define RELEASE_MAGIC_INIT_LOCK()
1522 #endif /* USE_LOCKS */
1523 
1524 
1525 /* ----------------------- Chunk representations ------------------------ */
1526 
1527 /*
1528  (The following includes lightly edited explanations by Colin Plumb.)
1529 
1530  The malloc_chunk declaration below is misleading (but accurate and
1531  necessary). It declares a "view" into memory allowing access to
1532  necessary fields at known offsets from a given base.
1533 
1534  Chunks of memory are maintained using a `boundary tag' method as
1535  originally described by Knuth. (See the paper by Paul Wilson
1536  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
1537  techniques.) Sizes of free chunks are stored both in the front of
1538  each chunk and at the end. This makes consolidating fragmented
1539  chunks into bigger chunks fast. The head fields also hold bits
1540  representing whether chunks are free or in use.
1541 
1542  Here are some pictures to make it clearer. They are "exploded" to
1543  show that the state of a chunk can be thought of as extending from
1544  the high 31 bits of the head field of its header through the
1545  prev_foot and PINUSE_BIT bit of the following chunk header.
1546 
1547  A chunk that's in use looks like:
1548 
1549  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1550 | Size of previous chunk (if P = 0) |
1551  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1552  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1553  | Size of this chunk 1| +-+
1554  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1555  | |
1556  +- -+
1557  | |
1558  +- -+
1559  | :
1560  +- size - sizeof(size_t) available payload bytes -+
1561  : |
1562  chunk-> +- -+
1563  | |
1564  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1565  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
1566  | Size of next chunk (may or may not be in use) | +-+
1567  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1568 
1569  And if it's free, it looks like this:
1570 
1571  chunk-> +- -+
1572  | User payload (must be in use, or we would have merged!) |
1573  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1574  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1575  | Size of this chunk 0| +-+
1576  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1577  | Next pointer |
1578  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1579  | Prev pointer |
1580  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1581  | :
1582  +- size - sizeof(struct chunk) unused bytes -+
1583  : |
1584  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1585  | Size of this chunk |
1586  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1587  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
1588  | Size of next chunk (must be in use, or we would have merged)| +-+
1589  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1590  | :
1591  +- User payload -+
1592  : |
1593  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1594  |0|
1595  +-+
1596  Note that since we always merge adjacent free chunks, the chunks
1597  adjacent to a free chunk must be in use.
1598 
1599  Given a pointer to a chunk (which can be derived trivially from the
1600  payload pointer) we can, in O(1) time, find out whether the adjacent
1601  chunks are free, and if so, unlink them from the lists that they
1602  are on and merge them with the current chunk.
1603 
1604  Chunks always begin on even word boundaries, so the mem portion
1605  (which is returned to the user) is also on an even word boundary, and
1606  thus at least double-word aligned.
1607 
1608  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
1609  chunk size (which is always a multiple of two words), is an in-use
1610  bit for the *previous* chunk. If that bit is *clear*, then the
1611  word before the current chunk size contains the previous chunk
1612  size, and can be used to find the front of the previous chunk.
1613  The very first chunk allocated always has this bit set, preventing
1614  access to non-existent (or non-owned) memory. If pinuse is set for
1615  any given chunk, then you CANNOT determine the size of the
1616  previous chunk, and might even get a memory addressing fault when
1617  trying to do so.
1618 
1619  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
1620  the chunk size redundantly records whether the current chunk is
1621  inuse. This redundancy enables usage checks within free and realloc,
1622  and reduces indirection when freeing and consolidating chunks.
1623 
1624  Each freshly allocated chunk must have both cinuse and pinuse set.
1625  That is, each allocated chunk borders either a previously allocated
1626  and still in-use chunk, or the base of its memory arena. This is
1627 ensured by making all allocations from the `lowest' part of any
1628  found chunk. Further, no free chunk physically borders another one,
1629  so each free chunk is known to be preceded and followed by either
1630  inuse chunks or the ends of memory.
1631 
1632  Note that the `foot' of the current chunk is actually represented
1633  as the prev_foot of the NEXT chunk. This makes it easier to
1634  deal with alignments etc but can be very confusing when trying
1635  to extend or adapt this code.
1636 
1637  The exceptions to all this are
1638 
1639  1. The special chunk `top' is the top-most available chunk (i.e.,
1640  the one bordering the end of available memory). It is treated
1641  specially. Top is never included in any bin, is used only if
1642  no other chunk is available, and is released back to the
1643  system if it is very large (see M_TRIM_THRESHOLD). In effect,
1644  the top chunk is treated as larger (and thus less well
1645  fitting) than any other available chunk. The top chunk
1646  doesn't update its trailing size field since there is no next
1647  contiguous chunk that would have to index off it. However,
1648  space is still allocated for it (TOP_FOOT_SIZE) to enable
1649  separation or merging when space is extended.
1650 
1651 2. Chunks allocated via mmap, which have the lowest-order bit
1652  (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
1653  PINUSE_BIT in their head fields. Because they are allocated
1654  one-by-one, each must carry its own prev_foot field, which is
1655  also used to hold the offset this chunk has within its mmapped
1656  region, which is needed to preserve alignment. Each mmapped
1657  chunk is trailed by the first two fields of a fake next-chunk
1658  for sake of usage checks.
1659 
1660 */
1661 
1662 struct malloc_chunk
1663 {
1664  size_t prev_foot; /* Size of previous chunk (if free). */
1665  size_t head; /* Size and inuse bits. */
1666  struct malloc_chunk *fd; /* double links -- used only if free. */
1667  struct malloc_chunk *bk;
1668 };
1669 
1670 typedef struct malloc_chunk mchunk;
1671 typedef struct malloc_chunk *mchunkptr;
1672 typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
1673 typedef size_t bindex_t; /* Described below */
1674 typedef unsigned int binmap_t; /* Described below */
1675 typedef unsigned int flag_t; /* The type of various bit flag sets */
1676 
1677 /* ------------------- Chunks sizes and alignments ----------------------- */
1678 
1679 #define MCHUNK_SIZE (sizeof(mchunk))
1680 
1681 #if FOOTERS
1682 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1683 #else /* FOOTERS */
1684 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
1685 #endif /* FOOTERS */
1686 
1687 /* MMapped chunks need a second word of overhead ... */
1688 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1689 /* ... and additional padding for fake next-chunk at foot */
1690 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
1691 
1692 /* The smallest size we can malloc is an aligned minimal chunk */
1693 #define MIN_CHUNK_SIZE\
1694  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1695 
1696 /* conversion from malloc headers to user pointers, and back */
1697 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
1698 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1699 /* chunk associated with aligned address A */
1700 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
1701 
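/*
  The two views are offset by two size_t's (the prev_foot and head
  fields), so the conversions invert each other. A hypothetical round
  trip, for some chunk pointer p:

      void *mem = chunk2mem(p);      user pointer, just past the header
      assert(mem2chunk(mem) == p);   back to the chunk view
*/
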
1702 /* Bounds on request (not chunk) sizes. */
1703 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
1704 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1705 
1706 /* pad request bytes into a usable size */
1707 #define pad_request(req) \
1708  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1709 
1710 /* pad request, checking for minimum (but not maximum) */
1711 #define request2size(req) \
1712  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
1713 
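/*
  Worked examples (a sketch assuming a 32-bit build without FOOTERS,
  so CHUNK_OVERHEAD is 4 and MIN_CHUNK_SIZE is 16):

      request2size(1)  == 16    below MIN_REQUEST, clamped to minimum
      request2size(12) == 16    12 + 4 bytes overhead rounds up to 16
      request2size(13) == 24    13 + 4 bytes overhead rounds up to 24
*/
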
1714 
1715 /* ------------------ Operations on head and foot fields ----------------- */
1716 
1717 /*
1718  The head field of a chunk is or'ed with PINUSE_BIT when previous
1719  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
1720  use. If the chunk was obtained with mmap, the prev_foot field has
1721  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
1722  mmapped region to the base of the chunk.
1723 */
1724 
1725 #define PINUSE_BIT (SIZE_T_ONE)
1726 #define CINUSE_BIT (SIZE_T_TWO)
1727 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
1728 
1729 /* Head value for fenceposts */
1730 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
1731 
1732 /* extraction of fields from head words */
1733 #define cinuse(p) ((p)->head & CINUSE_BIT)
1734 #define pinuse(p) ((p)->head & PINUSE_BIT)
1735 #define chunksize(p) ((p)->head & ~(INUSE_BITS))
1736 
1737 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
1738 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
1739 
1740 /* Treat space at ptr +/- offset as a chunk */
1741 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1742 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
1743 
1744 /* Ptr to next or previous physical malloc_chunk. */
1745 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
1746 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
1747 
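/*
  An illustrative invariant (not extra allocator code): when the
  preceding chunk is free and p is not mmapped, prev_foot is valid and
  the two macros invert each other, as the debug checks below rely on.
  (is_mmapped is defined further down.)

      if (!pinuse(p) && !is_mmapped(p))
          assert(next_chunk(prev_chunk(p)) == p);
*/
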
1748 /* extract next chunk's pinuse bit */
1749 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
1750 
1751 /* Get/set size at footer */
1752 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
1753 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
1754 
1755 /* Set size, pinuse bit, and foot */
1756 #define set_size_and_pinuse_of_free_chunk(p, s)\
1757  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
1758 
1759 /* Set size, pinuse bit, foot, and clear next pinuse */
1760 #define set_free_with_pinuse(p, s, n)\
1761  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
1762 
1763 #define is_mmapped(p)\
1764  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
1765 
1766 /* Get the internal overhead associated with chunk p */
1767 #define overhead_for(p)\
1768  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
1769 
1770 /* Return true if malloced space is not necessarily cleared */
1771 #if MMAP_CLEARS
1772 #define calloc_must_clear(p) (!is_mmapped(p))
1773 #else /* MMAP_CLEARS */
1774 #define calloc_must_clear(p) (1)
1775 #endif /* MMAP_CLEARS */
1776 
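/*
  To make the head encoding concrete: a 24-byte chunk that is in use,
  and whose predecessor is also in use, stores 24|PINUSE_BIT|CINUSE_BIT
  (i.e. 27) in its head; chunksize() masks the two flag bits back off.

      p->head = 24 | PINUSE_BIT | CINUSE_BIT;
      assert(chunksize(p) == 24 && cinuse(p) && pinuse(p));
*/
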
1777 /* ---------------------- Overlaid data structures ----------------------- */
1778 
1779 /*
1780  When chunks are not in use, they are treated as nodes of either
1781  lists or trees.
1782 
1783  "Small" chunks are stored in circular doubly-linked lists, and look
1784  like this:
1785 
1786  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1787  | Size of previous chunk |
1788  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1789  `head:' | Size of chunk, in bytes |P|
1790  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1791  | Forward pointer to next chunk in list |
1792  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1793  | Back pointer to previous chunk in list |
1794  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1795  | Unused space (may be 0 bytes long) .
1796  . .
1797  . |
1798 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1799  `foot:' | Size of chunk, in bytes |
1800  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1801 
1802  Larger chunks are kept in a form of bitwise digital trees (aka
1803  tries) keyed on chunksizes. Because malloc_tree_chunks are only for
1804  free chunks greater than 256 bytes, their size doesn't impose any
1805  constraints on user chunk sizes. Each node looks like:
1806 
1807  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1808  | Size of previous chunk |
1809  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1810  `head:' | Size of chunk, in bytes |P|
1811  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1812  | Forward pointer to next chunk of same size |
1813  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1814  | Back pointer to previous chunk of same size |
1815  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1816  | Pointer to left child (child[0]) |
1817  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1818  | Pointer to right child (child[1]) |
1819  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1820  | Pointer to parent |
1821  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1822  | bin index of this chunk |
1823  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1824  | Unused space .
1825  . |
1826 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1827  `foot:' | Size of chunk, in bytes |
1828  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1829 
1830  Each tree holding treenodes is a tree of unique chunk sizes. Chunks
1831  of the same size are arranged in a circularly-linked list, with only
1832  the oldest chunk (the next to be used, in our FIFO ordering)
1833  actually in the tree. (Tree members are distinguished by a non-null
1834 parent pointer.) If a chunk with the same size as an existing node
1835  is inserted, it is linked off the existing node using pointers that
1836  work in the same way as fd/bk pointers of small chunks.
1837 
1838  Each tree contains a power of 2 sized range of chunk sizes (the
1839 smallest is 0x100 <= x < 0x180), which is divided in half at each
1840 tree level, with the chunks in the smaller half of the range (0x100
1841 <= x < 0x140 for the top node) in the left subtree and the larger
1842  half (0x140 <= x < 0x180) in the right subtree. This is, of course,
1843  done by inspecting individual bits.
1844 
1845  Using these rules, each node's left subtree contains all smaller
1846  sizes than its right subtree. However, the node at the root of each
1847  subtree has no particular ordering relationship to either. (The
1848  dividing line between the subtree sizes is based on trie relation.)
1849  If we remove the last chunk of a given size from the interior of the
1850  tree, we need to replace it with a leaf node. The tree ordering
1851  rules permit a node to be replaced by any leaf below it.
1852 
1853  The smallest chunk in a tree (a common operation in a best-fit
1854  allocator) can be found by walking a path to the leftmost leaf in
1855  the tree. Unlike a usual binary tree, where we follow left child
1856  pointers until we reach a null, here we follow the right child
1857  pointer any time the left one is null, until we reach a leaf with
1858  both child pointers null. The smallest chunk in the tree will be
1859  somewhere along that path.
1860 
1861  The worst case number of steps to add, find, or remove a node is
1862  bounded by the number of bits differentiating chunks within
1863  bins. Under current bin calculations, this ranges from 6 up to 21
1864  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1865  is of course much better.
1866 */
1867 
1868 struct malloc_tree_chunk
1869 {
1870 /* The first four fields must be compatible with malloc_chunk */
1871 size_t prev_foot;
1872 size_t head;
1873 struct malloc_tree_chunk *fd;
1874 struct malloc_tree_chunk *bk;
1875 
1876 struct malloc_tree_chunk *child[2];
1877 struct malloc_tree_chunk *parent;
1878 bindex_t index;
1879 };
1880 
1881 typedef struct malloc_tree_chunk tchunk;
1882 typedef struct malloc_tree_chunk *tchunkptr;
1883 typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
1884 
1885 /* A little helper macro for trees */
1886 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
1887 
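/*
  A sketch of the leftmost-path walk described above (illustrative
  only; m, i, and best are hypothetical locals): repeatedly taking
  leftmost_child from a treebin root visits the chain along which the
  smallest chunk of that tree must lie. (treebin_at is defined below.)

      tchunkptr t = *treebin_at(m, i);
      tchunkptr best = 0;
      while (t != 0) {
          if (best == 0 || chunksize(t) < chunksize(best))
              best = t;
          t = leftmost_child(t);
      }
*/
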
1888 /* ----------------------------- Segments -------------------------------- */
1889 
1890 /*
1891  Each malloc space may include non-contiguous segments, held in a
1892  list headed by an embedded malloc_segment record representing the
1893  top-most space. Segments also include flags holding properties of
1894  the space. Large chunks that are directly allocated by mmap are not
1895  included in this list. They are instead independently created and
1896  destroyed without otherwise keeping track of them.
1897 
1898  Segment management mainly comes into play for spaces allocated by
1899  MMAP. Any call to MMAP might or might not return memory that is
1900  adjacent to an existing segment. MORECORE normally contiguously
1901  extends the current space, so this space is almost always adjacent,
1902  which is simpler and faster to deal with. (This is why MORECORE is
1903  used preferentially to MMAP when both are available -- see
1904  sys_alloc.) When allocating using MMAP, we don't use any of the
1905  hinting mechanisms (inconsistently) supported in various
1906  implementations of unix mmap, or distinguish reserving from
1907  committing memory. Instead, we just ask for space, and exploit
1908  contiguity when we get it. It is probably possible to do
1909  better than this on some systems, but no general scheme seems
1910  to be significantly better.
1911 
1912  Management entails a simpler variant of the consolidation scheme
1913  used for chunks to reduce fragmentation -- new adjacent memory is
1914  normally prepended or appended to an existing segment. However,
1915  there are limitations compared to chunk consolidation that mostly
1916  reflect the fact that segment processing is relatively infrequent
1917  (occurring only when getting memory from system) and that we
1918  don't expect to have huge numbers of segments:
1919 
1920  * Segments are not indexed, so traversal requires linear scans. (It
1921  would be possible to index these, but is not worth the extra
1922  overhead and complexity for most programs on most platforms.)
1923  * New segments are only appended to old ones when holding top-most
1924  memory; if they cannot be prepended to others, they are held in
1925  different segments.
1926 
1927  Except for the top-most segment of an mstate, each segment record
1928  is kept at the tail of its segment. Segments are added by pushing
1929  segment records onto the list headed by &mstate.seg for the
1930  containing mstate.
1931 
1932  Segment flags control allocation/merge/deallocation policies:
1933  * If EXTERN_BIT set, then we did not allocate this segment,
1934  and so should not try to deallocate or merge with others.
1935  (This currently holds only for the initial segment passed
1936  into create_mspace_with_base.)
1937  * If IS_MMAPPED_BIT set, the segment may be merged with
1938  other surrounding mmapped segments and trimmed/de-allocated
1939  using munmap.
1940  * If neither bit is set, then the segment was obtained using
1941  MORECORE so can be merged with surrounding MORECORE'd segments
1942  and deallocated/trimmed using MORECORE with negative arguments.
1943 */
1944 
1945 struct malloc_segment
1946 {
1947  char *base; /* base address */
1948  size_t size; /* allocated size */
1949  struct malloc_segment *next; /* ptr to next segment */
1950  flag_t sflags; /* mmap and extern flag */
1951 };
1952 
1953 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
1954 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1955 
1956 typedef struct malloc_segment msegment;
1957 typedef struct malloc_segment *msegmentptr;
1958 
1959 /* ---------------------------- malloc_state ----------------------------- */
1960 
1961 /*
1962  A malloc_state holds all of the bookkeeping for a space.
1963  The main fields are:
1964 
1965  Top
1966  The topmost chunk of the currently active segment. Its size is
1967  cached in topsize. The actual size of topmost space is
1968  topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1969  fenceposts and segment records if necessary when getting more
1970  space from the system. The size at which to autotrim top is
1971  cached from mparams in trim_check, except that it is disabled if
1972  an autotrim fails.
1973 
1974  Designated victim (dv)
1975  This is the preferred chunk for servicing small requests that
1976  don't have exact fits. It is normally the chunk split off most
1977  recently to service another small request. Its size is cached in
1978  dvsize. The link fields of this chunk are not maintained since it
1979  is not kept in a bin.
1980 
1981  SmallBins
1982  An array of bin headers for free chunks. These bins hold chunks
1983  with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1984  chunks of all the same size, spaced 8 bytes apart. To simplify
1985  use in double-linked lists, each bin header acts as a malloc_chunk
1986  pointing to the real first node, if it exists (else pointing to
1987  itself). This avoids special-casing for headers. But to avoid
1988  waste, we allocate only the fd/bk pointers of bins, and then use
1989  repositioning tricks to treat these as the fields of a chunk.
1990 
1991  TreeBins
1992  Treebins are pointers to the roots of trees holding a range of
1993  sizes. There are 2 equally spaced treebins for each power of two
1994  from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
1995  larger.
1996 
1997  Bin maps
1998  There is one bit map for small bins ("smallmap") and one for
1999 treebins ("treemap"). Each bin sets its bit when non-empty, and
2000  clears the bit when empty. Bit operations are then used to avoid
2001  bin-by-bin searching -- nearly all "search" is done without ever
2002  looking at bins that won't be selected. The bit maps
2003 conservatively use 32 bits per map word, even on a 64-bit system.
2004  For a good description of some of the bit-based techniques used
2005  here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2006  supplement at http://hackersdelight.org/). Many of these are
2007  intended to reduce the branchiness of paths through malloc etc, as
2008  well as to reduce the number of memory locations read or written.
2009 
2010  Segments
2011  A list of segments headed by an embedded malloc_segment record
2012  representing the initial space.
2013 
2014  Address check support
2015  The least_addr field is the least address ever obtained from
2016  MORECORE or MMAP. Attempted frees and reallocs of any address less
2017  than this are trapped (unless INSECURE is defined).
2018 
2019  Magic tag
2020  A cross-check field that should always hold same value as mparams.magic.
2021 
2022  Flags
2023  Bits recording whether to use MMAP, locks, or contiguous MORECORE
2024 
2025  Statistics
2026  Each space keeps track of current and maximum system memory
2027  obtained via MORECORE or MMAP.
2028 
2029  Locking
2030  If USE_LOCKS is defined, the "mutex" lock is acquired and released
2031  around every public call using this mspace.
2032 */
2033 
2034 /* Bin types, widths and sizes */
2035 #define NSMALLBINS (32U)
2036 #define NTREEBINS (32U)
2037 #define SMALLBIN_SHIFT (3U)
2038 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2039 #define TREEBIN_SHIFT (8U)
2040 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2041 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2042 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2043 
2044 struct malloc_state
2045 {
2046 binmap_t smallmap;
2047 binmap_t treemap;
2048 size_t dvsize;
2049  size_t topsize;
2050  char *least_addr;
2051  mchunkptr dv;
2052  mchunkptr top;
2053  size_t trim_check;
2054  size_t magic;
2055  mchunkptr smallbins[(NSMALLBINS + 1) * 2];
2056  tbinptr treebins[NTREEBINS];
2057 size_t footprint;
2058 size_t max_footprint;
2059 flag_t mflags;
2060 #if USE_LOCKS
2061  MLOCK_T mutex; /* locate lock among fields that rarely change */
2062 #endif /* USE_LOCKS */
2063  msegment seg;
2064 };
2065 
2066 typedef struct malloc_state *mstate;
2067 
2068 /* ------------- Global malloc_state and malloc_params ------------------- */
2069 
2070 /*
2071  malloc_params holds global properties, including those that can be
2072  dynamically set using mallopt. There is a single instance, mparams,
2073  initialized in init_mparams.
2074 */
2075 
2076 struct malloc_params
2077 {
2078  size_t magic;
2079  size_t page_size;
2080  size_t granularity;
2081 size_t mmap_threshold;
2082 size_t trim_threshold;
2083 flag_t default_mflags;
2084 };
2085 
2086 static struct malloc_params mparams;
2087 
2088 /* The global malloc_state used for all non-"mspace" calls */
2089 static struct malloc_state _gm_;
2090 #define gm (&_gm_)
2091 #define is_global(M) ((M) == &_gm_)
2092 #define is_initialized(M) ((M)->top != 0)
2093 
2094 /* -------------------------- system alloc setup ------------------------- */
2095 
2096 /* Operations on mflags */
2097 
2098 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2099 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2100 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2101 
2102 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2103 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2104 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2105 
2106 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2107 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2108 
2109 #define set_lock(M,L)\
2110  ((M)->mflags = (L)?\
2111  ((M)->mflags | USE_LOCK_BIT) :\
2112  ((M)->mflags & ~USE_LOCK_BIT))
2113 
2114 /* page-align a size */
2115 #define page_align(S)\
2116  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
2117 
2118 /* granularity-align a size */
2119 #define granularity_align(S)\
2120  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
2121 
2122 #define is_page_aligned(S)\
2123  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2124 #define is_granularity_aligned(S)\
2125  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2126 
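/*
  For example, with mparams.page_size == 4096:

      page_align(1)    == 4096
      page_align(4096) == 8192   (a full page is added before masking,
                                  so an already-aligned size is bumped
                                  up by one page)
      is_page_aligned(8192) is true
*/
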
2127 /* True if segment S holds address A */
2128 #define segment_holds(S, A)\
2129  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2130 
2131 /* Return segment holding given address */
2132 static msegmentptr
2133 segment_holding(mstate m, char *addr)
2134 {
2135  msegmentptr sp = &m->seg;
2136  for (;;) {
2137  if (addr >= sp->base && addr < sp->base + sp->size)
2138  return sp;
2139  if ((sp = sp->next) == 0)
2140  return 0;
2141  }
2142 }
2143 
2144 /* Return true if segment contains a segment link */
2145 static int
2146 has_segment_link(mstate m, msegmentptr ss)
2147 {
2148  msegmentptr sp = &m->seg;
2149  for (;;) {
2150  if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
2151  return 1;
2152  if ((sp = sp->next) == 0)
2153  return 0;
2154  }
2155 }
2156 
2157 #ifndef MORECORE_CANNOT_TRIM
2158 #define should_trim(M,s) ((s) > (M)->trim_check)
2159 #else /* MORECORE_CANNOT_TRIM */
2160 #define should_trim(M,s) (0)
2161 #endif /* MORECORE_CANNOT_TRIM */
2162 
2163 /*
2164  TOP_FOOT_SIZE is padding at the end of a segment, including space
2165  that may be needed to place segment records and fenceposts when new
2166  noncontiguous segments are added.
2167 */
2168 #define TOP_FOOT_SIZE\
2169  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2170 
2171 
2172 /* ------------------------------- Hooks -------------------------------- */
2173 
2174 /*
2175  PREACTION should be defined to return 0 on success, and nonzero on
2176  failure. If you are not using locking, you can redefine these to do
2177  anything you like.
2178 */
2179 
2180 #if USE_LOCKS
2181 
2182 /* Ensure locks are initialized */
2183 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2184 
2185 #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2186 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2187 #else /* USE_LOCKS */
2188 
2189 #ifndef PREACTION
2190 #define PREACTION(M) (0)
2191 #endif /* PREACTION */
2192 
2193 #ifndef POSTACTION
2194 #define POSTACTION(M)
2195 #endif /* POSTACTION */
2196 
2197 #endif /* USE_LOCKS */
2198 
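/*
  A sketch of the pattern the public entry points below follow (see
  e.g. internal_mallinfo): PREACTION initializes mparams if needed and
  takes the mstate's lock; POSTACTION releases it.

      if (!PREACTION(m)) {
          ... operate on the mstate m ...
          POSTACTION(m);
      }
*/
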
2199 /*
2200  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2201  USAGE_ERROR_ACTION is triggered on detected bad frees and
2202  reallocs. The argument p is an address that might have triggered the
2203  fault. It is ignored by the two predefined actions, but might be
2204  useful in custom actions that try to help diagnose errors.
2205 */
2206 
2207 #if PROCEED_ON_ERROR
2208 
2209 /* A count of the number of corruption errors causing resets */
2210 int malloc_corruption_error_count;
2211 
2212 /* default corruption action */
2213 static void reset_on_error(mstate m);
2214 
2215 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2216 #define USAGE_ERROR_ACTION(m, p)
2217 
2218 #else /* PROCEED_ON_ERROR */
2219 
2220 #ifndef CORRUPTION_ERROR_ACTION
2221 #define CORRUPTION_ERROR_ACTION(m) ABORT
2222 #endif /* CORRUPTION_ERROR_ACTION */
2223 
2224 #ifndef USAGE_ERROR_ACTION
2225 #define USAGE_ERROR_ACTION(m,p) ABORT
2226 #endif /* USAGE_ERROR_ACTION */
2227 
2228 #endif /* PROCEED_ON_ERROR */
2229 
2230 /* -------------------------- Debugging setup ---------------------------- */
2231 
2232 #if ! DEBUG
2233 
2234 #define check_free_chunk(M,P)
2235 #define check_inuse_chunk(M,P)
2236 #define check_malloced_chunk(M,P,N)
2237 #define check_mmapped_chunk(M,P)
2238 #define check_malloc_state(M)
2239 #define check_top_chunk(M,P)
2240 
2241 #else /* DEBUG */
2242 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
2243 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2244 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2245 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2246 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2247 #define check_malloc_state(M) do_check_malloc_state(M)
2248 
2249 static void do_check_any_chunk(mstate m, mchunkptr p);
2250 static void do_check_top_chunk(mstate m, mchunkptr p);
2251 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2252 static void do_check_inuse_chunk(mstate m, mchunkptr p);
2253 static void do_check_free_chunk(mstate m, mchunkptr p);
2254 static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
2255 static void do_check_tree(mstate m, tchunkptr t);
2256 static void do_check_treebin(mstate m, bindex_t i);
2257 static void do_check_smallbin(mstate m, bindex_t i);
2258 static void do_check_malloc_state(mstate m);
2259 static int bin_find(mstate m, mchunkptr x);
2260 static size_t traverse_and_check(mstate m);
2261 #endif /* DEBUG */
2262 
2263 /* ---------------------------- Indexing Bins ---------------------------- */
2264 
2265 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2266 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
2267 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2268 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
2269 
2270 /* addressing by index. See above about smallbin repositioning */
2271 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2272 #define treebin_at(M,i) (&((M)->treebins[i]))
2273 
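/*
  Worked example (SMALLBIN_SHIFT == 3): a 40-byte chunk is "small" and
  lives in bin 5, and the index maps back to the exact chunk size.

      assert(is_small(40) && small_index(40) == 5);
      assert(small_index2size(small_index(40)) == 40);
*/
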
2274 /* assign tree index for size S to variable I */
2275 #if defined(__GNUC__) && defined(i386)
2276 #define compute_tree_index(S, I)\
2277 {\
2278  size_t X = S >> TREEBIN_SHIFT;\
2279  if (X == 0)\
2280  I = 0;\
2281  else if (X > 0xFFFF)\
2282  I = NTREEBINS-1;\
2283  else {\
2284  unsigned int K;\
2285  __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
2286  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2287  }\
2288 }
2289 #else /* GNUC */
2290 #define compute_tree_index(S, I)\
2291 {\
2292  size_t X = S >> TREEBIN_SHIFT;\
2293  if (X == 0)\
2294  I = 0;\
2295  else if (X > 0xFFFF)\
2296  I = NTREEBINS-1;\
2297  else {\
2298  unsigned int Y = (unsigned int)X;\
2299  unsigned int N = ((Y - 0x100) >> 16) & 8;\
2300  unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2301  N += K;\
2302  N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2303  K = 14 - N + ((Y <<= K) >> 15);\
2304  I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2305  }\
2306 }
2307 #endif /* GNUC */
2308 
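/*
  Worked examples (TREEBIN_SHIFT == 8, so the smallest tree range
  starts at 0x100; I is a hypothetical bindex_t variable):

      compute_tree_index(0x100, I);   now I == 0  (256 <= size < 384)
      compute_tree_index(0x180, I);   now I == 1  (384 <= size < 512)
      compute_tree_index(0x200, I);   now I == 2  (512 <= size < 768)
*/
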
2309 /* Bit representing maximum resolved size in a treebin at i */
2310 #define bit_for_tree_index(i) \
2311  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2312 
2313 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
2314 #define leftshift_for_tree_index(i) \
2315  ((i == NTREEBINS-1)? 0 : \
2316  ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2317 
2318 /* The size of the smallest chunk held in bin with index i */
2319 #define minsize_for_tree_index(i) \
2320  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2321  (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2322 
2323 
2324 /* ------------------------ Operations on bin maps ----------------------- */
2325 
2326 /* bit corresponding to given index */
2327 #define idx2bit(i) ((binmap_t)(1) << (i))
2328 
2329 /* Mark/Clear bits with given index */
2330 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2331 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2332 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2333 
2334 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2335 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2336 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2337 
2338 /* index corresponding to given bit */
2339 
2340 #if defined(__GNUC__) && defined(i386)
2341 #define compute_bit2idx(X, I)\
2342 {\
2343  unsigned int J;\
2344  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2345  I = (bindex_t)J;\
2346 }
2347 
2348 #else /* GNUC */
2349 #if USE_BUILTIN_FFS
2350 #define compute_bit2idx(X, I) I = ffs(X)-1
2351 
2352 #else /* USE_BUILTIN_FFS */
2353 #define compute_bit2idx(X, I)\
2354 {\
2355  unsigned int Y = X - 1;\
2356  unsigned int K = Y >> (16-4) & 16;\
2357  unsigned int N = K; Y >>= K;\
2358  N += K = Y >> (8-3) & 8; Y >>= K;\
2359  N += K = Y >> (4-2) & 4; Y >>= K;\
2360  N += K = Y >> (2-1) & 2; Y >>= K;\
2361  N += K = Y >> (1-0) & 1; Y >>= K;\
2362  I = (bindex_t)(N + Y);\
2363 }
2364 #endif /* USE_BUILTIN_FFS */
2365 #endif /* GNUC */
2366 
2367 /* isolate the least set bit of a bitmap */
2368 #define least_bit(x) ((x) & -(x))
2369 
2370 /* mask with all bits to left of least bit of x on */
2371 #define left_bits(x) ((x<<1) | -(x<<1))
2372 
2373 /* mask with all bits to left of or equal to least bit of x on */
2374 #define same_or_left_bits(x) ((x) | -(x))
2375 
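/*
  For example, with a 32-bit binmap_t and a hypothetical bindex_t I:

      least_bit(0x28) == 0x08         the lowest set bit, isolated
      left_bits(0x08) == 0xFFFFFFF0   every bit left of bit 3
      compute_bit2idx(0x08, I);       now I == 3
*/
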
2376 
2377 /* ----------------------- Runtime Check Support ------------------------- */
2378 
2379 /*
2380  For security, the main invariant is that malloc/free/etc never
2381  writes to a static address other than malloc_state, unless static
2382  malloc_state itself has been corrupted, which cannot occur via
2383  malloc (because of these checks). In essence this means that we
2384  believe all pointers, sizes, maps etc held in malloc_state, but
2385  check all of those linked or offsetted from other embedded data
2386  structures. These checks are interspersed with main code in a way
2387  that tends to minimize their run-time cost.
2388 
2389  When FOOTERS is defined, in addition to range checking, we also
2390 verify footer fields of inuse chunks, which can be used to guarantee
2391  that the mstate controlling malloc/free is intact. This is a
2392  streamlined version of the approach described by William Robertson
2393  et al in "Run-time Detection of Heap-based Overflows" LISA'03
2394  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
2395  of an inuse chunk holds the xor of its mstate and a random seed,
2396  that is checked upon calls to free() and realloc(). This is
2397 (probabilistically) unguessable from outside the program, but can be
2398  computed by any code successfully malloc'ing any chunk, so does not
2399  itself provide protection against code that has already broken
2400  security through some other means. Unlike Robertson et al, we
2401  always dynamically check addresses of all offset chunks (previous,
2402  next, etc). This turns out to be cheaper than relying on hashes.
2403 */
2404 
2405 #if !INSECURE
2406 /* Check if address a is at least as high as any from MORECORE or MMAP */
2407 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2408 /* Check if address of next chunk n is higher than base chunk p */
2409 #define ok_next(p, n) ((char*)(p) < (char*)(n))
2410 /* Check if p has its cinuse bit on */
2411 #define ok_cinuse(p) cinuse(p)
2412 /* Check if p has its pinuse bit on */
2413 #define ok_pinuse(p) pinuse(p)
2414 
2415 #else /* !INSECURE */
2416 #define ok_address(M, a) (1)
2417 #define ok_next(b, n) (1)
2418 #define ok_cinuse(p) (1)
2419 #define ok_pinuse(p) (1)
2420 #endif /* !INSECURE */
2421 
2422 #if (FOOTERS && !INSECURE)
2423 /* Check if (alleged) mstate m has expected magic field */
2424 #define ok_magic(M) ((M)->magic == mparams.magic)
2425 #else /* (FOOTERS && !INSECURE) */
2426 #define ok_magic(M) (1)
2427 #endif /* (FOOTERS && !INSECURE) */
2428 
2429 
2430 /* In gcc, use __builtin_expect to minimize impact of checks */
2431 #if !INSECURE
2432 #if defined(__GNUC__) && __GNUC__ >= 3
2433 #define RTCHECK(e) __builtin_expect(e, 1)
2434 #else /* GNUC */
2435 #define RTCHECK(e) (e)
2436 #endif /* GNUC */
2437 #else /* !INSECURE */
2438 #define RTCHECK(e) (1)
2439 #endif /* !INSECURE */
2440 
2441 /* macros to set up inuse chunks with or without footers */
2442 
2443 #if !FOOTERS
2444 
2445 #define mark_inuse_foot(M,p,s)
2446 
2447 /* Set cinuse bit and pinuse bit of next chunk */
2448 #define set_inuse(M,p,s)\
2449  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2450  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2451 
2452 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
2453 #define set_inuse_and_pinuse(M,p,s)\
2454  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2455  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2456 
2457 /* Set size, cinuse and pinuse bit of this chunk */
2458 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2459  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
2460 
2461 #else /* FOOTERS */
2462 
2463 /* Set foot of inuse chunk to be xor of mstate and seed */
2464 #define mark_inuse_foot(M,p,s)\
2465  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
2466 
2467 #define get_mstate_for(p)\
2468  ((mstate)(((mchunkptr)((char*)(p) +\
2469  (chunksize(p))))->prev_foot ^ mparams.magic))
2470 
2471 #define set_inuse(M,p,s)\
2472  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2473  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2474  mark_inuse_foot(M,p,s))
2475 
2476 #define set_inuse_and_pinuse(M,p,s)\
2477  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2478  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2479  mark_inuse_foot(M,p,s))
2480 
2481 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2482  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2483  mark_inuse_foot(M, p, s))
2484 
2485 #endif /* !FOOTERS */
2486 
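/*
  A sketch of the FOOTERS round trip (illustrative only, and only
  meaningful when FOOTERS is enabled): mark_inuse_foot stores the
  mstate xor'ed with mparams.magic just past the chunk, and
  get_mstate_for xors the magic back out, so for an intact in-use
  chunk p owned by mstate m:

      mark_inuse_foot(m, p, chunksize(p));
      assert(get_mstate_for(p) == m);
*/
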
2487 /* ---------------------------- setting mparams -------------------------- */
2488 
2489 /* Initialize mparams */
2490 static int
2491 init_mparams(void)
2492 {
2493  if (mparams.page_size == 0) {
2494  size_t s;
2495 
2496 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2497 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
2498 #if MORECORE_CONTIGUOUS
2499 mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
2500 #else /* MORECORE_CONTIGUOUS */
2501 mparams.default_mflags =
2502 USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
2503 #endif /* MORECORE_CONTIGUOUS */
2504 
2505 #if (FOOTERS && !INSECURE)
2506  {
2507 #if USE_DEV_RANDOM
2508  int fd;
2509  unsigned char buf[sizeof(size_t)];
2510  /* Try to use /dev/urandom, else fall back on using time */
2511  if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
2512  read(fd, buf, sizeof(buf)) == sizeof(buf)) {
2513  s = *((size_t *) buf);
2514  close(fd);
2515  } else
2516 #endif /* USE_DEV_RANDOM */
2517  s = (size_t) (time(0) ^ (size_t) 0x55555555U);
2518 
2519  s |= (size_t) 8U; /* ensure nonzero */
2520  s &= ~(size_t) 7U; /* improve chances of fault for bad values */
2521 
2522  }
2523 #else /* (FOOTERS && !INSECURE) */
2524  s = (size_t) 0x58585858U;
2525 #endif /* (FOOTERS && !INSECURE) */
2526 ACQUIRE_MAGIC_INIT_LOCK();
2527 if (mparams.magic == 0) {
2528  mparams.magic = s;
2529  /* Set up lock for main malloc area */
2530  INITIAL_LOCK(&gm->mutex);
2531  gm->mflags = mparams.default_mflags;
2532  }
2533 RELEASE_MAGIC_INIT_LOCK();
2534 
2535 #ifndef WIN32
2536 mparams.page_size = malloc_getpagesize;
2537 mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
2538 DEFAULT_GRANULARITY : mparams.page_size);
2539 #else /* WIN32 */
2540  {
2541  SYSTEM_INFO system_info;
2542  GetSystemInfo(&system_info);
2543  mparams.page_size = system_info.dwPageSize;
2544  mparams.granularity = system_info.dwAllocationGranularity;
2545  }
2546 #endif /* WIN32 */
2547 
2548  /* Sanity-check configuration:
2549  size_t must be unsigned and as wide as pointer type.
2550  ints must be at least 4 bytes.
2551  alignment must be at least 8.
2552  Alignment, min chunk size, and page size must all be powers of 2.
2553  */
2554  if ((sizeof(size_t) != sizeof(char *)) ||
2555  (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2556  (sizeof(int) < 4) ||
2557  (MALLOC_ALIGNMENT < (size_t) 8U) ||
2558  ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
2559 ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
2560 ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
2561  || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
2562  ABORT;
2563  }
2564  return 0;
2565 }
2566 
2567 /* support for mallopt */
2568 static int
2569 change_mparam(int param_number, int value)
2570 {
2571  size_t val = (size_t) value;
2572  init_mparams();
2573  switch (param_number) {
2574  case M_TRIM_THRESHOLD:
2575 mparams.trim_threshold = val;
2576 return 1;
2577  case M_GRANULARITY:
2578  if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
2579 mparams.granularity = val;
2580 return 1;
2581  } else
2582  return 0;
2583  case M_MMAP_THRESHOLD:
2584 mparams.mmap_threshold = val;
2585 return 1;
2586  default:
2587  return 0;
2588  }
2589 }
2590 
2591 #if DEBUG
2592 /* ------------------------- Debugging Support --------------------------- */
2593 
2594 /* Check properties of any chunk, whether free, inuse, mmapped etc */
2595 static void
2596 do_check_any_chunk(mstate m, mchunkptr p)
2597 {
2598  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2599  assert(ok_address(m, p));
2600 }
2601 
2602 /* Check properties of top chunk */
2603 static void
2604 do_check_top_chunk(mstate m, mchunkptr p)
2605 {
2606  msegmentptr sp = segment_holding(m, (char *) p);
2607  size_t sz = chunksize(p);
2608  assert(sp != 0);
2609  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2610  assert(ok_address(m, p));
2611  assert(sz == m->topsize);
2612  assert(sz > 0);
2613  assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
2614  assert(pinuse(p));
2615  assert(!next_pinuse(p));
2616 }
2617 
2618 /* Check properties of (inuse) mmapped chunks */
2619 static void
2620 do_check_mmapped_chunk(mstate m, mchunkptr p)
2621 {
2622  size_t sz = chunksize(p);
2623  size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2624  assert(is_mmapped(p));
2625  assert(use_mmap(m));
2626  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2627  assert(ok_address(m, p));
2628  assert(!is_small(sz));
2629  assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
2630 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
2631 assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
2632 }
2633 
2634 /* Check properties of inuse chunks */
2635 static void
2636 do_check_inuse_chunk(mstate m, mchunkptr p)
2637 {
2638  do_check_any_chunk(m, p);
2639  assert(cinuse(p));
2640  assert(next_pinuse(p));
2641  /* If not pinuse and not mmapped, previous chunk has OK offset */
2642  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
2643  if (is_mmapped(p))
2644  do_check_mmapped_chunk(m, p);
2645 }
2646 
2647 /* Check properties of free chunks */
2648 static void
2649 do_check_free_chunk(mstate m, mchunkptr p)
2650 {
2651  size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2652  mchunkptr next = chunk_plus_offset(p, sz);
2653  do_check_any_chunk(m, p);
2654  assert(!cinuse(p));
2655  assert(!next_pinuse(p));
2656  assert(!is_mmapped(p));
2657  if (p != m->dv && p != m->top) {
2658  if (sz >= MIN_CHUNK_SIZE) {
2659  assert((sz & CHUNK_ALIGN_MASK) == 0);
2660 assert(is_aligned(chunk2mem(p)));
2661 assert(next->prev_foot == sz);
2662  assert(pinuse(p));
2663  assert(next == m->top || cinuse(next));
2664  assert(p->fd->bk == p);
2665  assert(p->bk->fd == p);
2666  } else /* markers are always of size SIZE_T_SIZE */
2667  assert(sz == SIZE_T_SIZE);
2668  }
2669 }
2670 
2671 /* Check properties of malloced chunks at the point they are malloced */
2672 static void
2673 do_check_malloced_chunk(mstate m, void *mem, size_t s)
2674 {
2675  if (mem != 0) {
2676  mchunkptr p = mem2chunk(mem);
2677  size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2678  do_check_inuse_chunk(m, p);
2679  assert((sz & CHUNK_ALIGN_MASK) == 0);
2680  assert(sz >= MIN_CHUNK_SIZE);
2681  assert(sz >= s);
2682  /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2683  assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
2684  }
2685 }
2686 
2687 /* Check a tree and its subtrees. */
2688 static void
2689 do_check_tree(mstate m, tchunkptr t)
2690 {
2691  tchunkptr head = 0;
2692  tchunkptr u = t;
2693  bindex_t tindex = t->index;
2694  size_t tsize = chunksize(t);
2695  bindex_t idx;
2696  compute_tree_index(tsize, idx);
2697  assert(tindex == idx);
2698  assert(tsize >= MIN_LARGE_SIZE);
2699  assert(tsize >= minsize_for_tree_index(idx));
2700  assert((idx == NTREEBINS - 1)
2701  || (tsize < minsize_for_tree_index((idx + 1))));
2702 
2703  do { /* traverse through chain of same-sized nodes */
2704  do_check_any_chunk(m, ((mchunkptr) u));
2705  assert(u->index == tindex);
2706  assert(chunksize(u) == tsize);
2707  assert(!cinuse(u));
2708  assert(!next_pinuse(u));
2709  assert(u->fd->bk == u);
2710  assert(u->bk->fd == u);
2711  if (u->parent == 0) {
2712  assert(u->child[0] == 0);
2713  assert(u->child[1] == 0);
2714  } else {
2715  assert(head == 0); /* only one node on chain has parent */
2716  head = u;
2717  assert(u->parent != u);
2718  assert(u->parent->child[0] == u ||
2719  u->parent->child[1] == u ||
2720  *((tbinptr *) (u->parent)) == u);
2721  if (u->child[0] != 0) {
2722  assert(u->child[0]->parent == u);
2723  assert(u->child[0] != u);
2724  do_check_tree(m, u->child[0]);
2725  }
2726  if (u->child[1] != 0) {
2727  assert(u->child[1]->parent == u);
2728  assert(u->child[1] != u);
2729  do_check_tree(m, u->child[1]);
2730  }
2731  if (u->child[0] != 0 && u->child[1] != 0) {
2732  assert(chunksize(u->child[0]) < chunksize(u->child[1]));
2733  }
2734  }
2735  u = u->fd;
2736  } while (u != t);
2737  assert(head != 0);
2738 }
2739 
2740 /* Check all the chunks in a treebin. */
2741 static void
2742 do_check_treebin(mstate m, bindex_t i)
2743 {
2744  tbinptr *tb = treebin_at(m, i);
2745  tchunkptr t = *tb;
2746  int empty = (m->treemap & (1U << i)) == 0;
2747  if (t == 0)
2748  assert(empty);
2749  if (!empty)
2750  do_check_tree(m, t);
2751 }
2752 
2753 /* Check all the chunks in a smallbin. */
2754 static void
2755 do_check_smallbin(mstate m, bindex_t i)
2756 {
2757  sbinptr b = smallbin_at(m, i);
2758  mchunkptr p = b->bk;
2759  unsigned int empty = (m->smallmap & (1U << i)) == 0;
2760  if (p == b)
2761  assert(empty);
2762  if (!empty) {
2763  for (; p != b; p = p->bk) {
2764  size_t size = chunksize(p);
2765  mchunkptr q;
2766  /* each chunk claims to be free */
2767  do_check_free_chunk(m, p);
2768  /* chunk belongs in bin */
2769  assert(small_index(size) == i);
2770  assert(p->bk == b || chunksize(p->bk) == chunksize(p));
2771  /* chunk is followed by an inuse chunk */
2772  q = next_chunk(p);
2773  if (q->head != FENCEPOST_HEAD)
2774  do_check_inuse_chunk(m, q);
2775  }
2776  }
2777 }
2778 
2779 /* Find x in a bin. Used in other check functions. */
2780 static int
2781 bin_find(mstate m, mchunkptr x)
2782 {
2783  size_t size = chunksize(x);
2784  if (is_small(size)) {
2785  bindex_t sidx = small_index(size);
2786  sbinptr b = smallbin_at(m, sidx);
2787  if (smallmap_is_marked(m, sidx)) {
2788  mchunkptr p = b;
2789  do {
2790  if (p == x)
2791  return 1;
2792  } while ((p = p->fd) != b);
2793  }
2794  } else {
2795  bindex_t tidx;
2796  compute_tree_index(size, tidx);
2797  if (treemap_is_marked(m, tidx)) {
2798  tchunkptr t = *treebin_at(m, tidx);
2799  size_t sizebits = size << leftshift_for_tree_index(tidx);
2800  while (t != 0 && chunksize(t) != size) {
2801  t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
2802  sizebits <<= 1;
2803  }
2804  if (t != 0) {
2805  tchunkptr u = t;
2806  do {
2807  if (u == (tchunkptr) x)
2808  return 1;
2809  } while ((u = u->fd) != t);
2810  }
2811  }
2812  }
2813  return 0;
2814 }
2815 
2816 /* Traverse each chunk and check it; return total */
2817 static size_t
2818 traverse_and_check(mstate m)
2819 {
2820  size_t sum = 0;
2821  if (is_initialized(m)) {
2822  msegmentptr s = &m->seg;
2823  sum += m->topsize + TOP_FOOT_SIZE;
2824  while (s != 0) {
2825  mchunkptr q = align_as_chunk(s->base);
2826  mchunkptr lastq = 0;
2827  assert(pinuse(q));
2828  while (segment_holds(s, q) &&
2829  q != m->top && q->head != FENCEPOST_HEAD) {
2830  sum += chunksize(q);
2831  if (cinuse(q)) {
2832  assert(!bin_find(m, q));
2833  do_check_inuse_chunk(m, q);
2834  } else {
2835  assert(q == m->dv || bin_find(m, q));
2836  assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
2837  do_check_free_chunk(m, q);
2838  }
2839  lastq = q;
2840  q = next_chunk(q);
2841  }
2842  s = s->next;
2843  }
2844  }
2845  return sum;
2846 }
2847 
2848 /* Check all properties of malloc_state. */
2849 static void
2850 do_check_malloc_state(mstate m)
2851 {
2852  bindex_t i;
2853  size_t total;
2854  /* check bins */
2855  for (i = 0; i < NSMALLBINS; ++i)
2856  do_check_smallbin(m, i);
2857  for (i = 0; i < NTREEBINS; ++i)
2858  do_check_treebin(m, i);
2859 
2860  if (m->dvsize != 0) { /* check dv chunk */
2861  do_check_any_chunk(m, m->dv);
2862  assert(m->dvsize == chunksize(m->dv));
2863  assert(m->dvsize >= MIN_CHUNK_SIZE);
2864  assert(bin_find(m, m->dv) == 0);
2865  }
2866 
2867  if (m->top != 0) { /* check top chunk */
2868  do_check_top_chunk(m, m->top);
2869  assert(m->topsize == chunksize(m->top));
2870  assert(m->topsize > 0);
2871  assert(bin_find(m, m->top) == 0);
2872  }
2873 
2874  total = traverse_and_check(m);
2875  assert(total <= m->footprint);
2876  assert(m->footprint <= m->max_footprint);
2877 }
2878 #endif /* DEBUG */
2879 
2880 /* ----------------------------- statistics ------------------------------ */
2881 
2882 #if !NO_MALLINFO
2883 static struct mallinfo
2884 internal_mallinfo(mstate m)
2885 {
2886  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2887  if (!PREACTION(m)) {
2888  check_malloc_state(m);
2889  if (is_initialized(m)) {
2890  size_t nfree = SIZE_T_ONE; /* top always free */
2891  size_t mfree = m->topsize + TOP_FOOT_SIZE;
2892  size_t sum = mfree;
2893  msegmentptr s = &m->seg;
2894  while (s != 0) {
2895  mchunkptr q = align_as_chunk(s->base);
2896  while (segment_holds(s, q) &&
2897  q != m->top && q->head != FENCEPOST_HEAD) {
2898  size_t sz = chunksize(q);
2899  sum += sz;
2900  if (!cinuse(q)) {
2901  mfree += sz;
2902  ++nfree;
2903  }
2904  q = next_chunk(q);
2905  }
2906  s = s->next;
2907  }
2908 
2909  nm.arena = sum;
2910  nm.ordblks = nfree;
2911  nm.hblkhd = m->footprint - sum;
2912  nm.usmblks = m->max_footprint;
2913  nm.uordblks = m->footprint - mfree;
2914  nm.fordblks = mfree;
2915  nm.keepcost = m->topsize;
2916  }
2917 
2918  POSTACTION(m);
2919  }
2920  return nm;
2921 }
2922 #endif /* !NO_MALLINFO */
2923 
2924 static void
2925 internal_malloc_stats(mstate m)
2926 {
2927  if (!PREACTION(m)) {
2928 #ifndef LACKS_STDIO_H
2929  size_t maxfp = 0;
2930 #endif
2931  size_t fp = 0;
2932  size_t used = 0;
2933  check_malloc_state(m);
2934  if (is_initialized(m)) {
2935  msegmentptr s = &m->seg;
2936 #ifndef LACKS_STDIO_H
2937  maxfp = m->max_footprint;
2938 #endif
2939  fp = m->footprint;
2940  used = fp - (m->topsize + TOP_FOOT_SIZE);
2941 
2942  while (s != 0) {
2943  mchunkptr q = align_as_chunk(s->base);
2944  while (segment_holds(s, q) &&
2945  q != m->top && q->head != FENCEPOST_HEAD) {
2946  if (!cinuse(q))
2947  used -= chunksize(q);
2948  q = next_chunk(q);
2949  }
2950  s = s->next;
2951  }
2952  }
2953 #ifndef LACKS_STDIO_H
2954  fprintf(stderr, "max system bytes = %10lu\n",
2955  (unsigned long) (maxfp));
2956  fprintf(stderr, "system bytes = %10lu\n", (unsigned long) (fp));
2957  fprintf(stderr, "in use bytes = %10lu\n", (unsigned long) (used));
2958 #endif
2959 
2960  POSTACTION(m);
2961  }
2962 }
2963 
2964 /* ----------------------- Operations on smallbins ----------------------- */
2965 
2966 /*
2967  Various forms of linking and unlinking are defined as macros. Even
2968  the ones for trees, which are very long but have very short typical
2969  paths. This is ugly but reduces reliance on inlining support of
2970  compilers.
2971 */
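/*
  For illustration: smallbins are circular doubly-linked lists headed at
  the bin itself (init_bins links each bin to itself). A minimal
  standalone sketch of the same idiom, with illustrative names that are
  not part of this file:

      struct node { struct node *fd, *bk; };

      static void list_init(struct node *bin) {
          bin->fd = bin->bk = bin;      // empty list: bin self-linked
      }

      static void list_push_front(struct node *bin, struct node *p) {
          struct node *f = bin->fd;     // old first node (or bin itself)
          bin->fd = p;
          f->bk = p;
          p->fd = f;
          p->bk = bin;
      }

      static void list_unlink(struct node *p) {
          p->fd->bk = p->bk;            // bypass p in both directions
          p->bk->fd = p->fd;
      }

  insert_small_chunk and unlink_small_chunk below are these operations
  plus smallmap bitmap maintenance and RTCHECK corruption checks.
*/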
2972 
2973 /* Link a free chunk into a smallbin */
2974 #define insert_small_chunk(M, P, S) {\
2975  bindex_t I = small_index(S);\
2976  mchunkptr B = smallbin_at(M, I);\
2977  mchunkptr F = B;\
2978  assert(S >= MIN_CHUNK_SIZE);\
2979  if (!smallmap_is_marked(M, I))\
2980  mark_smallmap(M, I);\
2981  else if (RTCHECK(ok_address(M, B->fd)))\
2982  F = B->fd;\
2983  else {\
2984  CORRUPTION_ERROR_ACTION(M);\
2985  }\
2986  B->fd = P;\
2987  F->bk = P;\
2988  P->fd = F;\
2989  P->bk = B;\
2990 }
2991 
2992 /* Unlink a chunk from a smallbin */
2993 #define unlink_small_chunk(M, P, S) {\
2994  mchunkptr F = P->fd;\
2995  mchunkptr B = P->bk;\
2996  bindex_t I = small_index(S);\
2997  assert(P != B);\
2998  assert(P != F);\
2999  assert(chunksize(P) == small_index2size(I));\
3000  if (F == B)\
3001  clear_smallmap(M, I);\
3002  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
3003  (B == smallbin_at(M,I) || ok_address(M, B)))) {\
3004  F->bk = B;\
3005  B->fd = F;\
3006  }\
3007  else {\
3008  CORRUPTION_ERROR_ACTION(M);\
3009  }\
3010 }
3011 
3012 /* Unlink the first chunk from a smallbin */
3013 #define unlink_first_small_chunk(M, B, P, I) {\
3014  mchunkptr F = P->fd;\
3015  assert(P != B);\
3016  assert(P != F);\
3017  assert(chunksize(P) == small_index2size(I));\
3018  if (B == F)\
3019  clear_smallmap(M, I);\
3020  else if (RTCHECK(ok_address(M, F))) {\
3021  B->fd = F;\
3022  F->bk = B;\
3023  }\
3024  else {\
3025  CORRUPTION_ERROR_ACTION(M);\
3026  }\
3027 }
3028 
3029 /* Replace dv node, binning the old one */
3030 /* Used only when dvsize known to be small */
3031 #define replace_dv(M, P, S) {\
3032  size_t DVS = M->dvsize;\
3033  if (DVS != 0) {\
3034  mchunkptr DV = M->dv;\
3035  assert(is_small(DVS));\
3036  insert_small_chunk(M, DV, DVS);\
3037  }\
3038  M->dvsize = S;\
3039  M->dv = P;\
3040 }
3041 
3042 /* ------------------------- Operations on trees ------------------------- */
3043 
3044 /* Insert chunk into tree */
3045 #define insert_large_chunk(M, X, S) {\
3046  tbinptr* H;\
3047  bindex_t I;\
3048  compute_tree_index(S, I);\
3049  H = treebin_at(M, I);\
3050  X->index = I;\
3051  X->child[0] = X->child[1] = 0;\
3052  if (!treemap_is_marked(M, I)) {\
3053  mark_treemap(M, I);\
3054  *H = X;\
3055  X->parent = (tchunkptr)H;\
3056  X->fd = X->bk = X;\
3057  }\
3058  else {\
3059  tchunkptr T = *H;\
3060  size_t K = S << leftshift_for_tree_index(I);\
3061  for (;;) {\
3062  if (chunksize(T) != S) {\
3063  tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3064  K <<= 1;\
3065  if (*C != 0)\
3066  T = *C;\
3067  else if (RTCHECK(ok_address(M, C))) {\
3068  *C = X;\
3069  X->parent = T;\
3070  X->fd = X->bk = X;\
3071  break;\
3072  }\
3073  else {\
3074  CORRUPTION_ERROR_ACTION(M);\
3075  break;\
3076  }\
3077  }\
3078  else {\
3079  tchunkptr F = T->fd;\
3080  if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3081  T->fd = F->bk = X;\
3082  X->fd = F;\
3083  X->bk = T;\
3084  X->parent = 0;\
3085  break;\
3086  }\
3087  else {\
3088  CORRUPTION_ERROR_ACTION(M);\
3089  break;\
3090  }\
3091  }\
3092  }\
3093  }\
3094 }
3095 
3096 /*
3097  Unlink steps:
3098 
3099  1. If x is a chained node, unlink it from its same-sized fd/bk links
3100  and choose its bk node as its replacement.
3101  2. If x was the last node of its size, but not a leaf node, it must
3102  be replaced with a leaf node (not merely one with an open left or
3103  right), to make sure that lefts and rights of descendants
3104  correspond properly to bit masks. We use the rightmost descendant
3105  of x. We could use any other leaf, but this is easy to locate and
3106  tends to counteract removal of leftmosts elsewhere, and so keeps
3107  paths shorter than minimally guaranteed. This doesn't loop much
3108  because on average a node in a tree is near the bottom.
3109  3. If x is the base of a chain (i.e., has parent links) relink
3110  x's parent and children to x's replacement (or null if none).
3111 */
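/*
  A sketch of step 2 in plain C: starting from x's right child (else its
  left), keep descending to a right (else left) child until reaching a
  leaf. The helper name is illustrative, not part of this file:

      static tchunkptr rightmost_descendant(tchunkptr x) {
          tchunkptr r = (x->child[1] != 0) ? x->child[1] : x->child[0];
          if (r != 0) {                 // x is not itself a leaf
              tchunkptr c;
              while ((c = (r->child[1] != 0) ? r->child[1]
                                             : r->child[0]) != 0)
                  r = c;                // descend, preferring right
          }
          return r;                     // 0 when x has no children
      }

  unlink_large_chunk below performs this walk inline (the RP/CP loop)
  while also remembering *RP so the leaf's old slot can be cleared.
*/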
3112 
3113 #define unlink_large_chunk(M, X) {\
3114  tchunkptr XP = X->parent;\
3115  tchunkptr R;\
3116  if (X->bk != X) {\
3117  tchunkptr F = X->fd;\
3118  R = X->bk;\
3119  if (RTCHECK(ok_address(M, F))) {\
3120  F->bk = R;\
3121  R->fd = F;\
3122  }\
3123  else {\
3124  CORRUPTION_ERROR_ACTION(M);\
3125  }\
3126  }\
3127  else {\
3128  tchunkptr* RP;\
3129  if (((R = *(RP = &(X->child[1]))) != 0) ||\
3130  ((R = *(RP = &(X->child[0]))) != 0)) {\
3131  tchunkptr* CP;\
3132  while ((*(CP = &(R->child[1])) != 0) ||\
3133  (*(CP = &(R->child[0])) != 0)) {\
3134  R = *(RP = CP);\
3135  }\
3136  if (RTCHECK(ok_address(M, RP)))\
3137  *RP = 0;\
3138  else {\
3139  CORRUPTION_ERROR_ACTION(M);\
3140  }\
3141  }\
3142  }\
3143  if (XP != 0) {\
3144  tbinptr* H = treebin_at(M, X->index);\
3145  if (X == *H) {\
3146  if ((*H = R) == 0) \
3147  clear_treemap(M, X->index);\
3148  }\
3149  else if (RTCHECK(ok_address(M, XP))) {\
3150  if (XP->child[0] == X) \
3151  XP->child[0] = R;\
3152  else \
3153  XP->child[1] = R;\
3154  }\
3155  else\
3156  CORRUPTION_ERROR_ACTION(M);\
3157  if (R != 0) {\
3158  if (RTCHECK(ok_address(M, R))) {\
3159  tchunkptr C0, C1;\
3160  R->parent = XP;\
3161  if ((C0 = X->child[0]) != 0) {\
3162  if (RTCHECK(ok_address(M, C0))) {\
3163  R->child[0] = C0;\
3164  C0->parent = R;\
3165  }\
3166  else\
3167  CORRUPTION_ERROR_ACTION(M);\
3168  }\
3169  if ((C1 = X->child[1]) != 0) {\
3170  if (RTCHECK(ok_address(M, C1))) {\
3171  R->child[1] = C1;\
3172  C1->parent = R;\
3173  }\
3174  else\
3175  CORRUPTION_ERROR_ACTION(M);\
3176  }\
3177  }\
3178  else\
3179  CORRUPTION_ERROR_ACTION(M);\
3180  }\
3181  }\
3182 }
3183 
3184 /* Relays to large vs small bin operations */
3185 
3186 #define insert_chunk(M, P, S)\
3187  if (is_small(S)) insert_small_chunk(M, P, S)\
3188  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3189 
3190 #define unlink_chunk(M, P, S)\
3191  if (is_small(S)) unlink_small_chunk(M, P, S)\
3192  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3193 
3194 
3195 /* Relays to internal calls to malloc/free from realloc, memalign etc */
3196 
3197 #if ONLY_MSPACES
3198 #define internal_malloc(m, b) mspace_malloc(m, b)
3199 #define internal_free(m, mem) mspace_free(m,mem);
3200 #else /* ONLY_MSPACES */
3201 #if MSPACES
3202 #define internal_malloc(m, b)\
3203  (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3204 #define internal_free(m, mem)\
3205  if (m == gm) dlfree(mem); else mspace_free(m,mem);
3206 #else /* MSPACES */
3207 #define internal_malloc(m, b) dlmalloc(b)
3208 #define internal_free(m, mem) dlfree(mem)
3209 #endif /* MSPACES */
3210 #endif /* ONLY_MSPACES */
3211 
3212 /* ----------------------- Direct-mmapping chunks ----------------------- */
3213 
3214 /*
3215  Directly mmapped chunks are set up with an offset to the start of
3216  the mmapped region stored in the prev_foot field of the chunk. This
3217  allows reconstruction of the required argument to MUNMAP when freed,
3218  and also allows adjustment of the returned chunk to meet alignment
3219  requirements (especially in memalign). There is also enough space
3220  allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
3221  the PINUSE bit so frees can be checked.
3222 */
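/*
  Concretely (a restatement of the bookkeeping in mmap_alloc and free,
  not new behavior): if DIRECT_MMAP returns mm and chunk2mem(mm) needs
  offset padding bytes to satisfy alignment, the chunk lives at
  mm + offset and records

      p->prev_foot = offset | IS_MMAPPED_BIT;

  so that a later free can recover the exact mapping to unmap:

      size_t offset = p->prev_foot & ~IS_MMAPPED_BIT;
      CALL_MUNMAP((char *) p - offset,
                  chunksize(p) + offset + MMAP_FOOT_PAD);
*/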
3223 
3224 /* Malloc using mmap */
3225 static void *
3226 mmap_alloc(mstate m, size_t nb)
3227 {
3228  size_t mmsize =
3229  granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3230  if (mmsize > nb) { /* Check for wrap around 0 */
3231  char *mm = (char *) (DIRECT_MMAP(mmsize));
3232  if (mm != CMFAIL) {
3233  size_t offset = align_offset(chunk2mem(mm));
3234  size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3235  mchunkptr p = (mchunkptr) (mm + offset);
3236  p->prev_foot = offset | IS_MMAPPED_BIT;
3237  (p)->head = (psize | CINUSE_BIT);
3238  mark_inuse_foot(m, p, psize);
3239  chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3240  chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
3241 
3242  if (mm < m->least_addr)
3243  m->least_addr = mm;
3244  if ((m->footprint += mmsize) > m->max_footprint)
3245  m->max_footprint = m->footprint;
3246  assert(is_aligned(chunk2mem(p)));
3247  check_mmapped_chunk(m, p);
3248  return chunk2mem(p);
3249  }
3250  }
3251  return 0;
3252 }
3253 
3254 /* Realloc using mmap */
3255 static mchunkptr
3256 mmap_resize(mstate m, mchunkptr oldp, size_t nb)
3257 {
3258  size_t oldsize = chunksize(oldp);
3259  if (is_small(nb)) /* Can't shrink mmap regions below small size */
3260  return 0;
3261  /* Keep old chunk if big enough but not too big */
3262  if (oldsize >= nb + SIZE_T_SIZE &&
3263  (oldsize - nb) <= (mparams.granularity << 1))
3264  return oldp;
3265  else {
3266  size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3267  size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3268  size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
3269  CHUNK_ALIGN_MASK);
3270  char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
3271  oldmmsize, newmmsize, 1);
3272  if (cp != CMFAIL) {
3273  mchunkptr newp = (mchunkptr) (cp + offset);
3274  size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3275  newp->head = (psize | CINUSE_BIT);
3276  mark_inuse_foot(m, newp, psize);
3277  chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3278  chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
3279 
3280  if (cp < m->least_addr)
3281  m->least_addr = cp;
3282  if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3283  m->max_footprint = m->footprint;
3284  check_mmapped_chunk(m, newp);
3285  return newp;
3286  }
3287  }
3288  return 0;
3289 }
3290 
3291 /* -------------------------- mspace management -------------------------- */
3292 
3293 /* Initialize top chunk and its size */
3294 static void
3295 init_top(mstate m, mchunkptr p, size_t psize)
3296 {
3297  /* Ensure alignment */
3298  size_t offset = align_offset(chunk2mem(p));
3299  p = (mchunkptr) ((char *) p + offset);
3300  psize -= offset;
3301 
3302  m->top = p;
3303  m->topsize = psize;
3304  p->head = psize | PINUSE_BIT;
3305  /* set size of fake trailing chunk holding overhead space only once */
3306  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3307  m->trim_check = mparams.trim_threshold; /* reset on each update */
3308 }
3309 
3310 /* Initialize bins for a new mstate that is otherwise zeroed out */
3311 static void
3312 init_bins(mstate m)
3313 {
3314  /* Establish circular links for smallbins */
3315  bindex_t i;
3316  for (i = 0; i < NSMALLBINS; ++i) {
3317  sbinptr bin = smallbin_at(m, i);
3318  bin->fd = bin->bk = bin;
3319  }
3320 }
3321 
3322 #if PROCEED_ON_ERROR
3323 
3324 /* default corruption action */
3325 static void
3326 reset_on_error(mstate m)
3327 {
3328  int i;
3329  ++malloc_corruption_error_count;
3330  /* Reinitialize fields to forget about all memory */
3331  m->smallbins = m->treebins = 0;
3332  m->dvsize = m->topsize = 0;
3333  m->seg.base = 0;
3334  m->seg.size = 0;
3335  m->seg.next = 0;
3336  m->top = m->dv = 0;
3337  for (i = 0; i < NTREEBINS; ++i)
3338  *treebin_at(m, i) = 0;
3339  init_bins(m);
3340 }
3341 #endif /* PROCEED_ON_ERROR */
3342 
3343 /* Allocate chunk and prepend remainder with chunk in successor base. */
3344 static void *
3345 prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
3346 {
3347  mchunkptr p = align_as_chunk(newbase);
3348  mchunkptr oldfirst = align_as_chunk(oldbase);
3349  size_t psize = (char *) oldfirst - (char *) p;
3350  mchunkptr q = chunk_plus_offset(p, nb);
3351  size_t qsize = psize - nb;
3352  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3353 
3354  assert((char *) oldfirst > (char *) q);
3355  assert(pinuse(oldfirst));
3356  assert(qsize >= MIN_CHUNK_SIZE);
3357 
3358  /* consolidate remainder with first chunk of old base */
3359  if (oldfirst == m->top) {
3360  size_t tsize = m->topsize += qsize;
3361  m->top = q;
3362  q->head = tsize | PINUSE_BIT;
3363  check_top_chunk(m, q);
3364  } else if (oldfirst == m->dv) {
3365  size_t dsize = m->dvsize += qsize;
3366  m->dv = q;
3367  q->head = dsize | PINUSE_BIT;
3368  } else {
3369  if (!cinuse(oldfirst)) {
3370  size_t nsize = chunksize(oldfirst);
3371  unlink_chunk(m, oldfirst, nsize);
3372  oldfirst = chunk_plus_offset(oldfirst, nsize);
3373  qsize += nsize;
3374  }
3375  set_free_with_pinuse(q, qsize, oldfirst);
3376  insert_chunk(m, q, qsize);
3377  check_free_chunk(m, q);
3378  }
3379 
3380  check_malloced_chunk(m, chunk2mem(p), nb);
3381  return chunk2mem(p);
3382 }
3383 
3384 
3385 /* Add a segment to hold a new noncontiguous region */
3386 static void
3387 add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
3388 {
3389  /* Determine locations and sizes of segment, fenceposts, old top */
3390  char *old_top = (char *) m->top;
3391  msegmentptr oldsp = segment_holding(m, old_top);
3392  char *old_end = oldsp->base + oldsp->size;
3393  size_t ssize = pad_request(sizeof(struct malloc_segment));
3394  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3395  size_t offset = align_offset(chunk2mem(rawsp));
3396  char *asp = rawsp + offset;
3397  char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
3398  mchunkptr sp = (mchunkptr) csp;
3399  msegmentptr ss = (msegmentptr) (chunk2mem(sp));
3400  mchunkptr tnext = chunk_plus_offset(sp, ssize);
3401  mchunkptr p = tnext;
3402  int nfences = 0;
3403 
3404  /* reset top to new space */
3405  init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3406 
3407  /* Set up segment record */
3408  assert(is_aligned(ss));
3409  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3410  *ss = m->seg; /* Push current record */
3411  m->seg.base = tbase;
3412  m->seg.size = tsize;
3413  m->seg.sflags = mmapped;
3414  m->seg.next = ss;
3415 
3416  /* Insert trailing fenceposts */
3417  for (;;) {
3418  mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3419  p->head = FENCEPOST_HEAD;
3420  ++nfences;
3421  if ((char *) (&(nextp->head)) < old_end)
3422  p = nextp;
3423  else
3424  break;
3425  }
3426  assert(nfences >= 2);
3427 
3428  /* Insert the rest of old top into a bin as an ordinary free chunk */
3429  if (csp != old_top) {
3430  mchunkptr q = (mchunkptr) old_top;
3431  size_t psize = csp - old_top;
3432  mchunkptr tn = chunk_plus_offset(q, psize);
3433  set_free_with_pinuse(q, psize, tn);
3434  insert_chunk(m, q, psize);
3435  }
3436 
3437  check_top_chunk(m, m->top);
3438 }
3439 
3440 /* -------------------------- System allocation -------------------------- */
3441 
3442 /* Get memory from system using MORECORE or MMAP */
3443 static void *
3444 sys_alloc(mstate m, size_t nb)
3445 {
3446  char *tbase = CMFAIL;
3447  size_t tsize = 0;
3448  flag_t mmap_flag = 0;
3449 
3450  init_mparams();
3451 
3452  /* Directly map large chunks */
3453  if (use_mmap(m) && nb >= mparams.mmap_threshold) {
3454  void *mem = mmap_alloc(m, nb);
3455  if (mem != 0)
3456  return mem;
3457  }
3458 
3459  /*
3460  Try getting memory in any of three ways (in most-preferred to
3461  least-preferred order):
3462  1. A call to MORECORE that can normally contiguously extend memory.
3463  (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
3464  or main space is mmapped or a previous contiguous call failed)
3465  2. A call to MMAP new space (disabled if not HAVE_MMAP).
3466  Note that under the default settings, if MORECORE is unable to
3467  fulfill a request, and HAVE_MMAP is true, then mmap is
3468  used as a noncontiguous system allocator. This is a useful backup
3469  strategy for systems with holes in address spaces -- in this case
3470  sbrk cannot contiguously expand the heap, but mmap may be able to
3471  find space.
3472  3. A call to MORECORE that cannot usually contiguously extend memory.
3473  (disabled if not HAVE_MORECORE)
3474  */
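/*
  In outline, the code below tries the three strategies in that order
  (helper names here are illustrative; the real logic is inlined):

      tbase = CMFAIL;
      if (MORECORE_CONTIGUOUS && !use_noncontiguous(m))
          tbase = try_contiguous_morecore(m, nb);
      if (HAVE_MMAP && tbase == CMFAIL)
          tbase = try_mmap(m, nb);
      if (HAVE_MORECORE && tbase == CMFAIL)
          tbase = try_noncontiguous_morecore(m, nb);
*/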
3475 
3476  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
3477  char *br = CMFAIL;
3478  msegmentptr ss =
3479  (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
3480  size_t asize = 0;
3481  ACQUIRE_MORECORE_LOCK();
3482 
3483  if (ss == 0) { /* First time through or recovery */
3484  char *base = (char *) CALL_MORECORE(0);
3485  if (base != CMFAIL) {
3486  asize =
3487  granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3488  SIZE_T_ONE);
3489  /* Adjust to end on a page boundary */
3490  if (!is_page_aligned(base))
3491  asize += (page_align((size_t) base) - (size_t) base);
3492  /* Can't call MORECORE if size is negative when treated as signed */
3493  if (asize < HALF_MAX_SIZE_T &&
3494  (br = (char *) (CALL_MORECORE(asize))) == base) {
3495  tbase = base;
3496  tsize = asize;
3497  }
3498  }
3499  } else {
3500  /* Subtract out existing available top space from MORECORE request. */
3501  asize =
3502  granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
3503  MALLOC_ALIGNMENT + SIZE_T_ONE);
3504  /* Use mem here only if it did contiguously extend old space */
3505  if (asize < HALF_MAX_SIZE_T &&
3506  (br =
3507  (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
3508  tbase = br;
3509  tsize = asize;
3510  }
3511  }
3512 
3513  if (tbase == CMFAIL) { /* Cope with partial failure */
3514  if (br != CMFAIL) { /* Try to use/extend the space we did get */
3515  if (asize < HALF_MAX_SIZE_T &&
3516  asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3517  size_t esize =
3518  granularity_align(nb + TOP_FOOT_SIZE +
3519  MALLOC_ALIGNMENT + SIZE_T_ONE -
3520  asize);
3521  if (esize < HALF_MAX_SIZE_T) {
3522  char *end = (char *) CALL_MORECORE(esize);
3523  if (end != CMFAIL)
3524  asize += esize;
3525  else { /* Can't use; try to release */
3526  end = (char *) CALL_MORECORE(-asize);
3527  br = CMFAIL;
3528  }
3529  }
3530  }
3531  }
3532  if (br != CMFAIL) { /* Use the space we did get */
3533  tbase = br;
3534  tsize = asize;
3535  } else
3536  disable_contiguous(m); /* Don't try contiguous path in the future */
3537  }
3538 
3539  RELEASE_MORECORE_LOCK();
3540  }
3541 
3542  if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
3543  size_t req = nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE;
3544  size_t rsize = granularity_align(req);
3545  if (rsize > nb) { /* Fail if wraps around zero */
3546  char *mp = (char *) (CALL_MMAP(rsize));
3547  if (mp != CMFAIL) {
3548  tbase = mp;
3549  tsize = rsize;
3550  mmap_flag = IS_MMAPPED_BIT;
3551  }
3552  }
3553  }
3554 
3555  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3556  size_t asize =
3557  granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3558  SIZE_T_ONE);
3559  if (asize < HALF_MAX_SIZE_T) {
3560  char *br = CMFAIL;
3561  char *end = CMFAIL;
3562  ACQUIRE_MORECORE_LOCK();
3563  br = (char *) (CALL_MORECORE(asize));
3564  end = (char *) (CALL_MORECORE(0));
3565  RELEASE_MORECORE_LOCK();
3566  if (br != CMFAIL && end != CMFAIL && br < end) {
3567  size_t ssize = end - br;
3568  if (ssize > nb + TOP_FOOT_SIZE) {
3569  tbase = br;
3570  tsize = ssize;
3571  }
3572  }
3573  }
3574  }
3575 
3576  if (tbase != CMFAIL) {
3577 
3578  if ((m->footprint += tsize) > m->max_footprint)
3579  m->max_footprint = m->footprint;
3580 
3581  if (!is_initialized(m)) { /* first-time initialization */
3582  m->seg.base = m->least_addr = tbase;
3583  m->seg.size = tsize;
3584  m->seg.sflags = mmap_flag;
3585  m->magic = mparams.magic;
3586  init_bins(m);
3587  if (is_global(m))
3588  init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3589  else {
3590  /* Offset top by embedded malloc_state */
3591  mchunkptr mn = next_chunk(mem2chunk(m));
3592  init_top(m, mn,
3593  (size_t) ((tbase + tsize) - (char *) mn) -
3594  TOP_FOOT_SIZE);
3595  }
3596  }
3597 
3598  else {
3599  /* Try to merge with an existing segment */
3600  msegmentptr sp = &m->seg;
3601  while (sp != 0 && tbase != sp->base + sp->size)
3602  sp = sp->next;
3603  if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
3604  sp->size += tsize;
3605  init_top(m, m->top, m->topsize + tsize);
3606  } else {
3607  if (tbase < m->least_addr)
3608  m->least_addr = tbase;
3609  sp = &m->seg;
3610  while (sp != 0 && sp->base != tbase + tsize)
3611  sp = sp->next;
3612  if (sp != 0 &&
3613  !is_extern_segment(sp) &&
3614  (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3615  char *oldbase = sp->base;
3616  sp->base = tbase;
3617  sp->size += tsize;
3618  return prepend_alloc(m, tbase, oldbase, nb);
3619  } else
3620  add_segment(m, tbase, tsize, mmap_flag);
3621  }
3622  }
3623 
3624  if (nb < m->topsize) { /* Allocate from new or extended top space */
3625  size_t rsize = m->topsize -= nb;
3626  mchunkptr p = m->top;
3627  mchunkptr r = m->top = chunk_plus_offset(p, nb);
3628  r->head = rsize | PINUSE_BIT;
3629  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3630  check_top_chunk(m, m->top);
3631  check_malloced_chunk(m, chunk2mem(p), nb);
3632  return chunk2mem(p);
3633  }
3634  }
3635 
3636  MALLOC_FAILURE_ACTION;
3637  return 0;
3638 }
3639 
3640 /* ----------------------- system deallocation -------------------------- */
3641 
3642 /* Unmap and unlink any mmapped segments that don't contain used chunks */
3643 static size_t
3644 release_unused_segments(mstate m)
3645 {
3646  size_t released = 0;
3647  msegmentptr pred = &m->seg;
3648  msegmentptr sp = pred->next;
3649  while (sp != 0) {
3650  char *base = sp->base;
3651  size_t size = sp->size;
3652  msegmentptr next = sp->next;
3653  if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3654  mchunkptr p = align_as_chunk(base);
3655  size_t psize = chunksize(p);
3656  /* Can unmap if first chunk holds entire segment and not pinned */
3657  if (!cinuse(p)
3658  && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
3659  tchunkptr tp = (tchunkptr) p;
3660  assert(segment_holds(sp, (char *) sp));
3661  if (p == m->dv) {
3662  m->dv = 0;
3663  m->dvsize = 0;
3664  } else {
3665  unlink_large_chunk(m, tp);
3666  }
3667  if (CALL_MUNMAP(base, size) == 0) {
3668  released += size;
3669  m->footprint -= size;
3670  /* unlink obsoleted record */
3671  sp = pred;
3672  sp->next = next;
3673  } else { /* back out if cannot unmap */
3674  insert_large_chunk(m, tp, psize);
3675  }
3676  }
3677  }
3678  pred = sp;
3679  sp = next;
3680  }
3681  return released;
3682 }
3683 
3684 static int
3685 sys_trim(mstate m, size_t pad)
3686 {
3687  size_t released = 0;
3688  if (pad < MAX_REQUEST && is_initialized(m)) {
3689  pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3690 
3691  if (m->topsize > pad) {
3692  /* Shrink top space in granularity-size units, keeping at least one */
3693  size_t unit = mparams.granularity;
3694  size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3695  SIZE_T_ONE) * unit;
3696  msegmentptr sp = segment_holding(m, (char *) m->top);
3697 
3698  if (!is_extern_segment(sp)) {
3699  if (is_mmapped_segment(sp)) {
3700  if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
3701  size_t newsize = sp->size - extra;
3702  /* Prefer mremap, fall back to munmap */
3703  if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) !=
3704  MFAIL)
3705  || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3706  released = extra;
3707  }
3708  }
3709  } else if (HAVE_MORECORE) {
3710  if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3711  extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3712  ACQUIRE_MORECORE_LOCK();
3713  {
3714  /* Make sure end of memory is where we last set it. */
3715  char *old_br = (char *) (CALL_MORECORE(0));
3716  if (old_br == sp->base + sp->size) {
3717  char *rel_br = (char *) (CALL_MORECORE(-extra));
3718  char *new_br = (char *) (CALL_MORECORE(0));
3719  if (rel_br != CMFAIL && new_br < old_br)
3720  released = old_br - new_br;
3721  }
3722  }
3723  RELEASE_MORECORE_LOCK();
3724  }
3725  }
3726 
3727  if (released != 0) {
3728  sp->size -= released;
3729  m->footprint -= released;
3730  init_top(m, m->top, m->topsize - released);
3731  check_top_chunk(m, m->top);
3732  }
3733  }
3734 
3735  /* Unmap any unused mmapped segments */
3736  if (HAVE_MMAP)
3737  released += release_unused_segments(m);
3738 
3739  /* On failure, disable autotrim to avoid repeated failed future calls */
3740  if (released == 0)
3741  m->trim_check = MAX_SIZE_T;
3742  }
3743 
3744  return (released != 0) ? 1 : 0;
3745 }
3746 
3747 /* ---------------------------- malloc support --------------------------- */
3748 
3749 /* allocate a large request from the best fitting chunk in a treebin */
3750 static void *
3751 tmalloc_large(mstate m, size_t nb)
3752 {
3753  tchunkptr v = 0;
3754  size_t rsize = -nb; /* Unsigned negation */
3755  tchunkptr t;
3756  bindex_t idx;
3757  compute_tree_index(nb, idx);
3758 
3759  if ((t = *treebin_at(m, idx)) != 0) {
3760  /* Traverse tree for this bin looking for node with size == nb */
3761  size_t sizebits = nb << leftshift_for_tree_index(idx);
3762  tchunkptr rst = 0; /* The deepest untaken right subtree */
3763  for (;;) {
3764  tchunkptr rt;
3765  size_t trem = chunksize(t) - nb;
3766  if (trem < rsize) {
3767  v = t;
3768  if ((rsize = trem) == 0)
3769  break;
3770  }
3771  rt = t->child[1];
3772  t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
3773  if (rt != 0 && rt != t)
3774  rst = rt;
3775  if (t == 0) {
3776  t = rst; /* set t to least subtree holding sizes > nb */
3777  break;
3778  }
3779  sizebits <<= 1;
3780  }
3781  }
3782 
3783  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3784  binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3785  if (leftbits != 0) {
3786  bindex_t i;
3787  binmap_t leastbit = least_bit(leftbits);
3788  compute_bit2idx(leastbit, i);
3789  t = *treebin_at(m, i);
3790  }
3791  }
3792 
3793  while (t != 0) { /* find smallest of tree or subtree */
3794  size_t trem = chunksize(t) - nb;
3795  if (trem < rsize) {
3796  rsize = trem;
3797  v = t;
3798  }
3799  t = leftmost_child(t);
3800  }
3801 
3802  /* If dv is a better fit, return 0 so malloc will use it */
3803  if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
3804  if (RTCHECK(ok_address(m, v))) { /* split */
3805  mchunkptr r = chunk_plus_offset(v, nb);
3806  assert(chunksize(v) == rsize + nb);
3807  if (RTCHECK(ok_next(v, r))) {
3808  unlink_large_chunk(m, v);
3809  if (rsize < MIN_CHUNK_SIZE)
3810  set_inuse_and_pinuse(m, v, (rsize + nb));
3811  else {
3812  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3813  set_size_and_pinuse_of_free_chunk(r, rsize);
3814  insert_chunk(m, r, rsize);
3815  }
3816  return chunk2mem(v);
3817  }
3818  }
3819  CORRUPTION_ERROR_ACTION(m);
3820  }
3821  return 0;
3822 }
3823 
3824 /* allocate a small request from the best fitting chunk in a treebin */
3825 static void *
3826 tmalloc_small(mstate m, size_t nb)
3827 {
3828  tchunkptr t, v;
3829  size_t rsize;
3830  bindex_t i;
3831  binmap_t leastbit = least_bit(m->treemap);
3832  compute_bit2idx(leastbit, i);
3833 
3834  v = t = *treebin_at(m, i);
3835  rsize = chunksize(t) - nb;
3836 
3837  while ((t = leftmost_child(t)) != 0) {
3838  size_t trem = chunksize(t) - nb;
3839  if (trem < rsize) {
3840  rsize = trem;
3841  v = t;
3842  }
3843  }
3844 
3845  if (RTCHECK(ok_address(m, v))) {
3846  mchunkptr r = chunk_plus_offset(v, nb);
3847  assert(chunksize(v) == rsize + nb);
3848  if (RTCHECK(ok_next(v, r))) {
3849  unlink_large_chunk(m, v);
3850  if (rsize < MIN_CHUNK_SIZE)
3851  set_inuse_and_pinuse(m, v, (rsize + nb));
3852  else {
3853  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3854  set_size_and_pinuse_of_free_chunk(r, rsize);
3855  replace_dv(m, r, rsize);
3856  }
3857  return chunk2mem(v);
3858  }
3859  }
3860 
3861  CORRUPTION_ERROR_ACTION(m);
3862  return 0;
3863 }
3864 
3865 /* --------------------------- realloc support --------------------------- */
3866 
3867 static void *
3868 internal_realloc(mstate m, void *oldmem, size_t bytes)
3869 {
3870  if (bytes >= MAX_REQUEST) {
3871  MALLOC_FAILURE_ACTION;
3872  return 0;
3873  }
3874  if (!PREACTION(m)) {
3875  mchunkptr oldp = mem2chunk(oldmem);
3876  size_t oldsize = chunksize(oldp);
3877  mchunkptr next = chunk_plus_offset(oldp, oldsize);
3878  mchunkptr newp = 0;
3879  void *extra = 0;
3880 
3881  /* Try to either shrink or extend into top. Else malloc-copy-free */
3882 
3883  if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
3884  ok_next(oldp, next) && ok_pinuse(next))) {
3885  size_t nb = request2size(bytes);
3886  if (is_mmapped(oldp))
3887  newp = mmap_resize(m, oldp, nb);
3888  else if (oldsize >= nb) { /* already big enough */
3889  size_t rsize = oldsize - nb;
3890  newp = oldp;
3891  if (rsize >= MIN_CHUNK_SIZE) {
3892  mchunkptr remainder = chunk_plus_offset(newp, nb);
3893  set_inuse(m, newp, nb);
3894  set_inuse(m, remainder, rsize);
3895  extra = chunk2mem(remainder);
3896  }
3897  } else if (next == m->top && oldsize + m->topsize > nb) {
3898  /* Expand into top */
3899  size_t newsize = oldsize + m->topsize;
3900  size_t newtopsize = newsize - nb;
3901  mchunkptr newtop = chunk_plus_offset(oldp, nb);
3902  set_inuse(m, oldp, nb);
3903  newtop->head = newtopsize | PINUSE_BIT;
3904  m->top = newtop;
3905  m->topsize = newtopsize;
3906  newp = oldp;
3907  }
3908  } else {
3909  USAGE_ERROR_ACTION(m, oldmem);
3910  POSTACTION(m);
3911  return 0;
3912  }
3913 
3914  POSTACTION(m);
3915 
3916  if (newp != 0) {
3917  if (extra != 0) {
3918  internal_free(m, extra);
3919  }
3920  check_inuse_chunk(m, newp);
3921  return chunk2mem(newp);
3922  } else {
3923  void *newmem = internal_malloc(m, bytes);
3924  if (newmem != 0) {
3925  size_t oc = oldsize - overhead_for(oldp);
3926  memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
3927  internal_free(m, oldmem);
3928  }
3929  return newmem;
3930  }
3931  }
3932  return 0;
3933 }
3934 
3935 /* --------------------------- memalign support -------------------------- */
3936 
3937 static void *
3938 internal_memalign(mstate m, size_t alignment, size_t bytes)
3939 {
3940  if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
3941  return internal_malloc(m, bytes);
3942  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3943  alignment = MIN_CHUNK_SIZE;
3944  if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
3945  size_t a = MALLOC_ALIGNMENT << 1;
3946  while (a < alignment)
3947  a <<= 1;
3948  alignment = a;
3949  }
3950 
3951  if (bytes >= MAX_REQUEST - alignment) {
3952  if (m != 0) { /* Test isn't needed but avoids compiler warning */
3953  MALLOC_FAILURE_ACTION;
3954  }
3955  } else {
3956  size_t nb = request2size(bytes);
3957  size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3958  char *mem = (char *) internal_malloc(m, req);
3959  if (mem != 0) {
3960  void *leader = 0;
3961  void *trailer = 0;
3962  mchunkptr p = mem2chunk(mem);
3963 
3964  if (PREACTION(m))
3965  return 0;
3966  if ((((size_t) (mem)) % alignment) != 0) { /* misaligned */
3967  /*
3968  Find an aligned spot inside chunk. Since we need to give
3969  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3970  the first calculation places us at a spot with less than
3971  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3972  We've allocated enough total room so that this is always
3973  possible.
3974  */
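 /*
  Worked example with illustrative numbers: for alignment == 64 and
  mem ending in hex 1008, (mem + 64 - 1) & -64 rounds up to ...1040,
  a leader of 0x38 == 56 bytes. Had the leader been smaller than
  MIN_CHUNK_SIZE, br + alignment would be used instead; the extra
  alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD padding requested above
  guarantees that this second spot still fits within the chunk.
 */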
3975  char *br = (char *) mem2chunk((size_t) (((size_t) (mem +
3976  alignment -
3977  SIZE_T_ONE))
3978  & -alignment));
3979  char *pos =
3980  ((size_t) (br - (char *) (p)) >=
3981  MIN_CHUNK_SIZE) ? br : br + alignment;
3982  mchunkptr newp = (mchunkptr) pos;
3983  size_t leadsize = pos - (char *) (p);
3984  size_t newsize = chunksize(p) - leadsize;
3985 
3986  if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3987  newp->prev_foot = p->prev_foot + leadsize;
3988  newp->head = (newsize | CINUSE_BIT);
3989  } else { /* Otherwise, give back leader, use the rest */
3990  set_inuse(m, newp, newsize);
3991  set_inuse(m, p, leadsize);
3992  leader = chunk2mem(p);
3993  }
3994  p = newp;
3995  }
3996 
3997  /* Give back spare room at the end */
3998  if (!is_mmapped(p)) {
3999  size_t size = chunksize(p);
4000  if (size > nb + MIN_CHUNK_SIZE) {
4001  size_t remainder_size = size - nb;
4002  mchunkptr remainder = chunk_plus_offset(p, nb);
4003  set_inuse(m, p, nb);
4004  set_inuse(m, remainder, remainder_size);
4005  trailer = chunk2mem(remainder);
4006  }
4007  }
4008 
4009  assert(chunksize(p) >= nb);
4010  assert((((size_t) (chunk2mem(p))) % alignment) == 0);
4011  check_inuse_chunk(m, p);
4012  POSTACTION(m);
4013  if (leader != 0) {
4014  internal_free(m, leader);
4015  }
4016  if (trailer != 0) {
4017  internal_free(m, trailer);
4018  }
4019  return chunk2mem(p);
4020  }
4021  }
4022  return 0;
4023 }
4024 
4025 /* ------------------------ comalloc/coalloc support --------------------- */
4026 
4027 static void **
4028 ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
4029 {
4030  /*
4031  This provides common support for independent_X routines, handling
4032  all of the combinations that can result.
4033 
4034  The opts arg has:
4035  bit 0 set if all elements are same size (using sizes[0])
4036  bit 1 set if elements should be zeroed
4037  */
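/*
  Typical use through the public wrappers below (a sketch; the struct
  names are illustrative, not part of this file):

      size_t sizes[3] = { sizeof(struct Head),
                          nv * sizeof(struct Node),
                          ne * sizeof(struct Edge) };
      void *chunks[3];
      if (dlindependent_comalloc(3, sizes, chunks) != 0) {
          struct Head *h = (struct Head *) chunks[0];
          struct Node *v = (struct Node *) chunks[1];
          struct Edge *e = (struct Edge *) chunks[2];
          // ... each of h, v, e may later be passed to dlfree() ...
      }
*/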
4038 
4039  size_t element_size; /* chunksize of each element, if all same */
4040  size_t contents_size; /* total size of elements */
4041  size_t array_size; /* request size of pointer array */
4042  void *mem; /* malloced aggregate space */
4043  mchunkptr p; /* corresponding chunk */
4044  size_t remainder_size; /* remaining bytes while splitting */
4045  void **marray; /* either "chunks" or malloced ptr array */
4046  mchunkptr array_chunk; /* chunk for malloced ptr array */
4047  flag_t was_enabled; /* to disable mmap */
4048  size_t size;
4049  size_t i;
4050 
4051  /* compute array length, if needed */
4052  if (chunks != 0) {
4053  if (n_elements == 0)
4054  return chunks; /* nothing to do */
4055  marray = chunks;
4056  array_size = 0;
4057  } else {
4058  /* if empty req, must still return chunk representing empty array */
4059  if (n_elements == 0)
4060  return (void **) internal_malloc(m, 0);
4061  marray = 0;
4062  array_size = request2size(n_elements * (sizeof(void *)));
4063  }
4064 
4065  /* compute total element size */
4066  if (opts & 0x1) { /* all-same-size */
4067  element_size = request2size(*sizes);
4068  contents_size = n_elements * element_size;
4069  } else { /* add up all the sizes */
4070  element_size = 0;
4071  contents_size = 0;
4072  for (i = 0; i != n_elements; ++i)
4073  contents_size += request2size(sizes[i]);
4074  }
4075 
4076  size = contents_size + array_size;
4077 
4078  /*
4079  Allocate the aggregate chunk. First disable direct-mmapping so
4080  malloc won't use it, since we would not be able to later
4081  free/realloc space internal to a segregated mmap region.
4082  */
4083  was_enabled = use_mmap(m);
4084  disable_mmap(m);
4085  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4086  if (was_enabled)
4087  enable_mmap(m);
4088  if (mem == 0)
4089  return 0;
4090 
4091  if (PREACTION(m))
4092  return 0;
4093  p = mem2chunk(mem);
4094  remainder_size = chunksize(p);
4095 
4096  assert(!is_mmapped(p));
4097 
4098  if (opts & 0x2) { /* optionally clear the elements */
4099  memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4100  }
4101 
4102  /* If not provided, allocate the pointer array as final part of chunk */
4103  if (marray == 0) {
4104  size_t array_chunk_size;
4105  array_chunk = chunk_plus_offset(p, contents_size);
4106  array_chunk_size = remainder_size - contents_size;
4107  marray = (void **) (chunk2mem(array_chunk));
4108  set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4109  remainder_size = contents_size;
4110  }
4111 
4112  /* split out elements */
4113  for (i = 0;; ++i) {
4114  marray[i] = chunk2mem(p);
4115  if (i != n_elements - 1) {
4116  if (element_size != 0)
4117  size = element_size;
4118  else
4119  size = request2size(sizes[i]);
4120  remainder_size -= size;
4121  set_size_and_pinuse_of_inuse_chunk(m, p, size);
4122  p = chunk_plus_offset(p, size);
4123  } else { /* the final element absorbs any overallocation slop */
4124  set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4125  break;
4126  }
4127  }
4128 
4129 #if DEBUG
4130  if (marray != chunks) {
4131  /* final element must have exactly exhausted chunk */
4132  if (element_size != 0) {
4133  assert(remainder_size == element_size);
4134  } else {
4135  assert(remainder_size == request2size(sizes[i]));
4136  }
4137  check_inuse_chunk(m, mem2chunk(marray));
4138  }
4139  for (i = 0; i != n_elements; ++i)
4140  check_inuse_chunk(m, mem2chunk(marray[i]));
4141 
4142 #endif /* DEBUG */
4143 
4144  POSTACTION(m);
4145  return marray;
4146 }
4147 
4148 
4149 /* -------------------------- public routines ---------------------------- */
4150 
4151 #if !ONLY_MSPACES
4152 
4153 void *
4154 dlmalloc(size_t bytes)
4155 {
4156  /*
4157  Basic algorithm:
4158  If a small request (< 256 bytes minus per-chunk overhead):
4159  1. If one exists, use a remainderless chunk in associated smallbin.
4160  (Remainderless means that there are too few excess bytes to
4161  represent as a chunk.)
4162  2. If it is big enough, use the dv chunk, which is normally the
4163  chunk adjacent to the one used for the most recent small request.
4164  3. If one exists, split the smallest available chunk in a bin,
4165  saving remainder in dv.
4166  4. If it is big enough, use the top chunk.
4167  5. If available, get memory from system and use it
4168  Otherwise, for a large request:
4169  1. Find the smallest available binned chunk that fits, and use it
4170  if it is better fitting than dv chunk, splitting if necessary.
4171  2. If better fitting than any binned chunk, use the dv chunk.
4172  3. If it is big enough, use the top chunk.
4173  4. If request size >= mmap threshold, try to directly mmap this chunk.
4174  5. If available, get memory from system and use it
4175 
4176  The ugly goto's here ensure that postaction occurs along all paths.
4177  */
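 /*
  For instance, on a typical build with 4-byte size_t (so CHUNK_OVERHEAD
  == 4 and 8-byte alignment), dlmalloc(13) computes

      nb  = pad_request(13);    // (13 + 4 + 7) & ~7 == 24
      idx = small_index(nb);    // 24 >> 3 == 3

  and step 1 succeeds whenever smallbin 3 or 4 holds a chunk: a 24- or
  32-byte chunk leaves at most 8 spare bytes, too few to split off as a
  chunk of MIN_CHUNK_SIZE.
 */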
4178 
4179  if (!PREACTION(gm)) {
4180  void *mem;
4181  size_t nb;
4182  if (bytes <= MAX_SMALL_REQUEST) {
4183  bindex_t idx;
4184  binmap_t smallbits;
4185  nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4186  idx = small_index(nb);
4187  smallbits = gm->smallmap >> idx;
4188 
4189  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4190  mchunkptr b, p;
4191  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4192  b = smallbin_at(gm, idx);
4193  p = b->fd;
4194  assert(chunksize(p) == small_index2size(idx));
4195  unlink_first_small_chunk(gm, b, p, idx);
4196  set_inuse_and_pinuse(gm, p, small_index2size(idx));
4197  mem = chunk2mem(p);
4198  check_malloced_chunk(gm, mem, nb);
4199  goto postaction;
4200  }
4201 
4202  else if (nb > gm->dvsize) {
4203  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4204  mchunkptr b, p, r;
4205  size_t rsize;
4206  bindex_t i;
4207  binmap_t leftbits =
4208  (smallbits << idx) & left_bits(idx2bit(idx));
4209  binmap_t leastbit = least_bit(leftbits);
4210  compute_bit2idx(leastbit, i);
4211  b = smallbin_at(gm, i);
4212  p = b->fd;
4213  assert(chunksize(p) == small_index2size(i));
4214  unlink_first_small_chunk(gm, b, p, i);
4215  rsize = small_index2size(i) - nb;
4216  /* Fit here cannot be remainderless if 4byte sizes */
4217  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4218  set_inuse_and_pinuse(gm, p, small_index2size(i));
4219  else {
4220  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4221  r = chunk_plus_offset(p, nb);
4222  set_size_and_pinuse_of_free_chunk(r, rsize);
4223  replace_dv(gm, r, rsize);
4224  }
4225  mem = chunk2mem(p);
4226  check_malloced_chunk(gm, mem, nb);
4227  goto postaction;
4228  }
4229 
4230  else if (gm->treemap != 0
4231  && (mem = tmalloc_small(gm, nb)) != 0) {
4232  check_malloced_chunk(gm, mem, nb);
4233  goto postaction;
4234  }
4235  }
4236  } else if (bytes >= MAX_REQUEST)
4237  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4238  else {
4239  nb = pad_request(bytes);
4240  if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4241  check_malloced_chunk(gm, mem, nb);
4242  goto postaction;
4243  }
4244  }
4245 
4246  if (nb <= gm->dvsize) {
4247  size_t rsize = gm->dvsize - nb;
4248  mchunkptr p = gm->dv;
4249  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4250  mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4251  gm->dvsize = rsize;
4252  set_size_and_pinuse_of_free_chunk(r, rsize);
4253  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4254  } else { /* exhaust dv */
4255  size_t dvs = gm->dvsize;
4256  gm->dvsize = 0;
4257  gm->dv = 0;
4258  set_inuse_and_pinuse(gm, p, dvs);
4259  }
4260  mem = chunk2mem(p);
4261  check_malloced_chunk(gm, mem, nb);
4262  goto postaction;
4263  }
4264 
4265  else if (nb < gm->topsize) { /* Split top */
4266  size_t rsize = gm->topsize -= nb;
4267  mchunkptr p = gm->top;
4268  mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4269  r->head = rsize | PINUSE_BIT;
4270  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4271  mem = chunk2mem(p);
4272  check_top_chunk(gm, gm->top);
4273  check_malloced_chunk(gm, mem, nb);
4274  goto postaction;
4275  }
4276 
4277  mem = sys_alloc(gm, nb);
4278 
4279  postaction:
4280  POSTACTION(gm);
4281  return mem;
4282  }
4283 
4284  return 0;
4285 }
4286 
4287 void
4288 dlfree(void *mem)
4289 {
4290  /*
4291  Consolidate freed chunks with preceding or succeeding bordering
4292  free chunks, if they exist, and then place in a bin. Intermixed
4293  with special cases for top, dv, mmapped chunks, and usage errors.
4294  */
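 /*
  Schematically, the branches below implement (with special handling
  when a neighbor is top, dv, or an mmapped region):

      psize = chunksize(p);
      if (!pinuse(p))    { unlink prev; p = prev; psize += prevsize; }
      if (!cinuse(next)) { unlink next; psize += chunksize(next);    }
      mark p free with size psize and bin it;
 */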
4295 
4296  if (mem != 0) {
4297  mchunkptr p = mem2chunk(mem);
4298 #if FOOTERS
4299  mstate fm = get_mstate_for(p);
4300  if (!ok_magic(fm)) {
4301  USAGE_ERROR_ACTION(fm, p);
4302  return;
4303  }
4304 #else /* FOOTERS */
4305 #define fm gm
4306 #endif /* FOOTERS */
4307  if (!PREACTION(fm)) {
4308  check_inuse_chunk(fm, p);
4309  if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4310  size_t psize = chunksize(p);
4311  mchunkptr next = chunk_plus_offset(p, psize);
4312  if (!pinuse(p)) {
4313  size_t prevsize = p->prev_foot;
4314  if ((prevsize & IS_MMAPPED_BIT) != 0) {
4315  prevsize &= ~IS_MMAPPED_BIT;
4316  psize += prevsize + MMAP_FOOT_PAD;
4317  if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4318  fm->footprint -= psize;
4319  goto postaction;
4320  } else {
4321  mchunkptr prev = chunk_minus_offset(p, prevsize);
4322  psize += prevsize;
4323  p = prev;
4324  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4325  if (p != fm->dv) {
4326  unlink_chunk(fm, p, prevsize);
4327  } else if ((next->head & INUSE_BITS) ==
4328  INUSE_BITS) {
4329  fm->dvsize = psize;
4330  set_free_with_pinuse(p, psize, next);
4331  goto postaction;
4332  }
4333  } else
4334  goto erroraction;
4335  }
4336  }
4337 
4338  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4339  if (!cinuse(next)) { /* consolidate forward */
4340  if (next == fm->top) {
4341  size_t tsize = fm->topsize += psize;
4342  fm->top = p;
4343  p->head = tsize | PINUSE_BIT;
4344  if (p == fm->dv) {
4345  fm->dv = 0;
4346  fm->dvsize = 0;
4347  }
4348  if (should_trim(fm, tsize))
4349  sys_trim(fm, 0);
4350  goto postaction;
4351  } else if (next == fm->dv) {
4352  size_t dsize = fm->dvsize += psize;
4353  fm->dv = p;
4354  p->head = dsize | PINUSE_BIT;
4355  goto postaction;
4356  } else {
4357  size_t nsize = chunksize(next);
4358  psize += nsize;
4359  unlink_chunk(fm, next, nsize);
4360  set_size_and_pinuse_of_free_chunk(p, psize);
4361  if (p == fm->dv) {
4362  fm->dvsize = psize;
4363  goto postaction;
4364  }
4365  }
4366  } else
4367  set_free_with_pinuse(p, psize, next);
4368  insert_chunk(fm, p, psize);
4369  check_free_chunk(fm, p);
4370  goto postaction;
4371  }
4372  }
4373  erroraction:
4374  USAGE_ERROR_ACTION(fm, p);
4375  postaction:
4376  POSTACTION(fm);
4377  }
4378  }
4379 #if !FOOTERS
4380 #undef fm
4381 #endif /* FOOTERS */
4382 }
4383 
4384 void *
4385 dlcalloc(size_t n_elements, size_t elem_size)
4386 {
4387  void *mem;
4388  size_t req = 0;
4389  if (n_elements != 0) {
4390  req = n_elements * elem_size;
4391  if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4392  (req / n_elements != elem_size))
4393  req = MAX_SIZE_T; /* force downstream failure on overflow */
4394  }
4395  mem = dlmalloc(req);
4396  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4397  memset(mem, 0, req);
4398  return mem;
4399 }
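/*
  Note on the overflow guard above: a product of two values each below
  0x10000 cannot overflow a 32-bit size_t, so the cheap
  (n_elements | elem_size) & ~0xffff pre-test skips the division in the
  common case. For example, on 32-bit, 0x10000 * 0x10000 wraps to 0;
  req / n_elements then differs from elem_size, and req is forced to
  MAX_SIZE_T so the allocation fails instead of returning a short block.
*/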
4400 
4401 void *
4402 dlrealloc(void *oldmem, size_t bytes)
4403 {
4404  if (oldmem == 0)
4405  return dlmalloc(bytes);
4406 #ifdef REALLOC_ZERO_BYTES_FREES
4407  if (bytes == 0) {
4408  dlfree(oldmem);
4409  return 0;
4410  }
4411 #endif /* REALLOC_ZERO_BYTES_FREES */
4412  else {
4413 #if ! FOOTERS
4414  mstate m = gm;
4415 #else /* FOOTERS */
4416  mstate m = get_mstate_for(mem2chunk(oldmem));
4417  if (!ok_magic(m)) {
4418  USAGE_ERROR_ACTION(m, oldmem);
4419  return 0;
4420  }
4421 #endif /* FOOTERS */
4422  return internal_realloc(m, oldmem, bytes);
4423  }
4424 }
4425 
4426 void *
4427 dlmemalign(size_t alignment, size_t bytes)
4428 {
4429  return internal_memalign(gm, alignment, bytes);
4430 }
4431 
4432 void **
4433 dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
4434 {
4435  size_t sz = elem_size; /* serves as 1-element array */
4436  return ialloc(gm, n_elements, &sz, 3, chunks);
4437 }
4438 
4439 void **
4440 dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
4441 {
4442  return ialloc(gm, n_elements, sizes, 0, chunks);
4443 }
4444 
4445 void *
4446 dlvalloc(size_t bytes)
4447 {
4448  size_t pagesz;
4449  init_mparams();
4450  pagesz = mparams.page_size;
4451  return dlmemalign(pagesz, bytes);
4452 }
4453 
4454 void *
4455 dlpvalloc(size_t bytes)
4456 {
4457  size_t pagesz;
4458  init_mparams();
4459  pagesz = mparams.page_size;
4460  return dlmemalign(pagesz,
4461  (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4462 }
4463 
4464 int
4465 dlmalloc_trim(size_t pad)
4466 {
4467  int result = 0;
4468  if (!PREACTION(gm)) {
4469  result = sys_trim(gm, pad);
4470  POSTACTION(gm);
4471  }
4472  return result;
4473 }
4474 
4475 size_t
4476 dlmalloc_footprint(void)
4477 {
4478  return gm->footprint;
4479 }
4480 
4481 size_t
4482 dlmalloc_max_footprint(void)
4483 {
4484  return gm->max_footprint;
4485 }
4486 
4487 #if !NO_MALLINFO
4488 struct mallinfo
4489 dlmallinfo(void)
4490 {
4491  return internal_mallinfo(gm);
4492 }
4493 #endif /* NO_MALLINFO */
4494 
4495 void
4496 dlmalloc_stats()
4497 {
4498  internal_malloc_stats(gm);
4499 }
4500 
4501 size_t
4502 dlmalloc_usable_size(void *mem)
4503 {
4504  if (mem != 0) {
4505  mchunkptr p = mem2chunk(mem);
4506  if (cinuse(p))
4507  return chunksize(p) - overhead_for(p);
4508  }
4509  return 0;
4510 }
4511 
4512 int
4513 dlmallopt(int param_number, int value)
4514 {
4515  return change_mparam(param_number, value);
4516 }
4517 
4518 #endif /* !ONLY_MSPACES */
4519 
4520 /* ----------------------------- user mspaces ---------------------------- */
4521 
4522 #if MSPACES
4523 
4524 static mstate
4525 init_user_mstate(char *tbase, size_t tsize)
4526 {
4527  size_t msize = pad_request(sizeof(struct malloc_state));
4528  mchunkptr mn;
4529  mchunkptr msp = align_as_chunk(tbase);
4530  mstate m = (mstate) (chunk2mem(msp));
4531  memset(m, 0, msize);
4532  INITIAL_LOCK(&m->mutex);
4533  msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
4534  m->seg.base = m->least_addr = tbase;
4535  m->seg.size = m->footprint = m->max_footprint = tsize;
4536  m->magic = mparams.magic;
4537  m->mflags = mparams.default_mflags;
4538  disable_contiguous(m);
4539  init_bins(m);
4540  mn = next_chunk(mem2chunk(m));
4541  init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
4542  check_top_chunk(m, m->top);
4543  return m;
4544 }
4545 
4546 mspace
4547 create_mspace(size_t capacity, int locked)
4548 {
4549  mstate m = 0;
4550  size_t msize = pad_request(sizeof(struct malloc_state));
4551  init_mparams(); /* Ensure pagesize etc initialized */
4552 
4553  if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4554  size_t rs = ((capacity == 0) ? mparams.granularity :
4555  (capacity + TOP_FOOT_SIZE + msize));
4556  size_t tsize = granularity_align(rs);
4557  char *tbase = (char *) (CALL_MMAP(tsize));
4558  if (tbase != CMFAIL) {
4559  m = init_user_mstate(tbase, tsize);
4560  m->seg.sflags = IS_MMAPPED_BIT;
4561  set_lock(m, locked);
4562  }
4563  }
4564  return (mspace) m;
4565 }
4566 
4567 mspace
4568 create_mspace_with_base(void *base, size_t capacity, int locked)
4569 {
4570  mstate m = 0;
4571  size_t msize = pad_request(sizeof(struct malloc_state));
4572  init_mparams(); /* Ensure pagesize etc initialized */
4573 
4574  if (capacity > msize + TOP_FOOT_SIZE &&
4575  capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4576  m = init_user_mstate((char *) base, capacity);
4577  m->seg.sflags = EXTERN_BIT;
4578  set_lock(m, locked);
4579  }
4580  return (mspace) m;
4581 }
4582 
4583 size_t
4584 destroy_mspace(mspace msp)
4585 {
4586  size_t freed = 0;
4587  mstate ms = (mstate) msp;
4588  if (ok_magic(ms)) {
4589  msegmentptr sp = &ms->seg;
4590  while (sp != 0) {
4591  char *base = sp->base;
4592  size_t size = sp->size;
4593  flag_t flag = sp->sflags;
4594  sp = sp->next;
4595  if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4596  CALL_MUNMAP(base, size) == 0)
4597  freed += size;
4598  }
4599  } else {
4600  USAGE_ERROR_ACTION(ms, ms);
4601  }
4602  return freed;
4603 }
4604 
4605 /*
4606  mspace versions of routines are near-clones of the global
4607  versions. This is not so nice but better than the alternatives.
4608 */
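/*
  Typical mspace usage, using only entry points defined in this file
  (a sketch):

      mspace msp = create_mspace(0, 1);   // default capacity, locked
      if (msp != 0) {
          void *p = mspace_malloc(msp, 128);
          mspace_free(msp, p);
          destroy_mspace(msp);            // unmaps everything at once
      }
*/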
4609 
4610 
4611 void *
4612 mspace_malloc(mspace msp, size_t bytes)
4613 {
4614  mstate ms = (mstate) msp;
4615  if (!ok_magic(ms)) {
4616  USAGE_ERROR_ACTION(ms, ms);
4617  return 0;
4618  }
4619  if (!PREACTION(ms)) {
4620  void *mem;
4621  size_t nb;
4622  if (bytes <= MAX_SMALL_REQUEST) {
4623  bindex_t idx;
4624  binmap_t smallbits;
4625  nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4626  idx = small_index(nb);
4627  smallbits = ms->smallmap >> idx;
4628 
4629  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4630  mchunkptr b, p;
4631  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4632  b = smallbin_at(ms, idx);
4633  p = b->fd;
4634  assert(chunksize(p) == small_index2size(idx));
4635  unlink_first_small_chunk(ms, b, p, idx);
4636  set_inuse_and_pinuse(ms, p, small_index2size(idx));
4637  mem = chunk2mem(p);
4638  check_malloced_chunk(ms, mem, nb);
4639  goto postaction;
4640  }
4641 
4642  else if (nb > ms->dvsize) {
4643  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4644  mchunkptr b, p, r;
4645  size_t rsize;
4646  bindex_t i;
4647  binmap_t leftbits =
4648  (smallbits << idx) & left_bits(idx2bit(idx));
4649  binmap_t leastbit = least_bit(leftbits);
4650  compute_bit2idx(leastbit, i);
4651  b = smallbin_at(ms, i);
4652  p = b->fd;
4653  assert(chunksize(p) == small_index2size(i));
4654  unlink_first_small_chunk(ms, b, p, i);
4655  rsize = small_index2size(i) - nb;
4656  /* Fit here cannot be remainderless if 4byte sizes */
4657  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4658  set_inuse_and_pinuse(ms, p, small_index2size(i));
4659  else {
4660  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4661  r = chunk_plus_offset(p, nb);
4662  set_size_and_pinuse_of_free_chunk(r, rsize);
4663  replace_dv(ms, r, rsize);
4664  }
4665  mem = chunk2mem(p);
4666  check_malloced_chunk(ms, mem, nb);
4667  goto postaction;
4668  }
4669 
4670  else if (ms->treemap != 0
4671  && (mem = tmalloc_small(ms, nb)) != 0) {
4672  check_malloced_chunk(ms, mem, nb);
4673  goto postaction;
4674  }
4675  }
4676  } else if (bytes >= MAX_REQUEST)
4677  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4678  else {
4679  nb = pad_request(bytes);
4680  if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4681  check_malloced_chunk(ms, mem, nb);
4682  goto postaction;
4683  }
4684  }
4685 
4686  if (nb <= ms->dvsize) {
4687  size_t rsize = ms->dvsize - nb;
4688  mchunkptr p = ms->dv;
4689  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4690  mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4691  ms->dvsize = rsize;
4692  set_size_and_pinuse_of_free_chunk(r, rsize);
4693  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4694  } else { /* exhaust dv */
4695  size_t dvs = ms->dvsize;
4696  ms->dvsize = 0;
4697  ms->dv = 0;
4698  set_inuse_and_pinuse(ms, p, dvs);
4699  }
4700  mem = chunk2mem(p);
4701  check_malloced_chunk(ms, mem, nb);
4702  goto postaction;
4703  }
4704 
4705  else if (nb < ms->topsize) { /* Split top */
4706  size_t rsize = ms->topsize -= nb;
4707  mchunkptr p = ms->top;
4708  mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4709  r->head = rsize | PINUSE_BIT;
4710  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4711  mem = chunk2mem(p);
4712  check_top_chunk(ms, ms->top);
4713  check_malloced_chunk(ms, mem, nb);
4714  goto postaction;
4715  }
4716 
4717  mem = sys_alloc(ms, nb);
4718 
4719  postaction:
4720  POSTACTION(ms);
4721  return mem;
4722  }
4723 
4724  return 0;
4725 }
4726 
4727 void
4728 mspace_free(mspace msp, void *mem)
4729 {
4730  if (mem != 0) {
4731  mchunkptr p = mem2chunk(mem);
4732 #if FOOTERS
4733  mstate fm = get_mstate_for(p);
4734 #else /* FOOTERS */
4735  mstate fm = (mstate) msp;
4736 #endif /* FOOTERS */
4737  if (!ok_magic(fm)) {
4738  USAGE_ERROR_ACTION(fm, p);
4739  return;
4740  }
4741  if (!PREACTION(fm)) {
4742  check_inuse_chunk(fm, p);
4743  if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4744  size_t psize = chunksize(p);
4745  mchunkptr next = chunk_plus_offset(p, psize);
4746  if (!pinuse(p)) {
4747  size_t prevsize = p->prev_foot;
4748  if ((prevsize & IS_MMAPPED_BIT) != 0) {
4749  prevsize &= ~IS_MMAPPED_BIT;
4750  psize += prevsize + MMAP_FOOT_PAD;
4751  if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4752  fm->footprint -= psize;
4753  goto postaction;
4754  } else {
4755  mchunkptr prev = chunk_minus_offset(p, prevsize);
4756  psize += prevsize;
4757  p = prev;
4758  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4759  if (p != fm->dv) {
4760  unlink_chunk(fm, p, prevsize);
4761  } else if ((next->head & INUSE_BITS) ==
4762  INUSE_BITS) {
4763  fm->dvsize = psize;
4764  set_free_with_pinuse(p, psize, next);
4765  goto postaction;
4766  }
4767  } else
4768  goto erroraction;
4769  }
4770  }
4771 
4772  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4773  if (!cinuse(next)) { /* consolidate forward */
4774  if (next == fm->top) {
4775  size_t tsize = fm->topsize += psize;
4776  fm->top = p;
4777  p->head = tsize | PINUSE_BIT;
4778  if (p == fm->dv) {
4779  fm->dv = 0;
4780  fm->dvsize = 0;
4781  }
4782  if (should_trim(fm, tsize))
4783  sys_trim(fm, 0);
4784  goto postaction;
4785  } else if (next == fm->dv) {
4786  size_t dsize = fm->dvsize += psize;
4787  fm->dv = p;
4788  p->head = dsize | PINUSE_BIT;
4789  goto postaction;
4790  } else {
4791  size_t nsize = chunksize(next);
4792  psize += nsize;
4793  unlink_chunk(fm, next, nsize);
4794  set_size_and_pinuse_of_free_chunk(p, psize);
4795  if (p == fm->dv) {
4796  fm->dvsize = psize;
4797  goto postaction;
4798  }
4799  }
4800  } else
4801  set_free_with_pinuse(p, psize, next);
4802  insert_chunk(fm, p, psize);
4803  check_free_chunk(fm, p);
4804  goto postaction;
4805  }
4806  }
4807  erroraction:
4808  USAGE_ERROR_ACTION(fm, p);
4809  postaction:
4810  POSTACTION(fm);
4811  }
4812  }
4813 }
4814 
4815 void *
4816 mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
4817 {
4818  void *mem;
4819  size_t req = 0;
4820  mstate ms = (mstate) msp;
4821  if (!ok_magic(ms)) {
4822  USAGE_ERROR_ACTION(ms, ms);
4823  return 0;
4824  }
4825  if (n_elements != 0) {
4826  req = n_elements * elem_size;
4827  if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4828  (req / n_elements != elem_size))
4829  req = MAX_SIZE_T; /* force downstream failure on overflow */
4830  }
4831  mem = internal_malloc(ms, req);
4832  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4833  memset(mem, 0, req);
4834  return mem;
4835 }
4836 
4837 void *
4838 mspace_realloc(mspace msp, void *oldmem, size_t bytes)
4839 {
4840  if (oldmem == 0)
4841  return mspace_malloc(msp, bytes);
4842 #ifdef REALLOC_ZERO_BYTES_FREES
4843  if (bytes == 0) {
4844  mspace_free(msp, oldmem);
4845  return 0;
4846  }
4847 #endif /* REALLOC_ZERO_BYTES_FREES */
4848  else {
4849 #if FOOTERS
4850  mchunkptr p = mem2chunk(oldmem);
4851  mstate ms = get_mstate_for(p);
4852 #else /* FOOTERS */
4853  mstate ms = (mstate) msp;
4854 #endif /* FOOTERS */
4855  if (!ok_magic(ms)) {
4856  USAGE_ERROR_ACTION(ms, ms);
4857  return 0;
4858  }
4859  return internal_realloc(ms, oldmem, bytes);
4860  }
4861 }
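/*
   Note on the preprocessor dance above: when REALLOC_ZERO_BYTES_FREES
   is undefined, the "else" left behind by the #ifdef binds instead to
   the initial "if (oldmem == 0)" statement, so the function still
   parses, and non-null pointers take the internal_realloc path either
   way.
*/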
4862 
4863 void *
4864 mspace_memalign(mspace msp, size_t alignment, size_t bytes)
4865 {
4866  mstate ms = (mstate) msp;
4867  if (!ok_magic(ms)) {
4868  USAGE_ERROR_ACTION(ms, ms);
4869  return 0;
4870  }
4871  return internal_memalign(ms, alignment, bytes);
4872 }
4873 
4874 void **
4875 mspace_independent_calloc(mspace msp, size_t n_elements,
4876  size_t elem_size, void *chunks[])
4877 {
4878  size_t sz = elem_size; /* serves as 1-element array */
4879  mstate ms = (mstate) msp;
4880  if (!ok_magic(ms)) {
4881  USAGE_ERROR_ACTION(ms, ms);
4882  return 0;
4883  }
4884  return ialloc(ms, n_elements, &sz, 3, chunks);
4885 }
4886 
4887 void **
4888 mspace_independent_comalloc(mspace msp, size_t n_elements,
4889  size_t sizes[], void *chunks[])
4890 {
4891  mstate ms = (mstate) msp;
4892  if (!ok_magic(ms)) {
4893  USAGE_ERROR_ACTION(ms, ms);
4894  return 0;
4895  }
4896  return ialloc(ms, n_elements, sizes, 0, chunks);
4897 }
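/*
   mspace_independent_comalloc carves several chunks out of one request,
   with per-chunk sizes taken from an array, mirroring the global
   independent_comalloc described earlier in this file.  A hedged usage
   sketch (element counts, sizes, and variable names are illustrative
   only):

   size_t sizes[3] = { 64, 128, 256 };
   void *chunks[3];
   if (mspace_independent_comalloc(msp, 3, sizes, chunks) != 0) {
       // chunks[0..2] now point at blocks of 64, 128 and 256 bytes
       // within the given mspace
       mspace_free(msp, chunks[1]);   // each may be freed separately
   }
*/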
4898 
4899 int
4900 mspace_trim(mspace msp, size_t pad)
4901 {
4902  int result = 0;
4903  mstate ms = (mstate) msp;
4904  if (ok_magic(ms)) {
4905  if (!PREACTION(ms)) {
4906  result = sys_trim(ms, pad);
4907  POSTACTION(ms);
4908  }
4909  } else {
4910  USAGE_ERROR_ACTION(ms, ms);
4911  }
4912  return result;
4913 }
4914 
4915 void
4916 mspace_malloc_stats(mspace msp)
4917 {
4918  mstate ms = (mstate) msp;
4919  if (ok_magic(ms)) {
4920  internal_malloc_stats(ms);
4921  } else {
4922  USAGE_ERROR_ACTION(ms, ms);
4923  }
4924 }
4925 
4926 size_t
4927 mspace_footprint(mspace msp)
4928 {
4929  size_t result = 0;
4930  mstate ms = (mstate) msp;
4931  if (ok_magic(ms)) {
4932  result = ms->footprint;
4933  } else
4934  USAGE_ERROR_ACTION(ms, ms);
4935  return result;
4936 }
4937 
4938 
4939 size_t
4940 mspace_max_footprint(mspace msp)
4941 {
4942  size_t result = 0;
4943  mstate ms = (mstate) msp;
4944  if (ok_magic(ms)) {
4945  result = ms->max_footprint;
4946  } else
4947  USAGE_ERROR_ACTION(ms, ms);
4948  return result;
4949 }
4950 
4951 
4952 #if !NO_MALLINFO
4953 struct mallinfo
4954 mspace_mallinfo(mspace msp)
4955 {
4956  mstate ms = (mstate) msp;
4957  if (!ok_magic(ms)) {
4958  USAGE_ERROR_ACTION(ms, ms);
4959  }
4960  return internal_mallinfo(ms);
4961 }
4962 #endif /* NO_MALLINFO */
4963 
4964 int
4965 mspace_mallopt(int param_number, int value)
4966 {
4967  return change_mparam(param_number, value);
4968 }
4969 
4970 #endif /* MSPACES */
4971 
4972 /* -------------------- Alternative MORECORE functions ------------------- */
4973 
4974 /*
4975  Guidelines for creating a custom version of MORECORE:
4976 
4977  * For best performance, MORECORE should allocate in multiples of pagesize.
4978  * MORECORE may allocate more memory than requested. (Or even less,
4979  but this will usually result in a malloc failure.)
4980  * MORECORE must not allocate memory when given argument zero, but
4981  instead return one past the end address of memory from previous
4982  nonzero call.
4983  * For best performance, consecutive calls to MORECORE with positive
4984  arguments should return increasing addresses, indicating that
4985  space has been contiguously extended.
4986  * Even though consecutive calls to MORECORE need not return contiguous
4987  addresses, it must be OK for malloc'ed chunks to span multiple
4988  regions in those cases where they do happen to be contiguous.
4989  * MORECORE need not handle negative arguments -- it may instead
4990  just return MFAIL when given negative arguments.
4991  Negative arguments are always multiples of pagesize. MORECORE
4992  must not misinterpret negative args as large positive unsigned
4993  args. You can suppress all such calls from even occurring by defining
4994  MORECORE_CANNOT_TRIM.  A minimal sketch of these rules follows,
  ahead of the contributed example below.
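  A MORECORE serving requests out of a single static arena could look
  like the following (the arena size and names are illustrative only,
  and the sketch assumes MORECORE_CANNOT_TRIM is left undefined, so
  negative arguments must be rejected explicitly):

  #define ARENA_SIZE (1024 * 1024)
  static char arena[ARENA_SIZE];
  static size_t arena_used = 0;

  void *static_morecore(int size)
  {
      if (size == 0)                  // report one past current end
          return arena + arena_used;
      if (size < 0)                   // trimming not supported
          return (void *) MFAIL;
      if ((size_t) size > ARENA_SIZE - arena_used)
          return (void *) MFAIL;      // arena exhausted
      arena_used += (size_t) size;
      return arena + arena_used - (size_t) size;
  }

  wired in with:

  #define MORECORE static_morecore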
4995 
4996  As an example alternative MORECORE, here is a custom allocator
4997  kindly contributed for pre-OSX macOS. It uses virtually but not
4998  necessarily physically contiguous non-paged memory (locked in,
4999  present and won't get swapped out). You can use it by uncommenting
5000  this section, adding some #includes, and setting up the appropriate
5001  defines above:
5002 
5003  #define MORECORE osMoreCore
5004 
5005  There is also a shutdown routine that should somehow be called for
5006  cleanup upon program exit.
5007 
5008  #define MAX_POOL_ENTRIES 100
5009  #define MINIMUM_MORECORE_SIZE (64 * 1024U)
5010  static int next_os_pool;
5011  void *our_os_pools[MAX_POOL_ENTRIES];
5012 
5013  void *osMoreCore(int size)
5014  {
5015  void *ptr = 0;
5016  static void *sbrk_top = 0;
5017 
5018  if (size > 0)
5019  {
5020  if (size < MINIMUM_MORECORE_SIZE)
5021  size = MINIMUM_MORECORE_SIZE;
5022  if (CurrentExecutionLevel() == kTaskLevel)
5023  ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5024  if (ptr == 0)
5025  {
5026  return (void *) MFAIL;
5027  }
5028  // save ptrs so they can be freed during cleanup
5029  our_os_pools[next_os_pool] = ptr;
5030  next_os_pool++;
5031  ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5032  sbrk_top = (char *) ptr + size;
5033  return ptr;
5034  }
5035  else if (size < 0)
5036  {
5037  // we don't currently support shrink behavior
5038  return (void *) MFAIL;
5039  }
5040  else
5041  {
5042  return sbrk_top;
5043  }
5044  }
5045 
5046  // cleanup any allocated memory pools
5047  // called as last thing before shutting down driver
5048 
5049  void osCleanupMem(void)
5050  {
5051  void **ptr;
5052 
5053  for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5054  if (*ptr)
5055  {
5056  PoolDeallocate(*ptr);
5057  *ptr = 0;
5058  }
5059  }
5060 
5061 */
5062 
5063 
5064 /* -----------------------------------------------------------------------
5065 History:
5066  V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
5067  * Add max_footprint functions
5068  * Ensure all appropriate literals are size_t
5069  * Fix conditional compilation problem for some #define settings
5070  * Avoid concatenating segments with the one provided
5071  in create_mspace_with_base
5072  * Rename some variables to avoid compiler shadowing warnings
5073  * Use explicit lock initialization.
5074  * Better handling of sbrk interference.
5075  * Simplify and fix segment insertion, trimming and mspace_destroy
5076  * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
5077  * Thanks especially to Dennis Flanagan for help on these.
5078 
5079  V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
5080  * Fix memalign brace error.
5081 
5082  V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
5083  * Fix improper #endif nesting in C++
5084  * Add explicit casts needed for C++
5085 
5086  V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
5087  * Use trees for large bins
5088  * Support mspaces
5089  * Use segments to unify sbrk-based and mmap-based system allocation,
5090  removing need for emulation on most platforms without sbrk.
5091  * Default safety checks
5092  * Optional footer checks. Thanks to William Robertson for the idea.
5093  * Internal code refactoring
5094  * Incorporate suggestions and platform-specific changes.
5095  Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
5096  Aaron Bachmann, Emery Berger, and others.
5097  * Speed up non-fastbin processing enough to remove fastbins.
5098  * Remove useless cfree() to avoid conflicts with other apps.
5099  * Remove internal memcpy, memset. Compilers handle builtins better.
5100  * Remove some options that no one ever used and rename others.
5101 
5102  V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5103  * Fix malloc_state bitmap array misdeclaration
5104 
5105  V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
5106  * Allow tuning of FIRST_SORTED_BIN_SIZE
5107  * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5108  * Better detection and support for non-contiguousness of MORECORE.
5109  Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5110  * Bypass most of malloc if no frees. Thanks to Emery Berger.
5111  * Fix freeing of old top non-contiguous chunk in sysmalloc.
5112  * Raised default trim and map thresholds to 256K.
5113  * Fix mmap-related #defines. Thanks to Lubos Lunak.
5114  * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5115  * Branch-free bin calculation
5116  * Default trim and mmap thresholds now 256K.
5117 
5118  V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5119  * Introduce independent_comalloc and independent_calloc.
5120  Thanks to Michael Pachos for motivation and help.
5121  * Make optional .h file available
5122  * Allow > 2GB requests on 32bit systems.
5123  * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5124  Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5125  and Anonymous.
5126  * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5127  helping test this.)
5128  * memalign: check alignment arg
5129  * realloc: don't try to shift chunks backwards, since this
5130  leads to more fragmentation in some programs and doesn't
5131  seem to help in any others.
5132  * Collect all cases in malloc requiring system memory into sysmalloc
5133  * Use mmap as backup to sbrk
5134  * Place all internal state in malloc_state
5135  * Introduce fastbins (although similar to 2.5.1)
5136  * Many minor tunings and cosmetic improvements
5137  * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5138  * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5139  Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5140  * Include errno.h to support default failure action.
5141 
5142  V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5143  * return null for negative arguments
5144  * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5145  * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5146  (e.g. WIN32 platforms)
5147  * Cleanup header file inclusion for WIN32 platforms
5148  * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5149  * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5150  memory allocation routines
5151  * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5152  * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5153  usage of 'assert' in non-WIN32 code
5154  * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5155  avoid infinite loop
5156  * Always call 'fREe()' rather than 'free()'
5157 
5158  V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5159  * Fixed ordering problem with boundary-stamping
5160 
5161  V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5162  * Added pvalloc, as recommended by H.J. Liu
5163  * Added 64bit pointer support mainly from Wolfram Gloger
5164  * Added anonymously donated WIN32 sbrk emulation
5165  * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5166  * malloc_extend_top: fix mask error that caused wastage after
5167  foreign sbrks
5168  * Add linux mremap support code from HJ Liu
5169 
5170  V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5171  * Integrated most documentation with the code.
5172  * Add support for mmap, with help from
5173  Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5174  * Use last_remainder in more cases.
5175  * Pack bins using idea from colin@nyx10.cs.du.edu
5176  * Use ordered bins instead of best-fit threshold
5177  * Eliminate block-local decls to simplify tracing and debugging.
5178  * Support another case of realloc via move into top
5179  * Fix error occurring when initial sbrk_base not word-aligned.
5180  * Rely on page size for units instead of SBRK_UNIT to
5181  avoid surprises about sbrk alignment conventions.
5182  * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5183  (raymond@es.ele.tue.nl) for the suggestion.
5184  * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5185  * More precautions for cases where other routines call sbrk,
5186  courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5187  * Added macros etc., allowing use in linux libc from
5188  H.J. Lu (hjl@gnu.ai.mit.edu)
5189  * Inverted this history list
5190 
5191  V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5192  * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5193  * Removed all preallocation code since under current scheme
5194  the work required to undo bad preallocations exceeds
5195  the work saved in good cases for most test programs.
5196  * No longer use return list or unconsolidated bins since
5197  no scheme using them consistently outperforms those that don't
5198  given above changes.
5199  * Use best fit for very large chunks to prevent some worst-cases.
5200  * Added some support for debugging
5201 
5202  V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5203  * Removed footers when chunks are in use. Thanks to
5204  Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5205 
5206  V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5207  * Added malloc_trim, with help from Wolfram Gloger
5208  (wmglo@Dent.MED.Uni-Muenchen.DE).
5209 
5210  V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5211 
5212  V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5213  * realloc: try to expand in both directions
5214  * malloc: swap order of clean-bin strategy;
5215  * realloc: only conditionally expand backwards
5216  * Try not to scavenge used bins
5217  * Use bin counts as a guide to preallocation
5218  * Occasionally bin return list chunks in first scan
5219  * Add a few optimizations from colin@nyx10.cs.du.edu
5220 
5221  V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5222  * faster bin computation & slightly different binning
5223  * merged all consolidations to one part of malloc proper
5224  (eliminating old malloc_find_space & malloc_clean_bin)
5225  * Scan 2 returns chunks (not just 1)
5226  * Propagate failure in realloc if malloc returns 0
5227  * Add stuff to allow compilation on non-ANSI compilers
5228  from kpv@research.att.com
5229 
5230  V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5231  * removed potential for odd address access in prev_chunk
5232  * removed dependency on getpagesize.h
5233  * misc cosmetics and a bit more internal documentation
5234  * anticosmetics: mangled names in macros to evade debugger strangeness
5235  * tested on sparc, hp-700, dec-mips, rs6000
5236  with gcc & native cc (hp, dec only) allowing
5237  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5238 
5239  Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5240  * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5241  structure of old version, but most details differ.)
5242 
5243 */
5244 
5245 #endif /* !HAVE_MALLOC */
5246 
5247 #ifdef HAVE_MALLOC
5248 #define real_malloc malloc
5249 #define real_calloc calloc
5250 #define real_realloc realloc
5251 #define real_free free
5252 #else
5253 #define real_malloc dlmalloc
5254 #define real_calloc dlcalloc
5255 #define real_realloc dlrealloc
5256 #define real_free dlfree
5257 #endif
5258 
5259 /* Memory functions used by SDL that can be replaced by the application */
5260 static struct
5261 {
5262  SDL_malloc_func malloc_func;
5263  SDL_calloc_func calloc_func;
5264  SDL_realloc_func realloc_func;
5265  SDL_free_func free_func;
5266  SDL_atomic_t num_allocations;
5267 } s_mem = {
5268  real_malloc, real_calloc, real_realloc, real_free, { 0 }
5269 };
5270 
5271 void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func,
5272  SDL_calloc_func *calloc_func,
5273  SDL_realloc_func *realloc_func,
5274  SDL_free_func *free_func)
5275 {
5276  if (malloc_func) {
5277  *malloc_func = s_mem.malloc_func;
5278  }
5279  if (calloc_func) {
5280  *calloc_func = s_mem.calloc_func;
5281  }
5282  if (realloc_func) {
5283  *realloc_func = s_mem.realloc_func;
5284  }
5285  if (free_func) {
5286  *free_func = s_mem.free_func;
5287  }
5288 }
5289 
5290 int SDL_SetMemoryFunctions(SDL_malloc_func malloc_func,
5291  SDL_calloc_func calloc_func,
5292  SDL_realloc_func realloc_func,
5293  SDL_free_func free_func)
5294 {
5295  if (!malloc_func) {
5296  return SDL_InvalidParamError("malloc_func");
5297  }
5298  if (!calloc_func) {
5299  return SDL_InvalidParamError("calloc_func");
5300  }
5301  if (!realloc_func) {
5302  return SDL_InvalidParamError("realloc_func");
5303  }
5304  if (!free_func) {
5305  return SDL_InvalidParamError("free_func");
5306  }
5307 
5308  s_mem.malloc_func = malloc_func;
5309  s_mem.calloc_func = calloc_func;
5310  s_mem.realloc_func = realloc_func;
5311  s_mem.free_func = free_func;
5312  return 0;
5313 }
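/*
   A typical application-side use of the two functions above is to wrap
   whatever allocator is currently installed, e.g. to add logging.  A
   hedged sketch (the wrapper and variable names are illustrative, not
   part of SDL):

   static SDL_malloc_func orig_malloc;
   static SDL_calloc_func orig_calloc;
   static SDL_realloc_func orig_realloc;
   static SDL_free_func orig_free;

   static void *logging_malloc(size_t size)
   {
       void *mem = orig_malloc(size);   // forward to saved allocator
       SDL_Log("malloc(%u) -> %p", (unsigned) size, mem);
       return mem;
   }

   void install_wrappers(void)
   {
       SDL_GetMemoryFunctions(&orig_malloc, &orig_calloc,
                              &orig_realloc, &orig_free);
       SDL_SetMemoryFunctions(logging_malloc, orig_calloc,
                              orig_realloc, orig_free);
   }
*/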
5314 
5315 int SDL_GetNumAllocations(void)
5316 {
5317  return SDL_AtomicGet(&s_mem.num_allocations);
5318 }
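/*
   Since the wrappers below increment num_allocations on every
   successful malloc/calloc (and on a realloc of a NULL pointer) and
   decrement it on every free, the counter can bracket a code path to
   check for leaks.  A hedged sketch (run_suspect_code is an
   illustrative placeholder):

   int before = SDL_GetNumAllocations();
   run_suspect_code();
   if (SDL_GetNumAllocations() != before) {
       SDL_Log("leaked %d allocations",
               SDL_GetNumAllocations() - before);
   }
*/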
5319 
5320 void *SDL_malloc(size_t size)
5321 {
5322  void *mem;
5323 
5324  if (!size) {
5325  size = 1;
5326  }
5327 
5328  mem = s_mem.malloc_func(size);
5329  if (mem) {
5330  SDL_AtomicIncRef(&s_mem.num_allocations);
5331  }
5332  return mem;
5333 }
5334 
5335 void *SDL_calloc(size_t nmemb, size_t size)
5336 {
5337  void *mem;
5338 
5339  if (!nmemb || !size) {
5340  nmemb = 1;
5341  size = 1;
5342  }
5343 
5344  mem = s_mem.calloc_func(nmemb, size);
5345  if (mem) {
5346  SDL_AtomicIncRef(&s_mem.num_allocations);
5347  }
5348  return mem;
5349 }
5350 
5351 void *SDL_realloc(void *ptr, size_t size)
5352 {
5353  void *mem;
5354 
5355  if (!ptr && !size) {
5356  size = 1;
5357  }
5358 
5359  mem = s_mem.realloc_func(ptr, size);
5360  if (mem && !ptr) {
5361  SDL_AtomicIncRef(&s_mem.num_allocations);
5362  }
5363  return mem;
5364 }
5365 
5366 void SDL_free(void *ptr)
5367 {
5368  if (!ptr) {
5369  return;
5370  }
5371 
5372  s_mem.free_func(ptr);
5373  (void)SDL_AtomicDecRef(&s_mem.num_allocations);
5374 }
5375 
5376 /* vi: set ts=4 sw=4 expandtab: */