[171]1/* DistriNet malloc (dnmalloc): a more secure memory allocator.
[172]2 Copyright (C) 2005, Yves Younan, Wouter Joosen, Frank Piessens
3 and Rainer Wichmann
4
[171]5 The authors can be contacted by:
6 Email: dnmalloc@fort-knox.org
7 Address:
[171]9 Yves Younan
10 Celestijnenlaan 200A
11 B-3001 Heverlee
12 Belgium
13
14 This library is free software; you can redistribute it and/or
15 modify it under the terms of the GNU Lesser General Public
16 License as published by the Free Software Foundation; either
17 version 2.1 of the License, or (at your option) any later version.
18
19 This library is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 Lesser General Public License for more details.
23
24 You should have received a copy of the GNU Lesser General Public
25 License along with this library; if not, write to the Free Software
26 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27
28*/
29
[172]30/* Current version: dnmalloc 1.0 */
[171]31/* Includes arc4random from OpenBSD, which is under the BSD license */
32
33/* Versions:
34 0.1-0.5:
35 Proof of concept implementation by Hans Van den Eynden and Yves Younan
36 0.6-0.7:
37 Bug fixes by Yves Younan
38 0.8-1.0.beta4:
39 Reimplementation from scratch by Yves Younan
40 1.0.beta4:
41 Public release
42 1.0.beta5:
43 Prev_chunkinfo speeded up, was really slow because of the way we did lookups
44 A freechunkinfo region is now freed when it is completely empty and
45 not the current one
46
47 1.0 (Rainer Wichmann [support at la dash samhna dot org]):
48 ---------------------
49
50 Compiler warnings fixed
51 Define REALLOC_ZERO_BYTES_FREES because it's what GNU libc does
52 (and what the standard says)
53 Removed unused code
54 Fix assert(aligned_OK(chunk(newp)));
55 -> assert(aligned_OK(chunk(oldp)));
56 Fix statistics in sYSMALLOc
57 Fix overwrite of av->top in sYSMALLOc
58 Provide own assert(), glibc assert() doesn't work (calls malloc)
59 Fix bug in mEMALIGn(), put remainder in hashtable before calling fREe
60 Remove cfree, independent_cmalloc, independent_comalloc (untested
61 public functions not covered by any standard)
62 Provide posix_memalign (that one is in the standard)
63 Move the malloc_state struct to mmapped memory protected by guard pages
64 Add arc4random function to initialize random canary on startup
65 Implement random canary at end of (re|m)alloced/memaligned buffer,
66 check at free/realloc
67 Remove code conditional on !HAVE_MMAP, since mmap is required anyway.
68 Use standard HAVE_foo macros (as generated by autoconf) instead of LACKS_foo
69
70 Profiling: Reorder branches in hashtable_add, next_chunkinfo,
71 prev_chunkinfo, hashtable_insert, mALLOc, fREe, request2size,
72 checked_request2size (gcc predicts if{} branch to be taken).
73 Use UNLIKELY macro (gcc __builtin_expect()) where branch
74 reordering would make the code awkward.
75
76 Portability: Hashtable always covers full 32bit address space to
77 avoid assumptions about memory layout.
78 Portability: Try hard to enforce mapping of mmapped memory into
79 32bit address space, even on 64bit systems.
80 Portability: Provide a dnmalloc_pthread_init() function, since
81 pthread locking on HP-UX only works if initialized
82 after the application has entered main().
83 Portability: On *BSD, pthread_mutex_lock is unusable since it
84 calls malloc, use spinlocks instead.
85 Portability: Dynamically detect whether the heap is within
86 32bit address range (e.g. on Linux x86_64, it isn't).
87 Don't use sbrk() if the heap is mapped to an address
88 outside the 32bit range, since this doesn't work with
89 the hashtable. New macro morecore32bit.
90
91 Success on: HP-UX 11.11/pthread, Linux/pthread (32/64 bit),
92 FreeBSD/pthread, and Solaris 10 i386/pthread.
93 Fail on: OpenBSD/pthread (in _thread_machdep_save_float_state),
94 might be related to OpenBSD pthread internals (??).
[228]95 Non-threaded version (#undef USE_MALLOC_LOCK)
[171]96 works on OpenBSD.
97
[172]98 further to 1.0:
99 Valgrind client requests inserted (#define USE_VALGRIND)
[173]100 Fix: malloc_consolidate (nextchunk->fd, nextchunk->bck may be NULL)
[172]101 Portability: minsize = 32 bit on 64bit architecture
102 Minor cleanups
[173]103 Fix: eliminate prototypes for memset, memcpy (they're in string.h)
[228]104 Fix: one more malloc_consolidate segfault
[172]105
[171]106 There may be some bugs left in this version. Please use with caution.
107*/
108
109
110
111/* Please read the following papers for documentation:
112
113 Yves Younan, Wouter Joosen, and Frank Piessens, A Methodology for Designing
114 Countermeasures against Current and Future Code Injection Attacks,
115 Proceedings of the Third IEEE International Information Assurance
116 Workshop 2005 (IWIA2005), College Park, Maryland, U.S.A., March 2005,
117 IEEE, IEEE Press.
118 http://www.fort-knox.org/younany_countermeasures.pdf
119
120 Yves Younan, Wouter Joosen and Frank Piessens and Hans Van den
121 Eynden. Security of Memory Allocators for C and C++. Technical Report
122 CW419, Departement Computerwetenschappen, Katholieke Universiteit
123 Leuven, July 2005. http://www.fort-knox.org/CW419.pdf
124
125 */
126
127/* Compile:
128 gcc -fPIC -rdynamic -c -Wall dnmalloc-portable.c
129 "Link":
130 Dynamic:
131 gcc -shared -Wl,-soname,libdnmalloc.so.0 -o libdnmalloc.so.0.0 dnmalloc-portable.o -lc
132 Static:
133 ar -rv libdnmalloc.a dnmalloc-portable.o
134
135*/
136
137/*
138 dnmalloc is based on dlmalloc 2.7.2 (by Doug Lea (dl@cs.oswego.edu))
139 dlmalloc was released as public domain and contained the following license:
140
141 "This is a version (aka dlmalloc) of malloc/free/realloc written by
142 Doug Lea and released to the public domain. Use, modify, and
143 redistribute this code without permission or acknowledgement in any
144 way you wish. Send questions, comments, complaints, performance
145 data, etc to dl@cs.oswego.edu
146
147 * VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
148
149 Note: There may be an updated version of this malloc obtainable at
150 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
151 Check before installing!"
152
153*/
154
155/* The following preprocessor macros are tested,
156 * and hence should have #define directives:
157 *
158 * HAVE_CONFIG_H Define to #include "config.h" (autoconf-generated)
159 *
160 * HAVE_UNISTD_H Define to #include <unistd.h>
161 *
162 * HAVE_SYS_UIO_H Define to #include <sys/uio.h> (for writev)
163 * HAVE_WRITEV Define if the 'writev' function is available
164 *
165 * HAVE_SYS_PARAM_H Define to #include <sys/param.h> (for pagesize)
166 *
167 * HAVE_MALLOC_H Define to #include <malloc.h> (for struct mallinfo)
168 *
169 * HAVE_FCNTL_H Define to #include <fcntl.h>
170 *
171 * HAVE_SYS_MMAN_H Define to #include <sys/mman.h>
172 * HAVE_MMAP Define if the 'mmap' function is available.
173 *
174 * HAVE_SCHED_H Define to #include <sched.h>
 175 * HAVE_SCHED_YIELD Define if the 'sched_yield' function is available
176 */
177
178
179/*
 180 __STD_C should be nonzero if using an ANSI-standard C compiler, a C++
181 compiler, or a C compiler sufficiently close to ANSI to get away
182 with it.
183*/
184
185#ifdef HAVE_CONFIG_H
186#include "config.h"
187#endif
188
[172]189#ifdef USE_VALGRIND
190#include <valgrind/memcheck.h>
191#else
192#define VALGRIND_FREELIKE_BLOCK(a,b) ((void)0)
193#define VALGRIND_MALLOCLIKE_BLOCK(a,b,c,d) ((void)0)
194#define VALGRIND_CREATE_MEMPOOL(a,b,c) ((void)0)
195#define VALGRIND_MEMPOOL_ALLOC(a,b,c) ((void)0)
196#define VALGRIND_MEMPOOL_FREE(a,b) ((void)0)
197#define VALGRIND_DESTROY_MEMPOOL(a) ((void)0)
198#define VALGRIND_MAKE_MEM_DEFINED(a,b) ((void)0)
199#define VALGRIND_MAKE_MEM_UNDEFINED(a,b) ((void)0)
200#define VALGRIND_MAKE_MEM_NOACCESS(a,b) ((void)0)
201#endif
202
[171]203#if defined (__GNUC__) && __GNUC__ > 2
204# define LIKELY(expression) (__builtin_expect(!!(expression), 1))
205# define UNLIKELY(expression) (__builtin_expect(!!(expression), 0))
206# define __attribute_malloc__ __attribute__ ((__malloc__))
207#else
208# define LIKELY(x) (x)
209# define UNLIKELY(x) (x)
210# define __attribute_malloc__ /* Ignore */
211#endif
212
213/*
214 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
215 large blocks. This is currently only possible on Linux with
216 kernel versions newer than 1.3.77.
217*/
218
219#ifndef HAVE_MREMAP
220#ifdef linux
221#define HAVE_MREMAP 1
222#define _GNU_SOURCE
223#else
224#define HAVE_MREMAP 0
225#endif
226#endif /* HAVE_MREMAP */
227
228
229
230#ifndef __STD_C
 231#if defined(__STDC__) || defined(__cplusplus)
232#define __STD_C 1
233#else
234#define __STD_C 0
235#endif
236#endif /*__STD_C*/
237
238
239/*
240 Void_t* is the pointer type that malloc should say it returns
241*/
242
243#ifndef Void_t
244#if (__STD_C || defined(WIN32))
245#define Void_t void
246#else
247#define Void_t char
248#endif
249#endif /*Void_t*/
250
251#if __STD_C
252#include <stddef.h> /* for size_t */
253#else
254#include <sys/types.h>
255#endif
256
257#if !defined(USE_SYSTEM_MALLOC)
258
259#ifdef __cplusplus
260extern "C" {
261#endif
262
263/* define HAVE_UNISTD_H if your system has a <unistd.h>. */
264
265#ifdef HAVE_UNISTD_H
266#include <unistd.h>
267#endif
268
269#ifdef HAVE_SYS_UIO_H
270#include <sys/uio.h>
271#endif
272
273#include <stdio.h> /* needed for malloc_stats */
274#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
275
276#include <string.h>
277#include <stdlib.h>
278
279#include <sys/resource.h>
280
281extern int errno;
282
283 /* 0: lazy,
284 * 1: medium (assertions compiled in),
285 * 2: high (guard pages at end of hash table and ciregions)
286 * 3: paranoid (guards at end of each allocated chunk, check at free)
287 */
288#ifndef PARANOIA
289#define PARANOIA 9
290#endif
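/*
   Illustrative note (not part of the original source): the paranoia
   level is normally fixed at build time, either by the package's build
   system or by defining PARANOIA before this point, e.g.

     #define PARANOIA 2

   or by passing -DPARANOIA=2 to the compiler. The default of 9 above
   simply sits above the highest documented level, so every PARANOIA
   guard below is presumably enabled.
*/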
291
292 /* Using assert() with multithreading will cause the code
293 * to deadlock since glibc __assert_fail will call malloc().
294 * We need our very own assert().
295 */
296typedef void assert_handler_tp(const char * error, const char *file, int line);
297
298#if PARANOIA > 0
299
300#ifdef NDEBUG
301#undef NDEBUG
302#endif
303
304static void default_assert_handler(const char *error,
305 const char *file, int line)
306{
307#ifdef HAVE_WRITEV
308 struct iovec iov[5];
309 char * i1 = "assertion failed (";
310 char * i3 = "): ";
311 char * i5 = "\n";
[279]312 int res = 0;
[171]313
314 iov[0].iov_base = i1; iov[0].iov_len = strlen(i1);
315 iov[1].iov_base = (char*) file; iov[1].iov_len = strlen(file);
316 iov[2].iov_base = i3; iov[2].iov_len = strlen(i3);
317 iov[3].iov_base = (char*) error; iov[3].iov_len = strlen(error);
[279]318 iov[4].iov_base = i5; iov[4].iov_len = strlen(i5);
319 do {
320 res = writev(STDERR_FILENO, iov, 5);
321 } while (res < 0 && errno == EINTR);
[171]322#else
323 fputs("assertion failed (", stderr);
324 fputs(file, stderr);
325 fputs("): ", stderr);
326 fputs(error, stderr);
327 fputc('\n', stderr);
328#endif
[172]329 (void) line;
[171]330 abort();
331}
332static assert_handler_tp *assert_handler = default_assert_handler;
333
[172]334
[171]335#define assert(x) \
336 do { \
337 if (UNLIKELY(!(x))) { \
338 assert_handler(#x, __FILE__, __LINE__); \
339 } \
340 } while (0)
341
342#else
343
344static assert_handler_tp *assert_handler = NULL;
345#define NDEBUG
346#define assert(x) ((void)0)
347
348#endif
349
350assert_handler_tp *dnmalloc_set_handler(assert_handler_tp *new)
351{
352 assert_handler_tp *old = assert_handler;
353 assert_handler = new;
354 return old;
355}
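/*
   Usage sketch (the handler name below is hypothetical, not part of
   dnmalloc): an application can install its own assertion handler to
   route the report through its own facilities before aborting. As
   noted above, the handler should not call back into malloc().

     static void my_assert_handler(const char *error,
                                   const char *file, int line)
     {
       (void) line;
       write(STDERR_FILENO, error, strlen(error));
       write(STDERR_FILENO, "\n", 1);
       abort();
     }

     assert_handler_tp *old = dnmalloc_set_handler(my_assert_handler);
*/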
356
357
358#include <stdarg.h>
359
360 /* define for debugging */
361 /* #define DNMALLOC_DEBUG */
362
[172]363 /* Do some extra checks? If not, covered by assert()s */
[171]364 /* #define DNMALLOC_CHECKS */
365
366 /*
367 The unsigned integer type used for comparing any two chunk sizes.
368 This should be at least as wide as size_t, but should not be signed.
369 */
370
371#ifndef CHUNK_SIZE_T
372#define CHUNK_SIZE_T unsigned long
373#endif
374
375/*
 376 The unsigned integer type used to hold addresses when they are
 377 manipulated as integers. Were intptr_t defined on all systems, it
 378 would suffice.
379*/
380#ifndef PTR_UINT
381#define PTR_UINT unsigned long
382#endif
383
384
385/*
386 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
387 of chunk sizes.
388
389 The default version is the same as size_t.
390
391 While not strictly necessary, it is best to define this as an
392 unsigned type, even if size_t is a signed type. This may avoid some
393 artificial size limitations on some systems.
394
395 On a 64-bit machine, you may be able to reduce malloc overhead by
396 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
397 expense of not being able to handle more than 2^32 of malloced
398 space. If this limitation is acceptable, you are encouraged to set
 399 this unless you are on a platform requiring 16-byte alignments. In
400 this case the alignment requirements turn out to negate any
401 potential advantages of decreasing size_t word size.
402
403 Implementors: Beware of the possible combinations of:
404 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
405 and might be the same width as int or as long
406 - size_t might have different width and signedness as INTERNAL_SIZE_T
407 - int and long might be 32 or 64 bits, and might be the same width
408 To deal with this, most comparisons and difference computations
409 among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
410 aware of the fact that casting an unsigned int to a wider long does
411 not sign-extend. (This also makes checking for negative numbers
412 awkward.) Some of these casts result in harmless compiler warnings
413 on some systems.
414*/
415
416#ifndef INTERNAL_SIZE_T
417#define INTERNAL_SIZE_T size_t
418#endif
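/*
   Illustration of the casting rule above (the identifiers are the ones
   used later in this file; the snippet itself is only a sketch):
   comparisons between chunk sizes are done on unsigned CHUNK_SIZE_T
   values, e.g.

     if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) ...

   so a "negative" difference shows up as a huge unsigned value rather
   than being sign-extended.
*/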
419
420/* The corresponding word size */
421#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
422
423
424
425/*
426 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
427 It must be a power of two at least 2 * SIZE_SZ, even on machines
428 for which smaller alignments would suffice. It may be defined as
429 larger than this though. Note however that code and data structures
430 are optimized for the case of 8-byte alignment.
431*/
432
433
434#ifndef MALLOC_ALIGNMENT
435#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
436#endif
437
438/* The corresponding bit mask value */
439#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
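/*
   For illustration (sketch): a pointer is suitably aligned exactly when
   its low bits under MALLOC_ALIGN_MASK are zero,

     ((PTR_UINT)(mem) & MALLOC_ALIGN_MASK) == 0

   which is essentially what the aligned_OK() checks later in this file
   verify.
*/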
440
441
442
443/*
444 REALLOC_ZERO_BYTES_FREES should be set if a call to
445 realloc with zero bytes should be the same as a call to free.
446 Some people think it should. Otherwise, since this malloc
447 returns a unique pointer for malloc(0), so does realloc(p, 0).
448*/
449
450#define REALLOC_ZERO_BYTES_FREES
451
452/*
453 TRIM_FASTBINS controls whether free() of a very small chunk can
454 immediately lead to trimming. Setting to true (1) can reduce memory
455 footprint, but will almost always slow down programs that use a lot
456 of small chunks.
457
458 Define this only if you are willing to give up some speed to more
459 aggressively reduce system-level memory footprint when releasing
460 memory in programs that use many small chunks. You can get
461 essentially the same effect by setting MXFAST to 0, but this can
462 lead to even greater slowdowns in programs using many small chunks.
463 TRIM_FASTBINS is an in-between compile-time option, that disables
464 only those chunks bordering topmost memory from being placed in
465 fastbins.
466*/
467
468#ifndef TRIM_FASTBINS
469#define TRIM_FASTBINS 0
470#endif
471
472
473/*
474 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
475 This is necessary when you only want to use this malloc in one part
476 of a program, using your regular system malloc elsewhere.
477*/
478
479/* #define USE_DL_PREFIX */
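/*
   Usage sketch: when compiled with USE_DL_PREFIX defined, callers use
   the prefixed names and the system malloc stays in place elsewhere:

     void *p = dlmalloc(128);
     ...
     dlfree(p);

   Without USE_DL_PREFIX the same routines are exported under the usual
   malloc()/free() names (see the name mapping below).
*/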
480
481
482/*
483 USE_MALLOC_LOCK causes wrapper functions to surround each
484 callable routine with pthread mutex lock/unlock.
485
486 USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
487*/
488
489/* #define USE_MALLOC_LOCK */
490
491
492/*
493 If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
494 actually a wrapper function that first calls MALLOC_PREACTION, then
495 calls the internal routine, and follows it with
496 MALLOC_POSTACTION. This is needed for locking, but you can also use
497 this, without USE_MALLOC_LOCK, for purposes of interception,
498 instrumentation, etc. It is a sad fact that using wrappers often
499 noticeably degrades performance of malloc-intensive programs.
500*/
501
502
503#ifdef USE_MALLOC_LOCK
504#define USE_PUBLIC_MALLOC_WRAPPERS
505#else
506/* #define USE_PUBLIC_MALLOC_WRAPPERS */
507#endif
508
509
510/*
511 Two-phase name translation.
512 All of the actual routines are given mangled names.
513 When wrappers are used, they become the public callable versions.
514 When DL_PREFIX is used, the callable names are prefixed.
515*/
516
517#ifndef USE_PUBLIC_MALLOC_WRAPPERS
518#define cALLOc public_cALLOc
519#define fREe public_fREe
520#define mALLOc public_mALLOc
521#define mEMALIGn public_mEMALIGn
522#define posix_mEMALIGn public_posix_mEMALIGn
523#define rEALLOc public_rEALLOc
524#define vALLOc public_vALLOc
525#define pVALLOc public_pVALLOc
526#define mALLINFo public_mALLINFo
527#define mALLOPt public_mALLOPt
528#define mTRIm public_mTRIm
529#define mSTATs public_mSTATs
530#define mUSABLe public_mUSABLe
531#endif
532
533#ifdef USE_DL_PREFIX
534#define public_cALLOc dlcalloc
535#define public_fREe dlfree
536#define public_mALLOc dlmalloc
537#define public_mEMALIGn dlmemalign
538#define public_posix_mEMALIGn dlposix_memalign
539#define public_rEALLOc dlrealloc
540#define public_vALLOc dlvalloc
541#define public_pVALLOc dlpvalloc
542#define public_mALLINFo dlmallinfo
543#define public_mALLOPt dlmallopt
544#define public_mTRIm dlmalloc_trim
545#define public_mSTATs dlmalloc_stats
546#define public_mUSABLe dlmalloc_usable_size
547#else /* USE_DL_PREFIX */
548#define public_cALLOc calloc
549#define public_fREe free
550#define public_mALLOc malloc
551#define public_mEMALIGn memalign
552#define public_posix_mEMALIGn posix_memalign
553#define public_rEALLOc realloc
554#define public_vALLOc valloc
555#define public_pVALLOc pvalloc
556#define public_mALLINFo mallinfo
557#define public_mALLOPt mallopt
558#define public_mTRIm malloc_trim
559#define public_mSTATs malloc_stats
560#define public_mUSABLe malloc_usable_size
561#endif /* USE_DL_PREFIX */
562
563
564/*
565 HAVE_MEMCPY should be defined if you are not otherwise using
566 ANSI STD C, but still have memcpy and memset in your C library
567 and want to use them in calloc and realloc. Otherwise simple
568 macro versions are defined below.
569
570 USE_MEMCPY should be defined as 1 if you actually want to
571 have memset and memcpy called. People report that the macro
572 versions are faster than libc versions on some systems.
573
574 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
575 (of <= 36 bytes) are manually unrolled in realloc and calloc.
576*/
577
578#ifndef HAVE_MEMCPY
579#define HAVE_MEMCPY
580#endif
581
582#ifndef USE_MEMCPY
583#ifdef HAVE_MEMCPY
584#define USE_MEMCPY 1
585#else
586#define USE_MEMCPY 0
587#endif
588#endif
589
590
591#if (__STD_C || defined(HAVE_MEMCPY))
592
593#ifdef WIN32
[173]594 /* On Win32 memset and memcpy are already declared in windows.h */
[171]595#else
596#if __STD_C
[173]597 /* Defined in string.h */
[171]598#else
599Void_t* memset();
600Void_t* memcpy();
601#endif
602#endif
603#endif
604
605/*
606 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
607 malloc fails to be able to return memory, either because memory is
608 exhausted or because of illegal arguments.
609
610 By default, sets errno if running on STD_C platform, else does nothing.
611*/
612
613#ifndef MALLOC_FAILURE_ACTION
614#if __STD_C
615#define MALLOC_FAILURE_ACTION \
616 errno = ENOMEM;
617
618#else
619#define MALLOC_FAILURE_ACTION
620#endif
621#endif
622
623/*
624 MORECORE-related declarations. By default, rely on sbrk
625*/
626
627
628#if !defined(HAVE_UNISTD_H)
629#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
630#if __STD_C
631extern Void_t* sbrk(ptrdiff_t);
632#else
633extern Void_t* sbrk();
634#endif
635#endif
636#endif
637
638/*
639 MORECORE_FAILURE is the value returned upon failure of MORECORE
640 as well as mmap. Since it cannot be an otherwise valid memory address,
641 and must reflect values of standard sys calls, you probably ought not
642 try to redefine it.
643*/
644
645#ifndef MORECORE_FAILURE
646#define MORECORE_FAILURE ((void*)(-1UL))
647#endif
648
649/*
650 MORECORE is the name of the routine to call to obtain more memory
651 from the system. See below for general guidance on writing
652 alternative MORECORE functions, as well as a version for WIN32 and a
653 sample version for pre-OSX macos.
654*/
655
656#ifndef MORECORE
657#define MORECORE sbrk
658#endif
659
660
661/*
662 If MORECORE_CONTIGUOUS is true, take advantage of fact that
663 consecutive calls to MORECORE with positive arguments always return
664 contiguous increasing addresses. This is true of unix sbrk. Even
665 if not defined, when regions happen to be contiguous, malloc will
666 permit allocations spanning regions obtained from different
667 calls. But defining this when applicable enables some stronger
668 consistency checks and space efficiencies.
669*/
670
671#ifndef MORECORE_CONTIGUOUS
672#define MORECORE_CONTIGUOUS 1
673#endif
674
675/*
676 Define MORECORE_CANNOT_TRIM if your version of MORECORE
677 cannot release space back to the system when given negative
678 arguments. This is generally necessary only if you are using
679 a hand-crafted MORECORE function that cannot handle negative arguments.
680*/
681
682/* #define MORECORE_CANNOT_TRIM */
683
684
685/*
686 This malloc requires mmap for heap management data. It is an error
687 if mmap is not available.
688
689 Additionally, mmap will be used to satisfy large requests.
690*/
691
692#ifndef HAVE_MMAP
[172]693# error HAVE_MMAP not defined, has your operating system mmap?
[171]694#endif
695
696/*
697 Standard unix mmap using /dev/zero clears memory so calloc doesn't
698 need to.
699*/
700
701#ifndef MMAP_CLEARS
702#define MMAP_CLEARS 1
703#endif
704
705
706/*
707 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
708 sbrk fails, and mmap is used as a backup (which is done only if
709 HAVE_MMAP). The value must be a multiple of page size. This
710 backup strategy generally applies only when systems have "holes" in
711 address space, so sbrk cannot perform contiguous expansion, but
712 there is still space available on system. On systems for which
713 this is known to be useful (i.e. most linux kernels), this occurs
714 only when programs allocate huge amounts of memory. Between this,
715 and the fact that mmap regions tend to be limited, the size should
716 be large, to avoid too many mmap calls and thus avoid running out
717 of kernel resources.
718*/
719
720#ifndef MMAP_AS_MORECORE_SIZE
721#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
722#endif
723
724
725/*
726 The system page size. To the extent possible, this malloc manages
727 memory from the system in page-size units. Note that this value is
728 cached during initialization into a field of malloc_state. So even
729 if malloc_getpagesize is a function, it is only called once.
730
731 The following mechanics for getpagesize were adapted from bsd/gnu
732 getpagesize.h. If none of the system-probes here apply, a value of
733 4096 is used, which should be OK: If they don't apply, then using
734 the actual value probably doesn't impact performance.
735*/
736
737
738#ifndef malloc_getpagesize
739
740# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
741# ifndef _SC_PAGE_SIZE
742# define _SC_PAGE_SIZE _SC_PAGESIZE
743# endif
744# endif
745
746# ifdef _SC_PAGE_SIZE
747# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
748# else
749# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
750 extern size_t getpagesize();
751# define malloc_getpagesize getpagesize()
752# else
753# ifdef WIN32 /* use supplied emulation of getpagesize */
754# define malloc_getpagesize getpagesize()
755# else
756# if defined(HAVE_SYS_PARAM_H)
757# include <sys/param.h>
758# endif
759# ifdef EXEC_PAGESIZE
760# define malloc_getpagesize EXEC_PAGESIZE
761# else
762# ifdef NBPG
763# ifndef CLSIZE
764# define malloc_getpagesize NBPG
765# else
766# define malloc_getpagesize (NBPG * CLSIZE)
767# endif
768# else
769# ifdef NBPC
770# define malloc_getpagesize NBPC
771# else
772# ifdef PAGESIZE
773# define malloc_getpagesize PAGESIZE
774# else /* just guess */
775# define malloc_getpagesize (4096)
776# endif
777# endif
778# endif
779# endif
780# endif
781# endif
782# endif
783#endif
784
785/*
786 This version of malloc supports the standard SVID/XPG mallinfo
787 routine that returns a struct containing usage properties and
788 statistics. It should work on any SVID/XPG compliant system that has
789 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
790 install such a thing yourself, cut out the preliminary declarations
791 as described above and below and save them in a malloc.h file. But
792 there's no compelling reason to bother to do this.)
793
794 The main declaration needed is the mallinfo struct that is returned
 795 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
796 bunch of fields that are not even meaningful in this version of
 797 malloc. These fields are instead filled by mallinfo() with
798 other numbers that might be of interest.
799
800 HAVE_MALLOC_H should be set if you have a
801 /usr/include/malloc.h file that includes a declaration of struct
802 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
803 version is declared below. These must be precisely the same for
804 mallinfo() to work. The original SVID version of this struct,
805 defined on most systems with mallinfo, declares all fields as
 806 ints, but some others define them as unsigned long. If your system
807 defines the fields using a type of different width than listed here,
808 you must #include your system version and #define
809 HAVE_MALLOC_H.
810*/
811
812/* #define HAVE_MALLOC_H */
813
814/* On *BSD, malloc.h is deprecated, and on some *BSD including
815 * it may actually raise an error.
816 */
817#if defined(HAVE_MALLOC_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
818#include <malloc.h>
819#else
820
821/* SVID2/XPG mallinfo structure */
822
823struct mallinfo {
824 int arena; /* non-mmapped space allocated from system */
825 int ordblks; /* number of free chunks */
826 int smblks; /* number of fastbin blocks */
827 int hblks; /* number of mmapped regions */
828 int hblkhd; /* space in mmapped regions */
829 int usmblks; /* maximum total allocated space */
830 int fsmblks; /* space available in freed fastbin blocks */
831 int uordblks; /* total allocated space */
832 int fordblks; /* total free space */
833 int keepcost; /* top-most, releasable (via malloc_trim) space */
834};
835
836/*
837 SVID/XPG defines four standard parameter numbers for mallopt,
838 normally defined in malloc.h. Only one of these (M_MXFAST) is used
839 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
840 so setting them has no effect. But this malloc also supports other
841 options in mallopt described below.
842*/
843#endif
844
845
846/* ---------- description of public routines ------------ */
847
848/*
849 malloc(size_t n)
850 Returns a pointer to a newly allocated chunk of at least n bytes, or null
851 if no space is available. Additionally, on failure, errno is
852 set to ENOMEM on ANSI C systems.
853
 854 If n is zero, malloc returns a minimum-sized chunk. (The minimum
855 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
856 systems.) On most systems, size_t is an unsigned type, so calls
857 with negative arguments are interpreted as requests for huge amounts
858 of space, which will often fail. The maximum supported value of n
859 differs across systems, but is in all cases less than the maximum
860 representable value of a size_t.
861*/
862#if __STD_C
863Void_t* public_mALLOc(size_t) __attribute_malloc__;
864#else
865Void_t* public_mALLOc();
866#endif
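/*
   Usage sketch: on failure malloc returns null and (on ANSI systems)
   sets errno to ENOMEM, so a caller can report the reason:

     char *buf = malloc(n);
     if (buf == NULL) {
       perror("malloc");
       return -1;
     }
*/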
867
868/*
869 free(Void_t* p)
870 Releases the chunk of memory pointed to by p, that had been previously
871 allocated using malloc or a related routine such as realloc.
872 It has no effect if p is null. It can have arbitrary (i.e., bad!)
873 effects if p has already been freed.
874
 875 Unless disabled (using mallopt), freeing very large spaces will,
876 when possible, automatically trigger operations that give
877 back unused memory to the system, thus reducing program footprint.
878*/
879#if __STD_C
880void public_fREe(Void_t*);
881#else
882void public_fREe();
883#endif
884
885/*
886 calloc(size_t n_elements, size_t element_size);
887 Returns a pointer to n_elements * element_size bytes, with all locations
888 set to zero.
889*/
890#if __STD_C
891Void_t* public_cALLOc(size_t, size_t) __attribute_malloc__;
892#else
893Void_t* public_cALLOc();
894#endif
895
896/*
897 realloc(Void_t* p, size_t n)
898 Returns a pointer to a chunk of size n that contains the same data
899 as does chunk p up to the minimum of (n, p's size) bytes, or null
900 if no space is available.
901
902 The returned pointer may or may not be the same as p. The algorithm
903 prefers extending p when possible, otherwise it employs the
904 equivalent of a malloc-copy-free sequence.
905
906 If p is null, realloc is equivalent to malloc.
907
908 If space is not available, realloc returns null, errno is set (if on
909 ANSI) and p is NOT freed.
910
 911 If n is for fewer bytes than already held by p, the newly unused
912 space is lopped off and freed if possible. Unless the #define
913 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
914 zero (re)allocates a minimum-sized chunk.
915
916 Large chunks that were internally obtained via mmap will always
917 be reallocated using malloc-copy-free sequences unless
918 the system supports MREMAP (currently only linux).
919
920 The old unix realloc convention of allowing the last-free'd chunk
921 to be used as an argument to realloc is not supported.
922*/
923#if __STD_C
924Void_t* public_rEALLOc(Void_t*, size_t) __attribute_malloc__;
925#else
926Void_t* public_rEALLOc();
927#endif
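/*
   Usage sketch: because a failed realloc leaves p allocated, assign the
   result to a temporary first so that the original block is not leaked:

     void *tmp = realloc(p, newsize);
     if (tmp != NULL)
       p = tmp;
     else
       handle_allocation_failure();

   (handle_allocation_failure is a hypothetical application routine;
   the point is that p is still valid and still owned by the caller.)
*/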
928
929/*
930 memalign(size_t alignment, size_t n);
931 Returns a pointer to a newly allocated chunk of n bytes, aligned
932 in accord with the alignment argument.
933
934 The alignment argument should be a power of two. If the argument is
935 not a power of two, the nearest greater power is used.
936 8-byte alignment is guaranteed by normal malloc calls, so don't
937 bother calling memalign with an argument of 8 or less.
938
939 Overreliance on memalign is a sure way to fragment space.
940*/
941#if __STD_C
942Void_t* public_mEMALIGn(size_t, size_t) __attribute_malloc__;
943#else
944Void_t* public_mEMALIGn();
945#endif
946
947/*
948 posix_memalign(void** memptr, size_t alignment, size_t n);
949 Sets *memptr to the address of a newly allocated chunk of n bytes, aligned
950 in accord with the alignment argument. Returns 0 on success, otherwise
951 an error (EINVAL for incorrect alignment, ENOMEM for out of memory).
952
953 The alignment must be a power of two, and a multiple of sizeof(void *).
954*/
955#if __STD_C
956int public_posix_mEMALIGn(Void_t**, size_t, size_t);
957#else
958int public_posix_mEMALIGn();
959#endif
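/*
   Usage sketch: posix_memalign reports errors through its return value
   rather than through a null pointer:

     void *p;
     int rc = posix_memalign(&p, 64, 1024);
     if (rc == 0)
       ...            p points to 64-byte aligned memory
     else if (rc == EINVAL)
       ...            alignment not a power of two, or not a multiple
                      of sizeof(void *)
     else if (rc == ENOMEM)
       ...            out of memory
*/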
960
961/*
962 valloc(size_t n);
963 Equivalent to memalign(pagesize, n), where pagesize is the page
964 size of the system. If the pagesize is unknown, 4096 is used.
965*/
966#if __STD_C
967Void_t* public_vALLOc(size_t) __attribute_malloc__;
968#else
969Void_t* public_vALLOc();
970#endif
971
972
973
974/*
975 mallopt(int parameter_number, int parameter_value)
 976 Sets tunable parameters. The format is to provide a
977 (parameter-number, parameter-value) pair. mallopt then sets the
978 corresponding parameter to the argument value if it can (i.e., so
979 long as the value is meaningful), and returns 1 if successful else
980 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
981 normally defined in malloc.h. Only one of these (M_MXFAST) is used
982 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
983 so setting them has no effect. But this malloc also supports four
984 other options in mallopt. See below for details. Briefly, supported
985 parameters are as follows (listed defaults are for "typical"
986 configurations).
987
988 Symbol param # default allowed param values
989 M_MXFAST 1 64 0-80 (0 disables fastbins)
990 M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
991 M_TOP_PAD -2 0 any
992 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
993 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
994*/
995#if __STD_C
996int public_mALLOPt(int, int);
997#else
998int public_mALLOPt();
999#endif
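/*
   Usage sketch for the parameters listed above:

     mallopt(M_MXFAST, 0);                   disables fastbins entirely
     mallopt(M_TRIM_THRESHOLD, 128*1024);    trim when 128K is unused at top
     mallopt(M_MMAP_THRESHOLD, 192*1024);    mmap requests of 192K and larger

   Each call returns 1 if the value was accepted and 0 otherwise.
*/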
1000
1001
1002/*
1003 mallinfo()
1004 Returns (by copy) a struct containing various summary statistics:
1005
1006 arena: current total non-mmapped bytes allocated from system
1007 ordblks: the number of free chunks
1008 smblks: the number of fastbin blocks (i.e., small chunks that
 1009 have been freed but not yet reused or consolidated)
1010 hblks: current number of mmapped regions
1011 hblkhd: total bytes held in mmapped regions
1012 usmblks: the maximum total allocated space. This will be greater
1013 than current total if trimming has occurred.
1014 fsmblks: total bytes held in fastbin blocks
1015 uordblks: current total allocated space (normal or mmapped)
1016 fordblks: total free space
1017 keepcost: the maximum number of bytes that could ideally be released
1018 back to system via malloc_trim. ("ideally" means that
1019 it ignores page restrictions etc.)
1020
1021 Because these fields are ints, but internal bookkeeping may
1022 be kept as longs, the reported values may wrap around zero and
1023 thus be inaccurate.
1024*/
1025#if __STD_C
1026struct mallinfo public_mALLINFo(void);
1027#else
1028struct mallinfo public_mALLINFo();
1029#endif
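/*
   Usage sketch: mallinfo returns the struct by value, so it can be used
   directly, e.g. to log current totals:

     struct mallinfo mi = mallinfo();
     fprintf(stderr, "in use: %d  free: %d  mmapped: %d\n",
             mi.uordblks, mi.fordblks, mi.hblkhd);

   (Keep the caveat above in mind: the int fields may wrap in programs
   that allocate more than an int can represent.)
*/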
1030
1031/*
1032 pvalloc(size_t n);
1033 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1034 round up n to nearest pagesize.
1035 */
1036#if __STD_C
1037Void_t* public_pVALLOc(size_t) __attribute_malloc__;
1038#else
1039Void_t* public_pVALLOc();
1040#endif
1041
1042/*
1043 malloc_trim(size_t pad);
1044
1045 If possible, gives memory back to the system (via negative
1046 arguments to sbrk) if there is unused memory at the `high' end of
1047 the malloc pool. You can call this after freeing large blocks of
1048 memory to potentially reduce the system-level memory requirements
1049 of a program. However, it cannot guarantee to reduce memory. Under
1050 some allocation patterns, some large free blocks of memory will be
1051 locked between two used chunks, so they cannot be given back to
1052 the system.
1053
1054 The `pad' argument to malloc_trim represents the amount of free
1055 trailing space to leave untrimmed. If this argument is zero,
1056 only the minimum amount of memory to maintain internal data
1057 structures will be left (one page or less). Non-zero arguments
1058 can be supplied to maintain enough trailing space to service
1059 future expected allocations without having to re-obtain memory
1060 from the system.
1061
1062 Malloc_trim returns 1 if it actually released any memory, else 0.
1063 On systems that do not support "negative sbrks", it will always
 1064 return 0.
1065*/
1066#if __STD_C
1067int public_mTRIm(size_t);
1068#else
1069int public_mTRIm();
1070#endif
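/*
   Usage sketch: after releasing a large working set, a long-lived
   program can hand unused top-of-heap memory back explicitly:

     if (malloc_trim(64 * 1024))
       ...     some memory was returned, keeping 64K of trailing slack

   A pad of 0 trims as much as possible, as described above.
*/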
1071
1072/*
1073 malloc_usable_size(Void_t* p);
1074
1075 Returns the number of bytes you can actually use in
1076 an allocated chunk, which may be more than you requested (although
1077 often not) due to alignment and minimum size constraints.
1078 You can use this many bytes without worrying about
1079 overwriting other allocated objects. This is not a particularly great
1080 programming practice. malloc_usable_size can be more useful in
1081 debugging and assertions, for example:
1082
1083 p = malloc(n);
1084 assert(malloc_usable_size(p) >= 256);
1085
1086*/
1087#if __STD_C
1088size_t public_mUSABLe(Void_t*);
1089#else
1090size_t public_mUSABLe();
1091#endif
1092
1093/*
1094 malloc_stats();
1095 Prints on stderr the amount of space obtained from the system (both
1096 via sbrk and mmap), the maximum amount (which may be more than
1097 current if malloc_trim and/or munmap got called), and the current
1098 number of bytes allocated via malloc (or realloc, etc) but not yet
1099 freed. Note that this is the number of bytes allocated, not the
1100 number requested. It will be larger than the number requested
1101 because of alignment and bookkeeping overhead. Because it includes
1102 alignment wastage as being in use, this figure may be greater than
1103 zero even when no user-level chunks are allocated.
1104
1105 The reported current and maximum system memory can be inaccurate if
1106 a program makes other calls to system memory allocation functions
1107 (normally sbrk) outside of malloc.
1108
1109 malloc_stats prints only the most commonly interesting statistics.
1110 More information can be obtained by calling mallinfo.
1111
1112*/
1113#if __STD_C
 1114void public_mSTATs(void);
1115#else
1116void public_mSTATs();
1117#endif
1118
1119/* mallopt tuning options */
1120
1121/*
1122 M_MXFAST is the maximum request size used for "fastbins", special bins
1123 that hold returned chunks without consolidating their spaces. This
1124 enables future requests for chunks of the same size to be handled
1125 very quickly, but can increase fragmentation, and thus increase the
1126 overall memory footprint of a program.
1127
1128 This malloc manages fastbins very conservatively yet still
1129 efficiently, so fragmentation is rarely a problem for values less
1130 than or equal to the default. The maximum supported value of MXFAST
1131 is 80. You wouldn't want it any higher than this anyway. Fastbins
1132 are designed especially for use with many small structs, objects or
1133 strings -- the default handles structs/objects/arrays with sizes up
1134 to 16 4byte fields, or small strings representing words, tokens,
1135 etc. Using fastbins for larger objects normally worsens
1136 fragmentation without improving speed.
1137
1138 M_MXFAST is set in REQUEST size units. It is internally used in
1139 chunksize units, which adds padding and alignment. You can reduce
1140 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1141 algorithm to be a closer approximation of fifo-best-fit in all cases,
1142 not just for larger requests, but will generally cause it to be
1143 slower.
1144*/
1145
1146
1147/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1148#ifndef M_MXFAST
1149#define M_MXFAST 1
1150#endif
1151
1152#ifndef DEFAULT_MXFAST
1153#define DEFAULT_MXFAST 64
1154#endif
1155
1156
1157/*
1158 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1159 to keep before releasing via malloc_trim in free().
1160
1161 Automatic trimming is mainly useful in long-lived programs.
1162 Because trimming via sbrk can be slow on some systems, and can
1163 sometimes be wasteful (in cases where programs immediately
1164 afterward allocate more large chunks) the value should be high
1165 enough so that your overall system performance would improve by
1166 releasing this much memory.
1167
1168 The trim threshold and the mmap control parameters (see below)
1169 can be traded off with one another. Trimming and mmapping are
1170 two different ways of releasing unused memory back to the
1171 system. Between these two, it is often possible to keep
1172 system-level demands of a long-lived program down to a bare
1173 minimum. For example, in one test suite of sessions measuring
1174 the XF86 X server on Linux, using a trim threshold of 128K and a
1175 mmap threshold of 192K led to near-minimal long term resource
1176 consumption.
1177
1178 If you are using this malloc in a long-lived program, it should
1179 pay to experiment with these values. As a rough guide, you
1180 might set to a value close to the average size of a process
1181 (program) running on your system. Releasing this much memory
1182 would allow such a process to run in memory. Generally, it's
 1183 worth it to tune for trimming rather than memory mapping when a
1184 program undergoes phases where several large chunks are
1185 allocated and released in ways that can reuse each other's
1186 storage, perhaps mixed with phases where there are no such
1187 chunks at all. And in well-behaved long-lived programs,
1188 controlling release of large blocks via trimming versus mapping
1189 is usually faster.
1190
1191 However, in most programs, these parameters serve mainly as
1192 protection against the system-level effects of carrying around
1193 massive amounts of unneeded memory. Since frequent calls to
1194 sbrk, mmap, and munmap otherwise degrade performance, the default
1195 parameters are set to relatively high values that serve only as
1196 safeguards.
1197
1198 The trim value must be greater than page size to have any useful
1199 effect. To disable trimming completely, you can set to
1200 (unsigned long)(-1)
1201
1202 Trim settings interact with fastbin (MXFAST) settings: Unless
1203 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1204 freeing a chunk with size less than or equal to MXFAST. Trimming is
1205 instead delayed until subsequent freeing of larger chunks. However,
1206 you can still force an attempted trim by calling malloc_trim.
1207
1208 Also, trimming is not generally possible in cases where
1209 the main arena is obtained via mmap.
1210
1211 Note that the trick some people use of mallocing a huge space and
1212 then freeing it at program startup, in an attempt to reserve system
1213 memory, doesn't have the intended effect under automatic trimming,
1214 since that memory will immediately be returned to the system.
1215*/
1216
1217#define M_TRIM_THRESHOLD -1
1218
1219#ifndef DEFAULT_TRIM_THRESHOLD
1220#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1221#endif
1222
1223/*
1224 M_TOP_PAD is the amount of extra `padding' space to allocate or
1225 retain whenever sbrk is called. It is used in two ways internally:
1226
1227 * When sbrk is called to extend the top of the arena to satisfy
1228 a new malloc request, this much padding is added to the sbrk
1229 request.
1230
1231 * When malloc_trim is called automatically from free(),
1232 it is used as the `pad' argument.
1233
1234 In both cases, the actual amount of padding is rounded
1235 so that the end of the arena is always a system page boundary.
1236
1237 The main reason for using padding is to avoid calling sbrk so
1238 often. Having even a small pad greatly reduces the likelihood
1239 that nearly every malloc request during program start-up (or
1240 after trimming) will invoke sbrk, which needlessly wastes
1241 time.
1242
1243 Automatic rounding-up to page-size units is normally sufficient
1244 to avoid measurable overhead, so the default is 0. However, in
1245 systems where sbrk is relatively slow, it can pay to increase
1246 this value, at the expense of carrying around more memory than
1247 the program needs.
1248*/
1249
1250#define M_TOP_PAD -2
1251
1252#ifndef DEFAULT_TOP_PAD
1253#define DEFAULT_TOP_PAD (0)
1254#endif
1255
1256/*
1257 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1258 to service a request. Requests of at least this size that cannot
1259 be allocated using already-existing space will be serviced via mmap.
1260 (If enough normal freed space already exists it is used instead.)
1261
1262 Using mmap segregates relatively large chunks of memory so that
1263 they can be individually obtained and released from the host
1264 system. A request serviced through mmap is never reused by any
1265 other request (at least not directly; the system may just so
1266 happen to remap successive requests to the same locations).
1267
1268 Segregating space in this way has the benefits that:
1269
1270 1. Mmapped space can ALWAYS be individually released back
1271 to the system, which helps keep the system level memory
1272 demands of a long-lived program low.
1273 2. Mapped memory can never become `locked' between
1274 other chunks, as can happen with normally allocated chunks, which
1275 means that even trimming via malloc_trim would not release them.
1276 3. On some systems with "holes" in address spaces, mmap can obtain
1277 memory that sbrk cannot.
1278
1279 However, it has the disadvantages that:
1280
1281 1. The space cannot be reclaimed, consolidated, and then
1282 used to service later requests, as happens with normal chunks.
1283 2. It can lead to more wastage because of mmap page alignment
1284 requirements
1285 3. It causes malloc performance to be more dependent on host
1286 system memory management support routines which may vary in
1287 implementation quality and may impose arbitrary
1288 limitations. Generally, servicing a request via normal
1289 malloc steps is faster than going through a system's mmap.
1290
1291 The advantages of mmap nearly always outweigh disadvantages for
1292 "large" chunks, but the value of "large" varies across systems. The
1293 default is an empirically derived value that works well in most
1294 systems.
1295*/
1296
1297#define M_MMAP_THRESHOLD -3
1298
1299#ifndef DEFAULT_MMAP_THRESHOLD
1300#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1301#endif
1302
1303/*
1304 M_MMAP_MAX is the maximum number of requests to simultaneously
1305 service using mmap. This parameter exists because
 1306 some systems have a limited number of internal tables for
1307 use by mmap, and using more than a few of them may degrade
1308 performance.
1309
1310 The default is set to a value that serves only as a safeguard.
1311 Setting to 0 disables use of mmap for servicing large requests. If
1312 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1313 to non-zero values in mallopt will fail.
1314*/
1315
1316#define M_MMAP_MAX -4
1317
1318#ifndef DEFAULT_MMAP_MAX
1319#define DEFAULT_MMAP_MAX (65536)
1320#endif
1321
1322#ifdef __cplusplus
1323}; /* end of extern "C" */
1324#endif
1325
1326/*
1327 ========================================================================
1328 To make a fully customizable malloc.h header file, cut everything
1329 above this line, put into file malloc.h, edit to suit, and #include it
1330 on the next line, as well as in programs that use this malloc.
1331 ========================================================================
1332*/
1333
1334/* #include "malloc.h" */
1335
1336/* --------------------- public wrappers ---------------------- */
1337
1338#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1339
1340/* DL_STATIC used to make functions (deep down) consistent
1341 * with prototypes (otherwise the prototypes are static
1342 * with USE_PUBLIC_MALLOC_WRAPPERS, but the functions aren't).
1343 * The gcc compiler doesn't care, but the HP-UX compiler does.
1344 */
1345#define DL_STATIC static
1346
1347/* Declare all routines as internal */
1348#if __STD_C
1349static Void_t* mALLOc(size_t) __attribute_malloc__;
1350static void fREe(Void_t*);
1351static Void_t* rEALLOc(Void_t*, size_t) __attribute_malloc__;
1352static Void_t* mEMALIGn(size_t, size_t) __attribute_malloc__;
1353static int posix_mEMALIGn(Void_t**, size_t, size_t);
1354static Void_t* vALLOc(size_t) __attribute_malloc__;
1355static Void_t* pVALLOc(size_t) __attribute_malloc__;
1356static Void_t* cALLOc(size_t, size_t) __attribute_malloc__;
1357static int mTRIm(size_t);
1358static size_t mUSABLe(Void_t*);
1359static void mSTATs();
1360static int mALLOPt(int, int);
1361static struct mallinfo mALLINFo(void);
1362#else
1363static Void_t* mALLOc();
1364static void fREe();
1365static Void_t* rEALLOc();
1366static Void_t* mEMALIGn();
1367static int posix_mEMALIGn();
1368static Void_t* vALLOc();
1369static Void_t* pVALLOc();
1370static Void_t* cALLOc();
1371static int mTRIm();
1372static size_t mUSABLe();
1373static void mSTATs();
1374static int mALLOPt();
1375static struct mallinfo mALLINFo();
1376#endif
1377
1378/*
1379 MALLOC_PREACTION and MALLOC_POSTACTION should be
1380 defined to return 0 on success, and nonzero on failure.
1381 The return value of MALLOC_POSTACTION is currently ignored
1382 in wrapper functions since there is no reasonable default
1383 action to take on failure.
1384*/
1385
1386
1387#ifdef USE_MALLOC_LOCK
1388
1389# ifdef WIN32
1390
1391static int mALLOC_MUTEx;
1392#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1393#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1394int dnmalloc_pthread_init(void) { return 0; }
1395
1396# elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__)
1397
1398# if defined(__NetBSD__)
1399#include <reentrant.h>
1400extern int __isthreaded;
1401static mutex_t thread_lock = MUTEX_INITIALIZER;
1402#define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock)
1403#define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock)
1404void _malloc_prefork(void) { _MALLOC_LOCK(); }
1405void _malloc_postfork(void) { _MALLOC_UNLOCK(); }
1406# endif
1407
1408# if defined(__OpenBSD__)
1409extern int __isthreaded;
1410void _thread_malloc_lock(void);
1411void _thread_malloc_unlock(void);
1412#define _MALLOC_LOCK() if (__isthreaded) _thread_malloc_lock()
1413#define _MALLOC_UNLOCK() if (__isthreaded) _thread_malloc_unlock()
1414# endif
1415
1416# if defined(__FreeBSD__)
1417extern int __isthreaded;
1418struct _spinlock {
1419 volatile long access_lock;
1420 volatile long lock_owner;
1421 volatile char *fname;
1422 volatile int lineno;
1423};
1424typedef struct _spinlock spinlock_t;
1425#define _SPINLOCK_INITIALIZER { 0, 0, 0, 0 }
1426void _spinlock(spinlock_t *);
1427void _spinunlock(spinlock_t *);
1428/* # include "/usr/src/lib/libc/include/spinlock.h" */
1429static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
1430spinlock_t *__malloc_lock = &thread_lock;
1431#define _MALLOC_LOCK() if (__isthreaded) _spinlock(&thread_lock)
1432#define _MALLOC_UNLOCK() if (__isthreaded) _spinunlock(&thread_lock)
1433# endif
1434
1435/* Common for all three *BSD
1436 */
1437static int malloc_active = 0;
1438static int dnmalloc_mutex_lock()
1439{
1440 _MALLOC_LOCK();
1441 if (!malloc_active)
1442 {
1443 ++malloc_active;
1444 return 0;
1445 }
1446 assert(malloc_active == 0);
1447 _MALLOC_UNLOCK();
1448 errno = EDEADLK;
1449 return 1;
1450}
1451static int dnmalloc_mutex_unlock()
1452{
1453 --malloc_active;
1454 _MALLOC_UNLOCK();
1455 return 0;
1456}
1457#define MALLOC_PREACTION dnmalloc_mutex_lock()
1458#define MALLOC_POSTACTION dnmalloc_mutex_unlock()
1459int dnmalloc_pthread_init(void) { return 0; }
1460
1461# else
1462
1463/* Wrapping malloc with pthread_mutex_lock/pthread_mutex_unlock
1464 *
1465 * Works fine on linux (no malloc in pthread_mutex_lock)
 1466 * Works on HP-UX if initialized after entering main()
1467 */
1468#include <pthread.h>
1469static int malloc_active = 0;
1470void dnmalloc_fork_prepare(void);
1471void dnmalloc_fork_parent(void);
1472void dnmalloc_fork_child(void);
1473
1474#if !defined(__linux__)
1475
1476static pthread_mutex_t mALLOC_MUTEx;
1477pthread_once_t dnmalloc_once_control = PTHREAD_ONCE_INIT;
1478static int dnmalloc_use_mutex = 0;
1479static void dnmalloc_pthread_init_int(void)
1480{
1481 pthread_mutexattr_t mta;
1482 pthread_mutexattr_init(&mta);
1483 pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
1484 pthread_mutex_init(&(mALLOC_MUTEx), &mta);
1485 pthread_mutexattr_destroy(&mta);
1486 pthread_atfork(dnmalloc_fork_prepare,
1487 dnmalloc_fork_parent,
1488 dnmalloc_fork_child);
1489 dnmalloc_use_mutex = 1;
1490}
1491int dnmalloc_pthread_init(void)
1492{
1493 return pthread_once(&dnmalloc_once_control, dnmalloc_pthread_init_int);
1494}
1495
1496#else
1497
1498static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1499static int dnmalloc_use_mutex = 1;
1500int dnmalloc_pthread_init(void) {
1501 return pthread_atfork(dnmalloc_fork_prepare,
1502 dnmalloc_fork_parent,
1503 dnmalloc_fork_child);
1504}
1505#endif /* !defined(__linux__) */
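/*
   Usage sketch (see the HP-UX note above): a threaded application
   should call dnmalloc_pthread_init() once, early in main() and before
   creating any threads.  On the other configurations in this file the
   function is either protected by pthread_once or simply returns 0, so
   an unconditional call is harmless:

     int main(int argc, char **argv)
     {
       dnmalloc_pthread_init();
       ...
     }
*/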
1506
1507void dnmalloc_fork_prepare(void) {
1508 if (dnmalloc_use_mutex)
1509 pthread_mutex_lock(&mALLOC_MUTEx);
1510}
1511void dnmalloc_fork_parent(void) {
1512 if (dnmalloc_use_mutex)
1513 pthread_mutex_unlock(&mALLOC_MUTEx);
1514}
[174]1515void dnmalloc_fork_child(void) {
[180]1516 int rc = 0;
[171]1517#ifdef __GLIBC__
1518 if (dnmalloc_use_mutex)
[174]1519 {
1520 pthread_mutex_unlock (&mALLOC_MUTEx);
1521 pthread_mutex_destroy(&mALLOC_MUTEx);
1522 rc = pthread_mutex_init(&mALLOC_MUTEx, NULL);
1523 }
[171]1524#else
1525 if (dnmalloc_use_mutex)
[174]1526 rc = pthread_mutex_unlock(&mALLOC_MUTEx);
[171]1527#endif
[174]1528 if (rc != 0)
1529 {
1530 fputs("fork_child failed", stderr);
1531 _exit(EXIT_FAILURE);
1532 }
[171]1533}
1534static int dnmalloc_mutex_lock(pthread_mutex_t *mutex)
1535{
1536 if (dnmalloc_use_mutex)
1537 {
1538 int rc = pthread_mutex_lock(mutex);
1539 if (rc == 0)
1540 {
1541 if (!malloc_active)
1542 {
1543 ++malloc_active;
1544 return 0;
1545 }
1546 assert(malloc_active == 0);
1547 (void) pthread_mutex_unlock(mutex);
1548 errno = EDEADLK;
1549 return 1;
1550 }
1551 return rc;
1552 }
1553 return 0;
1554}
1555static int dnmalloc_mutex_unlock(pthread_mutex_t *mutex)
1556{
1557 if (dnmalloc_use_mutex)
1558 {
1559 --malloc_active;
1560 return pthread_mutex_unlock(mutex);
1561 }
1562 return 0;
1563}
1564# define MALLOC_PREACTION dnmalloc_mutex_lock(&mALLOC_MUTEx)
1565# define MALLOC_POSTACTION dnmalloc_mutex_unlock(&mALLOC_MUTEx)
1566
1567# endif
1568
1569#else
1570
1571/* Substitute anything you like for these */
1572
1573# define MALLOC_PREACTION (0)
1574# define MALLOC_POSTACTION (0)
1575int dnmalloc_pthread_init(void) { return 0; }
1576
1577#endif /* USE_MALLOC_LOCK */
1578
1579Void_t* public_mALLOc(size_t bytes) {
1580 Void_t* m;
1581 if (MALLOC_PREACTION == 0) {
1582 m = mALLOc(bytes);
1583 (void) MALLOC_POSTACTION;
1584 return m;
1585 }
1586 return 0;
1587}
1588
1589void public_fREe(Void_t* m) {
1590 if (MALLOC_PREACTION == 0) {
1591 fREe(m);
1592 (void) MALLOC_POSTACTION;
1593 }
1594}
1595
1596Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1597 if (MALLOC_PREACTION == 0) {
1598 m = rEALLOc(m, bytes);
1599 (void) MALLOC_POSTACTION;
1600 return m;
1601 }
1602 return 0;
1603}
1604
1605Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1606 Void_t* m;
1607 if (MALLOC_PREACTION == 0) {
1608 m = mEMALIGn(alignment, bytes);
1609 (void) MALLOC_POSTACTION;
1610 return m;
1611 }
1612 return 0;
1613}
1614
1615int public_posix_mEMALIGn(Void_t**memptr, size_t alignment, size_t bytes) {
1616 int m, ret;
1617 if ((ret = MALLOC_PREACTION) == 0) {
1618 m = posix_mEMALIGn(memptr, alignment, bytes);
1619 (void) MALLOC_POSTACTION;
1620 return m;
1621 }
1622 return ret;
1623}
1624
1625Void_t* public_vALLOc(size_t bytes) {
1626 Void_t* m;
1627 if (MALLOC_PREACTION == 0) {
1628 m = vALLOc(bytes);
1629 (void) MALLOC_POSTACTION;
1630 return m;
1631 }
1632 return 0;
1633}
1634
1635Void_t* public_pVALLOc(size_t bytes) {
1636 Void_t* m;
1637 if (MALLOC_PREACTION == 0) {
1638 m = pVALLOc(bytes);
1639 (void) MALLOC_POSTACTION;
1640 return m;
1641 }
1642 return 0;
1643}
1644
1645Void_t* public_cALLOc(size_t n, size_t elem_size) {
1646 Void_t* m;
1647 if (MALLOC_PREACTION == 0) {
1648 m = cALLOc(n, elem_size);
1649 (void) MALLOC_POSTACTION;
1650 return m;
1651 }
1652 return 0;
1653}
1654
1655int public_mTRIm(size_t s) {
1656 int result;
1657 if (MALLOC_PREACTION == 0) {
1658 result = mTRIm(s);
1659 (void) MALLOC_POSTACTION;
1660 return result;
1661 }
1662 return 0;
1663}
1664
1665size_t public_mUSABLe(Void_t* m) {
1666 size_t result;
1667 if (MALLOC_PREACTION == 0) {
1668 result = mUSABLe(m);
1669 (void) MALLOC_POSTACTION;
1670 return result;
1671 }
1672 return 0;
1673}
1674
1675void public_mSTATs() {
1676 if (MALLOC_PREACTION == 0) {
1677 mSTATs();
1678 (void) MALLOC_POSTACTION;
1679 }
1680}
1681
1682struct mallinfo public_mALLINFo() {
1683 struct mallinfo m;
1684 if (MALLOC_PREACTION == 0) {
1685 m = mALLINFo();
1686 (void) MALLOC_POSTACTION;
1687 return m;
1688 } else {
1689 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1690 return nm;
1691 }
1692}
1693
1694int public_mALLOPt(int p, int v) {
1695 int result;
1696 if (MALLOC_PREACTION == 0) {
1697 result = mALLOPt(p, v);
1698 (void) MALLOC_POSTACTION;
1699 return result;
1700 }
1701 return 0;
1702}
1703
1704#else
1705
1706int dnmalloc_pthread_init(void) { return 0; }
1707#define DL_STATIC
1708
1709#endif /* USE_PUBLIC_MALLOC_WRAPPERS */
1710
1711
1712
1713/* ------------- Optional versions of memcopy ---------------- */
1714
1715
1716#if USE_MEMCPY
1717
1718/*
1719 Note: memcpy is ONLY invoked with non-overlapping regions,
1720 so the (usually slower) memmove is not needed.
1721*/
1722
1723#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1724#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1725
1726#else /* !USE_MEMCPY */
1727
1728/* Use Duff's device for good zeroing/copying performance. */
1729
1730#define MALLOC_ZERO(charp, nbytes) \
1731do { \
1732 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1733 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1734 long mcn; \
1735 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1736 switch (mctmp) { \
1737 case 0: for(;;) { *mzp++ = 0; \
1738 case 7: *mzp++ = 0; \
1739 case 6: *mzp++ = 0; \
1740 case 5: *mzp++ = 0; \
1741 case 4: *mzp++ = 0; \
1742 case 3: *mzp++ = 0; \
1743 case 2: *mzp++ = 0; \
1744 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1745 } \
1746} while(0)
1747
1748#define MALLOC_COPY(dest,src,nbytes) \
1749do { \
1750 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1751 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1752 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1753 long mcn; \
1754 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1755 switch (mctmp) { \
1756 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1757 case 7: *mcdst++ = *mcsrc++; \
1758 case 6: *mcdst++ = *mcsrc++; \
1759 case 5: *mcdst++ = *mcsrc++; \
1760 case 4: *mcdst++ = *mcsrc++; \
1761 case 3: *mcdst++ = *mcsrc++; \
1762 case 2: *mcdst++ = *mcsrc++; \
1763 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1764 } \
1765} while(0)
1766
1767#endif
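/* Worked example (illustrative only, assuming sizeof(INTERNAL_SIZE_T) == 4):
   clearing nbytes == 44, i.e. 11 words, with the MALLOC_ZERO Duff's device:

     mctmp = 44/4 = 11;  since 11 >= 8:  mcn = (11-1)/8 = 1, mctmp = 11%8 = 3

   The switch enters at "case 3" and falls through cases 3, 2 and 1,
   clearing 3 words.  At "case 1" mcn is still 1 (> 0), so it is
   decremented and the for(;;) loop runs once more from "case 0",
   clearing another 8 words before breaking: 3 + 8 = 11 words = 44 bytes.
   MALLOC_COPY unrolls the same way, copying instead of zeroing.
*/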
1768
1769/* ------------------ MMAP support ------------------ */
1770
1771
1772#if defined(HAVE_FCNTL_H)
1773#include <fcntl.h>
1774#endif
1775
1776#if defined(HAVE_SYS_MMAN_H)
1777#include <sys/mman.h>
1778#endif
1779
1780#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1781#define MAP_ANONYMOUS MAP_ANON
1782#endif
1783
1784/*
1785 Nearly all versions of mmap support MAP_ANONYMOUS,
1786 so the following is unlikely to be needed, but is
1787 supplied just in case.
1788*/
1789
1790#ifndef MAP_ANONYMOUS
1791
1792/* rw 19.05.2008 changed to avoid cached file descriptor, untested
1793 */
1794void * anon_mmap (void *addr, size_t length, int prot, int flags)
1795{
1796 void * retval = NULL;
1797 int dev_zero_fd = -1; /* File descriptor for /dev/zero. */
1798
1799 dev_zero_fd = open("/dev/zero", O_RDWR);
1800 if (dev_zero_fd >= 0)
1801 {
1802 retval = mmap((addr), (length), (prot), (flags), dev_zero_fd, 0);
1803 /* closing the file descriptor does not unmap the region */
1804 close(dev_zero_fd);
1805 }
1806 return retval;
1807}
1808
1809#define MMAP(addr, size, prot, flags) \
1810 (anon_mmap((addr), (size), (prot), (flags)))
1811
1812
1813#else /* have MAP_ANONYMOUS */
1814
1815#if !defined(MAP_32BIT) && defined(MAP_ADDR32)
1816#define MAP_32BIT MAP_ADDR32
1817#endif
1818
1819#if defined(MAP_32BIT)
1820#define MMAP(addr, size, prot, flags) \
1821 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_32BIT, -1, 0))
1822#elif defined(__sun)
1823/*
1824 * Hint an address within 32bit address space
1825 */
1826#define MMAP(addr, size, prot, flags) \
1827 (mmap((void*)0xC0000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1828#else
1829/* *BSD */
1830#define MMAP(addr, size, prot, flags) \
1831 (mmap((void*)0x80000000, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1832#endif
1833
1834#endif /* have MAP_ANONYMOUS */
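/* Illustrative usage sketch (comment only, not compiled): the allocator's
   anonymous mappings below go through the MMAP macro, which always adds
   MAP_ANONYMOUS (or the /dev/zero fallback) and, where the platform
   supports it, hints that the mapping should land in the 32bit address
   space.  A caller only passes addr/size/prot/flags and checks the result:

     void *region = MMAP(0, 16 * 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE);
     if (region == MAP_FAILED)
       abort();
     ...
     munmap(region, 16 * 4096);

   Some call sites compare against (char*)MORECORE_FAILURE instead of
   MAP_FAILED; the code treats both as the same -1 sentinel.
*/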
1835
1836
1837/*
1838 ----------------------- Chunk representations -----------------------
1839*/
1840
1841typedef void * mchunkptr;
1842
1843struct chunkinfo {
1844 INTERNAL_SIZE_T prev_size; /* Size of previous chunk in bytes */
1845 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1846 INTERNAL_SIZE_T req; /* Original request size, for guard. */
1847 struct chunkinfo* hash_next; /* pointer to the next chunkinfo in the
1848 linked list of chunks that share
1849 this chunk's hash value */
1850 struct chunkinfo* fd; /* double links -- used only if free. */
1851 struct chunkinfo* bk;
1852 mchunkptr chunk;
1853};
1854
1855typedef struct chunkinfo* chunkinfoptr;
1856
1857struct cireginfo {
1858 unsigned long position;
1859 unsigned long *freebitmap;
1860 struct cireginfo* next;
1861 struct chunkinfo *freelist;
1862 struct chunkinfo *begin;
1863 unsigned long freecounter;
1864};
1865
1866/*
1867 ---------- Size and alignment checks and conversions ----------
1868*/
1869
1870/* conversion from malloc headers to user pointers, and back */
1871#define chunk(p) (p->chunk)
1872
1873
1874#define chunk2mem(p) (chunk(p))
1875#define mem2chunk(mem) (hashtable_lookup(mem))
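/* Note (illustrative): unlike allocators that keep the chunk header
   directly in front of the user data, chunk2mem/mem2chunk here involve
   no pointer arithmetic on the user pointer.  The header (struct
   chunkinfo) lives in a separate, guard-page protected region and is
   found again through the hashtable:

     chunkinfoptr p = mem2chunk(mem);   -> hashtable_lookup(mem)
     Void_t*      m = chunk2mem(p);     -> chunk(p), i.e. mem again

   so a buffer overflow in user data cannot reach allocator metadata
   placed immediately after the buffer.
*/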
1876
[187]1877/* The smallest possible chunk */
1878/* #define MIN_CHUNK_SIZE 16 */
1879#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
1880# define MIN_CHUNK_SIZE 32
1881#else
1882# define MIN_CHUNK_SIZE 16
1883#endif
[171]1884
1885/* The smallest size we can malloc is an aligned minimal chunk */
1886
1887#define MINSIZE \
1888 (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1889
1890/* Check if m has acceptable alignment */
1891
1892#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1893
1894#define GUARD_SIZE 4
1895
1896/*
1897 Check if a request is so large that it would wrap around zero when
1898 padded and aligned. To simplify some other code, the bound is made
1899 low enough so that adding MINSIZE will also not wrap around zero.
1900
1901 Make it 4*MINSIZE.
1902*/
1903
1904#define REQUEST_OUT_OF_RANGE(req) \
1905 ((CHUNK_SIZE_T)(req) >= \
1906 (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-4 * MINSIZE))
1907
1908/* pad request bytes into a usable size -- internal version */
1909
1910#define request2size(req) \
1911 (((req) + GUARD_SIZE + MALLOC_ALIGN_MASK >= MINSIZE) ? \
1912 ((req) + GUARD_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK :\
1913 MINSIZE)
1914
1915/* Same, except also perform argument check */
1916
1917#define checked_request2size(req, sz) \
1918 if (!REQUEST_OUT_OF_RANGE(req)) { \
1919 (sz) = request2size(req); \
1920 assert((sz-req) >= GUARD_SIZE); \
1921 } else { \
1922 MALLOC_FAILURE_ACTION; \
1923 return 0; \
1924 }
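/* Worked example (assuming 8-byte alignment, i.e. MALLOC_ALIGN_MASK == 7,
   and GUARD_SIZE == 4):

     request2size(20) = (20 + 4 + 7) & ~7 = 24     (24 - 20 >= GUARD_SIZE)
     request2size(3)  = MINSIZE                    (3 + 4 + 7 < MINSIZE)

   Every usable size therefore leaves at least GUARD_SIZE bytes behind
   the requested bytes for the canary written by guard_set() below.
*/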
1925
1926#if PARANOIA > 2
1927static char * guard_set_p;
1928static char * guard_set_q;
1929
1930#define guard_set(guard, P, request, sz) \
1931 assert((sz-request) >= GUARD_SIZE); \
1932 guard_set_p = (char*)(chunk(P)); \
1933 guard_set_p += request; \
[172]1934 VALGRIND_MAKE_MEM_UNDEFINED(guard_set_p,GUARD_SIZE); \
[171]1935 guard_set_q = (char*)(guard); \
1936 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1937 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1938 *guard_set_p = *guard_set_q; ++guard_set_p; ++guard_set_q; \
1939 *guard_set_p = *guard_set_q; \
[172]1940 VALGRIND_MAKE_MEM_NOACCESS((((char*)chunk(P))+request),GUARD_SIZE); \
[171]1941 (P)->req = request
1942
1943#define guard_check(guard, P) \
[172]1944 VALGRIND_MAKE_MEM_DEFINED((((char *)chunk(P))+(P)->req), GUARD_SIZE); \
1945 assert(0 == memcmp((((char *)chunk(P))+(P)->req),(void*)(guard),GUARD_SIZE));\
1946 VALGRIND_MAKE_MEM_NOACCESS((((char *)chunk(P))+(P)->req), GUARD_SIZE);
[171]1947
1948#else
1949#define guard_set(guard, P, request, sz) ((void)0)
1950#define guard_check(guard, P) ((void)0)
1951#endif /* PARANOIA > 2 */
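/* Illustrative layout (PARANOIA > 2, example values): for a request of
   20 bytes rounded up to sz == 24, guard_set() copies the GUARD_SIZE
   random canary bytes (kept in the malloc_state as guard_stored) right
   behind the requested bytes and records the request size:

     chunk(P):  [ 20 bytes user data ][ 4 canary bytes ][ ... ]
                                       ^ offset (P)->req == 20

   guard_check(), called from free()/realloc(), memcmp()s those bytes
   against the stored canary and asserts on mismatch, catching small
   overflows of the requested size.
*/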
1952
1953/* dnmalloc forward declarations */
1954static char * dnmalloc_arc4random(void);
1955static void dnmalloc_init (void);
1956static void malloc_mmap_state(void);
1957static void cireg_extend (void);
1958static chunkinfoptr cireg_getfree (void);
1959static void hashtable_add (chunkinfoptr ci);
1960static void hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert);
1961static void hashtable_remove (mchunkptr p);
1962static void hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete);
1963static chunkinfoptr hashtable_lookup (mchunkptr p);
1964static chunkinfoptr next_chunkinfo (chunkinfoptr ci);
1965static chunkinfoptr prev_chunkinfo (chunkinfoptr ci);
1966
1967
1968
1969/*
1970 --------------- Physical chunk operations ---------------
1971*/
1972
1973
1974/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1975#define PREV_INUSE 0x1
1976
1977/* extract inuse bit of previous chunk */
1978#define prev_inuse(p) ((p)->size & PREV_INUSE)
1979
1980/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1981#define IS_MMAPPED 0x2
1982
1983/* check for mmap()'ed chunk */
1984#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1985
1986
1987/* size field is or'ed when the chunk is in use */
1988#define INUSE 0x4
1989
1990/* extract inuse bit of chunk */
1991#define inuse(p) ((p)->size & INUSE)
1992
1993/*
1994 Bits to mask off when extracting size
1995
1996 Note: IS_MMAPPED is intentionally not masked off from size field in
1997 macros for which mmapped chunks should never be seen. This should
1998 cause helpful core dumps to occur if it is tried by accident by
1999 people extending or adapting this malloc.
2000*/
2001#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|INUSE)
2002
2003/* Bits to mask off when extracting size of chunks for macros which do not use mmap */
2004#define SIZE_NOMMAP (PREV_INUSE|INUSE)
2005
2006/* Get size, ignoring use bits */
2007#define chunksize(p) ((p)->size & ~(SIZE_BITS))
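/* Worked example: a 104-byte chunk whose previous neighbour is in use
   and which is itself allocated has

     p->size == 104 | PREV_INUSE | INUSE == 0x68 | 0x1 | 0x4 == 0x6D

   chunksize(p) masks the flag bits off again and yields 104, while
   prev_inuse(p), chunk_is_mmapped(p) and inuse(p) test the single bits.
*/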
2008
2009/* Ptr to chunkinfo of next physical malloc_chunk. */
2010#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & SIZE_NOMMAP) ))
2011
2012/* Treat space at ptr + offset as a chunk */
2013#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2014
2015/* set/clear chunk as being inuse without otherwise disturbing */
2016#define set_inuse(p) ((p)->size |= INUSE)
2017
2018#define clear_inuse(p) ((p)->size &= ~(INUSE))
2019
2020#define set_previnuse(p) ((p)->size |= PREV_INUSE)
2021
2022#define clear_previnuse(p) ((p)->size &= ~(PREV_INUSE))
2023
2024static void set_previnuse_next (chunkinfoptr p)
2025{
2026 chunkinfoptr q;
2027 q = next_chunkinfo (p);
2028 if (q)
2029 set_previnuse (q);
2030}
2031
2032#define set_all_inuse(p) \
2033set_inuse(p); \
2034set_previnuse_next(p);
2035
2036
2037/* Set size at head, without disturbing its use bit */
2038#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_NOMMAP) | (s)))
2039
2040/* Set size/use field */
2041#define set_head(p, s) ((p)->size = (s))
2042
2043/*
2044 Bins
2045
2046 An array of bin headers for free chunks. Each bin is doubly
2047 linked. The bins are approximately proportionally (log) spaced.
2048 There are a lot of these bins (96). This may look excessive, but
2049 works very well in practice. Most bins hold sizes that are
2050 unusual as malloc request sizes, but are more usual for fragments
2051 and consolidated sets of chunks, which is what these bins hold, so
2052 they can be found quickly. All procedures maintain the invariant
2053 that no consolidated chunk physically borders another one, so each
2054 chunk in a list is known to be preceded and followed by either
2055 inuse chunks or the ends of memory.
2056
2057 Chunks in bins are kept in size order, with ties going to the
2058 approximately least recently used chunk. Ordering isn't needed
2059 for the small bins, which all contain the same-sized chunks, but
2060 facilitates best-fit allocation for larger chunks. These lists
2061 are just sequential. Keeping them in order almost never requires
2062 enough traversal to warrant using fancier ordered data
2063 structures.
2064
2065 Chunks of the same size are linked with the most
2066 recently freed at the front, and allocations are taken from the
2067 back. This results in LRU (FIFO) allocation order, which tends
2068 to give each chunk an equal opportunity to be consolidated with
2069 adjacent freed chunks, resulting in larger free chunks and less
2070 fragmentation.
2071
2072 To simplify use in double-linked lists, each bin header acts
2073 as a malloc_chunk. This avoids special-casing for headers.
2074 But to conserve space and improve locality, we allocate
2075 only the fd/bk pointers of bins, and then use repositioning tricks
2076 to treat these as the fields of a malloc_chunk*.
2077*/
2078
2079typedef struct chunkinfo* mbinptr;
2080
2081/* addressing -- note that bin_at(0) does not exist */
2082#define bin_at(m, i) (&(m)->bins[i])
2083
2084/* analog of ++bin */
2085#define next_bin(b) (b+1)
2086
2087/* Reminders about list directionality within bins */
2088#define first(b) ((b)->fd)
2089#define last(b) ((b)->bk)
2090
2091/* Take a chunk off a bin list */
2092#define unlink(P, BK, FD) { \
2093 FD = P->fd; \
2094 BK = P->bk; \
2095 FD->bk = BK; \
2096 BK->fd = FD; \
2097}
2098
2099/*
2100 Indexing
2101
2102  Bins for sizes < 256 bytes contain chunks of all the same size, spaced
2103  8 bytes apart. Larger bins are approximately logarithmically spaced:
2104
2105    32 bins of size 8 (the small bins)
2106    64 bins spaced logarithmically, 4 per power of two, covering
2107       sizes from 256 bytes up to 16MB; the last of these also
2108       holds whatever is left (>= 16MB)
2109
2110  (These numbers follow from NBINS, NSMALLBINS, MIN_LARGE_SIZE and
2111  largebin_index() below.)
2112
2113  The bins top out around 16MB because we expect to service large
2114  requests via mmap.
2115*/
2116
2117#define NBINS 96
2118#define NSMALLBINS 32
2119#define SMALLBIN_WIDTH 8
2120#define MIN_LARGE_SIZE 256
2121
2122#define in_smallbin_range(sz) \
2123 ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2124
2125#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2126
2127/*
2128 Compute index for size. We expect this to be inlined when
2129 compiled with optimization, else not, which works out well.
2130*/
2131static int largebin_index(size_t sz) {
2132
2133 unsigned long xx = sz >> SMALLBIN_WIDTH;
2134
2135 if (xx < 0x10000)
2136 {
2137 unsigned int m; /* bit position of highest set bit of xx */
2138
2139 /* On intel, use BSRL instruction to find highest bit */
2140#if defined(__GNUC__) && defined(i386) && !defined(USE_UNO)
2141
2142 unsigned int x = (unsigned int) xx;
2143
2144 __asm__("bsrl %1,%0\n\t"
2145 : "=r" (m)
2146 : "rm" (x));
2147
2148#elif defined(__GNUC__) && defined(x86_64) && !defined(USE_UNO)
2149
2150 __asm__("bsrq %1,%0\n\t"
2151 : "=r" (m)
2152 : "rm" (xx));
2153
2154#else
2155
2156 /* Taken from Bit Twiddling Hacks
2157 * http://graphics.stanford.edu/~seander/bithacks.html
2158 * public domain
2159 */
2160 unsigned int v = (unsigned int) xx;
2161 register unsigned int shift;
2162
2163 m = (v > 0xFFFF) << 4; v >>= m;
2164 shift = (v > 0xFF ) << 3; v >>= shift; m |= shift;
2165 shift = (v > 0xF ) << 2; v >>= shift; m |= shift;
2166 shift = (v > 0x3 ) << 1; v >>= shift; m |= shift;
2167 m |= (v >> 1);
2168
2169#endif
2170
2171 /* Use next 2 bits to create finer-granularity bins */
2172 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
2173 }
2174 else
2175 {
2176 return NBINS-1;
2177 }
2178}
2179
2180#define bin_index(sz) \
2181 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
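/* Worked examples for bin_index(); the values follow directly from the
   macros and from largebin_index() above:

     bin_index(48)   = smallbin_index(48)  = 48 >> 3               =  6
     bin_index(248)  = smallbin_index(248) = 248 >> 3              = 31
     bin_index(256)  : xx = 1, m = 0 -> 32 + 0 + ((256 >> 6) & 3)  = 32
     bin_index(320)  : xx = 1, m = 0 -> 32 + 0 + ((320 >> 6) & 3)  = 33
     bin_index(1000) : xx = 3, m = 1 -> 32 + 4 + ((1000 >> 7) & 3) = 39

   Each power-of-two range above MIN_LARGE_SIZE is thus split into 4
   sub-bins, and everything >= 16MB lands in bin NBINS-1.
*/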
2182
2183/*
2184 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2185 first bin that is maintained in sorted order. This must
2186 be the smallest size corresponding to a given bin.
2187
2188 Normally, this should be MIN_LARGE_SIZE. But you can weaken
2189 best fit guarantees to sometimes speed up malloc by increasing value.
2190 Doing this means that malloc may choose a chunk that is
2191 non-best-fitting by up to the width of the bin.
2192
2193 Some useful cutoff values:
2194 512 - all bins sorted
2195 2560 - leaves bins <= 64 bytes wide unsorted
2196 12288 - leaves bins <= 512 bytes wide unsorted
2197 65536 - leaves bins <= 4096 bytes wide unsorted
2198 262144 - leaves bins <= 32768 bytes wide unsorted
2199 -1 - no bins sorted (not recommended!)
2200*/
2201
2202/* #define FIRST_SORTED_BIN_SIZE 65536 */
2203
2204#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2205
[172]2206
[171]2207/*
2208 Unsorted chunks
2209
2210 All remainders from chunk splits, as well as all returned chunks,
2211 are first placed in the "unsorted" bin. They are then placed
2212 in regular bins after malloc gives them ONE chance to be used before
2213 binning. So, basically, the unsorted_chunks list acts as a queue,
2214 with chunks being placed on it in free (and malloc_consolidate),
2215 and taken off (to be either used or placed in bins) in malloc.
2216*/
2217
2218/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2219#define unsorted_chunks(M) (bin_at(M, 1))
2220
2221/*
2222 Top
2223
2224 The top-most available chunk (i.e., the one bordering the end of
2225 available memory) is treated specially. It is never included in
2226 any bin, is used only if no other chunk is available, and is
2227 released back to the system if it is very large (see
2228 M_TRIM_THRESHOLD). Because top initially
2229 points to its own bin with initial zero size, thus forcing
2230 extension on the first malloc request, we avoid having any special
2231 code in malloc to check whether it even exists yet. But we still
2232 need to do so when getting memory from system, so we make
2233 initial_top treat the bin as a legal but unusable chunk during the
2234 interval between initialization and the first call to
2235 sYSMALLOc. (This is somewhat delicate, since it relies on
2236 the 2 preceding words to be zero during this interval as well.)
2237*/
2238
2239/* Conveniently, the unsorted bin can be used as dummy top on first call */
2240#define initial_top(M) (unsorted_chunks(M))
2241
2242/*
2243 Binmap
2244
2245 To help compensate for the large number of bins, a one-level index
2246 structure is used for bin-by-bin searching. `binmap' is a
2247 bitvector recording whether bins are definitely empty so they can
2248 be skipped over during traversals. The bits are NOT always
2249 cleared as soon as bins are empty, but instead only
2250 when they are noticed to be empty during traversal in malloc.
2251*/
2252
2253/* Conservatively use 32 bits per map word, even if on 64bit system */
2254#define BINMAPSHIFT 5
2255#define BITSPERMAP (1U << BINMAPSHIFT)
2256#define BINMAPSIZE (NBINS / BITSPERMAP)
2257
2258#define idx2block(i) ((i) >> BINMAPSHIFT)
2259#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2260
2261#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2262#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2263#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
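/* Worked example: with NBINS == 96 and 32 bits per map word, BINMAPSIZE
   is 3.  For bin index i == 39:

     idx2block(39) = 39 >> 5        = 1
     idx2bit(39)   = 1 << (39 & 31) = 1 << 7 = 0x80

   so mark_bin(m,39) sets bit 7 of binmap[1] and get_binmap(m,39) tests
   exactly that bit when malloc scans for the next non-empty bin.
*/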
2264
2265/*
2266 Fastbins
2267
2268 An array of lists holding recently freed small chunks. Fastbins
2269 are not doubly linked. It is faster to single-link them, and
2270 since chunks are never removed from the middles of these lists,
2271 double linking is not necessary. Also, unlike regular bins, they
2272 are not even processed in FIFO order (they use faster LIFO) since
2273 ordering doesn't much matter in the transient contexts in which
2274 fastbins are normally used.
2275
2276 Chunks in fastbins keep their inuse bit set, so they cannot
2277 be consolidated with other free chunks. malloc_consolidate
2278 releases all chunks in fastbins and consolidates them with
2279 other free chunks.
2280*/
2281
2282typedef struct chunkinfo* mfastbinptr;
2283
2284/* offset 2 to use otherwise unindexable first 2 bins */
2285#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2286
2287/* The maximum fastbin request size we support */
2288#define MAX_FAST_SIZE 80
2289
2290#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
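/* Worked example (assuming 8-byte alignment and GUARD_SIZE == 4):

     fastbin_index(16)        = (16 >> 3) - 2 = 0
     request2size(80)         = (80 + 4 + 7) & ~7 = 88
     fastbin_index(88)        = (88 >> 3) - 2 = 9   ->  NFASTBINS == 10

   The "- 2" skips the chunk sizes 0 and 8, which can never occur, so
   fastbin slot 0 holds the smallest possible chunks.
*/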
2291
2292/*
2293 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2294 that triggers automatic consolidation of possibly-surrounding
2295 fastbin chunks. This is a heuristic, so the exact value should not
2296 matter too much. It is defined at half the default trim threshold as a
2297 compromise heuristic to only attempt consolidation if it is likely
2298 to lead to trimming. However, it is not dynamically tunable, since
2299 consolidation reduces fragmentation surrounding large chunks even
2300 if trimming is not used.
2301*/
2302
2303#define FASTBIN_CONSOLIDATION_THRESHOLD \
2304 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2305
2306/*
2307 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2308 they are used as flags.
2309*/
2310
2311/*
2312 ANYCHUNKS_BIT held in max_fast indicates that there may be any
2313 freed chunks at all. It is set true when entering a chunk into any
2314 bin.
2315*/
2316
2317#define ANYCHUNKS_BIT (1U)
2318
2319#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
2320#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
2321#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
2322
2323/*
2324 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2325 some fastbin chunks. It is set true on entering a chunk into any
2326 fastbin, and cleared only in malloc_consolidate.
2327*/
2328
2329#define FASTCHUNKS_BIT (2U)
2330
2331#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
2332#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2333#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2334
2335/*
2336 Set value of max_fast.
2337 Use impossibly small value if 0.
2338*/
2339
2340#define set_max_fast(M, s) \
2341 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2342 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2343
2344#define get_max_fast(M) \
2345 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
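/* Worked example: set_max_fast(av, 64) stores request2size(64) == 72
   (with 8-byte alignment and GUARD_SIZE 4) in av->max_fast while
   preserving the two low flag bits; since chunk sizes are multiples of
   8, those bits never collide with the size.  get_max_fast(av) masks
   them off again, so setting/clearing FASTCHUNKS_BIT or ANYCHUNKS_BIT
   never disturbs the threshold itself.
*/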
2346
2347
2348/*
2349 morecore_properties is a status word holding dynamically discovered
2350 or controlled properties of the morecore function
2351*/
2352
2353#define MORECORE_CONTIGUOUS_BIT (1U)
2354
2355#define contiguous(M) \
2356 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
2357#define noncontiguous(M) \
2358 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
2359#define set_contiguous(M) \
2360 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
2361#define set_noncontiguous(M) \
2362 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2363
2364#define MORECORE_32BIT_BIT (2U)
2365
2366#define morecore32bit(M) \
2367 (((M)->morecore_properties & MORECORE_32BIT_BIT))
2368#define nonmorecore32bit(M) \
2369 (((M)->morecore_properties & MORECORE_32BIT_BIT) == 0)
2370#define set_morecore32bit(M) \
2371 ((M)->morecore_properties |= MORECORE_32BIT_BIT)
2372#define set_nonmorecore32bit(M) \
2373 ((M)->morecore_properties &= ~MORECORE_32BIT_BIT)
2374
2375
2376
2377/* ----------------- dnmalloc -------------------- */
2378
2379/* size of pages */
2380#define PGSIZE malloc_getpagesize
2381/* pointer size */
2382#define PTRSIZE sizeof(long)
2383
2384
2385
2386/* TODO: mmapped chunks are always multiples of pagesize -> we're wasting
2387 address space: the hashtable has granularity of 16*8, set it to something
2388 closer to pagesize for mmapped chunks (current waste: 32 positions/mmapped
2389 page)
2390*/
2391
2392/* The maximum heap size that dnmalloc can operate with
2393 * represented in hex to avoid annoying gcc warning
2394 *
2395 * Avoid integer overflow, cover complete 32bit address
2396 * space for portability. With deferred allocation, the
2397 * hashtable size is a non-issue.
2398 */
2399#define HEAPMAXSIZE_HALF 0x80000000UL
2400
2401/* How many elements are stored in the linked list */
2402#define LINKEDLSTELS 8
2403
2404/* Minimum size of a chunk */
2405
[172]2406#if (SIZEOF_UNSIGNED_LONG == 8) || defined(__arch64__) || defined(__ia64__) || defined(__x86_64__) || defined(__LP64__) || defined(__64BIT__) || defined(_LP64) || defined(_M_IA64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
2407# define MINCHUNKSIZE 32
2408#else
2409# define MINCHUNKSIZE 16
2410#endif
2411
2412
[171]2413/* The number of hashtable entries for each page:
2414Page size divided by the number of elements in the linked lists,
2415divided by the minimum chunk size
2416*/
2417#define CHUNKINFOPAGE (PGSIZE / LINKEDLSTELS / MINCHUNKSIZE)
2418
2419/* The number of hashtable entries needed to manage the memory:
2420Maximum heap size divided by page size, multiplied by the number
2421of chunkinfos per page
2422*/
2423#define AMOUNTHASH ((HEAPMAXSIZE_HALF / PGSIZE) * CHUNKINFOPAGE * 2)
2424
2425/* Initial size of the map for the hashtable
2426Number of entries multiplied by pointer size
2427*/
2428#define HASHTABLESIZE (AMOUNTHASH * PTRSIZE)
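/* Worked example (typical 32-bit system: PGSIZE 4096, PTRSIZE 4):

     CHUNKINFOPAGE = 4096 / 8 / 16                =       32
     AMOUNTHASH    = (0x80000000 / 4096) * 32 * 2 = 33554432  (2^25)
     HASHTABLESIZE = 33554432 * 4                 = 128MB of address space

   One hashtable entry thus covers 2^32 / 2^25 = 128 bytes of address
   space, which matches the ">> 7" in the hash() macro further below.
   The 128MB are only reserved, not touched, so with deferred page
   allocation the real memory cost stays proportional to actual use.
*/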
2429
2430/* Amount of free chunks that the system should allocate at the start */
2431#define NUMBER_FREE_CHUNKS 32768
2432
2433/* Initial size of the chunk info region,
2434also used when growing the region */
2435#define CIREGSIZE (NUMBER_FREE_CHUNKS * sizeof(struct chunkinfo))
2436
2437/* Start address of the heap */
2438char *startheap;
2439
2440/* pointer to the hashtable: struct chunkinfo **hashtable -> *hashtable[] */
2441chunkinfoptr *hashtable;
2442
2443/* Current chunkinfo region */
2444struct cireginfo *currciinfo = 0;
2445struct cireginfo *firstciinfo = 0;
2446
2447unsigned long totalcictr = 0;
2448
2449
2450/* Initialize the area for chunkinfos and the hashtable and protect
2451 * it with non-writable pages
2452 */
2453static void
2454dnmalloc_init ()
2455{
2456 void *hashtb;
2457 int mprot;
2458 int flags = MAP_PRIVATE;
2459
2460 /* Allocate the malloc_state struct */
2461 malloc_mmap_state();
2462
2463 /* Use MAP_NORESERVE if available (Solaris, HP-UX; most other
2464 * systems use deferred allocation anyway).
2465 */
2466#ifdef MAP_NORESERVE
2467 flags |= MAP_NORESERVE;
2468#endif
2469
2470 /* Always start at 0, hashtable covers whole 32bit address space
2471 */
2472#define STARTHEAP_IS_ZERO
2473 startheap = 0;
2474
2475 /* Map space for the hashtable */
2476#if PARANOIA > 1
2477 hashtb = MMAP(0, HASHTABLESIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, flags);
2478#else
2479 hashtb = MMAP(0, HASHTABLESIZE+PGSIZE, PROT_READ|PROT_WRITE, flags);
2480#endif
2481
2482#ifdef NDEBUG
2483 if (hashtb == MAP_FAILED) {
2484 fprintf (stderr, "Couldn't mmap hashtable: %s\n", strerror (errno));
2485 abort ();
2486 }
2487#else
2488 assert(hashtb != MAP_FAILED);
2489#endif
2490
2491 /* Put a non-accessible guard page in front of the hashtable */
2492 mprot = mprotect(hashtb, (size_t) PGSIZE, PROT_NONE);
2493#ifdef NDEBUG
2494 if (mprot == -1) {
2495 fprintf (stderr, "Couldn't mprotect first non-rw page for hashtable: %s\n",
2496 strerror (errno));
2497 abort ();
2498 }
2499#else
2500 assert(mprot != -1);
2501#endif
2502
2503 /* HP-UX: Cannot do arithmetic with pointers to objects of unknown size. */
2504 hashtable = (chunkinfoptr *) (((char*)hashtb) + PGSIZE);
2505
2506 /* Put a non-accessible guard page behind the hashtable (PARANOIA > 1) */
2507#if PARANOIA > 1
2508 mprot = mprotect((void*)((char*)hashtb+HASHTABLESIZE+PGSIZE), (size_t) PGSIZE, PROT_NONE);
2509#ifdef NDEBUG
2510 if (mprot == -1) {
2511 fprintf (stderr, "Couldn't mprotect last non-rw page for hashtable: %s\n",
2512 strerror (errno));
2513 abort ();
2514 }
2515#else
2516 assert(mprot != -1);
2517#endif
2518#endif
2519}
2520
2521
2522
2523/* Extend the region for chunk infos by mapping more memory before the region */
2524static void
2525cireg_extend ()
2526{
2527 void *newcireg;
2528 int mprot;
2529 struct cireginfo *tempciinfo = 0;
2530
2531#if PARANOIA > 1
2532 newcireg = MMAP(0, CIREGSIZE+(2*PGSIZE), PROT_READ|PROT_WRITE, MAP_PRIVATE);
2533#else
2534 newcireg = MMAP(0, CIREGSIZE+PGSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE);
2535#endif
2536
2537#ifdef NDEBUG
2538 if (newcireg == MAP_FAILED)
2539 {
2540 fprintf (stderr, "Couldn't extend chunkinfo region: %s\n",
2541 strerror (errno));
2542 abort ();
2543 }
2544#else
2545 assert(newcireg != MAP_FAILED);
2546#endif
2547 mprot = mprotect(newcireg, PGSIZE, PROT_NONE);
2548#ifdef NDEBUG
2549 if (mprot == -1) {
2550 fprintf (stderr, "Couldn't mprotect first non-rw page for extended region: %s\n",
2551 strerror (errno));
2552 abort ();
2553 }
2554#else
2555 assert(mprot != -1);
2556#endif
2557 newcireg = ((char*)newcireg)+PGSIZE;
2558
2559#if PARANOIA > 1
2560 mprot = mprotect((void*)((char*)newcireg+CIREGSIZE), (size_t) PGSIZE, PROT_NONE);
2561#ifdef NDEBUG
2562 if (mprot == -1) {
2563 fprintf (stderr, "Couldn't mprotect last non-rw page for extended region: %s\n",
2564 strerror (errno));
2565 abort ();
2566 }
2567#else
2568 assert(mprot != -1);
2569#endif
2570#endif
2571
2572 tempciinfo = currciinfo;
2573 currciinfo = (struct cireginfo *) newcireg;
2574 if (tempciinfo)
2575 tempciinfo->next = currciinfo;
2576 currciinfo->position = 1;
2577 currciinfo->freecounter = NUMBER_FREE_CHUNKS;
2578 if (!firstciinfo)
2579 firstciinfo = currciinfo;
2580 totalcictr++;
[172]2581 VALGRIND_CREATE_MEMPOOL(newcireg, 0, 0);
[171]2582}
2583
2584
2585/* Get a free chunkinfo */
2586static chunkinfoptr
2587cireg_getfree ()
2588{
2589 chunkinfoptr freeci;
2590 chunkinfoptr freelst = 0;
2591 struct cireginfo *newciinfo = firstciinfo;
2592
2593 if (newciinfo) {
2594 freelst = newciinfo->freelist;
2595
2596 if (!freelst && newciinfo->next) {
2597 do {
2598 newciinfo = newciinfo->next;
2599 freelst = newciinfo->freelist;
2600 } while (!freelst && newciinfo->next);
2601 }
2602 }
2603
2604 /* Check if there are any free chunkinfos on the list of free chunkinfos */
2605 if (freelst)
2606 {
[172]2607 freeci = freelst;
2608 newciinfo->freecounter--;
2609 newciinfo->freelist = freelst->fd;
2610
2611 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2612 sizeof(struct chunkinfo));
2613
2614 freeci->prev_size = 0;
2615 freeci->size = 0;
2616 freeci->req = 0;
2617 freeci->hash_next = NULL;
2618 freeci->fd = NULL;
2619 freeci->bk = NULL;
2620 freeci->chunk = NULL;
2621 return (freeci);
[171]2622 }
2623 else
2624 {
2625 /* No free chunkinfos, check if chunkinfo region still has place
2626 * for a chunkinfo. If not, extend the region.
2627 */
2628 if (UNLIKELY(!currciinfo || currciinfo->position == NUMBER_FREE_CHUNKS))
2629 cireg_extend ();
2630 /* Get a chunkinfo from the chunkinfo region */
2631 freeci = (chunkinfoptr) currciinfo + currciinfo->position;
2632 currciinfo->freecounter--;
2633 currciinfo->position++;
[172]2634
2635 VALGRIND_MEMPOOL_ALLOC((char*)currciinfo, (char*)freeci,
2636 sizeof(struct chunkinfo));
2637
[171]2638 return (freeci);
2639 }
2640}
2641
2642static void freeciregion(struct cireginfo *freeme) {
2643 /* free the chunkinfo region */
2644 struct cireginfo *newciinfo = firstciinfo;
2645 struct cireginfo *prevciinfo = firstciinfo;
2646 void *unmapme;
[172]2647
[171]2648 while (newciinfo && newciinfo != freeme) {
2649 prevciinfo = newciinfo;
2650 newciinfo = newciinfo->next;
2651 }
2652 assert(freeme == newciinfo); /* rw */
2653 assert(newciinfo != NULL); /* rw */
2654 if (newciinfo)
2655 prevciinfo->next = newciinfo->next;
2656 unmapme = (void *) ((char*)freeme - PGSIZE);
[172]2657 VALGRIND_DESTROY_MEMPOOL((char*)freeme);
[171]2658#if PARANOIA > 1
2659 munmap(unmapme, CIREGSIZE+(2*PGSIZE));
2660#else
2661 munmap(unmapme, CIREGSIZE+PGSIZE);
2662#endif
2663}
2664
2665
2666static void freecilst_add(chunkinfoptr p) {
2667
2668 struct cireginfo *newciinfo;
[172]2669
[171]2670 newciinfo = currciinfo;
2671 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2672 p->fd = newciinfo->freelist;
2673 newciinfo->freelist = p;
2674 newciinfo->freecounter++;
[172]2675 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2676 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2677 VALGRIND_MAKE_MEM_NOACCESS(&p->size, sizeof(INTERNAL_SIZE_T));
2678 VALGRIND_MAKE_MEM_NOACCESS(&p->req, sizeof(INTERNAL_SIZE_T));
2679 VALGRIND_MAKE_MEM_NOACCESS(&p->bk, sizeof(struct chunkinfo*));
2680 VALGRIND_MAKE_MEM_NOACCESS(&p->chunk, sizeof(mchunkptr));
[171]2681 } else {
2682 newciinfo = firstciinfo;
2683 if (newciinfo) {
2684 do {
2685 if (((chunkinfoptr) newciinfo < p) && (p < (chunkinfoptr) (newciinfo+NUMBER_FREE_CHUNKS))) {
2686 p->fd = newciinfo->freelist;
2687 newciinfo->freelist = p;
2688 newciinfo->freecounter++;
[172]2689 VALGRIND_MEMPOOL_FREE((char*)newciinfo, (char*)p);
2690 VALGRIND_MAKE_MEM_DEFINED(p,sizeof(struct chunkinfo));
2691 VALGRIND_MAKE_MEM_NOACCESS(&p->size, sizeof(INTERNAL_SIZE_T));
2692 VALGRIND_MAKE_MEM_NOACCESS(&p->req, sizeof(INTERNAL_SIZE_T));
2693 VALGRIND_MAKE_MEM_NOACCESS(&p->bk, sizeof(struct chunkinfo*));
2694 VALGRIND_MAKE_MEM_NOACCESS(&p->chunk, sizeof(mchunkptr));
[171]2695 if (UNLIKELY(newciinfo->freecounter == NUMBER_FREE_CHUNKS))
2696 freeciregion(newciinfo);
2697 break;
2698 }
2699 newciinfo = newciinfo->next;
2700 } while (newciinfo);
2701 }
2702 }
2703}
2704
2705/* Calculate the hash table entry for a chunk */
2706#ifdef STARTHEAP_IS_ZERO
2707#define hash(p) (((unsigned long) p) >> 7)
2708#else
2709#define hash(p) (((unsigned long) p - (unsigned long) startheap) >> 7)
2710#endif
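/* Worked example: with STARTHEAP_IS_ZERO the hash is simply the chunk
   address divided by 128 (= LINKEDLSTELS * MINCHUNKSIZE on 32 bit), so
   all chunks starting within the same 128-byte span share one bucket:

     hash((mchunkptr)0x08048000) = 0x08048000 >> 7 = 0x100900
     hash((mchunkptr)0x08048040) = 0x100900          (same bucket)
     hash((mchunkptr)0x08048080) = 0x100901          (next bucket)

   Chunkinfos that collide are kept sorted by chunk address on the
   hash_next links by hashtable_add() below.
*/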
2711
2712static void
2713hashtable_add (chunkinfoptr ci)
2714{
2715 chunkinfoptr temp, next;
2716 unsigned long hashval;
2717 mchunkptr cic = chunk (ci);
2718
2719 hashval = hash (cic);
2720
2721 if (hashval < AMOUNTHASH) {
2722
2723 temp = hashtable[hashval];
2724
2725#ifdef DNMALLOC_DEBUG
2726 fprintf(stderr, "hashtable_add: %p, %lu\n", chunk(ci), hashval);
2727#endif
2728
2729 /* If no pointer to a chunk info list is stored at this location
2730 * in the hashtable or if the chunk's address is smaller than the
2731 * one present, add the chunk to the front of the linked list
2732 */
2733 if (temp == 0 || chunk (temp) > cic)
2734 {
2735 ci->hash_next = temp;
2736 hashtable[hashval] = ci;
2737 if (!temp) /* more likely case */
2738 goto out;
2739 temp->prev_size = chunksize(ci);
2740 return;
2741 }
2742 else
2743 {
2744 /* We must place the chunk in the linked list for this hashentry
2745 * Loop to end of list or to a position where temp's chunk's address
2746 * is larger than the new chunkinfo's chunk's address
2747 */
2748 if (!temp->hash_next || (chunk (temp->hash_next) > cic))
2749 {
2750 ci->hash_next = temp->hash_next;
2751 temp->hash_next = ci;
2752 }
2753 else
2754 {
2755 while ((temp->hash_next != 0) && (chunk (temp->hash_next) < cic))
2756 {
2757 temp = temp->hash_next;
2758 }
2759 /* Place in linked list if not already there */
2760 if (!temp->hash_next || !(chunk (temp->hash_next) == cic))
2761 {
2762 ci->hash_next = temp->hash_next;
2763 temp->hash_next = ci;
2764 }
2765 }
2766 }
2767 }
2768 else {
2769#ifdef DNMALLOC_CHECKS
2770 if (hashval >= AMOUNTHASH) {
2771 fprintf(stderr, "Dnmalloc error: trying to write outside of the bounds of the hashtable, this is definitely a bug, please email dnmalloc@fort-knox.org (hashval: %lu, AMOUNTHASH: %lu, HEAPMAXSIZE_HALF %lu PGSIZE %ld CHUNKINFOPAGE %ld chunk: %p, chunkinfo: %p, startheap: %p).\n", hashval, AMOUNTHASH, HEAPMAXSIZE_HALF, PGSIZE, CHUNKINFOPAGE, chunk(ci), ci, startheap);
2772 abort();
2773 }
2774#else
2775 assert(hashval < AMOUNTHASH);
2776#endif
2777 }
2778
2779 out:
2780 next = next_chunkinfo(ci);
2781 if (!next)
2782 return;
2783 next->prev_size = chunksize(ci);
2784}
2785
2786static void
2787hashtable_insert (chunkinfoptr ci_orig, chunkinfoptr ci_insert)
2788{
2789 chunkinfoptr next;
2790
2791#ifdef DNMALLOC_DEBUG
2792 fprintf(stderr, "hashtable_ins: %p, %lu\n", chunk(ci_insert),
2793 (unsigned long)hash(chunk(ci_insert)));
2794#endif
2795
2796 if (hash(chunk(ci_orig)) != hash(chunk(ci_insert))) {
2797 hashtable_add(ci_insert);
2798 }
2799 else {
[172]2800
[171]2801 ci_insert->hash_next = ci_orig->hash_next;
2802 ci_orig->hash_next = ci_insert;
2803
2804 /* added for prevsize */
2805 if (!(ci_insert->hash_next))
2806 next = next_chunkinfo(ci_insert);
2807 else
2808 next = ci_insert->hash_next;
2809
2810 if (!next)
2811 {
2812 ci_insert->prev_size = chunksize(ci_orig);
2813 }
2814 else
2815 {
2816 next->prev_size = chunksize(ci_insert);
2817 ci_insert->prev_size = chunksize(ci_orig);
2818 }
2819 }
2820}
2821
2822static void
2823hashtable_remove (mchunkptr p)
2824{
2825 chunkinfoptr prevtemp, temp;
2826 unsigned long hashval;
2827
2828 hashval = hash (p);
2829#ifdef DNMALLOC_DEBUG
2830 fprintf(stderr, "hashtable_rem: %p, %lu\n", p, hashval);
2831#endif
2832 assert(hashval < AMOUNTHASH); /* rw */
2833 prevtemp = temp = hashtable[hashval];
2834 if (chunk (temp) == p) {
2835 hashtable[hashval] = temp->hash_next;
2836 }
2837 else
2838 {
2839 if (temp && chunk (temp) != p) {
2840 do
2841 {
2842 prevtemp = temp;
2843 temp = temp->hash_next;
2844 } while (temp && chunk (temp) != p);
2845 }
2846#ifdef DNMALLOC_CHECKS
2847 if (!temp) {
2848 fprintf (stderr,
2849 "Dnmalloc error (hash_rm): could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2850 p, hashval);
2851 abort();
2852 }
2853#else
2854 assert(temp != NULL);
2855#endif
2856 prevtemp->hash_next = temp->hash_next;
2857 }
2858}
2859
[172]2860/* mmapped chunks are multiples of pagesize, no hash_nexts,
2861 * just remove from the hashtable
2862 */
[171]2863#define hashtable_remove_mmapped(p) hashtable[hash(p)] = 0;
2864
2865static void
2866hashtable_skiprm (chunkinfoptr ci_orig, chunkinfoptr ci_todelete)
2867{
2868 unsigned long hashval;
2869 chunkinfoptr next;
2870
2871#ifdef DNMALLOC_DEBUG
[172]2872 fprintf(stderr, "hashtable_skiprm: %p, %lu\n", chunk(ci_todelete), hash(chunk(ci_todelete)));
[171]2873#endif
2874
2875 if (ci_orig->hash_next != ci_todelete) {
2876 hashval = hash(chunk(ci_todelete));
2877 assert(hashval < AMOUNTHASH); /* rw */
2878#ifdef DNMALLOC_CHECKS
2879 if (hashtable[hashval] != ci_todelete ) {
2880 fprintf(stderr, "Dnmalloc error: trying to delete wrong value (hash: %lu): ci_todelete: %p (%p), hashtable[hashval]: %p (%p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", hashval, ci_todelete, chunk(ci_todelete), hashtable[hashval], chunk(hashtable[hashval]));
2881 }
2882#else
2883 assert(hashtable[hashval] == ci_todelete);
2884#endif
2885 hashtable[hashval] = ci_todelete->hash_next;
2886 }
2887
2888 else {
2889 ci_orig->hash_next = ci_todelete->hash_next;
2890 if (!ci_orig->hash_next) {
2891 next = next_chunkinfo(ci_orig);
2892 } else {
2893 next = ci_orig->hash_next;
2894 }
2895 if (next)
2896 next->prev_size = chunksize(ci_orig);
2897
2898 }
2899}
2900
2901
2902static chunkinfoptr
2903hashtable_lookup (mchunkptr p)
2904{
2905 chunkinfoptr ci;
2906 unsigned long hashval;
2907
2908 /* if we were called wrongly
2909 * if ((char *) p < startheap) return 0;
2910 */
2911 if ((char *) p >= startheap)
2912 {
2913 hashval = hash (p);
2914 assert(hashval < AMOUNTHASH); /* rw */
2915 ci = hashtable[hashval];
2916 if (ci && chunk (ci) == p)
2917 return ci;
2918
2919 if (ci) {
2920 do {
2921 ci = ci->hash_next;
2922 } while (ci && chunk (ci) != p);
2923 }
2924#ifdef DNMALLOC_CHECKS
[237]2925 /* This should never occur but if it does, we'd like to know */
[171]2926 if (!ci) {
2927 fprintf (stderr,
2928 "Dnmalloc error: could not find a chunkinfo for the chunk %p in the hashtable at entry %lu\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
2929 p, hashval);
2930 abort();
2931 }
2932#else
2933 assert(ci != NULL);
2934#endif
2935 return ci;
2936 }
2937 return 0;
2938}
2939
2940
2941
2942/*
2943 ----------- Internal state representation and initialization -----------
2944*/
2945
2946struct malloc_state {
2947
2948 /* The maximum chunk size to be eligible for fastbin */
2949 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2950
2951 /* Fastbins */
2952 mfastbinptr fastbins[NFASTBINS];
2953
2954 /* Base of the topmost chunk -- not otherwise kept in a bin */
2955 chunkinfoptr top;
2956
2957 /* The remainder from the most recent split of a small request */
2958 chunkinfoptr last_remainder;
2959
2960 /* Normal bins */
2961 struct chunkinfo bins[NBINS];
2962
2963 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
2964 unsigned int binmap[BINMAPSIZE+1];
2965
2966 /* Tunable parameters */
2967 CHUNK_SIZE_T trim_threshold;
2968 INTERNAL_SIZE_T top_pad;
2969 INTERNAL_SIZE_T mmap_threshold;
2970
2971 /* Memory map support */
2972 int n_mmaps;
2973 int n_mmaps_max;
2974 int max_n_mmaps;
2975
2976 /* Cache malloc_getpagesize */
2977 unsigned int pagesize;
2978
2979 /* Canary */
2980 char guard_stored[GUARD_SIZE];
2981
2982 /* Track properties of MORECORE */
2983 unsigned int morecore_properties;
2984
2985 /* Statistics */
2986 INTERNAL_SIZE_T mmapped_mem;
2987 INTERNAL_SIZE_T sbrked_mem;
2988 INTERNAL_SIZE_T max_sbrked_mem;
2989 INTERNAL_SIZE_T max_mmapped_mem;
2990 INTERNAL_SIZE_T max_total_mem;
2991};
2992
2993typedef struct malloc_state *mstate;
2994
2995/*
2996 There is exactly one instance of this struct in this malloc.
2997 If you are adapting this malloc in a way that does NOT use a static
2998 malloc_state, you MUST explicitly zero-fill it before using. This
2999 malloc relies on the property that malloc_state is initialized to
3000 all zeroes (as is true of C statics).
3001*/
3002
3003static struct malloc_state * av_ = NULL; /* never directly referenced */
3004
3005/*
3006 All uses of av_ are via get_malloc_state().
3007 At most one "call" to get_malloc_state is made per invocation of
3008 the public versions of malloc and free, but other routines
3009 that in turn invoke malloc and/or free may call it more than once.
3010 Also, it is called in check* routines if DEBUG is set.
3011*/
3012
3013#define get_malloc_state() (av_)
3014
3015/*
3016 Initialize a malloc_state struct.
3017
3018 This is called only from within malloc_consolidate, which needs to
3019 be called in the same contexts anyway. It is never called directly
3020 outside of malloc_consolidate because some optimizing compilers try
3021 to inline it at all call points, which turns out not to be an
3022 optimization at all. (Inlining it in malloc_consolidate is fine though.)
3023*/
3024
3025#if __STD_C
3026static void malloc_mmap_state(void)
3027#else
3028static void malloc_mmap_state()
3029#endif
3030{
3031 int mprot;
3032 unsigned long pagesize = malloc_getpagesize;
3033 size_t size = (sizeof(struct malloc_state) + pagesize - 1) & ~(pagesize - 1);
3034
3035 void * foo = MMAP(0, size+(2*pagesize), PROT_READ|PROT_WRITE, MAP_PRIVATE);
3036
3037
3038#ifdef NDEBUG
3039 if (foo == MAP_FAILED) {
3040 fprintf (stderr, "Couldn't mmap struct malloc_state: %s\n", strerror (errno));
3041 abort ();
3042 }
3043#else
3044 assert(foo != MAP_FAILED);
3045#endif
3046
3047 mprot = mprotect(foo, pagesize, PROT_NONE);
3048#ifdef NDEBUG
3049 if (mprot == -1) {
3050 fprintf (stderr, "Couldn't mprotect first non-rw page for struct malloc_state: %s\n",
3051 strerror (errno));
3052 abort ();
3053 }
3054#else
3055 assert(mprot != -1);
3056#endif
3057
3058 av_ = (struct malloc_state *) ((char*)foo + pagesize);
3059
3060 MALLOC_ZERO(av_, sizeof(struct malloc_state));
3061
3062 mprot = mprotect((void*)((char*)foo + size + pagesize), (size_t) pagesize, PROT_NONE);
3063#ifdef NDEBUG
3064 if (mprot == -1) {
3065 fprintf (stderr,
3066 "Couldn't mprotect last non-rw page for struct malloc_state: %s\n",
3067 strerror (errno));
3068 abort ();
3069 }
3070#else
3071 assert(mprot != -1);
3072#endif
3073}
3074
3075#if __STD_C
3076static void malloc_init_state(mstate av)
3077#else
3078static void malloc_init_state(av) mstate av;
3079#endif
3080{
3081 int i;
3082 mbinptr bin;
3083
3084 void * morecore_test = MORECORE(0);
3085 unsigned long hashval;
3086
3087 /* Test morecore function
3088 */
3089 set_morecore32bit(av);
3090
3091 if (morecore_test == MORECORE_FAILURE)
3092 {
3093 set_nonmorecore32bit(av);
3094 }
3095 else
3096 {
3097 /* On 64bit systems, the heap may be located above the
3098 * 32bit address space. Since mmap() probably still can be
3099 * convinced to map within 32bit, we don't use sbrk().
3100 */
3101 hashval = hash (morecore_test);
3102 if (hashval >= AMOUNTHASH)
3103 {
3104 set_nonmorecore32bit(av);
3105 }
3106 }
3107
3108
3109 /* Establish circular links for normal bins */
3110 for (i = 1; i < NBINS; ++i) {
3111 bin = bin_at(av,i);
3112 bin->fd = bin->bk = bin;
3113 }
3114
3115 av->top_pad = DEFAULT_TOP_PAD;
3116 av->n_mmaps_max = DEFAULT_MMAP_MAX;
3117 av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3118 av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
3119
3120#if MORECORE_CONTIGUOUS
3121 set_contiguous(av);
3122#else
3123 set_noncontiguous(av);
3124#endif
3125
3126 set_max_fast(av, DEFAULT_MXFAST);
3127
3128 av->top = cireg_getfree ();
3129 av->top->chunk = (mchunkptr) startheap;
3130 av->top->size = 0;
3131 set_previnuse(av->top);
3132 clear_inuse(av->top);
3133 hashtable[0] = av->top;
3134 av->pagesize = malloc_getpagesize;
3135
3136 memcpy(av->guard_stored, dnmalloc_arc4random(), GUARD_SIZE);
3137
3138}
3139
3140/*
3141 Other internal utilities operating on mstates
3142*/
3143
3144#if __STD_C
3145static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
3146static int sYSTRIm(size_t, mstate);
3147static void malloc_consolidate(mstate);
3148#else
3149static Void_t* sYSMALLOc();
3150static int sYSTRIm();
3151static void malloc_consolidate();
3152#endif
3153
3154/* dnmalloc functions */
3155/* needs mstate so moved here */
3156
3157static chunkinfoptr
3158next_chunkinfo (chunkinfoptr ci)
3159{
3160 mchunkptr nextp;
3161 unsigned long hashval;
3162 chunkinfoptr cinfonextp;
3163 mstate av = get_malloc_state();
3164
3165 /* If ci is the last element in its bucket's linked list, the physically
3166 next chunk has to be looked up in the hashtable; otherwise we can
3167 simply return the next element of the list (see the else branch). */
3168 if (!ci->hash_next)
3169 {
3170 /* ci is the last element, find the next chunkinfo by
3171 * looking up the chunkinfo for the chunk that is after p's chunk
3172 */
3173 nextp = (mchunkptr) (((char *) (ci->chunk)) + chunksize (ci));
3174
3175 if (!(nextp == av->top->chunk))
3176 {
3177 hashval = hash (nextp);
3178 /* assert(hashval < AMOUNTHASH); *//* major bottleneck */
3179 cinfonextp = hashtable[hashval];
3180 if (cinfonextp && chunk (cinfonextp) == nextp)
3181 return cinfonextp;
3182
3183#ifdef DNMALLOC_CHECKS_EXTRA
3184 /* This seems bogus; a chunkinfo may legally have no nextp if
3185 * it's the last one allocated (?)
3186 */
3187 else {
3188 if (cinfonextp)
3189 fprintf (stderr,
3190 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %p, chunk(cinfonextp): %p, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3191 chunk(ci), hashval, cinfonextp, chunk(cinfonextp), nextp);
3192 else
3193 fprintf (stderr,
3194 "Dnmalloc error: could not find a next chunkinfo for the chunk %p in the hashtable at entry %lu, cinfonextp: %s, chunk(cinfonextp): %s, nextp: %p\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n",
3195 chunk(ci), hashval, "null", "null", nextp);
3196 }
3197#endif
3198
3199 return NULL;
3200 }
3201 else
3202 {
3203 return av->top;
3204 }
3205
3206 }
3207 else
3208 {
3209 return (ci->hash_next);
3210 }
3211}
3212
3213static int is_next_chunk(chunkinfoptr oldp, chunkinfoptr newp) {
3214 mchunkptr nextp;
3215 if (oldp->hash_next == newp)
3216 return 1;
3217 nextp = (mchunkptr) (((char *) (oldp->chunk)) + chunksize (oldp));
3218 if (nextp == chunk(newp))
3219 return 1;
3220 return 0;
3221}
3222
3223
3224
3225/* Get the chunkinfo of the physically previous chunk */
3226/* Since we disposed of prev_size, we need this function to find the previous */
3227
3228static chunkinfoptr
3229prev_chunkinfo (chunkinfoptr ci)
3230{
3231 unsigned int i;
3232 chunkinfoptr prev;
3233 mchunkptr prevchunk = 0;
3234 /* chunkinfoptr temp; */
3235
3236 /* Get the hashtable location of the chunkinfo */
3237 i = hash (chunk (ci));
3238 assert(i < AMOUNTHASH); /* rw */
3239
3240 /* Get the first element of the linked list of chunkinfo's that contains p */
3241 prev = hashtable[i];
3242
3243 if (ci == prev) {
3244 prevchunk = (mchunkptr) (((char *) (ci->chunk)) - (ci->prev_size));
3245 i = hash(prevchunk);
3246 assert(i < AMOUNTHASH); /* rw */
3247 /* Loop over the linked list until we reach the last element */
3248 for (prev = hashtable[i]; prev->hash_next != 0; prev = prev->hash_next) ;
3249 } else {
3250 /* p is not the first element in the linked list, we can just
3251 loop over the list and return the previous
3252 */
3253 for (prev = hashtable[i]; prev->hash_next != ci; prev = prev->hash_next);
3254 }
3255
3256 return prev;
3257}
3258
3259
3260/*
3261 Debugging support
3262 Dnmalloc broke dlmallocs debugging functions, should fix them some
3263 Dnmalloc broke dlmalloc's debugging functions; they should be fixed
3264 at some point. For now they are defined as no-ops.
3265
3266#define check_chunk(P)
3267#define check_free_chunk(P)
3268#define check_inuse_chunk(P)
3269#define check_remalloced_chunk(P,N)
3270#define check_malloced_chunk(P,N)
3271#define check_malloc_state()
3272
3273
3274/* ----------- Routines dealing with system allocation -------------- */
3275
3276/*
3277 sysmalloc handles malloc cases requiring more memory from the system.
3278 On entry, it is assumed that av->top does not have enough
3279 space to service request for nb bytes, thus requiring that av->top
3280 be extended or replaced.
3281*/
3282
3283#if __STD_C
3284static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
3285#else
3286static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
3287#endif
3288{
3289 chunkinfoptr old_top; /* incoming value of av->top */
3290 INTERNAL_SIZE_T old_size; /* its size */
3291 char* old_end; /* its end address */
3292
3293 long size; /* arg to first MORECORE or mmap call */
3294 char* brk; /* return value from MORECORE */
3295
3296 long correction; /* arg to 2nd MORECORE call */
3297 char* snd_brk; /* 2nd return val */
3298
3299 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3300 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
3301 char* aligned_brk; /* aligned offset into brk */
3302
3303 chunkinfoptr p; /* the allocated/returned chunk */
3304 chunkinfoptr remainder; /* remainder from allocation */
3305 chunkinfoptr fencepost; /* fencepost */
3306 CHUNK_SIZE_T remainder_size; /* its size */
3307
3308 CHUNK_SIZE_T sum; /* for updating stats */
3309
3310 size_t pagemask = av->pagesize - 1;
3311
3312#ifdef DNMALLOC_DEBUG
3313 fprintf(stderr, "Enter sysmalloc\n");
3314#endif
3315 /*
3316 If there is space available in fastbins, consolidate and retry
3317 malloc from scratch rather than getting memory from system. This
3318 can occur only if nb is in smallbin range so we didn't consolidate
3319 upon entry to malloc. It is much easier to handle this case here
3320 than in malloc proper.
3321 */
3322
3323
3324 if (have_fastchunks(av)) {
[172]3325 Void_t * retval;
[171]3326 assert(in_smallbin_range(nb));
3327 malloc_consolidate(av);
3328#ifdef DNMALLOC_DEBUG
3329 fprintf(stderr, "Return sysmalloc have_fastchunks\n");
3330#endif
[172]3331 retval = mALLOc(nb - MALLOC_ALIGN_MASK);
3332 VALGRIND_FREELIKE_BLOCK(retval, 0);
3333 return retval;
[171]3334 }
3335
3336
3337 /*
3338 If have mmap, and the request size meets the mmap threshold, and
3339 the system supports mmap, and there are few enough currently
3340 allocated mmapped regions, try to directly map this request
3341 rather than expanding top.
3342 */
3343
3344 if (UNLIKELY((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
3345 (av->n_mmaps < av->n_mmaps_max))) {
3346
3347 char* mm; /* return value from mmap call*/
3348
3349 /*
3350 Round up size to nearest page. For mmapped chunks, the overhead
3351 is one SIZE_SZ unit larger than for normal chunks, because there
3352 is no following chunk whose prev_size field could be used.
3353 */
3354 size = (nb + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3355
3356 /* Don't try if size wraps around 0 */
3357 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3358
3359
3360 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3361
3362 if (mm != (char*)(MORECORE_FAILURE)) {
3363
[172]3364 VALGRIND_MAKE_MEM_NOACCESS(mm,size);
3365
[171]3366 /*
3367 The offset to the start of the mmapped region is stored
3368 in the prev_size field of the chunk. This allows us to adjust
3369 returned start address to meet alignment requirements here
3370 and in memalign(), and still be able to compute proper
3371 address argument for later munmap in free() and realloc().
3372 */
3373
3374 front_misalign = (INTERNAL_SIZE_T) mm & MALLOC_ALIGN_MASK;
3375 p = cireg_getfree();
3376
3377 if (front_misalign > 0) {
3378 correction = MALLOC_ALIGNMENT - front_misalign;
3379 p->chunk = (mchunkptr)(mm + correction);
3380 p->hash_next = (chunkinfoptr) correction;
3381 set_head(p, (size - correction) |INUSE|IS_MMAPPED);
3382 }
3383 else {
3384 p->chunk = (mchunkptr)mm;
3385 p->hash_next = 0;
3386 set_head(p, size|INUSE|IS_MMAPPED);
3387 }
3388 hashtable_add(p);
3389 /* update statistics */
3390
3391 if (++av->n_mmaps > av->max_n_mmaps)
3392 av->max_n_mmaps = av->n_mmaps;
3393
3394 sum = av->mmapped_mem += size;
3395 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
3396 av->max_mmapped_mem = sum;
3397 sum += av->sbrked_mem;
3398 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3399 av->max_total_mem = sum;
3400
3401 check_chunk(p);
3402
3403#ifdef DNMALLOC_DEBUG
3404 fprintf(stderr, "Return mmapped (%lu, total %lu)\n",
3405 size, (unsigned long)/* size_t */av->max_total_mem );
3406#endif
3407 return chunk(p);
3408 }
3409 }
3410 }
3411
3412 /* Record incoming configuration of top */
3413
3414 old_top = av->top;
3415 old_size = chunksize(old_top);
3416 old_end = (char*)(chunk_at_offset(chunk(old_top), old_size));
3417
3418 brk = snd_brk = (char*)(MORECORE_FAILURE);
3419
3420 /*
3421 If not the first time through, we require old_size to be
3422 at least MINSIZE and to have prev_inuse set.
3423 */
3424
3425 /* assert((old_top == initial_top(av) && old_size == 0) ||
3426 ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
3427 prev_inuse(old_top))); */
3428
3429 /* Precondition: not enough current space to satisfy nb request */
3430 assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
3431
3432 /* Precondition: all fastbins are consolidated */
3433 assert(!have_fastchunks(av));
3434
3435 /* Request enough space for nb + pad + overhead */
3436 size = nb + av->top_pad + MINSIZE;
3437
3438 /*
3439 If contiguous, we can subtract out existing space that we hope to
3440 combine with new space. We add it back later only if
3441 we don't actually get contiguous space.
3442 */
3443 if (contiguous(av))
3444 size -= old_size;
3445
3446 /*
3447 Round to a multiple of page size.
3448 If MORECORE is not contiguous, this ensures that we only call it
3449 with whole-page arguments. And if MORECORE is contiguous and
3450 this is not first time through, this preserves page-alignment of
3451 previous calls. Otherwise, we correct to page-align below.
3452 */
3453
3454 size = (size + pagemask) & ~pagemask;
3455
3456 /*
3457 Don't try to call MORECORE if argument is so big as to appear
3458 negative. Note that since mmap takes size_t arg, it may succeed
3459 below even if we cannot call MORECORE.
3460 */
3461 if (size > 0 && morecore32bit(av))
3462 brk = (char*)(MORECORE(size));
3463
3464 /*
3465 If have mmap, try using it as a backup when MORECORE fails or
3466 cannot be used. This is worth doing on systems that have "holes" in
3467 address space, so sbrk cannot extend to give contiguous space, but
3468 space is available elsewhere. Note that we ignore mmap max count
3469 and threshold limits, since the space will not be used as a
3470 segregated mmap region.
3471 */
3472 if (brk != (char*)(MORECORE_FAILURE)) {
3473 av->sbrked_mem += size;
[172]3474 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
[171]3475 }
3476
3477 else {
3478
3479#ifdef DNMALLOC_DEBUG
3480 fprintf(stderr, "Morecore failure in sysmalloc\n");
3481#endif
3482
3483 /* Cannot merge with old top, so add its size back in */
3484 if (contiguous(av))
3485 size = (size + old_size + pagemask) & ~pagemask;
3486
3487 /* If we are relying on mmap as backup, then use larger units */
3488 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
3489 size = MMAP_AS_MORECORE_SIZE;
3490
3491 /* Don't try if size wraps around 0 */
3492 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3493
3494#ifdef DNMALLOC_DEBUG
3495 fprintf(stderr, "Try mmap in sysmalloc\n");
3496#endif
3497 brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3498
3499 if (brk != (char*)(MORECORE_FAILURE)) {
3500
[172]3501 VALGRIND_MAKE_MEM_NOACCESS(brk,size);
3502
[171]3503 av->mmapped_mem += size;
3504#ifdef DNMALLOC_DEBUG
3505 fprintf(stderr, "Mmapped successfully in sysmalloc %p\n", brk);
3506#endif
3507
3508 /* We do not need, and cannot use, another sbrk call to find end */
3509 snd_brk = brk + size;
3510
3511 /*
3512 Record that we no longer have a contiguous sbrk region.
3513 After the first time mmap is used as backup, we do not
3514 ever rely on contiguous space since this could incorrectly
3515 bridge regions.
3516 */
3517 set_noncontiguous(av);
3518 }
3519 }
3520 }
3521
3522 if (brk != (char*)(MORECORE_FAILURE)) {
3523#ifdef DNMALLOC_DEBUG
3524 fprintf(stderr, "Success path %lu allocated, sbrked %lu\n",
3525 size, (unsigned long)av->sbrked_mem);
3526#endif
3527 /* av->sbrked_mem += size; moved up */
3528
3529 /*
3530 If MORECORE extends previous space, we can likewise extend top size.
3531 */
3532
3533 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3534 set_head(old_top, (size + old_size) | PREV_INUSE);
3535#ifdef DNMALLOC_DEBUG
3536 fprintf(stderr, "Previous space extended\n");
3537#endif
3538 }
3539
3540 /*
3541 Otherwise, make adjustments:
3542
3543 * If the first time through or noncontiguous, we need to call sbrk
3544 just to find out where the end of memory lies.
3545
3546 * We need to ensure that all returned chunks from malloc will meet
3547 MALLOC_ALIGNMENT
3548
3549 * If there was an intervening foreign sbrk, we need to adjust sbrk
3550 request size to account for fact that we will not be able to
3551 combine new space with existing space in old_top.
3552
3553 * Almost all systems internally allocate whole pages at a time, in
3554 which case we might as well use the whole last page of request.
3555 So we allocate enough more memory to hit a page boundary now,
3556 which in turn causes future contiguous calls to page-align.
3557 */
3558
3559 else {
3560 front_misalign = 0;
3561 end_misalign = 0;
3562 correction = 0;
3563 aligned_brk = brk;
3564
3565 /*
3566 If MORECORE returns an address lower than we have seen before,
3567 we know it isn't really contiguous. This and some subsequent
3568 checks help cope with non-conforming MORECORE functions and
3569 the presence of "foreign" calls to MORECORE from outside of
3570 malloc or by other threads. We cannot guarantee to detect
3571 these in all cases, but cope with the ones we do detect.
3572 */
3573 if (contiguous(av) && old_size != 0 && brk < old_end) {
3574 set_noncontiguous(av);
3575 }
3576
3577 /* handle contiguous cases */
3578 if (contiguous(av)) {
3579
3580#ifdef DNMALLOC_DEBUG
3581 fprintf(stderr, "Handle contiguous cases\n");
3582#endif
3583 /*
3584 We can tolerate forward non-contiguities here (usually due
3585 to foreign calls) but treat them as part of our space for
3586 stats reporting.
3587 */
3588 if (old_size != 0)
3589 av->sbrked_mem += brk - old_end;
3590
3591 /* Guarantee alignment of first new chunk made from this space */
3592
3593 front_misalign = (INTERNAL_SIZE_T) brk & MALLOC_ALIGN_MASK;
3594 if (front_misalign > 0) {
3595
3596 /*
3597 Skip over some bytes to arrive at an aligned position.
3598 We don't need to specially mark these wasted front bytes.
3599 They will never be accessed anyway because
3600 prev_inuse of av->top (and any chunk created from its start)
3601 is always true after initialization.
3602 */
3603
3604 correction = MALLOC_ALIGNMENT - front_misalign;
3605 aligned_brk += correction;
3606 }
3607
3608 /*
3609 If this isn't adjacent to existing space, then we will not
3610 be able to merge with old_top space, so must add to 2nd request.
3611 */
3612
3613 correction += old_size;
3614
3615 /* Extend the end address to hit a page boundary */
3616 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3617 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3618
3619 assert(correction >= 0);
3620 snd_brk = (char*)(MORECORE(correction));
3621
3622 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3623 /*
3624 If can't allocate correction, try to at least find out current
3625 brk. It might be enough to proceed without failing.
3626 */
3627 correction = 0;
3628 snd_brk = (char*)(MORECORE(0));
3629 }
3630 else if (snd_brk < brk) {
3631 /*
3632 If the second call gives noncontiguous space even though
3633 it says it won't, the only course of action is to ignore
3634 results of second call, and conservatively estimate where
3635 the first call left us. Also set noncontiguous, so this
3636 won't happen again, leaving at most one hole.
3637
3638 Note that this check is intrinsically incomplete. Because
3639 MORECORE is allowed to give more space than we ask for,
3640 there is no reliable way to detect a noncontiguity
3641 producing a forward gap for the second call.
3642 */
3643 snd_brk = brk + size;
3644 correction = 0;
3645 set_noncontiguous(av);
3646 }
[172]3647 else {
3648 VALGRIND_MAKE_MEM_NOACCESS(snd_brk,correction);
3649 }
[171]3650
3651 }
3652
3653 /* handle non-contiguous cases */
3654 else {
3655
3656#ifdef DNMALLOC_DEBUG
3657 fprintf(stderr, "Handle non-contiguous cases\n");
3658#endif
3659
3660 /* MORECORE/mmap must correctly align */
3661 assert(aligned_OK(brk));
3662
3663 /* Find out current end of memory */
3664 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3665 snd_brk = (char*)(MORECORE(0));
3666 av->sbrked_mem += snd_brk - brk - size;
3667 }
3668#ifdef DNMALLOC_DEBUG
3669 fprintf(stderr, "Sbrked now %lu\n", (unsigned long)av->sbrked_mem);
3670#endif
3671 }
3672
3673 /* Adjust top based on results of second sbrk.
3674 *
3675 * If mmap() has been used as backup for failed morecore(),
3676 * we end up in this branch as well.
3677 */
3678 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3679#ifdef DNMALLOC_DEBUG
3680 fprintf(stderr, "Adjust top, correction %lu\n", correction);
3681#endif
3682 /* hashtable_remove(chunk(av->top)); *//* rw 19.05.2008 removed */
3683 av->top = cireg_getfree();
3684 av->top->chunk = (mchunkptr)aligned_brk;
3685 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3686#ifdef DNMALLOC_DEBUG
3687 fprintf(stderr, "Adjust top, top %p size %lu\n",
3688 av->top, (unsigned long)chunksize(av->top));
3689#endif
3690 hashtable_add(av->top);
3691 av->sbrked_mem += correction;
3692
3693 /*
3694 If not the first time through, we either have a
3695 gap due to foreign sbrk or a non-contiguous region. Insert a
3696 double fencepost at old_top to prevent consolidation with space
3697 we don't own. These fenceposts are artificial chunks that are
3698         marked as inuse. The original dlmalloc inserted two such
3699         fenceposts, each too small to ever be handed out. To keep the
3700         hashtable linked lists at a maximum of 8 elements we insert
3701         only one; inuse is determined by the current rather than the next chunk anyway.
3702 */
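      /*
        Sketch of the resulting layout (illustrative):

            [ old_top, shrunk to old_size ][ 16-byte fencepost, INUSE ][ gap / foreign space ]

        Because the fencepost is marked inuse, the consolidation code in
        fREe() can never walk forward past old_top into memory we do not own.
      */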
3703
3704 if (old_size != 0) {
3705#ifdef DNMALLOC_DEBUG
3706 fprintf(stderr, "Shrink old_top to insert fenceposts\n");
3707#endif
3708 /*
3709 Shrink old_top to insert fenceposts, keeping size a
3710 multiple of MALLOC_ALIGNMENT. We know there is at least
3711 enough space in old_top to do this.
3712 */
3713#ifdef DNMALLOC_DEBUG
3714 fprintf(stderr, "Adjust top, old_top %p old_size before %lu\n",
3715 old_top, (unsigned long)old_size);
3716#endif
3717 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3718 set_head(old_top, old_size | PREV_INUSE);
3719#ifdef DNMALLOC_DEBUG
3720 fprintf(stderr, "Adjust top, old_size after %lu\n",
3721 (unsigned long)old_size);
3722#endif
3723
3724 /*
3725 Note that the following assignments completely overwrite
3726 old_top when old_size was previously MINSIZE. This is
3727 intentional. We need the fencepost, even if old_top otherwise gets
3728 lost.
3729 */
3730	      /* In dnmalloc the fencepost needs to be 16 bytes; since it is
3731	         marked inuse it will never be coalesced.
3732	      */
3733 fencepost = cireg_getfree();
3734 fencepost->chunk = (mchunkptr) chunk_at_offset(chunk(old_top),
3735 old_size);
3736 fencepost->size = 16|INUSE|PREV_INUSE;
3737 hashtable_add(fencepost);
3738 /*
3739 If possible, release the rest, suppressing trimming.
3740 */
3741 if (old_size >= MINSIZE) {
3742 INTERNAL_SIZE_T tt = av->trim_threshold;
3743#ifdef DNMALLOC_DEBUG
3744 fprintf(stderr, "Release\n");
3745#endif
3746 av->trim_threshold = (INTERNAL_SIZE_T)(-1);
3747 set_head(old_top, old_size | PREV_INUSE | INUSE);
3748 guard_set(av->guard_stored, old_top, 0, old_size);
[172]3749 VALGRIND_MALLOCLIKE_BLOCK(chunk(old_top), old_size, 0, 0);
[171]3750 fREe(chunk(old_top));
3751 av->trim_threshold = tt;
3752#ifdef DNMALLOC_DEBUG
3753 fprintf(stderr, "Release done\n");
3754#endif
3755 }
3756
3757#ifdef DNMALLOC_DEBUG
3758 fprintf(stderr, "Adjust top, size %lu\n",
3759 (unsigned long)chunksize(av->top));
3760#endif
3761
3762 } /* fenceposts */
3763 } /* adjust top */
3764 } /* not extended previous region */
3765
3766 /* Update statistics */ /* FIXME check this */
3767 sum = av->sbrked_mem;
3768 if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
3769 av->max_sbrked_mem = sum;
3770
3771 sum += av->mmapped_mem;
3772 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3773 av->max_total_mem = sum;
3774
3775 check_malloc_state();
3776
3777 /* finally, do the allocation */
3778
3779 p = av->top;
3780 size = chunksize(p);
3781
3782#ifdef DNMALLOC_DEBUG
3783 fprintf(stderr, "Size: %lu nb+MINSIZE: %lu\n",
3784 (CHUNK_SIZE_T)(size), (CHUNK_SIZE_T)(nb + MINSIZE));
3785#endif
3786
3787 /* check that one of the above allocation paths succeeded */
3788 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3789 remainder_size = size - nb;
3790 remainder = cireg_getfree();
3791 remainder->chunk = chunk_at_offset(chunk(p), nb);
3792 av->top = remainder;
3793 set_head(p, nb | PREV_INUSE | INUSE);
3794 set_head(remainder, remainder_size | PREV_INUSE);
3795 hashtable_insert (p, av->top);
3796 check_malloced_chunk(p, nb);
3797#ifdef DNMALLOC_DEBUG
3798 fprintf(stderr, "Return any (total %lu)\n",
3799 (unsigned long)/* size_t */av->max_total_mem );
3800#endif
3801 return chunk(p);
3802 }
3803
3804 }
3805
3806#ifdef DNMALLOC_DEBUG
3807 fprintf(stderr, "Return failed (total %lu)\n",
3808 (unsigned long)/* size_t */av->max_total_mem );
3809#endif
3810
3811 /* catch all failure paths */
3812 MALLOC_FAILURE_ACTION;
3813 return 0;
3814}
3815
3816
3817
3818
3819/*
3820 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3821 to the system (via negative arguments to sbrk) if there is unused
3822 memory at the `high' end of the malloc pool. It is called
3823 automatically by free() when top space exceeds the trim
3824 threshold. It is also called by the public malloc_trim routine. It
3825 returns 1 if it actually released any memory, else 0.
3826*/
3827
3828#if __STD_C
3829static int sYSTRIm(size_t pad, mstate av)
3830#else
3831static int sYSTRIm(pad, av) size_t pad; mstate av;
3832#endif
3833{
3834 long top_size; /* Amount of top-most memory */
3835 long extra; /* Amount to release */
3836 long released; /* Amount actually released */
3837 char* current_brk; /* address returned by pre-check sbrk call */
3838 char* new_brk; /* address returned by post-check sbrk call */
3839 size_t pagesz;
3840
3841 pagesz = av->pagesize;
3842 top_size = chunksize(av->top);
3843
3844 /* Release in pagesize units, keeping at least one page */
3845 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
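  /*
    Worked example (illustrative figures, assuming pagesz == 4096,
    MINSIZE == 16 and pad == 0): for top_size == 65536,
    extra == ((65536 - 0 - 16 + 4095) / 4096 - 1) * 4096 == 15 * 4096,
    so 15 pages can be handed back while one page stays in top.
  */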
3846
3847 if (extra > 0) {
3848
3849 /*
3850 Only proceed if end of memory is where we last set it.
3851 This avoids problems if there were foreign sbrk calls.
3852 */
3853 current_brk = (char*)(MORECORE(0));
3854 if (current_brk == (char*)(av->top) + top_size) {
3855
3856 /*
3857 Attempt to release memory. We ignore MORECORE return value,
3858 and instead call again to find out where new end of memory is.
3859 This avoids problems if first call releases less than we asked,
3860       or if failure somehow altered brk value. (We could still
3861 encounter problems if it altered brk in some very bad way,
3862 but the only thing we can do is adjust anyway, which will cause
3863 some downstream failure.)
3864 */
3865
3866 MORECORE(-extra);
3867 new_brk = (char*)(MORECORE(0));
3868
3869 if (new_brk != (char*)MORECORE_FAILURE) {
3870 released = (long)(current_brk - new_brk);
3871
3872 if (released != 0) {
3873 /* Success. Adjust top. */
3874 av->sbrked_mem -= released;
3875 set_head(av->top, (top_size - released) | PREV_INUSE);
3876 check_malloc_state();
3877 return 1;
3878 }
3879 }
3880 }
3881 }
3882 return 0;
3883}
3884
3885/*
3886 ------------------------------ malloc ------------------------------
3887*/
3888
3889
3890#if __STD_C
3891DL_STATIC Void_t* mALLOc(size_t bytes)
3892#else
3893DL_STATIC Void_t* mALLOc(bytes) size_t bytes;
3894#endif
3895{
3896 mstate av = get_malloc_state();
3897
3898 INTERNAL_SIZE_T nb; /* normalized request size */
3899 unsigned int idx; /* associated bin index */
3900 mbinptr bin; /* associated bin */
3901 mfastbinptr* fb; /* associated fastbin */
3902
3903 chunkinfoptr victim; /* inspected/selected chunk */
3904 INTERNAL_SIZE_T size; /* its size */
3905 int victim_index; /* its bin index */
3906
3907 chunkinfoptr remainder; /* remainder from a split */
3908 CHUNK_SIZE_T remainder_size; /* its size */
3909
3910 unsigned int block; /* bit map traverser */
3911 unsigned int bit; /* bit map traverser */
3912 unsigned int map; /* current word of binmap */
3913
3914 chunkinfoptr fwd; /* misc temp for linking */
3915 chunkinfoptr bck; /* misc temp for linking */
3916
3917 Void_t* retval;
3918
3919 /* chunkinfoptr next; */
3920
3921
3922 /*
3923 Convert request size to internal form by adding SIZE_SZ bytes
3924 overhead plus possibly more to obtain necessary alignment and/or
3925 to obtain a size of at least MINSIZE, the smallest allocatable
3926 size. Also, checked_request2size traps (returning 0) request sizes
3927 that are so large that they wrap around zero when padded and
3928 aligned.
3929 */
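  /*
    A rough numeric sketch (assuming the typical 32-bit values
    SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8): a request of 20 bytes
    becomes nb == (20 + 4 + 7) & ~7 == 24, and tiny requests are padded
    up to MINSIZE. A request close to (size_t)-1 would wrap around zero
    after padding and is therefore trapped by checked_request2size.
  */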
3930#if defined(SH_CUTEST)
3931 extern int malloc_count;
3932 ++malloc_count;
3933#endif
3934
3935 checked_request2size(bytes, nb);
3936
3937 /*
3938 Bypass search if no frees yet
3939 */
3940 if (av && have_anychunks(av)) {
3941 goto av_initialized;
3942 }
3943 else {
3944 if (!av || av->max_fast == 0) { /* initialization check */
3945 malloc_consolidate(av);
3946 av = get_malloc_state();
3947 }
3948 goto use_top;
3949 }
3950
3951 av_initialized:
3952
3953 /*
3954 If the size qualifies as a fastbin, first check corresponding bin.
3955 */
3956 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3957 fb = &(av->fastbins[(fastbin_index(nb))]);
3958 if ( (victim = *fb) != 0) {
3959 *fb = victim->fd;
3960 check_remalloced_chunk(victim, nb);
3961 guard_set(av->guard_stored, victim, bytes, nb);
[172]3962 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3963 return chunk(victim);
3964 }
3965 }
3966
3967 /*
3968 If a small request, check regular bin. Since these "smallbins"
3969 hold one size each, no searching within bins is necessary.
3970 (For a large request, we need to wait until unsorted chunks are
3971 processed to find best fit. But for small ones, fits are exact
3972 anyway, so we can check now, which is faster.)
3973 */
3974
3975 if (in_smallbin_range(nb)) {
3976 idx = smallbin_index(nb);
3977 bin = bin_at(av,idx);
3978
3979 if ((victim = last(bin)) != bin) {
3980 bck = victim->bk;
3981 bin->bk = bck;
3982 bck->fd = bin;
3983
3984 set_all_inuse(victim);
3985
3986 check_malloced_chunk(victim, nb);
3987 guard_set(av->guard_stored, victim, bytes, nb);
[172]3988 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]3989 return chunk(victim);
3990 }
3991 }
3992
3993 /*
3994 If this is a large request, consolidate fastbins before continuing.
3995 While it might look excessive to kill all fastbins before
3996 even seeing if there is space available, this avoids
3997 fragmentation problems normally associated with fastbins.
3998 Also, in practice, programs tend to have runs of either small or
3999 large requests, but less often mixtures, so consolidation is not
4000 invoked all that often in most programs. And the programs that
4001 it is called frequently in otherwise tend to fragment.
4002 */
4003
4004 else {
4005 idx = largebin_index(nb);
4006 if (have_fastchunks(av))
4007 malloc_consolidate(av);
4008 }
4009
4010 /*
4011 Process recently freed or remaindered chunks, taking one only if
4012    it is an exact fit, or, if this is a small request, the chunk is the remainder from
4013 the most recent non-exact fit. Place other traversed chunks in
4014 bins. Note that this step is the only place in any routine where
4015 chunks are placed in bins.
4016 */
4017
4018 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4019 bck = victim->bk;
4020 size = chunksize(victim);
4021
4022 /*
4023 If a small request, try to use last remainder if it is the
4024 only chunk in unsorted bin. This helps promote locality for
4025 runs of consecutive small requests. This is the only
4026 exception to best-fit, and applies only when there is
4027 no exact fit for a small chunk.
4028 */
4029
4030 if (UNLIKELY(in_smallbin_range(nb) &&
4031 bck == unsorted_chunks(av) &&
4032 victim == av->last_remainder &&
4033 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE))) {
4034
4035 /* split and reattach remainder */
4036 remainder_size = size - nb;
4037 remainder = cireg_getfree();
4038 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4039 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4040 av->last_remainder = remainder;
4041 remainder->bk = remainder->fd = unsorted_chunks(av);
4042
4043 set_head(victim, nb | PREV_INUSE|INUSE);
4044 set_head(remainder, remainder_size | PREV_INUSE);
4045 hashtable_insert(victim, remainder);
4046
4047 check_malloced_chunk(victim, nb);
4048 guard_set(av->guard_stored, victim, bytes, nb);
[172]4049 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4050 return chunk(victim);
4051 }
4052
4053 /* remove from unsorted list */
4054 unsorted_chunks(av)->bk = bck;
4055 bck->fd = unsorted_chunks(av);
4056
4057 /* Take now instead of binning if exact fit */
4058
4059 if (UNLIKELY(size == nb)) {
4060      set_all_inuse(victim);
4061 check_malloced_chunk(victim, nb);
4062 guard_set(av->guard_stored, victim, bytes, nb);
[172]4063 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4064 return chunk(victim);
4065 }
4066
4067 /* place chunk in bin */
4068
4069 if (in_smallbin_range(size)) {
4070
4071 victim_index = smallbin_index(size);
4072 bck = bin_at(av, victim_index);
4073 fwd = bck->fd;
4074 }
4075 else {
4076 victim_index = largebin_index(size);
4077 bck = bin_at(av, victim_index);
4078 fwd = bck->fd;
4079
4080 if (UNLIKELY(fwd != bck)) {
4081 /* if smaller than smallest, place first */
4082 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
4083 fwd = bck;
4084 bck = bck->bk;
4085 }
4086 else if ((CHUNK_SIZE_T)(size) >=
4087 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
4088
4089 /* maintain large bins in sorted order */
4090 size |= PREV_INUSE|INUSE; /* Or with inuse bits to speed comparisons */
4091 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
4092 fwd = fwd->fd;
4093 bck = fwd->bk;
4094 }
4095 }
4096 }
4097
4098 mark_bin(av, victim_index);
4099 victim->bk = bck;
4100 victim->fd = fwd;
4101 fwd->bk = victim;
4102 bck->fd = victim;
4103 }
4104
4105 /*
4106 If a large request, scan through the chunks of current bin to
4107 find one that fits. (This will be the smallest that fits unless
4108 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
4109 the only step where an unbounded number of chunks might be
4110 scanned without doing anything useful with them. However the
4111 lists tend to be short.
4112 */
4113
4114 if (!in_smallbin_range(nb)) {
4115 bin = bin_at(av, idx);
4116
4117 victim = last(bin);
4118
4119 if (UNLIKELY(victim != bin)) {
4120
4121 do {
4122 size = chunksize(victim);
4123
4124 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
4125 remainder_size = size - nb;
4126 unlink(victim, bck, fwd);
4127
4128 /* Split */
4129 if (remainder_size >= MINSIZE) {
4130 remainder = cireg_getfree();
4131 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4132 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4133 remainder->bk = remainder->fd = unsorted_chunks(av);
4134 set_head(victim, nb | PREV_INUSE | INUSE);
4135 set_head(remainder, remainder_size | PREV_INUSE);
4136 hashtable_insert(victim, remainder);
4137 check_malloced_chunk(victim, nb);
4138 guard_set(av->guard_stored, victim, bytes, nb);
[172]4139 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4140 return chunk(victim);
4141 }
4142 /* Exhaust */
4143 else {
4144 set_all_inuse(victim);
4145 check_malloced_chunk(victim, nb);
4146 guard_set(av->guard_stored, victim, bytes, nb);
[172]4147 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4148 return chunk(victim);
4149 }
4150 }
4151 victim = victim->bk;
4152 } while(victim != bin);
4153 }
4154 }
4155
4156 /*
4157 Search for a chunk by scanning bins, starting with next largest
4158 bin. This search is strictly by best-fit; i.e., the smallest
4159 (with ties going to approximately the least recently used) chunk
4160 that fits is selected.
4161
4162 The bitmap avoids needing to check that most blocks are nonempty.
4163 */
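  /*
    Sketch of the bitmap bookkeeping (assuming the usual dlmalloc
    convention BINMAPSHIFT == 5, i.e. 32 bins per map word): bin index
    70 lives in word block == 70 >> 5 == 2 and is tested with
    bit == 1U << (70 & 31) == 1U << 6. A set bit only means the bin may
    be non-empty; false alarms are cleared below.
  */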
4164
4165
4166 ++idx;
4167 bin = bin_at(av,idx);
4168 block = idx2block(idx);
4169 map = av->binmap[block];
4170 bit = idx2bit(idx);
4171
4172 for (;;) {
4173
4174 /* Skip rest of block if there are no more set bits in this block. */
4175 if (bit > map || bit == 0) {
4176 do {
4177 if (++block >= BINMAPSIZE) /* out of bins */
4178 goto use_top;
4179 } while ( (map = av->binmap[block]) == 0);
4180
4181 bin = bin_at(av, (block << BINMAPSHIFT));
4182 bit = 1;
4183 }
4184
4185 /* Advance to bin with set bit. There must be one. */
4186 while ((bit & map) == 0) {
4187 bin = next_bin(bin);
4188 bit <<= 1;
4189 assert(bit != 0);
4190 }
4191
4192 /* Inspect the bin. It is likely to be non-empty */
4193 victim = last(bin);
4194
4195 /* If a false alarm (empty bin), clear the bit. */
4196 if (victim == bin) {
4197 av->binmap[block] = map &= ~bit; /* Write through */
4198 bin = next_bin(bin);
4199 bit <<= 1;
4200 }
4201
4202 else {
4203 size = chunksize(victim);
4204
4205 /* We know the first chunk in this bin is big enough to use. */
4206 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
4207
4208 remainder_size = size - nb;
4209
4210 /* unlink */
4211 bck = victim->bk;
4212 bin->bk = bck;
4213 bck->fd = bin;
4214
4215 /* Split */
4216 if (remainder_size >= MINSIZE) {
4217 remainder = cireg_getfree();
4218 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4219
4220 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4221 remainder->bk = remainder->fd = unsorted_chunks(av);
4222 /* advertise as last remainder */
[172]4223 if (in_smallbin_range(nb))
4224 av->last_remainder = remainder;
[171]4225
4226 set_head(victim, nb | PREV_INUSE | INUSE);
4227 set_head(remainder, remainder_size | PREV_INUSE);
4228 hashtable_insert(victim, remainder);
4229 check_malloced_chunk(victim, nb);
4230 guard_set(av->guard_stored, victim, bytes, nb);
[172]4231 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4232 return chunk(victim);
4233 }
4234 /* Exhaust */
4235 else {
4236 set_all_inuse(victim);
4237 check_malloced_chunk(victim, nb);
4238 guard_set(av->guard_stored, victim, bytes, nb);
[172]4239 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4240 return chunk(victim);
4241 }
4242
4243 }
4244 }
4245
4246 use_top:
4247
4248
4249 /*
4250 If large enough, split off the chunk bordering the end of memory
4251 (held in av->top). Note that this is in accord with the best-fit
4252 search rule. In effect, av->top is treated as larger (and thus
4253 less well fitting) than any other available chunk since it can
4254 be extended to be as large as necessary (up to system
4255 limitations).
4256
4257 We require that av->top always exists (i.e., has size >=
4258 MINSIZE) after initialization, so if it would otherwise be
4259    exhausted by the current request, it is replenished. (The main
4260 reason for ensuring it exists is that we may need MINSIZE space
4261 to put in fenceposts in sysmalloc.)
4262 */
4263
4264 victim = av->top;
4265 size = chunksize(victim);
4266
4267 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
[172]4268 remainder = cireg_getfree();
[171]4269 remainder_size = size - nb;
4270 remainder->chunk = chunk_at_offset(chunk(victim), nb);
4271 av->top = remainder;
4272 set_head(victim, nb | PREV_INUSE | INUSE);
4273 set_head(remainder, remainder_size | PREV_INUSE);
4274 hashtable_insert(victim, remainder);
4275 check_malloced_chunk(victim, nb);
4276 guard_set(av->guard_stored, victim, bytes, nb);
[172]4277 VALGRIND_MALLOCLIKE_BLOCK(chunk(victim), bytes, 0, 0);
[171]4278 return chunk(victim);
4279 }
4280
4281 /*
4282 If no space in top, relay to handle system-dependent cases
4283 */
4284 retval = sYSMALLOc(nb, av);
4285 if (retval) {
4286 victim = mem2chunk(retval);
4287 guard_set(av->guard_stored, victim, bytes, nb);
4288 }
[172]4289 VALGRIND_MALLOCLIKE_BLOCK(retval, bytes, 0, 0);
[171]4290 return retval;
4291}
4292
4293/*
4294 ------------------------------ free ------------------------------
4295*/
4296
4297#if __STD_C
4298DL_STATIC void fREe(Void_t* mem)
4299#else
4300DL_STATIC void fREe(mem) Void_t* mem;
4301#endif
4302{
4303 mstate av = get_malloc_state();
4304
4305 chunkinfoptr p; /* chunk corresponding to mem */
4306 INTERNAL_SIZE_T size; /* its size */
4307 mfastbinptr* fb; /* associated fastbin */
4308 chunkinfoptr prevchunk; /* previous physical chunk */
4309 chunkinfoptr nextchunk; /* next contiguous chunk */
4310 INTERNAL_SIZE_T nextsize; /* its size */
4311 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4312 chunkinfoptr bck; /* misc temp for linking */
4313 chunkinfoptr fwd; /* misc temp for linking */
4314 chunkinfoptr next;
4315#if defined(SH_CUTEST)
4316 extern int malloc_count;
4317 --malloc_count;
4318#endif
4319
4320 /* free(0) has no effect */
4321 if (mem != 0) {
4322 p = hashtable_lookup(mem);
4323 /* check that memory is managed by us
4324 * and is inuse
4325 */
4326 if (UNLIKELY(!p || !inuse(p)))
4327 {
4328#ifdef DNMALLOC_CHECKS
4329 if (p) {
4330 fprintf(stderr, "Attempt to free memory not in use\n");
4331 abort();
4332 } else {
4333 fprintf(stderr, "Attempt to free memory not allocated\n");
4334 abort();
4335 }
4336#endif
4337 assert(p && inuse(p));
4338 return;
4339 }
4340
[172]4341 VALGRIND_FREELIKE_BLOCK(mem, 0);
4342
[171]4343 guard_check(av->guard_stored, p);
[172]4344
[171]4345 size = chunksize(p);
4346
4347 check_inuse_chunk(p);
4348
4349 /*
4350 If eligible, place chunk on a fastbin so it can be found
4351 and used quickly in malloc.
4352 */
4353
4354 if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
4355
4356#if TRIM_FASTBINS
4357 /*
4358 If TRIM_FASTBINS set, don't place chunks
4359 bordering top into fastbins
4360 */
4361 && (chunk_at_offset(chunk(p), size) != av->top)
4362#endif
4363 ) {
4364
4365 set_fastchunks(av);
4366 fb = &(av->fastbins[fastbin_index(size)]);
4367 p->fd = *fb;
4368 *fb = p;
4369 }
4370
4371 /*
4372 Consolidate other non-mmapped chunks as they arrive.
4373 */
4374
4375 else if (!chunk_is_mmapped(p)) {
4376 set_anychunks(av);
4377
4378 nextchunk = next_chunkinfo(p);
4379 if (nextchunk)
4380 nextsize = chunksize(nextchunk);
4381 else
4382 nextsize = 0;/* gcc doesn't notice that it's only used if (nextchunk)*/
4383
4384 /* consolidate backward */
4385 if (UNLIKELY(!prev_inuse(p))) {
4386 prevchunk = prev_chunkinfo(p);
4387 prevsize = chunksize(prevchunk);
4388#ifdef DNMALLOC_CHECKS
4389 if (inuse(prevchunk)) {
4390 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk: %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevchunk, chunk(prevchunk));
4391 abort();
4392 }
4393#else
4394 assert(!inuse(prevchunk));
4395#endif
4396 size += prevsize;
4397 unlink(prevchunk, bck, fwd);
4398 set_head(p, size | PREV_INUSE);
4399 hashtable_skiprm(prevchunk,p);
4400 /* This chunk no longer exists in any form: release the chunkinfoptr
4401 */
4402 freecilst_add(p);
4403 p = prevchunk;
4404 }
4405
4406 if (nextchunk) {
4407 if (nextchunk != av->top) {
4408 /* get and clear inuse bit */
4409 clear_previnuse(nextchunk);
4410
4411 /* consolidate forward */
4412 if (!inuse(nextchunk)) {
[228]4413 /* if mmap is used instead of sbrk, we may have a
4414 * chunk with !nextchunk->fd && !nextchunk->bk
4415 */
4416	    if (nextchunk->fd && nextchunk->bk)
4417 {
4418 unlink(nextchunk, bck, fwd);
4419 size += nextsize;
4420 set_head(p, size | PREV_INUSE);
4421 hashtable_skiprm(p, nextchunk);
4422 freecilst_add (nextchunk);
4423 }
[171]4424 }
4425
4426 set_head(p, size | PREV_INUSE);
4427 next = next_chunkinfo(p);
4428 if (next)
4429 next->prev_size = size;
4430
4431 /*
4432 Place the chunk in unsorted chunk list. Chunks are
4433 not placed into regular bins until after they have
4434 been given one chance to be used in malloc.
4435 */
4436
4437 bck = unsorted_chunks(av);
4438 fwd = bck->fd;
4439 p->bk = bck;
4440 p->fd = fwd;
4441 bck->fd = p;
4442 fwd->bk = p;
4443
4444 nextchunk = next_chunkinfo(p);
4445 if (nextchunk)
4446 nextchunk->prev_size = chunksize(p);
4447
4448 check_free_chunk(p);
4449 }
4450
4451 /*
4452 If the chunk borders the current high end of memory,
4453 consolidate into top
4454 */
4455
4456 else {
4457 size += nextsize;
4458 set_head(p, size | PREV_INUSE);
4459 hashtable_remove(chunk(av->top));
4460 freecilst_add(av->top);
4461 av->top = p;
4462 check_chunk(p);
4463 }
4464 } /* if (nextchunk) */
4465
4466 /*
4467 If freeing a large space, consolidate possibly-surrounding
4468 chunks. Then, if the total unused topmost memory exceeds trim
4469 threshold, ask malloc_trim to reduce top.
4470
4471 Unless max_fast is 0, we don't know if there are fastbins
4472 bordering top, so we cannot tell for sure whether threshold
4473 has been reached unless fastbins are consolidated. But we
4474 don't want to consolidate on each free. As a compromise,
4475 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4476 is reached.
4477 */
4478
4479 if (UNLIKELY((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)) {
4480 if (have_fastchunks(av))
4481 malloc_consolidate(av);
4482
4483#ifndef MORECORE_CANNOT_TRIM
4484 if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
4485 (CHUNK_SIZE_T)(av->trim_threshold))
4486 {
4487 if (morecore32bit(av))
4488 {
4489#ifdef DNMALLOC_DEBUG
4490 fprintf(stderr, "Calling systrim from free()\n");
4491#endif
4492 sYSTRIm(av->top_pad, av);
4493#ifdef DNMALLOC_DEBUG
4494 fprintf(stderr, "Systrim done\n");
4495#endif
4496 }
4497 }
4498#endif
4499 }
4500
4501 }
4502 /*
4503 If the chunk was allocated via mmap, release via munmap()
4504 Note that if HAVE_MMAP is false but chunk_is_mmapped is
4505 true, then user must have overwritten memory. There's nothing
4506 we can do to catch this error unless DEBUG is set, in which case
4507    check_inuse_chunk (above) will have triggered an error.
4508 */
4509
4510 else {
[362]4511#if PARANOIA > 0
[171]4512 int ret;
[362]4513#endif
[171]4514 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) p->hash_next;
4515 av->n_mmaps--;
4516 av->mmapped_mem -= (size + offset);
[362]4517#if PARANOIA > 0
[171]4518 ret = munmap((char*) chunk(p) - offset, size + offset);
[362]4519#else
4520 munmap((char*) chunk(p) - offset, size + offset);
4521#endif
[171]4522 hashtable_remove_mmapped(chunk(p));
4523 freecilst_add(p);
4524 /* munmap returns non-zero on failure */
4525 assert(ret == 0);
4526 }
4527 }
4528}
4529
4530/*
4531 ------------------------- malloc_consolidate -------------------------
4532
4533 malloc_consolidate is a specialized version of free() that tears
4534 down chunks held in fastbins. Free itself cannot be used for this
4535 purpose since, among other things, it might place chunks back onto
4536 fastbins. So, instead, we need to use a minor variant of the same
4537 code.
4538
4539 Also, because this routine needs to be called the first time through
4540 malloc anyway, it turns out to be the perfect place to trigger
4541 initialization code.
4542*/
4543
4544#if __STD_C
4545static void malloc_consolidate(mstate av)
4546#else
4547static void malloc_consolidate(av) mstate av;
4548#endif
4549{
4550 mfastbinptr* fb; /* current fastbin being consolidated */
4551 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4552 chunkinfoptr p; /* current chunk being consolidated */
4553 chunkinfoptr nextp; /* next chunk to consolidate */
4554 chunkinfoptr prevp;
4555 chunkinfoptr unsorted_bin; /* bin header */
4556 chunkinfoptr first_unsorted; /* chunk to link to */
4557
4558 /* These have same use as in free() */
4559 chunkinfoptr nextchunk;
4560 INTERNAL_SIZE_T size;
4561 INTERNAL_SIZE_T nextsize;
4562 INTERNAL_SIZE_T prevsize;
4563 chunkinfoptr bck;
4564 chunkinfoptr fwd;
4565 chunkinfoptr next;
4566
4567 /*
4568 If max_fast is 0, we know that av hasn't
4569 yet been initialized, in which case do so below
4570 */
4571 if (av && av->max_fast != 0) {
[172]4572
[171]4573 clear_fastchunks(av);
4574
4575 unsorted_bin = unsorted_chunks(av);
4576
4577 /*
4578 Remove each chunk from fast bin and consolidate it, placing it
4579 then in unsorted bin. Among other reasons for doing this,
4580 placing in unsorted bin avoids needing to calculate actual bins
4581 until malloc is sure that chunks aren't immediately going to be
4582 reused anyway.
4583 */
4584
4585 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4586 fb = &(av->fastbins[0]);
4587 do {
4588 if ( UNLIKELY((p = *fb) != 0)) {
4589 *fb = 0;
4590 do {
4591 check_inuse_chunk(p);
4592 nextp = p->fd;
4593
4594 /*
4595 * Slightly streamlined version of consolidation code in free()
4596 */
4597
4598 size = chunksize(p);
4599 nextchunk = next_chunkinfo(p);
4600
4601 /* gcc doesn't notice that it's only used if (nextchunk) */
4602 if (nextchunk)
4603 nextsize = chunksize(nextchunk);
4604 else
4605 nextsize = 0;
4606
4607 if (!prev_inuse(p)) {
4608 prevp = prev_chunkinfo(p);
4609 prevsize = chunksize(prevp);
4610 size += prevsize;
4611#ifdef DNMALLOC_CHECKS
4612 if (inuse(prevp)) {
4613 fprintf(stderr, "Dnmalloc error: trying to unlink an inuse chunk (2): %p (chunk: %p)\n This is definitely a bug, please report it to dnmalloc@fort-knox.org.\n", prevp, chunk(prevp));
4614 abort();
4615 }
4616#else
4617 assert(!inuse(prevp));
4618#endif
4619 unlink(prevp, bck, fwd);
4620 set_head(p, size | PREV_INUSE);
4621 hashtable_skiprm(prevp,p);
4622 freecilst_add(p);
4623 p=prevp;
4624 }
4625
4626 if (nextchunk) {
4627 if (nextchunk != av->top) {
4628
4629 clear_previnuse(nextchunk);
4630
[172]4631 /* if mmap is used instead of sbrk, we may have a
4632 * chunk with !nextchunk->fd && !nextchunk->bk
4633 */
[171]4634 if (!inuse(nextchunk)) {
[172]4635 if( nextchunk->fd && nextchunk->bk) {
4636 size += nextsize;
4637 unlink(nextchunk, bck, fwd);
4638 set_head(p, size | PREV_INUSE);
4639 hashtable_skiprm(p,nextchunk);
4640 freecilst_add(nextchunk);
4641 }
[171]4642 }
4643
4644 first_unsorted = unsorted_bin->fd;
4645 unsorted_bin->fd = p;
4646 first_unsorted->bk = p;
4647
4648 set_head(p, size | PREV_INUSE);
4649 p->bk = unsorted_bin;
4650 p->fd = first_unsorted;
4651 next = next_chunkinfo(p);
4652 if (next)
4653 next->prev_size = size;
4654
4655
4656 }
4657
[172]4658 else if (nextchunk == av->top) {
[171]4659 size += nextsize;
4660 set_head(p, size | PREV_INUSE);
4661 hashtable_remove(chunk(av->top));
4662 freecilst_add(av->top);
4663 av->top = p;
4664 }
[172]4665 } /* if (nextchunk) */
[171]4666
4667 } while ( (p = nextp) != 0);
4668
4669 }
4670 } while (fb++ != maxfb);
4671 }
4672 else {
[237]4673 /* Initialize dnmalloc */
[171]4674 dnmalloc_init();
4675 malloc_init_state(get_malloc_state());
4676 check_malloc_state();
4677 }
4678}
4679
4680/*
4681 ------------------------------ realloc ------------------------------
4682*/
4683
4684
4685#if __STD_C
4686DL_STATIC Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4687#else
4688DL_STATIC Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4689#endif
4690{
4691 mstate av = get_malloc_state();
4692
4693 INTERNAL_SIZE_T nb; /* padded request size */
4694
4695 chunkinfoptr oldp; /* chunk corresponding to oldmem */
4696 INTERNAL_SIZE_T oldsize; /* its size */
4697
4698 chunkinfoptr newp; /* chunk to return */
4699 INTERNAL_SIZE_T newsize; /* its size */
4700 Void_t* newmem; /* corresponding user mem */
4701
4702 chunkinfoptr next; /* next contiguous chunk after oldp */
4703
4704 chunkinfoptr remainder; /* extra space at end of newp */
4705 CHUNK_SIZE_T remainder_size; /* its size */
4706
4707 chunkinfoptr bck; /* misc temp for linking */
4708 chunkinfoptr fwd; /* misc temp for linking */
4709
4710 CHUNK_SIZE_T copysize; /* bytes to copy */
4711 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4712 INTERNAL_SIZE_T* s; /* copy source */
4713 INTERNAL_SIZE_T* d; /* copy destination */
4714
4715
4716#ifdef REALLOC_ZERO_BYTES_FREES
4717 if (UNLIKELY(bytes == 0)) {
4718 fREe(oldmem);
4719 return 0;
4720 }
4721#endif
4722
4723 if (UNLIKELY(!av || av->max_fast == 0)) {
4724 malloc_consolidate(av);
4725 av = get_malloc_state();
4726 }
4727
4728 /* realloc of null is supposed to be same as malloc */
4729 if (UNLIKELY(oldmem == 0))
4730 return mALLOc(bytes);
4731
4732 checked_request2size(bytes, nb);
4733
4734 oldp = hashtable_lookup(oldmem);
4735
4736 if (UNLIKELY(!oldp || !inuse(oldp))){
4737 /* attempt to either realloc memory not managed by us
4738 * or memory that is not in use
4739 */
4740#ifdef DNMALLOC_CHECKS
4741 if (oldp) {
4742      fprintf(stderr, "Attempt to realloc memory not in use\n");
4743 abort();
4744 } else {
4745      fprintf(stderr, "Attempt to realloc memory not allocated\n");
4746 abort();
4747 }
4748#endif
4749 assert(oldp && inuse(oldp));
4750 return 0;
4751 }
4752
[172]4753 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
[171]4754 guard_check(av->guard_stored, oldp);
4755
4756 oldsize = chunksize(oldp);
4757
4758 check_inuse_chunk(oldp);
4759
4760 if (!chunk_is_mmapped(oldp)) {
4761
4762 if (UNLIKELY((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb))) {
4763 /* already big enough; split below */
4764 newp = oldp;
4765 newsize = oldsize;
4766 }
4767
4768 else {
4769 next = next_chunkinfo(oldp);
4770 if (next)
4771 next->prev_size = oldsize;
4772 /* Try to expand forward into top */
4773 if (next && next == av->top &&
4774 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4775 (CHUNK_SIZE_T)(nb + MINSIZE)) {
4776 set_head_size(oldp, nb);
4777 hashtable_remove(chunk(av->top));
4778 av->top->chunk = chunk_at_offset(chunk(oldp), nb);
4779 set_head(av->top, (newsize - nb) | PREV_INUSE);
4780	  /* av->top->chunk has been moved, so move it in the hashtable */
4781 hashtable_insert(oldp, av->top);
4782 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4783 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4784 return chunk(oldp);
4785 }
4786
4787 /* Try to expand forward into next chunk; split off remainder below */
4788 else if (next && next != av->top &&
4789 !inuse(next) &&
4790 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4791 (CHUNK_SIZE_T)(nb)) {
4792 newp = oldp;
4793 unlink(next, bck, fwd);
4794 hashtable_remove(chunk(next));
4795 freecilst_add(next);
4796 next = next_chunkinfo(oldp);
4797 if (next)
4798 next->prev_size = newsize;
4799 }
4800
4801 /* allocate, copy, free */
4802 else {
4803
4804 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
[172]4805 if (newmem == 0)
[171]4806 return 0; /* propagate failure */
4807
4808 newp = hashtable_lookup(newmem);
4809 newsize = chunksize(newp);
4810
[172]4811
[171]4812 /* next = next_chunkinfo(oldp); *//* 'next' never used rw 19.05.2008 */
4813 /*
4814 Avoid copy if newp is next chunk after oldp.
4815 */
4816 if (UNLIKELY(is_next_chunk(oldp, newp))) {
4817 newsize += oldsize;
4818 set_head_size(oldp, newsize);
4819 hashtable_skiprm(oldp, newp);
4820 freecilst_add(newp);
4821 newp = oldp;
4822 }
4823 else {
4824 /*
4825 Unroll copy of <= 40 bytes (80 if 8byte sizes)
4826 We know that contents have an even number of
4827 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
4828 */
4829
[172]4830 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), chunksize(oldp), 0, 0);
4831
[171]4832 copysize = oldsize;
4833 s = (INTERNAL_SIZE_T*)(oldmem);
4834 d = (INTERNAL_SIZE_T*)(newmem);
4835 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4836 assert(ncopies >= 2);
4837
4838 if (ncopies > 10)
4839 MALLOC_COPY(d, s, copysize);
4840
4841 else {
4842 *(d+0) = *(s+0);
4843 *(d+1) = *(s+1);
4844 if (ncopies > 2) {
4845 *(d+2) = *(s+2);
4846 *(d+3) = *(s+3);
4847 if (ncopies > 4) {
4848 *(d+4) = *(s+4);
4849 *(d+5) = *(s+5);
4850 if (ncopies > 6) {
4851 *(d+6) = *(s+6);
4852 *(d+7) = *(s+7);
4853 if (ncopies > 8) {
4854 *(d+8) = *(s+8);
4855 *(d+9) = *(s+9);
4856 }
4857 }
4858 }
4859 }
4860 }
4861
4862 fREe(oldmem);
4863 check_inuse_chunk(newp);
4864 guard_set(av->guard_stored, newp, bytes, nb);
4865 return chunk(newp);
4866 }
4867 }
4868 }
4869
4870 /* If possible, free extra space in old or extended chunk */
4871
4872 assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
4873
4874 remainder_size = newsize - nb;
4875
4876 if (remainder_size >= MINSIZE) { /* split remainder */
4877 remainder = cireg_getfree();
4878 remainder->chunk = chunk_at_offset(chunk(newp), nb);
4879 set_head_size(newp, nb);
4880 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
4881 remainder->prev_size = nb;
4882 hashtable_insert(newp, remainder);
4883 /* Mark remainder as inuse so free() won't complain */
4884 set_all_inuse(remainder);
4885 guard_set(av->guard_stored, remainder, 0, remainder_size);
[172]4886 VALGRIND_MALLOCLIKE_BLOCK(chunk(remainder), remainder_size, 0, 0);
[171]4887 fREe(chunk(remainder));
4888 }
4889 else { /* not enough extra to split off */
4890 set_head_size(newp, newsize);
4891 set_all_inuse(newp);
4892 }
4893
4894 check_inuse_chunk(newp);
4895 guard_set(av->guard_stored, newp, bytes, nb);
[172]4896 VALGRIND_MALLOCLIKE_BLOCK(chunk(newp), bytes, 0, 0);
[171]4897 return chunk(newp);
4898 }
4899
4900 /*
4901 Handle mmap cases
4902 */
4903
4904 else {
4905
4906#if HAVE_MREMAP
4907 INTERNAL_SIZE_T offset = (INTERNAL_SIZE_T) oldp->hash_next;
4908 size_t pagemask = av->pagesize - 1;
4909 char *cp;
4910 CHUNK_SIZE_T sum;
4911
[172]4912
[171]4913 /* Note the extra SIZE_SZ overhead */
[237]4914 /* newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask; */
[171]4915 newsize = (nb + offset + pagemask) & ~pagemask;
4916
4917 /* don't need to remap if still within same page */
4918 if (oldsize == newsize - offset)
4919 {
4920 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4921 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4922 VALGRIND_MALLOCLIKE_BLOCK(oldmem, bytes, 0, 0);
[171]4923 return oldmem;
4924 }
4925
4926 cp = (char*)mremap((char*)chunk(oldp) - offset, oldsize + offset, newsize, 1);
4927
4928 if (cp != (char*)MORECORE_FAILURE) {
4929
4930 hashtable_remove_mmapped(chunk(oldp));
4931
4932 oldp->chunk = (mchunkptr)(cp + offset);
4933 set_head(oldp, (newsize - offset)|IS_MMAPPED|INUSE);
4934
4935 hashtable_add(oldp);
4936
4937 assert(aligned_OK(chunk(oldp))); /* rw fix: newp -> oldp */
4938 assert(( ((INTERNAL_SIZE_T) oldp->hash_next) == offset));
4939
4940 /* update statistics */
4941 sum = av->mmapped_mem += newsize - oldsize;
4942 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
4943 av->max_mmapped_mem = sum;
4944 sum += av->sbrked_mem;
4945 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4946 av->max_total_mem = sum;
4947
4948 guard_set(av->guard_stored, oldp, bytes, nb);
[172]4949 VALGRIND_FREELIKE_BLOCK(oldmem, 0);
4950 VALGRIND_MALLOCLIKE_BLOCK(chunk(oldp), bytes, 0, 0);
[171]4951 return chunk(oldp);
4952 }
4953#endif /* have MREMAP */
4954
4955 /* Note the extra SIZE_SZ overhead. */
4956 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
4957 newmem = oldmem; /* do nothing */
4958 else {
4959 /* Must alloc, copy, free. */
4960 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4961 if (newmem != 0) {
4962 MALLOC_COPY(newmem, oldmem, oldsize);
4963 fREe(oldmem);
4964 }
4965 }
[172]4966 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, 0, 0);
[171]4967 guard_set(av->guard_stored, mem2chunk(newmem), bytes, nb);
4968 return newmem;
4969 }
4970}
4971
4972/*
4973 ---------------------------posix_memalign ----------------------------
4974*/
4975
4976#if __STD_C
4977DL_STATIC int posix_mEMALIGn(Void_t** memptr, size_t alignment, size_t bytes)
4978#else
4979DL_STATIC int posix_mEMALIGn(memptr, alignment, bytes) Void_t** memptr; size_t alignment; size_t bytes;
4980#endif
4981{
4982 mstate av;
4983
4984 if (alignment % sizeof(void *) != 0)
4985 return EINVAL;
4986 if ((alignment & (alignment - 1)) != 0)
4987 return EINVAL;
4988
4989 av = get_malloc_state();
4990 if (!av || av->max_fast == 0) malloc_consolidate(av);
4991 *memptr = mEMALIGn(alignment, bytes);
4992
4993 return (*memptr != NULL ? 0 : ENOMEM);
4994}
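/*
  Usage sketch (illustrative, standard POSIX semantics): posix_memalign
  reports failure through its return value (EINVAL or ENOMEM here)
  rather than through errno:

      void *buf = NULL;
      if (posix_memalign(&buf, 64, 1024) == 0) {
          ... use 1024 bytes at buf, 64-byte aligned ...
          free(buf);
      }
*/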
4995
4996/*
4997 ------------------------------ memalign ------------------------------
4998*/
4999
5000#if __STD_C
5001DL_STATIC Void_t* mEMALIGn(size_t alignment, size_t bytes)
5002#else
5003DL_STATIC Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
5004#endif
5005{
5006 INTERNAL_SIZE_T nb; /* padded request size */
5007 char* m; /* memory returned by malloc call */
5008 chunkinfoptr p; /* corresponding chunk */
5009 char* brk; /* alignment point within p */
5010 chunkinfoptr newp; /* chunk to return */
5011 INTERNAL_SIZE_T newsize; /* its size */
5012 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5013 chunkinfoptr remainder; /* spare room at end to split off */
5014 CHUNK_SIZE_T remainder_size; /* its size */
5015 INTERNAL_SIZE_T size;
[362]5016#if PARANOIA > 2
[171]5017 mstate av;
[362]5018#endif
[171]5019
5020 /* If need less alignment than we give anyway, just relay to malloc */
5021
5022 if (UNLIKELY(alignment <= MALLOC_ALIGNMENT)) return mALLOc(bytes);
5023
5024 /* Otherwise, ensure that it is at least a minimum chunk size */
5025
5026 if (alignment < MINSIZE) alignment = MINSIZE;
5027
5028 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
5029 if (UNLIKELY((alignment & (alignment - 1)) != 0)) {
5030 size_t a = MALLOC_ALIGNMENT * 2;
5031 while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
5032 alignment = a;
5033 }
5034
5035 checked_request2size(bytes, nb);
5036
5037 /*
5038 Strategy: find a spot within that chunk that meets the alignment
5039 request, and then possibly free the leading and trailing space.
5040 */
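  /*
    Numeric sketch (illustrative): for mEMALIGn(256, 1000), nb is
    roughly 1008 and the malloc call below requests nb + 256 + MINSIZE
    bytes. That is always enough to find a 256-aligned spot whose
    leading gap is either zero or at least MINSIZE; the gap and any
    tail beyond nb are then given back with fREe().
  */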
5041
5042
5043 /* Call malloc with worst case padding to hit alignment. */
5044
5045 m = (char*)(mALLOc(nb + alignment + MINSIZE));
5046
5047 if (m == 0) return 0; /* propagate failure */
5048
[362]5049#if PARANOIA > 2
[171]5050 av = get_malloc_state();
[362]5051#endif
[171]5052
5053 p = hashtable_lookup((mchunkptr) m);
5054
5055 if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
5056
5057 /*
5058 Find an aligned spot inside chunk. Since we need to give back
5059 leading space in a chunk of at least MINSIZE, if the first
5060 calculation places us at a spot with less than MINSIZE leader,
5061 we can move to the next aligned spot -- we've allocated enough
5062 total room so that this is always possible.
5063 */
5064
5065 brk = (char*) ((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
5066 -((signed long) alignment)));
5067 if ((CHUNK_SIZE_T)(brk - (char*)(chunk(p))) < MINSIZE)
5068 brk += alignment;
5069
5070 newp = cireg_getfree();
5071 newp->chunk = (mchunkptr)brk;
5072 leadsize = brk - (char*)(chunk(p));
5073 newsize = chunksize(p) - leadsize;
5074
5075 /* For mmapped chunks, just adjust offset */
5076 if (UNLIKELY(chunk_is_mmapped(p))) {
5077 newp->hash_next = (chunkinfoptr) (((INTERNAL_SIZE_T) p->hash_next) + leadsize);
5078 set_head(newp, newsize|IS_MMAPPED|INUSE);
5079 hashtable_remove_mmapped(chunk(p));
5080 freecilst_add(p);
5081 hashtable_add(newp);
5082 guard_set(av->guard_stored, newp, bytes, nb);
5083 return chunk(newp);
5084 }
5085
5086 /* Otherwise, give back leader, use the rest */
5087 set_head(newp, newsize | PREV_INUSE | INUSE);
5088 set_head_size(p, leadsize);
5089 set_all_inuse(newp);
5090 hashtable_add(newp); /* 20.05.2008 rw */
5091 guard_set(av->guard_stored, p, 0, leadsize);
5092 fREe(chunk(p));
5093 p = newp;
5094
5095 assert (newsize >= nb &&
5096 (((PTR_UINT)(chunk(p))) % alignment) == 0);
5097 }
5098
5099 /* Also give back spare room at the end */
5100 if (!chunk_is_mmapped(p)) {
5101 size = chunksize(p);
5102 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
5103 remainder = cireg_getfree();
5104 remainder_size = size - nb;
5105 remainder->chunk = chunk_at_offset(chunk(p), nb);
5106 set_head(remainder, remainder_size | PREV_INUSE | INUSE);
5107 set_head_size(p, nb);
5108 hashtable_add(remainder); /* 20.05.2008 rw */
5109 guard_set(av->guard_stored, remainder, 0, remainder_size);
5110 fREe(chunk(remainder));
5111 }
5112 }
5113
5114 check_inuse_chunk(p);
5115 guard_set(av->guard_stored, p, bytes, nb);
5116 return chunk(p);
5117}
5118
5119/*
5120 ------------------------------ calloc ------------------------------
5121*/
5122
5123#if __STD_C
5124DL_STATIC Void_t* cALLOc(size_t n_elements, size_t elem_size)
5125#else
5126DL_STATIC Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5127#endif
5128{
5129 chunkinfoptr p;
5130 CHUNK_SIZE_T clearsize;
5131 CHUNK_SIZE_T nclears;
5132 INTERNAL_SIZE_T* d;
5133 Void_t* mem;
5134
5135
5136 mem = mALLOc(n_elements * elem_size);
5137
5138 if (mem != 0) {
5139 p = hashtable_lookup(mem);
5140
5141 if (!chunk_is_mmapped(p))
5142 {
5143 /*
5144 Unroll clear of <= 40 bytes (80 if 8byte sizes)
5145 We know that contents have an even number of
5146 INTERNAL_SIZE_T-sized words; minimally 4 (2 on amd64).
5147 */
5148
5149 d = (INTERNAL_SIZE_T*)mem;
5150 clearsize = chunksize(p);
5151 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5152 assert(nclears >= 2);
5153
5154 if (nclears > 10) {
5155 MALLOC_ZERO(d, clearsize);
5156 }
5157
5158 else {
5159 *(d+0) = 0;
5160 *(d+1) = 0;
5161 if (nclears > 2) {
5162 *(d+2) = 0;
5163 *(d+3) = 0;
5164 if (nclears > 4) {
5165 *(d+4) = 0;
5166 *(d+5) = 0;
5167 if (nclears > 6) {
5168 *(d+6) = 0;
5169 *(d+7) = 0;
5170 if (nclears > 8) {
5171 *(d+8) = 0;
5172 *(d+9) = 0;
5173 }
5174 }
5175 }
5176 }
5177 }
5178 }
5179#if ! MMAP_CLEARS
5180 else
5181 {
5182 d = (INTERNAL_SIZE_T*)mem;
5183 clearsize = chunksize(p);
5184 MALLOC_ZERO(d, clearsize);
5185 }
5186#endif
5187 /* Set guard again, since we just cleared it
5188 */
5189 guard_set(get_malloc_state()->guard_stored, p, (n_elements * elem_size), p->size);
5190 }
5191
5192 return mem;
5193}
5194
5195/*
5196 ------------------------------ valloc ------------------------------
5197*/
5198
5199#if __STD_C
5200DL_STATIC Void_t* vALLOc(size_t bytes)
5201#else
5202DL_STATIC Void_t* vALLOc(bytes) size_t bytes;
5203#endif
5204{
5205 /* Ensure initialization */
5206 mstate av = get_malloc_state();
5207 if (!av || av->max_fast == 0) {
5208 malloc_consolidate(av);
5209 av = get_malloc_state();
5210 }
5211 return mEMALIGn(av->pagesize, bytes);
5212}
5213
5214/*
5215 ------------------------------ pvalloc ------------------------------
5216*/
5217
5218
5219#if __STD_C
5220DL_STATIC Void_t* pVALLOc(size_t bytes)
5221#else
5222DL_STATIC Void_t* pVALLOc(bytes) size_t bytes;
5223#endif
5224{
5225 mstate av = get_malloc_state();
5226 size_t pagesz;
5227
5228 /* Ensure initialization */
5229 if (!av || av->max_fast == 0) {
5230 malloc_consolidate(av);
5231 av = get_malloc_state();
5232 }
5233 pagesz = av->pagesize;
5234 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5235}
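/*
  Numeric sketch (illustrative, assuming a 4096-byte page): vALLOc(100)
  returns 100 usable bytes on a page-aligned address, while pVALLOc(100)
  rounds the request itself up to a full page, i.e. it behaves like
  mEMALIGn(4096, 4096).
*/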
5236
5237
5238/*
5239 ------------------------------ malloc_trim ------------------------------
5240*/
5241
5242#if __STD_C
5243DL_STATIC int mTRIm(size_t pad)
5244#else
5245DL_STATIC int mTRIm(pad) size_t pad;
5246#endif
5247{
5248 mstate av = get_malloc_state();
5249 /* Ensure initialization/consolidation */
5250 malloc_consolidate(av);
5251 av = get_malloc_state();
5252#ifndef MORECORE_CANNOT_TRIM
5253 if (morecore32bit(av))
5254 return sYSTRIm(pad, av);
5255 else
5256 return 0;
5257#else
5258 return 0;
5259#endif
5260}
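/*
  Usage sketch (illustrative): the public malloc_trim wrapper relays to
  mTRIm, so

      malloc_trim(0);

  asks dnmalloc to hand back as much unused memory at the top of the
  heap as possible; a non-zero pad keeps that many bytes readily
  available. The return value is 1 if anything was released, else 0.
*/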
5261
5262
5263
5264/*
5265 ------------------------- malloc_usable_size -------------------------
5266*/
5267
5268#if __STD_C
5269DL_STATIC size_t mUSABLe(Void_t* mem)
5270#else
5271DL_STATIC size_t mUSABLe(mem) Void_t* mem;
5272#endif
5273{
5274 chunkinfoptr p;
5275 if (mem != 0) {
5276 p = hashtable_lookup(mem);
5277 if (p && inuse(p)) return chunksize(p);
5278 }
5279 return 0;
5280}
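/*
  Usage sketch (illustrative; the public wrapper is conventionally named
  malloc_usable_size): for a pointer obtained from this allocator and
  still in use, mUSABLe() reports the full chunk size backing it, which
  may exceed the size originally requested; for NULL or unknown pointers
  it returns 0.
*/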
5281
5282/*
5283 ------------------------------ mallinfo ------------------------------
5284*/
5285
5286DL_STATIC struct mallinfo mALLINFo()
5287{
5288 mstate av = get_malloc_state();
5289 struct mallinfo mi;
5290 unsigned int i;
5291 mbinptr b;
5292 chunkinfoptr p;
5293 INTERNAL_SIZE_T avail;
5294 INTERNAL_SIZE_T fastavail;
5295 int nblocks;
5296 int nfastblocks;
5297
5298 /* Ensure initialization */
5299 if (!av || av->top == 0) {
5300 malloc_consolidate(av);
5301 av = get_malloc_state();
5302 }
5303 check_malloc_state();
5304
5305 /* Account for top */
5306 avail = chunksize(av->top);
5307 nblocks = 1; /* top always exists */
5308
5309 /* traverse fastbins */
5310 nfastblocks = 0;
5311 fastavail = 0;
5312
5313 for (i = 0; i < NFASTBINS; ++i) {
5314 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5315 ++nfastblocks;
5316 fastavail += chunksize(p);
5317 }
5318 }
5319
5320 avail += fastavail;
5321
5322 /* traverse regular bins */
5323 for (i = 1; i < NBINS; ++i) {
5324 b = bin_at(av, i);
5325 for (p = last(b); p != b; p = p->bk) {
5326 ++nblocks;
5327 avail += chunksize(p);
5328 }
5329 }
5330
5331 mi.smblks = nfastblocks;
5332 mi.ordblks = nblocks;
5333 mi.fordblks = avail;
5334 mi.uordblks = av->sbrked_mem - avail;
5335 mi.arena = av->sbrked_mem;
5336 mi.hblks = av->n_mmaps;
5337 mi.hblkhd = av->mmapped_mem;
5338 mi.fsmblks = fastavail;
5339 mi.keepcost = chunksize(av->top);
5340 mi.usmblks = av->max_total_mem;
5341 return mi;
5342}
5343
5344/*
5345 ------------------------------ malloc_stats ------------------------------
5346*/
5347
5348DL_STATIC void mSTATs()
5349{
5350 struct mallinfo mi = mALLINFo();
5351
5352 fprintf(stderr, "hashtable = %10lu MB\n",
5353 (CHUNK_SIZE_T)(HASHTABLESIZE / (1024*1024)));
5354 fprintf(stderr, "max system bytes = %10lu\n",
5355 (CHUNK_SIZE_T)(mi.usmblks));
5356 fprintf(stderr, "system bytes = %10lu (%10lu sbrked, %10lu mmaped)\n",
5357 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd),
5358 (CHUNK_SIZE_T)(mi.arena),
5359 (CHUNK_SIZE_T)(mi.hblkhd));
5360 fprintf(stderr, "in use bytes = %10lu\n",
5361 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
5362
5363}
5364
5365
5366/*
5367 ------------------------------ mallopt ------------------------------
5368*/
5369
5370#if __STD_C
5371DL_STATIC int mALLOPt(int param_number, int value)
5372#else
5373DL_STATIC int mALLOPt(param_number, value) int param_number; int value;
5374#endif
5375{
5376 mstate av = get_malloc_state();
5377 /* Ensure initialization/consolidation */
5378 malloc_consolidate(av);
5379 av = get_malloc_state();
5380
5381 switch(param_number) {
5382 case M_MXFAST:
5383 if (value >= 0 && value <= MAX_FAST_SIZE) {
5384 set_max_fast(av, value);
5385 return 1;
5386 }
5387 else
5388 return 0;
5389
5390 case M_TRIM_THRESHOLD:
5391 av->trim_threshold = value;
5392 return 1;
5393
5394 case M_TOP_PAD:
5395 av->top_pad = value;
5396 return 1;
5397
5398 case M_MMAP_THRESHOLD:
5399 av->mmap_threshold = value;
5400 return 1;
5401
5402 case M_MMAP_MAX:
5403 if (value != 0)
5404 return 0;
5405 av->n_mmaps_max = value;
5406 return 1;
5407
5408 default:
5409 return 0;
5410 }
5411}
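/*
  Usage sketch (illustrative, shown with the internal names and the
  dlmalloc-style parameters handled in the switch above):

      mALLOPt(M_TOP_PAD, 1024 * 1024);
      mALLOPt(M_TRIM_THRESHOLD, 2 * 1024 * 1024);

  keeps about 1MB of extra slack when the heap grows and only trims once
  2MB of top space are unused; each call returns 1 if the value was
  accepted and 0 otherwise.
*/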
5412
5413
5414/* $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $ */
5415
5416/*
5417 * Copyright (c) 1996, David Mazieres <dm@uun.org>
5418 * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
5419 *
5420 * Permission to use, copy, modify, and distribute this software for any
5421 * purpose with or without fee is hereby granted, provided that the above
5422 * copyright notice and this permission notice appear in all copies.
5423 *
5424 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
5425 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
5426 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
5427 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
5428 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
5429 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5430 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5431 */
5432
5433/*
5434 * Arc4 random number generator for OpenBSD.
5435 *
5436 * This code is derived from section 17.1 of Applied Cryptography,
5437 * second edition, which describes a stream cipher allegedly
5438 * compatible with RSA Labs "RC4" cipher (the actual description of
5439 * which is a trade secret). The same algorithm is used as a stream
5440 * cipher called "arcfour" in Tatu Ylonen's ssh package.
5441 *
5442 * Here the stream cipher has been modified always to include the time
5443 * when initializing the state. That makes it impossible to
5444 * regenerate the same random sequence twice, so this can't be used
5445 * for encryption, but will generate good random numbers.
5446 *
5447 * RC4 is a registered trademark of RSA Laboratories.
5448 */
5449
5450/* Moved u_int8_t -> unsigned char (portability)
5451 * Eliminated unneeded functions, added read from /dev/urandom taken from:
5452 $MirOS: contrib/code/Snippets/arc4random.c,v 1.3 2008-03-04 22:53:14 tg Exp $
5453 * Modified by Robert Connolly from OpenBSD lib/libc/crypt/arc4random.c v1.11.
5454 * This is arc4random(3) using urandom.
5455 */
5456
5457#include <fcntl.h>
5458#include <limits.h>
5459#include <stdlib.h>
5460#include <sys/param.h>
5461#include <sys/time.h>
5462
5463struct arc4_stream {
5464 unsigned char i;
5465 unsigned char j;
5466 unsigned char s[256];
5467};
5468
5469static int rs_initialized;
5470static struct arc4_stream rs;
5471static pid_t arc4_stir_pid;
5472static int arc4_count;
5473
5474static unsigned char arc4_getbyte(void);
5475
5476static void
5477arc4_init(void)
5478{
5479 int n;
5480
5481 for (n = 0; n < 256; n++)
5482 rs.s[n] = n;
5483 rs.i = 0;
5484 rs.j = 0;
5485}
5486
5487static inline void
5488arc4_addrandom(unsigned char *dat, int datlen)
5489{
5490 int n;
5491 unsigned char si;
5492
5493 rs.i--;
5494 for (n = 0; n < 256; n++) {
5495 rs.i = (rs.i + 1);
5496 si = rs.s[rs.i];
5497 rs.j = (rs.j + si + dat[n % datlen]);
5498 rs.s[rs.i] = rs.s[rs.j];
5499 rs.s[rs.j] = si;
5500 }
5501 rs.j = rs.i;
5502}
5503
5504#ifdef HAVE_SCHED_H
5505#include <sched.h>
5506#endif
5507
5508static void
5509arc4_stir(void)
5510{
5511 int i;
5512 struct {
5513 struct timeval tv1;
5514 struct timeval tv2;
5515 u_int rnd[(128 - 2*sizeof(struct timeval)) / sizeof(u_int)];
5516 } rdat;
5517#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5518 size_t sz = 0;
5519 int fd;
5520#endif
5521
5522 gettimeofday(&rdat.tv1, NULL);
5523
5524
5525 if (!rs_initialized) {
5526 arc4_init();
5527 rs_initialized = 1;
5528 }
5529
5530#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
5531
5532#ifdef HAVE_SCHED_YIELD
5533 /* Yield the processor to introduce some random delay. */
5534 (void) sched_yield();
5535#endif
5536
5537  /*
5538   * Skipped on *BSD (see the #if above): pthread problem in multithreaded code.
5539   */
5540 fd = open("/dev/urandom", O_RDONLY);
5541 if (fd != -1) {
5542 sz = (size_t)read(fd, rdat.rnd, sizeof (rdat.rnd));
5543 close(fd);
5544 }
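  /* A failed read() (-1) becomes a huge value after the (size_t) cast;
     the check below resets sz so it can never look like a good read. */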
5545 if (sz > sizeof (rdat.rnd))
5546 sz = 0;
5547#endif
5548
5549 arc4_stir_pid = getpid();
5550 gettimeofday(&rdat.tv2, NULL);
5551
5552 arc4_addrandom((void *)&rdat, sizeof(rdat));
5553
5554 /*
5555 * Discard early keystream, as per recommendations in:
5556 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
5557 */
5558 for (i = 0; i < 256; i++)
5559 (void)arc4_getbyte();
5560 arc4_count = 1600000;
5561}
5562
5563static unsigned char
5564arc4_getbyte(void)
5565{
5566 unsigned char si, sj;
5567
5568 rs.i = (rs.i + 1);
5569 si = rs.s[rs.i];
5570 rs.j = (rs.j + si);
5571 sj = rs.s[rs.j];
5572 rs.s[rs.i] = sj;
5573 rs.s[rs.j] = si;
5574 return (rs.s[(si + sj) & 0xff]);
5575}
5576
5577
5578/* Changed to return char* (the OpenBSD original returns u_int32_t). */
5579static char *
5580dnmalloc_arc4random(void)
5581{
5582 static char val[4];
5583
5584 /* We only call this once, hence no need for locking. */
5585
5586 /* _ARC4_LOCK(); */
5587 arc4_count -= 4;
5588 if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
5589 arc4_stir();
5590
5591 val[0] = (char) arc4_getbyte();
5592 val[1] = (char) arc4_getbyte();
5593 val[2] = (char) arc4_getbyte();
5594 val[3] = (char) arc4_getbyte();
5595
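  /* Stir again after extracting the bytes, presumably so the state
     left behind cannot be used to reconstruct the value just returned. */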
5596 arc4_stir();
5597 /* _ARC4_UNLOCK(); */
5598 return val;
5599}
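/*
  Usage sketch (illustrative only, kept out of the build with #if 0):
  the function above hands back a pointer to four static bytes of
  keystream, so a caller has to copy them out before any later call
  overwrites the buffer. The helper name below is made up for the
  example; it is not part of dnmalloc.
*/
#if 0
#include <string.h>

static unsigned long dnmalloc_example_seed(void)
{
  unsigned char buf[4];

  /* Copy immediately: the returned buffer is static. */
  memcpy(buf, dnmalloc_arc4random(), 4);

  /* Assemble the four bytes into one 32-bit value. */
  return ((unsigned long)buf[0] << 24) | ((unsigned long)buf[1] << 16) |
         ((unsigned long)buf[2] <<  8) |  (unsigned long)buf[3];
}
#endif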
5600
5601#else
5602int dnmalloc_pthread_init(void) { return 0; }
5603#endif /* ! USE_SYSTEM_MALLOC */